diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 4e1cfae4489..20f1c034c58 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,7 +24,7 @@ com.gradle develocity-maven-extension - 1.22 + 1.22.1 com.gradle diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index c02306f8af8..9029019a121 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -386,12 +386,22 @@ private XceiverClientReply sendCommandWithRetry( List datanodeList = null; DatanodeBlockID blockID = null; - if (request.getCmdType() == ContainerProtos.Type.GetBlock) { + switch (request.getCmdType()) { + case GetBlock: blockID = request.getGetBlock().getBlockID(); - } else if (request.getCmdType() == ContainerProtos.Type.ReadChunk) { + break; + + case ReadChunk: blockID = request.getReadChunk().getBlockID(); - } else if (request.getCmdType() == ContainerProtos.Type.GetSmallFile) { + break; + + case GetSmallFile: blockID = request.getGetSmallFile().getBlock().getBlockID(); + break; + + case VerifyBlock: + blockID = request.getVerifyBlock().getBlockID(); + break; } if (blockID != null) { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index 12ca9978c68..7776e245be0 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -38,9 +38,13 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Objects; +import java.util.Optional; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; @@ -142,8 +146,34 @@ ContainerCommandResponseProto> executePutBlock(boolean close, } if (checksumBlockData != null) { - List currentChunks = getContainerBlockData().getChunksList(); + + // For the same BlockGroupLength, we need to find the larger value of Block DataSize. + // This is because we do not send empty chunks to the DataNode, so the larger value is more accurate. + Map> maxDataSizeByGroup = Arrays.stream(blockData) + .filter(Objects::nonNull) + .collect(Collectors.groupingBy(BlockData::getBlockGroupLength, + Collectors.maxBy(Comparator.comparingLong(BlockData::getSize)))); + BlockData maxBlockData = maxDataSizeByGroup.get(blockGroupLength).get(); + + // When calculating the checksum size, + // We need to consider both blockGroupLength and the actual size of blockData. + // + // We use the smaller value to determine the size of the ChunkList. + // + // 1. In most cases, blockGroupLength is equal to the size of blockData. + // 2. Occasionally, blockData is not fully filled; if a chunk is empty, + // it is not sent to the DN, resulting in blockData size being smaller than blockGroupLength. + // 3. In cases with 'dirty data', + // if an error occurs when writing to the EC-Stripe (e.g., DN reports Container Closed), + // and the length confirmed with OM is smaller, blockGroupLength may be smaller than blockData size. 
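+      // Worked example (hypothetical numbers): with ecChunkSize = 1 MiB, maxBlockData.getSize() = 4 MiB
+      // and blockGroupLength = 2.5 MiB, blockDataSize = min(4 MiB, 2.5 MiB) = 2.5 MiB and
+      // chunkSize = ceil(2.5 MiB / 1 MiB) = 3, so only the first three entries of the checksum chunk list are kept.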
+ long blockDataSize = Math.min(maxBlockData.getSize(), blockGroupLength); + int chunkSize = (int) Math.ceil(((double) blockDataSize / repConfig.getEcChunkSize())); List checksumBlockDataChunks = checksumBlockData.getChunks(); + if (chunkSize > 0) { + checksumBlockDataChunks = checksumBlockData.getChunks().subList(0, chunkSize); + } + + List currentChunks = getContainerBlockData().getChunksList(); Preconditions.checkArgument( currentChunks.size() == checksumBlockDataChunks.size(), @@ -269,7 +299,7 @@ public CompletableFuture executePutBlock(boolean close, throw ce; }); } catch (IOException | ExecutionException e) { - throw new IOException(EXCEPTION_MSG + e.toString(), e); + throw new IOException(EXCEPTION_MSG + e, e); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); handleInterruptedException(ex, false); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 794b972f150..6bc070a0742 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -29,7 +29,9 @@ import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -102,11 +104,9 @@ @InterfaceStability.Stable public final class HddsUtils { - private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class); - public static final ByteString REDACTED = - ByteString.copyFromUtf8(""); + public static final ByteString REDACTED = ByteString.copyFromUtf8(""); private static final int ONE_MB = SizeInBytes.valueOf("1m").getSizeInt(); @@ -116,36 +116,30 @@ private HddsUtils() { } /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM. + * Retrieve the socket address that clients should use to connect to the SCM. * * @return Target {@code InetSocketAddress} for the SCM client endpoint. 
*/ - public static Collection getScmAddressForClients( - ConfigurationSource conf) { - + public static Collection getScmAddressForClients(ConfigurationSource conf) { if (SCMHAUtils.getScmServiceId(conf) != null) { List scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf); - Collection scmAddressList = - new HashSet<>(scmNodeInfoList.size()); + Collection scmAddressList = new HashSet<>(scmNodeInfoList.size()); for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) { if (scmNodeInfo.getScmClientAddress() == null) { throw new ConfigurationException("Ozone scm client address is not " + "set for SCM service-id " + scmNodeInfo.getServiceId() + "node-id" + scmNodeInfo.getNodeId()); } - scmAddressList.add( - NetUtils.createSocketAddr(scmNodeInfo.getScmClientAddress())); + scmAddressList.add(NetUtils.createSocketAddr(scmNodeInfo.getScmClientAddress())); } return scmAddressList; } else { String address = conf.getTrimmed(OZONE_SCM_CLIENT_ADDRESS_KEY); - int port = -1; + int port; if (address == null) { // fall back to ozone.scm.names for non-ha - Collection scmAddresses = - conf.getTrimmedStringCollection(OZONE_SCM_NAMES); + Collection scmAddresses = conf.getTrimmedStringCollection(OZONE_SCM_NAMES); if (scmAddresses.isEmpty()) { throw new ConfigurationException("Ozone scm client address is not " + @@ -160,35 +154,26 @@ public static Collection getScmAddressForClients( address = scmAddresses.iterator().next(); - port = conf.getInt(OZONE_SCM_CLIENT_PORT_KEY, - OZONE_SCM_CLIENT_PORT_DEFAULT); + port = conf.getInt(OZONE_SCM_CLIENT_PORT_KEY, OZONE_SCM_CLIENT_PORT_DEFAULT); } else { - port = getHostPort(address) - .orElse(conf.getInt(OZONE_SCM_CLIENT_PORT_KEY, - OZONE_SCM_CLIENT_PORT_DEFAULT)); + port = getHostPort(address).orElse(conf.getInt(OZONE_SCM_CLIENT_PORT_KEY, OZONE_SCM_CLIENT_PORT_DEFAULT)); } - return Collections.singletonList( - NetUtils.createSocketAddr(getHostName(address).get() + ":" + port)); + return Collections.singletonList(NetUtils.createSocketAddr(getHostName(address).get() + ":" + port)); } } /** * Retrieve the hostname, trying the supplied config keys in order. - * Each config value may be absent, or if present in the format - * host:port (the :port part is optional). + * Each config value may be absent, or if present in the format host:port (the :port part is optional). * * @param conf - Conf * @param keys a list of configuration key names. - * * @return first hostname component found from the given keys, or absent. - * @throws IllegalArgumentException if any values are not in the 'host' - * or host:port format. + * @throws IllegalArgumentException if any values are not in the 'host' or host:port format. */ - public static Optional getHostNameFromConfigKeys( - ConfigurationSource conf, - String... keys) { - for (final String key : keys) { + public static Optional getHostNameFromConfigKeys(ConfigurationSource conf, String... keys) { + for (String key : keys) { final String value = conf.getTrimmed(key); final Optional hostName = getHostName(value); if (hostName.isPresent()) { @@ -200,6 +185,7 @@ public static Optional getHostNameFromConfigKeys( /** * Gets the hostname or Indicates that it is absent. 
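+   * <p>Example (illustrative values): {@code getHostName("scm1.example.com:9876")} returns
+   * {@code Optional.of("scm1.example.com")}, while {@code getHostName("")} returns {@code Optional.empty()}.</p>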
+ * * @param value host or host:port * @return hostname */ @@ -207,8 +193,8 @@ public static Optional getHostName(String value) { if ((value == null) || value.isEmpty()) { return Optional.empty(); } - String hostname = value.replaceAll("\\:[0-9]+$", ""); - if (hostname.length() == 0) { + String hostname = value.replaceAll(":[0-9]+$", ""); + if (hostname.isEmpty()) { return Optional.empty(); } else { return Optional.of(hostname); @@ -217,6 +203,7 @@ public static Optional getHostName(String value) { /** * Gets the port if there is one, returns empty {@code OptionalInt} otherwise. + * * @param value String in host:port format. * @return Port */ @@ -238,12 +225,10 @@ public static OptionalInt getHostPort(String value) { * * @param conf Conf * @param keys a list of configuration key names. - * * @return first number found from the given keys, or absent. */ - public static OptionalInt getNumberFromConfigKeys( - ConfigurationSource conf, String... keys) { - for (final String key : keys) { + public static OptionalInt getNumberFromConfigKeys(ConfigurationSource conf, String... keys) { + for (String key : keys) { final String value = conf.getTrimmed(key); if (value != null) { return OptionalInt.of(Integer.parseInt(value)); @@ -254,19 +239,15 @@ public static OptionalInt getNumberFromConfigKeys( /** * Retrieve the port number, trying the supplied config keys in order. - * Each config value may be absent, or if present in the format - * host:port (the :port part is optional). + * Each config value may be absent, or if present in the format host:port (the :port part is optional). * * @param conf Conf * @param keys a list of configuration key names. - * * @return first port number component found from the given keys, or absent. - * @throws IllegalArgumentException if any values are not in the 'host' - * or host:port format. + * @throws IllegalArgumentException if any values are not in the 'host' or host:port format. */ - public static OptionalInt getPortNumberFromConfigKeys( - ConfigurationSource conf, String... keys) { - for (final String key : keys) { + public static OptionalInt getPortNumberFromConfigKeys(ConfigurationSource conf, String... keys) { + for (String key : keys) { final String value = conf.getTrimmed(key); final OptionalInt hostPort = getHostPort(value); if (hostPort.isPresent()) { @@ -282,48 +263,40 @@ public static OptionalInt getPortNumberFromConfigKeys( * @return A collection of SCM addresses * @throws IllegalArgumentException If the configuration is invalid */ - public static Collection getSCMAddressForDatanodes( - ConfigurationSource conf) { + public static Collection getSCMAddressForDatanodes(ConfigurationSource conf) { // First check HA style config, if not defined fall back to OZONE_SCM_NAMES if (SCMHAUtils.getScmServiceId(conf) != null) { List scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf); - Collection scmAddressList = - new HashSet<>(scmNodeInfoList.size()); + Collection scmAddressList = new HashSet<>(scmNodeInfoList.size()); for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) { - scmAddressList.add( - NetUtils.createSocketAddr(scmNodeInfo.getScmDatanodeAddress())); + scmAddressList.add(NetUtils.createSocketAddr(scmNodeInfo.getScmDatanodeAddress())); } return scmAddressList; } else { // fall back to OZONE_SCM_NAMES. 
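+      // Illustrative value (hostnames are placeholders): ozone.scm.names = scm1.example.com,scm2.example.com;
+      // each entry may optionally carry a :port suffix, otherwise the configured datanode port is used.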
- Collection names = - conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES); + Collection names = conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES); if (names.isEmpty()) { throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES - + " need to be a set of valid DNS names or IP addresses." - + " Empty address list found."); + + " need to be a set of valid DNS names or IP addresses. Empty address list found."); } Collection addresses = new HashSet<>(names.size()); for (String address : names) { Optional hostname = getHostName(address); if (!hostname.isPresent()) { - throw new IllegalArgumentException("Invalid hostname for SCM: " - + address); + throw new IllegalArgumentException("Invalid hostname for SCM: " + address); } int port = getHostPort(address) - .orElse(conf.getInt(OZONE_SCM_DATANODE_PORT_KEY, - OZONE_SCM_DATANODE_PORT_DEFAULT)); - InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(), - port); + .orElse(conf.getInt(OZONE_SCM_DATANODE_PORT_KEY, OZONE_SCM_DATANODE_PORT_DEFAULT)); + InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(), port); addresses.add(addr); } if (addresses.size() > 1) { - LOG.warn("When SCM HA is configured, configure {} appended with " + - "serviceId and nodeId. {} is deprecated.", OZONE_SCM_ADDRESS_KEY, + LOG.warn("When SCM HA is configured, configure {} appended with serviceId and nodeId. {} is deprecated.", + OZONE_SCM_ADDRESS_KEY, OZONE_SCM_NAMES); } return addresses; @@ -336,49 +309,40 @@ public static Collection getSCMAddressForDatanodes( * @return Recon address * @throws IllegalArgumentException If the configuration is invalid */ - public static InetSocketAddress getReconAddresses( - ConfigurationSource conf) { + public static InetSocketAddress getReconAddresses(ConfigurationSource conf) { String name = conf.get(OZONE_RECON_ADDRESS_KEY); if (StringUtils.isEmpty(name)) { return null; } Optional hostname = getHostName(name); if (!hostname.isPresent()) { - throw new IllegalArgumentException("Invalid hostname for Recon: " - + name); + throw new IllegalArgumentException("Invalid hostname for Recon: " + name); } int port = getHostPort(name).orElse(OZONE_RECON_DATANODE_PORT_DEFAULT); return NetUtils.createSocketAddr(hostname.get(), port); } /** - * Returns the hostname for this datanode. If the hostname is not - * explicitly configured in the given config, then it is determined - * via the DNS class. + * Returns the hostname for this datanode. + * If the hostname is not explicitly configured in the given config, then it is determined via the DNS class. 
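+   * <p>Resolution order, as implemented below: {@code DFS_DATANODE_HOST_NAME_KEY} if set, otherwise a DNS lookup
+   * driven by {@code HADOOP_SECURITY_DNS_INTERFACE_KEY}/{@code HADOOP_SECURITY_DNS_NAMESERVER_KEY} (or the legacy
+   * datanode DNS keys), optionally falling back to the hosts file.</p>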
* * @param conf Configuration - * * @return the hostname (NB: may not be a FQDN) - * @throws UnknownHostException if the dfs.datanode.dns.interface - * option is used and the hostname can not be determined + * @throws UnknownHostException if the dfs.datanode.dns.interface option is used and the hostname cannot be determined */ - public static String getHostName(ConfigurationSource conf) - throws UnknownHostException { + public static String getHostName(ConfigurationSource conf) throws UnknownHostException { String name = conf.get(DFS_DATANODE_HOST_NAME_KEY); if (name == null) { - String dnsInterface = conf.get( - CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY); - String nameServer = conf.get( - CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY); + String dnsInterface = conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY); + String nameServer = conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY); boolean fallbackToHosts = false; if (dnsInterface == null) { // Try the legacy configuration keys. dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY); - dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY); nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY); } else { - // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file + // If HADOOP_SECURITY_DNS_* is set, then also attempt hosts file // resolution if DNS fails. We will not use hosts file resolution // by default to avoid breaking existing clusters. fallbackToHosts = true; @@ -390,31 +354,28 @@ public static String getHostName(ConfigurationSource conf) } /** - * Retrieve the socket address that is used by Datanode. - * @param conf + * Retrieve the socket address used by Datanode. + * + * @param conf Ozone configuration. * @return Target InetSocketAddress for the Datanode service endpoint. */ - public static InetSocketAddress - getDatanodeRpcAddress(ConfigurationSource conf) { - final String host = getHostNameFromConfigKeys(conf, - HDDS_DATANODE_CLIENT_BIND_HOST_KEY) + public static InetSocketAddress getDatanodeRpcAddress(ConfigurationSource conf) { + final String host = getHostNameFromConfigKeys(conf, HDDS_DATANODE_CLIENT_BIND_HOST_KEY) .orElse(HDDS_DATANODE_CLIENT_BIND_HOST_DEFAULT); - final int port = getPortNumberFromConfigKeys(conf, - HDDS_DATANODE_CLIENT_ADDRESS_KEY) - .orElse(conf.getInt(HDDS_DATANODE_CLIENT_PORT_KEY, - HDDS_DATANODE_CLIENT_PORT_DEFAULT)); + final int port = getPortNumberFromConfigKeys(conf, HDDS_DATANODE_CLIENT_ADDRESS_KEY) + .orElse(conf.getInt(HDDS_DATANODE_CLIENT_PORT_KEY, HDDS_DATANODE_CLIENT_PORT_DEFAULT)); return NetUtils.createSocketAddr(host + ":" + port); } /** - * Checks if the container command is read only or not. + * Checks if the container command is read-only or not. + * * @param proto ContainerCommand Request proto - * @return True if its readOnly , false otherwise. + * @return {@code true} if its read-only, {@code false} otherwise. */ - public static boolean isReadOnly( - ContainerCommandRequestProtoOrBuilder proto) { + public static boolean isReadOnly(ContainerCommandRequestProtoOrBuilder proto) { switch (proto.getCmdType()) { case ReadContainer: case ReadChunk: @@ -424,6 +385,7 @@ public static boolean isReadOnly( case ListContainer: case ListChunk: case GetCommittedBlockLength: + case VerifyBlock: return true; case CloseContainer: case WriteChunk: @@ -447,8 +409,7 @@ public static boolean isReadOnly( } /** - * Returns true if the container is in open to write state - * (OPEN or RECOVERING). 
+ * Returns {@code true} if the container is in open to write state ({@link State#OPEN} or {@link State#RECOVERING}). * * @param state - container state */ @@ -457,18 +418,15 @@ public static boolean isOpenToWriteState(State state) { } /** - * Not all datanode container cmd protocol has embedded ozone block token. - * Block token are issued by Ozone Manager and return to Ozone client to - * read/write data on datanode via input/output stream. - * Ozone datanode uses this helper to decide which command requires block - * token. - * @return true if it is a cmd that block token should be checked when - * security is enabled - * false if block token does not apply to the command. + * Not all datanode container cmd protocols have embedded ozone block token. + * Block token is issued by Ozone Manager and returns to an Ozone client + * to read/write data on datanode via input/output stream. + * Ozone datanode uses this helper to decide which command requires block token. * + * @return {@code true} if it is a cmd that block token should be checked + * when security is enabled {@code false} if block token does not apply to the command. */ - public static boolean requireBlockToken( - ContainerProtos.Type cmdType) { + public static boolean requireBlockToken(ContainerProtos.Type cmdType) { switch (cmdType) { case DeleteBlock: case DeleteChunk: @@ -480,14 +438,21 @@ public static boolean requireBlockToken( case ReadChunk: case WriteChunk: case FinalizeBlock: + case VerifyBlock: return true; default: return false; } } - public static boolean requireContainerToken( - ContainerProtos.Type cmdType) { + /** + * Determines if a specific container command type requires a container token. + * + * @param cmdType the type of the container command being checked + * @return {@code true} if the container command type requires a container token, + * {@code false} otherwise + */ + public static boolean requireContainerToken(ContainerProtos.Type cmdType) { switch (cmdType) { case CloseContainer: case CreateContainer: @@ -503,6 +468,7 @@ public static boolean requireContainerToken( /** * Return the block ID of container commands that are related to blocks. + * * @param msg container command * @return block ID. */ @@ -564,6 +530,11 @@ public static BlockID getBlockID(ContainerCommandRequestProtoOrBuilder msg) { blockID = msg.getFinalizeBlock().getBlockID(); } break; + case VerifyBlock: + if (msg.hasVerifyBlock()) { + blockID = msg.getVerifyBlock().getBlockID(); + } + break; default: break; } @@ -575,8 +546,7 @@ public static BlockID getBlockID(ContainerCommandRequestProtoOrBuilder msg) { /** * Register the provided MBean with additional JMX ObjectName properties. - * If additional properties are not supported then fallback to registering - * without properties. + * If additional properties are not supported, then fallback to registering without properties. * * @param serviceName - see {@link MBeans#register} * @param mBeanName - see {@link MBeans#register} @@ -584,33 +554,25 @@ public static BlockID getBlockID(ContainerCommandRequestProtoOrBuilder msg) { * @param mBean - the MBean to register. * @return the named used to register the MBean. */ - public static ObjectName registerWithJmxProperties( - String serviceName, String mBeanName, Map jmxProperties, - Object mBean) { + public static ObjectName registerWithJmxProperties(String serviceName, String mBeanName, + Map jmxProperties, Object mBean) { try { - // Check support for registering with additional properties. 
- final Method registerMethod = MBeans.class.getMethod( - "register", String.class, String.class, + final Method registerMethod = MBeans.class.getMethod("register", String.class, String.class, Map.class, Object.class); - return (ObjectName) registerMethod.invoke( - null, serviceName, mBeanName, jmxProperties, mBean); - - } catch (NoSuchMethodException | IllegalAccessException | - InvocationTargetException e) { + return (ObjectName) registerMethod.invoke(null, serviceName, mBeanName, jmxProperties, mBean); + } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { // Fallback - if (LOG.isTraceEnabled()) { - LOG.trace("Registering MBean {} without additional properties {}", - mBeanName, jmxProperties); - } + LOG.trace("Registering MBean {} without additional properties {}", mBeanName, jmxProperties); return MBeans.register(serviceName, mBeanName, mBean); } } /** * Get the current time in milliseconds. + * * @return the current time in milliseconds. */ public static long getTime() { @@ -620,52 +582,54 @@ public static long getTime() { /** * Basic validation for {@code path}: checks that it is a descendant of * (or the same as) the given {@code ancestor}. - * @param path the path to be validated - * @param ancestor a trusted path that is supposed to be the ancestor of - * {@code path} - * @throws NullPointerException if either {@code path} or {@code ancestor} is - * null - * @throws IllegalArgumentException if {@code ancestor} is not really the - * ancestor of {@code path} + * + * @param path the path to be validated. + * @param ancestor a trusted path that is supposed to be the ancestor of {@code path}. + * @throws NullPointerException if either {@code path} or {@code ancestor} is {@code null}. + * @throws IllegalArgumentException if {@code ancestor} is not really the ancestor of {@code path} */ public static void validatePath(Path path, Path ancestor) { - Preconditions.checkNotNull(path, - "Path should not be null"); - Preconditions.checkNotNull(ancestor, - "Ancestor should not be null"); - Preconditions.checkArgument( - path.normalize().startsWith(ancestor.normalize()), + Preconditions.checkNotNull(path, "Path should not be null"); + Preconditions.checkNotNull(ancestor, "Ancestor should not be null"); + Preconditions.checkArgument(path.normalize().startsWith(ancestor.normalize()), "Path should be a descendant of %s", ancestor); } + /** + * Creates a directory based on the provided directory path. + * If the specified directory does not exist or is not a directory, it throws an {@link IllegalArgumentException}. + * + * @param dirPath The path of the directory to be created. + * @return A File object representing the directory. + * @throws IllegalArgumentException if the path cannot be created as a directory. + */ public static File createDir(String dirPath) { - File dirFile = new File(dirPath); - if (!dirFile.mkdirs() && !dirFile.exists()) { - throw new IllegalArgumentException("Unable to create path: " + dirFile); + Path dirFile = Paths.get(dirPath); + try { + Files.createDirectories(dirFile); + } catch (IOException e) { + throw new IllegalArgumentException("Unable to create path: " + dirFile, e); } - return dirFile; + return dirFile.toFile(); } /** * Utility string formatter method to display SCM roles. 
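+   * <p>Example (hypothetical node string): {@code format(Arrays.asList("scm1.example.com:9894:LEADER"))} returns
+   * {@code "{ HostName : scm1.example.com, Ratis Port : 9894, Role : LEADER } "}.</p>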
* - * @param nodes - * @return + * @param nodes a list of nodes represented in the format "HostName:RatisPort:Role" + * @return a formatted string representing the list of nodes in structured form */ public static String format(List nodes) { StringBuilder sb = new StringBuilder(); for (String node : nodes) { String[] x = node.split(":"); - sb.append(String - .format("{ HostName : %s, Ratis Port : %s, Role : %s } ", x[0], x[1], - x[2])); + sb.append(String.format("{ HostName : %s, Ratis Port : %s, Role : %s } ", x[0], x[1], x[2])); } return sb.toString(); } /** * Return Ozone service shutdown time out. - * @param conf */ public static long getShutDownTimeOut(ConfigurationSource conf) { return conf.getObject(OzoneServiceConfig.class).getServiceShutdownTimeout(); @@ -680,8 +644,7 @@ public static int roundupMb(long bytes) { /** * Unwrap exception to check if it is some kind of access control problem - * ({@link AccessControlException} or {@link SecretManager.InvalidToken}) - * or a RpcException. + * ({@link AccessControlException} or {@link SecretManager.InvalidToken}) or a {@link RpcException}. */ public static Throwable getUnwrappedException(Exception ex) { if (ex instanceof ServiceException) { @@ -690,9 +653,8 @@ public static Throwable getUnwrappedException(Exception ex) { t = ((RemoteException) t).unwrapRemoteException(); } while (t != null) { - if (t instanceof RpcException || - t instanceof AccessControlException || - t instanceof SecretManager.InvalidToken) { + if (t instanceof RpcException || t instanceof AccessControlException + || t instanceof SecretManager.InvalidToken) { return t; } t = t.getCause(); @@ -702,28 +664,24 @@ public static Throwable getUnwrappedException(Exception ex) { } /** - * For some Rpc Exceptions, client should not failover. + * For some Rpc Exceptions, a client should not fail over. */ public static boolean shouldNotFailoverOnRpcException(Throwable exception) { if (exception instanceof RpcException) { - // Should not failover for following exceptions - if (exception instanceof RpcNoSuchMethodException || - exception instanceof RpcNoSuchProtocolException || - exception instanceof RPC.VersionMismatch) { - return true; - } - if (exception.getMessage().contains( - "RPC response exceeds maximum data length") || - exception.getMessage().contains("RPC response has invalid length")) { + // Should not fail over for the following exceptions + if (exception instanceof RpcNoSuchMethodException || exception instanceof RpcNoSuchProtocolException + || exception instanceof RPC.VersionMismatch) { return true; } + return exception.getMessage().contains("RPC response exceeds maximum data length") + || exception.getMessage().contains("RPC response has invalid length"); } return false; } /** - * Remove binary data from request {@code msg}. (May be incomplete, feel - * free to add any missing cleanups.) + * Remove binary data from request {@code msg}. + * Maybe incomplete, feel free to add any missing cleanups. 
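+   * <p>Currently this redacts the {@code WriteChunk} and {@code PutSmallFile} payloads, replacing the data
+   * with {@link #REDACTED}.</p>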
*/ public static ContainerProtos.ContainerCommandRequestProto processForDebug( ContainerProtos.ContainerCommandRequestProto msg) { @@ -733,8 +691,7 @@ public static ContainerProtos.ContainerCommandRequestProto processForDebug( } if (msg.hasWriteChunk() || msg.hasPutSmallFile()) { - ContainerProtos.ContainerCommandRequestProto.Builder builder = - msg.toBuilder(); + ContainerProtos.ContainerCommandRequestProto.Builder builder = msg.toBuilder(); if (msg.hasWriteChunk()) { builder.getWriteChunkBuilder().setData(REDACTED); } @@ -748,8 +705,8 @@ public static ContainerProtos.ContainerCommandRequestProto processForDebug( } /** - * Remove binary data from response {@code msg}. (May be incomplete, feel - * free to add any missing cleanups.) + * Remove binary data from response {@code msg}. + * Maybe incomplete, feel free to add any missing cleanups. */ public static ContainerProtos.ContainerCommandResponseProto processForDebug( ContainerProtos.ContainerCommandResponseProto msg) { @@ -759,8 +716,7 @@ public static ContainerProtos.ContainerCommandResponseProto processForDebug( } if (msg.hasReadChunk() || msg.hasGetSmallFile()) { - ContainerProtos.ContainerCommandResponseProto.Builder builder = - msg.toBuilder(); + ContainerProtos.ContainerCommandResponseProto.Builder builder = msg.toBuilder(); if (msg.hasReadChunk()) { if (msg.getReadChunk().hasData()) { builder.getReadChunkBuilder().setData(REDACTED); @@ -790,7 +746,7 @@ public static ContainerProtos.ContainerCommandResponseProto processForDebug( /** * Redacts sensitive configuration. - * Sorts all properties by key name + * Sorts all properties by key name. * * @param conf OzoneConfiguration object to be printed. * @return Sorted Map of properties @@ -809,6 +765,13 @@ public static Map processForLogging(OzoneConfiguration conf) { return sortedOzoneProps; } + /** + * Generates a thread name prefix based on the given identifier. + * + * @param id an identifier object; + * if non-null and non-empty, it will be converted to a string and appended with a hyphen. + * @return a string prefix based on the identifier, or an empty string if the identifier is null or empty. + */ @Nonnull public static String threadNamePrefix(@Nullable Object id) { return id != null && !"".equals(id) @@ -817,11 +780,9 @@ public static String threadNamePrefix(@Nullable Object id) { } /** - * Execute some code and ensure thread name is not changed - * (workaround for HADOOP-18433). + * Execute some code and ensure the thread name is not changed (workaround for HADOOP-18433). */ - public static T preserveThreadName( - CheckedSupplier supplier) throws E { + public static T preserveThreadName(CheckedSupplier supplier) throws E { final Thread thread = Thread.currentThread(); final String threadName = thread.getName(); @@ -839,8 +800,7 @@ public static T preserveThreadName( * Transform a protobuf UUID to Java UUID. */ public static UUID fromProtobuf(HddsProtos.UUID uuid) { - Objects.requireNonNull(uuid, - "HddsProtos.UUID can't be null to transform to java UUID."); + Objects.requireNonNull(uuid, "HddsProtos.UUID can't be null to transform to java UUID."); return new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()); } @@ -848,18 +808,17 @@ public static UUID fromProtobuf(HddsProtos.UUID uuid) { * Transform a Java UUID to protobuf UUID. 
*/ public static HddsProtos.UUID toProtobuf(UUID uuid) { - Objects.requireNonNull(uuid, - "UUID can't be null to transform to protobuf UUID."); + Objects.requireNonNull(uuid, "UUID can't be null to transform to protobuf UUID."); return HddsProtos.UUID.newBuilder() .setMostSigBits(uuid.getMostSignificantBits()) .setLeastSigBits(uuid.getLeastSignificantBits()) .build(); } - /** Concatenate stack trace {@code elements} (one per line) starting at - * {@code startIndex}. */ - public static @Nonnull String formatStackTrace( - @Nullable StackTraceElement[] elements, int startIndex) { + /** + * Concatenate stack trace {@code elements} (one per line) starting at {@code startIndex}. + */ + public static @Nonnull String formatStackTrace(@Nullable StackTraceElement[] elements, int startIndex) { if (elements != null && elements.length > startIndex) { final StringBuilder sb = new StringBuilder(); for (int line = startIndex; line < elements.length; line++) { @@ -871,8 +830,7 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) { } /** @return current thread stack trace if {@code logger} has debug enabled */ - public static @Nullable StackTraceElement[] getStackTrace( - @Nonnull Logger logger) { + public static @Nullable StackTraceElement[] getStackTrace(@Nonnull Logger logger) { return logger.isDebugEnabled() ? Thread.currentThread().getStackTrace() : null; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index d3f39c023b7..f6c4ea95974 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -55,15 +56,15 @@ public final class ContainerCommandResponseBuilders { /** - * Returns a Container Command Response Builder with the specified result - * and message. - * @param request requestProto message. + * Returns a Container Command Response Builder with the specified result and message. + * + * @param request request message. * @param result result of the command. * @param message response message. * @return ContainerCommand Response Builder. */ - public static Builder getContainerCommandResponse( - ContainerCommandRequestProto request, Result result, String message) { + public static Builder getContainerCommandResponse(ContainerCommandRequestProto request, Result result, + String message) { return ContainerCommandResponseProto.newBuilder() .setCmdType(request.getCmdType()) @@ -73,15 +74,14 @@ public static Builder getContainerCommandResponse( } /** - * Returns a Container Command Response Builder. This call is used to build - * success responses. Calling function can add other fields to the response - * as required. - * @param request requestProto message. - * @return ContainerCommand Response Builder with result as SUCCESS. + * Returns a Container Command Response Builder. + * This call is used to build success responses. 
+ * Calling function can add other fields to the response as required. + * + * @param request request message. + * @return ContainerCommand Response Builder with a result as {@link Result#SUCCESS}. */ - public static Builder getSuccessResponseBuilder( - ContainerCommandRequestProto request) { - + public static Builder getSuccessResponseBuilder(ContainerCommandRequestProto request) { return ContainerCommandResponseProto.newBuilder() .setCmdType(request.getCmdType()) .setTraceID(request.getTraceID()) @@ -89,68 +89,65 @@ public static Builder getSuccessResponseBuilder( } /** - * Returns a Container Command Response. This call is used for creating null - * success responses. - * @param request requestProto message. - * @return ContainerCommand Response with result as SUCCESS. + * Returns a Container Command Response. This call is used for creating {@code null} success responses. + * + * @param request request message. + * @return ContainerCommand Response with a result as {@link Result#SUCCESS}. */ - public static ContainerCommandResponseProto getSuccessResponse( - ContainerCommandRequestProto request) { - + public static ContainerCommandResponseProto getSuccessResponse(ContainerCommandRequestProto request) { return getSuccessResponseBuilder(request) .setMessage("") .build(); } /** - * We found a command type but no associated payload for the command. Hence - * return malformed Command as response. + * We found a command type but no associated payload for the command. + * Hence, return malformed Command as response. * * @param request - Protobuf message. - * @return ContainerCommandResponseProto - MALFORMED_REQUEST. + * @return ContainerCommand Response with a result as {@link Result#MALFORMED_REQUEST}. */ - public static ContainerCommandResponseProto malformedRequest( - ContainerCommandRequestProto request) { - - return getContainerCommandResponse(request, Result.MALFORMED_REQUEST, - "Cmd type does not match the payload.") + public static ContainerCommandResponseProto malformedRequest(ContainerCommandRequestProto request) { + return getContainerCommandResponse(request, Result.MALFORMED_REQUEST, "Cmd type does not match the payload.") .build(); } /** - * We found a command type that is not supported yet. + * We found a command type not supported yet. * * @param request - Protobuf message. - * @return ContainerCommandResponseProto - UNSUPPORTED_REQUEST. + * @return ContainerCommand Response with a result as {@link Result#UNSUPPORTED_REQUEST}. */ - public static ContainerCommandResponseProto unsupportedRequest( - ContainerCommandRequestProto request) { - - return getContainerCommandResponse(request, Result.UNSUPPORTED_REQUEST, - "Server does not support this command yet.") + public static ContainerCommandResponseProto unsupportedRequest(ContainerCommandRequestProto request) { + return getContainerCommandResponse(request, Result.UNSUPPORTED_REQUEST, "Server does not support this command yet.") .build(); } /** - * Returns putBlock response success. + * Returns successful putBlock response. + * * @param msg - Request. - * @return Response. + * @return Successful PutBlock response. 
*/ - public static ContainerCommandResponseProto putBlockResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { + public static ContainerCommandResponseProto putBlockResponseSuccess(ContainerCommandRequestProto msg, + BlockData blockData) { PutBlockResponseProto.Builder putBlock = PutBlockResponseProto.newBuilder() - .setCommittedBlockLength(getCommittedBlockLengthResponseBuilder( - blockData.getSize(), blockData.getBlockID())); + .setCommittedBlockLength(getCommittedBlockLengthResponseBuilder(blockData.getSize(), blockData.getBlockID())); return getSuccessResponseBuilder(msg) .setPutBlock(putBlock) .build(); } - public static ContainerCommandResponseProto getBlockDataResponse( - ContainerCommandRequestProto msg, BlockData data) { - + /** + * Generates a successful response containing block data for the given request. + * + * @param msg The request message. + * @param data The block data to be included in the response. + * @return A ContainerCommandResponseProto object containing the block data. + */ + public static ContainerCommandResponseProto getBlockDataResponse(ContainerCommandRequestProto msg, BlockData data) { GetBlockResponseProto.Builder getBlock = GetBlockResponseProto.newBuilder() .setBlockData(data); @@ -159,35 +156,49 @@ public static ContainerCommandResponseProto getBlockDataResponse( .build(); } - public static ContainerCommandResponseProto getListBlockResponse( - ContainerCommandRequestProto msg, List data) { + /** + * Generates a response containing a list of block data for the given request. + * + * @param msg The request message. + * @param data The list of block data to be included in the response. + * @return A ContainerCommandResponseProto object containing the list of block data. + */ + public static ContainerCommandResponseProto getListBlockResponse(ContainerCommandRequestProto msg, + List data) { - ListBlockResponseProto.Builder builder = - ListBlockResponseProto.newBuilder(); + ListBlockResponseProto.Builder builder = ListBlockResponseProto.newBuilder(); builder.addAllBlockData(data); return getSuccessResponseBuilder(msg) .setListBlock(builder) .build(); } + /** - * Returns successful getCommittedBlockLength Response. - * @param msg - Request. - * @return Response. + * Generates a response based on the provided request message and block length. + * + * @param msg The request message of type ContainerCommandRequestProto. + * @param blockLength The length of the block to be included in the response. + * @return A ContainerCommandResponseProto object containing the block length information. */ - public static ContainerCommandResponseProto getBlockLengthResponse( - ContainerCommandRequestProto msg, long blockLength) { + public static ContainerCommandResponseProto getBlockLengthResponse(ContainerCommandRequestProto msg, + long blockLength) { GetCommittedBlockLengthResponseProto.Builder committedBlockLength = - getCommittedBlockLengthResponseBuilder(blockLength, - msg.getGetCommittedBlockLength().getBlockID()); + getCommittedBlockLengthResponseBuilder(blockLength, msg.getGetCommittedBlockLength().getBlockID()); return getSuccessResponseBuilder(msg) .setGetCommittedBlockLength(committedBlockLength) .build(); } - public static GetCommittedBlockLengthResponseProto.Builder - getCommittedBlockLengthResponseBuilder(long blockLength, + /** + * Constructs a GetCommittedBlockLengthResponseProto.Builder with the specified block length and block ID. + * + * @param blockLength The length of the block to be included in the response. 
+ * @param blockID The ID of the block to be included in the response. + * @return The GetCommittedBlockLengthResponseProto.Builder object containing the block length and block ID. + */ + public static GetCommittedBlockLengthResponseProto.Builder getCommittedBlockLengthResponseBuilder(long blockLength, DatanodeBlockID blockID) { return GetCommittedBlockLengthResponseProto.newBuilder() @@ -196,17 +207,18 @@ public static ContainerCommandResponseProto getBlockLengthResponse( } /** - * Gets a response for the putSmallFile RPC. - * @param msg - ContainerCommandRequestProto - * @return - ContainerCommandResponseProto + * Generates a successful response for the PutSmallFile operation. + * + * @param msg The request message containing the PutSmallFile command. + * @param blockData The block data associated with the PutSmallFile operation. + * @return A ContainerCommandResponseProto object indicating the success of the PutSmallFile operation + * and containing relevant response data. */ - public static ContainerCommandResponseProto getPutFileResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { + public static ContainerCommandResponseProto getPutFileResponseSuccess(ContainerCommandRequestProto msg, + BlockData blockData) { - PutSmallFileResponseProto.Builder putSmallFile = - PutSmallFileResponseProto.newBuilder() - .setCommittedBlockLength(getCommittedBlockLengthResponseBuilder( - blockData.getSize(), blockData.getBlockID())); + PutSmallFileResponseProto.Builder putSmallFile = PutSmallFileResponseProto.newBuilder() + .setCommittedBlockLength(getCommittedBlockLengthResponseBuilder(blockData.getSize(), blockData.getBlockID())); return getSuccessResponseBuilder(msg) .setCmdType(Type.PutSmallFile) @@ -215,21 +227,22 @@ public static ContainerCommandResponseProto getPutFileResponseSuccess( } /** - * Gets a response for the WriteChunk RPC. - * @param msg - ContainerCommandRequestProto - * @return - ContainerCommandResponseProto + * Generates a successful response for a WriteChunk operation. + * + * @param msg The request message containing the WriteChunk command. + * @param blockData The block data associated with the WriteChunk operation. + * @return A ContainerCommandResponseProto object indicating the success of the WriteChunk operation + * and containing relevant response data. */ - public static ContainerCommandResponseProto getWriteChunkResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { + public static ContainerCommandResponseProto getWriteChunkResponseSuccess(ContainerCommandRequestProto msg, + BlockData blockData) { - WriteChunkResponseProto.Builder writeChunk = - WriteChunkResponseProto.newBuilder(); + WriteChunkResponseProto.Builder writeChunk = WriteChunkResponseProto.newBuilder(); if (blockData != null) { - writeChunk.setCommittedBlockLength( - getCommittedBlockLengthResponseBuilder( - blockData.getSize(), blockData.getBlockID())); - + writeChunk + .setCommittedBlockLength(getCommittedBlockLengthResponseBuilder(blockData.getSize(), blockData.getBlockID())); } + return getSuccessResponseBuilder(msg) .setCmdType(Type.WriteChunk) .setWriteChunk(writeChunk) @@ -237,29 +250,27 @@ public static ContainerCommandResponseProto getWriteChunkResponseSuccess( } /** - * Gets a response to the read small file call. - * @param request - Msg - * @param dataBuffers - Data - * @param info - Info - * @return Response. + * Generates a successful response for the GetSmallFile operation. + * + * @param request The request message containing the GetSmallFile command. 
+ * @param dataBuffers A list of ByteString objects containing the data buffers for the small file. + * @param info The ChunkInfo object containing metadata about the chunk. + * @return A ContainerCommandResponseProto object indicating the success of the GetSmallFile operation + * and containing relevant response data. */ - public static ContainerCommandResponseProto getGetSmallFileResponseSuccess( - ContainerCommandRequestProto request, List dataBuffers, - ChunkInfo info) { + public static ContainerCommandResponseProto getGetSmallFileResponseSuccess(ContainerCommandRequestProto request, + List dataBuffers, ChunkInfo info) { Preconditions.checkNotNull(request); - boolean isReadChunkV0 = getReadChunkVersion(request.getGetSmallFile()) - .equals(ContainerProtos.ReadChunkVersion.V0); + boolean isReadChunkV0 = getReadChunkVersion(request.getGetSmallFile()).equals(ContainerProtos.ReadChunkVersion.V0); ReadChunkResponseProto.Builder readChunk; if (isReadChunkV0) { // V0 has all response data in a single ByteBuffer - ByteString combinedData = ByteString.EMPTY; - for (ByteString buffer : dataBuffers) { - combinedData = combinedData.concat(buffer); - } + ByteString combinedData = BufferUtils.concatByteStrings(dataBuffers); + readChunk = ReadChunkResponseProto.newBuilder() .setChunkData(info) .setData(combinedData) @@ -274,41 +285,46 @@ public static ContainerCommandResponseProto getGetSmallFileResponseSuccess( .setBlockID(request.getGetSmallFile().getBlock().getBlockID()); } - GetSmallFileResponseProto.Builder getSmallFile = - GetSmallFileResponseProto.newBuilder().setData(readChunk); + GetSmallFileResponseProto.Builder getSmallFile = GetSmallFileResponseProto.newBuilder().setData(readChunk); return getSuccessResponseBuilder(request) .setCmdType(Type.GetSmallFile) .setGetSmallFile(getSmallFile) .build(); } + /** - * Returns a ReadContainer Response. + * Generates a response containing the requested container data. * - * @param request Request - * @param containerData - data - * @return Response. + * @param request The request message of type ContainerCommandRequestProto. + * @param containerData The container data to be included in the response. + * @return A ContainerCommandResponseProto object with the container data. */ - public static ContainerCommandResponseProto getReadContainerResponse( - ContainerCommandRequestProto request, ContainerDataProto containerData) { + public static ContainerCommandResponseProto getReadContainerResponse(ContainerCommandRequestProto request, + ContainerDataProto containerData) { Preconditions.checkNotNull(containerData); - ReadContainerResponseProto.Builder response = - ReadContainerResponseProto.newBuilder() - .setContainerData(containerData); + ReadContainerResponseProto.Builder response = ReadContainerResponseProto.newBuilder() + .setContainerData(containerData); return getSuccessResponseBuilder(request) .setReadContainer(response) .build(); } - public static ContainerCommandResponseProto getReadChunkResponse( - ContainerCommandRequestProto request, ChunkBuffer data, - Function byteBufferToByteString) { + /** + * Generates a response for a ReadChunk request, containing the requested chunk data. + * + * @param request The ContainerCommandRequestProto object containing the ReadChunk request. + * @param data The ChunkBuffer containing the data to be included in the response. + * @param byteBufferToByteString Function to convert ByteBuffer objects to ByteString. + * @return A ContainerCommandResponseProto object containing the response data. 
+ */ + public static ContainerCommandResponseProto getReadChunkResponse(ContainerCommandRequestProto request, + ChunkBuffer data, Function byteBufferToByteString) { - boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()) - .equals(ContainerProtos.ReadChunkVersion.V0); + boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()).equals(ContainerProtos.ReadChunkVersion.V0); ReadChunkResponseProto.Builder response; @@ -333,8 +349,16 @@ public static ContainerCommandResponseProto getReadChunkResponse( .build(); } - public static ContainerCommandResponseProto getFinalizeBlockResponse( - ContainerCommandRequestProto msg, BlockData data) { + /** + * Generates a successful response for the FinalizeBlock operation. + * + * @param msg The request message containing the FinalizeBlock command. + * @param data The block data associated with the FinalizeBlock operation. + * @return A ContainerCommandResponseProto object indicating the success of the FinalizeBlock operation + * and containing relevant response data. + */ + public static ContainerCommandResponseProto getFinalizeBlockResponse(ContainerCommandRequestProto msg, + BlockData data) { ContainerProtos.FinalizeBlockResponseProto.Builder blockData = ContainerProtos.FinalizeBlockResponseProto.newBuilder() @@ -345,9 +369,15 @@ public static ContainerCommandResponseProto getFinalizeBlockResponse( .build(); } - public static ContainerCommandResponseProto getEchoResponse( - ContainerCommandRequestProto msg) { - + /** + * Generates an echo response based on the provided request message. + * The response contains a random payload of the specified size and optionally simulates a delay before responding. + * + * @param msg The request message of type ContainerCommandRequestProto, + * containing the EchoRequest with payload size and optional sleep time. + * @return A ContainerCommandResponseProto object containing the echo response with a random payload. + */ + public static ContainerCommandResponseProto getEchoResponse(ContainerCommandRequestProto msg) { ContainerProtos.EchoRequestProto echoRequest = msg.getEcho(); int responsePayload = echoRequest.getPayloadSizeResp(); @@ -360,10 +390,9 @@ public static ContainerCommandResponseProto getEchoResponse( throw new RuntimeException(e); } - ContainerProtos.EchoResponseProto.Builder echo = - ContainerProtos.EchoResponseProto - .newBuilder() - .setPayload(UnsafeByteOperations.unsafeWrap(RandomUtils.nextBytes(responsePayload))); + ContainerProtos.EchoResponseProto.Builder echo = ContainerProtos.EchoResponseProto + .newBuilder() + .setPayload(UnsafeByteOperations.unsafeWrap(RandomUtils.nextBytes(responsePayload))); return getSuccessResponseBuilder(msg) .setEcho(echo) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerApi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerApi.java new file mode 100644 index 00000000000..a2e886a5897 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerApi.java @@ -0,0 +1,35 @@ +/* + + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.storage; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.VerifyBlockResponseProto; + +/** + * Interface for communication with a datanode. + * Provides methods to perform any protocol calls by Container clients on a single datanode. + */ +public interface ContainerApi extends AutoCloseable { + /** + * Verifies the integrity and validity of a block within the container. + * + * @return a VerifyBlockResponseProto object containing the result of the block verification operation. + */ + VerifyBlockResponseProto verifyBlock(); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerApiHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerApiHelper.java new file mode 100644 index 00000000000..ad45a7ccb81 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerApiHelper.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.storage; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.VerifyBlockRequestProto; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.security.token.Token; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.VerifyBlock; + +/** + * Class designed working with Datanode Proto requests and responses. + */ +class ContainerApiHelper { + /** + * Creates a request to verify a block on the datanode. + * + * @param datanodeBlockID The identifier for the block on the datanode. + * @param token The security token used for authentication. + * @param datanodeUuid The unique identifier of the datanode. + * @return A {@link ContainerCommandRequestProto} object representing the verify block request. + * @throws IOException If an I/O error occurs during the request creation. + */ + ContainerCommandRequestProto createVerifyBlockRequest(DatanodeBlockID datanodeBlockID, + Token token, String datanodeUuid) throws IOException { + + VerifyBlockRequestProto.Builder verifyBlockRequestBuilder = ContainerProtos.VerifyBlockRequestProto + .newBuilder() + .setBlockID(datanodeBlockID); + + ContainerCommandRequestProto.Builder commandRequestBuilder = ContainerCommandRequestProto + .newBuilder() + .setCmdType(VerifyBlock) + .setContainerID(datanodeBlockID.getContainerID()) + .setDatanodeUuid(datanodeUuid) + .setVerifyBlock(verifyBlockRequestBuilder); + + if (token != null) { + commandRequestBuilder.setEncodedToken(token.encodeToUrlString()); + } + + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + commandRequestBuilder.setTraceID(traceId); + } + + return commandRequestBuilder.build(); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerMultinodeApi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerMultinodeApi.java new file mode 100644 index 00000000000..6dc9b13801a --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerMultinodeApi.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.storage; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.VerifyBlockResponseProto; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.security.token.Token; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; + +/** + * Interface for communication with multiple datanodes. + * Provides methods to perform any protocol calls by Container clients on multiple datanodes. + */ +public interface ContainerMultinodeApi extends AutoCloseable { + /** + * Verifies the specified block on multiple datanodes. + * + * @param datanodeBlockID the ID of the block to be verified + * @param token the security token required for block verification + * @return a map containing the datanode details and their respective verification response + * @throws IOException if an I/O error occurs during verification + * @throws InterruptedException if the verification process is interrupted + */ + Map verifyBlock(DatanodeBlockID datanodeBlockID, + Token token) throws IOException, InterruptedException; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerMultinodeApiImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerMultinodeApiImpl.java new file mode 100644 index 00000000000..50695a4ed9f --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerMultinodeApiImpl.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.storage; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.VerifyBlockResponseProto; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.security.token.Token; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Implementation of the {@link ContainerMultinodeApi} interface. + * This class provides methods to perform protocol calls on multiple datanodes. + */ +public class ContainerMultinodeApiImpl implements ContainerMultinodeApi { + + private final XceiverClientSpi client; + + private final ContainerApiHelper requestHelper = new ContainerApiHelper(); + + public ContainerMultinodeApiImpl(XceiverClientSpi client) { + this.client = client; + } + + @Override + public Map verifyBlock(DatanodeBlockID datanodeBlockID, + Token token) throws IOException, InterruptedException { + + String datanodeUuid = client.getPipeline().getFirstNode().getUuidString(); + + Map datanodeToResponseMap = new HashMap<>(); + + ContainerCommandRequestProto request = requestHelper.createVerifyBlockRequest(datanodeBlockID, token, datanodeUuid); + Map responses = client.sendCommandOnAllNodes(request); + + responses.forEach((key, value) -> datanodeToResponseMap.put(key, value.getVerifyBlock())); + + return datanodeToResponseMap; + } + + @Override + public void close() throws IOException { + client.close(); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index cb41479b5f3..2108c5b7fa4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -85,12 +85,10 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED; /** - * Implementation of all container protocol calls performed by Container - * clients. + * Implementation of all container protocol calls performed by Container clients. */ public final class ContainerProtocolCalls { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerProtocolCalls.class); + private static final Logger LOG = LoggerFactory.getLogger(ContainerProtocolCalls.class); /** * There is no need to instantiate this class. 
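Note on the new verify-block client API introduced above: ContainerMultinodeApiImpl builds a VerifyBlock request through ContainerApiHelper and fans it out with XceiverClientSpi.sendCommandOnAllNodes, returning one VerifyBlockResponseProto per datanode in the pipeline. A minimal usage sketch follows; the xceiverClient, datanodeBlockID and blockToken variables (and how they are obtained) are assumptions for illustration only and are not part of this patch.

    // Sketch only: verify a block on every datanode in the client's pipeline.
    try (ContainerMultinodeApiImpl verifyApi = new ContainerMultinodeApiImpl(xceiverClient)) {
      Map<DatanodeDetails, VerifyBlockResponseProto> responses =
          verifyApi.verifyBlock(datanodeBlockID, blockToken);
      // One entry per datanode that received the VerifyBlock command.
      responses.forEach((datanode, response) ->
          System.out.println(datanode + " -> " + response));
    } catch (IOException e) {
      // Illustrative handling only; real callers would propagate or retry.
      e.printStackTrace();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }

The sketch deliberately types the resource as the concrete ContainerMultinodeApiImpl so that close() surfaces as an IOException in try-with-resources.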
@@ -105,25 +103,21 @@ private ContainerProtocolCalls() { * @param containerID the ID of the container to list block * @param startLocalID the localID of the first block to get * @param count max number of blocks to get - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return container protocol list block response * @throws IOException if there is an I/O error while performing the call */ - public static ListBlockResponseProto listBlock(XceiverClientSpi xceiverClient, - long containerID, Long startLocalID, int count, - Token token) throws IOException { + public static ListBlockResponseProto listBlock(XceiverClientSpi xceiverClient, long containerID, Long startLocalID, + int count, Token token) throws IOException { - ListBlockRequestProto.Builder listBlockBuilder = - ListBlockRequestProto.newBuilder() - .setCount(count); + ListBlockRequestProto.Builder listBlockBuilder = ListBlockRequestProto.newBuilder().setCount(count); if (startLocalID != null) { listBlockBuilder.setStartLocalID(startLocalID); } // datanodeID doesn't matter for read only requests - String datanodeID = - xceiverClient.getPipeline().getFirstNode().getUuidString(); + String datanodeID = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder() @@ -132,24 +126,11 @@ public static ListBlockResponseProto listBlock(XceiverClientSpi xceiverClient, .setDatanodeUuid(datanodeID) .setListBlock(listBlockBuilder.build()); - if (token != null) { - builder.setEncodedToken(token.encodeToUrlString()); - } - String traceId = TracingUtil.exportCurrentSpan(); - if (traceId != null) { - builder.setTraceID(traceId); - } - - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getListBlock(); + return sendContainerCommand(xceiverClient, builder, token).getListBlock(); } - static T tryEachDatanode(Pipeline pipeline, - CheckedFunction op, - Function toErrorMessage) - throws IOException { + static T tryEachDatanode(Pipeline pipeline, CheckedFunction op, + Function toErrorMessage) throws IOException { final Set excluded = new HashSet<>(); for (; ;) { final DatanodeDetails d = pipeline.getClosestNode(excluded); @@ -170,8 +151,7 @@ static T tryEachDatanode(Pipeline pipeline, span.log("failed to connect to DN " + d); excluded.add(d); if (excluded.size() < pipeline.size()) { - LOG.warn(toErrorMessage.apply(d) - + "; will try another datanode.", e); + LOG.warn("{}; will try another datanode.", toErrorMessage.apply(d), e); } else { throw e; } @@ -185,7 +165,7 @@ static T tryEachDatanode(Pipeline pipeline, * @param xceiverClient client to perform call * @param validators functions to validate the response * @param blockID blockID to identify container - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return container protocol get block response * @throws IOException if there is an I/O error while performing the call */ @@ -210,32 +190,21 @@ static String toErrorMessage(BlockID blockId, DatanodeDetails d) { blockId.getLocalID(), blockId.getContainerID(), d); } - public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, - BlockID datanodeBlockID, + public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, BlockID datanodeBlockID, Token token, Map replicaIndexes) throws IOException { return 
getBlock(xceiverClient, getValidatorList(), datanodeBlockID, token, replicaIndexes); } - private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, - List validators, - ContainerCommandRequestProto.Builder builder, BlockID blockID, - DatanodeDetails datanode, Map replicaIndexes) throws IOException { - String traceId = TracingUtil.exportCurrentSpan(); - if (traceId != null) { - builder.setTraceID(traceId); - } - final DatanodeBlockID.Builder datanodeBlockID = blockID.getDatanodeBlockIDProtobufBuilder(); - int replicaIndex = replicaIndexes.getOrDefault(datanode, 0); - if (replicaIndex > 0) { - datanodeBlockID.setReplicaIndex(replicaIndex); - } + private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, List validators, + ContainerCommandRequestProto.Builder builder, BlockID blockID, DatanodeDetails datanode, + Map replicaIndexes) throws IOException { + DatanodeBlockID datanodeBlockID = buildDatanodeId(builder, blockID, datanode, replicaIndexes); final GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto.newBuilder() - .setBlockID(datanodeBlockID.build()); + .setBlockID(datanodeBlockID); final ContainerCommandRequestProto request = builder .setDatanodeUuid(datanode.getUuidString()) .setGetBlock(readBlockRequest).build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, validators); + ContainerCommandResponseProto response = xceiverClient.sendCommand(request, validators); return response.getGetBlock(); } @@ -244,17 +213,13 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, * * @param xceiverClient client to perform call * @param blockID blockId for the Block - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return container protocol getLastCommittedBlockLength response * @throws IOException if there is an I/O error while performing the call */ - public static ContainerProtos.GetCommittedBlockLengthResponseProto - getCommittedBlockLength( - XceiverClientSpi xceiverClient, BlockID blockID, - Token token) - throws IOException { - ContainerProtos.GetCommittedBlockLengthRequestProto.Builder - getBlockLengthRequestBuilder = + public static ContainerProtos.GetCommittedBlockLengthResponseProto getCommittedBlockLength( + XceiverClientSpi xceiverClient, BlockID blockID, Token token) throws IOException { + ContainerProtos.GetCommittedBlockLengthRequestProto.Builder getBlockLengthRequestBuilder = ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder(). 
setBlockID(blockID.getDatanodeBlockIDProtobuf()); String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); @@ -272,8 +237,7 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, builder.setTraceID(traceId); } ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); + ContainerCommandResponseProto response = xceiverClient.sendCommand(request, getValidatorList()); return response.getGetCommittedBlockLength(); } @@ -283,17 +247,14 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, * @param xceiverClient client to perform call * @param containerBlockData block data to identify container * @param eof whether this is the last putBlock for the same block - * @param tokenString a serialized token for this block (may be null) + * @param tokenString a serialized token for this block (maybe null) * @return putBlockResponse * @throws IOException if there is an error while performing the call */ - public static XceiverClientReply putBlockAsync(XceiverClientSpi xceiverClient, - BlockData containerBlockData, - boolean eof, - String tokenString) - throws IOException, InterruptedException, ExecutionException { - final ContainerCommandRequestProto request = getPutBlockRequest( - xceiverClient.getPipeline(), containerBlockData, eof, tokenString); + public static XceiverClientReply putBlockAsync(XceiverClientSpi xceiverClient, BlockData containerBlockData, + boolean eof, String tokenString) throws IOException, InterruptedException, ExecutionException { + final ContainerCommandRequestProto request = + getPutBlockRequest(xceiverClient.getPipeline(), containerBlockData, eof, tokenString); return xceiverClient.sendCommandAsync(request); } @@ -302,16 +263,13 @@ public static XceiverClientReply putBlockAsync(XceiverClientSpi xceiverClient, * * @param xceiverClient client to perform call * @param blockID block ID to identify block - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return FinalizeBlockResponseProto * @throws IOException if there is an I/O error while performing the call */ - public static ContainerProtos.FinalizeBlockResponseProto finalizeBlock( - XceiverClientSpi xceiverClient, DatanodeBlockID blockID, - Token token) - throws IOException { - FinalizeBlockRequestProto.Builder finalizeBlockRequest = - FinalizeBlockRequestProto.newBuilder().setBlockID(blockID); + public static ContainerProtos.FinalizeBlockResponseProto finalizeBlock(XceiverClientSpi xceiverClient, + DatanodeBlockID blockID, Token token) throws IOException { + FinalizeBlockRequestProto.Builder finalizeBlockRequest = FinalizeBlockRequestProto.newBuilder().setBlockID(blockID); String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder().setCmdType(Type.FinalizeBlock) @@ -322,14 +280,22 @@ public static ContainerProtos.FinalizeBlockResponseProto finalizeBlock( builder.setEncodedToken(token.encodeToUrlString()); } ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); + ContainerCommandResponseProto response = xceiverClient.sendCommand(request, getValidatorList()); return response.getFinalizeBlock(); } - public static ContainerCommandRequestProto getPutBlockRequest( - Pipeline pipeline, BlockData containerBlockData, 
boolean eof, - String tokenString) throws IOException { + /** + * Constructs and returns a ContainerCommandRequestProto for a PutBlock operation. + * + * @param pipeline the pipeline that specifies the nodes involved in the operation + * @param containerBlockData the block data to be put + * @param eof flag indicating if this is the end of the file + * @param tokenString an optional security token for the operation may be null + * @return a ContainerCommandRequestProto representing the PutBlock request + * @throws IOException if an I/O error occurs during request creation + */ + public static ContainerCommandRequestProto getPutBlockRequest(Pipeline pipeline, BlockData containerBlockData, + boolean eof, String tokenString) throws IOException { PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto.newBuilder() .setBlockData(containerBlockData) @@ -353,14 +319,12 @@ public static ContainerCommandRequestProto getPutBlockRequest( * @param chunk information about chunk to read * @param blockID ID of the block * @param validators functions to validate the response - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return container protocol read chunk response * @throws IOException if there is an I/O error while performing the call */ - public static ContainerProtos.ReadChunkResponseProto readChunk( - XceiverClientSpi xceiverClient, ChunkInfo chunk, DatanodeBlockID blockID, - List validators, - Token token) throws IOException { + public static ContainerProtos.ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, + DatanodeBlockID blockID, List validators, Token token) throws IOException { ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto.newBuilder() .setBlockID(blockID) @@ -374,25 +338,21 @@ public static ContainerProtos.ReadChunkResponseProto readChunk( builder.setEncodedToken(token.encodeToUrlString()); } - Span span = GlobalTracer.get() - .buildSpan("readChunk").start(); + Span span = GlobalTracer.get().buildSpan("readChunk").start(); try (Scope ignored = GlobalTracer.get().activateSpan(span)) { span.setTag("offset", chunk.getOffset()) .setTag("length", chunk.getLen()) .setTag("block", blockID.toString()); return tryEachDatanode(xceiverClient.getPipeline(), - d -> readChunk(xceiverClient, chunk, blockID, - validators, builder, d), + d -> readChunk(xceiverClient, chunk, blockID, validators, builder, d), d -> toErrorMessage(chunk, blockID, d)); } finally { span.finish(); } } - private static ContainerProtos.ReadChunkResponseProto readChunk( - XceiverClientSpi xceiverClient, ChunkInfo chunk, DatanodeBlockID blockID, - List validators, - ContainerCommandRequestProto.Builder builder, + private static ContainerProtos.ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, + DatanodeBlockID blockID, List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails d) throws IOException { ContainerCommandRequestProto.Builder requestBuilder = builder .setDatanodeUuid(d.getUuidString()); @@ -401,19 +361,16 @@ private static ContainerProtos.ReadChunkResponseProto readChunk( if (traceId != null) { requestBuilder = requestBuilder.setTraceID(traceId); } - ContainerCommandResponseProto reply = - xceiverClient.sendCommand(requestBuilder.build(), validators); + ContainerCommandResponseProto reply = xceiverClient.sendCommand(requestBuilder.build(), validators); final ReadChunkResponseProto response = reply.getReadChunk(); final long readLen = 
getLen(response); if (readLen != chunk.getLen()) { - throw new IOException(toErrorMessage(chunk, blockID, d) - + ": readLen=" + readLen); + throw new IOException(toErrorMessage(chunk, blockID, d) + ": readLen=" + readLen); } return response; } - static String toErrorMessage(ChunkInfo chunk, DatanodeBlockID blockId, - DatanodeDetails d) { + static String toErrorMessage(ChunkInfo chunk, DatanodeBlockID blockId, DatanodeDetails d) { return String.format("Failed to read chunk %s (len=%s) %s from %s", chunk.getChunkName(), chunk.getLen(), blockId, d); } @@ -440,10 +397,8 @@ static long getLen(ReadChunkResponseProto response) { * @throws IOException if there is an I/O error while performing the call */ @SuppressWarnings("parameternumber") - public static XceiverClientReply writeChunkAsync( - XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data, String tokenString, - int replicationIndex, BlockData blockData, boolean close) + public static XceiverClientReply writeChunkAsync(XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, + ByteString data, String tokenString, int replicationIndex, BlockData blockData, boolean close) throws IOException, ExecutionException, InterruptedException { WriteChunkRequestProto.Builder writeChunkRequest = @@ -487,11 +442,10 @@ public static XceiverClientReply writeChunkAsync( * @param client - client that communicates with the container. * @param blockID - ID of the block * @param data - Data to be written into the container. - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return container protocol writeSmallFile response */ - public static PutSmallFileResponseProto writeSmallFile( - XceiverClientSpi client, BlockID blockID, byte[] data, + public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client, BlockID blockID, byte[] data, Token token) throws IOException { BlockData containerBlockData = @@ -532,15 +486,14 @@ public static PutSmallFileResponseProto writeSmallFile( builder.setEncodedToken(token.encodeToUrlString()); } ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); + ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList()); return response.getPutSmallFile(); } /** * createRecoveringContainer call that creates a container on the datanode. - * Currently this is used for EC reconstruction containers. When EC - * reconstruction coordinator reconstructing the containers, the in progress + * Currently, this is used for EC reconstruction containers. + * When EC reconstruction coordinator reconstructed the containers, the in progress * containers would be created as "RECOVERING" state containers. 
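 * <p>Illustrative call (not part of this change): an EC reconstruction coordinator holding an
 * {@code XceiverClientSpi} for the target datanode could create the recovering replica with, e.g.,
 * {@code createRecoveringContainer(xceiverClient, containerID, encodedToken, replicaIndex)};
 * the client, token and replica index here are assumed inputs used only for illustration.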
* @param client - client * @param containerID - ID of container @@ -548,39 +501,37 @@ public static PutSmallFileResponseProto writeSmallFile( * @param replicaIndex - index position of the container replica */ @InterfaceStability.Evolving - public static void createRecoveringContainer(XceiverClientSpi client, - long containerID, String encodedToken, int replicaIndex) - throws IOException { - createContainer(client, containerID, encodedToken, - ContainerProtos.ContainerDataProto.State.RECOVERING, replicaIndex); + public static void createRecoveringContainer(XceiverClientSpi client, long containerID, String encodedToken, + int replicaIndex) throws IOException { + createContainer(client, containerID, encodedToken, ContainerProtos.ContainerDataProto.State.RECOVERING, + replicaIndex); } /** - * createContainer call that creates a container on the datanode. + * Creates a container on the datanode. + * * @param client - client * @param containerID - ID of container * @param encodedToken - encodedToken if security is enabled */ - public static void createContainer(XceiverClientSpi client, long containerID, - String encodedToken) throws IOException { + public static void createContainer(XceiverClientSpi client, long containerID, String encodedToken) + throws IOException { createContainer(client, containerID, encodedToken, null, 0); } /** - * createContainer call that creates a container on the datanode. + * Creates a container on the datanode. + * * @param client - client * @param containerID - ID of container * @param encodedToken - encodedToken if security is enabled * @param state - state of the container * @param replicaIndex - index position of the container replica */ - public static void createContainer(XceiverClientSpi client, - long containerID, String encodedToken, - ContainerProtos.ContainerDataProto.State state, int replicaIndex) - throws IOException { + public static void createContainer(XceiverClientSpi client, long containerID, String encodedToken, + ContainerProtos.ContainerDataProto.State state, int replicaIndex) throws IOException { ContainerProtos.CreateContainerRequestProto.Builder createRequest = ContainerProtos.CreateContainerRequestProto.newBuilder(); - createRequest - .setContainerType(ContainerProtos.ContainerType.KeyValueContainer); + createRequest.setContainerType(ContainerProtos.ContainerType.KeyValueContainer); if (state != null) { createRequest.setState(state); } @@ -589,8 +540,7 @@ public static void createContainer(XceiverClientSpi client, } String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); + ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto.newBuilder(); if (encodedToken != null) { request.setEncodedToken(encodedToken); } @@ -609,21 +559,25 @@ public static void createContainer(XceiverClientSpi client, /** * Deletes a container from a pipeline. * - * @param force whether or not to forcibly delete the container. + * @param force whether to forcibly delete the container. 
* @param encodedToken - encodedToken if security is enabled */ - public static void deleteContainer(XceiverClientSpi client, long containerID, - boolean force, String encodedToken) throws IOException { + public static void deleteContainer(XceiverClientSpi client, long containerID, boolean force, String encodedToken) + throws IOException { ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest = ContainerProtos.DeleteContainerRequestProto.newBuilder(); deleteRequest.setForceDelete(force); String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); + ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto.newBuilder(); request.setCmdType(ContainerProtos.Type.DeleteContainer); request.setContainerID(containerID); request.setDeleteContainer(deleteRequest); + sendContainerCommand(client, encodedToken, id, request); + } + + private static void sendContainerCommand(XceiverClientSpi client, String encodedToken, String id, + ContainerCommandRequestProto.Builder request) throws IOException { request.setDatanodeUuid(id); if (encodedToken != null) { request.setEncodedToken(encodedToken); @@ -636,57 +590,38 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, } /** - * Close a container. + * Closes an open container identified by the specified container ID through the given client. * - * @param encodedToken - encodedToken if security is enabled + * @param client The xceiver client instance used to communicate with the container. + * @param containerID The identifier of the container to be closed. + * @param encodedToken An encoded authentication token required to authorize the command. + * @throws IOException If an I/O error occurs during the operation. */ - public static void closeContainer(XceiverClientSpi client, - long containerID, String encodedToken) - throws IOException { + public static void closeContainer(XceiverClientSpi client, long containerID, String encodedToken) throws IOException { String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); + ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto.newBuilder(); request.setCmdType(Type.CloseContainer); request.setContainerID(containerID); request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(id); - if (encodedToken != null) { - request.setEncodedToken(encodedToken); - } - String traceId = TracingUtil.exportCurrentSpan(); - if (traceId != null) { - request.setTraceID(traceId); - } - client.sendCommand(request.build(), getValidatorList()); + sendContainerCommand(client, encodedToken, id, request); } /** - * readContainer call that gets meta data from an existing container. + * Reads the content of a specified container using the given client and encoded token. * - * @param client - client - * @param encodedToken - encodedToken if security is enabled + * @param client An instance of XceiverClientSpi used to communicate with the server. + * @param containerID The ID of the container to be read. + * @param encodedToken A token used for authorization to read the container. + * @return A ReadContainerResponseProto object containing the response from the server. + * @throws IOException If an I/O error occurs while communicating with the server. 
*/ - public static ReadContainerResponseProto readContainer( - XceiverClientSpi client, long containerID, String encodedToken) + public static ReadContainerResponseProto readContainer(XceiverClientSpi client, long containerID, String encodedToken) throws IOException { String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(Type.ReadContainer); - request.setContainerID(containerID); - request.setReadContainer(ReadContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(id); - if (encodedToken != null) { - request.setEncodedToken(encodedToken); - } - String traceId = TracingUtil.exportCurrentSpan(); - if (traceId != null) { - request.setTraceID(traceId); - } - ContainerCommandResponseProto response = - client.sendCommand(request.build(), getValidatorList()); + ContainerCommandRequestProto request = buildReadContainerRequest(containerID, encodedToken, id); + ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList()); return response.getReadContainer(); } @@ -695,11 +630,10 @@ public static ReadContainerResponseProto readContainer( * Reads the data given the blockID. * * @param blockID - ID of the block - * @param token a token for this block (may be null) + * @param token a token for this block (maybe null) * @return GetSmallFileResponseProto */ - public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, - BlockID blockID, + public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, BlockID blockID, Token token) throws IOException { GetBlockRequestProto.Builder getBlock = GetBlockRequestProto .newBuilder() @@ -717,6 +651,13 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, .setContainerID(blockID.getContainerID()) .setDatanodeUuid(id) .setGetSmallFile(getSmallFileRequest); + + ContainerCommandResponseProto response = sendContainerCommand(client, builder, token); + return response.getGetSmallFile(); + } + + private static ContainerCommandResponseProto sendContainerCommand(XceiverClientSpi client, + ContainerCommandRequestProto.Builder builder, Token token) throws IOException { if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } @@ -725,15 +666,23 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, builder.setTraceID(traceId); } ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); - return response.getGetSmallFile(); + return client.sendCommand(request, getValidatorList()); } /** - * Send an echo to DataNode. + * Executes an echo command on the specified container using the given xceiver client. + * + * @param client The xceiver client used to send the command. + * @param encodedContainerID The encoded token of the container ID. + * @param containerID The numeric ID of the container. + * @param payloadReqBytes The payload in bytes for the echo request. + * @param payloadRespSizeKB The expected size of the payload in the response, in kilobytes. + * @param sleepTimeMs The amount of time in milliseconds to sleep before responding. + * @param readOnly A boolean indicating if the echo operation should be read-only. * - * @return EchoResponseProto + * @return An EchoResponseProto containing the response payload. + * + * @throws IOException If an I/O error occurs while sending the command. 
*/ public static EchoResponseProto echo(XceiverClientSpi client, String encodedContainerID, long containerID, ByteString payloadReqBytes, int payloadRespSizeKB, int sleepTimeMs, boolean readOnly) @@ -762,32 +711,32 @@ public static EchoResponseProto echo(XceiverClientSpi client, String encodedCont builder.setTraceID(traceId); } ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); + ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList()); return response.getEcho(); } /** - * Validates a response from a container protocol call. Any non-successful - * return code is mapped to a corresponding exception and thrown. + * Validates a response from a container protocol call. + * Any non-successful return code is mapped to a corresponding exception and thrown. * * @param response container protocol call response * @throws StorageContainerException if the container protocol call failed */ - public static void validateContainerResponse( - ContainerCommandResponseProto response - ) throws StorageContainerException { - if (response.getResult() == ContainerProtos.Result.SUCCESS) { + public static void validateContainerResponse(ContainerCommandResponseProto response) + throws StorageContainerException { + switch (response.getResult()) { + case SUCCESS: return; - } else if (response.getResult() - == ContainerProtos.Result.BLOCK_NOT_COMMITTED) { + + case BLOCK_NOT_COMMITTED: throw new BlockNotCommittedException(response.getMessage()); - } else if (response.getResult() - == ContainerProtos.Result.CLOSED_CONTAINER_IO) { + + case CLOSED_CONTAINER_IO: throw new ContainerNotOpenException(response.getMessage()); + + default: + throw new StorageContainerException(response.getMessage(), response.getResult()); } - throw new StorageContainerException( - response.getMessage(), response.getResult()); } private static List getValidatorList() { @@ -797,30 +746,40 @@ private static List getValidatorList() { private static final List VALIDATORS = createValidators(); private static List createValidators() { - return singletonList( - (request, response) -> validateContainerResponse(response)); + return singletonList((request, response) -> validateContainerResponse(response)); } + /** + * Converts the given Validator into a list of Validators, combined with default validators. + * + * @param validator The Validator to be added to the list. + * @return An unmodifiable list of Validators, including the given Validator and default validators. + */ public static List toValidatorList(Validator validator) { final List defaults = getValidatorList(); - final List validators - = new ArrayList<>(defaults.size() + 1); + final List validators = new ArrayList<>(defaults.size() + 1); validators.addAll(defaults); validators.add(validator); return Collections.unmodifiableList(validators); } - public static HashMap - getBlockFromAllNodes( - XceiverClientSpi xceiverClient, - DatanodeBlockID datanodeBlockID, - Token token) + /** + * Retrieves block information from all nodes in the pipeline. + * + * @param xceiverClient the client interface for communication with the data nodes. + * @param datanodeBlockID the identifier of the data block. + * @param token the security token used for authentication and authorization. + * @return a map containing data node details mapped to their respective block response protocol objects. + * @throws IOException if there is an I/O error during communication with the data nodes. 
+ * @throws InterruptedException if the operation is interrupted. + */ + public static Map getBlockFromAllNodes(XceiverClientSpi xceiverClient, + DatanodeBlockID datanodeBlockID, Token token) throws IOException, InterruptedException { GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto .newBuilder() .setBlockID(datanodeBlockID); - HashMap datanodeToResponseMap - = new HashMap<>(); + Map datanodeToResponseMap = new HashMap<>(); String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto .newBuilder() @@ -836,23 +795,41 @@ public static List toValidatorList(Validator validator) { builder.setTraceID(traceId); } ContainerCommandRequestProto request = builder.build(); - Map responses = - xceiverClient.sendCommandOnAllNodes(request); - for (Map.Entry entry: - responses.entrySet()) { + Map responses = xceiverClient.sendCommandOnAllNodes(request); + for (Map.Entry entry: responses.entrySet()) { datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock()); } return datanodeToResponseMap; } - public static HashMap - readContainerFromAllNodes(XceiverClientSpi client, long containerID, - String encodedToken) throws IOException, InterruptedException { + /** + * Reads a container from all nodes in the pipeline associated with the given client. + * + * @param client The XceiverClientSpi used to send the read container request to all nodes. + * @param containerID The ID of the container to be read. + * @param encodedToken The security token used for authorization. + * @return A Map mapping each DatanodeDetails to its corresponding ReadContainerResponseProto, + * representing the response from each node for the read container request. + * @throws IOException If an I/O error occurs while sending the request or processing the response. + * @throws InterruptedException If the thread is interrupted while waiting for the response. 
+ */ + public static Map readContainerFromAllNodes(XceiverClientSpi client, + long containerID, String encodedToken) throws IOException, InterruptedException { + String id = client.getPipeline().getFirstNode().getUuidString(); - HashMap datanodeToResponseMap - = new HashMap<>(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); + Map datanodeToResponseMap = new HashMap<>(); + ContainerCommandRequestProto request = buildReadContainerRequest(containerID, encodedToken, id); + Map responses = client.sendCommandOnAllNodes(request); + for (Map.Entry entry : responses.entrySet()) { + datanodeToResponseMap.put(entry.getKey(), entry.getValue().getReadContainer()); + } + return datanodeToResponseMap; + } + + private static ContainerCommandRequestProto buildReadContainerRequest(long containerID, String encodedToken, + String id) { + + ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto.newBuilder(); request.setCmdType(Type.ReadContainer); request.setContainerID(containerID); request.setReadContainer(ReadContainerRequestProto.getDefaultInstance()); @@ -864,14 +841,22 @@ public static List toValidatorList(Validator validator) { if (traceId != null) { request.setTraceID(traceId); } - Map responses = - client.sendCommandOnAllNodes(request.build()); - for (Map.Entry entry : - responses.entrySet()) { - datanodeToResponseMap.put(entry.getKey(), - entry.getValue().getReadContainer()); - } - return datanodeToResponseMap; + + return request.build(); } + private static DatanodeBlockID buildDatanodeId(ContainerCommandRequestProto.Builder builder, BlockID blockID, + DatanodeDetails datanode, Map replicaIndexes) { + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } + final DatanodeBlockID.Builder datanodeBlockID = blockID.getDatanodeBlockIDProtobufBuilder(); + int replicaIndex = replicaIndexes.getOrDefault(datanode, 0); + if (replicaIndex > 0) { + datanodeBlockID.setReplicaIndex(replicaIndex); + } + + return datanodeBlockID.build(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index b34a5d8387b..c6023ccd071 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -40,7 +40,6 @@ public final class OzoneConsts { public static final String SCM_CERT_SERIAL_ID = "scmCertSerialId"; public static final String PRIMARY_SCM_NODE_ID = "primaryScmNodeId"; - public static final String OZONE_SIMPLE_ROOT_USER = "root"; public static final String OZONE_SIMPLE_HDFS_USER = "hdfs"; public static final String STORAGE_ID = "storageID"; @@ -76,12 +75,6 @@ public final class OzoneConsts { "EEE, dd MMM yyyy HH:mm:ss zzz"; public static final String OZONE_TIME_ZONE = "GMT"; - public static final String OZONE_COMPONENT = "component"; - public static final String OZONE_FUNCTION = "function"; - public static final String OZONE_RESOURCE = "resource"; - public static final String OZONE_USER = "user"; - public static final String OZONE_REQUEST = "request"; - // OM Http server endpoints public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = "/serviceList"; @@ -101,14 +94,9 @@ public final class OzoneConsts { public static final String CONTAINER_EXTENSION = ".container"; - public static final String CONTAINER_META = ".meta"; - - // Refer to {@link ContainerReader} for container 
storage layout on disk. - public static final String CONTAINER_PREFIX = "containers"; public static final String CONTAINER_META_PATH = "metadata"; public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; - public static final String CONTAINER_ROOT_PREFIX = "repository"; public static final String FILE_HASH = "SHA-256"; public static final String MD5_HASH = "MD5"; @@ -128,7 +116,6 @@ public final class OzoneConsts { * level DB names used by SCM and data nodes. */ public static final String CONTAINER_DB_SUFFIX = "container.db"; - public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX; public static final String OM_DB_NAME = "om.db"; public static final String SCM_DB_NAME = "scm.db"; @@ -187,10 +174,8 @@ public final class OzoneConsts { public static final String OM_USER_PREFIX = "$"; public static final String OM_S3_PREFIX = "S3:"; public static final String OM_S3_CALLER_CONTEXT_PREFIX = "S3Auth:S3G|"; - public static final String OM_S3_VOLUME_PREFIX = "s3"; public static final String OM_S3_SECRET = "S3Secret:"; public static final String OM_PREFIX = "Prefix:"; - public static final String OM_TENANT = "Tenant:"; /** * Max chunk size limit. @@ -198,11 +183,6 @@ public final class OzoneConsts { public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024; - /** - * Max OM Quota size of Long.MAX_VALUE. - */ - public static final long MAX_QUOTA_IN_BYTES = Long.MAX_VALUE; - /** * Quota RESET default is -1, which means quota is not set. */ @@ -214,36 +194,20 @@ public final class OzoneConsts { */ public enum Units { TB, GB, MB, KB, B } - /** - * Max number of keys returned per list buckets operation. - */ - public static final int MAX_LISTBUCKETS_SIZE = 1024; - - /** - * Max number of keys returned per list keys operation. - */ - public static final int MAX_LISTKEYS_SIZE = 1024; - - /** - * Max number of volumes returned per list volumes operation. - */ - public static final int MAX_LISTVOLUMES_SIZE = 1024; - - public static final int INVALID_PORT = -1; - /** * Object ID to identify reclaimable uncommitted blocks. */ public static final long OBJECT_ID_RECLAIM_BLOCKS = 0L; - /** * Default SCM Datanode ID file name. */ public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id"; - // The ServiceListJSONServlet context attribute where OzoneManager - // instance gets stored. + /** + * The ServiceListJSONServlet context attribute where OzoneManager + * instance gets stored. 
+ */ public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; public static final String SCM_CONTEXT_ATTRIBUTE = "ozone.scm"; @@ -308,12 +272,8 @@ private OzoneConsts() { public static final String KEY_PREFIX = "keyPrefix"; public static final String ACL = "acl"; public static final String ACLS = "acls"; - public static final String USER_ACL = "userAcl"; - public static final String ADD_ACLS = "addAcls"; - public static final String REMOVE_ACLS = "removeAcls"; public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets"; public static final String HAS_SNAPSHOT = "hasSnapshot"; - public static final String TO_KEY_NAME = "toKeyName"; public static final String STORAGE_TYPE = "storageType"; public static final String RESOURCE_TYPE = "resourceType"; public static final String IS_VERSION_ENABLED = "isVersionEnabled"; @@ -323,7 +283,6 @@ private OzoneConsts() { public static final String REPLICATION_TYPE = "replicationType"; public static final String REPLICATION_FACTOR = "replicationFactor"; public static final String REPLICATION_CONFIG = "replicationConfig"; - public static final String KEY_LOCATION_INFO = "keyLocationInfo"; public static final String MULTIPART_LIST = "multipartList"; public static final String UPLOAD_ID = "uploadID"; public static final String PART_NUMBER_MARKER = "partNumberMarker"; @@ -378,10 +337,6 @@ private OzoneConsts() { public static final String JAVA_TMP_DIR = "java.io.tmpdir"; public static final String LOCALHOST = "localhost"; - - public static final int S3_BUCKET_MIN_LENGTH = 3; - public static final int S3_BUCKET_MAX_LENGTH = 64; - public static final int S3_SECRET_KEY_MIN_LENGTH = 8; public static final int S3_REQUEST_HEADER_METADATA_SIZE_LIMIT_KB = 2; @@ -398,7 +353,6 @@ private OzoneConsts() { public static final String GDPR_ALGORITHM_NAME = "AES"; public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16; public static final Charset GDPR_CHARSET = StandardCharsets.UTF_8; - public static final String GDPR_LENGTH = "length"; public static final String GDPR_SECRET = "secret"; public static final String GDPR_ALGORITHM = "algorithm"; @@ -426,13 +380,6 @@ private OzoneConsts() { public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; - // SCM HA - public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault"; - - // SCM Ratis snapshot file to store the last applied index - public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex"; - - public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm"; // An on-disk transient marker file used when replacing DB with checkpoint public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index eec2ceeb5e8..982b559c7a5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -44,6 +44,8 @@ public enum OzoneManagerVersion implements ComponentVersion { ATOMIC_REWRITE_KEY(6, "OzoneManager version that supports rewriting key as atomic operation"), HBASE_SUPPORT(7, "OzoneManager version that supports HBase integration"), + LIGHTWEIGHT_LIST_STATUS(8, "OzoneManager version that supports lightweight" + + " listStatus API."), FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has 
arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index 4bd170df8e8..ea5c5453f3f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto3Codec; +import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; import java.util.Collections; @@ -280,4 +281,14 @@ public void appendTo(StringBuilder sb) { sb.append(", size=").append(size); sb.append("]"); } + + public long getBlockGroupLength() { + String lenStr = getMetadata() + .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK); + // If we don't have the length, then it indicates a problem with the stripe. + // All replica should carry the length, so if it is not there, we return 0, + // which will cause us to set the length of the block to zero and not + // attempt to reconstruct it. + return (lenStr == null) ? 0 : Long.parseLong(lenStr); + } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 2b7592e1c35..4c1d9d9dbc5 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -60,11 +60,9 @@ * Helpers for container tests. */ public final class ContainerTestHelper { - private static final Logger LOG = LoggerFactory.getLogger( - ContainerTestHelper.class); + private static final Logger LOG = LoggerFactory.getLogger(ContainerTestHelper.class); - public static final long CONTAINER_MAX_SIZE = - (long) StorageUnit.GB.toBytes(1); + public static final long CONTAINER_MAX_SIZE = (long) StorageUnit.GB.toBytes(1); public static final String DATANODE_UUID = UUID.randomUUID().toString(); private static final long DUMMY_CONTAINER_ID = 9999; @@ -83,10 +81,8 @@ private ContainerTestHelper() { * @param seqNo - Chunk number. * @return ChunkInfo */ - public static ChunkInfo getChunk(long keyID, int seqNo, long offset, - long len) { - return new ChunkInfo(String.format("%d.data.%d", keyID, - seqNo), offset, len); + public static ChunkInfo getChunk(long keyID, int seqNo, long offset, long len) { + return new ChunkInfo(String.format("%d.data.%d", keyID, seqNo), offset, len); } /** @@ -107,10 +103,8 @@ public static ChunkBuffer getData(int len) { * @param info - chunk info. * @param data - data array */ - public static void setDataChecksum(ChunkInfo info, ChunkBuffer data) - throws OzoneChecksumException { - Checksum checksum = new Checksum(ChecksumType.CRC32, - 1024 * 1024); + public static void setDataChecksum(ChunkInfo info, ChunkBuffer data) throws OzoneChecksumException { + Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024); info.setChecksumData(checksum.computeChecksum(data)); data.rewind(); } @@ -123,17 +117,13 @@ public static void setDataChecksum(ChunkInfo info, ChunkBuffer data) * @param datalen - Length of data. 
* @return ContainerCommandRequestProto */ - public static ContainerCommandRequestProto getWriteChunkRequest( - Pipeline pipeline, BlockID blockID, int datalen) + public static ContainerCommandRequestProto getWriteChunkRequest(Pipeline pipeline, BlockID blockID, int datalen) throws IOException { - LOG.trace("writeChunk {} (blockID={}) to pipeline={}", - datalen, blockID, pipeline); - return newWriteChunkRequestBuilder(pipeline, blockID, datalen) - .build(); + LOG.trace("writeChunk {} (blockID={}) to pipeline={}", datalen, blockID, pipeline); + return newWriteChunkRequestBuilder(pipeline, blockID, datalen).build(); } - public static ContainerCommandRequestProto getListBlockRequest( - ContainerCommandRequestProto writeChunkRequest) { + public static ContainerCommandRequestProto getListBlockRequest(ContainerCommandRequestProto writeChunkRequest) { return ContainerCommandRequestProto.newBuilder() .setContainerID(writeChunkRequest.getContainerID()) .setCmdType(ContainerProtos.Type.ListBlock) @@ -143,8 +133,7 @@ public static ContainerCommandRequestProto getListBlockRequest( .build(); } - public static ContainerCommandRequestProto getPutBlockRequest( - ContainerCommandRequestProto writeChunkRequest) { + public static ContainerCommandRequestProto getPutBlockRequest(ContainerCommandRequestProto writeChunkRequest) { ContainerProtos.BlockData.Builder block = ContainerProtos.BlockData.newBuilder() .setSize(writeChunkRequest.getWriteChunk().getChunkData().getLen()) @@ -160,17 +149,15 @@ public static ContainerCommandRequestProto getPutBlockRequest( .build(); } - public static Builder newWriteChunkRequestBuilder(Pipeline pipeline, - BlockID blockID, int datalen) throws IOException { + public static Builder newWriteChunkRequestBuilder(Pipeline pipeline, BlockID blockID, int datalen) + throws IOException { ChunkBuffer data = getData(datalen); return newWriteChunkRequestBuilder(pipeline, blockID, data, 0); } - public static Builder newWriteChunkRequestBuilder( - Pipeline pipeline, BlockID blockID, ChunkBuffer data, int seq) + public static Builder newWriteChunkRequestBuilder(Pipeline pipeline, BlockID blockID, ChunkBuffer data, int seq) throws IOException { - LOG.trace("writeChunk {} (blockID={}) to pipeline={}", - data.limit(), blockID, pipeline); + LOG.trace("writeChunk {} (blockID={}) to pipeline={}", data.limit(), blockID, pipeline); ContainerProtos.WriteChunkRequestProto.Builder writeRequest = ContainerProtos.WriteChunkRequestProto .newBuilder(); @@ -183,14 +170,11 @@ public static Builder newWriteChunkRequestBuilder( writeRequest.setChunkData(info.getProtoBufMessage()); writeRequest.setData(data.toByteString()); - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.WriteChunk); - request.setContainerID(blockID.getContainerID()); - request.setWriteChunk(writeRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - - return request; + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.WriteChunk) + .setContainerID(blockID.getContainerID()) + .setWriteChunk(writeRequest) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()); } /** @@ -201,8 +185,7 @@ public static Builder newWriteChunkRequestBuilder( * @param dataLen - Number of bytes in the data * @return ContainerCommandRequestProto */ - public static ContainerCommandRequestProto getWriteSmallFileRequest( - Pipeline pipeline, BlockID blockID, int dataLen) + public static ContainerCommandRequestProto getWriteSmallFileRequest(Pipeline 
pipeline, BlockID blockID, int dataLen) throws Exception { ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest = ContainerProtos.PutSmallFileRequestProto.newBuilder(); @@ -210,9 +193,7 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen); setDataChecksum(info, data); - - ContainerProtos.PutBlockRequestProto.Builder putRequest = - ContainerProtos.PutBlockRequestProto.newBuilder(); + ContainerProtos.PutBlockRequestProto.Builder putRequest = ContainerProtos.PutBlockRequestProto.newBuilder(); BlockData blockData = new BlockData(blockID); List newList = new LinkedList<>(); @@ -224,13 +205,12 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( smallFileRequest.setData(data.toByteString()); smallFileRequest.setBlock(putRequest); - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.PutSmallFile); - request.setContainerID(blockID.getContainerID()); - request.setPutSmallFile(smallFileRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.PutSmallFile) + .setContainerID(blockID.getContainerID()) + .setPutSmallFile(smallFileRequest) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) + .build(); } @@ -242,13 +222,12 @@ public static ContainerCommandRequestProto getReadSmallFileRequest( ContainerCommandRequestProto getKey = getBlockRequest(pipeline, putKey); smallFileRequest.setBlock(getKey.getGetBlock()); - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.GetSmallFile); - request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID()); - request.setGetSmallFile(smallFileRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.GetSmallFile) + .setContainerID(getKey.getGetBlock().getBlockID().getContainerID()) + .setGetSmallFile(smallFileRequest) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) + .build(); } /** @@ -267,8 +246,7 @@ public static ContainerCommandRequestProto getReadChunkRequest( public static Builder newReadChunkRequestBuilder(Pipeline pipeline, ContainerProtos.WriteChunkRequestProtoOrBuilder writeChunk) throws IOException { - LOG.trace("readChunk blockID={} from pipeline={}", - writeChunk.getBlockID(), pipeline); + LOG.trace("readChunk blockID={} from pipeline={}", writeChunk.getBlockID(), pipeline); ContainerProtos.ReadChunkRequestProto.Builder readRequest = ContainerProtos.ReadChunkRequestProto.newBuilder(); @@ -276,13 +254,11 @@ public static Builder newReadChunkRequestBuilder(Pipeline pipeline, readRequest.setChunkData(writeChunk.getChunkData()); readRequest.setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1); - Builder newRequest = - ContainerCommandRequestProto.newBuilder(); - newRequest.setCmdType(ContainerProtos.Type.ReadChunk); - newRequest.setContainerID(readRequest.getBlockID().getContainerID()); - newRequest.setReadChunk(readRequest); - newRequest.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return newRequest; + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.ReadChunk) + .setContainerID(readRequest.getBlockID().getContainerID()) + .setReadChunk(readRequest) + 
.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); } /** @@ -297,17 +273,12 @@ public static ContainerCommandRequestProto getCreateContainerRequest( return getContainerCommandRequestBuilder(containerID, pipeline).build(); } - private static Builder getContainerCommandRequestBuilder(long containerID, - Pipeline pipeline) throws IOException { - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.CreateContainer); - request.setContainerID(containerID); - request.setCreateContainer( - ContainerProtos.CreateContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - - return request; + private static Builder getContainerCommandRequestBuilder(long containerID, Pipeline pipeline) throws IOException { + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.CreateContainer) + .setContainerID(containerID) + .setCreateContainer(ContainerProtos.CreateContainerRequestProto.getDefaultInstance()) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()); } /** @@ -343,16 +314,14 @@ public static ContainerCommandRequestProto getUpdateContainerRequest( kvBuilder.setValue(metaData.get(key)); updateRequestBuilder.addMetadata(kvBuilder.build()); } - Pipeline pipeline = - MockPipeline.createSingleNodePipeline(); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.UpdateContainer); - request.setContainerID(containerID); - request.setUpdateContainer(updateRequestBuilder.build()); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); + Pipeline pipeline = MockPipeline.createSingleNodePipeline(); + + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.UpdateContainer) + .setContainerID(containerID) + .setUpdateContainer(updateRequestBuilder.build()) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) + .build(); } /** @@ -361,17 +330,14 @@ public static ContainerCommandRequestProto getUpdateContainerRequest( * * @return ContainerCommandRequestProto. 
*/ - public static ContainerCommandResponseProto - getCreateContainerResponse(ContainerCommandRequestProto request) { + public static ContainerCommandResponseProto getCreateContainerResponse(ContainerCommandRequestProto request) { - ContainerCommandResponseProto.Builder response = - ContainerCommandResponseProto.newBuilder(); - response.setCmdType(ContainerProtos.Type.CreateContainer); - response.setTraceID(request.getTraceID()); - response.setCreateContainer( - ContainerProtos.CreateContainerResponseProto.getDefaultInstance()); - response.setResult(ContainerProtos.Result.SUCCESS); - return response.build(); + return ContainerCommandResponseProto.newBuilder() + .setCmdType(ContainerProtos.Type.CreateContainer) + .setTraceID(request.getTraceID()) + .setCreateContainer(ContainerProtos.CreateContainerResponseProto.getDefaultInstance()) + .setResult(ContainerProtos.Result.SUCCESS) + .build(); } /** @@ -401,14 +367,11 @@ public static Builder newPutBlockRequestBuilder(Pipeline pipeline, public static Builder newPutBlockRequestBuilder(Pipeline pipeline, ContainerProtos.WriteChunkRequestProtoOrBuilder writeRequest, boolean incremental) throws IOException { - LOG.trace("putBlock: {} to pipeline={}", - writeRequest.getBlockID(), pipeline); + LOG.trace("putBlock: {} to pipeline={}", writeRequest.getBlockID(), pipeline); - ContainerProtos.PutBlockRequestProto.Builder putRequest = - ContainerProtos.PutBlockRequestProto.newBuilder(); + ContainerProtos.PutBlockRequestProto.Builder putRequest = ContainerProtos.PutBlockRequestProto.newBuilder(); - BlockData blockData = new BlockData( - BlockID.getFromProtobuf(writeRequest.getBlockID())); + BlockData blockData = new BlockData(BlockID.getFromProtobuf(writeRequest.getBlockID())); List newList = new LinkedList<>(); newList.add(writeRequest.getChunkData()); blockData.setChunks(newList); @@ -418,13 +381,11 @@ public static Builder newPutBlockRequestBuilder(Pipeline pipeline, } putRequest.setBlockData(blockData.getProtoBufMessage()); - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.PutBlock); - request.setContainerID(blockData.getContainerID()); - request.setPutBlock(putRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request; + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.PutBlock) + .setContainerID(blockData.getContainerID()) + .setPutBlock(putRequest) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()); } /** @@ -445,17 +406,14 @@ public static Builder newGetBlockRequestBuilder( throws IOException { DatanodeBlockID blockID = putBlock.getBlockData().getBlockID(); - ContainerProtos.GetBlockRequestProto.Builder getRequest = - ContainerProtos.GetBlockRequestProto.newBuilder(); + ContainerProtos.GetBlockRequestProto.Builder getRequest = ContainerProtos.GetBlockRequestProto.newBuilder(); getRequest.setBlockID(blockID); - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.GetBlock); - request.setContainerID(blockID.getContainerID()); - request.setGetBlock(getRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request; + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.GetBlock) + .setContainerID(blockID.getContainerID()) + .setGetBlock(getRequest) + .setDatanodeUuid(pipeline.getFirstNode().getUuidString()); } /** @@ -465,8 +423,7 @@ public static Builder newGetBlockRequestBuilder( */ public static void 
verifyGetBlock(ContainerCommandResponseProto response, int expectedChunksCount) { assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - assertEquals(expectedChunksCount, - response.getGetBlock().getBlockData().getChunksCount()); + assertEquals(expectedChunksCount, response.getGetBlock().getBlockData().getChunksCount()); } public static Builder newGetCommittedBlockLengthBuilder(Pipeline pipeline, @@ -549,9 +506,9 @@ public static ContainerProtos.ContainerCommandRequestProto getFinalizeBlockReque .setContainerID(container.getContainerID()).setLocalID(localID) .setBlockCommitSequenceId(0).build(); - builder.setFinalizeBlock(ContainerProtos.FinalizeBlockRequestProto - .newBuilder().setBlockID(blockId).build()); - return builder.build(); + return builder.setFinalizeBlock(ContainerProtos.FinalizeBlockRequestProto + .newBuilder().setBlockID(blockId).build()) + .build(); } public static BlockID getTestBlockID(long containerID) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java index f7a38e3dec8..bd1e031ad71 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java @@ -41,7 +41,8 @@ public enum DNAction implements AuditAction { GET_COMMITTED_BLOCK_LENGTH, STREAM_INIT, FINALIZE_BLOCK, - ECHO; + ECHO, + VERIFY_BLOCK; @Override public String getAction() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index c5855b38b74..6a79bf8417a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -59,7 +59,6 @@ import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.OnDemandContainerDataScanner; import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import org.apache.hadoop.util.Time; @@ -88,8 +87,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { static final Logger LOG = LoggerFactory.getLogger(HddsDispatcher.class); - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.DNLOGGER); + private static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.DNLOGGER); private static final String AUDIT_PARAM_CONTAINER_ID = "containerID"; private static final String AUDIT_PARAM_CONTAINER_TYPE = "containerType"; private static final String AUDIT_PARAM_FORCE_UPDATE = "forceUpdate"; @@ -101,48 +99,40 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { private static final String AUDIT_PARAM_BLOCK_DATA_STAGE = "stage"; private static final String AUDIT_PARAM_COUNT = "count"; private static final String AUDIT_PARAM_START_LOCAL_ID = "startLocalID"; - private static final String AUDIT_PARAM_PREV_CHUNKNAME = "prevChunkName"; + private static final String AUDIT_PARAM_PREV_CHUNK_NAME = 
"prevChunkName"; private final Map handlers; - private final ConfigurationSource conf; private final ContainerSet containerSet; - private final VolumeSet volumeSet; private final StateContext context; private final float containerCloseThreshold; private final ProtocolMessageMetrics protocolMetrics; - private OzoneProtocolMessageDispatcher dispatcher; private String clusterId; private ContainerMetrics metrics; private final TokenVerifier tokenVerifier; - private long slowOpThresholdNs; - private VolumeUsage.MinFreeSpaceCalculator freeSpaceCalculator; + private final long slowOpThresholdNs; + private final VolumeUsage.MinFreeSpaceCalculator freeSpaceCalculator; /** * Constructs an OzoneContainer that receives calls from * XceiverServerHandler. */ - public HddsDispatcher(ConfigurationSource config, ContainerSet contSet, - VolumeSet volumes, Map handlers, - StateContext context, ContainerMetrics metrics, - TokenVerifier tokenVerifier) { - this.conf = config; + public HddsDispatcher(ConfigurationSource config, ContainerSet contSet, Map handlers, + StateContext context, ContainerMetrics metrics, TokenVerifier tokenVerifier) { this.containerSet = contSet; - this.volumeSet = volumes; this.context = context; this.handlers = handlers; this.metrics = metrics; - this.containerCloseThreshold = conf.getFloat( + this.containerCloseThreshold = config.getFloat( HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD, HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT); - this.tokenVerifier = tokenVerifier != null ? tokenVerifier - : new NoopTokenVerifier(); - this.slowOpThresholdNs = getSlowOpThresholdMs(conf) * 1000000; + this.tokenVerifier = tokenVerifier != null ? tokenVerifier : new NoopTokenVerifier(); + this.slowOpThresholdNs = getSlowOpThresholdMs(config) * 1000000; - protocolMetrics = - new ProtocolMessageMetrics<>( - "HddsDispatcher", - "HDDS dispatcher metrics", - Type.values()); + protocolMetrics = new ProtocolMessageMetrics<>( + "HddsDispatcher", + "HDDS dispatcher metrics", + Type.values()); this.dispatcher = new OzoneProtocolMessageDispatcher<>("DatanodeClient", @@ -150,7 +140,7 @@ public HddsDispatcher(ConfigurationSource config, ContainerSet contSet, LOG, HddsUtils::processForDebug, HddsUtils::processForDebug); - this.freeSpaceCalculator = new VolumeUsage.MinFreeSpaceCalculator(conf); + this.freeSpaceCalculator = new VolumeUsage.MinFreeSpaceCalculator(config); } @Override @@ -164,10 +154,10 @@ public void shutdown() { } /** - * Returns true for exceptions which can be ignored for marking the container - * unhealthy. + * Returns true for exceptions which can be ignored for marking the container unhealthy. + * * @param result ContainerCommandResponse error code. - * @return true if exception can be ignored, false otherwise. + * @return {@code true} if exception can be ignored, {@code false} otherwise. 
*/ private boolean canIgnoreException(Result result) { switch (result) { @@ -183,15 +173,12 @@ private boolean canIgnoreException(Result result) { } @Override - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - containerSet - .buildMissingContainerSetAndValidate(container2BCSIDMap); + public void buildMissingContainerSetAndValidate(Map container2BCSIDMap) { + containerSet.buildMissingContainerSetAndValidate(container2BCSIDMap); } @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { + public ContainerCommandResponseProto dispatch(ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { try { return dispatcher.processRequest(msg, req -> dispatchRequest(msg, dispatcherContext), @@ -203,39 +190,35 @@ public ContainerCommandResponseProto dispatch( } @SuppressWarnings("methodlength") - private ContainerCommandResponseProto dispatchRequest( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { + private ContainerCommandResponseProto dispatchRequest(ContainerCommandRequestProto msg, + DispatcherContext dispatcherContext) { + Preconditions.checkNotNull(msg); - if (LOG.isTraceEnabled()) { - LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(), - msg.getTraceID()); - } + LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(), msg.getTraceID()); AuditAction action = getAuditAction(msg.getCmdType()); EventType eventType = getEventType(msg); PerformanceStringBuilder perf = new PerformanceStringBuilder(); ContainerType containerType; - ContainerCommandResponseProto responseProto = null; + ContainerCommandResponseProto responseProto; long startTime = Time.monotonicNowNanos(); Type cmdType = msg.getCmdType(); long containerID = msg.getContainerID(); Container container = getContainer(containerID); boolean isWriteStage = (cmdType == Type.WriteChunk && dispatcherContext != null - && dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.WRITE_DATA) + && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.WRITE_DATA) || (cmdType == Type.StreamInit); boolean isWriteCommitStage = (cmdType == Type.WriteChunk && dispatcherContext != null - && dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.COMMIT_DATA); + && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMMIT_DATA); if (dispatcherContext == null) { - // increase all op not through ratis + // Increase all ops not through ratis metrics.incContainerOpsMetrics(cmdType); } else if (isWriteStage) { - // increase WriteChunk in only WRITE_STAGE + // Increase WriteChunk in only WRITE_STAGE metrics.incContainerOpsMetrics(cmdType); } else if (cmdType != Type.WriteChunk) { metrics.incContainerOpsMetrics(cmdType); @@ -252,57 +235,50 @@ private ContainerCommandResponseProto dispatchRequest( s, ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED); return ContainerUtils.logAndReturnError(LOG, sce, msg); } - // if the command gets executed other than Ratis, the default write stage - // is WriteChunkStage.COMBINED + // if the command gets executed other than Ratis, the default write stage is WriteChunkStage.COMBINED boolean isCombinedStage = cmdType == Type.WriteChunk && (dispatcherContext == null - || dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.COMBINED); + || dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMBINED); Map container2BCSIDMap = null; if (dispatcherContext != null) { container2BCSIDMap = 
dispatcherContext.getContainer2BCSIDMap(); } if (isWriteCommitStage) { - // check if the container Id exist in the loaded snapshot file. if - // it does not , it infers that , this is a restart of dn where - // the we are reapplying the transaction which was not captured in the - // snapshot. - // just add it to the list, and remove it from missing container set - // as it might have been added in the list during "init". + // Check if the container ID exists in the loaded snapshot file. + // If it does not, this indicates + // a restart of the datanode + // where we are reapplying a transaction that was not captured in the snapshot. + // Add it to the list, + // and remove it from the missing container set as it might have been added to the list during "init". Preconditions.checkNotNull(container2BCSIDMap); if (container != null && container2BCSIDMap.get(containerID) == null) { - container2BCSIDMap.put( - containerID, container.getBlockCommitSequenceId()); + container2BCSIDMap.put(containerID, container.getBlockCommitSequenceId()); getMissingContainerSet().remove(containerID); } } if (getMissingContainerSet().contains(containerID)) { StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID - + " has been lost and cannot be recreated on this DataNode", + "ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING); audit(action, eventType, msg, dispatcherContext, AuditEventStatus.FAILURE, sce); return ContainerUtils.logAndReturnError(LOG, sce, msg); } if (cmdType != Type.CreateContainer) { - /** - * Create Container should happen only as part of Write_Data phase of - * writeChunk. - * In EC, we are doing empty putBlock. In the partial stripe writes, if - * file size is less than chunkSize*(ECData-1), we are making empty block - * to get the container created in non writing nodes. If replica index is - * >0 then we know it's for ec container. + /* + Create Container should happen only as part of Write_Data phase of writeChunk. + In EC, we are doing empty putBlock. + In the partial stripe writes, if file size is less than chunkSize*(ECData-1), + we are making an empty block to get the container created in non-writing nodes. + If replica index is > 0 then we know it's for an EC container. */ if (container == null && ((isWriteStage || isCombinedStage) || cmdType == Type.PutSmallFile || cmdType == Type.PutBlock)) { - // If container does not exist, create one for WriteChunk and - // PutSmallFile request + // If a container does not exist, create one for WriteChunk and PutSmallFile requests responseProto = createContainer(msg); metrics.incContainerOpsMetrics(Type.CreateContainer); - metrics.incContainerOpsLatencies(Type.CreateContainer, - Time.monotonicNowNanos() - startTime); + metrics.incContainerOpsLatencies(Type.CreateContainer, Time.monotonicNowNanos() - startTime); if (responseProto.getResult() != Result.SUCCESS) { StorageContainerException sce = new StorageContainerException( @@ -315,14 +291,13 @@ private ContainerCommandResponseProto dispatchRequest( || dispatcherContext == null || cmdType == Type.PutBlock); if (container2BCSIDMap != null) { - // adds this container to list of containers created in the pipeline - // with initial BCSID recorded as 0. + // Adds this container to the list of containers created in the pipeline with initial BCSID recorded as 0. 
container2BCSIDMap.putIfAbsent(containerID, 0L); } container = getContainer(containerID); } - // if container not found return error + // If the container is not found, return an error if (container == null) { StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID + " does not exist", @@ -333,14 +308,13 @@ private ContainerCommandResponseProto dispatchRequest( containerType = getContainerType(container); } else { if (!msg.hasCreateContainer()) { - audit(action, eventType, msg, dispatcherContext, AuditEventStatus.FAILURE, - new Exception("MALFORMED_REQUEST")); + audit(action, eventType, msg, dispatcherContext, AuditEventStatus.FAILURE, new Exception("MALFORMED_REQUEST")); return malformedRequest(msg); } containerType = msg.getCreateContainer().getContainerType(); } - // Small performance optimization. We check if the operation is of type - // write before trying to send CloseContainerAction. + // Small performance optimization. + // We check if the operation is of write type before trying to send CloseContainerAction. if (!HddsUtils.isReadOnly(msg)) { sendCloseContainerActionIfNeeded(container); } @@ -359,75 +333,67 @@ private ContainerCommandResponseProto dispatchRequest( if (responseProto != null) { metrics.incContainerOpsLatencies(cmdType, opLatencyNs); - // If the request is of Write Type and the container operation - // is unsuccessful, it implies the applyTransaction on the container - // failed. All subsequent transactions on the container should fail and - // hence replica will be marked unhealthy here. In this case, a close - // container action will be sent to SCM to close the container. + // If the request is of Write Type and the container operation is unsuccessful, + // it implies the applyTransaction on the container failed. + // All subsequent transactions on the container should fail, and hence the replica will be marked unhealthy here. + // In this case, a close container action will be sent to SCM to close the container. - // ApplyTransaction called on closed Container will fail with Closed - // container exception. In such cases, ignore the exception here - // If the container is already marked unhealthy, no need to change the - // state here. + // ApplyTransaction called on closed Container will fail with Closed container exception. + // In such cases, ignore the exception here. If the container is already marked unhealthy, + // there is no need to change the state here. Result result = responseProto.getResult(); if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) { - // If the container is open/closing and the container operation - // has failed, it should be first marked unhealthy and the initiate the - // close container action. This also implies this is the first - // transaction which has failed, so the container is marked unhealthy - // right here. - // Once container is marked unhealthy, all the subsequent write - // transactions will fail with UNHEALTHY_CONTAINER exception. + // If the container is open/closing and the container operation has failed, + // it should first be marked unhealthy and then the close container action initiated. + // This also implies this is the first transaction that has failed, + // so the container is marked unhealthy right here. + // Once a container is marked unhealthy, + // all subsequent write transactions will fail with UNHEALTHY_CONTAINER exception. 
if (container == null) { - throw new NullPointerException( - "Error on creating containers " + result + " " + responseProto - .getMessage()); + throw new NullPointerException("Error on creating containers " + result + " " + responseProto.getMessage()); } - // For container to be moved to unhealthy state here, the container can - // only be in open or closing state. + // For the container to be moved to unhealthy state here, the container can only be in open or closing state. State containerState = container.getContainerData().getState(); Preconditions.checkState( containerState == State.OPEN || containerState == State.CLOSING || containerState == State.RECOVERING); - // mark and persist the container state to be unhealthy + // Mark and persist the container state to be unhealthy try { - // TODO HDDS-7096 + HDDS-8781: Use on demand scanning for the open - // container instead. + // TODO HDDS-7096 + HDDS-8781: Use on demand scanning for the open container instead. handler.markContainerUnhealthy(container, ScanResult.unhealthy(ScanResult.FailureType.WRITE_FAILURE, new File(container.getContainerData().getContainerPath()), new StorageContainerException(result))); LOG.info("Marked Container UNHEALTHY, ContainerID: {}", containerID); } catch (IOException ioe) { - // just log the error here in case marking the container fails, - // Return the actual failure response to the client - LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", - ioe); + // Log the error here in case marking the container fails, return the actual failure response to the client + LOG.error("Failed to mark container {} UNHEALTHY. ", containerID, ioe); } - // in any case, the in memory state of the container should be unhealthy - Preconditions.checkArgument( - container.getContainerData().getState() == State.UNHEALTHY); + // In any case, the in memory state of the container should be unhealthy + Preconditions.checkArgument(container.getContainerData().getState() == State.UNHEALTHY); sendCloseContainerActionIfNeeded(container); } - if (cmdType == Type.CreateContainer - && result == Result.SUCCESS && dispatcherContext != null) { + if (cmdType == Type.CreateContainer && result == Result.SUCCESS && dispatcherContext != null) { Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap()); - container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0)); + container2BCSIDMap.putIfAbsent(containerID, 0L); } if (result == Result.SUCCESS) { updateBCSID(container, dispatcherContext, cmdType); audit(action, eventType, msg, dispatcherContext, AuditEventStatus.SUCCESS, null); } else { - //TODO HDDS-7096: - // This is a too general place for on demand scanning. - // Create a specific exception that signals for on demand scanning - // and move this general scan to where it is more appropriate. + //TODO HDDS-7096: This is a too general place for on demand scanning. + // Create a specific exception + // that signals for on demand scanning and move this general scan to where it is more appropriate. // Add integration tests to test the full functionality. 
OnDemandContainerDataScanner.scanContainer(container); - audit(action, eventType, msg, dispatcherContext, AuditEventStatus.FAILURE, + audit(action, + eventType, + msg, + dispatcherContext, + AuditEventStatus.FAILURE, new Exception(responseProto.getMessage())); } perf.appendOpLatencyNanos(opLatencyNs); @@ -436,8 +402,7 @@ private ContainerCommandResponseProto dispatchRequest( return responseProto; } else { // log failure - audit(action, eventType, msg, dispatcherContext, AuditEventStatus.FAILURE, - new Exception("UNSUPPORTED_REQUEST")); + audit(action, eventType, msg, dispatcherContext, AuditEventStatus.FAILURE, new Exception("UNSUPPORTED_REQUEST")); return unsupportedRequest(msg); } } @@ -446,13 +411,11 @@ private long getSlowOpThresholdMs(ConfigurationSource config) { return config.getTimeDuration( HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY, HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT, - TimeUnit.MILLISECONDS); + TimeUnit.MILLISECONDS); } - private void updateBCSID(Container container, - DispatcherContext dispatcherContext, Type cmdType) { - if (dispatcherContext != null && (cmdType == Type.PutBlock - || cmdType == Type.PutSmallFile)) { + private void updateBCSID(Container container, DispatcherContext dispatcherContext, Type cmdType) { + if (dispatcherContext != null && (cmdType == Type.PutBlock || cmdType == Type.PutSmallFile)) { Preconditions.checkNotNull(container); long bcsID = container.getBlockCommitSequenceId(); long containerId = container.getContainerData().getContainerID(); @@ -460,35 +423,31 @@ private void updateBCSID(Container container, container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap(); Preconditions.checkNotNull(container2BCSIDMap); Preconditions.checkArgument(container2BCSIDMap.containsKey(containerId)); - // updates the latest BCSID on every putBlock or putSmallFile - // transaction over Ratis. - container2BCSIDMap.computeIfPresent(containerId, (u, v) -> v = bcsID); + // updates the latest BCSID on every putBlock or putSmallFile transaction over Ratis. + container2BCSIDMap.computeIfPresent(containerId, (u, v) -> bcsID); } } + /** * Create a container using the input container request. + * * @param containerRequest - the container request which requires container * to be created. * @return ContainerCommandResponseProto container command response. 
*/ @VisibleForTesting - ContainerCommandResponseProto createContainer( - ContainerCommandRequestProto containerRequest) { + ContainerCommandResponseProto createContainer(ContainerCommandRequestProto containerRequest) { ContainerProtos.CreateContainerRequestProto.Builder createRequest = ContainerProtos.CreateContainerRequestProto.newBuilder(); - ContainerType containerType = - ContainerProtos.ContainerType.KeyValueContainer; + ContainerType containerType = ContainerProtos.ContainerType.KeyValueContainer; createRequest.setContainerType(containerType); if (containerRequest.hasWriteChunk()) { - createRequest.setReplicaIndex( - containerRequest.getWriteChunk().getBlockID().getReplicaIndex()); + createRequest.setReplicaIndex(containerRequest.getWriteChunk().getBlockID().getReplicaIndex()); } if (containerRequest.hasPutBlock()) { - createRequest.setReplicaIndex( - containerRequest.getPutBlock().getBlockData().getBlockID() - .getReplicaIndex()); + createRequest.setReplicaIndex(containerRequest.getPutBlock().getBlockData().getBlockID().getReplicaIndex()); } ContainerCommandRequestProto.Builder requestBuilder = @@ -506,26 +465,21 @@ ContainerCommandResponseProto createContainer( return handler.handle(requestBuilder.build(), null, null); } - private void validateToken( - ContainerCommandRequestProto msg) throws IOException { - tokenVerifier.verify( - msg, - msg.getEncodedToken() - ); + private void validateToken(ContainerCommandRequestProto msg) throws IOException { + tokenVerifier.verify(msg, msg.getEncodedToken()); } /** - * This will be called as a part of creating the log entry during - * startTransaction in Ratis on the leader node. In such cases, if the - * container is not in open state for writing we should just fail. + * This will be called as a part of creating the log entry during startTransaction in Ratis on the leader node. + * In such cases, if the container is not in open state for writing, we should just fail. * Leader will propagate the exception to client. + * * @param msg container command proto - * @throws StorageContainerException In case container state is open for write - * requests and in invalid state for read requests. + * @throws StorageContainerException In case container state is open for write requests + * and in invalid state for read requests. */ @Override - public void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException { + public void validateContainerCommand(ContainerCommandRequestProto msg) throws StorageContainerException { try { validateToken(msg); } catch (IOException ioe) { @@ -555,19 +509,16 @@ public void validateContainerCommand( State containerState = container.getContainerState(); String log = "Container " + containerID + " in " + containerState + " state"; - if (!HddsUtils.isReadOnly(msg) - && !HddsUtils.isOpenToWriteState(containerState)) { + if (!HddsUtils.isReadOnly(msg) && !HddsUtils.isOpenToWriteState(containerState)) { switch (cmdType) { case CreateContainer: // Create Container is idempotent. There is nothing to validate. break; case CloseContainer: - // If the container is unhealthy, closeContainer will be rejected - // while execution. Nothing to validate here. + // If the container is unhealthy, closeContainer will be rejected while execution. Nothing to validate here. break; default: - // if the container is not open/recovering, no updates can happen. Just - // throw an exception + // If the container is not open/recovering, no updates can happen. 
Throw an exception ContainerNotOpenException cex = new ContainerNotOpenException(log); audit(action, eventType, msg, null, AuditEventStatus.FAILURE, cex); throw cex; @@ -580,8 +531,9 @@ public void validateContainerCommand( } /** - * If the container usage reaches the close threshold or the container is - * marked unhealthy we send Close ContainerAction to SCM. + * If the container usage reaches the close threshold or the container is marked unhealthy, + * we send Close ContainerAction to SCM. + * * @param container current state of container */ private void sendCloseContainerActionIfNeeded(Container container) { @@ -590,12 +542,13 @@ private void sendCloseContainerActionIfNeeded(Container container) { boolean shouldClose = isSpaceFull || isContainerUnhealthy(container); if (shouldClose) { ContainerData containerData = container.getContainerData(); - ContainerAction.Reason reason = - isSpaceFull ? ContainerAction.Reason.CONTAINER_FULL : - ContainerAction.Reason.CONTAINER_UNHEALTHY; + ContainerAction.Reason reason = isSpaceFull + ? ContainerAction.Reason.CONTAINER_FULL + : ContainerAction.Reason.CONTAINER_UNHEALTHY; ContainerAction action = ContainerAction.newBuilder() .setContainerID(containerData.getContainerID()) - .setAction(ContainerAction.Action.CLOSE).setReason(reason).build(); + .setAction(ContainerAction.Action.CLOSE) + .setReason(reason).build(); context.addContainerActionIfAbsent(action); } } @@ -606,8 +559,7 @@ private boolean isContainerFull(Container container) { .orElse(Boolean.FALSE); if (isOpen) { ContainerData containerData = container.getContainerData(); - double containerUsedPercentage = - 1.0f * containerData.getBytesUsed() / containerData.getMaxSize(); + double containerUsedPercentage = 1.0f * containerData.getBytesUsed() / containerData.getMaxSize(); return containerUsedPercentage >= containerCloseThreshold; } else { return false; @@ -620,11 +572,9 @@ private boolean isVolumeFull(Container container) { .orElse(Boolean.FALSE); if (isOpen) { HddsVolume volume = container.getContainerData().getVolume(); - SpaceUsageSource precomputedVolumeSpace = - volume.getCurrentUsage(); + SpaceUsageSource precomputedVolumeSpace = volume.getCurrentUsage(); long volumeCapacity = precomputedVolumeSpace.getCapacity(); - long volumeFreeSpaceToSpare = - freeSpaceCalculator.get(volumeCapacity); + long volumeFreeSpaceToSpare = freeSpaceCalculator.get(volumeCapacity); long volumeFree = precomputedVolumeSpace.getAvailable(); long volumeCommitted = volume.getCommittedBytes(); long volumeAvailable = volumeFree - volumeCommitted; @@ -634,9 +584,8 @@ private boolean isVolumeFull(Container container) { } private boolean isContainerUnhealthy(Container container) { - return Optional.ofNullable(container).map( - cont -> (cont.getContainerState() == - ContainerDataProto.State.UNHEALTHY)) + return Optional.ofNullable(container) + .map(cont -> (cont.getContainerState() == ContainerDataProto.State.UNHEALTHY)) .orElse(Boolean.FALSE); } @@ -647,7 +596,7 @@ public Handler getHandler(ContainerProtos.ContainerType containerType) { @Override public void setClusterId(String clusterId) { - Preconditions.checkNotNull(clusterId, "clusterId Cannot be null"); + Preconditions.checkNotNull(clusterId, "clusterId cannot be null"); if (this.clusterId == null) { this.clusterId = clusterId; for (Map.Entry handlerMap : handlers.entrySet()) { @@ -656,6 +605,12 @@ public void setClusterId(String clusterId) { } } + /** + * Retrieves a container from the container set based on the given container ID. 
+ * + * @param containerID the unique identifier of the container to be retrieved. + * @return the container object corresponding to the specified container ID. + */ @VisibleForTesting public Container getContainer(long containerID) { return containerSet.getContainer(containerID); @@ -670,6 +625,11 @@ private ContainerType getContainerType(Container container) { return container.getContainerType(); } + /** + * Sets the container metrics for testing purposes. + * + * @param containerMetrics The ContainerMetrics instance to be used for testing. + */ @VisibleForTesting public void setMetricsForTesting(ContainerMetrics containerMetrics) { this.metrics = containerMetrics; @@ -679,21 +639,19 @@ private EventType getEventType(ContainerCommandRequestProto msg) { return HddsUtils.isReadOnly(msg) ? EventType.READ : EventType.WRITE; } - private void audit(AuditAction action, EventType eventType, - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext, - AuditEventStatus result, Throwable exception) { + private void audit(AuditAction action, EventType eventType, ContainerCommandRequestProto msg, + DispatcherContext dispatcherContext, AuditEventStatus result, Throwable exception) { + Map params; AuditMessage amsg; switch (result) { case SUCCESS: if (isAllowed(action.getAction())) { params = getAuditParams(msg, dispatcherContext); - if (eventType == EventType.READ && - AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) { + if (eventType == EventType.READ && AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) { amsg = buildAuditMessageForSuccess(action, params); AUDIT.logReadSuccess(amsg); - } else if (eventType == EventType.WRITE && - AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) { + } else if (eventType == EventType.WRITE && AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) { amsg = buildAuditMessageForSuccess(action, params); AUDIT.logWriteSuccess(amsg); } @@ -702,21 +660,17 @@ private void audit(AuditAction action, EventType eventType, case FAILURE: params = getAuditParams(msg, dispatcherContext); - if (eventType == EventType.READ && - AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) { + if (eventType == EventType.READ && AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) { amsg = buildAuditMessageForFailure(action, params, exception); AUDIT.logReadFailure(amsg); - } else if (eventType == EventType.WRITE && - AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) { + } else if (eventType == EventType.WRITE && AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) { amsg = buildAuditMessageForFailure(action, params, exception); AUDIT.logWriteFailure(amsg); } break; default: - if (LOG.isDebugEnabled()) { - LOG.debug("Invalid audit event status - {}", result); - } + LOG.debug("Invalid audit event status - {}", result); } } @@ -724,12 +678,19 @@ private void performanceAudit(AuditAction action, ContainerCommandRequestProto m DispatcherContext dispatcherContext, PerformanceStringBuilder performance, long opLatencyNs) { if (isOperationSlow(opLatencyNs)) { Map params = getAuditParams(msg, dispatcherContext); - AuditMessage auditMessage = - buildAuditMessageForPerformance(action, params, performance); + AuditMessage auditMessage = buildAuditMessageForPerformance(action, params, performance); AUDIT.logPerformance(auditMessage); } } + /** + * Builds an audit message for performance tracking purposes. + * + * @param op The audit action representing the operation being performed. 
+ * @param auditMap A map containing key-value pairs for audit parameters. + * @param performance The performance string builder encapsulating performance metrics. + * @return An AuditMessage instance configured with the provided parameters. + */ public AuditMessage buildAuditMessageForPerformance(AuditAction op, Map auditMap, PerformanceStringBuilder performance) { return new AuditMessage.Builder() @@ -743,9 +704,7 @@ public AuditMessage buildAuditMessageForPerformance(AuditAction op, //TODO: use GRPC to fetch user and ip details @Override - public AuditMessage buildAuditMessageForSuccess(AuditAction op, - Map auditMap) { - + public AuditMessage buildAuditMessageForSuccess(AuditAction op, Map auditMap) { return new AuditMessage.Builder() .setUser(null) .atIp(null) @@ -756,9 +715,7 @@ public AuditMessage buildAuditMessageForSuccess(AuditAction op, } @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, - Map auditMap, Throwable throwable) { - + public AuditMessage buildAuditMessageForFailure(AuditAction op, Map auditMap, Throwable throwable) { return new AuditMessage.Builder() .setUser(null) .atIp(null) @@ -776,8 +733,12 @@ enum EventType { /** * Checks if the action is allowed for audit. - * @param action - * @return true or false accordingly. + * + * @param action The action to be checked for allowance. Valid actions include: + * "CLOSE_CONTAINER", "CREATE_CONTAINER", "LIST_CONTAINER", + * "DELETE_CONTAINER", "READ_CONTAINER", "UPDATE_CONTAINER", + * and "DELETE_BLOCK". + * @return {@code true} if the action is allowed; {@code false} otherwise. */ private boolean isAllowed(String action) { switch (action) { @@ -789,14 +750,15 @@ private boolean isAllowed(String action) { case "UPDATE_CONTAINER": case "DELETE_BLOCK": return true; - default: return false; + default: + return false; } } @Override - public StateMachine.DataChannel getStreamDataChannel( - ContainerCommandRequestProto msg) - throws StorageContainerException { + public StateMachine.DataChannel getStreamDataChannel(ContainerCommandRequestProto msg) + throws StorageContainerException { + long containerID = msg.getContainerID(); Container container = getContainer(containerID); if (container != null) { @@ -811,160 +773,153 @@ public StateMachine.DataChannel getStreamDataChannel( private static DNAction getAuditAction(Type cmdType) { switch (cmdType) { - case CreateContainer : return DNAction.CREATE_CONTAINER; - case ReadContainer : return DNAction.READ_CONTAINER; - case UpdateContainer : return DNAction.UPDATE_CONTAINER; - case DeleteContainer : return DNAction.DELETE_CONTAINER; - case ListContainer : return DNAction.LIST_CONTAINER; - case PutBlock : return DNAction.PUT_BLOCK; - case GetBlock : return DNAction.GET_BLOCK; - case DeleteBlock : return DNAction.DELETE_BLOCK; - case ListBlock : return DNAction.LIST_BLOCK; - case ReadChunk : return DNAction.READ_CHUNK; - case DeleteChunk : return DNAction.DELETE_CHUNK; - case WriteChunk : return DNAction.WRITE_CHUNK; - case ListChunk : return DNAction.LIST_CHUNK; - case CompactChunk : return DNAction.COMPACT_CHUNK; - case PutSmallFile : return DNAction.PUT_SMALL_FILE; - case GetSmallFile : return DNAction.GET_SMALL_FILE; - case CloseContainer : return DNAction.CLOSE_CONTAINER; - case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH; - case StreamInit : return DNAction.STREAM_INIT; - case FinalizeBlock : return DNAction.FINALIZE_BLOCK; - case Echo : return DNAction.ECHO; - default : + case CreateContainer: + return DNAction.CREATE_CONTAINER; + 
case ReadContainer: + return DNAction.READ_CONTAINER; + case UpdateContainer: + return DNAction.UPDATE_CONTAINER; + case DeleteContainer: + return DNAction.DELETE_CONTAINER; + case ListContainer: + return DNAction.LIST_CONTAINER; + case PutBlock: + return DNAction.PUT_BLOCK; + case GetBlock: + return DNAction.GET_BLOCK; + case DeleteBlock: + return DNAction.DELETE_BLOCK; + case ListBlock: + return DNAction.LIST_BLOCK; + case ReadChunk: + return DNAction.READ_CHUNK; + case DeleteChunk: + return DNAction.DELETE_CHUNK; + case WriteChunk: + return DNAction.WRITE_CHUNK; + case ListChunk: + return DNAction.LIST_CHUNK; + case CompactChunk: + return DNAction.COMPACT_CHUNK; + case PutSmallFile: + return DNAction.PUT_SMALL_FILE; + case GetSmallFile: + return DNAction.GET_SMALL_FILE; + case CloseContainer: + return DNAction.CLOSE_CONTAINER; + case GetCommittedBlockLength: + return DNAction.GET_COMMITTED_BLOCK_LENGTH; + case StreamInit: + return DNAction.STREAM_INIT; + case FinalizeBlock: + return DNAction.FINALIZE_BLOCK; + case Echo: + return DNAction.ECHO; + case VerifyBlock: + return DNAction.VERIFY_BLOCK; + default: LOG.debug("Invalid command type - {}", cmdType); return null; } } - private static Map getAuditParams( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { + private static Map getAuditParams(ContainerCommandRequestProto msg, + DispatcherContext dispatcherContext) { Map auditParams = new TreeMap<>(); Type cmdType = msg.getCmdType(); String containerID = String.valueOf(msg.getContainerID()); switch (cmdType) { case CreateContainer: auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); - auditParams.put(AUDIT_PARAM_CONTAINER_TYPE, - msg.getCreateContainer().getContainerType().toString()); + auditParams.put(AUDIT_PARAM_CONTAINER_TYPE, msg.getCreateContainer().getContainerType().toString()); return auditParams; case ReadContainer: + case CloseContainer: auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); return auditParams; case UpdateContainer: auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); - auditParams.put(AUDIT_PARAM_FORCE_UPDATE, - String.valueOf(msg.getUpdateContainer().getForceUpdate())); + auditParams.put(AUDIT_PARAM_FORCE_UPDATE, String.valueOf(msg.getUpdateContainer().getForceUpdate())); return auditParams; case DeleteContainer: auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); - auditParams.put(AUDIT_PARAM_FORCE_DELETE, - String.valueOf(msg.getDeleteContainer().getForceDelete())); + auditParams.put(AUDIT_PARAM_FORCE_DELETE, String.valueOf(msg.getDeleteContainer().getForceDelete())); return auditParams; case ListContainer: auditParams.put(AUDIT_PARAM_START_CONTAINER_ID, containerID); - auditParams.put(AUDIT_PARAM_COUNT, - String.valueOf(msg.getListContainer().getCount())); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListContainer().getCount())); return auditParams; case PutBlock: try { auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) - .toString()); + BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()).toString()); } catch (IOException ex) { - if (LOG.isTraceEnabled()) { - LOG.trace("Encountered error parsing BlockData from protobuf: " - + ex.getMessage()); - } + LOG.trace("Encountered error parsing BlockData from protobuf: {}", ex.getMessage(), ex); return null; } return auditParams; case GetBlock: - auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString()); + auditParams.put(AUDIT_PARAM_BLOCK_DATA, 
BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString()); return auditParams; case DeleteBlock: - auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()) - .toString()); + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()).toString()); return auditParams; case ListBlock: - auditParams.put(AUDIT_PARAM_START_LOCAL_ID, - String.valueOf(msg.getListBlock().getStartLocalID())); + auditParams.put(AUDIT_PARAM_START_LOCAL_ID, String.valueOf(msg.getListBlock().getStartLocalID())); auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListBlock().getCount())); return auditParams; case ReadChunk: - auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); - auditParams.put(AUDIT_PARAM_BLOCK_DATA_OFFSET, - String.valueOf(msg.getReadChunk().getChunkData().getOffset())); - auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, - String.valueOf(msg.getReadChunk().getChunkData().getLen())); + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); + auditParams.put(AUDIT_PARAM_BLOCK_DATA_OFFSET, String.valueOf(msg.getReadChunk().getChunkData().getOffset())); + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getReadChunk().getChunkData().getLen())); return auditParams; case DeleteChunk: - auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()) - .toString()); + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()).toString()); return auditParams; case WriteChunk: - auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()) - .toString()); - auditParams.put(AUDIT_PARAM_BLOCK_DATA_OFFSET, - String.valueOf(msg.getWriteChunk().getChunkData().getOffset())); - auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, - String.valueOf(msg.getWriteChunk().getChunkData().getLen())); + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()).toString()); + auditParams.put(AUDIT_PARAM_BLOCK_DATA_OFFSET, String.valueOf(msg.getWriteChunk().getChunkData().getOffset())); + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getWriteChunk().getChunkData().getLen())); if (dispatcherContext != null && dispatcherContext.getStage() != null) { auditParams.put(AUDIT_PARAM_BLOCK_DATA_STAGE, dispatcherContext.getStage().toString()); } return auditParams; case ListChunk: - auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString()); - auditParams.put(AUDIT_PARAM_PREV_CHUNKNAME, msg.getListChunk().getPrevChunkName()); + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString()); + auditParams.put(AUDIT_PARAM_PREV_CHUNK_NAME, msg.getListChunk().getPrevChunkName()); auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListChunk().getCount())); return auditParams; - case CompactChunk: return null; //CompactChunk operation + case CompactChunk: + return null; //CompactChunk operation case PutSmallFile: try { auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockData.getFromProtoBuf(msg.getPutSmallFile() - .getBlock().getBlockData()).toString()); + BlockData.getFromProtoBuf(msg.getPutSmallFile().getBlock().getBlockData()).toString()); auditParams.put(AUDIT_PARAM_BLOCK_DATA_OFFSET, String.valueOf(msg.getPutSmallFile().getChunkInfo().getOffset())); - 
auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, - String.valueOf(msg.getPutSmallFile().getChunkInfo().getLen())); + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getPutSmallFile().getChunkInfo().getLen())); } catch (IOException ex) { - if (LOG.isTraceEnabled()) { - LOG.trace("Encountered error parsing BlockData from protobuf: " - + ex.getMessage()); - } + LOG.trace("Encountered error parsing BlockData from protobuf: {}", ex.getMessage(), ex); } return auditParams; case GetSmallFile: auditParams.put(AUDIT_PARAM_BLOCK_DATA, - BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()) - .toString()); - return auditParams; - - case CloseContainer: - auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()).toString()); return auditParams; case GetCommittedBlockLength: @@ -979,11 +934,15 @@ private static Map getAuditParams( .toString()); return auditParams; + case VerifyBlock: + auditParams.put( + "verifyBlock", + BlockID.getFromProtobuf(msg.getVerifyBlock().getBlockID()).toString()); + return auditParams; + default : LOG.debug("Invalid command type - {}", cmdType); return null; - } } private boolean isOperationSlow(long opLatencyNs) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index bfdff69be46..bba672a0c76 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -21,11 +21,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.Objects; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.BlockData; @@ -54,12 +55,10 @@ public abstract class Handler { protected String clusterId; protected final ContainerMetrics metrics; protected String datanodeId; - private IncrementalReportSender icrSender; + private final IncrementalReportSender icrSender; - protected Handler(ConfigurationSource config, String datanodeId, - ContainerSet contSet, VolumeSet volumeSet, - ContainerMetrics containerMetrics, - IncrementalReportSender icrSender) { + protected Handler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volumeSet, + ContainerMetrics containerMetrics, IncrementalReportSender icrSender) { this.conf = config; this.containerSet = contSet; this.volumeSet = volumeSet; @@ -68,28 +67,41 @@ protected Handler(ConfigurationSource config, String datanodeId, this.icrSender = icrSender; } - public static Handler getHandlerForContainerType( - final ContainerType containerType, final ConfigurationSource config, - final String datanodeId, final ContainerSet contSet, - final VolumeSet volumeSet, final 
ContainerMetrics metrics, + /** + * Returns a handler for the specified container type. + * + * @param containerType the type of container for which the handler is required + * @param config the configuration source + * @param datanodeId the ID of the data node + * @param contSet the set of containers + * @param volumeSet the set of volumes + * @param metrics metrics for the container + * @param icrSender the incremental report sender + * @return a Handler for the specified container type + * @throws IllegalArgumentException if the container type does not exist + */ + public static Handler getHandlerForContainerType(ContainerType containerType, ConfigurationSource config, + String datanodeId, ContainerSet contSet, VolumeSet volumeSet, ContainerMetrics metrics, IncrementalReportSender icrSender) { - switch (containerType) { - case KeyValueContainer: - return new KeyValueHandler(config, - datanodeId, contSet, volumeSet, metrics, - icrSender); - default: - throw new IllegalArgumentException("Handler for ContainerType: " + - containerType + "doesn't exist."); + if (Objects.requireNonNull(containerType) == ContainerType.KeyValueContainer) { + return new KeyValueHandler(config, datanodeId, contSet, volumeSet, metrics, icrSender); } + throw new IllegalArgumentException("Handler for ContainerType: " + containerType + " doesn't exist."); } - public abstract StateMachine.DataChannel getStreamDataChannel( - Container container, ContainerCommandRequestProto msg) - throws StorageContainerException; + /** + * Retrieves the data channel stream for a given container based on the specified request message. + * + * @param container the container for which the data channel will be retrieved + * @param msg the command request message associated with the data channel retrieval + * @return the data channel stream corresponding to the given container and request message + * @throws StorageContainerException if an error occurs while retrieving the data channel + */ + public abstract StateMachine.DataChannel getStreamDataChannel(Container container, ContainerCommandRequestProto msg) + throws StorageContainerException; /** - * Returns the Id of this datanode. + * Returns the id of this datanode. * * @return datanode Id */ @@ -98,41 +110,39 @@ protected String getDatanodeId() { } /** - * This should be called whenever there is state change. It will trigger - an ICR to SCM. + * This should be called whenever there is a state change. It will trigger an ICR to SCM. * * @param container Container for which ICR has to be sent */ - protected void sendICR(final Container container) - throws StorageContainerException { - if (container - .getContainerState() == ContainerProtos.ContainerDataProto - .State.RECOVERING) { + protected void sendICR(final Container container) throws StorageContainerException { + if (container.getContainerState() == State.RECOVERING) { // Ignoring the recovering containers reports for now. return; } icrSender.send(container); } - public abstract ContainerCommandResponseProto handle( - ContainerCommandRequestProto msg, Container container, + /** + * Handles the given container command request. 
+ * + * @param msg the container command request protocol message + * @param container the container to be handled + * @param dispatcherContext the context of the dispatcher handling the command + * @return the response protocol for the executed command + */ + public abstract ContainerCommandResponseProto handle(ContainerCommandRequestProto msg, Container container, DispatcherContext dispatcherContext); /** * Imports container from a raw input stream. */ - public abstract Container importContainer( - ContainerData containerData, InputStream rawContainerStream, - TarContainerPacker packer) - throws IOException; + public abstract Container importContainer(ContainerData containerData, InputStream rawContainerStream, + TarContainerPacker packer) throws IOException; /** * Exports container to the output stream. */ - public abstract void exportContainer( - Container container, - OutputStream outputStream, - TarContainerPacker packer) + public abstract void exportContainer(Container container, OutputStream outputStream, TarContainerPacker packer) throws IOException; /** @@ -141,84 +151,84 @@ public abstract void exportContainer( public abstract void stop(); /** - * Marks the container for closing. Moves the container to CLOSING state. + * Marks the container for closing. Moves the container to {@link State#CLOSING} state. * * @param container container to update * @throws IOException in case of exception */ - public abstract void markContainerForClose(Container container) - throws IOException; + public abstract void markContainerForClose(Container container) throws IOException; /** - * Marks the container Unhealthy. Moves the container to UNHEALTHY state. + * Marks the container Unhealthy. Moves the container to {@link State#UNHEALTHY} state. * * @param container container to update * @param reason The reason the container was marked unhealthy * @throws IOException in case of exception */ - public abstract void markContainerUnhealthy(Container container, - ScanResult reason) - throws IOException; + public abstract void markContainerUnhealthy(Container container, ScanResult reason) throws IOException; /** - * Moves the Container to QUASI_CLOSED state. + * Moves the Container to {@link State#QUASI_CLOSED} state. * * @param container container to be quasi closed - * @param reason The reason the container was quasi closed, for logging - * purposes. - * @throws IOException + * @param reason The reason the container was quasi closed, for logging purposes. */ - public abstract void quasiCloseContainer(Container container, String reason) - throws IOException; + public abstract void quasiCloseContainer(Container container, String reason) throws IOException; /** - * Moves the Container to CLOSED state. + * Moves the Container to {@link State#CLOSED} state. * * @param container container to be closed - * @throws IOException */ - public abstract void closeContainer(Container container) - throws IOException; + public abstract void closeContainer(Container container) throws IOException; /** * Deletes the given container. * * @param container container to be deleted - * @param force if this is set to true, we delete container without - * checking - * state of the container. - * @throws IOException + * @param force if this is set to true, we delete container without checking state of the container. 
*/ - public abstract void deleteContainer(Container container, boolean force) - throws IOException; + public abstract void deleteContainer(Container container, boolean force) throws IOException; /** * Deletes the given files associated with a block of the container. * * @param container container whose block is to be deleted * @param blockData block to be deleted - * @throws IOException */ - public abstract void deleteBlock(Container container, BlockData blockData) - throws IOException; + public abstract void deleteBlock(Container container, BlockData blockData) throws IOException; /** - * Deletes the possible onDisk but unreferenced blocks/chunks with localID - * in the container. + * Deletes the possible onDisk but unreferenced blocks/chunks with localID in the container. * * @param container container whose block/chunk is to be deleted * @param localID localId of the block/chunk - * @throws IOException */ - public abstract void deleteUnreferenced(Container container, long localID) - throws IOException; + public abstract void deleteUnreferenced(Container container, long localID) throws IOException; + /** + * Adds a finalized block to a container. + * + * @param container The container to which the finalized block will be added. + * @param localID The local identifier for the block. + */ public abstract void addFinalizedBlock(Container container, long localID); + /** + * Checks if a finalized block exists in the specified container with the given local ID. + * + * @param container the container to be checked + * @param localID the local ID of the block to be verified + * @return true if the finalized block exists, false otherwise + */ public abstract boolean isFinalizedBlockExist(Container container, long localID); + /** + * Sets the cluster ID for this handler. + * + * @param clusterID the new cluster ID to be set + */ public void setClusterID(String clusterID) { this.clusterId = clusterID; } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index b3398de07ad..78e43c4bf48 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -26,7 +26,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -41,7 +40,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; -import java.util.stream.Collectors; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.HddsUtils; @@ -100,56 +98,53 @@ import org.apache.ratis.util.LifeCycle; import org.apache.ratis.util.TaskQueue; import org.apache.ratis.util.function.CheckedSupplier; -import org.apache.ratis.util.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.ratis.util.JavaUtils.completeExceptionally; + /** - * A {@link StateMachine} for containers, - * which is responsible for handling different types of container requests. 
+ * A {@link StateMachine} for containers, which is responsible for handling different types of container requests. *

* The container requests can be divided into readonly request, WriteChunk request and other write requests. - * - Read only requests (see {@link HddsUtils#isReadOnly}) are handled by {@link #query(Message)}. + * - Read-only requests (see {@link HddsUtils#isReadOnly}) are handled by {@link #query(Message)}. * - WriteChunk request contains user data - * - Other write request does not contain user data. + * - Other write requests do not contain user data. *

- * In order to optimize the write throughput, a WriteChunk request is processed : - * (1) {@link #startTransaction(RaftClientRequest)} separate user data from the client request - * (2) the user data is written directly into the state machine via {@link #write} - * (3) transaction is committed via {@link #applyTransaction(TransactionContext)} + * In order to optimize the write throughput, a WriteChunk request is processed: + *

    + *
  1. {@link #startTransaction(RaftClientRequest)} separates user data from the client request + *
  2. the user data is written directly into the state machine via {@link #write} + *
  3. transaction is committed via {@link #applyTransaction(TransactionContext)} + *
*

- * For the other write requests, + * For the other write requests, * the transaction is directly committed via {@link #applyTransaction(TransactionContext)}. *

- * There are 2 ordering operation which are enforced right now in the code, - * 1) WriteChunk must be executed after the CreateContainer; - * otherwise, WriteChunk will fail with container not found. - * 2) WriteChunk commit is executed after WriteChunk write. - * Then, WriteChunk commit and CreateContainer will be executed in the same order. + * There are two ordering constraints that are enforced right now in the code: + *

    + *
  1. WriteChunk must be executed after the CreateContainer; otherwise, WriteChunk will fail with container not found. + *
  2. WriteChunk commit is executed after WriteChunk write. + * Then, WriteChunk commit and CreateContainer will be executed in the same order. + *
*/ public class ContainerStateMachine extends BaseStateMachine { - static final Logger LOG = - LoggerFactory.getLogger(ContainerStateMachine.class); + static final Logger LOG = LoggerFactory.getLogger(ContainerStateMachine.class); static class TaskQueueMap { private final Map map = new HashMap<>(); - synchronized CompletableFuture submit( - long containerId, - CheckedSupplier task, - ExecutorService executor) { - final TaskQueue queue = map.computeIfAbsent( - containerId, id -> new TaskQueue("container" + id)); - final CompletableFuture f - = queue.submit(task, executor); + synchronized CompletableFuture submit(long containerId, + CheckedSupplier task, ExecutorService executor) { + final TaskQueue queue = map.computeIfAbsent(containerId, id -> new TaskQueue("container" + id)); + final CompletableFuture f = queue.submit(task, executor); // after the task is completed, remove the queue if the queue is empty. f.thenAccept(dummy -> removeIfEmpty(containerId)); return f; } synchronized void removeIfEmpty(long containerId) { - map.computeIfPresent(containerId, - (id, q) -> q.isEmpty() ? null : q); + map.computeIfPresent(containerId, (id, q) -> q.isEmpty() ? null : q); } } @@ -182,14 +177,12 @@ long getStartTime() { } } - private final SimpleStateMachineStorage storage = - new SimpleStateMachineStorage(); + private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); private final RaftGroupId gid; private final ContainerDispatcher dispatcher; private final ContainerController containerController; private final XceiverServerRatis ratisServer; - private final ConcurrentHashMap> writeChunkFutureMap; + private final ConcurrentHashMap> writeChunkFutureMap; // keeps track of the containers created per pipeline private final Map container2BCSIDMap; @@ -203,7 +196,7 @@ long getStartTime() { private final Semaphore applyTransactionSemaphore; private final boolean waitOnBothFollowers; private final HddsDatanodeService datanodeService; - private static Semaphore semaphore = new Semaphore(1); + private static final Semaphore SEMAPHORE = new Semaphore(1); /** * CSM metrics. 
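
The class javadoc above describes how a WriteChunk request is split across startTransaction, write and applyTransaction, and that the commit of a chunk is only executed after its write has completed. The following is a minimal, hypothetical sketch of that ordering; the names writeData and commitTransaction are illustrative stand-ins, not the Ozone APIs used in this patch:

import java.util.concurrent.CompletableFuture;

final class WriteChunkOrderingSketch {
  // The write (step 2) must finish before the commit (step 3); thenRun enforces that ordering.
  static CompletableFuture<Void> process(byte[] userData) {
    return CompletableFuture
        .runAsync(() -> writeData(userData))                     // write the chunk data into the state machine
        .thenRun(WriteChunkOrderingSketch::commitTransaction);   // commit only after the write has completed
  }

  private static void writeData(byte[] data) { /* hypothetical: persist the chunk bytes */ }
  private static void commitTransaction() { /* hypothetical: apply the transaction */ }
}
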
@@ -211,13 +204,16 @@ long getStartTime() { private final CSMMetrics metrics; @SuppressWarnings("parameternumber") - public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupId gid, + public ContainerStateMachine( + HddsDatanodeService hddsDatanodeService, + RaftGroupId gid, ContainerDispatcher dispatcher, ContainerController containerController, List chunkExecutors, XceiverServerRatis ratisServer, ConfigurationSource conf, - String threadNamePrefix) { + String threadNamePrefix + ) { this.datanodeService = hddsDatanodeService; this.gid = gid; this.dispatcher = dispatcher; @@ -230,10 +226,9 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); - // cache with FIFO eviction, and if element not found, this needs - // to be obtained from disk for slow follower + // Cache with FIFO eviction, and if an element not found, this needs to be obtained from disk for slow follower stateMachineDataCache = new ResourceCache<>( - (index, data) -> ((ByteString)data).size(), + (index, data) -> data.size(), pendingRequestsBytesLimit, (p) -> { if (p.wasEvicted()) { @@ -249,23 +244,17 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( - ScmConfigKeys. - HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, - ScmConfigKeys. - HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat( - threadNamePrefix + "ContainerOp-" + gid.getUuid() + "-%d") + .setNameFormat(threadNamePrefix + "ContainerOp-" + gid.getUuid() + "-%d") .build(); - this.executor = Executors.newFixedThreadPool(numContainerOpExecutors, - threadFactory); - - this.waitOnBothFollowers = conf.getObject( - DatanodeConfiguration.class).waitOnAllFollowers(); + this.executor = Executors.newFixedThreadPool(numContainerOpExecutors, threadFactory); + this.waitOnBothFollowers = conf.getObject(DatanodeConfiguration.class).waitOnAllFollowers(); } @Override @@ -273,14 +262,17 @@ public StateMachineStorage getStateMachineStorage() { return storage; } + /** + * Retrieves the current metrics for the {@link ContainerStateMachine}. + * + * @return The CSMMetrics instance containing the metrics data. 
+ */ public CSMMetrics getMetrics() { return metrics; } @Override - public void initialize( - RaftServer server, RaftGroupId id, RaftStorage raftStorage) - throws IOException { + public void initialize(RaftServer server, RaftGroupId id, RaftStorage raftStorage) throws IOException { super.initialize(server, id, raftStorage); storage.init(raftStorage); ratisServer.notifyGroupAdd(gid); @@ -288,59 +280,66 @@ public void initialize( loadSnapshot(storage.getLatestSnapshot()); } - private long loadSnapshot(SingleFileSnapshotInfo snapshot) - throws IOException { + private void loadSnapshot(SingleFileSnapshotInfo snapshot) throws IOException { if (snapshot == null) { TermIndex empty = TermIndex.valueOf(0, RaftLog.INVALID_LOG_INDEX); - LOG.info("{}: The snapshot info is null. Setting the last applied index " + - "to:{}", gid, empty); + LOG.info("{}: The snapshot info is null. Setting the last applied index to:{}", gid, empty); setLastAppliedTermIndex(empty); - return empty.getIndex(); + empty.getIndex(); + return; } final File snapshotFile = snapshot.getFile().getPath().toFile(); - final TermIndex last = - SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); + final TermIndex last = SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); LOG.info("{}: Setting the last applied index to {}", gid, last); setLastAppliedTermIndex(last); - // initialize the dispatcher with snapshot so that it build the missing - // container list + // Initialize the dispatcher with snapshot so that it builds the missing container list buildMissingContainerSet(snapshotFile); - return last.getIndex(); + last.getIndex(); } + /** + * Initializes the dispatcher with the provided snapshot file and builds the + * missing container list by reading the container data from the snapshot file + * and updating the internal container-to-BCSID map. + * + * @param snapshotFile the snapshot file containing the container and block + * container mapping information used to build the missing + * container set. + * @throws IOException if an I/O error occurs while reading the snapshot file + * or updating the container-to-BCSID map. + */ @VisibleForTesting public void buildMissingContainerSet(File snapshotFile) throws IOException { - // initialize the dispatcher with snapshot so that it build the missing - // container list + // Initialize the dispatcher with snapshot so that it builds the missing container list try (FileInputStream fin = new FileInputStream(snapshotFile)) { - ContainerProtos.Container2BCSIDMapProto proto = - ContainerProtos.Container2BCSIDMapProto - .parseFrom(fin); - // read the created containers list from the snapshot file and add it to - // the container2BCSIDMap here. + ContainerProtos.Container2BCSIDMapProto proto = ContainerProtos.Container2BCSIDMapProto.parseFrom(fin); + // Read the created containers list from the snapshot file and add it to the container2BCSIDMap here. // container2BCSIDMap will further grow as and when containers get created container2BCSIDMap.putAll(proto.getContainer2BCSIDMap()); dispatcher.buildMissingContainerSetAndValidate(container2BCSIDMap); } } /** - * As a part of taking snapshot with Ratis StateMachine, it will persist - * the existing container set in the snapshotFile. + * As a part of taking snapshot with Ratis StateMachine, + * it will persist the existing container set in the snapshotFile. 
+ * * @param out OutputStream mapped to the Ratis snapshot file - * @throws IOException */ public void persistContainerSet(OutputStream out) throws IOException { - Container2BCSIDMapProto.Builder builder = - Container2BCSIDMapProto.newBuilder(); + Container2BCSIDMapProto.Builder builder = Container2BCSIDMapProto.newBuilder(); builder.putAllContainer2BCSID(container2BCSIDMap); - // TODO : while snapshot is being taken, deleteContainer call should not - // should not happen. Lock protection will be required if delete - // container happens outside of Ratis. + // TODO: while snapshot is being taken, deleteContainer call should not happen. + // Lock protection will be required if delete container happens outside of Ratis. builder.build().writeTo(out); } + /** + * Checks if the state machine is currently healthy. + * + * @return {@code true} if the state machine is healthy, {@code false} otherwise. + */ public boolean isStateMachineHealthy() { return stateMachineHealthy.get(); } @@ -350,29 +349,29 @@ public long takeSnapshot() throws IOException { TermIndex ti = getLastAppliedTermIndex(); long startTime = Time.monotonicNow(); if (!isStateMachineHealthy()) { - String msg = - "Failed to take snapshot " + " for " + gid + " as the stateMachine" - + " is unhealthy. The last applied index is at " + ti; + String msg = "Failed to take snapshot for " + gid + " as the stateMachine is unhealthy." + + " The last applied index is at " + ti; StateMachineException sme = new StateMachineException(msg); LOG.error(msg); throw sme; } if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { - final File snapshotFile = - storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); + final File snapshotFile = storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile); try (FileOutputStream fos = new FileOutputStream(snapshotFile)) { persistContainerSet(fos); fos.flush(); - // make sure the snapshot file is synced + // Make sure the snapshot file is synced fos.getFD().sync(); } catch (IOException ioe) { - LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, - snapshotFile); + LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, snapshotFile); throw ioe; } LOG.info("{}: Finished taking a snapshot at:{} file:{} took: {} ms", - gid, ti, snapshotFile, (Time.monotonicNow() - startTime)); + gid, + ti, + snapshotFile, + (Time.monotonicNow() - startTime)); return ti.getIndex(); } return -1; @@ -394,13 +393,13 @@ public TransactionContext startTransaction(LogEntryProto entry, RaftPeerRole rol final ContainerCommandRequestProto requestProto; if (logProto.getCmdType() == Type.WriteChunk) { - // combine state machine data + // Combine state machine data requestProto = ContainerCommandRequestProto.newBuilder(logProto) .setWriteChunk(WriteChunkRequestProto.newBuilder(logProto.getWriteChunk()) .setData(stateMachineLogEntry.getStateMachineEntry().getStateMachineData())) .build(); } else { - // request and log are the same when there is no state machine data, + // Request and log are the same when there is no state machine data, requestProto = logProto; } return trx.setStateMachineContext(new Context(requestProto, logProto)); @@ -408,11 +407,9 @@ public TransactionContext startTransaction(LogEntryProto entry, RaftPeerRole rol /** For the Leader to serve the given client request. 
*/ @Override - public TransactionContext startTransaction(RaftClientRequest request) - throws IOException { + public TransactionContext startTransaction(RaftClientRequest request) throws IOException { long startTime = Time.monotonicNowNanos(); - final ContainerCommandRequestProto proto = - message2ContainerCommandRequestProto(request.getMessage()); + final ContainerCommandRequestProto proto = message2ContainerCommandRequestProto(request.getMessage()); Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); final TransactionContext.Builder builder = TransactionContext.newBuilder() @@ -455,8 +452,7 @@ public TransactionContext startTransaction(RaftClientRequest request) builder.setStateMachineData(write.getData()); } } else if (proto.getCmdType() == Type.FinalizeBlock) { - containerController.addFinalizedBlock(proto.getContainerID(), - proto.getFinalizeBlock().getBlockID().getLocalID()); + containerController.addFinalizedBlock(proto.getContainerID(), proto.getFinalizeBlock().getBlockID().getLocalID()); } if (blockAlreadyFinalized) { @@ -478,39 +474,35 @@ private boolean shouldRejectRequest(ContainerProtos.DatanodeBlockID blockID) { return containerController.isFinalizedBlockExist(blockID.getContainerID(), blockID.getLocalID()); } - private static ContainerCommandRequestProto getContainerCommandRequestProto( - RaftGroupId id, ByteString request) + private static ContainerCommandRequestProto getContainerCommandRequestProto(RaftGroupId id, ByteString request) throws InvalidProtocolBufferException { - // TODO: We can avoid creating new builder and set pipeline Id if - // the client is already sending the pipeline id, then we just have to - // validate the pipeline Id. + // TODO: We can avoid creating new builder and set pipeline Id if the client is already sending the pipeline id, + // then we just have to validate the pipeline Id. 
return ContainerCommandRequestProto.newBuilder( ContainerCommandRequestProto.parseFrom(request)) .setPipelineID(id.getUuid().toString()).build(); } - private ContainerCommandRequestProto message2ContainerCommandRequestProto( - Message message) throws InvalidProtocolBufferException { + private ContainerCommandRequestProto message2ContainerCommandRequestProto(Message message) + throws InvalidProtocolBufferException { return ContainerCommandRequestMessage.toProto(message.getContent(), gid); } private ContainerCommandResponseProto dispatchCommand( ContainerCommandRequestProto requestProto, DispatcherContext context) { - if (LOG.isTraceEnabled()) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, - requestProto.getCmdType(), requestProto.getContainerID(), - requestProto.getPipelineID(), requestProto.getTraceID()); - } - ContainerCommandResponseProto response = - dispatcher.dispatch(requestProto, context); - if (LOG.isTraceEnabled()) { - LOG.trace("{}: response {}", gid, response); - } + LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", + gid, + requestProto.getCmdType(), + requestProto.getContainerID(), + requestProto.getPipelineID(), + requestProto.getTraceID()); + ContainerCommandResponseProto response = dispatcher.dispatch(requestProto, context); + LOG.trace("{}: response {}", gid, response); return response; } - private CompletableFuture link( - ContainerCommandRequestProto requestProto, LogEntryProto entry) { + private CompletableFuture link(ContainerCommandRequestProto requestProto, + LogEntryProto entry) { return CompletableFuture.supplyAsync(() -> { final DispatcherContext context = DispatcherContext .newBuilder(DispatcherContext.Op.STREAM_LINK) @@ -524,9 +516,8 @@ private CompletableFuture link( }, executor); } - private CompletableFuture writeStateMachineData( - ContainerCommandRequestProto requestProto, long entryIndex, long term, - long startTime) { + private CompletableFuture writeStateMachineData(ContainerCommandRequestProto requestProto, long entryIndex, + long term, long startTime) { final WriteChunkRequestProto write = requestProto.getWriteChunk(); RaftServer server = ratisServer.getServer(); Preconditions.checkArgument(!write.getData().isEmpty()); @@ -549,18 +540,19 @@ private CompletableFuture writeStateMachineData( .setContainer2BCSIDMap(container2BCSIDMap) .build(); CompletableFuture raftFuture = new CompletableFuture<>(); - // ensure the write chunk happens asynchronously in writeChunkExecutor pool - // thread. + // Ensure the writing chunk happens asynchronously in writeChunkExecutor pool thread. CompletableFuture writeChunkFuture = CompletableFuture.supplyAsync(() -> { try { - metrics.recordWriteStateMachineQueueingLatencyNs( - Time.monotonicNowNanos() - startTime); + metrics.recordWriteStateMachineQueueingLatencyNs(Time.monotonicNowNanos() - startTime); return dispatchCommand(requestProto, context); } catch (Exception e) { - LOG.error("{}: writeChunk writeStateMachineData failed: blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), - entryIndex, write.getChunkData().getChunkName(), e); + LOG.error("{}: writeChunk writeStateMachineData failed: blockId{} logIndex {} chunkName {}", + gid, + write.getBlockID(), + entryIndex, + write.getChunkData().getChunkName(), + e); metrics.incNumWriteDataFails(); // write chunks go in parallel. 
It's possible that one write chunk // see the stateMachine is marked unhealthy by other parallel thread @@ -571,13 +563,12 @@ private CompletableFuture writeStateMachineData( }, getChunkExecutor(requestProto.getWriteChunk())); writeChunkFutureMap.put(entryIndex, writeChunkFuture); - if (LOG.isDebugEnabled()) { - LOG.debug("{}: writeChunk writeStateMachineData : blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), - entryIndex, write.getChunkData().getChunkName()); - } - // Remove the future once it finishes execution from the - // writeChunkFutureMap. + LOG.debug("{}: writeChunk writeStateMachineData : blockId{} logIndex {} chunkName {}", + gid, + write.getBlockID(), + entryIndex, + write.getChunkData().getChunkName()); + // Remove the future once it finishes execution from the writeChunkFutureMap. writeChunkFuture.thenApply(r -> { if (r.getResult() != ContainerProtos.Result.SUCCESS && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN @@ -585,30 +576,30 @@ private CompletableFuture writeStateMachineData( // After concurrent flushes are allowed on the same key, chunk file inconsistencies can happen and // that should not crash the pipeline. && r.getResult() != ContainerProtos.Result.CHUNK_FILE_INCONSISTENCY) { - StorageContainerException sce = - new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName() + " Error message: " + - r.getMessage() + " Container Result: " + r.getResult()); + StorageContainerException sce = new StorageContainerException(r.getMessage(), r.getResult()); + LOG.error("{}: writeChunk writeStateMachineData failed: blockId{} logIndex {} chunkName {} Error message: {}" + + " Container Result: {}", + gid, + write.getBlockID(), + entryIndex, + write.getChunkData().getChunkName(), + r.getMessage(), + r.getResult()); metrics.incNumWriteDataFails(); - // If the write fails currently we mark the stateMachine as unhealthy. - // This leads to pipeline close. Any change in that behavior requires - // handling the entry for the write chunk in cache. + // If the writing fails currently, we mark the stateMachine as unhealthy. + // This leads to the pipeline close. + // Any change in that behavior requires handling the entry for the writing chunk in cache. 
stateMachineHealthy.set(false); raftFuture.completeExceptionally(sce); } else { - metrics.incNumBytesWrittenCount( - requestProto.getWriteChunk().getChunkData().getLen()); - if (LOG.isDebugEnabled()) { - LOG.debug(gid + - ": writeChunk writeStateMachineData completed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName()); - } + metrics.incNumBytesWrittenCount(requestProto.getWriteChunk().getChunkData().getLen()); + LOG.debug("{}: writeChunk writeStateMachineData completed: blockId{} logIndex {} chunkName {}", + gid, + write.getBlockID(), + entryIndex, + write.getChunkData().getChunkName()); raftFuture.complete(r::toByteString); - metrics.recordWriteStateMachineCompletionNs( - Time.monotonicNowNanos() - startTime); + metrics.recordWriteStateMachineCompletionNs(Time.monotonicNowNanos() - startTime); } writeChunkFutureMap.remove(entryIndex); @@ -617,15 +608,14 @@ private CompletableFuture writeStateMachineData( return raftFuture; } - private StateMachine.DataChannel getStreamDataChannel( - ContainerCommandRequestProto requestProto, - DispatcherContext context) throws StorageContainerException { - if (LOG.isDebugEnabled()) { - LOG.debug("{}: getStreamDataChannel {} containerID={} pipelineID={} " + - "traceID={}", gid, requestProto.getCmdType(), - requestProto.getContainerID(), requestProto.getPipelineID(), - requestProto.getTraceID()); - } + private StateMachine.DataChannel getStreamDataChannel(ContainerCommandRequestProto requestProto, + DispatcherContext context) throws StorageContainerException { + LOG.debug("{}: getStreamDataChannel {} containerID={} pipelineID={} traceID={}", + gid, + requestProto.getCmdType(), + requestProto.getContainerID(), + requestProto.getPipelineID(), + requestProto.getTraceID()); dispatchCommand(requestProto, context); // stream init return dispatcher.getStreamDataChannel(requestProto); } @@ -634,8 +624,7 @@ private StateMachine.DataChannel getStreamDataChannel( public CompletableFuture stream(RaftClientRequest request) { return CompletableFuture.supplyAsync(() -> { try { - ContainerCommandRequestProto requestProto = - message2ContainerCommandRequestProto(request.getMessage()); + ContainerCommandRequestProto requestProto = message2ContainerCommandRequestProto(request.getMessage()); DispatcherContext context = DispatcherContext .newBuilder(DispatcherContext.Op.STREAM_INIT) @@ -643,8 +632,9 @@ public CompletableFuture stream(RaftClientRequest request) { .setContainer2BCSIDMap(container2BCSIDMap) .build(); DataChannel channel = getStreamDataChannel(requestProto, context); - final ExecutorService chunkExecutor = requestProto.hasWriteChunk() ? - getChunkExecutor(requestProto.getWriteChunk()) : null; + final ExecutorService chunkExecutor = requestProto.hasWriteChunk() + ? 
getChunkExecutor(requestProto.getWriteChunk()) + : null; return new LocalStream(channel, chunkExecutor); } catch (IOException e) { throw new CompletionException("Failed to create data stream", e); @@ -655,46 +645,40 @@ public CompletableFuture stream(RaftClientRequest request) { @Override public CompletableFuture link(DataStream stream, LogEntryProto entry) { if (stream == null) { - return JavaUtils.completeExceptionally(new IllegalStateException( - "DataStream is null")); + return completeExceptionally(new IllegalStateException("DataStream is null")); } else if (!(stream instanceof LocalStream)) { - return JavaUtils.completeExceptionally(new IllegalStateException( - "Unexpected DataStream " + stream.getClass())); + return completeExceptionally(new IllegalStateException("Unexpected DataStream " + stream.getClass())); } final DataChannel dataChannel = stream.getDataChannel(); if (dataChannel.isOpen()) { - return JavaUtils.completeExceptionally(new IllegalStateException( - "DataStream: " + stream + " is not closed properly")); + return completeExceptionally(new IllegalStateException("DataStream: " + stream + " is not closed properly")); } if (!(dataChannel instanceof KeyValueStreamDataChannel)) { - return JavaUtils.completeExceptionally(new IllegalStateException( - "Unexpected DataChannel " + dataChannel.getClass())); + return completeExceptionally(new IllegalStateException("Unexpected DataChannel " + dataChannel.getClass())); } - final KeyValueStreamDataChannel kvStreamDataChannel = - (KeyValueStreamDataChannel) dataChannel; + final KeyValueStreamDataChannel kvStreamDataChannel = (KeyValueStreamDataChannel) dataChannel; - final ContainerCommandRequestProto request = - kvStreamDataChannel.getPutBlockRequest(); + final ContainerCommandRequestProto request = kvStreamDataChannel.getPutBlockRequest(); return link(request, entry).whenComplete((response, e) -> { if (e != null) { - LOG.warn("Failed to link logEntry {} for request {}", - TermIndex.valueOf(entry), request, e); + LOG.warn("Failed to link logEntry {} for request {}", TermIndex.valueOf(entry), request, e); } if (response != null) { final ContainerProtos.Result result = response.getResult(); - if (LOG.isDebugEnabled()) { - LOG.debug("{} to link logEntry {} for request {}, response: {}", - result, TermIndex.valueOf(entry), request, response); - } + LOG.debug("{} to link logEntry {} for request {}, response: {}", + result, + TermIndex.valueOf(entry), + request, + response); if (result == ContainerProtos.Result.SUCCESS) { kvStreamDataChannel.setLinked(); return; } } - // failed to link, cleanup + // Failed to link, cleanup kvStreamDataChannel.cleanUp(); }); } @@ -704,9 +688,18 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { return chunkExecutors.get(i); } - /* - * writeStateMachineData calls are not synchronized with each other - * and also with applyTransaction. + /** + * Processes a log entry to write data to the state machine. + * Incidents of the number of write state machine operations are recorded. + * {@link #writeStateMachineData(ContainerCommandRequestProto, long, long, long)} + * calls are not synchronized with each other and also with {@link #applyTransaction(TransactionContext)}. + * + * @param entry The log entry to be written. + * @param trx The transaction context associated with this write operation. + * @return A CompletableFuture representing the asynchronous operation of writing the data, + * which will complete with the resulting message or exceptionally if an error occurs. 
+ * @throws NullPointerException if the transaction context is null. + * @throws IllegalStateException if the command type is not supported for state machine data. */ @Override public CompletableFuture write(LogEntryProto entry, TransactionContext trx) { @@ -720,14 +713,10 @@ public CompletableFuture write(LogEntryProto entry, TransactionContext // For only writeChunk, there will be writeStateMachineData call. // CreateContainer will happen as a part of writeChunk only. - switch (cmdType) { - case WriteChunk: - return writeStateMachineData(requestProto, entry.getIndex(), - entry.getTerm(), writeStateMachineStartTime); - default: - throw new IllegalStateException("Cmd Type:" + cmdType - + " should not have state machine data"); + if (Objects.requireNonNull(cmdType) == Type.WriteChunk) { + return writeStateMachineData(requestProto, entry.getIndex(), entry.getTerm(), writeStateMachineStartTime); } + throw new IllegalStateException("Cmd Type:" + cmdType + " should not have state machine data"); } catch (Exception e) { metrics.incNumWriteStateMachineFails(); return completeExceptionally(e); @@ -738,26 +727,21 @@ public CompletableFuture write(LogEntryProto entry, TransactionContext public CompletableFuture query(Message request) { try { metrics.incNumQueryStateMachineOps(); - final ContainerCommandRequestProto requestProto = - message2ContainerCommandRequestProto(request); - return CompletableFuture.completedFuture( - dispatchCommand(requestProto, null)::toByteString); + final ContainerCommandRequestProto requestProto = message2ContainerCommandRequestProto(request); + return CompletableFuture.completedFuture(dispatchCommand(requestProto, null)::toByteString); } catch (IOException e) { metrics.incNumQueryStateMachineFails(); return completeExceptionally(e); } } - private ByteString readStateMachineData( - ContainerCommandRequestProto requestProto, long term, long index) + private ByteString readStateMachineData(ContainerCommandRequestProto requestProto, long term, long index) throws IOException { - // the stateMachine data is not present in the stateMachine cache, - // increment the stateMachine cache miss count + // The stateMachine data is not present in the stateMachine cache, increment the stateMachine cache miss count metrics.incNumReadStateMachineMissCount(); - WriteChunkRequestProto writeChunkRequestProto = - requestProto.getWriteChunk(); + WriteChunkRequestProto writeChunkRequestProto = requestProto.getWriteChunk(); ContainerProtos.ChunkInfo chunkInfo = writeChunkRequestProto.getChunkData(); - // prepare the chunk to be read + // Prepare the chunk to be read ReadChunkRequestProto.Builder readChunkRequestProto = ReadChunkRequestProto.newBuilder() .setBlockID(writeChunkRequestProto.getBlockID()) @@ -772,16 +756,16 @@ private ByteString readStateMachineData( .setTerm(term) .setLogIndex(index) .build(); - // read the chunk - ContainerCommandResponseProto response = - dispatchCommand(dataContainerCommandProto, context); + // Read the chunk + ContainerCommandResponseProto response = dispatchCommand(dataContainerCommandProto, context); if (response.getResult() != ContainerProtos.Result.SUCCESS) { - StorageContainerException sce = - new StorageContainerException(response.getMessage(), - response.getResult()); - LOG.error("gid {} : ReadStateMachine failed. 
cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, response.getCmdType(), index, - response.getMessage(), response.getResult()); + StorageContainerException sce = new StorageContainerException(response.getMessage(), response.getResult()); + LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : {} Container Result: {}", + gid, + response.getCmdType(), + index, + response.getMessage(), + response.getResult()); stateMachineHealthy.set(false); throw sce; } @@ -791,45 +775,42 @@ private ByteString readStateMachineData( if (responseProto.hasData()) { data = responseProto.getData(); } else { - data = BufferUtils.concatByteStrings( - responseProto.getDataBuffers().getBuffersList()); + data = BufferUtils.concatByteStrings(responseProto.getDataBuffers().getBuffersList()); } - // assert that the response has data in it. - Preconditions - .checkNotNull(data, "read chunk data is null for chunk: %s", - chunkInfo); + // Assert that the response has data in it. + Preconditions.checkNotNull(data, "read chunk data is null for chunk: %s", chunkInfo); Preconditions.checkState(data.size() == chunkInfo.getLen(), "read chunk len=%s does not match chunk expected len=%s for chunk:%s", - data.size(), chunkInfo.getLen(), chunkInfo); + data.size(), + chunkInfo.getLen(), + chunkInfo); return data; } /** - * Returns the combined future of all the writeChunks till the given log - * index. The Raft log worker will wait for the stateMachineData to complete - * flush as well. + * Returns the combined future of all the writeChunks till the given log index. + * The Raft log worker will wait for the stateMachineData to complete flush as well. * * @param index log index till which the stateMachine data needs to be flushed * @return Combined future of all writeChunks till the log index given. */ @Override public CompletableFuture flush(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); - return CompletableFuture.allOf( - futureList.toArray(new CompletableFuture[futureList.size()])); + return CompletableFuture.allOf(writeChunkFutureMap.entrySet().stream() + .filter(x -> x.getKey() <= index) + .map(Map.Entry::getValue) + .toArray(CompletableFuture[]::new)); } /** - * This method is used by the Leader to read state machine date for sending appendEntries to followers. + * The Leader uses this method to read state machine date for sending appendEntries to followers. * It will first get the data from {@link #stateMachineDataCache}. * If the data is not in the cache, it will read from the file by dispatching a command * * @param trx the transaction context, - * which can be null if this method is invoked after {@link #applyTransaction(TransactionContext)}. + * which can be null if this method is invoked after {@link #applyTransaction(TransactionContext)}. */ @Override public CompletableFuture read(LogEntryProto entry, TransactionContext trx) { @@ -856,7 +837,8 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex final Context context = (Context) Optional.ofNullable(trx) .map(TransactionContext::getStateMachineContext) .orElse(null); - final ContainerCommandRequestProto requestProto = context != null ? context.getLogProto() + final ContainerCommandRequestProto requestProto = context != null + ? 
context.getLogProto() : getContainerCommandRequestProto(gid, entry.getStateMachineLogEntry().getLogData()); if (requestProto.getCmdType() != Type.WriteChunk) { @@ -897,33 +879,32 @@ private synchronized void updateLastApplied() { } /** - * Notifies the state machine about index updates because of entries - * which do not cause state machine update, i.e. conf entries, metadata - * entries + * Notifies the state machine about index updates because of entries which do not cause state machine update, + * i.e., conf entries, metadata entries. + * * @param term term of the log entry * @param index index of the log entry */ @Override public void notifyTermIndexUpdated(long term, long index) { applyTransactionCompletionMap.put(index, term); - // We need to call updateLastApplied here because now in ratis when a - // node becomes leader, it is checking stateMachineIndex >= - // placeHolderIndex (when a node becomes leader, it writes a conf entry - // with some information like its peers and termIndex). So, calling - // updateLastApplied updates lastAppliedTermIndex. + // We need to call updateLastApplied here because now in ratis when a node becomes leader, + // it is checking stateMachineIndex >= placeHolderIndex + // (when a node becomes leader, it writes a conf entry with some information like its peers and termIndex). + // So, calling updateLastApplied updates lastAppliedTermIndex. updateLastApplied(); removeStateMachineDataIfNeeded(index); } @Override public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allServer) { - // if datanodeService is stopped , it indicates this `close` originates - // from `HddsDatanodeService.stop()`, otherwise, it indicates this `close` originates from ratis. + // If datanodeService is stopped, it indicates this `close` originates from `HddsDatanodeService.stop()`, + // otherwise, it indicates this `close` originates from ratis. 
if (allServer) { if (datanodeService != null && !datanodeService.isStopped()) { LOG.info("{} is closed by ratis", gid); - if (semaphore.tryAcquire()) { - // run with a different thread, so this raft group can be closed + if (SEMAPHORE.tryAcquire()) { + // Run with a different thread, so this raft group can be closed Runnable runnable = () -> { try { int closed = 0, total = 0; @@ -932,9 +913,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS } catch (InterruptedException e) { Thread.currentThread().interrupt(); } - Iterator iterator = ratisServer.getServer().getGroupIds().iterator(); - while (iterator.hasNext()) { - RaftGroupId id = iterator.next(); + for (RaftGroupId id : ratisServer.getServer().getGroupIds()) { RaftServer.Division division = ratisServer.getServer().getDivision(id); if (division.getRaftServer().getLifeCycleState() == LifeCycle.State.CLOSED) { closed++; @@ -959,37 +938,38 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS } private CompletableFuture applyTransaction( - ContainerCommandRequestProto request, DispatcherContext context, - Consumer exceptionHandler) { + ContainerCommandRequestProto request, DispatcherContext context, Consumer exceptionHandler) { final long containerId = request.getContainerID(); - final CheckedSupplier task - = () -> { - try { - long timeNow = Time.monotonicNowNanos(); - long queueingDelay = timeNow - context.getStartTime(); - metrics.recordQueueingDelay(request.getCmdType(), queueingDelay); - // TODO: add a counter to track number of executing applyTransaction - // and queue size - return dispatchCommand(request, context); - } catch (Exception e) { - exceptionHandler.accept(e); - throw e; - } - }; + final CheckedSupplier task = () -> { + try { + long timeNow = Time.monotonicNowNanos(); + long queueingDelay = timeNow - context.getStartTime(); + metrics.recordQueueingDelay(request.getCmdType(), queueingDelay); + // TODO: add a counter to track number of executing applyTransaction + // and queue size + return dispatchCommand(request, context); + } catch (Exception e) { + exceptionHandler.accept(e); + throw e; + } + }; return containerTaskQueues.submit(containerId, task, executor); } - // Removes the stateMachine data from cache once both followers catch up - // to the particular index. + /** + * Removes state machine data from the cache if certain conditions are met. + * This method will remove data corresponding to the given log index up to a certain minimum index + * if the current division is the leader and waitOnBothFollowers is set to true. + * + * @param index the log index up to which data may be removed from the cache + */ private void removeStateMachineDataIfNeeded(long index) { if (waitOnBothFollowers) { try { RaftServer.Division division = ratisServer.getServer().getDivision(gid); if (division.getInfo().isLeader()) { - long minIndex = Arrays.stream(division.getInfo() - .getFollowerNextIndices()).min().getAsLong(); - LOG.debug("Removing data corresponding to log index {} min index {} " - + "from cache", index, minIndex); + long minIndex = Arrays.stream(division.getInfo().getFollowerNextIndices()).min().getAsLong(); + LOG.debug("Removing data corresponding to log index {} min index {} from cache", index, minIndex); removeCacheDataUpTo(Math.min(minIndex, index)); } } catch (Exception e) { @@ -998,20 +978,23 @@ private void removeStateMachineDataIfNeeded(long index) { } } - /* - * ApplyTransaction calls in Ratis are sequential. 
+ /** + * Applies a transaction to the state machine. + * + * @param trx The transaction context containing information about the transaction to be applied. + * @return A CompletableFuture that will complete with the resultant message once the transaction is applied. */ @Override public CompletableFuture applyTransaction(TransactionContext trx) { long index = trx.getLogEntry().getIndex(); try { - // Remove the stateMachine data once both followers have caught up. If any - // one of the follower is behind, the pending queue will max out as - // configurable limit on pending request size and count and then will - // block and client will backoff as a result of that. + // Remove the stateMachine data once both followers have caught up. + // If any one of the followers is behind, + // the pending queue will max out as configurable limit on pending request size and count + // and then will block and client will backoff as a result of that. removeStateMachineDataIfNeeded(index); - // if waitOnBothFollower is false, remove the entry from the cache - // as soon as its applied and such entry exists in the cache. + // If waitOnBothFollower is false, + // remove the entry from the cache as soon as its applied and such entry exists in the cache. removeStateMachineDataIfMajorityFollowSync(index); final DispatcherContext.Builder builder = DispatcherContext .newBuilder(DispatcherContext.Op.APPLY_TRANSACTION) @@ -1024,14 +1007,12 @@ public CompletableFuture applyTransaction(TransactionContext trx) { applyTransactionSemaphore.acquire(); metrics.incNumApplyTransactionsOps(); - Objects.requireNonNull(context, "context == null"); final ContainerCommandRequestProto requestProto = context.getLogProto(); final Type cmdType = requestProto.getCmdType(); - // Make sure that in write chunk, the user data is not set + // Make sure that in writes chunk, the user data is not set if (cmdType == Type.WriteChunk) { - Preconditions - .checkArgument(requestProto.getWriteChunk().getData().isEmpty()); + Preconditions.checkArgument(requestProto.getWriteChunk().getData().isEmpty()); builder.setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA); } if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile @@ -1039,65 +1020,58 @@ public CompletableFuture applyTransaction(TransactionContext trx) { || cmdType == Type.StreamInit) { builder.setContainer2BCSIDMap(container2BCSIDMap); } - CompletableFuture applyTransactionFuture = - new CompletableFuture<>(); + CompletableFuture applyTransactionFuture = new CompletableFuture<>(); final Consumer exceptionHandler = e -> { - LOG.error(gid + ": failed to applyTransaction at logIndex " + index - + " for " + requestProto.getCmdType(), e); + LOG.error("{}: failed to applyTransaction at logIndex {} for {}", gid, index, requestProto.getCmdType(), e); stateMachineHealthy.compareAndSet(true, false); metrics.incNumApplyTransactionsFails(); applyTransactionFuture.completeExceptionally(e); }; - // Ensure the command gets executed in a separate thread than - // stateMachineUpdater thread which is calling applyTransaction here. + // Ensure the command gets executed in a separate thread than stateMachineUpdater thread + // which is calling applyTransaction here. 
final CompletableFuture future = applyTransaction(requestProto, builder.build(), exceptionHandler); future.thenApply(r -> { // TODO: add metrics for non-leader case if (trx.getServerRole() == RaftPeerRole.LEADER) { final long startTime = context.getStartTime(); - metrics.incPipelineLatencyMs(cmdType, - (Time.monotonicNowNanos() - startTime) / 1000000L); + metrics.incPipelineLatencyMs(cmdType, (Time.monotonicNowNanos() - startTime) / 1000000L); } - // ignore close container exception while marking the stateMachine - // unhealthy + // Ignore close container exception while marking the stateMachine unhealthy if (r.getResult() != ContainerProtos.Result.SUCCESS && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN && r.getResult() != ContainerProtos.Result.CLOSED_CONTAINER_IO && r.getResult() != ContainerProtos.Result.CHUNK_FILE_INCONSISTENCY) { - StorageContainerException sce = - new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error( - "gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); + StorageContainerException sce = new StorageContainerException(r.getMessage(), r.getResult()); + LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : {} Container Result: {}", + gid, + r.getCmdType(), + index, + r.getMessage(), + r.getResult()); metrics.incNumApplyTransactionsFails(); // Since the applyTransaction now is completed exceptionally, - // before any further snapshot is taken , the exception will be - // caught in stateMachineUpdater in Ratis and ratis server will - // shutdown. + // before any further snapshot is taken, + // the exception will be caught in stateMachineUpdater in Ratis and ratis server will shut down. applyTransactionFuture.completeExceptionally(sce); stateMachineHealthy.compareAndSet(true, false); ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); } else { - if (LOG.isDebugEnabled()) { - LOG.debug( - "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); - } + LOG.debug("gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : {} Container Result: {}", + gid, + r.getCmdType(), + index, + r.getMessage(), + r.getResult()); if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { - metrics.incNumBytesCommittedCount( - requestProto.getWriteChunk().getChunkData().getLen()); + metrics.incNumBytesCommittedCount(requestProto.getWriteChunk().getChunkData().getLen()); } applyTransactionFuture.complete(r::toByteString); - // add the entry to the applyTransactionCompletionMap only if the - // stateMachine is healthy i.e, there has been no applyTransaction - // failures before. + // Add the entry to the applyTransactionCompletionMap only if the stateMachine is healthy, + // i.e., there have been no applyTransaction failures before. 
if (isStateMachineHealthy()) { - final Long previous = applyTransactionCompletionMap - .put(index, trx.getLogEntry().getTerm()); + final Long previous = applyTransactionCompletionMap.put(index, trx.getLogEntry().getTerm()); Preconditions.checkState(previous == null); updateLastApplied(); } @@ -1108,8 +1082,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { exceptionHandler.accept(t); } applyTransactionSemaphore.release(); - metrics.recordApplyTransactionCompletionNs( - Time.monotonicNowNanos() - applyTxnStartTime); + metrics.recordApplyTransactionCompletionNs(Time.monotonicNowNanos() - applyTxnStartTime); if (trx.getServerRole() == RaftPeerRole.LEADER) { metrics.decPendingApplyTransactions(); } @@ -1127,8 +1100,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { private void removeStateMachineDataIfMajorityFollowSync(long index) { if (!waitOnBothFollowers) { - // if majority follow in sync, remove all cache previous to current index - // including current index + // If the majority follow in sync, remove all caches previous to current index including current index removeCacheDataUpTo(index); } } @@ -1137,15 +1109,9 @@ private void removeCacheDataUpTo(long index) { stateMachineDataCache.removeIf(k -> k <= index); } - private static CompletableFuture completeExceptionally(Exception e) { - final CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(e); - return future; - } - @Override public void notifyNotLeader(Collection pendingEntries) { - // once the leader steps down , clear the cache + // Once the leader steps down, clear the cache evictStateMachineCache(); } @@ -1155,6 +1121,11 @@ public CompletableFuture truncate(long index) { return CompletableFuture.completedFuture(null); } + /** + * Evicts the state machine cache by clearing all cached data. + * This method is primarily used for testing and is typically invoked + * when there is a need to reset or invalidate the current state of the cache. + */ @VisibleForTesting public void evictStateMachineCache() { stateMachineDataCache.clear(); @@ -1172,16 +1143,18 @@ public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { @Override public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) { - LOG.error("{}: {} {}", gid, TermIndex.valueOf(failedEntry), - toStateMachineLogEntryString(failedEntry.getStateMachineLogEntry()), t); + LOG.error("{}: {} {}", + gid, + TermIndex.valueOf(failedEntry), + toStateMachineLogEntryString(failedEntry.getStateMachineLogEntry()), + t); ratisServer.handleNodeLogFailure(gid, t); } @Override - public CompletableFuture notifyInstallSnapshotFromLeader( - RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, - firstTermIndexInLog); + public CompletableFuture notifyInstallSnapshotFromLeader(RoleInfoProto roleInfoProto, + TermIndex firstTermIndexInLog) { + ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, firstTermIndexInLog); final CompletableFuture future = new CompletableFuture<>(); future.complete(firstTermIndexInLog); return future; @@ -1190,14 +1163,12 @@ public CompletableFuture notifyInstallSnapshotFromLeader( @Override public void notifyGroupRemove() { ratisServer.notifyGroupRemove(gid); - // Make best effort to quasi-close all the containers on group removal. - // Containers already in terminal state like CLOSED or UNHEALTHY will not - // be affected. + // Make the best effort to quasi-close all the containers on group removal. 
+ // Containers already in terminal state like CLOSED or UNHEALTHY will not be affected. for (Long cid : container2BCSIDMap.keySet()) { try { containerController.markContainerForClose(cid); - containerController.quasiCloseContainer(cid, - "Ratis group removed"); + containerController.quasiCloseContainer(cid, "Ratis group removed. Group id: " + gid); } catch (IOException e) { LOG.debug("Failed to quasi-close container {}", cid); } @@ -1212,8 +1183,7 @@ public void close() { } @Override - public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, - RaftPeerId raftPeerId) { + public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, RaftPeerId raftPeerId) { ratisServer.handleLeaderChangedNotification(groupMemberId, raftPeerId); } @@ -1222,13 +1192,19 @@ public String toStateMachineLogEntryString(StateMachineLogEntryProto proto) { return smProtoToString(gid, containerController, proto); } - public static String smProtoToString(RaftGroupId gid, - ContainerController containerController, - StateMachineLogEntryProto proto) { + /** + * Converts the given {@link StateMachineLogEntryProto} to a string representation. + * + * @param gid the Raft group ID associated with the state machine entry. + * @param containerController the controller used to manage container operations, can be null. + * @param proto the StateMachineLogEntryProto instance to be converted to string. + * @return a string representation of the state machine log entry. + */ + public static String smProtoToString(RaftGroupId gid, ContainerController containerController, + StateMachineLogEntryProto proto) { StringBuilder builder = new StringBuilder(); try { - ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto(gid, proto.getLogData()); + ContainerCommandRequestProto requestProto = getContainerCommandRequestProto(gid, proto.getLogData()); long contId = requestProto.getContainerID(); builder.append(TextFormat.shortDebugString(requestProto)); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java index 6d32f3a3f3e..a50a125f6d4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java @@ -46,6 +46,16 @@ public ECReconstructionCoordinatorTask( debugString = reconstructionCommandInfo.toString(); } + @Override + public String getMetricName() { + return "ECReconstructions"; + } + + @Override + public String getMetricDescriptionSegment() { + return "EC reconstructions"; + } + @Override public void runTask() { // Implement the coordinator logic to handle a container group diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 98d81c15d0a..ed9c4a7ede0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -25,7 +25,6 @@ import java.nio.file.Files; import java.nio.file.Path; import 
java.nio.file.Paths; -import java.nio.file.StandardCopyOption; import java.time.Instant; import java.util.Collections; import java.util.HashSet; @@ -68,6 +67,8 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR; @@ -84,122 +85,113 @@ import org.slf4j.LoggerFactory; /** - * Class to perform KeyValue Container operations. Any modifications to - * KeyValueContainer object should ideally be done via api exposed in - * KeyValueHandler class. + * Class to perform KeyValue Container operations. + * Any modifications to KeyValueContainer object should ideally be done via api exposed in KeyValueHandler class. */ public class KeyValueContainer implements Container { - private static final Logger LOG = - LoggerFactory.getLogger(KeyValueContainer.class); + private static final Logger LOG = LoggerFactory.getLogger(KeyValueContainer.class); - // Use a non-fair RW lock for better throughput, we may revisit this decision - // if this causes fairness issues. + // Use a non-fair RW lock for better throughput, we may revisit this decision if this causes fairness issues. private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); // Simple lock to synchronize container metadata dump operation. private final Object dumpLock = new Object(); private final KeyValueContainerData containerData; - private ConfigurationSource config; - - // Cache of Blocks (LocalIDs) awaiting final PutBlock call after the stream - // is closed. When a block is added to the DB as part of putBlock, it is - // added to the cache here. It is cleared from the Cache when the putBlock - // is called on the block as part of stream.close() (with endOfBlock = true - // in BlockManagerImpl#putBlock). Or when the container is marked for - // close, the whole cache is cleared as there can be no more writes to this - // container. - // We do not need to explicitly synchronize this cache as the writes to - // container are synchronous. + private final ConfigurationSource config; + + // Cache of Blocks (LocalIDs) awaiting final PutBlock call after the stream is closed. + // When a block is added to the DB as part of putBlock, it is added to the cache here. + // It is cleared from the Cache when the putBlock is called on the block as part of stream.close() + // (with endOfBlock = true in BlockManagerImpl#putBlock). + // Or when the container is marked for close, + // the whole cache is cleared as there can be no more writes to this container. + // We do not need to explicitly synchronize this cache as the writings to container are synchronous. 
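
The comment above describes the lifecycle of pendingPutBlockCache: a block's local ID is cached on an intermediate putBlock, removed when the final putBlock (endOfBlock = true) is issued at stream close, and the whole cache is dropped once the container can no longer accept writes. A rough, hypothetical sketch of that lifecycle follows; the method names are illustrative, not the real KeyValueContainer API:

import java.util.HashSet;
import java.util.Set;

final class PendingPutBlockCacheSketch {
  private final Set<Long> pending = new HashSet<>();

  void onPutBlock(long localId, boolean endOfBlock) {
    if (endOfBlock) {
      pending.remove(localId);   // final putBlock at stream close clears the cached entry
    } else {
      pending.add(localId);      // intermediate putBlock leaves the block pending
    }
  }

  void onMarkContainerForClose() {
    pending.clear();             // no further writes are possible, so drop every pending entry
  }
}
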
private Set pendingPutBlockCache; private boolean bCheckChunksFilePath; - public KeyValueContainer(KeyValueContainerData containerData, - ConfigurationSource ozoneConfig) { - Preconditions.checkNotNull(containerData, - "KeyValueContainerData cannot be null"); - Preconditions.checkNotNull(ozoneConfig, - "Ozone configuration cannot be null"); + public KeyValueContainer(KeyValueContainerData containerData, ConfigurationSource ozoneConfig) { + Preconditions.checkNotNull(containerData, "KeyValueContainerData cannot be null"); + Preconditions.checkNotNull(ozoneConfig, "Ozone configuration cannot be null"); this.config = ozoneConfig; this.containerData = containerData; if (this.containerData.isOpen() || this.containerData.isClosing()) { - // If container is not in OPEN or CLOSING state, there cannot be block - // writes to the container. So pendingPutBlockCache is not needed. + // If the container is not in OPEN or CLOSING state, there cannot be block writes to the container. + // So pendingPutBlockCache is unnecessary. this.pendingPutBlockCache = new HashSet<>(); } else { this.pendingPutBlockCache = Collections.emptySet(); } - DatanodeConfiguration dnConf = - config.getObject(DatanodeConfiguration.class); + DatanodeConfiguration dnConf = config.getObject(DatanodeConfiguration.class); bCheckChunksFilePath = dnConf.getCheckEmptyContainerDir(); } + /** + * Sets the flag to check the chunk directory file path. + * + * @param bCheckChunksDirFilePath the flag indicating whether to check the chunks directory file path + */ @VisibleForTesting public void setCheckChunksFilePath(boolean bCheckChunksDirFilePath) { this.bCheckChunksFilePath = bCheckChunksDirFilePath; } @Override - public void create(VolumeSet volumeSet, VolumeChoosingPolicy - volumeChoosingPolicy, String clusterId) throws StorageContainerException { - Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " + - "cannot be null"); + public void create(VolumeSet volumeSet, VolumeChoosingPolicy volumeChoosingPolicy, String clusterId) + throws StorageContainerException { + Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy cannot be null"); Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null"); Preconditions.checkNotNull(clusterId, "clusterId cannot be null"); File containerMetaDataPath = null; - //acquiring volumeset read lock + // Acquiring volumeset read lock long maxSize = containerData.getMaxSize(); volumeSet.readLock(); try { - List volumes - = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()); + List volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()); while (true) { HddsVolume containerVolume; try { containerVolume = volumeChoosingPolicy.chooseVolume(volumes, maxSize); } catch (DiskOutOfSpaceException ex) { - throw new StorageContainerException("Container creation failed, " + - "due to disk out of space", ex, DISK_OUT_OF_SPACE); + throw new StorageContainerException( + "Container creation failed, due to disk out of space", + ex, + DISK_OUT_OF_SPACE); } catch (IOException ex) { throw new StorageContainerException( - "Container creation failed. " + ex.getMessage(), ex, + "Container creation failed. " + ex.getMessage(), + ex, CONTAINER_INTERNAL_ERROR); } try { String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - // Set volume before getContainerDBFile(), because we may need the - // volume to deduce the db file. + // Set volume before getContainerDBFile(), because we may need the volume to deduce the db file. 
containerData.setVolume(containerVolume); long containerID = containerData.getContainerID(); - String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID( - containerVolume, clusterId); - // Set schemaVersion before the dbFile since we have to - // choose the dbFile location based on schema version. - String schemaVersion = VersionedDatanodeFeatures.SchemaV3 - .chooseSchemaVersion(config); + String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID(containerVolume, clusterId); + // Set schemaVersion before the dbFile since we have to choose the dbFile location based on a schema version. + String schemaVersion = VersionedDatanodeFeatures.SchemaV3.chooseSchemaVersion(config); containerData.setSchemaVersion(schemaVersion); - containerMetaDataPath = KeyValueContainerLocationUtil - .getContainerMetaDataPath(hddsVolumeDir, idDir, containerID); + containerMetaDataPath = + KeyValueContainerLocationUtil.getContainerMetaDataPath(hddsVolumeDir, idDir, containerID); containerData.setMetadataPath(containerMetaDataPath.getPath()); - File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, idDir, containerID); + File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(hddsVolumeDir, idDir, containerID); - // Check if it is new Container. + // Check if it is a new Container. ContainerUtils.verifyIsNewContainer(containerMetaDataPath); - //Create Metadata path chunks path and metadata db + // Create Metadata path chunks path and metadata db File dbFile = getContainerDBFile(); - createContainerMetaData(containerMetaDataPath, chunksPath, dbFile, - containerData.getSchemaVersion(), config); + createContainerMetaData(containerMetaDataPath, chunksPath, dbFile, containerData.getSchemaVersion(), config); - //Set containerData for the KeyValueContainer. + // Set containerData for the KeyValueContainer. containerData.setChunksPath(chunksPath.getPath()); containerData.setDbFile(dbFile); @@ -209,32 +201,34 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy return; } catch (StorageContainerException ex) { - if (containerMetaDataPath != null - && containerMetaDataPath.getParentFile().exists()) { + if (containerMetaDataPath != null && containerMetaDataPath.getParentFile().exists()) { FileUtil.fullyDelete(containerMetaDataPath.getParentFile()); } throw ex; } catch (FileAlreadyExistsException ex) { - throw new StorageContainerException("Container creation failed " + - "because ContainerFile already exists", ex, + throw new StorageContainerException( + "Container creation failed because ContainerFile already exists", + ex, CONTAINER_ALREADY_EXISTS); } catch (IOException ex) { - // This is a general catch all - no space left of device, which should - // not happen as the volume Choosing policy should filter out full - // disks, but it may still be possible if the disk quickly fills, - // or some IO error on the disk etc. In this case we try again with a - // different volume if there are any left to try. - if (containerMetaDataPath != null && - containerMetaDataPath.getParentFile().exists()) { + // This is a general catch-all - no space left of device, + // which should not happen as the volume choosing policy should filter out full disks, + // but it may still be possible if the disk quickly fills, or some IO error on the disk etc. + // In this case we try again with a different volume if there are any left to try. 
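The retry loop above follows a try-the-next-volume shape; a generic sketch of that shape, assuming a hypothetical Chooser/Creator pair rather than the real VolumeChoosingPolicy:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class NextVolumeRetrySketch {
  interface Chooser<V> { V choose(List<V> volumes, long size) throws IOException; }
  interface Creator<V> { void createOn(V volume) throws IOException; }

  // Keep trying volumes until creation succeeds or no candidates remain.
  static <V> void createWithRetry(List<V> candidates, long size,
      Chooser<V> chooser, Creator<V> creator) throws IOException {
    List<V> volumes = new ArrayList<>(candidates);
    while (true) {
      V volume = chooser.choose(volumes, size);
      try {
        creator.createOn(volume);
        return;                 // success: stop retrying
      } catch (IOException ex) {
        volumes.remove(volume); // drop the failed volume and try the next one
        if (volumes.isEmpty()) {
          throw ex;             // nothing left to try
        }
      }
    }
  }
}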
+ if (containerMetaDataPath != null && containerMetaDataPath.getParentFile().exists()) { FileUtil.fullyDelete(containerMetaDataPath.getParentFile()); } volumes.remove(containerVolume); - LOG.error("Exception attempting to create container {} on volume {}" + - " remaining volumes to try {}", containerData.getContainerID(), - containerVolume.getHddsRootDir(), volumes.size(), ex); - if (volumes.size() == 0) { + LOG.error( + "Exception attempting to create container {} on volume {} remaining volumes to try {}", + containerData.getContainerID(), + containerVolume.getHddsRootDir(), + volumes.size(), + ex); + if (volumes.isEmpty()) { throw new StorageContainerException( - "Container creation failed. " + ex.getMessage(), ex, + "Container creation failed. " + ex.getMessage(), + ex, CONTAINER_INTERNAL_ERROR); } } @@ -244,28 +238,27 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy } } - /** - * The Static method call is wrapped in a protected instance method so it can - * be overridden in tests. + * The Static method call is wrapped in a protected instance method so it can be overridden in tests. */ @VisibleForTesting - protected void createContainerMetaData(File containerMetaDataPath, - File chunksPath, File dbFile, String schemaVersion, + protected void createContainerMetaData(File containerMetaDataPath, File chunksPath, File dbFile, String schemaVersion, ConfigurationSource configuration) throws IOException { - KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath, - chunksPath, dbFile, schemaVersion, configuration); + KeyValueContainerUtil.createContainerMetaData( + containerMetaDataPath, + chunksPath, + dbFile, + schemaVersion, + configuration); } /** - * Set all of the path realted container data fields based on the name - * conventions. + * Set all the path related container data fields based on the name conventions. * - * @param clusterId - * @param containerVolume + * @param clusterId the ID of the cluster + * @param containerVolume the volume of the container */ - public void populatePathFields(String clusterId, - HddsVolume containerVolume) { + public void populatePathFields(String clusterId, HddsVolume containerVolume) { long containerId = containerData.getContainerID(); String hddsVolumeDir = containerVolume.getHddsRootDir().getAbsolutePath(); @@ -273,10 +266,9 @@ public void populatePathFields(String clusterId, File containerMetaDataPath = KeyValueContainerLocationUtil .getContainerMetaDataPath(hddsVolumeDir, clusterId, containerId); - File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, clusterId, containerId); + File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(hddsVolumeDir, clusterId, containerId); - //Set containerData for the KeyValueContainer. + // Set containerData for the KeyValueContainer. containerData.setMetadataPath(containerMetaDataPath.getPath()); containerData.setChunksPath(chunksPath.getPath()); containerData.setVolume(containerVolume); @@ -287,78 +279,66 @@ public void populatePathFields(String clusterId, * Writes to .container file. * * @param containerFile container file name - * @param isCreate True if creating a new file. False is updating an - * existing container file. - * @throws StorageContainerException + * @param isCreate True if creating a new file. False is updating an existing container file. + * @throws StorageContainerException If an error occurs while writing to the container file. 
*/ - private void writeToContainerFile(File containerFile, boolean isCreate) - throws StorageContainerException { + private void writeToContainerFile(File containerFile, boolean isCreate) throws StorageContainerException { File tempContainerFile = null; long containerId = containerData.getContainerID(); try { tempContainerFile = createTempFile(containerFile); - ContainerDataYaml.createContainerFile( - ContainerType.KeyValueContainer, containerData, tempContainerFile); + ContainerDataYaml.createContainerFile(ContainerType.KeyValueContainer, containerData, tempContainerFile); - // NativeIO.renameTo is an atomic function. But it might fail if the - // container file already exists. Hence, we handle the two cases - // separately. + // NativeIO.renameTo is an atomic function. + // But it might fail if the container file already exists. + // Hence, we handle the two cases separately. if (isCreate) { NativeIO.renameTo(tempContainerFile, containerFile); } else { - Files.move(tempContainerFile.toPath(), containerFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); + Files.move(tempContainerFile.toPath(), containerFile.toPath(), REPLACE_EXISTING); } } catch (IOException ex) { onFailure(containerData.getVolume()); - String containerExceptionMessage = "Error while creating/updating" + - " container file. ContainerID: " + containerId + - ", container path: " + containerFile.getAbsolutePath(); + String containerExceptionMessage = "Error while creating/updating container file. ContainerID: " + + containerId + + ", container path: " + + containerFile.getAbsolutePath(); if (tempContainerFile == null) { containerExceptionMessage += " Temporary file could not be created."; } - throw new StorageContainerException(containerExceptionMessage, ex, - CONTAINER_FILES_CREATE_ERROR); + throw new StorageContainerException(containerExceptionMessage, ex, CONTAINER_FILES_CREATE_ERROR); } finally { - if (tempContainerFile != null && tempContainerFile.exists()) { - if (!tempContainerFile.delete()) { - LOG.warn("Unable to delete container temporary file: {}.", - tempContainerFile.getAbsolutePath()); - } + if (tempContainerFile != null && tempContainerFile.exists() && !tempContainerFile.delete()) { + LOG.warn("Unable to delete container temporary file: {}.", tempContainerFile.getAbsolutePath()); } } } - private void createContainerFile(File containerFile) - throws StorageContainerException { + private void createContainerFile(File containerFile) throws StorageContainerException { writeToContainerFile(containerFile, true); } - private void updateContainerFile(File containerFile) - throws StorageContainerException { + private void updateContainerFile(File containerFile) throws StorageContainerException { writeToContainerFile(containerFile, false); } - @Override public void delete() throws StorageContainerException { long containerId = containerData.getContainerID(); try { // Delete the Container from tmp directory. - File tmpDirectoryPath = KeyValueContainerUtil.getTmpDirectoryPath( - containerData, containerData.getVolume()).toFile(); + File tmpDirectoryPath = + KeyValueContainerUtil.getTmpDirectoryPath(containerData, containerData.getVolume()).toFile(); FileUtils.deleteDirectory(tmpDirectoryPath); } catch (StorageContainerException ex) { // Disk needs replacement. throw ex; } catch (IOException ex) { - // Container will be removed from tmp directory under the volume. - // On datanode shutdown/restart any partial artifacts left - // will be wiped from volume's tmp directory. 
+ // The Container will be removed from tmp directory under the volume. + // On datanode shutdown/restart any partial artifacts left will be wiped from volume's tmp directory. onFailure(containerData.getVolume()); - String errMsg = String.format("Failed to cleanup container. ID: %d", - containerId); + String errMsg = String.format("Failed to cleanup container. ID: %d", containerId); LOG.error(errMsg, ex); throw new StorageContainerException(errMsg, ex, CONTAINER_INTERNAL_ERROR); } @@ -367,8 +347,7 @@ public void delete() throws StorageContainerException { @Override public boolean hasBlocks() throws IOException { try (DBHandle db = BlockUtils.getDB(containerData, config)) { - return !KeyValueContainerUtil.noBlocksInContainer(db.getStore(), - containerData, bCheckChunksFilePath); + return !KeyValueContainerUtil.noBlocksInContainer(db.getStore(), containerData, bCheckChunksFilePath); } } @@ -381,11 +360,10 @@ public void markContainerForClose() throws StorageContainerException { "Attempting to close a " + getContainerState() + " container.", CONTAINER_NOT_OPEN); } - updateContainerData(() -> - containerData.setState(ContainerDataProto.State.CLOSING)); - // Do not clear the pendingBlockCache here as a follower can still - // receive transactions from leader in CLOSING state. Refer to - // KeyValueHandler#checkContainerOpen() + updateContainerData(() -> containerData.setState(ContainerDataProto.State.CLOSING)); + // Do not clear the pendingBlockCache here + // as a follower can still receive transactions from leader in CLOSING state. + // Refer to KeyValueHandler#checkContainerOpen() } finally { writeUnlock(); } @@ -396,15 +374,16 @@ public void markContainerUnhealthy() throws StorageContainerException { writeLock(); ContainerDataProto.State prevState = containerData.getState(); try { - updateContainerData(() -> - containerData.setState(ContainerDataProto.State.UNHEALTHY)); + updateContainerData(() -> containerData.setState(ContainerDataProto.State.UNHEALTHY)); clearPendingPutBlockCache(); } finally { writeUnlock(); } - LOG.warn("Moving container {} to state {} from state:{}", - containerData.getContainerPath(), containerData.getState(), - prevState); + LOG.warn( + "Moving container {} to state {} from state:{}", + containerData.getContainerPath(), + containerData.getState(), + prevState); } @Override @@ -414,7 +393,7 @@ public void markContainerForDelete() { try { containerData.setState(ContainerDataProto.State.DELETED); File containerFile = getContainerFile(); - // update the new container data to .container File + // Update the new container data to .container File updateContainerFile(containerFile); } catch (IOException ioe) { LOG.error("Exception occur while update container {} state", @@ -422,8 +401,10 @@ public void markContainerForDelete() { } finally { writeUnlock(); } - LOG.info("Moving container {} to state {} from state:{}", - containerData.getContainerPath(), containerData.getState(), + LOG.info( + "Moving container {} to state {} from state:{}", + containerData.getContainerPath(), + containerData.getState(), prevState); } @@ -441,14 +422,14 @@ public void close() throws StorageContainerException { throw new StorageContainerException(ex, IO_EXCEPTION); } closeAndFlushIfNeeded(containerData::closeContainer); - LOG.info("Container {} is closed with bcsId {}.", + LOG.info( + "Container {} is closed with bcsId {}.", containerData.getContainerID(), containerData.getBlockCommitSequenceId()); } @Override - public void updateDataScanTimestamp(Instant time) - throws 
StorageContainerException { + public void updateDataScanTimestamp(Instant time) throws StorageContainerException { writeLock(); try { updateContainerData(() -> containerData.updateDataScanTime(time)); @@ -460,17 +441,15 @@ public void updateDataScanTimestamp(Instant time) /** * Sync RocksDB WAL on closing of a single container. * - * @param closer - * @throws StorageContainerException + * @param closer the closer to use for closing the container + * @throws StorageContainerException if an error occurs during closing */ - private void closeAndFlushIfNeeded(Runnable closer) - throws StorageContainerException { + private void closeAndFlushIfNeeded(Runnable closer) throws StorageContainerException { flushAndSyncDB(); writeLock(); try { - // Second sync should be a very light operation as sync has already - // been done outside the lock. + // Second sync should be a very light operation as sync has already been done outside the lock. flushAndSyncDB(); updateContainerData(closer); clearPendingPutBlockCache(); @@ -480,14 +459,17 @@ private void closeAndFlushIfNeeded(Runnable closer) } /** + * Updates the state of the container data and persists it to a file. + * The method ensures that changes are made atomically; if the update fails, + * the container data state is reverted to its original state (unless the new state is unhealthy). + *

+ * It Must be invoked with the writeLock held. * - * Must be invoked with the writeLock held. - * - * @param update - * @throws StorageContainerException + * @param update A Runnable representing the update operation to be performed on the container data. + * @throws StorageContainerException If an error occurs while updating the container data + * or writing to the container file. */ - private void updateContainerData(Runnable update) - throws StorageContainerException { + private void updateContainerData(Runnable update) throws StorageContainerException { Preconditions.checkState(hasWriteLock()); ContainerDataProto.State oldState = null; try { @@ -496,10 +478,8 @@ private void updateContainerData(Runnable update) File containerFile = getContainerFile(); // update the new container data to .container File updateContainerFile(containerFile); - } catch (StorageContainerException ex) { - if (oldState != null - && containerData.getState() != ContainerDataProto.State.UNHEALTHY) { + if (oldState != null && containerData.getState() != ContainerDataProto.State.UNHEALTHY) { // Failed to update .container file. Reset the state to old state only // if the current state is not unhealthy. containerData.setState(oldState); @@ -526,7 +506,8 @@ private void flushAndSyncDB() throws StorageContainerException { try { try (DBHandle db = BlockUtils.getDB(containerData, config)) { db.getStore().flushLog(true); - LOG.info("Container {} is synced with bcsId {}.", + LOG.info( + "Container {} is synced with bcsId {}.", containerData.getContainerID(), containerData.getBlockCommitSequenceId()); } @@ -555,23 +536,20 @@ public ContainerType getContainerType() { } @Override - public void update( - Map metadata, boolean forceUpdate) - throws StorageContainerException { - - // TODO: Now, when writing the updated data to .container file, we are - // holding lock and writing data to disk. We can have async implementation - // to flush the update container data to disk. + public void update(Map metadata, boolean forceUpdate) throws StorageContainerException { + // TODO: Now, when writing the updated data to .container file, we are holding lock and writing data to disk. + // We can have async implementation to flush the update container data to disk. long containerId = containerData.getContainerID(); if (!containerData.isValid()) { LOG.debug("Invalid container data. ContainerID: {}", containerId); - throw new StorageContainerException("Invalid container data. " + - "ContainerID: " + containerId, INVALID_CONTAINER_STATE); + throw new StorageContainerException( + "Invalid container data. ContainerID: " + containerId, + INVALID_CONTAINER_STATE); } if (!forceUpdate && !containerData.isOpen()) { throw new StorageContainerException( - "Updating a closed container without force option is not allowed. " + - "ContainerID: " + containerId, UNSUPPORTED_REQUEST); + "Updating a closed container without force option is not allowed. 
ContainerID: " + containerId, + UNSUPPORTED_REQUEST); } Map oldMetadata = containerData.getMetadata(); @@ -582,7 +560,7 @@ public void update( } File containerFile = getContainerFile(); - // update the new container data to .container File + // Update the new container data to .container File updateContainerFile(containerFile); } catch (StorageContainerException ex) { containerData.setMetadata(oldMetadata); @@ -598,37 +576,30 @@ public void updateDeleteTransactionId(long deleteTransactionId) { } @Override - public void importContainerData(InputStream input, - ContainerPacker packer) - throws IOException { + public void importContainerData(InputStream input, ContainerPacker packer) throws IOException { HddsVolume hddsVolume = containerData.getVolume(); - String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID( - hddsVolume, hddsVolume.getClusterID()); + String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID(hddsVolume, hddsVolume.getClusterID()); long containerId = containerData.getContainerID(); Path destContainerDir = - Paths.get(KeyValueContainerLocationUtil.getBaseContainerLocation( - hddsVolume.getHddsRootDir().toString(), idDir, containerId)); + Paths.get(KeyValueContainerLocationUtil + .getBaseContainerLocation(hddsVolume.getHddsRootDir().toString(), idDir, containerId)); Path tmpDir = ContainerImporter.getUntarDirectory(hddsVolume); writeLock(); try { - //copy the values from the input stream to the final destination - // directory. - byte[] descriptorContent = packer.unpackContainerData(this, input, tmpDir, - destContainerDir); - - Preconditions.checkNotNull(descriptorContent, - "Container descriptor is missing from the container archive: " - + getContainerData().getContainerID()); - - //now, we have extracted the container descriptor from the previous - //datanode. We can load it and upload it with the current data - // (original metadata + current filepath fields) + // Copy the values from the input stream to the final destination directory. + byte[] descriptorContent = packer.unpackContainerData(this, input, tmpDir, destContainerDir); + + Preconditions.checkNotNull( + descriptorContent, + "Container descriptor is missing from the container archive: " + getContainerData().getContainerID()); + + // Now, we have extracted the container descriptor from the previous datanode. + // We can load it and upload it with the current data (original metadata + current filepath fields) KeyValueContainerData originalContainerData = - (KeyValueContainerData) ContainerDataYaml - .readContainer(descriptorContent); + (KeyValueContainerData) ContainerDataYaml.readContainer(descriptorContent); importContainerData(originalContainerData); } catch (Exception ex) { - // clean data under tmp directory + // Clean data under tmp directory try { Path containerUntarDir = tmpDir.resolve(String.valueOf(containerId)); if (containerUntarDir.toFile().exists()) { @@ -637,17 +608,18 @@ public void importContainerData(InputStream input, } catch (Exception deleteex) { LOG.error( "Can not cleanup container directory under {} for container {}", - tmpDir, containerId, deleteex); + tmpDir, + containerId, + deleteex); } // Throw exception for existed containers - if (ex instanceof StorageContainerException && - ((StorageContainerException) ex).getResult() == - CONTAINER_ALREADY_EXISTS) { + if (ex instanceof StorageContainerException + && ((StorageContainerException) ex).getResult() == CONTAINER_ALREADY_EXISTS) { throw ex; } - // delete all other temporary data in case of any exception. 
+ // Delete all other temporary data in case of any exception. try { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { BlockUtils.removeContainerFromDB(containerData, config); @@ -658,8 +630,9 @@ public void importContainerData(InputStream input, new File(getContainerData().getContainerPath())); } catch (Exception deleteex) { LOG.error( - "Can not cleanup destination directories after a container import" - + " error (cid: {}", containerId, deleteex); + "Can not cleanup destination directories after a container import error (cid: {}", + containerId, + deleteex); } throw ex; } finally { @@ -667,48 +640,53 @@ public void importContainerData(InputStream input, } } - public void importContainerData(KeyValueContainerData originalContainerData) - throws IOException { + /** + * Imports metadata and other necessary data from the original container data into the current container. + * It updates the state, container type, and schema version before rewriting the YAML file with a new checksum. + * Depending on the schema version, + * it may also load metadata from received dump files and fill in memory statistics such as key count and byte usage. + * + * @param originalContainerData the data from the original container to import + * @throws IOException if an I/O error occurs during the import process + */ + public void importContainerData(KeyValueContainerData originalContainerData) throws IOException { containerData.setState(originalContainerData.getState()); - containerData - .setContainerDBType(originalContainerData.getContainerDBType()); + containerData.setContainerDBType(originalContainerData.getContainerDBType()); containerData.setSchemaVersion(originalContainerData.getSchemaVersion()); - //rewriting the yaml file with new checksum calculation. + // Rewriting the yaml file with new checksum calculation. update(originalContainerData.getMetadata(), true); if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { - // load metadata from received dump files before we try to parse kv + // Load metadata from received dump files before we try to parse kv BlockUtils.loadKVContainerDataFromFiles(containerData, config); } - //fill in memory stat counter (keycount, byte usage) + // Fill in memory stat counter (keycount, byte usage) KeyValueContainerUtil.parseKVContainerData(containerData, config); } @Override - public void exportContainerData(OutputStream destination, - ContainerPacker packer) throws IOException { + public void exportContainerData(OutputStream destination, ContainerPacker packer) + throws IOException { writeLock(); try { - // Closed/ Quasi closed and unhealthy containers are considered for - // replication by replication manager if they are under-replicated. - ContainerProtos.ContainerDataProto.State state = - getContainerData().getState(); - if (!(state == ContainerProtos.ContainerDataProto.State.CLOSED || - state == ContainerDataProto.State.QUASI_CLOSED - || state == ContainerDataProto.State.UNHEALTHY)) { - throw new IllegalStateException( - "Only (quasi)closed and unhealthy containers can be exported. " + - "ContainerId=" + getContainerData().getContainerID() + - " is in state " + state); + // Replication manager considers closed/ Quasi closed and unhealthy containers for replication + // if they are under-replicated. 
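The state check that follows could equally be written with an EnumSet; a sketch with a stand-in enum (the real code uses ContainerProtos.ContainerDataProto.State), shown only to illustrate the design choice of rejecting every state that can still accept writes:

import java.util.EnumSet;
import java.util.Set;

final class ExportableStateSketch {
  enum State { OPEN, CLOSING, CLOSED, QUASI_CLOSED, UNHEALTHY, DELETED }

  // Only replicas that can no longer accept writes are safe to export.
  private static final Set<State> EXPORTABLE =
      EnumSet.of(State.CLOSED, State.QUASI_CLOSED, State.UNHEALTHY);

  static void checkExportable(State state, long containerId) {
    if (!EXPORTABLE.contains(state)) {
      throw new IllegalStateException(
          "Only (quasi)closed and unhealthy containers can be exported. ContainerId="
              + containerId + " is in state " + state);
    }
  }
}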
+ ContainerProtos.ContainerDataProto.State state = getContainerData().getState(); + if (state != ContainerProtos.ContainerDataProto.State.CLOSED + && state != ContainerDataProto.State.QUASI_CLOSED + && state != ContainerDataProto.State.UNHEALTHY) { + throw new IllegalStateException("Only (quasi)closed and unhealthy containers can be exported. ContainerId=" + + getContainerData().getContainerID() + + " is in state " + + state); } try { if (!containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { compactDB(); - // Close DB (and remove from cache) to avoid concurrent modification - // while packing it. + // Close DB (and remove from cache) to avoid concurrent modification while packing it. BlockUtils.removeDB(containerData, config); } } finally { @@ -756,8 +734,7 @@ public boolean hasReadLock() { */ @Override public void writeLock() { - // TODO: The lock for KeyValueContainer object should not be exposed - // publicly. + // TODO: The lock for KeyValueContainer object should not be exposed publicly. this.lock.writeLock().lock(); } @@ -780,7 +757,6 @@ public boolean hasWriteLock() { /** * Acquire read lock, unless interrupted while waiting. - * @throws InterruptedException */ @Override public void readLockInterruptibly() throws InterruptedException { @@ -789,7 +765,6 @@ public void readLockInterruptibly() throws InterruptedException { /** * Acquire write lock, unless interrupted while waiting. - * @throws InterruptedException */ @Override public void writeLockInterruptibly() throws InterruptedException { @@ -797,24 +772,37 @@ public void writeLockInterruptibly() throws InterruptedException { } - public boolean writeLockTryLock(long time, TimeUnit unit) - throws InterruptedException { + /** + * Attempts to acquire the write lock within the given time frame. + * + * @param time the maximum time to wait for the lock + * @param unit the time unit of the time argument + * @return {@code true} if the lock was acquired, {@code false} otherwise + * @throws InterruptedException if the current thread is interrupted while waiting + */ + public boolean writeLockTryLock(long time, TimeUnit unit) throws InterruptedException { return this.lock.writeLock().tryLock(time, unit); } /** * Returns containerFile. + * * @return .container File name */ @Override public File getContainerFile() { - return getContainerFile(containerData.getMetadataPath(), - containerData.getContainerID()); + return getContainerFile(containerData.getMetadataPath(), containerData.getContainerID()); } + /** + * Retrieves the container file based on the provided metadata path and container ID. + * + * @param metadataPath the path to the metadata directory + * @param containerId the identifier of the container + * @return the container file represented as a {@link File} object + */ public static File getContainerFile(String metadataPath, long containerId) { - return new File(metadataPath, - containerId + OzoneConsts.CONTAINER_EXTENSION); + return new File(metadataPath, containerId + OzoneConsts.CONTAINER_EXTENSION); } @Override @@ -828,8 +816,7 @@ public long getBlockCommitSequenceId() { } /** - * Return whether the given localID of a block is present in the - * pendingPutBlockCache or not. + * Return whether the given localID of a block is present in the pendingPutBlockCache or not. */ public boolean isBlockInPendingPutBlockCache(long localID) { return pendingPutBlockCache.contains(localID); @@ -838,21 +825,18 @@ public boolean isBlockInPendingPutBlockCache(long localID) { /** * Add the given localID of a block to the pendingPutBlockCache. 
*/ - public void addToPendingPutBlockCache(long localID) - throws StorageContainerException { + public void addToPendingPutBlockCache(long localID) throws StorageContainerException { try { pendingPutBlockCache.add(localID); } catch (UnsupportedOperationException e) { - // Getting an UnsupportedOperationException here implies that the - // pendingPutBlockCache is an Empty Set. This should not happen if the - // container is in OPEN or CLOSING state. Log the exception here and - // throw a non-Runtime exception so that putBlock request fails. - String msg = "Failed to add block " + localID + " to " + - "pendingPutBlockCache of container " + containerData.getContainerID() + // Getting an UnsupportedOperationException here implies that the pendingPutBlockCache is an Empty Set. + // This should not happen if the container is in OPEN or CLOSING state. + // Log the exception here and throw a non-Runtime exception so that putBlock request fails. + String msg = "Failed to add block " + localID + + " to pendingPutBlockCache of container " + containerData.getContainerID() + " (state: " + containerData.getState() + ")"; LOG.error(msg, e); - throw new StorageContainerException(msg, - ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); + throw new StorageContainerException(msg, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); } } @@ -864,8 +848,8 @@ public void removeFromPendingPutBlockCache(long localID) { } /** - * When a container is closed, quasi-closed or marked unhealthy, clear the - * pendingPutBlockCache as there won't be any more writes to the container. + * When a container is closed, quasi-closed or marked unhealthy, + * clear the pendingPutBlockCache as there won't be anymore writes to the container. */ private void clearPendingPutBlockCache() { pendingPutBlockCache.clear(); @@ -876,11 +860,9 @@ private void clearPendingPutBlockCache() { * Returns KeyValueContainerReport for the KeyValueContainer. */ @Override - public ContainerReplicaProto getContainerReport() - throws StorageContainerException { - ContainerReplicaProto.Builder ciBuilder = - ContainerReplicaProto.newBuilder(); - ciBuilder.setContainerID(containerData.getContainerID()) + public ContainerReplicaProto getContainerReport() throws StorageContainerException { + return ContainerReplicaProto.newBuilder() + .setContainerID(containerData.getContainerID()) .setReadCount(containerData.getReadCount()) .setWriteCount(containerData.getWriteCount()) .setReadBytes(containerData.getReadBytes()) @@ -892,17 +874,16 @@ public ContainerReplicaProto getContainerReport() .setDeleteTransactionId(containerData.getDeleteTransactionId()) .setBlockCommitSequenceId(containerData.getBlockCommitSequenceId()) .setOriginNodeId(containerData.getOriginNodeId()) - .setIsEmpty(containerData.isEmpty()); - return ciBuilder.build(); + .setIsEmpty(containerData.isEmpty()) + .build(); } /** * Returns LifeCycle State of the container. 
+ * * @return LifeCycle State of the container in HddsProtos format - * @throws StorageContainerException */ - private ContainerReplicaProto.State getHddsState() - throws StorageContainerException { + private ContainerReplicaProto.State getHddsState() throws StorageContainerException { ContainerReplicaProto.State state; switch (containerData.getState()) { case OPEN: @@ -924,15 +905,15 @@ private ContainerReplicaProto.State getHddsState() state = ContainerReplicaProto.State.DELETED; break; default: - throw new StorageContainerException("Invalid Container state found: " + - containerData.getContainerID(), INVALID_CONTAINER_STATE); + throw new StorageContainerException( + "Invalid Container state found: " + containerData.getContainerID(), + INVALID_CONTAINER_STATE); } return state; } /** * Returns container DB file. - * @return */ public File getContainerDBFile() { return KeyValueContainerLocationUtil.getContainerDBFile(containerData); @@ -941,12 +922,12 @@ public File getContainerDBFile() { @Override public boolean shouldScanMetadata() { - boolean shouldScan = - getContainerState() != ContainerDataProto.State.UNHEALTHY; + boolean shouldScan = getContainerState() != ContainerDataProto.State.UNHEALTHY; if (!shouldScan && LOG.isDebugEnabled()) { - LOG.debug("Container {} in state {} should not have its metadata " + - "scanned.", - containerData.getContainerID(), containerData.getState()); + LOG.debug( + "Container {} in state {} should not have its metadata scanned.", + containerData.getContainerID(), + containerData.getState()); } return shouldScan; } @@ -954,61 +935,64 @@ public boolean shouldScanMetadata() { @Override public ScanResult scanMetaData() throws InterruptedException { long containerId = containerData.getContainerID(); - KeyValueContainerCheck checker = - new KeyValueContainerCheck(containerData.getMetadataPath(), config, - containerId, containerData.getVolume(), this); + KeyValueContainerCheck checker = new KeyValueContainerCheck( + containerData.getMetadataPath(), + config, + containerId, + containerData.getVolume(), + this); return checker.fastCheck(); } @Override public boolean shouldScanData() { - boolean shouldScan = - getContainerState() == ContainerDataProto.State.CLOSED + boolean shouldScan = getContainerState() == ContainerDataProto.State.CLOSED || getContainerState() == ContainerDataProto.State.QUASI_CLOSED; if (!shouldScan && LOG.isDebugEnabled()) { - LOG.debug("Container {} in state {} should not have its data scanned.", - containerData.getContainerID(), containerData.getState()); + LOG.debug( + "Container {} in state {} should not have its data scanned.", + containerData.getContainerID(), + containerData.getState()); } return shouldScan; } @Override - public ScanResult scanData(DataTransferThrottler throttler, Canceler canceler) - throws InterruptedException { + public ScanResult scanData(DataTransferThrottler throttler, Canceler canceler) throws InterruptedException { if (!shouldScanData()) { - throw new IllegalStateException("The checksum verification can not be" + - " done for container in state " - + containerData.getState()); + throw new IllegalStateException( + "The checksum verification can not be done for container in state " + containerData.getState()); } long containerId = containerData.getContainerID(); - KeyValueContainerCheck checker = - new KeyValueContainerCheck(containerData.getMetadataPath(), config, - containerId, containerData.getVolume(), this); + KeyValueContainerCheck checker = new KeyValueContainerCheck( + containerData.getMetadataPath(), + 
config, + containerId, + containerData.getVolume(), + this); return checker.fullCheck(throttler, canceler); } /** - * Creates a temporary file. - * @param file - * @return - * @throws IOException + * Creates a temporary file in the same directory as the given file, + * with a unique prefix based on the current system time. + * + * @param file the file whose directory will be used to create the temporary file + * @return a newly created temporary file + * @throws IOException if an I/O error occurs */ private File createTempFile(File file) throws IOException { - return File.createTempFile("tmp_" + System.currentTimeMillis() + "_", - file.getName(), file.getParentFile()); + return File.createTempFile("tmp_" + System.currentTimeMillis() + "_", file.getName(), file.getParentFile()); } - private void packContainerToDestination(OutputStream destination, - ContainerPacker packer) + private void packContainerToDestination(OutputStream destination, ContainerPacker packer) throws IOException { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { - // Synchronize the dump and pack operation, - // so concurrent exports don't get dump files overwritten. - // We seldom got concurrent exports for a container, - // so it should not influence performance much. + // Synchronize the dump and pack operation, so concurrent exports don't get dump files overwritten. + // We seldom got concurrent exports for a container, so it should not influence performance much. synchronized (dumpLock) { BlockUtils.dumpKVContainerDataToFiles(containerData, config); packer.pack(this, destination); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index c235109f2cb..fc910b02028 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -53,28 +53,36 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.CORRUPT_CHUNK; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.CORRUPT_CONTAINER_FILE; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.INACCESSIBLE_DB; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.INCONSISTENT_CHUNK_LENGTH; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.MISSING_CHUNKS_DIR; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.MISSING_CHUNK_FILE; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.MISSING_CONTAINER_DIR; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.MISSING_CONTAINER_FILE; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.MISSING_METADATA_DIR; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.unhealthy; /** * Class to run 
integrity checks on Datanode Containers. * Provide infra for Data Scrubbing */ - public class KeyValueContainerCheck { + private static final Logger LOG = LoggerFactory.getLogger(KeyValueContainerCheck.class); - private static final Logger LOG = - LoggerFactory.getLogger(KeyValueContainerCheck.class); - - private long containerID; + private final long containerID; private KeyValueContainerData onDiskContainerData; //loaded from fs/disk - private ConfigurationSource checkConfig; + private final ConfigurationSource checkConfig; - private String metadataPath; - private HddsVolume volume; - private KeyValueContainer container; - private static final DirectBufferPool BUFFER_POOL = new DirectBufferPool(); + private final String metadataPath; + private final HddsVolume volume; + private final KeyValueContainer container; + public static final DirectBufferPool BUFFER_POOL = new DirectBufferPool(); - public KeyValueContainerCheck(String metadataPath, ConfigurationSource conf, - long containerID, HddsVolume volume, KeyValueContainer container) { + public KeyValueContainerCheck(String metadataPath, ConfigurationSource conf, long containerID, HddsVolume volume, + KeyValueContainer container) { Preconditions.checkArgument(metadataPath != null); this.checkConfig = conf; @@ -90,7 +98,7 @@ public KeyValueContainerCheck(String metadataPath, ConfigurationSource conf, * These checks do not look inside the metadata files. * Applicable for OPEN containers. * - * @return true : integrity checks pass, false : otherwise. + * @return {@code true}: integrity checks pass, {@code false}: otherwise. */ public ScanResult fastCheck() throws InterruptedException { LOG.debug("Running basic checks for container {};", containerID); @@ -99,193 +107,155 @@ public ScanResult fastCheck() throws InterruptedException { // Container directory should exist. File containerDir = new File(metadataPath).getParentFile(); if (!containerDir.exists()) { - return ScanResult.unhealthy( - ScanResult.FailureType.MISSING_CONTAINER_DIR, - containerDir, new FileNotFoundException("Container directory " + - containerDir + " not found.")); + return unhealthy(MISSING_CONTAINER_DIR, containerDir, + new FileNotFoundException("Container directory " + containerDir + " not found.")); } // Metadata directory should exist. File metadataDir = new File(metadataPath); if (!metadataDir.exists()) { - return ScanResult.unhealthy(ScanResult.FailureType.MISSING_METADATA_DIR, - metadataDir, new FileNotFoundException("Metadata directory " + - metadataDir + " not found.")); + return unhealthy(MISSING_METADATA_DIR, metadataDir, + new FileNotFoundException("Metadata directory " + metadataDir + " not found.")); } // Container file should be valid. - File containerFile = KeyValueContainer - .getContainerFile(metadataPath, containerID); + File containerFile = KeyValueContainer.getContainerFile(metadataPath, containerID); try { loadContainerData(containerFile); } catch (FileNotFoundException ex) { - return ScanResult.unhealthy( - ScanResult.FailureType.MISSING_CONTAINER_FILE, containerFile, ex); + return unhealthy(MISSING_CONTAINER_FILE, containerFile, ex); } catch (IOException ex) { - return ScanResult.unhealthy( - ScanResult.FailureType.CORRUPT_CONTAINER_FILE, containerFile, ex); + return unhealthy(CORRUPT_CONTAINER_FILE, containerFile, ex); } // Chunks directory should exist. 
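fastCheck is essentially a short-circuiting chain of existence checks; a compact sketch of that shape (firstMissing is a hypothetical helper, not an Ozone API):

import java.io.File;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

final class FastCheckSketch {
  // Return the first required directory that does not exist, or empty if all are present.
  static Optional<File> firstMissing(File containerDir, File metadataDir, File chunksDir) {
    List<File> required = Arrays.asList(containerDir, metadataDir, chunksDir);
    return required.stream().filter(dir -> !dir.exists()).findFirst();
  }
}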
File chunksDir = new File(onDiskContainerData.getChunksPath()); if (!chunksDir.exists()) { - return ScanResult.unhealthy(ScanResult.FailureType.MISSING_CHUNKS_DIR, - chunksDir, new FileNotFoundException("Chunks directory " + - chunksDir + " not found.")); + return unhealthy(MISSING_CHUNKS_DIR, chunksDir, + new FileNotFoundException("Chunks directory " + chunksDir + " not found.")); } return checkContainerFile(containerFile); } finally { if (Thread.currentThread().isInterrupted()) { - throw new InterruptedException("Metadata scan of container " + - containerID + " interrupted."); + throw new InterruptedException("Metadata scan of container " + containerID + " interrupted."); } } } /** - * full checks comprise scanning all metadata inside the container. - * Including the KV database. These checks are intrusive, consume more - * resources compared to fast checks and should only be done on Closed - * or Quasi-closed Containers. Concurrency being limited to delete - * workflows. + * Full checks comprise scanning all metadata inside the container. + * Including the KV database. + * These checks are intrusive, + * consume more resources compared to fast checks and should only be done on Closed or Quasi-closed Containers. + * Concurrency being limited to delete workflows. *

* fullCheck is a superset of fastCheck * - * @return true : integrity checks pass, false : otherwise. + * @return {@code true}: integrity checks pass, {@code false}: otherwise. */ - public ScanResult fullCheck(DataTransferThrottler throttler, - Canceler canceler) throws InterruptedException { + public ScanResult fullCheck(DataTransferThrottler throttler, Canceler canceler) throws InterruptedException { ScanResult result = fastCheck(); if (result.isHealthy()) { result = scanData(throttler, canceler); } if (!result.isHealthy() && Thread.currentThread().isInterrupted()) { - throw new InterruptedException("Data scan of container " + containerID + - " interrupted."); + throw new InterruptedException("Data scan of container " + containerID + " interrupted."); } return result; } private ScanResult checkContainerFile(File containerFile) { - /* - * compare the values in the container file loaded from disk, - * with the values we are expecting - */ + // Compare the values in the container file loaded from the disk with the values we are expecting String dbType; - Preconditions - .checkState(onDiskContainerData != null, "Container File not loaded"); + Preconditions.checkState(onDiskContainerData != null, "Container File not loaded"); try { ContainerUtils.verifyChecksum(onDiskContainerData, checkConfig); } catch (IOException ex) { - return ScanResult.unhealthy(ScanResult.FailureType.CORRUPT_CONTAINER_FILE, - containerFile, ex); + return unhealthy(CORRUPT_CONTAINER_FILE, containerFile, ex); } - if (onDiskContainerData.getContainerType() - != ContainerProtos.ContainerType.KeyValueContainer) { - String errStr = "Bad Container type in Containerdata for " + containerID; - return ScanResult.unhealthy(ScanResult.FailureType.CORRUPT_CONTAINER_FILE, - containerFile, new IOException(errStr)); + if (onDiskContainerData.getContainerType() != ContainerProtos.ContainerType.KeyValueContainer) { + String errStr = "Bad Container type in ContainerData for " + containerID; + return unhealthy(CORRUPT_CONTAINER_FILE, containerFile, new IOException(errStr)); } if (onDiskContainerData.getContainerID() != containerID) { - String errStr = - "Bad ContainerID field in Containerdata for " + containerID; - return ScanResult.unhealthy(ScanResult.FailureType.CORRUPT_CONTAINER_FILE, - containerFile, new IOException(errStr)); + String errStr = "Bad ContainerID field in ContainerData for " + containerID; + return unhealthy(CORRUPT_CONTAINER_FILE, containerFile, new IOException(errStr)); } dbType = onDiskContainerData.getContainerDBType(); if (!dbType.equals(CONTAINER_DB_TYPE_ROCKSDB)) { - String errStr = "Unknown DBType [" + dbType - + "] in Container File for [" + containerID + "]"; - return ScanResult.unhealthy(ScanResult.FailureType.CORRUPT_CONTAINER_FILE, - containerFile, new IOException(errStr)); + String errStr = "Unknown DBType [" + dbType + "] in Container File for [" + containerID + "]"; + return unhealthy(CORRUPT_CONTAINER_FILE, containerFile, new IOException(errStr)); } KeyValueContainerData kvData = onDiskContainerData; if (!metadataPath.equals(kvData.getMetadataPath())) { - String errStr = - "Bad metadata path in Containerdata for " + containerID + "Expected [" - + metadataPath + "] Got [" + kvData.getMetadataPath() - + "]"; - return ScanResult.unhealthy(ScanResult.FailureType.CORRUPT_CONTAINER_FILE, - containerFile, new IOException(errStr)); + String errStr = "Bad metadata path in Containerdata for " + containerID + "Expected [" + metadataPath + + "] Got [" + kvData.getMetadataPath() + "]"; + return 
unhealthy(CORRUPT_CONTAINER_FILE, containerFile, new IOException(errStr)); } return ScanResult.healthy(); } - private ScanResult scanData(DataTransferThrottler throttler, - Canceler canceler) { + private ScanResult scanData(DataTransferThrottler throttler, Canceler canceler) { /* * Check the integrity of the DB inside each container. - * 1. iterate over each key (Block) and locate the chunks for the block - * 2. garbage detection (TBD): chunks which exist in the filesystem, - * but not in the DB. This function will be implemented in HDDS-1202 - * 3. chunk checksum verification. + * 1. Iterate over each key (Block) and locate the chunks for the block + * 2. Garbage detection (TBD): chunks which exist in the filesystem, but not in the DB. + * This function will be implemented in HDDS-1202. + * 3. Chunk checksum verification. */ - Preconditions.checkState(onDiskContainerData != null, - "invoke loadContainerData prior to calling this function"); + Preconditions + .checkState(onDiskContainerData != null, "invoke loadContainerData prior to calling this function"); - File dbFile = KeyValueContainerLocationUtil - .getContainerDBFile(onDiskContainerData); + File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(onDiskContainerData); if (!dbFile.exists() || !dbFile.canRead()) { - String dbFileErrorMsg = "Unable to access DB File [" + dbFile.toString() - + "] for Container [" + containerID + "] metadata path [" - + metadataPath + "]"; - return ScanResult.unhealthy(ScanResult.FailureType.INACCESSIBLE_DB, - dbFile, new IOException(dbFileErrorMsg)); + String dbFileErrorMsg = "Unable to access DB File [" + dbFile + "] for Container [" + containerID + + "] metadata path [" + metadataPath + "]"; + return unhealthy(INACCESSIBLE_DB, dbFile, new IOException(dbFileErrorMsg)); } onDiskContainerData.setDbFile(dbFile); try { try (DBHandle db = BlockUtils.getDB(onDiskContainerData, checkConfig); - BlockIterator kvIter = db.getStore().getBlockIterator( - onDiskContainerData.getContainerID(), - onDiskContainerData.getUnprefixedKeyFilter())) { + BlockIterator kvIter = db.getStore().getBlockIterator(onDiskContainerData.getContainerID(), + onDiskContainerData.getUnprefixedKeyFilter())) { while (kvIter.hasNext()) { BlockData block = kvIter.nextBlock(); - // If holding read lock for the entire duration, including wait() - // calls in DataTransferThrottler, would effectively make other - // threads throttled. - // Here try optimistically and retry with the container lock to - // make sure reading the latest record. If the record is just removed, - // the block should be skipped to scan. + // If holding read lock for the entire duration, + // including wait() calls in DataTransferThrottler, + // would effectively make other threads throttled. + // Here try optimistically and retry with the container lock to make sure reading the latest record. + // If the record is just removed, the block should be skipped to scan. ScanResult result = scanBlock(block, throttler, canceler); if (!result.isHealthy()) { - if (result.getFailureType() == - ScanResult.FailureType.MISSING_CHUNK_FILE) { + if (result.getFailureType() == MISSING_CHUNK_FILE) { if (getBlockDataFromDBWithLock(db, block) != null) { // Block was not deleted, the failure is legitimate. 
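The optimistic-scan-then-locked-recheck described above, reduced to a generic sketch (lookupUnderLock is a hypothetical supplier standing in for getBlockDataFromDBWithLock):

import java.util.concurrent.locks.ReadWriteLock;
import java.util.function.Supplier;

final class OptimisticRecheckSketch {
  // The scan itself runs without the container lock (including throttler waits).
  // Only when a block looks broken do we take the read lock and re-read the record:
  // if it is still there, the failure is real; if it is gone, the block was deleted
  // concurrently and the scanner should simply skip it.
  static <T> boolean failureIsLegitimate(Supplier<T> lookupUnderLock, ReadWriteLock lock) {
    lock.readLock().lock();
    try {
      return lookupUnderLock.get() != null;
    } finally {
      lock.readLock().unlock();
    }
  }
}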
return result; } else { - // If schema V3 and container details not in DB or - // if containerDBPath is removed - if ((onDiskContainerData.hasSchema(OzoneConsts.SCHEMA_V3) && - db.getStore().getMetadataTable().get( - onDiskContainerData.getBcsIdKey()) == null) || - !new File(onDiskContainerData.getDbFile() - .getAbsolutePath()).exists()) { - // Container has been deleted. Skip the rest of the blocks. - return ScanResult.unhealthy( - ScanResult.FailureType.DELETED_CONTAINER, + // If schema V3 and container details not in DB or if containerDBPath is removed + if (onDiskContainerData.hasSchema(OzoneConsts.SCHEMA_V3) + && db.getStore().getMetadataTable().get(onDiskContainerData.getBcsIdKey()) == null + || !new File(onDiskContainerData.getDbFile().getAbsolutePath()).exists()) { + // The Container has been deleted. Skip the rest of the blocks. + return unhealthy(ScanResult.FailureType.DELETED_CONTAINER, result.getUnhealthyFile(), result.getException()); } - // Block may have been deleted during the scan. - if (LOG.isDebugEnabled()) { - LOG.debug("Scanned outdated blockData {} in container {}.", - block, containerID); - } + // The Block may have been deleted during the scan. + LOG.debug("Scanned outdated blockData {} in container {}.", block, containerID); } } else { // All other failures should be treated as errors. @@ -295,8 +265,7 @@ private ScanResult scanData(DataTransferThrottler throttler, } } } catch (IOException ex) { - return ScanResult.unhealthy(ScanResult.FailureType.INACCESSIBLE_DB, - dbFile, ex); + return unhealthy(INACCESSIBLE_DB, dbFile, ex); } return ScanResult.healthy(); @@ -304,33 +273,28 @@ private ScanResult scanData(DataTransferThrottler throttler, /** * Attempt to read the block data without the container lock. - * The block onDisk might be in modification by other thread and not yet - * flushed to DB, so the content might be outdated. + * The block onDisk might be in modification by another thread and not yet flushed to DB, + * so the content might be outdated. * * @param db DB of container * @param block last queried blockData * @return blockData in DB - * @throws IOException */ - private BlockData getBlockDataFromDB(DBHandle db, BlockData block) - throws IOException { - String blockKey = - onDiskContainerData.getBlockKey(block.getBlockID().getLocalID()); + private BlockData getBlockDataFromDB(DBHandle db, BlockData block) throws IOException { + String blockKey = onDiskContainerData.getBlockKey(block.getBlockID().getLocalID()); return db.getStore().getBlockDataTable().get(blockKey); } /** * Attempt to read the block data with the container lock. - * The container lock ensure the latest DB record could be retrieved, since - * other block related write operation will acquire the container write lock. + * The container lock ensures the latest DB record could be retrieved, + * since another block related write operation will acquire the container write lock. 
* * @param db DB of container * @param block last queried blockData * @return blockData in DB - * @throws IOException */ - private BlockData getBlockDataFromDBWithLock(DBHandle db, BlockData block) - throws IOException { + private BlockData getBlockDataFromDBWithLock(DBHandle db, BlockData block) throws IOException { container.readLock(); try { return getBlockDataFromDB(db, block); @@ -339,37 +303,28 @@ private BlockData getBlockDataFromDBWithLock(DBHandle db, BlockData block) } } - private ScanResult scanBlock(BlockData block, DataTransferThrottler throttler, - Canceler canceler) { + private ScanResult scanBlock(BlockData block, DataTransferThrottler throttler, Canceler canceler) { ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion(); for (ContainerProtos.ChunkInfo chunk : block.getChunks()) { File chunkFile; try { - chunkFile = layout.getChunkFile(onDiskContainerData, - block.getBlockID(), chunk.getChunkName()); + chunkFile = layout.getChunkFile(onDiskContainerData, block.getBlockID(), chunk.getChunkName()); } catch (IOException ex) { - return ScanResult.unhealthy( - ScanResult.FailureType.MISSING_CHUNK_FILE, - new File(onDiskContainerData.getChunksPath()), ex); + return unhealthy(MISSING_CHUNK_FILE, new File(onDiskContainerData.getChunksPath()), ex); } if (!chunkFile.exists()) { - // In EC, client may write empty putBlock in padding block nodes. - // So, we need to make sure, chunk length > 0, before declaring - // the missing chunk file. - if (block.getChunks().size() > 0 && block - .getChunks().get(0).getLen() > 0) { - return ScanResult.unhealthy(ScanResult.FailureType.MISSING_CHUNK_FILE, - chunkFile, new IOException("Missing chunk file " + - chunkFile.getAbsolutePath())); + // In EC, a client may write empty putBlock in padding block nodes. + // So, we need to make sure, chunk length > 0, before declaring the missing chunk file. 
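A plain-types sketch of the EC padding-block guard that follows (ChunkMeta is a hypothetical stand-in for ContainerProtos.ChunkInfo):

import java.util.List;

final class EcPaddingGuardSketch {
  static final class ChunkMeta {
    final long len;
    ChunkMeta(long len) { this.len = len; }
  }

  // A missing chunk file only counts as a failure when the block really carries data;
  // EC padding blocks can legitimately have an empty putBlock and no chunk file on disk.
  static boolean missingFileIsFailure(boolean chunkFileExists, List<ChunkMeta> chunks) {
    if (chunkFileExists) {
      return false;
    }
    return !chunks.isEmpty() && chunks.get(0).len > 0;
  }
}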
+ if (!block.getChunks().isEmpty() && block.getChunks().get(0).getLen() > 0) { + return unhealthy(MISSING_CHUNK_FILE, chunkFile, + new IOException("Missing chunk file " + chunkFile.getAbsolutePath())); } - } else if (chunk.getChecksumData().getType() - != ContainerProtos.ChecksumType.NONE) { + } else if (chunk.getChecksumData().getType() != ContainerProtos.ChecksumType.NONE) { int bytesPerChecksum = chunk.getChecksumData().getBytesPerChecksum(); ByteBuffer buffer = BUFFER_POOL.getBuffer(bytesPerChecksum); - ScanResult result = verifyChecksum(block, chunk, chunkFile, layout, buffer, - throttler, canceler); + ScanResult result = verifyChecksum(block, chunk, chunkFile, layout, buffer, throttler, canceler); buffer.clear(); BUFFER_POOL.returnBuffer(buffer); if (!result.isHealthy()) { @@ -381,27 +336,21 @@ chunkFile, new IOException("Missing chunk file " + return ScanResult.healthy(); } - private static ScanResult verifyChecksum(BlockData block, - ContainerProtos.ChunkInfo chunk, File chunkFile, - ContainerLayoutVersion layout, ByteBuffer buffer, - DataTransferThrottler throttler, Canceler canceler) { - ChecksumData checksumData = - ChecksumData.getFromProtoBuf(chunk.getChecksumData()); + public static ScanResult verifyChecksum(BlockData block, ContainerProtos.ChunkInfo chunk, File chunkFile, + ContainerLayoutVersion layout, ByteBuffer buffer, DataTransferThrottler throttler, Canceler canceler) { + ChecksumData checksumData = ChecksumData.getFromProtoBuf(chunk.getChecksumData()); int checksumCount = checksumData.getChecksums().size(); int bytesPerChecksum = checksumData.getBytesPerChecksum(); - Checksum cal = new Checksum(checksumData.getChecksumType(), - bytesPerChecksum); + Checksum cal = new Checksum(checksumData.getChecksumType(), bytesPerChecksum); long bytesRead = 0; - try (FileChannel channel = FileChannel.open(chunkFile.toPath(), - ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) { - if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) { + try (FileChannel channel = + FileChannel.open(chunkFile.toPath(), ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) { + if (layout == FILE_PER_BLOCK) { channel.position(chunk.getOffset()); } for (int i = 0; i < checksumCount; i++) { - // limit last read for FILE_PER_BLOCK, to avoid reading next chunk - if (layout == ContainerLayoutVersion.FILE_PER_BLOCK && - i == checksumCount - 1 && - chunk.getLen() % bytesPerChecksum != 0) { + // Limit last read for FILE_PER_BLOCK, to avoid reading the next chunk + if (layout == FILE_PER_BLOCK && i == checksumCount - 1 && chunk.getLen() % bytesPerChecksum != 0) { buffer.limit((int) (chunk.getLen() % bytesPerChecksum)); } @@ -415,46 +364,34 @@ private static ScanResult verifyChecksum(BlockData block, throttler.throttle(v, canceler); ByteString expected = checksumData.getChecksums().get(i); - ByteString actual = cal.computeChecksum(buffer) - .getChecksums().get(0); + ByteString actual = cal.computeChecksum(buffer).getChecksums().get(0); if (!expected.equals(actual)) { - String message = String - .format("Inconsistent read for chunk=%s" + - " checksum item %d" + - " expected checksum %s" + - " actual checksum %s" + - " for block %s", + String message = String.format("Inconsistent read for chunk=%s checksum item %d " + + "expected checksum %s actual checksum %s for block %s", ChunkInfo.getFromProtoBuf(chunk), i, StringUtils.bytes2Hex(expected.asReadOnlyByteBuffer()), StringUtils.bytes2Hex(actual.asReadOnlyByteBuffer()), block.getBlockID()); - return ScanResult.unhealthy( - ScanResult.FailureType.CORRUPT_CHUNK, 
chunkFile, - new IOException(message)); + return unhealthy(CORRUPT_CHUNK, chunkFile, new IOException(message)); } } if (bytesRead != chunk.getLen()) { - String message = String - .format("Inconsistent read for chunk=%s expected length=%d" - + " actual length=%d for block %s", + String message = String.format("Inconsistent read for chunk=%s " + + "expected length=%d actual length=%d for block %s", chunk.getChunkName(), chunk.getLen(), bytesRead, block.getBlockID()); - return ScanResult.unhealthy( - ScanResult.FailureType.INCONSISTENT_CHUNK_LENGTH, chunkFile, - new IOException(message)); + return unhealthy(INCONSISTENT_CHUNK_LENGTH, chunkFile, new IOException(message)); } } catch (IOException ex) { - return ScanResult.unhealthy( - ScanResult.FailureType.MISSING_CHUNK_FILE, chunkFile, ex); + return unhealthy(MISSING_CHUNK_FILE, chunkFile, ex); } return ScanResult.healthy(); } private void loadContainerData(File containerFile) throws IOException { - onDiskContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); + onDiskContainerData = (KeyValueContainerData) ContainerDataYaml.readContainerFile(containerFile); onDiskContainerData.setVolume(volume); } @@ -464,9 +401,7 @@ void setContainerData(KeyValueContainerData containerData) { } @VisibleForTesting - ScanResult scanContainer(DataTransferThrottler throttler, - Canceler canceler) { + ScanResult scanContainer(DataTransferThrottler throttler, Canceler canceler) { return scanData(throttler, canceler); } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 47d4f3f9e70..97ea9e40e0a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -63,9 +63,8 @@ import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix; /** - * This class represents the KeyValueContainer metadata, which is the - * in-memory representation of container metadata and is represented on disk - * by the .container file. + * This class represents the KeyValueContainer metadata, + * which is the in-memory representation of container metadata and is represented on disk by the .container file. */ public class KeyValueContainerData extends ContainerData { @@ -78,7 +77,7 @@ public class KeyValueContainerData extends ContainerData { // Path to Container metadata Level DB/RocksDB Store and .container file. private String metadataPath; - //Type of DB used to store key to chunks mapping + //Type of DB used to store key to chunk mapping private String containerDBType = CONTAINER_DB_TYPE_ROCKSDB; private File dbFile = null; @@ -108,14 +107,14 @@ public class KeyValueContainerData extends ContainerData { /** * Constructs KeyValueContainerData object. 
+ * * @param id - ContainerId * @param layoutVersion container layout * @param size - maximum size of the container in bytes */ - public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion, - long size, String originPipelineId, String originNodeId) { - super(ContainerProtos.ContainerType.KeyValueContainer, id, layoutVersion, - size, originPipelineId, originNodeId); + public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion, long size, String originPipelineId, + String originNodeId) { + super(ContainerProtos.ContainerType.KeyValueContainer, id, layoutVersion, size, originPipelineId, originNodeId); this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; finalizedBlockSet = ConcurrentHashMap.newKeySet(); @@ -123,8 +122,7 @@ public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion, public KeyValueContainerData(KeyValueContainerData source) { super(source); - Preconditions.checkArgument(source.getContainerType() - == ContainerProtos.ContainerType.KeyValueContainer); + Preconditions.checkArgument(source.getContainerType() == ContainerProtos.ContainerType.KeyValueContainer); this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; this.schemaVersion = source.getSchemaVersion(); @@ -132,25 +130,22 @@ public KeyValueContainerData(KeyValueContainerData source) { } /** - * @param version The schema version indicating the table layout of the - * container's database. + * @param version The schema version indicating the table layout of the container's database. */ public void setSchemaVersion(String version) { schemaVersion = version; } /** - * @return The schema version describing the container database's table - * layout. + * @return The schema version describing the container database's table layout. */ public String getSchemaVersion() { return schemaVersion; } /** - * Returns schema version or the default value when the - * {@link KeyValueContainerData#schemaVersion} is null. The default value can - * be referred to {@link KeyValueContainerUtil#isSameSchemaVersion}. + * Returns schema version or the default value when the {@link KeyValueContainerData#schemaVersion} is null. + * The default value can be referred to {@link KeyValueContainerUtil#isSameSchemaVersion}. * * @return Schema version as a string. * @throws UnsupportedOperationException If no valid schema version is found. @@ -167,9 +162,7 @@ public String getSupportedSchemaVersionOrDefault() { } /** - * Sets Container dbFile. This should be called only during creation of - * KeyValue container. - * @param containerDbFile + * Sets Container dbFile. This should be called only during the creation of KeyValue container. */ public void setDbFile(File containerDbFile) { dbFile = containerDbFile; @@ -177,6 +170,7 @@ public void setDbFile(File containerDbFile) { /** * Returns container DB file. + * * @return dbFile */ public File getDbFile() { @@ -185,7 +179,8 @@ public File getDbFile() { /** * Returns container metadata path. - * @return - Physical path where container file and checksum is stored. + * + * @return - Physical path where container file and checksum are stored. */ public String getMetadataPath() { return metadataPath; @@ -202,6 +197,7 @@ public void setMetadataPath(String path) { /** * Returns the path to base dir of the container. + * * @return Path to base dir */ @Override @@ -218,7 +214,7 @@ public long getBlockCommitSequenceId() { } /** - * updates the blockCommitSequenceId. + * Updates the blockCommitSequenceId. 
*/ public void updateBlockCommitSequenceId(long id) { this.blockCommitSequenceId = id; @@ -226,6 +222,7 @@ public void updateBlockCommitSequenceId(long id) { /** * Returns the DBType used for the container. + * * @return containerDBType */ public String getContainerDBType() { @@ -234,7 +231,6 @@ public String getContainerDBType() { /** * Sets the DBType used for the container. - * @param containerDBType */ public void setContainerDBType(String containerDBType) { this.containerDBType = containerDBType; @@ -288,17 +284,37 @@ public void addToFinalizedBlockSet(long localID) { finalizedBlockSet.add(localID); } + /** + * Returns a set of finalized block IDs associated with the container data. + * + * @return a Set of Long values representing the IDs of finalized blocks. + */ public Set getFinalizedBlockSet() { return finalizedBlockSet; } + /** + * Checks if a block with the specified local ID exists in the finalized block set. + * + * @param localID The ID of the block to check for existence in the finalized block set. + * @return true if the block exists in the finalized block set, false otherwise. + */ public boolean isFinalizedBlockExist(long localID) { return finalizedBlockSet.contains(localID); } + /** + * Clears the set of finalized blocks from both memory and the database. + * This operation will remove all finalized blocks associated with the current container's prefix. + * It first checks if the finalized block set is not empty, + * then deletes the corresponding entries from the database using batch operations and clears the in-memory set. + * + * @param db The database handle to use it for the batch operations. It must not be {@code null}. + * @throws IOException If any I/O error occurs during the batch operations. + */ public void clearFinalizedBlock(DBHandle db) throws IOException { if (!finalizedBlockSet.isEmpty()) { - // delete from db and clear memory + // Delete it from db and clear memory. // Should never fail. Preconditions.checkNotNull(db, "DB cannot be null here"); try (BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) { @@ -324,10 +340,8 @@ public ContainerDataProto getProtoBufMessage() { builder.setBlockCount(this.getBlockCount()); for (Map.Entry entry : getMetadata().entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); + ContainerProtos.KeyValue.Builder keyValBuilder = ContainerProtos.KeyValue.newBuilder(); + builder.addMetadata(keyValBuilder.setKey(entry.getKey()).setValue(entry.getValue()).build()); } if (this.getBytesUsed() >= 0) { @@ -341,35 +355,44 @@ public ContainerDataProto getProtoBufMessage() { return builder.build(); } + /** + * Returns an unmodifiable list of YAML field names used in the key-value container. + * + * @return a List of Strings representing the YAML field names. + */ public static List getYamlFields() { return Collections.unmodifiableList(KV_YAML_FIELDS); } /** * Update DB counters related to block metadata. + * * @param db - Reference to container DB. * @param batchOperation - Batch Operation to batch DB operations. * @param deletedBlockCount - Number of blocks deleted. * @param releasedBytes - Number of bytes released. 
- * @throws IOException */ - public void updateAndCommitDBCounters(DBHandle db, - BatchOperation batchOperation, int deletedBlockCount, + public void updateAndCommitDBCounters(DBHandle db, BatchOperation batchOperation, int deletedBlockCount, long releasedBytes) throws IOException { Table metadataTable = db.getStore().getMetadataTable(); // Set Bytes used and block count key. - metadataTable.putWithBatch(batchOperation, getBytesUsedKey(), - getBytesUsed() - releasedBytes); - metadataTable.putWithBatch(batchOperation, getBlockCountKey(), - getBlockCount() - deletedBlockCount); - metadataTable.putWithBatch(batchOperation, + metadataTable.putWithBatch(batchOperation, getBytesUsedKey(), getBytesUsed() - releasedBytes); + metadataTable.putWithBatch(batchOperation, getBlockCountKey(), getBlockCount() - deletedBlockCount); + metadataTable.putWithBatch( + batchOperation, getPendingDeleteBlockCountKey(), getNumPendingDeletionBlocks() - deletedBlockCount); db.getStore().getBatchHandler().commitBatchOperation(batchOperation); } + /** + * Resets the count of pending deletion blocks to zero. + * + * @param db The database handle used to access the container's metadata table. + * @throws IOException If an I/O error occurs while updating the metadata table on disk. + */ public void resetPendingDeleteBlockCount(DBHandle db) throws IOException { // Reset the in memory metadata. numPendingDeletionBlocks.set(0); @@ -378,59 +401,116 @@ public void resetPendingDeleteBlockCount(DBHandle db) throws IOException { metadataTable.put(getPendingDeleteBlockCountKey(), 0L); } - // NOTE: Below are some helper functions to format keys according - // to container schemas, we should use them instead of using - // raw const variables defined. + // NOTE: Below are some helper functions to format keys according to container schemas, + // we should use them instead of using raw const variables defined. + /** + * Generates a formatted key for accessing block data using the provided local block ID. + * + * @param localID The local ID of the block within the container. + * @return The formatted key string specific to the container's schema. + */ public String getBlockKey(long localID) { return formatKey(Long.toString(localID)); } + /** + * Generates a deleting block key string using the provided local ID. + * + * @param localID The local ID of the block within the container. + * @return The formatted key string specific to the container's schema with the deleting block key prefix. + */ public String getDeletingBlockKey(long localID) { return formatKey(DELETING_KEY_PREFIX + localID); } + /** + * Generates a formatted key for accessing delete transaction data using the provided transaction ID. + * + * @param txnID The ID of the transaction to be deleted. + * @return The formatted key string specific to the container's schema. + */ public String getDeleteTxnKey(long txnID) { return formatKey(Long.toString(txnID)); } + /** + * Retrieves the formatted key for the latest delete transaction. + * + * @return A String representing the formatted key for the latest delete transaction. + */ public String getLatestDeleteTxnKey() { return formatKey(DELETE_TRANSACTION_KEY); } + /** + * Retrieves the formatted key specific to the Block Commit Sequence ID. + * + * @return A String representing the formatted key for the Block Commit Sequence ID. + */ public String getBcsIdKey() { return formatKey(BLOCK_COMMIT_SEQUENCE_ID); } + /** + * Retrieves the formatted key specific to the Block Count. 
+ * + * @return A String representing the formatted key for the Block Count. + */ public String getBlockCountKey() { return formatKey(BLOCK_COUNT); } + /** + * Retrieves the formatted key specific to the bytes used in the container. + * This key is used to store or query the bytes used information from the database specific to the container's schema. + * + * @return A String representing the formatted key for bytes used. + */ public String getBytesUsedKey() { return formatKey(CONTAINER_BYTES_USED); } + /** + * Retrieves the formatted key specific to the Pending Delete Block Count. + * + * @return A String representing the formatted key for the Pending Delete Block Count. + */ public String getPendingDeleteBlockCountKey() { return formatKey(PENDING_DELETE_BLOCK_COUNT); } + /** + * Retrieves the key prefix used for deleting blocks within the container. + * + * @return A String representing the formatted key prefix specific to the container's schema for deleting blocks. + */ public String getDeletingBlockKeyPrefix() { return formatKey(DELETING_KEY_PREFIX); } + /** + * Returns a KeyPrefixFilter that is configured to filter out keys with the container's schema-specific prefix. + * + * @return a KeyPrefixFilter object that filters out keys using the container's schema-specific prefix. + */ public KeyPrefixFilter getUnprefixedKeyFilter() { String schemaPrefix = containerPrefix(); return new KeyPrefixFilter().addFilter(schemaPrefix + "#", true); } + /** + * Generates and returns a {@link KeyPrefixFilter} + * configured to filter out keys that have the prefix used for deleting blocks within the container. + * + * @return a KeyPrefixFilter object configured to filter keys with the deleting block key prefix. + */ public KeyPrefixFilter getDeletingBlockKeyFilter() { return new KeyPrefixFilter().addFilter(getDeletingBlockKeyPrefix()); } /** - * Schema v3 use a prefix as startKey, - * for other schemas just return null. - * @return + * Schema v3 uses a prefix as startKey; other schemas return {@code null}. */ public String startKeyEmpty() { if (hasSchema(SCHEMA_V3)) { @@ -440,9 +520,7 @@ public String startKeyEmpty() { } /** - * Schema v3 use containerID as key prefix, - * for other schemas just return null. - * @return + * Schema v3 uses containerID as the key prefix; other schemas return {@code null}. */ public String containerPrefix() { if (hasSchema(SCHEMA_V3)) { @@ -452,9 +530,9 @@ public String containerPrefix() { } /** - * Format the raw key to a schema specific format key. - * Schema v3 use container ID as key prefix, - * for other schemas just return the raw key. + * Format the raw key to a schema-specific format key. + * Schema v3 uses container ID as the key prefix; other schemas return the raw key. + * * @param key raw key * @return formatted key */ @@ -465,8 +543,13 @@ private String formatKey(String key) { return key; } + /** + * Checks if the provided version matches the schema version of the container. + * + * @param version The schema version to compare against the container's schema version. + * @return true if the provided version matches the container's schema version, false otherwise.
+ */ public boolean hasSchema(String version) { return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, version); } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 1bcb64200b2..844ad7f29cf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -18,11 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue; -import java.io.File; -import java.io.FilenameFilter; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; +import java.io.*; import java.nio.ByteBuffer; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -56,6 +52,8 @@ import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdfs.util.Canceler; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChunkBuffer; @@ -65,9 +63,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.impl.*; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; @@ -88,6 +84,7 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import com.google.common.annotations.VisibleForTesting; @@ -124,6 +121,10 @@ import static org.apache.hadoop.ozone.ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.FailureType.*; +import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult.unhealthy; +import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerCheck.BUFFER_POOL; +import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerCheck.verifyChecksum; import org.apache.hadoop.util.Time; import org.apache.ratis.statemachine.StateMachine; @@ -136,8 +137,7 @@ */ public class KeyValueHandler extends Handler { - public static final Logger LOG = LoggerFactory.getLogger( - KeyValueHandler.class); + public static final Logger LOG = LoggerFactory.getLogger(KeyValueHandler.class); 
private final BlockManager blockManager; private final ChunkManager chunkManager; @@ -149,18 +149,13 @@ public class KeyValueHandler extends Handler { private final Striped containerCreationLocks; private static FaultInjector injector; - public KeyValueHandler(ConfigurationSource config, - String datanodeId, - ContainerSet contSet, - VolumeSet volSet, - ContainerMetrics metrics, - IncrementalReportSender icrSender) { + public KeyValueHandler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volSet, + ContainerMetrics metrics, IncrementalReportSender icrSender) { super(config, datanodeId, contSet, volSet, metrics, icrSender); blockManager = new BlockManagerImpl(config); - validateChunkChecksumData = conf.getObject( - DatanodeConfiguration.class).isChunkDataValidationCheck(); - chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, - volSet); + validateChunkChecksumData = conf.getObject(DatanodeConfiguration.class).isChunkDataValidationCheck(); + chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, volSet); + try { volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } catch (Exception e) { @@ -170,49 +165,44 @@ public KeyValueHandler(ConfigurationSource config, maxContainerSize = (long) config.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); - // this striped handler lock is used for synchronizing createContainer - // Requests. + // This striped handler lock is used for synchronizing createContainer Requests. final int threadCountPerDisk = conf.getInt( - OzoneConfigKeys - .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, - OzoneConfigKeys - .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); - final int numberOfDisks = - HddsServerUtil.getDatanodeStorageDirs(conf).size(); - containerCreationLocks = Striped.lazyWeakLock( - threadCountPerDisk * numberOfDisks); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + + final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); + containerCreationLocks = Striped.lazyWeakLock(threadCountPerDisk * numberOfDisks); boolean isUnsafeByteBufferConversionEnabled = conf.getBoolean( OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); - byteBufferToByteString = - ByteStringConversion - .createByteBufferConversion(isUnsafeByteBufferConversionEnabled); + byteBufferToByteString = ByteStringConversion.createByteBufferConversion(isUnsafeByteBufferConversionEnabled); } + /** + * Returns the {@link VolumeChoosingPolicy} used for testing purposes. + * + * @return the instance of VolumeChoosingPolicy. 
+ */ @VisibleForTesting public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() { return volumeChoosingPolicy; } @Override - public StateMachine.DataChannel getStreamDataChannel( - Container container, ContainerCommandRequestProto msg) + public StateMachine.DataChannel getStreamDataChannel(Container container, ContainerCommandRequestProto msg) throws StorageContainerException { KeyValueContainer kvContainer = (KeyValueContainer) container; checkContainerOpen(kvContainer); if (msg.hasWriteChunk()) { - BlockID blockID = - BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()); + BlockID blockID = BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()); - return chunkManager.getStreamDataChannel(kvContainer, - blockID, metrics); + return chunkManager.getStreamDataChannel(kvContainer, blockID, metrics); } else { - throw new StorageContainerException("Malformed request.", - ContainerProtos.Result.IO_EXCEPTION); + throw new StorageContainerException("Malformed request.", ContainerProtos.Result.IO_EXCEPTION); } } @@ -223,25 +213,19 @@ public void stop() { } @Override - public ContainerCommandResponseProto handle( - ContainerCommandRequestProto request, Container container, + public ContainerCommandResponseProto handle(ContainerCommandRequestProto request, Container container, DispatcherContext dispatcherContext) { try { - return KeyValueHandler - .dispatchRequest(this, request, (KeyValueContainer) container, - dispatcherContext); + return KeyValueHandler.dispatchRequest(this, request, (KeyValueContainer) container, dispatcherContext); } catch (RuntimeException e) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException(e, CONTAINER_INTERNAL_ERROR), - request); + return ContainerUtils.logAndReturnError(LOG, new StorageContainerException(e, CONTAINER_INTERNAL_ERROR), request); } } @VisibleForTesting - static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { + static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, ContainerCommandRequestProto request, + KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { Type cmdType = request.getCmdType(); switch (cmdType) { @@ -254,6 +238,8 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, case DeleteContainer: return handler.handleDeleteContainer(request, kvContainer); case ListContainer: + case ListChunk: + case CompactChunk: return handler.handleUnsupportedOp(request); case CloseContainer: return handler.handleCloseContainer(request, kvContainer); @@ -262,24 +248,19 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, case GetBlock: return handler.handleGetBlock(request, kvContainer); case DeleteBlock: - return handler.handleDeleteBlock(request, kvContainer); + return handler.handleDeleteBlock(); case ListBlock: return handler.handleListBlock(request, kvContainer); case ReadChunk: return handler.handleReadChunk(request, kvContainer, dispatcherContext); case DeleteChunk: - return handler.handleDeleteChunk(request, kvContainer); + return handler.handleDeleteChunk(); case WriteChunk: return handler.handleWriteChunk(request, kvContainer, dispatcherContext); case StreamInit: - return handler.handleStreamInit(request, kvContainer, dispatcherContext); - case ListChunk: - return handler.handleUnsupportedOp(request); - case CompactChunk: - return handler.handleUnsupportedOp(request); + return 
handler.handleStreamInit(request, kvContainer); case PutSmallFile: - return handler - .handlePutSmallFile(request, kvContainer, dispatcherContext); + return handler.handlePutSmallFile(request, kvContainer, dispatcherContext); case GetSmallFile: return handler.handleGetSmallFile(request, kvContainer); case GetCommittedBlockLength: @@ -287,42 +268,48 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, case FinalizeBlock: return handler.handleFinalizeBlock(request, kvContainer); case Echo: - return handler.handleEcho(request, kvContainer); + return handler.handleEcho(request); + case VerifyBlock: + return handler.handleVerifyBlock(request, kvContainer); default: return null; } } + /** + * Returns the instance of {@link ChunkManager} used within the {@link KeyValueHandler}. + * + * @return the instance of {@link ChunkManager}. + */ @VisibleForTesting public ChunkManager getChunkManager() { return this.chunkManager; } + /** + * Returns the instance of {@link BlockManager} used within the {@link KeyValueHandler}. + * + * @return the instance of {@link BlockManager}. + */ @VisibleForTesting public BlockManager getBlockManager() { return this.blockManager; } - ContainerCommandResponseProto handleStreamInit( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { + ContainerCommandResponseProto handleStreamInit(ContainerCommandRequestProto request, KeyValueContainer kvContainer) { final BlockID blockID; if (request.hasWriteChunk()) { WriteChunkRequestProto writeChunk = request.getWriteChunk(); blockID = BlockID.getFromProtobuf(writeChunk.getBlockID()); } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed {} request. trace ID: {}", - request.getCmdType(), request.getTraceID()); - } + LOG.debug("Malformed {} request. trace ID: {}", request.getCmdType(), request.getTraceID()); return malformedRequest(request); } - String path = null; + String path; try { checkContainerOpen(kvContainer); - path = chunkManager - .streamInit(kvContainer, blockID); + path = chunkManager.streamInit(kvContainer, blockID); } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); } @@ -333,45 +320,40 @@ ContainerCommandResponseProto handleStreamInit( } /** - * Handles Create Container Request. If successful, adds the container to - * ContainerSet and sends an ICR to the SCM. + * Handles Create Container Request. + * If successful, adds the container to {@link ContainerSet} and sends an ICR to the SCM. */ ContainerCommandResponseProto handleCreateContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasCreateContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Create Container request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Create Container request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } - // Create Container request should be passed a null container as the - // container would be created here. + // Create Container request should be passed a null container as the container would be created here.
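[Editor's aside, not part of the patch] The containerCreationLocks field touched in this hunk is a Guava Striped<Lock>; below is a minimal, self-contained sketch of that per-key locking pattern. The stripe count and the createIfAbsent name are illustrative, not taken from the Ozone code.

import com.google.common.util.concurrent.Striped;
import java.util.concurrent.locks.Lock;

public class ContainerCreationLockSketch {
  // One lock per stripe; the same container ID always maps to the same stripe,
  // so concurrent create calls for one container serialize while calls for
  // different containers usually proceed in parallel.
  private final Striped<Lock> locks = Striped.lazyWeakLock(64); // stripe count is illustrative

  public void createIfAbsent(long containerID, Runnable createAction) {
    Lock lock = locks.get(containerID);
    lock.lock();
    try {
      createAction.run(); // e.g. create the container only if it does not already exist
    } finally {
      lock.unlock();
    }
  }
}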
if (kvContainer != null) { return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException( - "Container creation failed because " + "key value container" + + new StorageContainerException("Container creation failed because key value container" + " already exists", null, CONTAINER_ALREADY_EXISTS), request); } long containerID = request.getContainerID(); - ContainerLayoutVersion layoutVersion = - ContainerLayoutVersion.getConfiguredVersion(conf); + ContainerLayoutVersion layoutVersion = ContainerLayoutVersion.getConfiguredVersion(conf); KeyValueContainerData newContainerData = new KeyValueContainerData( - containerID, layoutVersion, maxContainerSize, request.getPipelineID(), + containerID, + layoutVersion, + maxContainerSize, + request.getPipelineID(), getDatanodeId()); State state = request.getCreateContainer().getState(); if (state != null) { newContainerData.setState(state); } - newContainerData.setReplicaIndex(request.getCreateContainer() - .getReplicaIndex()); + newContainerData.setReplicaIndex(request.getCreateContainer().getReplicaIndex()); - // TODO: Add support to add metadataList to ContainerData. Add metadata - // to container during creation. + // TODO: Add support to add metadataList to ContainerData. + // Add metadata to container during creation. KeyValueContainer newContainer = new KeyValueContainer(newContainerData, conf); boolean created = false; Lock containerIdLock = containerCreationLocks.get(containerID); @@ -381,9 +363,9 @@ ContainerCommandResponseProto handleCreateContainer( newContainer.create(volumeSet, volumeChoosingPolicy, clusterId); created = containerSet.addContainer(newContainer); } else { - // The create container request for an already existing container can - // arrive in case the ContainerStateMachine reapplies the transaction - // on datanode restart. Just log a warning msg here. + // The create container request for an already existing container can arrive + // in case the ContainerStateMachine reapplies the transaction on datanode restart. + // Just log a warning msg here. LOG.debug("Container already exists. container Id {}", containerID); } } catch (StorageContainerException ex) { @@ -404,12 +386,10 @@ ContainerCommandResponseProto handleCreateContainer( return getSuccessResponse(request); } - private void populateContainerPathFields(KeyValueContainer container, - HddsVolume hddsVolume) throws IOException { + private void populateContainerPathFields(KeyValueContainer container, HddsVolume hddsVolume) throws IOException { volumeSet.readLock(); try { - String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID( - hddsVolume, clusterId); + String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID(hddsVolume, clusterId); container.populatePathFields(idDir, hddsVolume); } finally { volumeSet.readUnlock(); @@ -422,10 +402,7 @@ private void populateContainerPathFields(KeyValueContainer container, ContainerCommandResponseProto handleReadContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasReadContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Read Container request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Read Container request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } @@ -436,23 +413,18 @@ ContainerCommandResponseProto handleReadContainer( /** - * Handles Update Container Request.
If successful, the container metadata - * is updated. + * Handles Update Container Request. If successful, the container metadata is updated. */ ContainerCommandResponseProto handleUpdateContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasUpdateContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Update Container request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } boolean forceUpdate = request.getUpdateContainer().getForceUpdate(); - List keyValueList = - request.getUpdateContainer().getMetadataList(); + List keyValueList = request.getUpdateContainer().getMetadataList(); Map metadata = new HashMap<>(); for (KeyValue keyValue : keyValueList) { metadata.put(keyValue.getKey(), keyValue.getValue()); @@ -471,18 +443,14 @@ ContainerCommandResponseProto handleUpdateContainer( /** * Handles Delete Container Request. * Open containers cannot be deleted. - * Holds writeLock on ContainerSet till the container is removed from - * containerMap. On disk deletion of container files will happen - * asynchronously without the lock. + * Holds writeLock on ContainerSet till the container is removed from containerMap. + * On disk deletion of container files will happen asynchronously without the lock. */ - ContainerCommandResponseProto handleDeleteContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleDeleteContainer(ContainerCommandRequestProto request, + KeyValueContainer kvContainer) { if (!request.hasDeleteContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete container request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Delete container request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } @@ -499,14 +467,11 @@ ContainerCommandResponseProto handleDeleteContainer( * Handles Close Container Request. An open container is closed. * Close Container call is idempotent. */ - ContainerCommandResponseProto handleCloseContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleCloseContainer(ContainerCommandRequestProto request, + KeyValueContainer kvContainer) { if (!request.hasCloseContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Update Container request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } try { @@ -516,8 +481,7 @@ ContainerCommandResponseProto handleCloseContainer( return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Close Container failed", ex, - IO_EXCEPTION), request); + new StorageContainerException("Close Container failed", ex, IO_EXCEPTION), request); } return getSuccessResponse(request); @@ -526,15 +490,11 @@ ContainerCommandResponseProto handleCloseContainer( /** * Handle Put Block operation. Calls BlockManager to process the request. 
*/ - ContainerCommandResponseProto handlePutBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, + ContainerCommandResponseProto handlePutBlock(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { if (!request.hasPutBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Put Key request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Put Key request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } @@ -548,20 +508,18 @@ ContainerCommandResponseProto handlePutBlock( boolean endOfBlock = false; if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) { - // There are two cases where client sends empty put block with eof. - // (1) An EC empty file. In this case, the block/chunk file does not exist, - // so no need to flush/close the file. + // There are two cases where a client sends an empty put block with eof. + // (1) An EC empty file. In this case, the block/chunk file does not exist, so no need to flush/close the file. // (2) Ratis output stream in incremental chunk list mode may send empty put block // to close the block, in which case we need to flush/close the file. - if (!request.getPutBlock().getBlockData().getChunksList().isEmpty() || - blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) { + if (!request.getPutBlock().getBlockData().getChunksList().isEmpty() + || blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) { chunkManager.finishWriteChunks(kvContainer, blockData); } endOfBlock = true; } - long bcsId = - dispatcherContext == null ? 0 : dispatcherContext.getLogIndex(); + long bcsId = dispatcherContext == null ? 0 : dispatcherContext.getLogIndex(); blockData.setBlockCommitSequenceId(bcsId); blockManager.putBlock(kvContainer, blockData, endOfBlock); @@ -572,72 +530,66 @@ ContainerCommandResponseProto handlePutBlock( } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Put Key failed", ex, IO_EXCEPTION), + return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Put Key failed", ex, IO_EXCEPTION), request); } return putBlockResponseSuccess(request, blockDataProto); } - ContainerCommandResponseProto handleFinalizeBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleFinalizeBlock(ContainerCommandRequestProto request, + KeyValueContainer kvContainer) { ContainerCommandResponseProto responseProto = checkFaultInjector(request); if (responseProto != null) { return responseProto; } if (!request.hasFinalizeBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Finalize block request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Finalize block request. 
trace ID: {}", request.getTraceID()); return malformedRequest(request); } ContainerProtos.BlockData responseData; try { if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) { - throw new StorageContainerException("DataNode has not finalized " + - "upgrading to a version that supports block finalization.", UNSUPPORTED_REQUEST); + throw new StorageContainerException( + "DataNode has not finalized upgrading to a version that supports block finalization.", + UNSUPPORTED_REQUEST); } checkContainerOpen(kvContainer); - BlockID blockID = BlockID.getFromProtobuf( - request.getFinalizeBlock().getBlockID()); + BlockID blockID = BlockID.getFromProtobuf(request.getFinalizeBlock().getBlockID()); Preconditions.checkNotNull(blockID); LOG.info("Finalized Block request received {} ", blockID); - responseData = blockManager.getBlock(kvContainer, blockID) - .getProtoBufMessage(); + responseData = blockManager.getBlock(kvContainer, blockID).getProtoBufMessage(); chunkManager.finalizeWriteChunk(kvContainer, blockID); blockManager.finalizeBlock(kvContainer, blockID); - kvContainer.getContainerData() - .addToFinalizedBlockSet(blockID.getLocalID()); + kvContainer.getContainerData().addToFinalizedBlockSet(blockID.getLocalID()); LOG.info("Block has been finalized {} ", blockID); - } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException( - "Finalize Block failed", ex, IO_EXCEPTION), request); + return ContainerUtils.logAndReturnError( + LOG, + new StorageContainerException("Finalize Block failed", ex, IO_EXCEPTION), + request); } return getFinalizeBlockResponse(request, responseData); } - ContainerCommandResponseProto handleEcho( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleEcho(ContainerCommandRequestProto request) { return getEchoResponse(request); } /** * Checks if a replicaIndex needs to be checked based on the client version for a request. + * * @param request ContainerCommandRequest object. - * @return true if the validation is required for the client version else false. + * @return {@code true} if the validation is required for the client version else {@code false}. */ private boolean replicaIndexCheckRequired(ContainerCommandRequestProto request) { return request.hasVersion() && request.getVersion() >= EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue(); @@ -650,29 +602,23 @@ ContainerCommandResponseProto handleGetBlock( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasGetBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Get Key request. 
trace ID: {}", request.getTraceID()); return malformedRequest(request); } ContainerProtos.BlockData responseData; try { - BlockID blockID = BlockID.getFromProtobuf( - request.getGetBlock().getBlockID()); + BlockID blockID = BlockID.getFromProtobuf(request.getGetBlock().getBlockID()); if (replicaIndexCheckRequired(request)) { BlockUtils.verifyReplicaIdx(kvContainer, blockID); } responseData = blockManager.getBlock(kvContainer, blockID).getProtoBufMessage(); final long numBytes = responseData.getSerializedSize(); metrics.incContainerBytesStats(Type.GetBlock, numBytes); - } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Get Key failed", ex, IO_EXCEPTION), + return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Get Key failed", ex, IO_EXCEPTION), request); } @@ -683,8 +629,8 @@ ContainerCommandResponseProto handleGetBlock( * Handles GetCommittedBlockLength operation. * Calls BlockManager to process the request. */ - ContainerCommandResponseProto handleGetCommittedBlockLength( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleGetCommittedBlockLength(ContainerCommandRequestProto request, + KeyValueContainer kvContainer) { ContainerCommandResponseProto responseProto = checkFaultInjector(request); if (responseProto != null) { @@ -692,25 +638,22 @@ ContainerCommandResponseProto handleGetCommittedBlockLength( } if (!request.hasGetCommittedBlockLength()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Get Key request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } long blockLength; try { - BlockID blockID = BlockID - .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID()); + BlockID blockID = BlockID.getFromProtobuf(request.getGetCommittedBlockLength().getBlockID()); BlockUtils.verifyBCSId(kvContainer, blockID); blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID); } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("GetCommittedBlockLength failed", ex, - IO_EXCEPTION), request); + return ContainerUtils.logAndReturnError( + LOG, + new StorageContainerException("GetCommittedBlockLength failed", ex, IO_EXCEPTION), + request); } return getBlockLengthResponse(request, blockLength); @@ -719,14 +662,10 @@ ContainerCommandResponseProto handleGetCommittedBlockLength( /** * Handle List Block operation. Calls BlockManager to process the request. */ - ContainerCommandResponseProto handleListBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleListBlock(ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasListBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed list block request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed list block request. 
trace ID: {}", request.getTraceID()); return malformedRequest(request); } @@ -737,8 +676,7 @@ ContainerCommandResponseProto handleListBlock( if (request.getListBlock().hasStartLocalID()) { startLocalId = request.getListBlock().getStartLocalID(); } - List responseData = - blockManager.listBlock(kvContainer, startLocalId, count); + List responseData = blockManager.listBlock(kvContainer, startLocalId, count); for (BlockData responseDatum : responseData) { returnData.add(responseDatum.getProtoBufMessage()); } @@ -754,37 +692,31 @@ ContainerCommandResponseProto handleListBlock( } /** - * Handle Delete Block operation. Calls BlockManager to process the request. + * Handle Delete Block operation. + * Calls BlockManager to process the request. */ @Deprecated - ContainerCommandResponseProto handleDeleteBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleDeleteBlock() { // Block/ Chunk Deletion is handled by BlockDeletingService. // SCM sends Block Deletion commands directly to Datanodes and not // through a Pipeline. - throw new UnsupportedOperationException("Datanode handles block deletion " + - "using BlockDeletingService"); + throw new UnsupportedOperationException("Datanode handles block deletion using BlockDeletingService"); } /** - * Handle Read Chunk operation. Calls ChunkManager to process the request. + * Handle Read Chunk operation. + * Calls ChunkManager to process the request. */ - ContainerCommandResponseProto handleReadChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, + ContainerCommandResponseProto handleReadChunk(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { if (!request.hasReadChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Read Chunk request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Read Chunk request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } ChunkBuffer data; try { - BlockID blockID = BlockID.getFromProtobuf( - request.getReadChunk().getBlockID()); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk() - .getChunkData()); + BlockID blockID = BlockID.getFromProtobuf(request.getReadChunk().getBlockID()); + ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk().getChunkData()); Preconditions.checkNotNull(chunkInfo); if (replicaIndexCheckRequired(request)) { BlockUtils.verifyReplicaIdx(kvContainer, blockID); @@ -795,21 +727,18 @@ ContainerCommandResponseProto handleReadChunk( dispatcherContext = DispatcherContext.getHandleReadChunk(); } - boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()) - .equals(ContainerProtos.ReadChunkVersion.V0); + boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()).equals(ContainerProtos.ReadChunkVersion.V0); if (isReadChunkV0) { - // For older clients, set ReadDataIntoSingleBuffer to true so that - // all the data read from chunk file is returned as a single - // ByteString. Older clients cannot process data returned as a list - // of ByteStrings. + // For older clients, + // set ReadDataIntoSingleBuffer to true + // so that all the data read from a chunk file is returned as a single ByteString. + // Older clients cannot process data returned as a list of ByteStrings. 
chunkInfo.setReadDataIntoSingleBuffer(true); } - data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, - dispatcherContext); + data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); LOG.debug("read chunk from block {} chunk {}", blockID, chunkInfo); - // Validate data only if the read chunk is issued by Ratis for its - // internal logic. + // Validate data only if the read chunk is issued by Ratis for its internal logic. // For client reads, the client is expected to validate. if (DispatcherContext.op(dispatcherContext).readFromTmpFile()) { validateChunkChecksumData(data, chunkInfo); @@ -834,13 +763,10 @@ ContainerCommandResponseProto handleReadChunk( * Handle Delete Chunk operation. Calls ChunkManager to process the request. */ @Deprecated - ContainerCommandResponseProto handleDeleteChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleDeleteChunk() { // Block/ Chunk Deletion is handled by BlockDeletingService. - // SCM sends Block Deletion commands directly to Datanodes and not - // through a Pipeline. - throw new UnsupportedOperationException("Datanode handles chunk deletion " + - "using BlockDeletingService"); + // SCM sends Block Deletion commands directly to Datanodes and not through a Pipeline. + throw new UnsupportedOperationException("Datanode handles chunk deletion using BlockDeletingService"); } private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) @@ -855,17 +781,14 @@ private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) } /** - * Handle Write Chunk operation. Calls ChunkManager to process the request. + * Handle Write Chunk operation. + * Calls ChunkManager to process the request. */ - ContainerCommandResponseProto handleWriteChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, + ContainerCommandResponseProto handleWriteChunk(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { if (!request.hasWriteChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Write Chunk request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Write Chunk request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } @@ -886,26 +809,20 @@ ContainerCommandResponseProto handleWriteChunk( } final boolean isWrite = dispatcherContext.getStage().isWrite(); if (isWrite) { - data = - ChunkBuffer.wrap(writeChunk.getData().asReadOnlyByteBufferList()); + data = ChunkBuffer.wrap(writeChunk.getData().asReadOnlyByteBufferList()); validateChunkChecksumData(data, chunkInfo); } - chunkManager - .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); + chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); final boolean isCommit = dispatcherContext.getStage().isCommit(); if (isCommit && writeChunk.hasBlock()) { long startTime = Time.monotonicNowNanos(); metrics.incContainerOpsMetrics(Type.PutBlock); - BlockData blockData = BlockData.getFromProtoBuf( - writeChunk.getBlock().getBlockData()); - // optimization for hsync when WriteChunk is in commit phase: - // - // block metadata is piggybacked in the same message. - // there will not be an additional PutBlock request. - // - // do not do this in WRITE_DATA phase otherwise PutBlock will be out - // of order. 
+ BlockData blockData = BlockData.getFromProtoBuf(writeChunk.getBlock().getBlockData()); + // Optimization for hsync when WriteChunk is in commit phase: + // Block metadata is piggybacked in the same message. + // There will not be an additional PutBlock request. + // Do not do this in the WRITE_DATA phase, otherwise PutBlock will be out of order. blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); boolean eob = writeChunk.getBlock().getEof(); if (eob) { @@ -920,8 +837,7 @@ ContainerCommandResponseProto handleWriteChunk( // We should increment stats after writeChunk if (isWrite) { - metrics.incContainerBytesStats(Type.WriteChunk, writeChunk - .getChunkData().getLen()); + metrics.incContainerBytesStats(Type.WriteChunk, writeChunk.getChunkData().getLen()); } } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); @@ -935,19 +851,15 @@ ContainerCommandResponseProto handleWriteChunk( } /** - * Handle Put Small File operation. Writes the chunk and associated key - * using a single RPC. Calls BlockManager and ChunkManager to process the - * request. + * Handle Put Small File operation. + * Writes the chunk and associated key using a single RPC. + * Calls BlockManager and ChunkManager to process the request. */ - ContainerCommandResponseProto handlePutSmallFile( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, + ContainerCommandResponseProto handlePutSmallFile(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { if (!request.hasPutSmallFile()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Put Small File request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Put Small File request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } @@ -956,27 +868,24 @@ ContainerCommandResponseProto handlePutSmallFile( try { checkContainerOpen(kvContainer); - BlockData blockData = BlockData.getFromProtoBuf( - putSmallFileReq.getBlock().getBlockData()); + BlockData blockData = BlockData.getFromProtoBuf(putSmallFileReq.getBlock().getBlockData()); Preconditions.checkNotNull(blockData); ContainerProtos.ChunkInfo chunkInfoProto = putSmallFileReq.getChunkInfo(); ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto); Preconditions.checkNotNull(chunkInfo); - ChunkBuffer data = ChunkBuffer.wrap( - putSmallFileReq.getData().asReadOnlyByteBufferList()); + ChunkBuffer data = ChunkBuffer.wrap(putSmallFileReq.getData().asReadOnlyByteBufferList()); if (dispatcherContext == null) { dispatcherContext = DispatcherContext.getHandlePutSmallFile(); } BlockID blockID = blockData.getBlockID(); - // chunks will be committed as a part of handling putSmallFile - // here. There is no need to maintain this info in openContainerBlockMap. + // Chunks will be committed as a part of handling putSmallFile here. + // There is no need to maintain this info in openContainerBlockMap.
validateChunkChecksumData(data, chunkInfo); - chunkManager - .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); + chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); chunkManager.finishWriteChunks(kvContainer, blockData); List chunks = new LinkedList<>(); @@ -992,107 +901,92 @@ ContainerCommandResponseProto handlePutSmallFile( return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Read Chunk failed", ex, - PUT_SMALL_FILE_ERROR), request); + new StorageContainerException("Read Chunk failed", ex, PUT_SMALL_FILE_ERROR), + request); } return getPutFileResponseSuccess(request, blockDataProto); } /** - * Handle Get Small File operation. Gets a data stream using a key. This - * helps in reducing the RPC overhead for small files. Calls BlockManager and - * ChunkManager to process the request. + * Handle Get Small File operation. + * Gets a data stream using a key. + * This helps in reducing the RPC overhead for small files. + * Call BlockManager and ChunkManager to process the request. */ - ContainerCommandResponseProto handleGetSmallFile( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + ContainerCommandResponseProto handleGetSmallFile(ContainerCommandRequestProto request, + KeyValueContainer kvContainer) { if (!request.hasGetSmallFile()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Small File request. trace ID: {}", - request.getTraceID()); - } + LOG.debug("Malformed Get Small File request. trace ID: {}", request.getTraceID()); return malformedRequest(request); } GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile(); try { - BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock() - .getBlockID()); + BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock().getBlockID()); BlockData responseData = blockManager.getBlock(kvContainer, blockID); ContainerProtos.ChunkInfo chunkInfoProto = null; List dataBuffers = new ArrayList<>(); - final DispatcherContext dispatcherContext - = DispatcherContext.getHandleGetSmallFile(); + final DispatcherContext dispatcherContext = DispatcherContext.getHandleGetSmallFile(); for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) { - // if the block is committed, all chunks must have been committed. + // If the block is committed, all chunks must have been committed. // Tmp chunk files won't exist here. ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk); boolean isReadChunkV0 = getReadChunkVersion(request.getGetSmallFile()) .equals(ContainerProtos.ReadChunkVersion.V0); if (isReadChunkV0) { - // For older clients, set ReadDataIntoSingleBuffer to true so that - // all the data read from chunk file is returned as a single - // ByteString. Older clients cannot process data returned as a list - // of ByteStrings. + // For older clients, + // set ReadDataIntoSingleBuffer to true + // so that all the data read from a chunk file is returned as a single ByteString. + // Older clients cannot process data returned as a list of ByteStrings. 
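          // A minimal illustrative sketch, assuming only protobuf's ByteString.EMPTY and
          // ByteString.concat, of what the single-buffer mode amounts to: the per-buffer pieces a
          // V1 client would receive are folded into one ByteString, which is the shape a V0 client
          // can handle:
          //
          //   ByteString single = ByteString.EMPTY;
          //   for (ByteString piece : dataBuffers) {
          //     single = single.concat(piece);
          //   }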
chunkInfo.setReadDataIntoSingleBuffer(true); } - ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, - chunkInfo, dispatcherContext); - dataBuffers.addAll(data.toByteStringList(byteBufferToByteString)); - chunkInfoProto = chunk; + try (ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext)) { + dataBuffers.addAll(data.toByteStringList(byteBufferToByteString)); + chunkInfoProto = chunk; + } } - metrics.incContainerBytesStats(Type.GetSmallFile, - BufferUtils.getBuffersLen(dataBuffers)); - return getGetSmallFileResponseSuccess(request, dataBuffers, - chunkInfoProto); + metrics.incContainerBytesStats(Type.GetSmallFile, BufferUtils.getBuffersLen(dataBuffers)); + return getGetSmallFileResponseSuccess(request, dataBuffers, chunkInfoProto); } catch (StorageContainerException e) { return ContainerUtils.logAndReturnError(LOG, e, request); } catch (IOException ex) { return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Write Chunk failed", ex, - GET_SMALL_FILE_ERROR), request); + new StorageContainerException("Write Chunk failed", ex, GET_SMALL_FILE_ERROR), + request); } } /** * Handle unsupported operation. */ - ContainerCommandResponseProto handleUnsupportedOp( - ContainerCommandRequestProto request) { + ContainerCommandResponseProto handleUnsupportedOp(ContainerCommandRequestProto request) { // TODO : remove all unsupported operations or handle them. return unsupportedRequest(request); } /** - * Check if container is open. Throw exception otherwise. - * @param kvContainer - * @throws StorageContainerException + * Check if the container is open. Throw exception otherwise. */ - private void checkContainerOpen(KeyValueContainer kvContainer) - throws StorageContainerException { - + private void checkContainerOpen(KeyValueContainer kvContainer) throws StorageContainerException { final State containerState = kvContainer.getContainerState(); /* * In a closing state, follower will receive transactions from leader. - * Once the leader is put to closing state, it will reject further requests - * from clients. Only the transactions which happened before the container - * in the leader goes to closing state, will arrive here even the container - * might already be in closing state here. + * Once the leader is put to closing state, it will reject further requests from clients. + * Only the transactions which happened before the container in the leader goes to closing state, + * will arrive here even the container might already be in closing state here. 
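     * For example: the leader accepts a write while the container is OPEN, then moves to CLOSING
     * and starts rejecting new client requests; that earlier write is still replicated to the
     * followers, which may themselves already be CLOSING when it arrives. This is why CLOSING
     * (and RECOVERING) containers are still accepted below.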
*/ - if (containerState == State.OPEN || containerState == State.CLOSING - || containerState == State.RECOVERING) { + if (containerState == State.OPEN || containerState == State.CLOSING || containerState == State.RECOVERING) { return; } final ContainerProtos.Result result; switch (containerState) { case QUASI_CLOSED: - result = CLOSED_CONTAINER_IO; - break; case CLOSED: result = CLOSED_CONTAINER_IO; break; @@ -1105,23 +999,19 @@ private void checkContainerOpen(KeyValueContainer kvContainer) default: result = CONTAINER_INTERNAL_ERROR; } - String msg = "Requested operation not allowed as ContainerState is " + - containerState; + String msg = "Requested operation not allowed as ContainerState is " + containerState; throw new StorageContainerException(msg, result); } @Override - public Container importContainer(ContainerData originalContainerData, - final InputStream rawContainerStream, - final TarContainerPacker packer) throws IOException { - Preconditions.checkState(originalContainerData instanceof - KeyValueContainerData, "Should be KeyValueContainerData instance"); + public Container importContainer(ContainerData originalContainerData, InputStream rawContainerStream, + TarContainerPacker packer) throws IOException { + Preconditions.checkState(originalContainerData instanceof KeyValueContainerData, + "Should be KeyValueContainerData instance"); - KeyValueContainerData containerData = new KeyValueContainerData( - (KeyValueContainerData) originalContainerData); + KeyValueContainerData containerData = new KeyValueContainerData((KeyValueContainerData) originalContainerData); - KeyValueContainer container = new KeyValueContainer(containerData, - conf); + KeyValueContainer container = new KeyValueContainer(containerData, conf); HddsVolume targetVolume = originalContainerData.getVolume(); populateContainerPathFields(container, targetVolume); @@ -1129,13 +1019,10 @@ public Container importContainer(ContainerData originalContainerData, ContainerLogger.logImported(containerData); sendICR(container); return container; - } @Override - public void exportContainer(final Container container, - final OutputStream outputStream, - final TarContainerPacker packer) + public void exportContainer(Container container, OutputStream outputStream, TarContainerPacker packer) throws IOException { final KeyValueContainer kvc = (KeyValueContainer) container; kvc.exportContainerData(outputStream, packer); @@ -1143,17 +1030,14 @@ public void exportContainer(final Container container, } @Override - public void markContainerForClose(Container container) - throws IOException { + public void markContainerForClose(Container container) throws IOException { container.writeLock(); try { - ContainerProtos.ContainerDataProto.State state = - container.getContainerState(); + ContainerProtos.ContainerDataProto.State state = container.getContainerState(); // Move the container to CLOSING state only if it's OPEN/RECOVERING if (HddsUtils.isOpenToWriteState(state)) { if (state == RECOVERING) { - containerSet.removeRecoveringContainer( - container.getContainerData().getContainerID()); + containerSet.removeRecoveringContainer(container.getContainerData().getContainerID()); ContainerLogger.logRecovered(container.getContainerData()); } container.markContainerForClose(); @@ -1166,35 +1050,32 @@ public void markContainerForClose(Container container) } @Override - public void markContainerUnhealthy(Container container, ScanResult reason) - throws StorageContainerException { + public void markContainerUnhealthy(Container container, ScanResult 
reason) throws StorageContainerException { container.writeLock(); try { long containerID = container.getContainerData().getContainerID(); if (container.getContainerState() == State.UNHEALTHY) { - LOG.debug("Call to mark already unhealthy container {} as unhealthy", - containerID); + LOG.debug("Call to mark already unhealthy container {} as unhealthy", containerID); return; } - // If the volume is unhealthy, no action is needed. The container has - // already been discarded and SCM notified. Once a volume is failed, it - // cannot be restored without a restart. + // If the volume is unhealthy, no action is necessary. + // The container has already been discarded and SCM notified. + // Once a volume is failed, it cannot be restored without a restart. HddsVolume containerVolume = container.getContainerData().getVolume(); if (containerVolume.isFailed()) { - LOG.debug("Ignoring unhealthy container {} detected on an " + - "already failed volume {}", containerID, containerVolume); + LOG.debug("Ignoring unhealthy container {} detected on an already failed volume {}", + containerID, containerVolume); return; } try { container.markContainerUnhealthy(); } catch (StorageContainerException ex) { - LOG.warn("Unexpected error while marking container {} unhealthy", - containerID, ex); + LOG.warn("Unexpected error while marking container {} unhealthy", containerID, ex); } finally { - // Even if the container file is corrupted/missing and the unhealthy - // update fails, the unhealthy state is kept in memory and sent to - // SCM. Write a corresponding entry to the container log as well. + // Even if the container file is corrupted/missing and the unhealthy update fails, + // the unhealthy state is kept in memory and sent to SCM. + // Write a corresponding entry to the container log as well. ContainerLogger.logUnhealthy(container.getContainerData(), reason); sendICR(container); } @@ -1204,8 +1085,7 @@ public void markContainerUnhealthy(Container container, ScanResult reason) } @Override - public void quasiCloseContainer(Container container, String reason) - throws IOException { + public void quasiCloseContainer(Container container, String reason) throws IOException { container.writeLock(); try { final State state = container.getContainerState(); @@ -1215,12 +1095,12 @@ public void quasiCloseContainer(Container container, String reason) } // The container has to be in CLOSING state. if (state != State.CLOSING) { - ContainerProtos.Result error = - state == State.INVALID ? INVALID_CONTAINER_STATE : - CONTAINER_INTERNAL_ERROR; + ContainerProtos.Result error = state == State.INVALID ? 
INVALID_CONTAINER_STATE : CONTAINER_INTERNAL_ERROR; throw new StorageContainerException( - "Cannot quasi close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", error); + String.format("Cannot quasi close container #%s while in %s state.", + container.getContainerData().getContainerID(), + state), + error); } container.quasiClose(); ContainerLogger.logQuasiClosed(container.getContainerData(), reason); @@ -1231,8 +1111,7 @@ public void quasiCloseContainer(Container container, String reason) } @Override - public void closeContainer(Container container) - throws IOException { + public void closeContainer(Container container) throws IOException { container.writeLock(); try { final State state = container.getContainerState(); @@ -1242,18 +1121,19 @@ public void closeContainer(Container container) } if (state == State.UNHEALTHY) { throw new StorageContainerException( - "Cannot close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", + String.format("Cannot close container #%s while in %s state.", + container.getContainerData().getContainerID(), + state), ContainerProtos.Result.CONTAINER_UNHEALTHY); } // The container has to be either in CLOSING or in QUASI_CLOSED state. if (state != State.CLOSING && state != State.QUASI_CLOSED) { - ContainerProtos.Result error = - state == State.INVALID ? INVALID_CONTAINER_STATE : - CONTAINER_INTERNAL_ERROR; + ContainerProtos.Result error = state == State.INVALID ? INVALID_CONTAINER_STATE : CONTAINER_INTERNAL_ERROR; throw new StorageContainerException( - "Cannot close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", error); + String.format("Cannot close container #%s while in %s state.", + container.getContainerData().getContainerID(), + state), + error); } container.close(); ContainerLogger.logClosed(container.getContainerData()); @@ -1264,18 +1144,16 @@ public void closeContainer(Container container) } @Override - public void deleteContainer(Container container, boolean force) - throws IOException { + public void deleteContainer(Container container, boolean force) throws IOException { deleteInternal(container, force); } /** - * Called by BlockDeletingService to delete all the chunks in a block - * before proceeding to delete the block info from DB. + * Called by {@link BlockDeletingService} + * to delete all the chunks in a block before proceeding to delete the block info from DB. */ @Override - public void deleteBlock(Container container, BlockData blockData) - throws IOException { + public void deleteBlock(Container container, BlockData blockData) throws IOException { chunkManager.deleteChunks(container, blockData); if (LOG.isDebugEnabled()) { for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) { @@ -1286,13 +1164,10 @@ public void deleteBlock(Container container, BlockData blockData) } @Override - public void deleteUnreferenced(Container container, long localID) - throws IOException { - // Since the block/chunk is already checked that is unreferenced, no - // need to lock the container here. + public void deleteUnreferenced(Container container, long localID) throws IOException { + // Since the block/chunk has already been verified to be unreferenced, there is no need to lock the container here. StringBuilder prefixBuilder = new StringBuilder(); - ContainerLayoutVersion layoutVersion = container.getContainerData().
- getLayoutVersion(); + ContainerLayoutVersion layoutVersion = container.getContainerData().getLayoutVersion(); long containerID = container.getContainerData().getContainerID(); // Only supports the default chunk/block name format now switch (layoutVersion) { @@ -1303,19 +1178,18 @@ public void deleteUnreferenced(Container container, long localID) prefixBuilder.append(localID).append("_chunk_"); break; default: - throw new IOException("Unsupported container layout version " + - layoutVersion + " for the container " + containerID); + throw new IOException(String.format("Unsupported container layout version %s for the container %s", + layoutVersion, containerID)); } String prefix = prefixBuilder.toString(); File chunkDir = ContainerUtils.getChunkDir(container.getContainerData()); - // chunkNames here is an array of file/dir name, so if we cannot find any - // matching one, it means the client did not write any chunk into the block. + // chunkNames here is an array of file/dir name, + // so if we cannot find any matching one, it means the client did not write any chunk into the block. // Since the putBlock request may fail, we don't know if the chunk exists, // thus we need to check it when receiving the request to delete such blocks String[] chunkNames = getFilesWithPrefix(prefix, chunkDir); if (chunkNames.length == 0) { - LOG.warn("Missing delete block(Container = {}, Block = {}", - containerID, localID); + LOG.warn("Missing delete block(Container = {}, Block = {}", containerID, localID); return; } for (String name: chunkNames) { @@ -1324,16 +1198,18 @@ public void deleteUnreferenced(Container container, long localID) continue; } FileUtil.fullyDelete(file); - LOG.info("Deleted unreferenced chunk/block {} in container {}", name, - containerID); + LOG.info("Deleted unreferenced chunk/block {} in container {}", name, containerID); } } + @Override public void addFinalizedBlock(Container container, long localID) { KeyValueContainer keyValueContainer = (KeyValueContainer)container; keyValueContainer.getContainerData().addToFinalizedBlockSet(localID); } + + @Override public boolean isFinalizedBlockExist(Container container, long localID) { KeyValueContainer keyValueContainer = (KeyValueContainer)container; return keyValueContainer.getContainerData().isFinalizedBlockExist(localID); @@ -1344,17 +1220,12 @@ private String[] getFilesWithPrefix(String prefix, File chunkDir) { return chunkDir.list(filter); } - private boolean logBlocksIfNonZero(Container container) - throws IOException { + private void logBlocksIfNonZero(Container container) throws IOException { boolean nonZero = false; - try (DBHandle dbHandle - = BlockUtils.getDB( - (KeyValueContainerData) container.getContainerData(), - conf)) { + try (DBHandle dbHandle = BlockUtils.getDB((KeyValueContainerData) container.getContainerData(), conf)) { StringBuilder stringBuilder = new StringBuilder(); - try (BlockIterator - blockIterator = dbHandle.getStore(). 
- getBlockIterator(container.getContainerData().getContainerID())) { + try (BlockIterator blockIterator = + dbHandle.getStore().getBlockIterator(container.getContainerData().getContainerID())) { while (blockIterator.hasNext()) { nonZero = true; stringBuilder.append(blockIterator.nextBlock()); @@ -1364,21 +1235,17 @@ private boolean logBlocksIfNonZero(Container container) } } if (nonZero) { - LOG.error("blocks in rocksDB on container delete: {}", - stringBuilder.toString()); + LOG.error("blocks in rocksDB on container delete: {}", stringBuilder); } } - return nonZero; } - private boolean logBlocksFoundOnDisk(Container container) throws IOException { + private void logBlocksFoundOnDisk(Container container) throws IOException { // List files left over - File chunksPath = new - File(container.getContainerData().getChunksPath()); + File chunksPath = new File(container.getContainerData().getChunksPath()); Preconditions.checkArgument(chunksPath.isDirectory()); boolean notEmpty = false; - try (DirectoryStream dir - = Files.newDirectoryStream(chunksPath.toPath())) { + try (DirectoryStream dir = Files.newDirectoryStream(chunksPath.toPath())) { StringBuilder stringBuilder = new StringBuilder(); for (Path block : dir) { if (notEmpty) { @@ -1391,68 +1258,57 @@ private boolean logBlocksFoundOnDisk(Container container) throws IOException { } } if (notEmpty) { - LOG.error("Files still part of the container on delete: {}", - stringBuilder.toString()); + LOG.error("Files still part of the container on delete: {}", stringBuilder); } } - return notEmpty; } - private void deleteInternal(Container container, boolean force) - throws StorageContainerException { + private void deleteInternal(Container container, boolean force) throws StorageContainerException { container.writeLock(); try { if (container.getContainerData().getVolume().isFailed()) { - // if the volume in which the container resides fails - // don't attempt to delete/move it. When a volume fails, - // failedVolumeListener will pick it up and clear the container - // from the container set. - LOG.info("Delete container issued on containerID {} which is in a " + - "failed volume. Skipping", container.getContainerData() - .getContainerID()); + // If the volume in which the container resides fails, don't attempt to delete/move it. + // When a volume fails, failedVolumeListener will pick it up and clear the container from the container set. + LOG.info("Delete container issued on containerID {} which is in a failed volume. Skipping", + container.getContainerData().getContainerID()); return; } - // If force is false, we check container state. + // If force is false, we check the container state. if (!force) { - // Check if container is open + // Check if the container is open if (container.getContainerData().isOpen()) { - throw new StorageContainerException( - "Deletion of Open Container is not allowed.", - DELETE_ON_OPEN_CONTAINER); + throw new StorageContainerException("Deletion of Open Container is not allowed.", DELETE_ON_OPEN_CONTAINER); } // Safety check that the container is empty. - // If the container is not empty, it should not be deleted unless the - // container is being forcefully deleted (which happens when - // container is unhealthy or over-replicated). + // If the container is not empty, it should not be deleted unless the container is being forcefully deleted + // (which happens when the container is unhealthy or over-replicated). 
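        // In short (a summary of the checks in this method, not new logic):
        //   force == false and the container is open          -> DELETE_ON_OPEN_CONTAINER
        //   force == false and the container still has blocks -> DELETE_ON_NON_EMPTY_CONTAINER
        //   force == true                                      -> deletion proceeds (unhealthy or
        //                                                         over-replicated replicas)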
if (container.hasBlocks()) { metrics.incContainerDeleteFailedNonEmpty(); - LOG.error("Received container deletion command for container {} but" + - " the container is not empty with blockCount {}", + LOG.error( + "Received container deletion command for container {} but the container is not empty with blockCount {}", container.getContainerData().getContainerID(), container.getContainerData().getBlockCount()); - // blocks table for future debugging. + // Blocks table for future debugging. // List blocks logBlocksIfNonZero(container); // Log chunks logBlocksFoundOnDisk(container); - throw new StorageContainerException("Non-force deletion of " + - "non-empty container is not allowed.", + throw new StorageContainerException("Non-force deletion of non-empty container is not allowed.", DELETE_ON_NON_EMPTY_CONTAINER); } } else { metrics.incContainersForceDelete(); } if (container.getContainerData() instanceof KeyValueContainerData) { - KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) container.getContainerData(); + KeyValueContainerData keyValueContainerData = (KeyValueContainerData) container.getContainerData(); HddsVolume hddsVolume = keyValueContainerData.getVolume(); // Steps to delete - // 1. container marked deleted - // 2. container is removed from container set - // 3. container db handler and content removed from db - // 4. container moved to tmp folder - // 5. container content deleted from tmp folder + // 1. Container marked deleted + // 2. Container is removed from container set + // 3. Container db handler and content removed from db + // 4. Container moved to tmp folder + // 5. Container content deleted from tmp folder try { container.markContainerForDelete(); long containerId = container.getContainerData().getContainerID(); @@ -1460,27 +1316,18 @@ private void deleteInternal(Container container, boolean force) ContainerLogger.logDeleted(container.getContainerData(), force); KeyValueContainerUtil.removeContainer(keyValueContainerData, conf); } catch (IOException ioe) { - LOG.error("Failed to move container under " + hddsVolume - .getDeletedContainerDir()); - String errorMsg = - "Failed to move container" + container.getContainerData() - .getContainerID(); - triggerVolumeScanAndThrowException(container, errorMsg, - CONTAINER_INTERNAL_ERROR); + LOG.error("Failed to move container under {}", hddsVolume.getDeletedContainerDir()); + String errorMsg = "Failed to move container" + container.getContainerData().getContainerID(); + triggerVolumeScanAndThrowException(container, errorMsg, CONTAINER_INTERNAL_ERROR); } } } catch (StorageContainerException e) { throw e; } catch (IOException e) { - // All other IO Exceptions should be treated as if the container is not - // empty as a defensive check. - LOG.error("Could not determine if the container {} is empty", - container.getContainerData().getContainerID(), e); - String errorMsg = - "Failed to read container dir" + container.getContainerData() - .getContainerID(); - triggerVolumeScanAndThrowException(container, errorMsg, - CONTAINER_INTERNAL_ERROR); + // All other IO Exceptions should be treated as if the container is not empty as a defensive check. 
+ LOG.error("Could not determine if the container {} is empty", container.getContainerData().getContainerID(), e); + String errorMsg = "Failed to read container dir" + container.getContainerData().getContainerID(); + triggerVolumeScanAndThrowException(container, errorMsg, CONTAINER_INTERNAL_ERROR); } finally { container.writeUnlock(); } @@ -1489,8 +1336,7 @@ private void deleteInternal(Container container, boolean force) sendICR(container); } - private void triggerVolumeScanAndThrowException(Container container, - String msg, ContainerProtos.Result result) + private void triggerVolumeScanAndThrowException(Container container, String msg, ContainerProtos.Result result) throws StorageContainerException { // Trigger a volume scan as exception occurred. StorageVolumeUtil.onFailure(container.getContainerData().getVolume()); @@ -1520,15 +1366,100 @@ private ContainerCommandResponseProto checkFaultInjector(ContainerCommandRequest return null; } + private ContainerCommandResponseProto handleVerifyBlock(ContainerCommandRequestProto request, + KeyValueContainer kvContainer) { + if (!request.hasVerifyBlock()) { + LOG.debug("Malformed Verify Block request. trace ID: {}", request.getTraceID()); + return malformedRequest(request); + } + + try { + BlockID blockID = BlockID.getFromProtobuf(request.getGetBlock().getBlockID()); + if (replicaIndexCheckRequired(request)) { + BlockUtils.verifyReplicaIdx(kvContainer, blockID); + } + BlockData block = BlockData.getFromProtoBuf(blockManager.getBlock(kvContainer, blockID).getProtoBufMessage()); + + KeyValueContainerData onDiskContainerData; + + String metadataPath = kvContainer.getContainerData().getMetadataPath(); + + long containerID = kvContainer.getContainerData().getContainerID(); + + File containerFile = KeyValueContainer.getContainerFile(metadataPath, containerID); + + HddsVolume volume = kvContainer.getContainerData().getVolume(); + + try { + onDiskContainerData = (KeyValueContainerData) ContainerDataYaml.readContainerFile(containerFile); + onDiskContainerData.setVolume(volume); + } catch (FileNotFoundException ex) { + return unhealthy(MISSING_CONTAINER_FILE, containerFile, ex); + } catch (IOException ex) { + return unhealthy(CORRUPT_CONTAINER_FILE, containerFile, ex); + } + + ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion(); + + for (ContainerProtos.ChunkInfo chunk : block.getChunks()) { + File chunkFile; + try { + chunkFile = layout.getChunkFile(onDiskContainerData, block.getBlockID(), chunk.getChunkName()); + } catch (IOException ex) { + return unhealthy(MISSING_CHUNK_FILE, new File(onDiskContainerData.getChunksPath()), ex); + } + + if (!chunkFile.exists()) { + // In EC, a client may write empty putBlock in padding block nodes. + // So, we need to make sure, chunk length > 0, before declaring the missing chunk file. 
+ if (!block.getChunks().isEmpty() && block.getChunks().get(0).getLen() > 0) { + return unhealthy(MISSING_CHUNK_FILE, chunkFile, + new IOException("Missing chunk file " + chunkFile.getAbsolutePath())); + } + } else if (chunk.getChecksumData().getType() != ContainerProtos.ChecksumType.NONE) { + int bytesPerChecksum = chunk.getChecksumData().getBytesPerChecksum(); + ByteBuffer buffer = BUFFER_POOL.getBuffer(bytesPerChecksum); + + ContainerScannerConfiguration containerScannerConfiguration = conf.getObject(ContainerScannerConfiguration.class); + + DataTransferThrottler throttler = new DataTransferThrottler(containerScannerConfiguration.getBandwidthPerVolume()); + + Canceler canceler = new Canceler(); + + ScanResult result = verifyChecksum(block, chunk, chunkFile, layout, buffer, throttler, canceler); + buffer.clear(); + BUFFER_POOL.returnBuffer(buffer); + if (!result.isHealthy()) { + return result; + } + } + } + } catch (Exception e) { + LOG.error("Failed to verify block. trace ID: {}", request.getTraceID(), e); + } + + return null; + } + public static Logger getLogger() { return LOG; } + /** + * Returns the instance of {@link FaultInjector} used for testing purposes. + * + * @return the instance of {@link FaultInjector}. + */ @VisibleForTesting public static FaultInjector getInjector() { return injector; } + /** + * Sets the instance of the {@link FaultInjector} for testing purposes. + * + * @param instance the instance of {@link FaultInjector} to be set + */ @VisibleForTesting public static void setInjector(FaultInjector instance) { injector = instance; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index b7d5b5fa59e..8a3d4534b71 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -49,101 +49,79 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; -/** - * Class which defines utility methods for KeyValueContainer. - */ - +/** Class which defines utility methods for KeyValueContainer. */ public final class KeyValueContainerUtil { /* Never constructed. */ private KeyValueContainerUtil() { - } - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueContainerUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(KeyValueContainerUtil.class); /** - * creates metadata path, chunks path and metadata DB for the specified - * container. + * Creates metadata path, chunks path and metadata DB for the specified container. * * @param containerMetaDataPath Path to the container's metadata directory. - * @param chunksPath Path were chunks for this container should be stored. + * @param chunksPath Path where chunks for this container should be stored. * @param dbFile Path to the container's .db file. - * @param schemaVersion The schema version of the container. If this method - * has not been updated after a schema version addition - * and does not recognize the latest SchemaVersion, an + * @param schemaVersion The schema version of the container. + * If this method has not been updated after a schema version addition + * and does not recognize the latest SchemaVersion, an * {@link IllegalArgumentException} is thrown. * @param conf The configuration to use for this container.
- * @throws IOException */ - public static void createContainerMetaData( - File containerMetaDataPath, File chunksPath, File dbFile, + public static void createContainerMetaData(File containerMetaDataPath, File chunksPath, File dbFile, String schemaVersion, ConfigurationSource conf) throws IOException { Preconditions.checkNotNull(containerMetaDataPath); Preconditions.checkNotNull(conf); if (!containerMetaDataPath.mkdirs()) { - LOG.error("Unable to create directory for metadata storage. Path: {}", - containerMetaDataPath); - throw new IOException("Unable to create directory for metadata storage." + - " Path: " + containerMetaDataPath); + LOG.error("Unable to create directory for metadata storage. Path: {}", containerMetaDataPath); + throw new IOException("Unable to create directory for metadata storage." + " Path: " + containerMetaDataPath); } if (!chunksPath.mkdirs()) { - LOG.error("Unable to create chunks directory Container {}", - chunksPath); - //clean up container metadata path and metadata db + LOG.error("Unable to create chunks directory Container {}", chunksPath); + // Clean up container metadata path and metadata db FileUtils.deleteDirectory(containerMetaDataPath); FileUtils.deleteDirectory(containerMetaDataPath.getParentFile()); - throw new IOException("Unable to create directory for data storage." + - " Path: " + chunksPath); + throw new IOException("Unable to create directory for data storage." + " Path: " + chunksPath); } DatanodeStore store; if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V1)) { - store = new DatanodeStoreSchemaOneImpl(conf, dbFile.getAbsolutePath(), - false); + store = new DatanodeStoreSchemaOneImpl(conf, dbFile.getAbsolutePath(), false); } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V2)) { - store = new DatanodeStoreSchemaTwoImpl(conf, dbFile.getAbsolutePath(), - false); + store = new DatanodeStoreSchemaTwoImpl(conf, dbFile.getAbsolutePath(), false); } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { - // We don't create per-container store for schema v3 containers, - // they should use per-volume db store. + // We don't create per-container store for schema v3 containers, they should use per-volume db store. return; } else { - throw new IllegalArgumentException( - "Unrecognized schema version for container: " + schemaVersion); + throw new IllegalArgumentException("Unrecognized schema version for container: " + schemaVersion); } - //add db handler into cache + // Add db handler into cache BlockUtils.addDB(store, dbFile.getAbsolutePath(), conf, schemaVersion); } /** - * remove Container 1. remove db, 2. move to tmp directory. + * Remove Container 1. remove db, 2. move to tmp directory. * * @param containerData - Data of the container to remove. - * @throws IOException */ - public static void removeContainer( - KeyValueContainerData containerData, ConfigurationSource conf) - throws IOException { + public static void removeContainer(KeyValueContainerData containerData, ConfigurationSource conf) throws IOException { Preconditions.checkNotNull(containerData); KeyValueContainerUtil.removeContainerDB(containerData, conf); - KeyValueContainerUtil.moveToDeletedContainerDir(containerData, - containerData.getVolume()); + KeyValueContainerUtil.moveToDeletedContainerDir(containerData, containerData.getVolume()); } /** - * remove Container db, the Level DB file. + * Remove Container db, the Level DB file. * * @param containerData - Data of the container to remove. * @param conf - configuration of the cluster. 
- * @throws IOException */ - public static void removeContainerDB( - KeyValueContainerData containerData, ConfigurationSource conf) + public static void removeContainerDB(KeyValueContainerData containerData, ConfigurationSource conf) throws IOException { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { // DB failure is catastrophic, the disk needs to be replaced. @@ -151,36 +129,31 @@ public static void removeContainerDB( try { BlockUtils.removeContainerFromDB(containerData, conf); } catch (IOException ex) { - LOG.error("DB failure, unable to remove container. " + - "Disk need to be replaced.", ex); + LOG.error("DB failure, unable to remove container. Disk need to be replaced.", ex); throw ex; } } else { - // Close the DB connection and remove the DB handler from cache + // Close the DB connection and remove the DB handler from the cache BlockUtils.removeDB(containerData, conf); } } /** * Returns if there are no blocks in the container. + * * @param store DBStore * @param containerData Container to check - * @param bCheckChunksFilePath Whether to check chunksfilepath has any blocks + * @param bCheckChunksFilePath Whether to check chunks filepath has any blocks * @return true if the directory containing blocks is empty - * @throws IOException */ - public static boolean noBlocksInContainer(DatanodeStore store, - KeyValueContainerData - containerData, - boolean bCheckChunksFilePath) - throws IOException { + public static boolean noBlocksInContainer(DatanodeStore store, KeyValueContainerData containerData, + boolean bCheckChunksFilePath) throws IOException { Preconditions.checkNotNull(store); Preconditions.checkNotNull(containerData); if (containerData.isOpen()) { return false; } - try (BlockIterator blockIterator = - store.getBlockIterator(containerData.getContainerID())) { + try (BlockIterator blockIterator = store.getBlockIterator(containerData.getContainerID())) { if (blockIterator.hasNext()) { return false; } @@ -188,8 +161,7 @@ public static boolean noBlocksInContainer(DatanodeStore store, if (bCheckChunksFilePath) { File chunksPath = new File(containerData.getChunksPath()); Preconditions.checkArgument(chunksPath.isDirectory()); - try (DirectoryStream dir - = Files.newDirectoryStream(chunksPath.toPath())) { + try (DirectoryStream dir = Files.newDirectoryStream(chunksPath.toPath())) { return !dir.iterator().hasNext(); } } @@ -197,16 +169,12 @@ public static boolean noBlocksInContainer(DatanodeStore store, } /** - * Parse KeyValueContainerData and verify checksum. Set block related - * metadata like block commit sequence id, block count, bytes used and - * pending delete block count and delete transaction id. - * @param kvContainerData - * @param config - * @throws IOException + * Parse KeyValueContainerData and verify checksum. + * Set block related metadata like a block commit sequence id, block count, + * bytes used and pending delete block count and delete transaction id. 
*/ - public static void parseKVContainerData(KeyValueContainerData kvContainerData, - ConfigurationSource config) throws IOException { - + public static void parseKVContainerData(KeyValueContainerData kvContainerData, ConfigurationSource config) + throws IOException { long containerID = kvContainerData.getContainerID(); // Verify Checksum @@ -218,25 +186,20 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, kvContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V1); } - File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( - kvContainerData); + File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(kvContainerData); if (!dbFile.exists()) { - LOG.error("Container DB file is missing for ContainerID {}. " + - "Skipping loading of this container.", containerID); + LOG.error("Container DB file is missing for ContainerID {}. Skipping loading of this container.", containerID); // Don't further process this container, as it is missing db file. - throw new IOException("Container DB file is missing for containerID " - + containerID); + throw new IOException("Container DB file is missing for containerID " + containerID); } kvContainerData.setDbFile(dbFile); - DatanodeConfiguration dnConf = - config.getObject(DatanodeConfiguration.class); + DatanodeConfiguration dnConf = config.getObject(DatanodeConfiguration.class); boolean bCheckChunksFilePath = dnConf.getCheckEmptyContainerDir(); if (kvContainerData.hasSchema(OzoneConsts.SCHEMA_V3)) { try (DBHandle db = BlockUtils.getDB(kvContainerData, config)) { - populateContainerMetadata(kvContainerData, - db.getStore(), bCheckChunksFilePath); + populateContainerMetadata(kvContainerData, db.getStore(), bCheckChunksFilePath); } return; } @@ -245,94 +208,76 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, DatanodeStore store = null; try { try { - boolean readOnly = ContainerInspectorUtil.isReadOnly( - ContainerProtos.ContainerType.KeyValueContainer); - store = BlockUtils.getUncachedDatanodeStore( - kvContainerData, config, readOnly); + boolean readOnly = ContainerInspectorUtil.isReadOnly(ContainerProtos.ContainerType.KeyValueContainer); + store = BlockUtils.getUncachedDatanodeStore(kvContainerData, config, readOnly); } catch (IOException e) { - // If an exception is thrown, then it may indicate the RocksDB is - // already open in the container cache. As this code is only executed at - // DN startup, this should only happen in the tests. + // If an exception is thrown, then it may indicate the RocksDB is already open in the container cache. + // As this code is only executed at DN startup, this should only happen in the tests. cachedDB = BlockUtils.getDB(kvContainerData, config); store = cachedDB.getStore(); - LOG.warn("Attempt to get an uncached RocksDB handle failed and an " + - "instance was retrieved from the cache. This should only happen " + - "in tests"); + LOG.warn("Attempt to get an uncached RocksDB handle failed and an instance was retrieved from the cache." + + " This should only happen in tests"); } populateContainerMetadata(kvContainerData, store, bCheckChunksFilePath); } finally { if (cachedDB != null) { - // If we get a cached instance, calling close simply decrements the - // reference count. + // If we get a cached instance, calling close simply decrements the reference count. 
cachedDB.close(); } else if (store != null) { - // We only stop the store if cacheDB is null, as otherwise we would - // close the rocksDB handle in the cache and the next reader would fail + // We only stop the store if cacheDB is null, + // as otherwise we would close the rocksDB handle in the cache and the next reader would fail try { store.stop(); } catch (IOException e) { throw e; } catch (Exception e) { - throw new RuntimeException("Unexpected exception closing the " + - "RocksDB when loading containers", e); + throw new RuntimeException("Unexpected exception closing the RocksDB when loading containers", e); } } } } - private static void populateContainerMetadata( - KeyValueContainerData kvContainerData, DatanodeStore store, - boolean bCheckChunksFilePath) - throws IOException { + private static void populateContainerMetadata(KeyValueContainerData kvContainerData, DatanodeStore store, + boolean bCheckChunksFilePath) throws IOException { boolean isBlockMetadataSet = false; Table metadataTable = store.getMetadataTable(); // Set pending deleted block count. - Long pendingDeleteBlockCount = - metadataTable.get(kvContainerData - .getPendingDeleteBlockCountKey()); + Long pendingDeleteBlockCount = metadataTable.get(kvContainerData.getPendingDeleteBlockCountKey()); if (pendingDeleteBlockCount != null) { - kvContainerData.incrPendingDeletionBlocks( - pendingDeleteBlockCount); + kvContainerData.incrPendingDeletionBlocks(pendingDeleteBlockCount); } else { // Set pending deleted block count. - MetadataKeyFilters.KeyPrefixFilter filter = - kvContainerData.getDeletingBlockKeyFilter(); + MetadataKeyFilters.KeyPrefixFilter filter = kvContainerData.getDeletingBlockKeyFilter(); int numPendingDeletionBlocks = store.getBlockDataTable() .getSequentialRangeKVs(kvContainerData.startKeyEmpty(), - Integer.MAX_VALUE, kvContainerData.containerPrefix(), + Integer.MAX_VALUE, + kvContainerData.containerPrefix(), filter).size(); kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks); } // Set delete transaction id. - Long delTxnId = - metadataTable.get(kvContainerData.getLatestDeleteTxnKey()); + Long delTxnId = metadataTable.get(kvContainerData.getLatestDeleteTxnKey()); if (delTxnId != null) { - kvContainerData - .updateDeleteTransactionId(delTxnId); + kvContainerData.updateDeleteTransactionId(delTxnId); } // Set BlockCommitSequenceId. - Long bcsId = metadataTable.get( - kvContainerData.getBcsIdKey()); + Long bcsId = metadataTable.get(kvContainerData.getBcsIdKey()); if (bcsId != null) { - kvContainerData - .updateBlockCommitSequenceId(bcsId); + kvContainerData.updateBlockCommitSequenceId(bcsId); } - // Set bytes used. - // commitSpace for Open Containers relies on usedBytes - Long bytesUsed = - metadataTable.get(kvContainerData.getBytesUsedKey()); + // Set bytes used. commitSpace for Open Containers relies on usedBytes + Long bytesUsed = metadataTable.get(kvContainerData.getBytesUsedKey()); if (bytesUsed != null) { isBlockMetadataSet = true; kvContainerData.setBytesUsed(bytesUsed); } // Set block count. - Long blockCount = metadataTable.get( - kvContainerData.getBlockCountKey()); + Long blockCount = metadataTable.get(kvContainerData.getBlockCountKey()); if (blockCount != null) { isBlockMetadataSet = true; kvContainerData.setBlockCount(blockCount); @@ -341,8 +286,7 @@ private static void populateContainerMetadata( initializeUsedBytesAndBlockCount(store, kvContainerData); } - // If the container is missing a chunks directory, possibly due to the - // bug fixed by HDDS-6235, create it here. 
+ // If the container is missing a chunks directory, possibly due to the bug fixed by HDDS-6235, create it here. File chunksDir = new File(kvContainerData.getChunksPath()); if (!chunksDir.exists()) { Files.createDirectories(chunksDir.toPath()); @@ -352,9 +296,8 @@ private static void populateContainerMetadata( kvContainerData.markAsEmpty(); } - // Run advanced container inspection/repair operations if specified on - // startup. If this method is called but not as a part of startup, - // The inspectors will be unloaded and this will be a no-op. + // Run advanced container inspection/repair operations if specified on startup. + // If this method is called but not as a part of startup, the inspectors will be unloaded and this will be a no-op. ContainerInspectorUtil.process(kvContainerData, store); // Load finalizeBlockLocalIds for container in memory. @@ -363,12 +306,11 @@ private static void populateContainerMetadata( /** * Loads finalizeBlockLocalIds for container in memory. + * * @param kvContainerData - KeyValueContainerData * @param store - DatanodeStore - * @throws IOException */ - private static void populateContainerFinalizeBlock( - KeyValueContainerData kvContainerData, DatanodeStore store) + private static void populateContainerFinalizeBlock(KeyValueContainerData kvContainerData, DatanodeStore store) throws IOException { if (store.getFinalizeBlocksTable() != null) { try (BlockIterator iter = @@ -383,20 +325,15 @@ private static void populateContainerFinalizeBlock( /** * Initialize bytes used and block count. - * @param kvData - * @throws IOException */ private static void initializeUsedBytesAndBlockCount(DatanodeStore store, KeyValueContainerData kvData) throws IOException { - final String errorMessage = "Failed to parse block data for" + - " Container " + kvData.getContainerID(); + final String errorMessage = "Failed to parse block data for Container " + kvData.getContainerID(); long blockCount = 0; long usedBytes = 0; try (BlockIterator blockIter = - store.getBlockIterator(kvData.getContainerID(), - kvData.getUnprefixedKeyFilter())) { - + store.getBlockIterator(kvData.getContainerID(), kvData.getUnprefixedKeyFilter())) { while (blockIter.hasNext()) { blockCount++; try { @@ -409,9 +346,7 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store, // Count all deleting blocks. try (BlockIterator blockIter = - store.getBlockIterator(kvData.getContainerID(), - kvData.getDeletingBlockKeyFilter())) { - + store.getBlockIterator(kvData.getContainerID(), kvData.getDeletingBlockKeyFilter())) { while (blockIter.hasNext()) { blockCount++; try { @@ -425,12 +360,26 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store, kvData.setBlockCount(blockCount); } + /** + * Calculates the total length of all chunks in a given block. + * + * @param block BlockData object containing chunks whose lengths need to be summed. + * @return Total length of all chunks within the block. + * @throws IOException if an I/O error occurs while accessing chunk lengths. + */ public static long getBlockLength(BlockData block) throws IOException { return block.getChunks().stream() .mapToLong(ContainerProtos.ChunkInfo::getLen) .sum(); } + /** + * Compares two schema versions to determine if they are the same. + * + * @param schema the first schema version to compare. If {@code null}, defaults to {@link OzoneConsts#SCHEMA_V1}. + * @param other the second schema version to compare. If {@code null}, defaults to {@link OzoneConsts#SCHEMA_V1}. 
+ * @return {@code true} if both schema versions are equal, {@code false} otherwise. + */ public static boolean isSameSchemaVersion(String schema, String other) { String effective1 = schema != null ? schema : SCHEMA_V1; String effective2 = other != null ? other : SCHEMA_V1; @@ -438,59 +387,58 @@ public static boolean isSameSchemaVersion(String schema, String other) { } /** - * Moves container directory to a new location - * under "/hdds//tmp/deleted-containers" - * and updates metadata and chunks path. - * Containers will be moved under it before getting deleted - * to avoid, in case of failure, having artifact leftovers - * on the default container path on the disk. - * + * Moves container directory to a new location under "/hdds//tmp/deleted-containers" + * and updates metadata and chunk path. + * Containers will be moved under it before getting deleted to avoid, in case of failure, + * having artifact leftovers on the default container path on the disk. + *

* Delete operation for Schema < V3
- * 1. Container is marked DELETED
- * 2. Container is removed from memory container set
- * 3. Container DB handler from cache is removed and closed
- * 4. Container directory renamed to tmp directory.
- * 5. Container is deleted from tmp directory.
- *
+ *   1. Container is marked DELETED
+ *   2. Container is removed from memory container set
+ *   3. Container DB handler from cache is removed and closed
+ *   4. Container directory renamed to tmp directory.
+ *   5. Container is deleted from tmp directory.
+ *
* Delete operation for Schema V3
- * 1. Container is marked DELETED
- * 2. Container is removed from memory container set
- * 3. Container from DB is removed
- * 4. Container directory renamed to tmp directory.
- * 5. Container is deleted from tmp directory.
- *
- * @param keyValueContainerData
- * @return true if renaming was successful
+ *   1. Container is marked DELETED
+ *   2. Container is removed from memory container set
+ *   3. Container from DB is removed
+ *   4. Container directory renamed to tmp directory.
+ *   5. Container is deleted from tmp directory.
+ *
*/ - public static void moveToDeletedContainerDir( - KeyValueContainerData keyValueContainerData, - HddsVolume hddsVolume) throws IOException { + public static void moveToDeletedContainerDir(KeyValueContainerData keyValueContainerData, HddsVolume hddsVolume) + throws IOException { String containerPath = keyValueContainerData.getContainerPath(); File container = new File(containerPath); - Path destinationDirPath = getTmpDirectoryPath(keyValueContainerData, - hddsVolume); + Path destinationDirPath = getTmpDirectoryPath(keyValueContainerData, hddsVolume); File destinationDirFile = destinationDirPath.toFile(); - // If a container by the same name was moved to the delete directory but - // the final delete failed, clear it out before adding another container - // with the same name. + // If a container by the same name was moved to the delete directory but the final delete failed, + // clear it out before adding another container with the same name. if (destinationDirFile.exists()) { FileUtils.deleteDirectory(destinationDirFile); } Files.move(container.toPath(), destinationDirPath); LOG.debug("Container {} has been successfully moved under {}", - container.getName(), hddsVolume.getDeletedContainerDir()); + container.getName(), + hddsVolume.getDeletedContainerDir()); } - public static Path getTmpDirectoryPath( - KeyValueContainerData keyValueContainerData, - HddsVolume hddsVolume) { + /** + * Retrieves the temporary directory path for a container. + * + * @param keyValueContainerData the data associated with the KeyValue container + * @param hddsVolume the volume where the container resides + * @return the path representing the temporary directory for the container + */ + public static Path getTmpDirectoryPath(KeyValueContainerData keyValueContainerData, HddsVolume hddsVolume) { String containerPath = keyValueContainerData.getContainerPath(); File container = new File(containerPath); String containerDirName = container.getName(); - Path destinationDirPath = hddsVolume.getDeletedContainerDir().toPath() - .resolve(Paths.get(containerDirName)); - return destinationDirPath; + return hddsVolume.getDeletedContainerDir().toPath().resolve(Paths.get(containerDirName)); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 6232b843567..93720694999 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; @@ -48,15 +49,13 @@ import org.slf4j.LoggerFactory; /** - * This class is for performing block related operations on the KeyValue - * Container. + * This class is for performing block related operations on the KeyValue Container. 
*/ public class BlockManagerImpl implements BlockManager { - public static final Logger LOG = - LoggerFactory.getLogger(BlockManagerImpl.class); + public static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class); - private ConfigurationSource config; + private final ConfigurationSource config; private static final String DB_NULL_ERR_MSG = "DB cannot be null here"; public static final String FULL_CHUNK = "full"; @@ -91,66 +90,67 @@ public long putBlock(Container container, BlockData data) throws IOException { } @Override - public long putBlock(Container container, BlockData data, - boolean endOfBlock) throws IOException { - return persistPutBlock( - (KeyValueContainer) container, - data, endOfBlock); + public long putBlock(Container container, BlockData data, boolean endOfBlock) throws IOException { + return persistPutBlock((KeyValueContainer) container, data, endOfBlock); } - public long persistPutBlock(KeyValueContainer container, - BlockData data, boolean endOfBlock) - throws IOException { - Preconditions.checkNotNull(data, "BlockData cannot be null for put " + - "operation."); - Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + - "cannot be negative"); + /** + * Persists a block into the key-value container. This method is responsible + * for updating the state of the container along with the block metadata. It + * ensures that the block data is correctly stored in the associated DB and + * handles potential conflicts during the process. + * + * @param container The key-value container where the block data will be stored. + * @param data The block data to be persisted. + * @param endOfBlock A flag indicating whether this is the final chunk of the block. + * @return The size of the block that was persisted. + * @throws IOException If an I/O error occurs during the operation. + */ + public long persistPutBlock(KeyValueContainer container, BlockData data, boolean endOfBlock) throws IOException { + Preconditions.checkNotNull(data, "BlockData cannot be null for put operation."); + Preconditions.checkState(data.getContainerID() >= 0, "Container Id cannot be negative"); KeyValueContainerData containerData = container.getContainerData(); - // We are not locking the key manager since LevelDb serializes all actions - // against a single DB. We rely on DB level locking to avoid conflicts. + // We are not locking the key manager since LevelDb serializes all actions against a single DB. + // We rely on DB level locking to avoid conflicts. try (DBHandle db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. + // This is a post-condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); long bcsId = data.getBlockCommitSequenceId(); long containerBCSId = containerData.getBlockCommitSequenceId(); - // default blockCommitSequenceId for any block is 0. It the putBlock - // request is not coming via Ratis(for test scenarios), it will be 0. + // Default blockCommitSequenceId for any block is 0. + // It the putBlock request is not coming via Ratis(for test scenarios), it will be 0. // In such cases, we should overwrite the block as well if ((bcsId != 0) && (bcsId <= containerBCSId)) { - // Since the blockCommitSequenceId stored in the db is greater than - // equal to blockCommitSequenceId to be updated, it means the putBlock - // transaction is reapplied in the ContainerStateMachine on restart. 
+ // Since the blockCommitSequenceId stored in the db is greater than equal to blockCommitSequenceId + // to be updated, + // it means the putBlock transaction is reapplied in the ContainerStateMachine on restart. // It also implies that the given block must already exist in the db. - // just log and return - LOG.debug("blockCommitSequenceId {} in the Container Db is greater" - + " than the supplied value {}. Ignoring it", - containerBCSId, bcsId); + // Log and return + LOG.debug("blockCommitSequenceId {} in the Container Db is greater than the supplied value {}. Ignoring it", + containerBCSId, + bcsId); return data.getSize(); } - // Check if the block is present in the pendingPutBlockCache for the - // container to determine whether the blockCount is already incremented - // for this block in the DB or not. + // Check if the block is present in the pendingPutBlockCache for the container to determine + // whether the blockCount is already incremented for this block in the DB or not. long localID = data.getLocalID(); boolean isBlockInCache = container.isBlockInPendingPutBlockCache(localID); boolean incrBlockCount = false; - // update the blockData as well as BlockCommitSequenceId here - try (BatchOperation batch = db.getStore().getBatchHandler() - .initBatchOperation()) { - // If the block does not exist in the pendingPutBlockCache of the - // container, then check the DB to ascertain if it exists or not. - // If block exists in cache, blockCount should not be incremented. + // Update the blockData as well as BlockCommitSequenceId here + try (BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) { + // If the block does not exist in the pendingPutBlockCache of the container, + // then check the DB to figure out if it exists or not. + // If a block exists in cache, blockCount should not be incremented. if (!isBlockInCache) { - if (db.getStore().getBlockDataTable().get( - containerData.getBlockKey(localID)) == null) { - // Block does not exist in DB => blockCount needs to be - // incremented when the block is added into DB. + if (db.getStore().getBlockDataTable().get(containerData.getBlockKey(localID)) == null) { + // Block does not exist in DB ⇒ blockCount needs to be incremented when the block is added into DB. incrBlockCount = true; } } @@ -158,33 +158,31 @@ public long persistPutBlock(KeyValueContainer container, boolean incrementalEnabled = true; if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) { if (isPartialChunkList(data)) { - throw new StorageContainerException("DataNode has not finalized " + - "upgrading to a version that supports incremental chunk list.", UNSUPPORTED_REQUEST); + throw new StorageContainerException("DataNode has not finalized upgrading to a version" + + " that supports incremental chunk list.", UNSUPPORTED_REQUEST); } incrementalEnabled = false; } - db.getStore().putBlockByID(batch, incrementalEnabled, localID, data, - containerData, endOfBlock); + db.getStore().putBlockByID(batch, incrementalEnabled, localID, data, containerData, endOfBlock); if (bcsId != 0) { - db.getStore().getMetadataTable().putWithBatch( - batch, containerData.getBcsIdKey(), bcsId); + db.getStore().getMetadataTable().putWithBatch(batch, containerData.getBcsIdKey(), bcsId); } - // Set Bytes used, this bytes used will be updated for every write and - // only get committed for every put block. 
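The cache checks described above reduce to a small decision: increment the container's block count only the first time a block is seen, and use the pending-put-block cache to skip the DB existence lookup on repeated putBlock calls for the same block. A simplified model of that decision, with a HashSet standing in for the cache and a HashMap standing in for the RocksDB block table (names are illustrative, not the real API):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Simplified model of the "should blockCount be incremented?" decision:
// a block already seen in the pending-put-block cache was counted on an
// earlier putBlock; otherwise the (mock) block table decides.
public final class BlockCountSketch {

  private final Set<Long> pendingPutBlockCache = new HashSet<>();
  private final Map<Long, String> blockTable = new HashMap<>(); // localID -> serialized block data
  private long blockCount;

  long putBlock(long localID, String blockData, boolean endOfBlock) {
    boolean inCache = pendingPutBlockCache.contains(localID);
    boolean incrBlockCount = !inCache && !blockTable.containsKey(localID);

    blockTable.put(localID, blockData);          // batched DB write in the real code
    if (incrBlockCount) {
      blockCount++;                              // committed together with the batch
    }

    if (!inCache && !endOfBlock) {
      pendingPutBlockCache.add(localID);         // later putBlocks skip the DB lookup
    } else if (inCache && endOfBlock) {
      pendingPutBlockCache.remove(localID);      // no more writes expected for this block
    }
    return blockCount;
  }

  public static void main(String[] args) {
    BlockCountSketch c = new BlockCountSketch();
    c.putBlock(1L, "v1", false);                    // first partial putBlock: count becomes 1
    c.putBlock(1L, "v2", false);                    // repeat putBlock: cache hit, count stays 1
    System.out.println(c.putBlock(1L, "v3", true)); // final putBlock: prints 1
  }
}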
In this way, when datanode - is up, for computation of disk space by container only committed - block length is used, And also on restart the blocks committed to DB - is only used to compute the bytes used. This is done to keep the - current behavior and avoid DB write during write chunk operation. - db.getStore().getMetadataTable().putWithBatch( - batch, containerData.getBytesUsedKey(), - containerData.getBytesUsed()); + // Set Bytes used, + // these bytes used will be updated for every write and only get committed for every put block. + // In this way, when datanode is up, + // for computation of disk space by container only committed block length is used, + // and also on restart the blocks committed to DB is only used to compute the bytes used. + // This is done to keep the current behavior and avoid DB write during write chunk operation. + db.getStore() + .getMetadataTable() + .putWithBatch(batch, containerData.getBytesUsedKey(), containerData.getBytesUsed()); // Set Block Count for a container. if (incrBlockCount) { - db.getStore().getMetadataTable().putWithBatch( - batch, containerData.getBlockCountKey(), - containerData.getBlockCount() + 1); + db.getStore() + .getMetadataTable() + .putWithBatch(batch, containerData.getBlockCountKey(), containerData.getBlockCount() + 1); } db.getStore().getBatchHandler().commitBatchOperation(batch); @@ -194,55 +192,48 @@ public long persistPutBlock(KeyValueContainer container, container.updateBlockCommitSequenceId(bcsId); } - // Increment block count and add block to pendingPutBlockCache - // in-memory after the DB update. + // Increment block count and add block to pendingPutBlockCache in-memory after the DB update. if (incrBlockCount) { containerData.incrBlockCount(); } // If the Block is not in PendingPutBlockCache (and it is not endOfBlock), - // add it there so that subsequent putBlock calls for this block do not - // have to read the DB to check for block existence + // add it there + // so that later putBlock calls for this block do not have to read the DB to check for block existence if (!isBlockInCache && !endOfBlock) { container.addToPendingPutBlockCache(localID); } else if (isBlockInCache && endOfBlock) { - // Remove the block from the PendingPutBlockCache as there would not - // be any more writes to this block + // Remove the block from the PendingPutBlockCache as there would not be any more writes to this block container.removeFromPendingPutBlockCache(localID); } - if (LOG.isDebugEnabled()) { - LOG.debug( - "Block " + data.getBlockID() + " successfully committed with bcsId " - + bcsId + " chunk size " + data.getChunks().size()); - } + LOG.debug("Block {} successfully committed with bcsId {} chunk size {}", + data.getBlockID(), + bcsId, + data.getChunks().size()); return data.getSize(); } } @Override - public void finalizeBlock(Container container, BlockID blockId) - throws IOException { - Preconditions.checkNotNull(blockId, "blockId cannot " + - "be null for finalizeBlock operation."); - Preconditions.checkState(blockId.getContainerID() >= 0, - "Container Id cannot be negative"); + public void finalizeBlock(Container container, BlockID blockId) throws IOException { + Preconditions.checkNotNull(blockId, "blockId cannot be null for finalizeBlock operation."); + Preconditions.checkState(blockId.getContainerID() >= 0, "Container Id cannot be negative"); KeyValueContainer kvContainer = (KeyValueContainer)container; long localID = blockId.getLocalID(); kvContainer.removeFromPendingPutBlockCache(localID); - try (DBHandle db =
BlockUtils.getDB(kvContainer.getContainerData(), - config)) { + try (DBHandle db = BlockUtils.getDB(kvContainer.getContainerData(), config)) { // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); // persist finalizeBlock - try (BatchOperation batch = db.getStore().getBatchHandler() - .initBatchOperation()) { - db.getStore().getFinalizeBlocksTable().putWithBatch(batch, - kvContainer.getContainerData().getBlockKey(localID), localID); + try (BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) { + db.getStore() + .getFinalizeBlocksTable() + .putWithBatch(batch, kvContainer.getContainerData().getBlockKey(localID), localID); db.getStore().getBatchHandler().commitBatchOperation(batch); mergeLastChunkForBlockFinalization(blockId, db, kvContainer, batch, localID); @@ -250,48 +241,41 @@ public void finalizeBlock(Container container, BlockID blockId) } } - private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db, - KeyValueContainer kvContainer, BatchOperation batch, - long localID) throws IOException { - // if the chunk list of the block to be finalized was written incremental, - // merge the last chunk into block data. + private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db, KeyValueContainer kvContainer, + BatchOperation batch, long localID) throws IOException { + // If the chunk list of the block to be finalized was written incremental, merge the last chunk into block data. BlockData blockData = getBlockByID(db, blockId, kvContainer.getContainerData()); if (blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) { BlockData emptyBlockData = new BlockData(blockId); emptyBlockData.addMetadata(INCREMENTAL_CHUNK_LIST, ""); - db.getStore().putBlockByID(batch, true, localID, - emptyBlockData, kvContainer.getContainerData(), true); + db.getStore() + .putBlockByID(batch, true, localID, emptyBlockData, kvContainer.getContainerData(), true); } } @Override public BlockData getBlock(Container container, BlockID blockID) throws IOException { BlockUtils.verifyBCSId(container, blockID); - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); + KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData(); long bcsId = blockID.getBlockCommitSequenceId(); try (DBHandle db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. + // This is a post-condition that acts as a hint to the user. Should never fail. 
Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); BlockData blockData = getBlockByID(db, blockID, containerData); long id = blockData.getBlockID().getBlockCommitSequenceId(); if (id < bcsId) { - throw new StorageContainerException( - "bcsId " + bcsId + " mismatches with existing block Id " - + id + " for block " + blockID + ".", BCSID_MISMATCH); + throw new StorageContainerException("bcsId " + bcsId + " mismatches with existing block Id " + id + + " for block " + blockID + ".", BCSID_MISMATCH); } return blockData; } } @Override - public long getCommittedBlockLength(Container container, BlockID blockID) - throws IOException { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); + public long getCommittedBlockLength(Container container, BlockID blockID) throws IOException { + KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData(); try (DBHandle db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. + // This is a post-condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); BlockData blockData = getBlockByID(db, blockID, containerData); @@ -304,6 +288,7 @@ public int getDefaultReadBufferCapacity() { return defaultReadBufferCapacity; } + @Override public int getReadMappedBufferThreshold() { return readMappedBufferThreshold; } @@ -315,42 +300,34 @@ public int getReadMappedBufferMaxCount() { /** * Deletes an existing block. - * As Deletion is handled by BlockDeletingService, - * UnsupportedOperationException is thrown always + * As Deletion is handled by {@link BlockDeletingService}, {@link UnsupportedOperationException} is thrown always. * * @param container - Container from which block need to be deleted. * @param blockID - ID of the block. */ @Override - public void deleteBlock(Container container, BlockID blockID) throws - IOException { + public void deleteBlock(Container container, BlockID blockID) { // Block/ Chunk Deletion is handled by BlockDeletingService. - // SCM sends Block Deletion commands directly to Datanodes and not - // through a Pipeline. + // SCM sends Block Deletion commands directly to Datanodes and not through a Pipeline. throw new UnsupportedOperationException(); } @Override - public List listBlock(Container container, long startLocalID, int - count) throws IOException { + public List listBlock(Container container, long startLocalID, int count) throws IOException { Preconditions.checkNotNull(container, "container cannot be null"); - Preconditions.checkState(startLocalID >= 0 || startLocalID == -1, - "startLocal ID cannot be negative"); - Preconditions.checkArgument(count > 0, - "Count must be a positive number."); + Preconditions.checkState(startLocalID >= 0 || startLocalID == -1, "startLocal ID cannot be negative"); + Preconditions.checkArgument(count > 0, "Count must be a positive number."); container.readLock(); try { - List result = null; - KeyValueContainerData cData = - (KeyValueContainerData) container.getContainerData(); + List result; + KeyValueContainerData cData = (KeyValueContainerData) container.getContainerData(); try (DBHandle db = BlockUtils.getDB(cData, config)) { result = new ArrayList<>(); - String startKey = (startLocalID == -1) ? cData.startKeyEmpty() + String startKey = (startLocalID == -1) + ? 
cData.startKeyEmpty() : cData.getBlockKey(startLocalID); - List> range = - db.getStore().getBlockDataTable() - .getSequentialRangeKVs(startKey, count, - cData.containerPrefix(), cData.getUnprefixedKeyFilter()); + List> range = db.getStore().getBlockDataTable() + .getSequentialRangeKVs(startKey, count, cData.containerPrefix(), cData.getUnprefixedKeyFilter()); for (Table.KeyValue entry : range) { result.add(db.getStore().getCompleteBlockData(entry.getValue(), null, entry.getKey())); } @@ -369,8 +346,7 @@ public void shutdown() { BlockUtils.shutdownCache(config); } - private BlockData getBlockByID(DBHandle db, BlockID blockID, - KeyValueContainerData containerData) throws IOException { + private BlockData getBlockByID(DBHandle db, BlockID blockID, KeyValueContainerData containerData) throws IOException { String blockKey = containerData.getBlockKey(blockID.getLocalID()); return db.getStore().getBlockByID(blockID, blockKey); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java index 6a1d5533cf2..0139e286eb1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java @@ -52,64 +52,57 @@ */ public class ChunkManagerDispatcher implements ChunkManager { - private static final Logger LOG = - LoggerFactory.getLogger(ChunkManagerDispatcher.class); - - private final Map handlers - = new EnumMap<>(ContainerLayoutVersion.class); - - ChunkManagerDispatcher(boolean sync, BlockManager manager, - VolumeSet volSet) { - handlers.put(FILE_PER_CHUNK, - new FilePerChunkStrategy(sync, manager, volSet)); - handlers.put(FILE_PER_BLOCK, - new FilePerBlockStrategy(sync, manager, volSet)); + private static final Logger LOG = LoggerFactory.getLogger(ChunkManagerDispatcher.class); + + private final Map handlers = new EnumMap<>(ContainerLayoutVersion.class); + + ChunkManagerDispatcher(boolean sync, BlockManager manager, VolumeSet volSet) { + handlers.put(FILE_PER_CHUNK, new FilePerChunkStrategy(sync, manager, volSet)); + handlers.put(FILE_PER_BLOCK, new FilePerBlockStrategy(sync, manager)); } @Override - public void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ChunkBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { + public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, + DispatcherContext dispatcherContext) throws StorageContainerException { - selectHandler(container) - .writeChunk(container, blockID, info, data, dispatcherContext); + selectHandler(container).writeChunk(container, blockID, info, data, dispatcherContext); } - public String streamInit(Container container, BlockID blockID) - throws StorageContainerException { - return selectHandler(container) - .streamInit(container, blockID); + /** + * Initializes a stream for a given container and block ID. 
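The dispatcher above selects a chunk-write strategy by container layout version from an EnumMap and rejects unknown layouts. A self-contained sketch of that dispatch pattern; the enum, interface and messages below are stand-ins rather than the real Ozone types:

import java.util.EnumMap;
import java.util.Map;

// Stand-in types for the dispatch-by-layout-version pattern used by the dispatcher.
public final class LayoutDispatchSketch {

  enum LayoutVersion { FILE_PER_CHUNK, FILE_PER_BLOCK }

  interface Handler {
    String describe();
  }

  private final Map<LayoutVersion, Handler> handlers = new EnumMap<>(LayoutVersion.class);

  LayoutDispatchSketch() {
    handlers.put(LayoutVersion.FILE_PER_CHUNK, () -> "one file per chunk");
    handlers.put(LayoutVersion.FILE_PER_BLOCK, () -> "one file per block");
  }

  // Select the handler for a container's layout; unknown versions are rejected
  // instead of silently falling back to some default behaviour.
  Handler select(LayoutVersion version) {
    Handler handler = handlers.get(version);
    if (handler == null) {
      throw new UnsupportedOperationException("Unsupported storage container layout: " + version);
    }
    return handler;
  }

  public static void main(String[] args) {
    LayoutDispatchSketch dispatcher = new LayoutDispatchSketch();
    System.out.println(dispatcher.select(LayoutVersion.FILE_PER_BLOCK).describe());
  }
}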
+ * + * @param container the container in which the stream is being initialized + * @param blockID the block ID for which the stream is being initialized + * @return a String representing the initialization state of the stream + * @throws StorageContainerException if there is an error during stream initialization + */ + @Override + public String streamInit(Container container, BlockID blockID) throws StorageContainerException { + return selectHandler(container).streamInit(container, blockID); } @Override - public StateMachine.DataChannel getStreamDataChannel( - Container container, BlockID blockID, ContainerMetrics metrics) - throws StorageContainerException { - return selectHandler(container) - .getStreamDataChannel(container, blockID, metrics); + public StateMachine.DataChannel getStreamDataChannel(Container container, BlockID blockID, ContainerMetrics metrics) + throws StorageContainerException { + return selectHandler(container).getStreamDataChannel(container, blockID, metrics); } @Override - public void finishWriteChunks(KeyValueContainer kvContainer, - BlockData blockData) throws IOException { + public void finishWriteChunks(KeyValueContainer kvContainer, BlockData blockData) throws IOException { - selectHandler(kvContainer) - .finishWriteChunks(kvContainer, blockData); + selectHandler(kvContainer).finishWriteChunks(kvContainer, blockData); } @Override - public void finalizeWriteChunk(KeyValueContainer kvContainer, - BlockID blockId) throws IOException { + public void finalizeWriteChunk(KeyValueContainer kvContainer, BlockID blockId) throws IOException { selectHandler(kvContainer).finalizeWriteChunk(kvContainer, blockId); } @Override - public ChunkBuffer readChunk(Container container, BlockID blockID, - ChunkInfo info, DispatcherContext dispatcherContext) - throws StorageContainerException { + public ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, + DispatcherContext dispatcherContext) throws StorageContainerException { - ChunkBuffer data = selectHandler(container) - .readChunk(container, blockID, info, dispatcherContext); + ChunkBuffer data = selectHandler(container).readChunk(container, blockID, info, dispatcherContext); Preconditions.checkState(data != null); container.getContainerData().updateReadStats(data.remaining()); @@ -118,28 +111,23 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, } @Override - public void deleteChunk(Container container, BlockID blockID, ChunkInfo info) - throws StorageContainerException { - + public void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws StorageContainerException { Preconditions.checkNotNull(blockID, "Block ID cannot be null."); // Delete the chunk from disk. - // Do not decrement the ContainerData counters (usedBytes) here as it - // will be updated while deleting the block from the DB + // Do not decrement the ContainerData counters (usedBytes) + // here as it will be updated while deleting the block from the DB selectHandler(container).deleteChunk(container, blockID, info); - } @Override - public void deleteChunks(Container container, BlockData blockData) - throws StorageContainerException { - + public void deleteChunks(Container container, BlockData blockData) throws StorageContainerException { Preconditions.checkNotNull(blockData, "Block data cannot be null."); // Delete the chunks belonging to blockData. 
- // Do not decrement the ContainerData counters (usedBytes) here as it - // will be updated while deleting the block from the DB + // Do not decrement the ContainerData counters (usedBytes) + // here as it will be updated while deleting the block from the DB selectHandler(container).deleteChunks(container, blockData); } @@ -149,17 +137,12 @@ public void shutdown() { handlers.values().forEach(ChunkManager::shutdown); } - private @Nonnull ChunkManager selectHandler(Container container) - throws StorageContainerException { - - ContainerLayoutVersion layout = - container.getContainerData().getLayoutVersion(); + private @Nonnull ChunkManager selectHandler(Container container) throws StorageContainerException { + ContainerLayoutVersion layout = container.getContainerData().getLayoutVersion(); return selectVersionHandler(layout); } - private @Nonnull ChunkManager selectVersionHandler( - ContainerLayoutVersion version) - throws StorageContainerException { + private @Nonnull ChunkManager selectVersionHandler(ContainerLayoutVersion version) throws StorageContainerException { ChunkManager versionHandler = handlers.get(version); if (versionHandler == null) { return throwUnknownLayoutVersion(version); @@ -167,13 +150,12 @@ public void shutdown() { return versionHandler; } - private static ChunkManager throwUnknownLayoutVersion( - ContainerLayoutVersion version) throws StorageContainerException { + private static ChunkManager throwUnknownLayoutVersion(ContainerLayoutVersion version) + throws StorageContainerException { String message = "Unsupported storage container layout: " + version; LOG.warn(message); // TODO pick best result code throw new StorageContainerException(message, UNSUPPORTED_REQUEST); } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 4ca578d7717..3ac7cafea74 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; @@ -68,8 +67,7 @@ */ public class FilePerBlockStrategy implements ChunkManager { - private static final Logger LOG = - LoggerFactory.getLogger(FilePerBlockStrategy.class); + private static final Logger LOG = LoggerFactory.getLogger(FilePerBlockStrategy.class); private final boolean doSyncWrite; private final OpenFiles files = new OpenFiles(); @@ -77,19 +75,13 @@ public class FilePerBlockStrategy implements ChunkManager { private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; private final MappedBufferManager mappedBufferManager; - private final VolumeSet volumeSet; - public FilePerBlockStrategy(boolean sync, BlockManager manager, - VolumeSet volSet) { + public FilePerBlockStrategy(boolean sync, BlockManager manager) { 
doSyncWrite = sync; - this.defaultReadBufferCapacity = manager == null ? 0 : - manager.getDefaultReadBufferCapacity(); - this.readMappedBufferThreshold = manager == null ? 0 - : manager.getReadMappedBufferThreshold(); - this.readMappedBufferMaxCount = manager == null ? 0 - : manager.getReadMappedBufferMaxCount(); + this.defaultReadBufferCapacity = manager == null ? 0 : manager.getDefaultReadBufferCapacity(); + this.readMappedBufferThreshold = manager == null ? 0 : manager.getReadMappedBufferThreshold(); + this.readMappedBufferMaxCount = manager == null ? 0 : manager.getReadMappedBufferMaxCount(); LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount); - this.volumeSet = volSet; if (this.readMappedBufferMaxCount > 0) { mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount); } else { @@ -98,32 +90,27 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager, } private static void checkLayoutVersion(Container container) { - Preconditions.checkArgument( - container.getContainerData().getLayoutVersion() == FILE_PER_BLOCK); + Preconditions.checkArgument(container.getContainerData().getLayoutVersion() == FILE_PER_BLOCK); } @Override - public String streamInit(Container container, BlockID blockID) - throws StorageContainerException { + public String streamInit(Container container, BlockID blockID) throws StorageContainerException { checkLayoutVersion(container); final File chunkFile = getChunkFile(container, blockID); return chunkFile.getAbsolutePath(); } @Override - public StateMachine.DataChannel getStreamDataChannel( - Container container, BlockID blockID, ContainerMetrics metrics) - throws StorageContainerException { + public StateMachine.DataChannel getStreamDataChannel(Container container, BlockID blockID, ContainerMetrics metrics) + throws StorageContainerException { checkLayoutVersion(container); final File chunkFile = getChunkFile(container, blockID); - return new KeyValueStreamDataChannel(chunkFile, - container.getContainerData(), metrics); + return new KeyValueStreamDataChannel(chunkFile, container.getContainerData(), metrics); } @Override - public void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ChunkBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { + public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, + DispatcherContext dispatcherContext) throws StorageContainerException { checkLayoutVersion(container); @@ -131,21 +118,16 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage(); if (info.getLen() <= 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Skip writing empty chunk {} in stage {}", info, stage); - } + LOG.debug("Skip writing empty chunk {} in stage {}", info, stage); return; } if (stage == COMMIT_DATA) { - if (LOG.isDebugEnabled()) { - LOG.debug("Ignore chunk {} in stage {}", info, stage); - } + LOG.debug("Ignore chunk {} in stage {}", info, stage); return; } - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); + KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData(); final File chunkFile = getChunkFile(container, blockID); long len = info.getLen(); @@ -153,7 +135,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, HddsVolume volume = containerData.getVolume(); - FileChannel channel = null; + FileChannel 
channel; boolean overwrite; try { channel = files.getChannel(chunkFile, doSyncWrite); @@ -163,26 +145,21 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, throw e; } - if (LOG.isDebugEnabled()) { - LOG.debug("Writing chunk {} (overwrite: {}) in stage {} to file {}", - info, overwrite, stage, chunkFile); - } + LOG.debug("Writing chunk {} (overwrite: {}) in stage {} to file {}", info, overwrite, stage, chunkFile); - // check whether offset matches block file length if its an overwrite + // Check whether offset matches block file length if it's overwritten. if (!overwrite) { ChunkUtils.validateChunkSize(channel, info, chunkFile.getName()); } - ChunkUtils - .writeData(channel, chunkFile.getName(), data, offset, len, volume); + ChunkUtils.writeData(channel, chunkFile.getName(), data, offset, len, volume); containerData.updateWriteStats(len, overwrite); } @Override - public ChunkBuffer readChunk(Container container, BlockID blockID, - ChunkInfo info, DispatcherContext dispatcherContext) - throws StorageContainerException { + public ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, + DispatcherContext dispatcherContext) throws StorageContainerException { checkLayoutVersion(container); @@ -193,8 +170,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, limitReadSize(info.getLen()); - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); + KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData(); HddsVolume volume = containerData.getVolume(); @@ -202,27 +178,23 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, final long len = info.getLen(); long offset = info.getOffset(); - int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, - defaultReadBufferCapacity); - return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume, - readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); + int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, defaultReadBufferCapacity); + return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume, readMappedBufferThreshold, + readMappedBufferMaxCount > 0, mappedBufferManager); } @Override - public void deleteChunk(Container container, BlockID blockID, ChunkInfo info) - throws StorageContainerException { + public void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws StorageContainerException { deleteChunk(container, blockID, info, true); } @Override - public void deleteChunks(Container container, BlockData blockData) - throws StorageContainerException { + public void deleteChunks(Container container, BlockData blockData) throws StorageContainerException { deleteChunk(container, blockData.getBlockID(), null, false); } @Override - public void finishWriteChunks(KeyValueContainer container, - BlockData blockData) throws IOException { + public void finishWriteChunks(KeyValueContainer container, BlockData blockData) throws IOException { final File chunkFile = getChunkFile(container, blockData.getBlockID()); try { files.close(chunkFile); @@ -234,8 +206,7 @@ public void finishWriteChunks(KeyValueContainer container, } @Override - public void finalizeWriteChunk(KeyValueContainer container, - BlockID blockId) throws IOException { + public void finalizeWriteChunk(KeyValueContainer container, BlockID blockId) throws IOException { synchronized (container) { File chunkFile = getChunkFile(container, blockId); try { @@ 
-250,8 +221,7 @@ public void finalizeWriteChunk(KeyValueContainer container, } } - private void deleteChunk(Container container, BlockID blockID, - ChunkInfo info, boolean verifyLength) + private void deleteChunk(Container container, BlockID blockID, ChunkInfo info, boolean verifyLength) throws StorageContainerException { checkLayoutVersion(container); @@ -259,17 +229,15 @@ private void deleteChunk(Container container, BlockID blockID, final File file = getChunkFile(container, blockID); - // if the chunk file does not exist, it might have already been deleted. - // The call might be because of reapply of transactions on datanode - // restart. + // If the chunk file does not exist, it might have already been deleted. + // The call might be because of transactions being reapplied on datanode restart. if (!file.exists()) { LOG.warn("Block file to be deleted does not exist: {}", file); return; } if (verifyLength) { - Preconditions.checkNotNull(info, "Chunk info cannot be null for single " + - "chunk delete"); + Preconditions.checkNotNull(info, "Chunk info cannot be null for single chunk delete"); checkFullDelete(info, file); } @@ -281,13 +249,13 @@ private static File getChunkFile(Container container, BlockID blockID) throws St return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, null); } - private static void checkFullDelete(ChunkInfo info, File chunkFile) - throws StorageContainerException { + private static void checkFullDelete(ChunkInfo info, File chunkFile) throws StorageContainerException { long fileLength = chunkFile.length(); if ((info.getOffset() > 0) || (info.getLen() != fileLength)) { - String msg = String.format( - "Trying to delete partial chunk %s from file %s with length %s", - info, chunkFile, fileLength); + String msg = String.format("Trying to delete partial chunk %s from file %s with length %s", + info, + chunkFile, + fileLength); LOG.error(msg); throw new StorageContainerException(msg, UNSUPPORTED_REQUEST); } @@ -295,25 +263,21 @@ private static void checkFullDelete(ChunkInfo info, File chunkFile) private static final class OpenFiles { - private static final RemovalListener ON_REMOVE = - event -> close(event.getKey(), event.getValue()); + private static final RemovalListener ON_REMOVE = event -> close(event.getKey(), event.getValue()); private final Cache files = CacheBuilder.newBuilder() .expireAfterAccess(Duration.ofMinutes(10)) .removalListener(ON_REMOVE) .build(); - public FileChannel getChannel(File file, boolean sync) - throws StorageContainerException { + public FileChannel getChannel(File file, boolean sync) throws StorageContainerException { try { - return files.get(file.getPath(), - () -> open(file, sync)).getChannel(); + return files.get(file.getPath(), () -> open(file, sync)).getChannel(); } catch (ExecutionException e) { if (e.getCause() instanceof IOException) { throw new UncheckedIOException((IOException) e.getCause()); } - throw new StorageContainerException(e.getCause(), - ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); + throw new StorageContainerException(e.getCause(), ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); } } @@ -332,20 +296,15 @@ public void close(File file) { } public boolean isOpen(File file) { - return file != null && - files.getIfPresent(file.getPath()) != null; + return file != null && files.getIfPresent(file.getPath()) != null; } private static void close(String filename, OpenFile openFile) { if (openFile != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Closing file
{}", filename); openFile.close(); } else { - if (LOG.isDebugEnabled()) { - LOG.debug("File {} not open", filename); - } + LOG.debug("File {} not open", filename); } } } @@ -357,9 +316,7 @@ private static final class OpenFile { private OpenFile(File file, boolean sync) throws FileNotFoundException { String mode = sync ? "rws" : "rw"; this.file = new RandomAccessFile(file, mode); - if (LOG.isDebugEnabled()) { - LOG.debug("Opened file {}", file); - } + LOG.debug("Opened file {}", file); } public FileChannel getChannel() { @@ -374,5 +331,4 @@ public void close() { } } } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java index 256d357a31d..dd68caadc11 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java @@ -47,8 +47,7 @@ public interface BlockManager { * all the chunks are written and stream is closed) * @return length of the Block. */ - long putBlock(Container container, BlockData data, boolean endOfBlock) - throws IOException; + long putBlock(Container container, BlockData data, boolean endOfBlock) throws IOException; /** * Gets an existing block. @@ -60,14 +59,14 @@ long putBlock(Container container, BlockData data, boolean endOfBlock) */ BlockData getBlock(Container container, BlockID blockID) throws IOException; - /** * Deletes an existing block. * * @param container - Container from which block need to be deleted. * @param blockID - ID of the block. */ - void deleteBlock(Container container, BlockID blockID) throws IOException; + @Deprecated + void deleteBlock(Container container, BlockID blockID); /** * List blocks in a container. @@ -77,23 +76,37 @@ long putBlock(Container container, BlockData data, boolean endOfBlock) * @param count - Number of blocks to return. * @return List of Blocks that match the criteria. */ - List listBlock(Container container, long startLocalID, int count) - throws IOException; + List listBlock(Container container, long startLocalID, int count) throws IOException; /** * Returns last committed length of the block. * - * @param container - Container from which block need to be fetched. + * @param container - Container from which block needs to be fetched. * @param blockID - BlockID of the block. * @return length of the block. * @throws IOException in case, the block key does not exist in db. */ - long getCommittedBlockLength(Container container, BlockID blockID) - throws IOException; + long getCommittedBlockLength(Container container, BlockID blockID) throws IOException; - void finalizeBlock(Container container, BlockID blockId) - throws IOException; + /** + * Finalizes the specified block in a container. + * This may involve tasks such as writing final metadata, closing resources, + * or other operations required to mark the block as complete and immutable. + * + * @param container the container where the block resides. + * It should be an instance of Container and carry details about the associated storage unit. + * @param blockId the identifier of the block that needs to be finalized. + * It should be an instance of BlockID and reference the specific block to be finalized. 
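The OpenFiles/OpenFile helpers above keep block files open in a Guava cache that expires idle entries and closes them through a removal listener, opening the file in "rws" mode when synchronous writes are requested and "rw" otherwise. A rough self-contained sketch of that pattern, assuming Guava's cache API; the class and method names are illustrative, not the actual Ozone code:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.io.UncheckedIOException;
import java.nio.channels.FileChannel;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

// Illustrative pool of open block files: entries expire after a period of
// inactivity and the removal listener closes the underlying file.
public final class OpenFilePoolSketch {

  private static final RemovalListener<String, RandomAccessFile> ON_REMOVE = notification -> {
    try {
      RandomAccessFile raf = notification.getValue();
      if (raf != null) {
        raf.close();
      }
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  };

  private final Cache<String, RandomAccessFile> files = CacheBuilder.newBuilder()
      .expireAfterAccess(10, TimeUnit.MINUTES)
      .removalListener(ON_REMOVE)
      .build();

  // "rws" also flushes content and metadata on every update, "rw" does not;
  // this mirrors the sync flag handed to the strategy.
  FileChannel getChannel(File file, boolean sync) throws IOException {
    String mode = sync ? "rws" : "rw";
    try {
      return files.get(file.getPath(), () -> new RandomAccessFile(file, mode)).getChannel();
    } catch (ExecutionException e) {
      throw new IOException("Unable to open " + file, e.getCause());
    }
  }

  void close(File file) {
    files.invalidate(file.getPath()); // triggers the removal listener
  }

  public static void main(String[] args) throws IOException {
    OpenFilePoolSketch pool = new OpenFilePoolSketch();
    File f = File.createTempFile("chunk", ".block");
    FileChannel channel = pool.getChannel(f, true);
    System.out.println("open: " + channel.isOpen());
    pool.close(f);
  }
}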
+ * @throws IOException if there is an error during the finalization process, + * such as I/O issues or problems accessing the block. + */ + void finalizeBlock(Container container, BlockID blockId) throws IOException; + /** + * Returns the default buffer capacity used for read operations. + * + * @return the default read buffer capacity. + */ int getDefaultReadBufferCapacity(); /** @return the threshold to read using memory mapped buffers. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java index 7751dba429d..dabe6b41ec9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java @@ -35,35 +35,41 @@ import java.nio.ByteBuffer; /** - * Chunk Manager allows read, write, delete and listing of chunks in - * a container. + * Chunk Manager allows read, write, delete and listing of chunks in a container. */ - public interface ChunkManager { /** - * writes a given chunk. + * Writes a chunk of data to the specified container and block. * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param data - * @param dispatcherContext - dispatcher context info. - * @throws StorageContainerException + * @param container The container to which the chunk is written. + * @param blockID The ID of the block within the container. + * @param info Metadata about the chunk being written. + * @param data Buffer containing the chunk data to be written. + * @param dispatcherContext Context information for the dispatcher performing the write operation. + * @throws StorageContainerException If an error occurs during the write operation. */ - void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ChunkBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException; + void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, + DispatcherContext dispatcherContext) throws StorageContainerException; - default void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { + /** + * Writes a chunk of data to the specified container and block. + * + * @param container The container to which the chunk is written. + * @param blockID The ID of the block within the container. + * @param info Metadata about the chunk being written. + * @param data ByteBuffer containing the chunk data to be written. + * @param dispatcherContext Context information for the dispatcher performing the write operation. + * @throws StorageContainerException If an error occurs during the write operation. + */ + default void writeChunk(Container container, BlockID blockID, ChunkInfo info, ByteBuffer data, + DispatcherContext dispatcherContext) throws StorageContainerException { ChunkBuffer wrapper = ChunkBuffer.wrap(data); writeChunk(container, blockID, info, wrapper, dispatcherContext); } /** - * reads the data defined by a chunk. + * Reads the data defined by a chunk. * * @param container - Container for the chunk * @param blockID - ID of the block. 
@@ -75,8 +81,8 @@ default void writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Right now we do not support partial reads and writes of chunks. * TODO: Explore if we need to do that for ozone. */ - ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, - DispatcherContext dispatcherContext) throws StorageContainerException; + ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) + throws StorageContainerException; /** * Deletes a given chunk. @@ -84,13 +90,17 @@ ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, * @param container - Container for the chunk * @param blockID - ID of the block. * @param info - Chunk Info - * @throws StorageContainerException */ - void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws - StorageContainerException; + void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws StorageContainerException; - void deleteChunks(Container container, BlockData blockData) throws - StorageContainerException; + /** + * Deletes the chunks associated with the given block data in the specified container. + * + * @param container The container from which the chunks will be deleted. + * @param blockData The block data containing information about the chunks to be deleted. + * @throws StorageContainerException If an error occurs during the deletion process. + */ + void deleteChunks(Container container, BlockData blockData) throws StorageContainerException; // TODO : Support list operations. @@ -101,42 +111,74 @@ default void shutdown() { // if applicable } - default void finishWriteChunks(KeyValueContainer kvContainer, - BlockData blockData) throws IOException { + /** + * Finalizes the process of writing chunks to the specified container and block data. + * + * @param kvContainer The container to which the chunks are being written. + * @param blockData The block data associated with the chunks being written. + * @throws IOException If an I/O error occurs during the finalization process. + */ + default void finishWriteChunks(KeyValueContainer kvContainer, BlockData blockData) throws IOException { // no-op } - default void finalizeWriteChunk(KeyValueContainer container, - BlockID blockId) throws IOException { + /** + * Finalizes the process of writing a chunk to the specified container and block. + * + * @param container The container where the chunk is written. + * @param blockId The ID of the block within the container. + * @throws IOException If an I/O error occurs during the finalization process. + */ + default void finalizeWriteChunk(KeyValueContainer container, BlockID blockId) throws IOException { // no-op } - default String streamInit(Container container, BlockID blockID) - throws StorageContainerException { + /** + * Initializes a data stream for the specified container and block. + * + * @param container The container where the stream will be initialized. + * @param blockID The ID of the block within the container. + * @return A string that represents the status or identifier of the initialized stream. + * @throws StorageContainerException If an error occurs during the stream initialization. 
+ */ + default String streamInit(Container container, BlockID blockID) throws StorageContainerException { return null; } - default StateMachine.DataChannel getStreamDataChannel( - Container container, BlockID blockID, ContainerMetrics metrics) - throws StorageContainerException { + /** + * Retrieves the data channel for streaming data within a specified container and block. + * + * @param container The container where the data stream exists. + * @param blockID The ID of the block within the container. + * @param metrics Metrics pertaining to the container. + * @return The DataChannel object associated with the specified container and block. + * @throws StorageContainerException If an error occurs during the execution of the method. + */ + default StateMachine.DataChannel getStreamDataChannel(Container container, BlockID blockID, ContainerMetrics metrics) + throws StorageContainerException { return null; } - static int getBufferCapacityForChunkRead(ChunkInfo chunkInfo, - int defaultReadBufferCapacity) { + /** + * Determines the appropriate buffer capacity for reading a chunk based on provided chunk information + * and a default buffer capacity. + * + * @param chunkInfo The metadata information about the chunk to be read. + * @param defaultReadBufferCapacity The default buffer capacity to be used if no specific capacity is determined. + * @return The calculated buffer capacity for reading the chunk. + */ + static int getBufferCapacityForChunkRead(ChunkInfo chunkInfo, int defaultReadBufferCapacity) { long bufferCapacity = 0; if (chunkInfo.isReadDataIntoSingleBuffer()) { // Older client - read all chunk data into one single buffer. bufferCapacity = chunkInfo.getLen(); } else { - // Set buffer capacity to checksum boundary size so that each buffer - // corresponds to one checksum. If checksum is NONE, then set buffer - // capacity to default (OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY = 1MB). + // Set buffer capacity to checksum boundary size so that each buffer corresponds to one checksum. + // If checksum is NONE, then set buffer capacity to default (OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY = 1MB). ChecksumData checksumData = chunkInfo.getChecksumData(); if (checksumData != null) { - if (checksumData.getChecksumType() == - ContainerProtos.ChecksumType.NONE) { + if (checksumData.getChecksumType() == ContainerProtos.ChecksumType.NONE) { bufferCapacity = defaultReadBufferCapacity; } else { bufferCapacity = checksumData.getBytesPerChecksum(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java index cc83f453ebd..80b38bee898 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java @@ -25,8 +25,7 @@ import java.io.File; /** - * This class serves as an intermediate format for all possible database - * layouts for datanodes. + * This class serves as an intermediate format for all possible database layouts for datanodes. */ public abstract class AbstractDatanodeDBDefinition implements DBDefinition { @@ -38,8 +37,7 @@ public abstract class AbstractDatanodeDBDefinition implements DBDefinition { * @param config The ozone global configuration. * {@link DBDefinition}. 
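getBufferCapacityForChunkRead above sizes the read buffer so that, for current clients, each buffer lines up with one checksum unit, and falls back to the configured default (1 MB) when checksums are disabled; legacy clients still get a single buffer for the whole chunk. A simplified restatement with plain parameters in place of the real ChunkInfo and ChecksumData objects (illustrative, not the actual method):

// Simplified restatement of the read-buffer sizing rule: older clients get one
// buffer for the whole chunk; otherwise each buffer covers one checksum unit,
// falling back to the configured default when checksums are disabled.
public final class ReadBufferCapacitySketch {

  static long bufferCapacity(boolean readIntoSingleBuffer, long chunkLen,
      boolean checksumNone, long bytesPerChecksum, long defaultCapacity) {
    if (readIntoSingleBuffer) {
      return chunkLen;                 // legacy client: one buffer per chunk
    }
    if (checksumNone || bytesPerChecksum <= 0) {
      return defaultCapacity;          // e.g. the 1 MB default read buffer
    }
    return bytesPerChecksum;           // one buffer per checksum boundary
  }

  public static void main(String[] args) {
    long oneMb = 1024 * 1024;
    System.out.println(bufferCapacity(false, 4 * oneMb, false, 16 * 1024, oneMb)); // 16384
    System.out.println(bufferCapacity(false, 4 * oneMb, true, 0, oneMb));          // 1048576
    System.out.println(bufferCapacity(true, 4 * oneMb, false, 16 * 1024, oneMb));  // 4194304
  }
}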
*/ - protected AbstractDatanodeDBDefinition(String dbPath, - ConfigurationSource config) { + protected AbstractDatanodeDBDefinition(String dbPath, ConfigurationSource config) { this.dbDir = new File(dbPath); this.config = config; } @@ -56,24 +54,20 @@ public File getDBLocation(ConfigurationSource conf) { @Override public String getLocationConfigKey() { - throw new UnsupportedOperationException( - "No location config key available for datanode databases."); + throw new UnsupportedOperationException("No location config key available for datanode databases."); } public ConfigurationSource getConfig() { return config; } - public abstract DBColumnFamilyDefinition - getBlockDataColumnFamily(); + public abstract DBColumnFamilyDefinition getBlockDataColumnFamily(); - public abstract DBColumnFamilyDefinition - getMetadataColumnFamily(); + public abstract DBColumnFamilyDefinition getMetadataColumnFamily(); public DBColumnFamilyDefinition getFinalizeBlocksColumnFamily() { return null; } - public abstract DBColumnFamilyDefinition - getLastChunkInfoColumnFamily(); + public abstract DBColumnFamilyDefinition getLastChunkInfoColumnFamily(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 26719d7f035..9b5e980f9c2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -49,8 +49,8 @@ import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; /** - * Implementation of the {@link DatanodeStore} interface that contains - * functionality common to all more derived datanode store implementations. + * Implementation of the {@link DatanodeStore} interface + * that contains functionality common to all more derived datanode store implementations. */ public abstract class AbstractDatanodeStore implements DatanodeStore { @@ -66,8 +66,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private Table finalizeBlocksTableWithIterator; - public static final Logger LOG = - LoggerFactory.getLogger(AbstractDatanodeStore.class); + public static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); private volatile DBStore store; private final AbstractDatanodeDBDefinition dbDef; private final ManagedColumnFamilyOptions cfOptions; @@ -81,16 +80,14 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { * @param config - Ozone Configuration. * @throws IOException - on Failure. */ - protected AbstractDatanodeStore(ConfigurationSource config, - AbstractDatanodeDBDefinition dbDef, boolean openReadOnly) + protected AbstractDatanodeStore(ConfigurationSource config, AbstractDatanodeDBDefinition dbDef, boolean openReadOnly) throws IOException { - dbProfile = DatanodeDBProfile - .getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); + dbProfile = DatanodeDBProfile.getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); - // The same config instance is used on each datanode, so we can share the - // corresponding column family options, providing a single shared cache - // for all containers on a datanode. 
+ // The same config instance is used on each datanode, + // so we can share the corresponding column family options, + // providing a single shared cache for all containers on a datanode. cfOptions = dbProfile.getColumnFamilyOptions(config); this.dbDef = dbDef; @@ -99,40 +96,34 @@ protected AbstractDatanodeStore(ConfigurationSource config, } @Override - public void start(ConfigurationSource config) - throws IOException { + public void start(ConfigurationSource config) throws IOException { if (this.store == null) { ManagedDBOptions options = dbProfile.getDBOptions(); options.setCreateIfMissing(true); options.setCreateMissingColumnFamilies(true); - if (this.dbDef instanceof DatanodeSchemaOneDBDefinition || - this.dbDef instanceof DatanodeSchemaTwoDBDefinition) { + if (this.dbDef instanceof DatanodeSchemaOneDBDefinition || this.dbDef instanceof DatanodeSchemaTwoDBDefinition) { long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2)); options.setMaxTotalWalSize(maxWalSize); } - DatanodeConfiguration dc = - config.getObject(DatanodeConfiguration.class); + DatanodeConfiguration dc = config.getObject(DatanodeConfiguration.class); // Config user log files - InfoLogLevel level = InfoLogLevel.valueOf( - dc.getRocksdbLogLevel() + "_LEVEL"); + InfoLogLevel level = InfoLogLevel.valueOf(dc.getRocksdbLogLevel() + "_LEVEL"); options.setInfoLogLevel(level); options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize()); options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum()); if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) { - options.setDeleteObsoleteFilesPeriodMicros( - dc.getRocksdbDeleteObsoleteFilesPeriod()); + options.setDeleteObsoleteFilesPeriodMicros(dc.getRocksdbDeleteObsoleteFilesPeriod()); - // For V3, all Rocksdb dir has the same "container.db" name. So use - // parentDirName(storage UUID)-dbDirName as db metrics name + // For V3, all Rocksdb dir has the same "container.db" name. + // So use parentDirName(storage UUID)-dbDirName as db metrics name this.store = DBStoreBuilder.newBuilder(config, dbDef) .setDBOptions(options) .setDefaultCFOptions(cfOptions) .setOpenReadOnly(openReadOnly) - .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" + - dbDef.getName()) + .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" + dbDef.getName()) .build(); } else { this.store = DBStoreBuilder.newBuilder(config, dbDef) @@ -142,35 +133,28 @@ public void start(ConfigurationSource config) .build(); } - // Use the DatanodeTable wrapper to disable the table iterator on - // existing Table implementations retrieved from the DBDefinition. - // See the DatanodeTable's Javadoc for an explanation of why this is - // necessary. - metadataTable = new DatanodeTable<>( - dbDef.getMetadataColumnFamily().getTable(this.store)); + // Use the DatanodeTable wrapper + // to disable the table iterator on existing Table implementations retrieved from the DBDefinition. + // See the DatanodeTable's Javadoc for an explanation of why this is necessary. + metadataTable = new DatanodeTable<>(dbDef.getMetadataColumnFamily().getTable(this.store)); checkTableStatus(metadataTable, metadataTable.getName()); - // The block iterator this class returns will need to use the table - // iterator internally, so construct a block data table instance - // that does not have the iterator disabled by DatanodeTable. 
- blockDataTableWithIterator = - dbDef.getBlockDataColumnFamily().getTable(this.store); + // The block iterator this class returns will need to use the table iterator internally, + // so construct a block data table instance that does not have the iterator disabled by DatanodeTable. + blockDataTableWithIterator = dbDef.getBlockDataColumnFamily().getTable(this.store); blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); checkTableStatus(blockDataTable, blockDataTable.getName()); if (dbDef.getFinalizeBlocksColumnFamily() != null) { - finalizeBlocksTableWithIterator = - dbDef.getFinalizeBlocksColumnFamily().getTable(this.store); + finalizeBlocksTableWithIterator = dbDef.getFinalizeBlocksColumnFamily().getTable(this.store); - finalizeBlocksTable = new DatanodeTable<>( - finalizeBlocksTableWithIterator); + finalizeBlocksTable = new DatanodeTable<>(finalizeBlocksTableWithIterator); checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName()); } if (dbDef.getLastChunkInfoColumnFamily() != null) { - lastChunkInfoTable = new DatanodeTable<>( - dbDef.getLastChunkInfoColumnFamily().getTable(this.store)); + lastChunkInfoTable = new DatanodeTable<>(dbDef.getLastChunkInfoColumnFamily().getTable(this.store)); checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName()); } } @@ -222,22 +206,19 @@ public Table getFinalizeBlocksTable() { @Override public BlockIterator getBlockIterator(long containerID) throws IOException { - return new KeyValueBlockIterator(containerID, - blockDataTableWithIterator.iterator()); + return new KeyValueBlockIterator(containerID, blockDataTableWithIterator.iterator()); } @Override public BlockIterator getBlockIterator(long containerID, KeyPrefixFilter filter) throws IOException { - return new KeyValueBlockIterator(containerID, - blockDataTableWithIterator.iterator(), filter); + return new KeyValueBlockIterator(containerID, blockDataTableWithIterator.iterator(), filter); } @Override public BlockIterator getFinalizeBlockIterator(long containerID, KeyPrefixFilter filter) throws IOException { - return new KeyValueBlockLocalIdIterator(containerID, - finalizeBlocksTableWithIterator.iterator(), filter); + return new KeyValueBlockLocalIdIterator(containerID, finalizeBlocksTableWithIterator.iterator(), filter); } @Override @@ -286,47 +267,36 @@ protected Table getFinalizeBlocksTableWithIterator() { return this.finalizeBlocksTableWithIterator; } - protected static void checkTableStatus(Table table, String name) - throws IOException { - String logMessage = "Unable to get a reference to %s table. Cannot " + - "continue."; - String errMsg = "Inconsistent DB state, Table - %s. Please check the" + - " logs for more info."; + protected static void checkTableStatus(Table table, String name) throws IOException { if (table == null) { - LOG.error(String.format(logMessage, name)); - throw new IOException(String.format(errMsg, name)); + LOG.error("Unable to get a reference to {} table. Cannot continue.", name); + throw new IOException("Inconsistent DB state, Table - " + name + ". Please check the logs for more info."); } } /** - * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no - * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * Block Iterator for KeyValue Container. + * This block iterator returns blocks which match with the {@link MetadataKeyFilters.KeyPrefixFilter}.
+ * If no filter is specified, then the default filter used is {@link MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public - public static class KeyValueBlockIterator implements - BlockIterator, Closeable { + public static class KeyValueBlockIterator implements BlockIterator, Closeable { - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueBlockIterator.class); + private static final Logger LOG = LoggerFactory.getLogger(KeyValueBlockIterator.class); - private final TableIterator> - blockIterator; - private static final KeyPrefixFilter DEFAULT_BLOCK_FILTER = - MetadataKeyFilters.getUnprefixedKeyFilter(); + private final TableIterator> blockIterator; + private static final KeyPrefixFilter DEFAULT_BLOCK_FILTER = MetadataKeyFilters.getUnprefixedKeyFilter(); private final KeyPrefixFilter blockFilter; private BlockData nextBlock; private final long containerID; /** * KeyValueBlockIterator to iterate unprefixed blocks in a container. + * * @param iterator - The underlying iterator to apply the block filter to. */ KeyValueBlockIterator(long containerID, - TableIterator> - iterator) { + TableIterator> iterator) { this.containerID = containerID; this.blockIterator = iterator; this.blockFilter = DEFAULT_BLOCK_FILTER; @@ -334,12 +304,12 @@ public static class KeyValueBlockIterator implements /** * KeyValueBlockIterator to iterate blocks in a container. + * * @param iterator - The underlying iterator to apply the block filter to. * @param filter - Block filter, filter to be applied for blocks */ - KeyValueBlockIterator(long containerID, - TableIterator> - iterator, KeyPrefixFilter filter) { + KeyValueBlockIterator(long containerID, TableIterator> iterator, + KeyPrefixFilter filter) { this.containerID = containerID; this.blockIterator = iterator; this.blockFilter = filter; @@ -347,8 +317,8 @@ public static class KeyValueBlockIterator implements /** * This method returns blocks matching with the filter. + * * @return next block or null if no more blocks - * @throws IOException */ @Override public BlockData nextBlock() throws IOException, NoSuchElementException { @@ -360,8 +330,7 @@ public BlockData nextBlock() throws IOException, NoSuchElementException { if (hasNext()) { return nextBlock(); } - throw new NoSuchElementException("Block Iterator reached end for " + - "ContainerID " + containerID); + throw new NoSuchElementException("Block Iterator reached end for ContainerID " + containerID); } @Override @@ -374,10 +343,10 @@ public boolean hasNext() throws IOException { byte[] keyBytes = StringUtils.string2Bytes(keyValue.getKey()); if (blockFilter.filterKey(null, keyBytes, null)) { nextBlock = keyValue.getValue(); - if (LOG.isTraceEnabled()) { - LOG.trace("Block matching with filter found: blockID is : {} for " + - "containerID {}", nextBlock.getLocalID(), containerID); - } + LOG.trace( + "Block matching with filter found: blockID is : {} for containerID {}", + nextBlock.getLocalID(), + containerID); return true; } } @@ -404,32 +373,27 @@ public void close() throws IOException { /** * Block localId Iterator for KeyValue Container. - * This Block localId iterator returns localIds - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no - * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * This Block localId iterator returns localIds which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. 
+ * If no filter is specified, then the default filter used is {@link MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public - public static class KeyValueBlockLocalIdIterator implements - BlockIterator, Closeable { + public static class KeyValueBlockLocalIdIterator implements BlockIterator, Closeable { - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueBlockLocalIdIterator.class); + private static final Logger LOG = LoggerFactory.getLogger(KeyValueBlockLocalIdIterator.class); - private final TableIterator> blockLocalIdIterator; + private final TableIterator> blockLocalIdIterator; private final KeyPrefixFilter localIdFilter; private Long nextLocalId; private final long containerID; /** * KeyValueBlockLocalIdIterator to iterate block localIds in a container. + * * @param iterator - The iterator to apply the blockLocalId filter to. * @param filter - BlockLocalId filter to be applied for block localIds. */ KeyValueBlockLocalIdIterator(long containerID, - TableIterator> - iterator, KeyPrefixFilter filter) { + TableIterator> iterator, KeyPrefixFilter filter) { this.containerID = containerID; this.blockLocalIdIterator = iterator; this.localIdFilter = filter; @@ -437,8 +401,8 @@ public static class KeyValueBlockLocalIdIterator implements /** * This method returns blocks matching with the filter. + * * @return next block local Id or null if no more block localIds - * @throws IOException */ @Override public Long nextBlock() throws IOException, NoSuchElementException { @@ -450,8 +414,7 @@ public Long nextBlock() throws IOException, NoSuchElementException { if (hasNext()) { return nextBlock(); } - throw new NoSuchElementException("Block Local ID Iterator " + - "reached end for ContainerID " + containerID); + throw new NoSuchElementException("Block Local ID Iterator reached end for ContainerID " + containerID); } @Override @@ -464,10 +427,7 @@ public boolean hasNext() throws IOException { byte[] keyBytes = StringUtils.string2Bytes(keyValue.getKey()); if (localIdFilter.filterKey(null, keyBytes, null)) { nextLocalId = keyValue.getValue(); - if (LOG.isTraceEnabled()) { - LOG.trace("Block matching with filter found: LocalID is : " + - "{} for containerID {}", nextLocalId, containerID); - } + LOG.trace("Block matching with filter found: LocalID is : {} for containerID {}", nextLocalId, containerID); return true; } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index d47446d49b0..9a7c3e33b6f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -36,27 +36,30 @@ import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; /** - * This class defines the RocksDB structure for datanode following schema - * version 3, where the block data, metadata, and transactions which are to be - * deleted are put in their own separate column families and with containerID - * as key prefix. 
+ * This class defines the RocksDB structure for datanode following schema version 3,
+ * where the block data, metadata, and transactions which are to be deleted are put
+ * in their own separate column families and with containerID as key prefix.
 *
 * Some key format illustrations for the column families:
- * - block_data: containerID | blockID
- * - metadata: containerID | #BLOCKCOUNT
- *             containerID | #BYTESUSED
- *             ...
- * - deleted_blocks: containerID | blockID
- * - delete_txns: containerID | TransactionID
- *
- * The keys would be encoded in a fix-length encoding style in order to
- * utilize the "Prefix Seek" feature from Rocksdb to optimize seek.
+ *   - block_data:      containerID | blockID
+ *   - metadata:        containerID | #BLOCKCOUNT
+ *                      containerID | #BYTESUSED
+ *                      ...
+ *   - deleted_blocks:  containerID | blockID
+ *   - delete_txns:     containerID | TransactionID
+ *
+ * The keys would be encoded in a fix-length encoding style to use the "Prefix Seek" + * feature from Rocksdb to optimize seek. */ -public class DatanodeSchemaThreeDBDefinition - extends AbstractDatanodeDBDefinition +public class DatanodeSchemaThreeDBDefinition extends AbstractDatanodeDBDefinition implements DBDefinition.WithMapInterface { - public static final DBColumnFamilyDefinition - BLOCK_DATA = + public static final DBColumnFamilyDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", String.class, @@ -64,8 +67,7 @@ public class DatanodeSchemaThreeDBDefinition BlockData.class, BlockData.getCodec()); - public static final DBColumnFamilyDefinition - METADATA = + public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", String.class, @@ -73,8 +75,7 @@ public class DatanodeSchemaThreeDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETE_TRANSACTION = + public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", String.class, @@ -82,8 +83,7 @@ public class DatanodeSchemaThreeDBDefinition DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); - public static final DBColumnFamilyDefinition - FINALIZE_BLOCKS = + public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", String.class, @@ -91,8 +91,7 @@ public class DatanodeSchemaThreeDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - LAST_CHUNK_INFO = + public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", String.class, @@ -102,27 +101,25 @@ public class DatanodeSchemaThreeDBDefinition private static String separator = ""; - private static final Map> - COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( + private static final Map> COLUMN_FAMILIES = + DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, DELETE_TRANSACTION, FINALIZE_BLOCKS, LAST_CHUNK_INFO); - public DatanodeSchemaThreeDBDefinition(String dbPath, - ConfigurationSource config) { + public DatanodeSchemaThreeDBDefinition(String dbPath, ConfigurationSource config) { super(dbPath, config); DatanodeConfiguration dc = config.getObject(DatanodeConfiguration.class); setSeparator(dc.getContainerSchemaV3KeySeparator()); // Get global ColumnFamilyOptions first. - DatanodeDBProfile dbProfile = DatanodeDBProfile - .getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); + DatanodeDBProfile dbProfile = + DatanodeDBProfile.getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); - ManagedColumnFamilyOptions cfOptions = - dbProfile.getColumnFamilyOptions(config); + ManagedColumnFamilyOptions cfOptions = dbProfile.getColumnFamilyOptions(config); // Use prefix seek to mitigating seek overhead. 
// See: https://github.com/facebook/rocksdb/wiki/Prefix-Seek cfOptions.useFixedLengthPrefixExtractor(getContainerKeyPrefixLength()); @@ -140,8 +137,7 @@ public DatanodeSchemaThreeDBDefinition(String dbPath, } @Override - public DBColumnFamilyDefinition - getBlockDataColumnFamily() { + public DBColumnFamilyDefinition getBlockDataColumnFamily() { return BLOCK_DATA; } @@ -151,46 +147,70 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { } @Override - public DBColumnFamilyDefinition - getLastChunkInfoColumnFamily() { + public DBColumnFamilyDefinition getLastChunkInfoColumnFamily() { return LAST_CHUNK_INFO; } - public DBColumnFamilyDefinition - getDeleteTransactionsColumnFamily() { + /** + * Retrieves the column family definition for delete transactions. + * + * @return A DBColumnFamilyDefinition holding the configuration for delete transactions. + */ + public DBColumnFamilyDefinition getDeleteTransactionsColumnFamily() { return DELETE_TRANSACTION; } @Override - public DBColumnFamilyDefinition - getFinalizeBlocksColumnFamily() { + public DBColumnFamilyDefinition getFinalizeBlocksColumnFamily() { return FINALIZE_BLOCKS; } + /** + * Gets the length of the container key prefix in bytes. + * + * @return the length of the container key prefix as an integer. + */ public static int getContainerKeyPrefixLength() { - return FixedLengthStringCodec.string2Bytes( - getContainerKeyPrefix(0L)).length; + return FixedLengthStringCodec.string2Bytes(getContainerKeyPrefix(0L)).length; } + /** + * Generates a prefix key for a given container ID. + * + * @param containerID the identifier of the container for which the prefix key is generated + * @return the generated prefix key as a String + */ public static String getContainerKeyPrefix(long containerID) { // NOTE: Rocksdb normally needs a fixed length prefix. - return FixedLengthStringCodec.bytes2String(Longs.toByteArray(containerID)) - + separator; + return FixedLengthStringCodec.bytes2String(Longs.toByteArray(containerID)) + separator; } + /** + * Converts the container key prefix, generated based on the container ID, to a byte array representation. + * + * @param containerID The identifier of the container for which the key prefix bytes are generated. + * @return A byte array representing the container key prefix. + */ public static byte[] getContainerKeyPrefixBytes(long containerID) { // NOTE: Rocksdb normally needs a fixed length prefix. - return FixedLengthStringCodec.string2Bytes( - getContainerKeyPrefix(containerID)); + return FixedLengthStringCodec.string2Bytes(getContainerKeyPrefix(containerID)); } + /** + * Extracts the key from a prefixed key by removing the prefix. + * + * @param keyWithPrefix the key that contains the prefix to be removed + * @return the key without the prefix + */ public static String getKeyWithoutPrefix(String keyWithPrefix) { return keyWithPrefix.substring(keyWithPrefix.indexOf(separator) + 1); } + /** + * Retrieves the container ID from a given key. 
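Since the key prefix helpers above are the heart of the prefix-seek optimization, a rough, dependency-free illustration of the arithmetic may help: every key starts with a fixed-length, big-endian encoding of the containerID, so the fixed-length prefix extractor can group keys per container and the ID can be decoded back from the first bytes of any key. This is a simplified stand-in, not the FixedLengthStringCodec/Longs based implementation used by the class:

import java.nio.ByteBuffer;
import java.util.Arrays;

// Simplified illustration of fixed-length containerID prefixes for RocksDB prefix seek.
final class ContainerKeyPrefixDemo {
  static final int PREFIX_LENGTH = Long.BYTES; // 8 bytes, identical for every container

  // Big-endian 8-byte prefix derived from the containerID.
  static byte[] prefixBytes(long containerID) {
    return ByteBuffer.allocate(PREFIX_LENGTH).putLong(containerID).array();
  }

  // Recover the containerID from the first PREFIX_LENGTH bytes of a key.
  static long containerIdOf(byte[] key) {
    return ByteBuffer.wrap(key, 0, PREFIX_LENGTH).getLong();
  }

  // True if the key belongs to the given container, i.e. it starts with that container's prefix.
  static boolean belongsTo(byte[] key, long containerID) {
    return Arrays.equals(Arrays.copyOf(key, PREFIX_LENGTH), prefixBytes(containerID));
  }

  public static void main(String[] args) {
    byte[] key = ByteBuffer.allocate(2 * Long.BYTES)
        .putLong(42L) // container prefix
        .putLong(7L)  // block localID
        .array();
    System.out.println(containerIdOf(key));  // prints 42
    System.out.println(belongsTo(key, 42L)); // prints true
  }
}

The real keys additionally append the configurable schema V3 key separator after the prefix, which this sketch omits.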
* - * @param key rocksDB original key - * @return containerID + * @param key the input key from which the container ID is to be extracted + * @return the container ID as a long value */ public static long getContainerId(String key) { int index = getContainerKeyPrefixLength(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index b9e7ec7bd5b..7ac5abaae95 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -32,34 +32,30 @@ import java.util.Map; /** - * This class defines the RocksDB structure for datanodes following schema - * version 2, where the block data, metadata, and transactions which are to be - * deleted are put in their own separate column families. + * This class defines the RocksDB structure for datanodes following schema version 2, + * where the block data, metadata, + * and transactions which are to be deleted are put in their own separate column families. */ -public class DatanodeSchemaTwoDBDefinition - extends AbstractDatanodeDBDefinition +public class DatanodeSchemaTwoDBDefinition extends AbstractDatanodeDBDefinition implements DBDefinition.WithMapInterface { - public static final DBColumnFamilyDefinition - BLOCK_DATA = - new DBColumnFamilyDefinition<>( - "block_data", - String.class, - StringCodec.get(), - BlockData.class, - BlockData.getCodec()); - - public static final DBColumnFamilyDefinition - METADATA = - new DBColumnFamilyDefinition<>( + public static final DBColumnFamilyDefinition BLOCK_DATA = + new DBColumnFamilyDefinition<>( + "block_data", + String.class, + StringCodec.get(), + BlockData.class, + BlockData.getCodec()); + + public static final DBColumnFamilyDefinition METADATA = + new DBColumnFamilyDefinition<>( "metadata", String.class, StringCodec.get(), Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETE_TRANSACTION = + public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", Long.class, @@ -67,8 +63,7 @@ public class DatanodeSchemaTwoDBDefinition StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); - public static final DBColumnFamilyDefinition - FINALIZE_BLOCKS = + public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", String.class, @@ -76,8 +71,7 @@ public class DatanodeSchemaTwoDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - LAST_CHUNK_INFO = + public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", String.class, @@ -85,13 +79,12 @@ public class DatanodeSchemaTwoDBDefinition BlockData.class, BlockData.getCodec()); - public DatanodeSchemaTwoDBDefinition(String dbPath, - ConfigurationSource config) { + public DatanodeSchemaTwoDBDefinition(String dbPath, ConfigurationSource config) { super(dbPath, config); } - private static final Map> - COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( + private static final Map> COLUMN_FAMILIES = + DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, 
DELETE_TRANSACTION, @@ -104,8 +97,7 @@ public DatanodeSchemaTwoDBDefinition(String dbPath, } @Override - public DBColumnFamilyDefinition - getBlockDataColumnFamily() { + public DBColumnFamilyDefinition getBlockDataColumnFamily() { return BLOCK_DATA; } @@ -115,16 +107,15 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { } @Override - public DBColumnFamilyDefinition - getLastChunkInfoColumnFamily() { + public DBColumnFamilyDefinition getLastChunkInfoColumnFamily() { return LAST_CHUNK_INFO; } - public DBColumnFamilyDefinition - getDeleteTransactionsColumnFamily() { + public DBColumnFamilyDefinition getDeleteTransactionsColumnFamily() { return DELETE_TRANSACTION; } + @Override public DBColumnFamilyDefinition getFinalizeBlocksColumnFamily() { return FINALIZE_BLOCKS; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java index d791d9bbeab..4c5edf336e9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java @@ -41,8 +41,7 @@ * Interface for interacting with datanode databases. */ public interface DatanodeStore extends Closeable { - String NO_SUCH_BLOCK_ERR_MSG = - "Unable to find the block."; + String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block."; /** * Start datanode manager. @@ -87,7 +86,7 @@ public interface DatanodeStore extends Closeable { Table getDeletedBlocksTable(); /** - * A Table that keeps finalize blocks requested from client. + * A Table that keeps finalize blocks requested from a client. * * @return Table */ @@ -105,33 +104,86 @@ public interface DatanodeStore extends Closeable { */ BatchOperationHandler getBatchHandler(); + /** + * Flushes the log of the underlying database. + * + * @param sync If true, the flush operation will be synchronized to disk. + * @throws IOException If an I/O error occurs during the flush. + */ void flushLog(boolean sync) throws IOException; + /** + * Flushes all pending changes to the underlying database. + * + * @throws IOException If an I/O error occurs during the flush operation. + */ void flushDB() throws IOException; + /** + * Compacts the underlying database to improve space utilization and performance. + * This operation may be necessary to reclaim space from deleted blocks + */ void compactDB() throws IOException; - BlockIterator getBlockIterator(long containerID) - throws IOException; + /** + * Retrieves a BlockIterator that iterates over block data for a specified container ID. + * + * @param containerID The ID of the container whose block data is to be iterated over. + * @return {@link BlockIterator} An iterator over the blocks in the specified container. + * @throws IOException If an I/O error occurs while retrieving the block iterator. + */ + BlockIterator getBlockIterator(long containerID) throws IOException; - BlockIterator getBlockIterator(long containerID, - KeyPrefixFilter filter) throws IOException; + /** + * Retrieves a BlockIterator that iterates over block data for a specified container ID + * and filtered by a given KeyPrefixFilter. + * + * @param containerID The ID of the container whose block data is to be iterated over. + * @param filter The KeyPrefixFilter to apply to the block data iteration. 
+ * @return {@link BlockIterator} An iterator over the blocks in the specified container + * that match the given filter. + * @throws IOException If an I/O error occurs while retrieving the block iterator. + */ + BlockIterator getBlockIterator(long containerID, KeyPrefixFilter filter) throws IOException; - BlockIterator getFinalizeBlockIterator(long containerID, - KeyPrefixFilter filter) throws IOException; + /** + * Retrieves a BlockIterator that iterates over the final blocks for a specific container ID, + * filtered by a given KeyPrefixFilter. + * + * @param containerID The ID of the container whose finalize blocks are to be iterated over. + * @param filter The KeyPrefixFilter to apply to the final blocks' iteration. + * @return {@link BlockIterator} An iterator over the finalized blocks in the specified container + * that match the given filter. + * @throws IOException If an I/O error occurs while retrieving the block iterator. + */ + BlockIterator getFinalizeBlockIterator(long containerID, KeyPrefixFilter filter) throws IOException; /** * Returns if the underlying DB is closed. This call is thread safe. + * * @return true if the DB is closed. */ boolean isClosed(); + /** + * Performs a compaction operation on the underlying database if needed. + * This method checks if the database requires compaction to improve space utilization and performance. + * Compaction may be necessary to reclaim space from deleted blocks or optimize the database structure. + * + * @throws Exception If an error occurs during the compaction process. + */ default void compactionIfNeeded() throws Exception { } - default BlockData getBlockByID(BlockID blockID, - String blockKey) throws IOException { - + /** + * Retrieves the block data associated with the specified block key. + * + * @param blockID The ID of the block to retrieve. + * @param blockKey The key associated with the block in the block data table. + * @return BlockData The data of the specified block. + * @throws IOException If an I/O error occurs, or the block data is not found. + */ + default BlockData getBlockByID(BlockID blockID, String blockKey) throws IOException { // check block data table BlockData blockData = getBlockDataTable().get(blockKey); @@ -141,19 +193,26 @@ default BlockData getBlockByID(BlockID blockID, default BlockData getCompleteBlockData(BlockData blockData, BlockID blockID, String blockKey) throws IOException { if (blockData == null) { - throw new StorageContainerException( - NO_SUCH_BLOCK_ERR_MSG + " BlockID : " + blockID, NO_SUCH_BLOCK); + throw new StorageContainerException(NO_SUCH_BLOCK_ERR_MSG + " BlockID : " + blockID, NO_SUCH_BLOCK); } return blockData; } - default void putBlockByID(BatchOperation batch, boolean incremental, - long localID, BlockData data, KeyValueContainerData containerData, - boolean endOfBlock) - throws IOException { - // old client: override chunk list. - getBlockDataTable().putWithBatch( - batch, containerData.getBlockKey(localID), data); + /** + * Puts a block by its local ID into the block data table within a batch operation. + * + * @param batch The batch operation in which to execute the put operation. + * @param incremental A flag indicating if the operation is incremental. + * @param localID The local ID of the block to be put. + * @param data The block data to be stored. + * @param containerData The container data associated with the block. + * @param endOfBlock A flag indicating if this is the end of block. + * @throws IOException If an I/O error occurs during the operation. 
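The default getBlockByID/getCompleteBlockData pair above reduces to "look the key up in the block data table and fail with a descriptive NO_SUCH_BLOCK error if it is absent". A tiny stand-alone sketch of that contract, with a plain map standing in for the RocksDB table (none of the names below are Ozone types):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Minimal stand-in for the "get the block or fail with a clear message" contract.
final class BlockLookupDemo {
  static final String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block.";

  private final Map<String, byte[]> blockDataTable = new HashMap<>();

  void put(String blockKey, byte[] data) {
    blockDataTable.put(blockKey, data);
  }

  // Mirrors the lookup-or-throw flow: return the stored value or fail with block context.
  byte[] getBlockByKey(String blockKey, long containerID, long localID) throws IOException {
    byte[] data = blockDataTable.get(blockKey);
    if (data == null) {
      throw new IOException(NO_SUCH_BLOCK_ERR_MSG + " BlockID : [" + containerID + ", " + localID + "]");
    }
    return data;
  }
}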
+ */ + default void putBlockByID(BatchOperation batch, boolean incremental, long localID, BlockData data, + KeyValueContainerData containerData, boolean endOfBlock) throws IOException { + // old client: override a chunk list. + getBlockDataTable().putWithBatch(batch, containerData.getBlockKey(localID), data); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java index 2b34fae7398..7e21a9d2688 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java @@ -45,15 +45,18 @@ import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix; /** - * Constructs a datanode store in accordance with schema version 3, which uses - * three column families/tables: - * 1. A block data table. - * 2. A metadata table. - * 3. A Delete Transaction Table. + * Constructs a datanode store in accordance with schema version 3, which uses three column families/tables: + *
    + *
+ *   1. A block data table.
+ *   2. A metadata table.
+ *   3. A Delete Transaction Table.
+ *
* * This is different from schema version 2 from these points: - * - All keys have containerID as prefix. - * - The table 3 has String as key instead of Long since we want to use prefix. + *
    + *
+ *   - All keys have containerID as prefix.
+ *   - The table 3 has String as key instead of Long since we want to use prefix.
+ *
*/ public class DatanodeStoreSchemaThreeImpl extends DatanodeStoreWithIncrementalChunkList implements DeleteTransactionStore { @@ -63,10 +66,9 @@ public class DatanodeStoreSchemaThreeImpl extends DatanodeStoreWithIncrementalCh private final Table deleteTransactionTable; - public DatanodeStoreSchemaThreeImpl(ConfigurationSource config, - String dbPath, boolean openReadOnly) throws IOException { - super(config, new DatanodeSchemaThreeDBDefinition(dbPath, config), - openReadOnly); + public DatanodeStoreSchemaThreeImpl(ConfigurationSource config, String dbPath, boolean openReadOnly) + throws IOException { + super(config, new DatanodeSchemaThreeDBDefinition(dbPath, config), openReadOnly); this.deleteTransactionTable = ((DatanodeSchemaThreeDBDefinition) getDbDef()) .getDeleteTransactionsColumnFamily().getTable(getStore()); } @@ -77,30 +79,27 @@ public Table getDeleteTransactionTable() { } @Override - public BlockIterator getBlockIterator(long containerID) - throws IOException { - // Here we need to filter the keys with containerID as prefix - // and followed by metadata prefixes such as #deleting#. - return new KeyValueBlockIterator(containerID, - getBlockDataTableWithIterator() - .iterator(getContainerKeyPrefix(containerID)), - new MetadataKeyFilters.KeyPrefixFilter().addFilter( - getContainerKeyPrefix(containerID) + "#", true)); + public BlockIterator getBlockIterator(long containerID) throws IOException { + // Here we need to filter the keys with containerID as prefix and followed by metadata prefixes such as #deleting#. + return new KeyValueBlockIterator( + containerID, + getBlockDataTableWithIterator().iterator(getContainerKeyPrefix(containerID)), + new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(getContainerKeyPrefix(containerID) + "#", true)); } @Override - public BlockIterator getBlockIterator(long containerID, - MetadataKeyFilters.KeyPrefixFilter filter) throws IOException { - return new KeyValueBlockIterator(containerID, - getBlockDataTableWithIterator() - .iterator(getContainerKeyPrefix(containerID)), filter); + public BlockIterator getBlockIterator(long containerID, MetadataKeyFilters.KeyPrefixFilter filter) + throws IOException { + return new KeyValueBlockIterator(containerID, getBlockDataTableWithIterator() + .iterator(getContainerKeyPrefix(containerID)), filter); } @Override - public BlockIterator getFinalizeBlockIterator(long containerID, - MetadataKeyFilters.KeyPrefixFilter filter) throws IOException { - return new KeyValueBlockLocalIdIterator(containerID, - getFinalizeBlocksTableWithIterator().iterator(getContainerKeyPrefix(containerID)), filter); + public BlockIterator getFinalizeBlockIterator(long containerID, MetadataKeyFilters.KeyPrefixFilter filter) + throws IOException { + return new KeyValueBlockLocalIdIterator(containerID, getFinalizeBlocksTableWithIterator() + .iterator(getContainerKeyPrefix(containerID)), filter); } public void removeKVContainerData(long containerID) throws IOException { @@ -116,52 +115,68 @@ public void removeKVContainerData(long containerID) throws IOException { } } - public void dumpKVContainerData(long containerID, File dumpDir) - throws IOException { + /** + * Dumps key-value container data for the specified container ID into the given directory. 
+ * + * @param containerID the identifier of the container whose data will be dumped + * @param dumpDir the directory where the dumped data files will be stored + * @throws IOException if an I/O error occurs during the dumping process + */ + public void dumpKVContainerData(long containerID, File dumpDir) throws IOException { String prefix = getContainerKeyPrefix(containerID); - getMetadataTable().dumpToFileWithPrefix( - getTableDumpFile(getMetadataTable(), dumpDir), prefix); - getBlockDataTable().dumpToFileWithPrefix( - getTableDumpFile(getBlockDataTable(), dumpDir), prefix); + getMetadataTable().dumpToFileWithPrefix(getTableDumpFile(getMetadataTable(), dumpDir), prefix); + getBlockDataTable().dumpToFileWithPrefix(getTableDumpFile(getBlockDataTable(), dumpDir), prefix); if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) { - getLastChunkInfoTable().dumpToFileWithPrefix( - getTableDumpFile(getLastChunkInfoTable(), dumpDir), prefix); + getLastChunkInfoTable().dumpToFileWithPrefix(getTableDumpFile(getLastChunkInfoTable(), dumpDir), prefix); } - getDeleteTransactionTable().dumpToFileWithPrefix( - getTableDumpFile(getDeleteTransactionTable(), dumpDir), - prefix); + getDeleteTransactionTable().dumpToFileWithPrefix(getTableDumpFile(getDeleteTransactionTable(), dumpDir), prefix); } - public void loadKVContainerData(File dumpDir) - throws IOException { - getMetadataTable().loadFromFile( - getTableDumpFile(getMetadataTable(), dumpDir)); - getBlockDataTable().loadFromFile( - getTableDumpFile(getBlockDataTable(), dumpDir)); + /** + * Loads key-value container data from the specified directory. + * This method loads data for the metadata, block data, + * and deletes transaction tables from files in the provided directory. + * + * @param dumpDir Directory containing the dump files for the tables. + * @throws IOException If an I/O error occurs while loading data from the files. + */ + public void loadKVContainerData(File dumpDir) throws IOException { + getMetadataTable().loadFromFile(getTableDumpFile(getMetadataTable(), dumpDir)); + getBlockDataTable().loadFromFile(getTableDumpFile(getBlockDataTable(), dumpDir)); if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) { - getLastChunkInfoTable().loadFromFile( - getTableDumpFile(getLastChunkInfoTable(), dumpDir)); + getLastChunkInfoTable().loadFromFile(getTableDumpFile(getLastChunkInfoTable(), dumpDir)); } - getDeleteTransactionTable().loadFromFile( - getTableDumpFile(getDeleteTransactionTable(), dumpDir)); + getDeleteTransactionTable().loadFromFile(getTableDumpFile(getDeleteTransactionTable(), dumpDir)); } - public static File getTableDumpFile(Table table, - File dumpDir) throws IOException { + /** + * Constructs a file object representing the location for the table dump file within the specified directory. + * + * @param table the table for which the dump file is being created + * @param dumpDir the directory where the dump file will be located + * @return the file object representing the dump file for the provided table + * @throws IOException if an I/O error occurs when creating the file object + */ + public static File getTableDumpFile(Table table, File dumpDir) throws IOException { return new File(dumpDir, table.getName() + DUMP_FILE_SUFFIX); } + /** + * Constructs a file object representing the directory for dumping data within the specified metadata directory. 
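Taken together, dumpKVContainerData, loadKVContainerData and the getTableDumpFile/getDumpDir helpers give a simple container-move workflow: dump one container's rows from every column family into per-table files under a dump directory, transfer that directory, then load it into the destination store. A hedged usage sketch; the method names are the ones declared in this class, while the surrounding setup and error handling are illustrative only:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;

final class ContainerDumpExample {

  // Dump a single container's rows from the source store into the dump directory under metaDir.
  static File dumpContainer(DatanodeStoreSchemaThreeImpl sourceStore, long containerID, File metaDir)
      throws IOException {
    File dumpDir = DatanodeStoreSchemaThreeImpl.getDumpDir(metaDir);
    if (!dumpDir.exists() && !dumpDir.mkdirs()) {
      throw new IOException("Cannot create dump directory " + dumpDir);
    }
    sourceStore.dumpKVContainerData(containerID, dumpDir);
    return dumpDir;
  }

  // Load the previously dumped rows into the destination store.
  static void loadContainer(DatanodeStoreSchemaThreeImpl targetStore, File dumpDir) throws IOException {
    targetStore.loadKVContainerData(dumpDir);
  }
}

Both dump and load honor the HBASE_SUPPORT finalization check for the last_chunk_info table, so the two sides of the workflow stay symmetric.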
+ * + * @param metaDir the metadata directory where the dump directory will be created + * @return the file object representing the dump directory + */ public static File getDumpDir(File metaDir) { return new File(metaDir, DUMP_DIR); } + @Override public void compactionIfNeeded() throws Exception { - // Calculate number of files per level and size per level + // Calculate the number of files per level and size per level RocksDatabase rocksDB = ((RDBStore)getStore()).getDb(); - List liveFileMetaDataList = - rocksDB.getLiveFilesMetaData(); - DatanodeConfiguration df = - getDbDef().getConfig().getObject(DatanodeConfiguration.class); + List liveFileMetaDataList = rocksDB.getLiveFilesMetaData(); + DatanodeConfiguration df = getDbDef().getConfig().getObject(DatanodeConfiguration.class); int numThreshold = df.getAutoCompactionSmallSstFileNum(); long sizeThreshold = df.getAutoCompactionSmallSstFileSize(); Map>> stat = new HashMap<>(); @@ -182,10 +197,8 @@ public void compactionIfNeeded() throws Exception { }); } - for (Map.Entry>> entry : - stat.entrySet()) { - for (Map.Entry> innerEntry: - entry.getValue().entrySet()) { + for (Map.Entry>> entry : stat.entrySet()) { + for (Map.Entry> innerEntry: entry.getValue().entrySet()) { if (innerEntry.getValue().size() > numThreshold) { ColumnFamily columnFamily = null; // Find CF Handler @@ -200,32 +213,32 @@ public void compactionIfNeeded() throws Exception { long startCId = Long.MAX_VALUE; long endCId = Long.MIN_VALUE; for (LiveFileMetaData file: innerEntry.getValue()) { - long firstCId = DatanodeSchemaThreeDBDefinition.getContainerId( - FixedLengthStringCodec.bytes2String(file.smallestKey())); - long lastCId = DatanodeSchemaThreeDBDefinition.getContainerId( - FixedLengthStringCodec.bytes2String(file.largestKey())); + long firstCId = + DatanodeSchemaThreeDBDefinition + .getContainerId(FixedLengthStringCodec.bytes2String(file.smallestKey())); + long lastCId = + DatanodeSchemaThreeDBDefinition + .getContainerId(FixedLengthStringCodec.bytes2String(file.largestKey())); startCId = Math.min(firstCId, startCId); endCId = Math.max(lastCId, endCId); } // Do the range compaction - ManagedCompactRangeOptions options = - new ManagedCompactRangeOptions(); - options.setBottommostLevelCompaction( - ManagedCompactRangeOptions.BottommostLevelCompaction.kForce); - LOG.info("CF {} level {} small file number {} exceeds threshold {}" - + ". Auto compact small sst files.", entry.getKey(), - innerEntry.getKey(), innerEntry.getValue().size(), + ManagedCompactRangeOptions options = new ManagedCompactRangeOptions(); + options.setBottommostLevelCompaction(ManagedCompactRangeOptions.BottommostLevelCompaction.kForce); + LOG.info( + "CF {} level {} small file number {} exceeds threshold {}. 
Auto compact small sst files.", + entry.getKey(), + innerEntry.getKey(), + innerEntry.getValue().size(), numThreshold); - rocksDB.compactRange(columnFamily, - DatanodeSchemaThreeDBDefinition - .getContainerKeyPrefixBytes(startCId), - DatanodeSchemaThreeDBDefinition - .getContainerKeyPrefixBytes(endCId + 1), + rocksDB.compactRange( + columnFamily, + DatanodeSchemaThreeDBDefinition.getContainerKeyPrefixBytes(startCId), + DatanodeSchemaThreeDBDefinition.getContainerKeyPrefixBytes(endCId + 1), options); } else { - LOG.warn("Failed to find cf {} in DB {}", entry.getKey(), - getDbDef().getClass()); + LOG.warn("Failed to find cf {} in DB {}", entry.getKey(), getDbDef().getClass()); } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java index a71bb93963a..42beb907465 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java @@ -135,9 +135,10 @@ private static boolean shouldAppendLastChunk(boolean endOfBlock, return isFullChunk(data.getChunks().get(data.getChunks().size() - 1)); } + @Override public void putBlockByID(BatchOperation batch, boolean incremental, - long localID, BlockData data, KeyValueContainerData containerData, - boolean endOfBlock) throws IOException { + long localID, BlockData data, KeyValueContainerData containerData, + boolean endOfBlock) throws IOException { if (!incremental || !isPartialChunkList(data)) { // Case (1) old client: override chunk list. getBlockDataTable().putWithBatch( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index ecc4e80b4bf..acf533bfca1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -50,11 +50,9 @@ public class ContainerController { private final ContainerSet containerSet; private final Map handlers; - private static final Logger LOG = - LoggerFactory.getLogger(ContainerController.class); + private static final Logger LOG = LoggerFactory.getLogger(ContainerController.class); - public ContainerController(final ContainerSet containerSet, - final Map handlers) { + public ContainerController(final ContainerSet containerSet, final Map handlers) { this.containerSet = containerSet; this.handlers = handlers; } @@ -69,6 +67,12 @@ public Container getContainer(final long containerId) { return containerSet.getContainer(containerId); } + /** + * Retrieves the file path location of the specified container. 
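The auto-compaction pass in compactionIfNeeded above is essentially "bucket the live SST files by column family and level, then trigger a range compaction for any bucket that has accumulated more than the configured number of small files". A simplified, dependency-free sketch of that bucketing and threshold check (SstFile is a stand-in for the handful of LiveFileMetaData fields the check needs, not a RocksDB type):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class SmallSstFileCheck {

  // Stand-in for the LiveFileMetaData fields the check relies on.
  static final class SstFile {
    final String columnFamily;
    final int level;
    final long sizeBytes;

    SstFile(String columnFamily, int level, long sizeBytes) {
      this.columnFamily = columnFamily;
      this.level = level;
      this.sizeBytes = sizeBytes;
    }
  }

  // Returns "cf@level" buckets holding more than numThreshold files smaller than sizeThreshold bytes.
  static List<String> bucketsNeedingCompaction(List<SstFile> liveFiles, int numThreshold, long sizeThreshold) {
    Map<String, Map<Integer, Integer>> smallFileCount = new HashMap<>();
    for (SstFile file : liveFiles) {
      if (file.sizeBytes < sizeThreshold) {
        smallFileCount.computeIfAbsent(file.columnFamily, cf -> new HashMap<>())
            .merge(file.level, 1, Integer::sum);
      }
    }
    List<String> candidates = new ArrayList<>();
    smallFileCount.forEach((cf, perLevel) ->
        perLevel.forEach((level, count) -> {
          if (count > numThreshold) {
            candidates.add(cf + "@L" + level);
          }
        }));
    return candidates;
  }
}

The real method then derives the smallest and largest containerID in each candidate bucket from the SST files' key ranges and compacts just that key range with forced bottommost-level compaction.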
+ * + * @param containerId the ID of the container to find + * @return the container file path if the container exists, otherwise "nonexistent" + */ public String getContainerLocation(final long containerId) { Container cont = containerSet.getContainer(containerId); if (cont != null) { @@ -84,15 +88,13 @@ public String getContainerLocation(final long containerId) { * @param containerId Id of the container to update * @throws IOException in case of exception */ - public void markContainerForClose(final long containerId) - throws IOException { + public void markContainerForClose(final long containerId) throws IOException { Container container = containerSet.getContainer(containerId); if (container == null) { String warning; Set missingContainerSet = containerSet.getMissingContainerSet(); if (missingContainerSet.contains(containerId)) { - warning = "The Container is in the MissingContainerSet " + - "hence we can't close it. ContainerID: " + containerId; + warning = "The Container is in the MissingContainerSet hence we can't close it. ContainerID: " + containerId; } else { warning = "The Container is not found. ContainerID: " + containerId; } @@ -112,14 +114,12 @@ public void markContainerForClose(final long containerId) * @param reason The reason the container was marked unhealthy * @throws IOException in case of exception */ - public void markContainerUnhealthy(final long containerId, ScanResult reason) - throws IOException { + public void markContainerUnhealthy(final long containerId, ScanResult reason) throws IOException { Container container = containerSet.getContainer(containerId); if (container != null) { getHandler(container).markContainerUnhealthy(container, reason); } else { - LOG.warn("Container {} not found, may be deleted, skip mark UNHEALTHY", - containerId); + LOG.warn("Container {} not found, may be deleted, skip mark UNHEALTHY", containerId); } } @@ -129,8 +129,7 @@ public void markContainerUnhealthy(final long containerId, ScanResult reason) * @return ContainerReportsProto * @throws IOException in case of exception */ - public ContainerReportsProto getContainerReport() - throws IOException { + public ContainerReportsProto getContainerReport() throws IOException { return containerSet.getContainerReport(); } @@ -138,18 +137,16 @@ public ContainerReportsProto getContainerReport() * Quasi closes a container given its id. * * @param containerId Id of the container to quasi close - * @param reason The reason the container was quasi closed, for logging - * purposes. + * @param reason The reason the container was quasi closed, for logging purposes. * @throws IOException in case of exception */ - public void quasiCloseContainer(final long containerId, String reason) - throws IOException { + public void quasiCloseContainer(final long containerId, String reason) throws IOException { final Container container = containerSet.getContainer(containerId); getHandler(container).quasiCloseContainer(container, reason); } /** - * Closes a container given its Id. + * Closes a container given its id. * * @param containerId Id of the container to close * @throws IOException in case of exception @@ -163,18 +160,22 @@ public void closeContainer(final long containerId) throws IOException { * Returns the Container given a container id. 
* * @param containerId ID of the container - * @return Container */ - public void addFinalizedBlock(final long containerId, - final long localId) { + public void addFinalizedBlock(final long containerId, final long localId) { Container container = containerSet.getContainer(containerId); if (container != null) { getHandler(container).addFinalizedBlock(container, localId); } } - public boolean isFinalizedBlockExist(final long containerId, - final long localId) { + /** + * Checks if a finalized block exists within a specified container. + * + * @param containerId ID of the container to check + * @param localId ID of the block to check within the container + * @return {@code true} if the finalized block exists, {@code false} otherwise + */ + public boolean isFinalizedBlockExist(final long containerId, final long localId) { Container container = containerSet.getContainer(containerId); if (container != null) { return getHandler(container).isFinalizedBlockExist(container, localId); @@ -182,29 +183,43 @@ public boolean isFinalizedBlockExist(final long containerId, return false; } - public Container importContainer( - final ContainerData containerData, - final InputStream rawContainerStream, + /** + * Imports a container based on the provided container data, raw container stream, + * and tar container packer. + * + * @param containerData the data representing the container to be imported + * @param rawContainerStream the InputStream containing the raw container data + * @param packer the TarContainerPacker used to handle the container import + * @return the imported Container + * @throws IOException if an I/O error occurs during the import process + */ + public Container importContainer(final ContainerData containerData, final InputStream rawContainerStream, final TarContainerPacker packer) throws IOException { - return handlers.get(containerData.getContainerType()) - .importContainer(containerData, rawContainerStream, packer); + return handlers.get(containerData.getContainerType()).importContainer(containerData, rawContainerStream, packer); } - public void exportContainer(final ContainerType type, - final long containerId, final OutputStream outputStream, + /** + * Exports a container of the specified type and ID to the provided output stream, + * using the given tar container packer. + * + * @param type the type of the container to be exported + * @param containerId the ID of the container to be exported + * @param outputStream the output stream to which the container will be exported + * @param packer the tar container packer used for packing the container data + * @throws IOException if an I/O error occurs during exporting + */ + public void exportContainer(final ContainerType type, final long containerId, final OutputStream outputStream, final TarContainerPacker packer) throws IOException { - handlers.get(type).exportContainer( - containerSet.getContainer(containerId), outputStream, packer); + handlers.get(type).exportContainer(containerSet.getContainer(containerId), outputStream, packer); } /** - * Deletes a container given its Id. + * Deletes a container given its id. * @param containerId Id of the container to be deleted * @param force if this is set to true, we delete container without checking * state of the container. 
*/ - public void deleteContainer(final long containerId, boolean force) - throws IOException { + public void deleteContainer(final long containerId, boolean force) throws IOException { final Container container = containerSet.getContainer(containerId); if (container != null) { getHandler(container).deleteContainer(container, force); @@ -221,13 +236,17 @@ private Handler getHandler(final Container container) { return handlers.get(container.getContainerType()); } + /** + * Retrieves an iterable collection of all containers managed by this controller. + * + * @return an iterable collection of containers + */ public Iterable> getContainers() { return containerSet; } /** - * Return an iterator of containers which are associated with the specified - * volume. + * Return an iterator of containers which are associated with the specified volume. * * @param volume the HDDS volume which should be used to filter containers * @return {@literal Iterator} @@ -236,15 +255,12 @@ public Iterator> getContainers(HddsVolume volume) { return containerSet.getContainerIterator(volume); } - void updateDataScanTimestamp(long containerId, Instant timestamp) - throws IOException { + void updateDataScanTimestamp(long containerId, Instant timestamp) throws IOException { Container container = containerSet.getContainer(containerId); if (container != null) { container.updateDataScanTimestamp(timestamp); } else { - LOG.warn("Container {} not found, may be deleted, " + - "skip update DataScanTimestamp", containerId); + LOG.warn("Container {} not found, may be deleted, skip update DataScanTimestamp", containerId); } } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 5cdeaaa5787..d613c26ebe6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -61,7 +61,6 @@ import org.apache.hadoop.ozone.container.replication.ReplicationServer; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.Timer; import org.apache.ratis.grpc.GrpcTlsConfig; import org.slf4j.Logger; @@ -99,9 +98,7 @@ * layer. 
*/ public class OzoneContainer { - - private static final Logger LOG = LoggerFactory.getLogger( - OzoneContainer.class); + private static final Logger LOG = LoggerFactory.getLogger(OzoneContainer.class); private final HddsDispatcher hddsDispatcher; private final Map handlers; @@ -118,13 +115,12 @@ public class OzoneContainer { private List dataScanners; private List backgroundScanners; private final BlockDeletingService blockDeletingService; - private final StaleRecoveringContainerScrubbingService - recoveringContainerScrubbingService; + private final StaleRecoveringContainerScrubbingService recoveringContainerScrubbingService; private final GrpcTlsConfig tlsClientConfig; private final AtomicReference initializingStatus; private final ReplicationServer replicationServer; - private DatanodeDetails datanodeDetails; - private StateContext context; + private final DatanodeDetails datanodeDetails; + private final StateContext context; private ScheduledExecutorService dbCompactionExecutorService; private final ContainerMetrics metrics; @@ -134,13 +130,15 @@ enum InitializingStatus { } /** - * Construct OzoneContainer object. + * Constructs an OzoneContainer instance that manages various services and resources for a datanode. * - * @param datanodeDetails - * @param conf - * @param certClient - * @throws DiskOutOfSpaceException - * @throws IOException + * @param hddsDatanodeService Reference to the HddsDatanodeService instance. + * @param datanodeDetails Contains details of the datanode such as UUID and hostname. + * @param conf Configuration source for reading various settings. + * @param context State context containing information about the current state. + * @param certClient Certificate client used for handling security certificates. + * @param secretKeyClient Client used to verify secret keys. + * @throws IOException If an I/O error occurs during initialization. */ public OzoneContainer(HddsDatanodeService hddsDatanodeService, DatanodeDetails datanodeDetails, ConfigurationSource conf, @@ -149,29 +147,34 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, config = conf; this.datanodeDetails = datanodeDetails; this.context = context; - this.volumeChecker = new StorageVolumeChecker(conf, new Timer(), - datanodeDetails.threadNamePrefix()); - - volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, - context, VolumeType.DATA_VOLUME, volumeChecker); + this.volumeChecker = new StorageVolumeChecker(conf, new Timer(), datanodeDetails.threadNamePrefix()); + + volumeSet = new MutableVolumeSet( + datanodeDetails.getUuidString(), + conf, + context, + VolumeType.DATA_VOLUME, + volumeChecker); volumeSet.setFailedVolumeListener(this::handleVolumeFailures); - metaVolumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, - context, VolumeType.META_VOLUME, volumeChecker); - - dbVolumeSet = HddsServerUtil.getDatanodeDbDirs(conf).isEmpty() ? null : - new MutableVolumeSet(datanodeDetails.getUuidString(), conf, - context, VolumeType.DB_VOLUME, volumeChecker); - final DatanodeConfiguration dnConf = - conf.getObject(DatanodeConfiguration.class); + metaVolumeSet = new MutableVolumeSet( + datanodeDetails.getUuidString(), + conf, + context, + VolumeType.META_VOLUME, + volumeChecker); + + dbVolumeSet = HddsServerUtil.getDatanodeDbDirs(conf).isEmpty() + ? 
null + : new MutableVolumeSet(datanodeDetails.getUuidString(), conf, context, VolumeType.DB_VOLUME, volumeChecker); + final DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); if (SchemaV3.isFinalizedAndEnabled(config)) { - HddsVolumeUtil.loadAllHddsVolumeDbStore( - volumeSet, dbVolumeSet, false, LOG); + HddsVolumeUtil.loadAllHddsVolumeDbStore(volumeSet, dbVolumeSet, false, LOG); if (dnConf.autoCompactionSmallSstFile()) { this.dbCompactionExecutorService = Executors.newScheduledThreadPool( - dnConf.getAutoCompactionSmallSstFileThreads(), - new ThreadFactoryBuilder().setNameFormat( - datanodeDetails.threadNamePrefix() + - "RocksDBCompactionThread-%d").build()); + dnConf.getAutoCompactionSmallSstFileThreads(), + new ThreadFactoryBuilder() + .setNameFormat(datanodeDetails.threadNamePrefix() + "RocksDBCompactionThread-%d") + .build()); this.dbCompactionExecutorService.scheduleWithFixedDelay(this::compactDb, dnConf.getAutoCompactionSmallSstFileIntervalMinutes(), dnConf.getAutoCompactionSmallSstFileIntervalMinutes(), @@ -211,8 +214,12 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, } SecurityConfig secConf = new SecurityConfig(conf); - hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet, - handlers, context, metrics, + hddsDispatcher = new HddsDispatcher( + config, + containerSet, + handlers, + context, + metrics, TokenVerifier.create(secConf, secretKeyClient)); /* @@ -222,8 +229,13 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, */ controller = new ContainerController(containerSet, handlers); - writeChannel = XceiverServerRatis.newXceiverServerRatis(hddsDatanodeService, - datanodeDetails, config, hddsDispatcher, controller, certClient, + writeChannel = XceiverServerRatis.newXceiverServerRatis( + hddsDatanodeService, + datanodeDetails, + config, + hddsDispatcher, + controller, + certClient, context); replicationServer = new ReplicationServer( @@ -231,22 +243,21 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, conf.getObject(ReplicationConfig.class), secConf, certClient, - new ContainerImporter(conf, containerSet, controller, - volumeSet), + new ContainerImporter(conf, containerSet, controller, volumeSet), datanodeDetails.threadNamePrefix()); - readChannel = new XceiverServerGrpc( - datanodeDetails, config, hddsDispatcher, certClient); + readChannel = new XceiverServerGrpc(datanodeDetails, config, hddsDispatcher, certClient); Duration blockDeletingSvcInterval = dnConf.getBlockDeletionInterval(); - long blockDeletingServiceTimeout = config - .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); + long blockDeletingServiceTimeout = config.getTimeDuration( + OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, + OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + + int blockDeletingServiceWorkerSize = config.getInt( + OZONE_BLOCK_DELETING_SERVICE_WORKERS, + OZONE_BLOCK_DELETING_SERVICE_WORKERS_DEFAULT); - int blockDeletingServiceWorkerSize = config - .getInt(OZONE_BLOCK_DELETING_SERVICE_WORKERS, - OZONE_BLOCK_DELETING_SERVICE_WORKERS_DEFAULT); blockDeletingService = new BlockDeletingService(this, blockDeletingSvcInterval.toMillis(), blockDeletingServiceTimeout, TimeUnit.MILLISECONDS, @@ -254,17 +265,16 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, datanodeDetails.threadNamePrefix(), context.getParent().getReconfigurationHandler()); - Duration recoveringContainerScrubbingSvcInterval = - 
dnConf.getRecoveringContainerScrubInterval(); + Duration recoveringContainerScrubbingSvcInterval = dnConf.getRecoveringContainerScrubInterval(); - long recoveringContainerScrubbingServiceTimeout = config - .getTimeDuration(OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_TIMEOUT, - OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); + long recoveringContainerScrubbingServiceTimeout = config.getTimeDuration( + OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_TIMEOUT, + OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); - int recoveringContainerScrubbingServiceWorkerSize = config - .getInt(OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS, - OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS_DEFAULT); + int recoveringContainerScrubbingServiceWorkerSize = config.getInt( + OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS, + OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS_DEFAULT); recoveringContainerScrubbingService = new StaleRecoveringContainerScrubbingService( @@ -275,44 +285,43 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, containerSet); if (certClient != null && secConf.isGrpcTlsEnabled()) { - tlsClientConfig = new GrpcTlsConfig( - certClient.getKeyManager(), - certClient.getTrustManager(), true); + tlsClientConfig = new GrpcTlsConfig(certClient.getKeyManager(), certClient.getTrustManager(), true); } else { tlsClientConfig = null; } - initializingStatus = - new AtomicReference<>(InitializingStatus.UNINITIALIZED); + initializingStatus = new AtomicReference<>(InitializingStatus.UNINITIALIZED); } /** * Shorthand constructor used for testing in non-secure context. */ @VisibleForTesting - public OzoneContainer( - DatanodeDetails datanodeDetails, ConfigurationSource conf, - StateContext context) throws IOException { + public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource conf, StateContext context) + throws IOException { this(null, datanodeDetails, conf, context, null, null); } + /** + * Retrieves the TLS (Transport Layer Security) client configuration for the Ozone container. + * + * @return GrpcTlsConfig instance representing the TLS client configuration. + */ public GrpcTlsConfig getTlsClientConfig() { return tlsClientConfig; } /** - * Build's container map after volume format. + * Build's a container map after volume format. */ @VisibleForTesting public void buildContainerSet() { - Iterator volumeSetIterator = volumeSet.getVolumesList() - .iterator(); + Iterator volumeSetIterator = volumeSet.getVolumesList().iterator(); ArrayList volumeThreads = new ArrayList<>(); long startTime = System.currentTimeMillis(); - // Load container inspectors that may be triggered at startup based on - // system properties set. These can inspect and possibly repair - // containers as we iterate them here. + // Load container inspectors that may be triggered at startup based on system properties set. + // These can inspect and possibly repair containers as we iterate them here. 
ContainerInspectorUtil.load(); String threadNamePrefix = datanodeDetails.threadNamePrefix(); ThreadFactory threadFactory = new ThreadFactoryBuilder() @@ -321,39 +330,40 @@ public void buildContainerSet() { .build(); while (volumeSetIterator.hasNext()) { StorageVolume volume = volumeSetIterator.next(); - ContainerReader containerReader = new ContainerReader(volumeSet, - (HddsVolume) volume, containerSet, config, true); + ContainerReader containerReader = new ContainerReader( + volumeSet, + (HddsVolume) volume, + containerSet, + config, + true); Thread thread = threadFactory.newThread(containerReader); thread.start(); volumeThreads.add(thread); } try { - for (int i = 0; i < volumeThreads.size(); i++) { - volumeThreads.get(i).join(); + for (Thread volumeThread : volumeThreads) { + volumeThread.join(); } } catch (InterruptedException ex) { LOG.error("Volume Threads Interrupted exception", ex); Thread.currentThread().interrupt(); } - // After all containers have been processed, turn off container - // inspectors so they are not hit during normal datanode execution. + // After all containers have been processed, + // turn off container inspectors so they are not hit during normal datanode execution. ContainerInspectorUtil.unload(); - LOG.info("Build ContainerSet costs {}s", - (System.currentTimeMillis() - startTime) / 1000); + LOG.info("Build ContainerSet costs {}s", (System.currentTimeMillis() - startTime) / 1000); } /** * Start background daemon thread for performing container integrity checks. */ private void startContainerScrub() { - ContainerScannerConfiguration c = config.getObject( - ContainerScannerConfiguration.class); + ContainerScannerConfiguration c = config.getObject(ContainerScannerConfiguration.class); if (!c.isEnabled()) { - LOG.info("Scheduled background container scanners and " + - "the on-demand container scanner have been disabled."); + LOG.info("Scheduled background container scanners and the on-demand container scanner have been disabled."); return; } @@ -373,14 +383,12 @@ private void startContainerScrub() { private void initContainerScanner(ContainerScannerConfiguration c) { if (c.getBandwidthPerVolume() == 0L) { - LOG.warn(VOLUME_BYTES_PER_SECOND_KEY + " is set to 0, " + - "so background container data scanner will not start."); + LOG.warn("{} is set to 0, so background container data scanner will not start.", VOLUME_BYTES_PER_SECOND_KEY); return; } dataScanners = new ArrayList<>(); for (StorageVolume v : volumeSet.getVolumesList()) { - BackgroundContainerDataScanner s = - new BackgroundContainerDataScanner(c, controller, (HddsVolume) v); + BackgroundContainerDataScanner s = new BackgroundContainerDataScanner(c, controller, (HddsVolume) v); s.start(); dataScanners.add(s); backgroundScanners.add(s); @@ -389,8 +397,7 @@ private void initContainerScanner(ContainerScannerConfiguration c) { private void initMetadataScanner(ContainerScannerConfiguration c) { if (this.metadataScanner == null) { - this.metadataScanner = - new BackgroundContainerMetadataScanner(c, controller); + this.metadataScanner = new BackgroundContainerMetadataScanner(c, controller); backgroundScanners.add(metadataScanner); } this.metadataScanner.start(); @@ -398,8 +405,8 @@ private void initMetadataScanner(ContainerScannerConfiguration c) { private void initOnDemandContainerScanner(ContainerScannerConfiguration c) { if (c.getOnDemandBandwidthPerVolume() == 0L) { - LOG.warn(ON_DEMAND_VOLUME_BYTES_PER_SECOND_KEY + " is set to 0, " + - "so the on-demand container data scanner will not start."); + LOG.warn("{} 
is set to 0, so the on-demand container data scanner will not start.", + ON_DEMAND_VOLUME_BYTES_PER_SECOND_KEY); return; } OnDemandContainerDataScanner.init(c, controller); @@ -436,17 +443,14 @@ public void resumeContainerScrub() { /** * Starts serving requests to ozone container. - * - * @throws IOException */ public void start(String clusterId) throws IOException { - // If SCM HA is enabled, OzoneContainer#start() will be called multi-times - // from VersionEndpointTask. The first call should do the initializing job, + // If SCM HA is enabled, OzoneContainer#start() will be called multi-times from VersionEndpointTask. + // The first call should do the initializing job, // the successive calls should wait until OzoneContainer is initialized. - if (!initializingStatus.compareAndSet( - InitializingStatus.UNINITIALIZED, InitializingStatus.INITIALIZING)) { + if (!initializingStatus.compareAndSet(InitializingStatus.UNINITIALIZED, InitializingStatus.INITIALIZING)) { - // wait OzoneContainer to finish its initializing. + // Wait OzoneContainer to finish its initializing. while (initializingStatus.get() != InitializingStatus.INITIALIZED) { try { Thread.sleep(1); @@ -460,11 +464,9 @@ public void start(String clusterId) throws IOException { buildContainerSet(); - // Start background volume checks, which will begin after the configured - // delay. + // Start background volume checks, which will begin after the configured delay. volumeChecker.start(); - // Do an immediate check of all volumes to ensure datanode health before - // proceeding. + // Do an immediate check of all volumes to ensure datanode health before proceeding. volumeSet.checkAllVolumes(); metaVolumeSet.checkAllVolumes(); // DB volume set may be null if dedicated DB volumes are not used. @@ -478,10 +480,10 @@ public void start(String clusterId) throws IOException { replicationServer.start(); datanodeDetails.setPort(Name.REPLICATION, replicationServer.getPort()); - writeChannel.start(); - readChannel.start(); hddsDispatcher.init(); hddsDispatcher.setClusterId(clusterId); + writeChannel.start(); + readChannel.start(); blockDeletingService.start(); recoveringContainerScrubbingService.start(); @@ -515,12 +517,21 @@ public void stop() { ContainerMetrics.remove(); } + /** + * Handles volume failures by delegating the task to the container set. + * If the containerSet instance is not null, it invokes its handleVolumeFailures method with the current context. + */ public void handleVolumeFailures() { if (containerSet != null) { containerSet.handleVolumeFailures(context); } } + /** + * Retrieves the container set managed by this OzoneContainer. + * + * @return The ContainerSet instance associated with this OzoneContainer. + */ @VisibleForTesting public ContainerSet getContainerSet() { return containerSet; @@ -533,20 +544,34 @@ public ContainerSet getContainerSet() { */ public PipelineReportsProto getPipelineReport() { - PipelineReportsProto.Builder pipelineReportsProto = - PipelineReportsProto.newBuilder(); + PipelineReportsProto.Builder pipelineReportsProto = PipelineReportsProto.newBuilder(); pipelineReportsProto.addAllPipelineReport(writeChannel.getPipelineReport()); return pipelineReportsProto.build(); } + /** + * Retrieves the write channel used by this OzoneContainer. + * + * @return An instance of XceiverServerSpi representing the writing channel for this container. + */ public XceiverServerSpi getWriteChannel() { return writeChannel; } + /** + * Retrieves the read channel used by this OzoneContainer. 
+ * + * @return An instance of XceiverServerSpi representing the reading channel for this container. + */ public XceiverServerSpi getReadChannel() { return readChannel; } + /** + * Retrieves the container controller that manages various container operations. + * + * @return ContainerController instance associated with this OzoneContainer. + */ public ContainerController getController() { return controller; } @@ -554,61 +579,93 @@ public ContainerController getController() { /** * Returns node report of container storage usage. */ - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { + public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() throws IOException { StorageLocationReport[] reports = volumeSet.getStorageReport(); - StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb - = StorageContainerDatanodeProtocolProtos. - NodeReportProto.newBuilder(); - for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage(config)); + StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb = + StorageContainerDatanodeProtocolProtos.NodeReportProto.newBuilder(); + for (StorageLocationReport report : reports) { + nrb.addStorageReport(report.getProtoBufMessage(config)); } StorageLocationReport[] metaReports = metaVolumeSet.getStorageReport(); - for (int i = 0; i < metaReports.length; i++) { - nrb.addMetadataStorageReport( - metaReports[i].getMetadataProtoBufMessage()); + for (StorageLocationReport metaReport : metaReports) { + nrb.addMetadataStorageReport(metaReport.getMetadataProtoBufMessage()); } if (dbVolumeSet != null) { StorageLocationReport[] dbReports = dbVolumeSet.getStorageReport(); - for (int i = 0; i < dbReports.length; i++) { - nrb.addDbStorageReport(dbReports[i].getProtoBufMessage()); + for (StorageLocationReport dbReport : dbReports) { + nrb.addDbStorageReport(dbReport.getProtoBufMessage()); } } return nrb.build(); } + /** + * Retrieves the ContainerDispatcher associated with this OzoneContainer. + * + * @return ContainerDispatcher instance used for dispatching container commands. + */ @VisibleForTesting public ContainerDispatcher getDispatcher() { return this.hddsDispatcher; } + /** + * Retrieves the volume set associated with this OzoneContainer. + * + * @return The MutableVolumeSet instance representing the volume set for this container. + */ public MutableVolumeSet getVolumeSet() { return volumeSet; } - public MutableVolumeSet getMetaVolumeSet() { - return metaVolumeSet; - } - + /** + * Retrieves the DB volume set associated with this OzoneContainer. + * + * @return The MutableVolumeSet instance representing the DB volume set for this container. + */ public MutableVolumeSet getDbVolumeSet() { return dbVolumeSet; } + /** + * Retrieves the metrics related to this OzoneContainer. + * + * @return ContainerMetrics instance containing various metrics of the container. + */ public ContainerMetrics getMetrics() { return metrics; } + /** + * Retrieves the service responsible for deleting blocks in this OzoneContainer. + * + * @return BlockDeletingService instance managing block deletions. + */ public BlockDeletingService getBlockDeletingService() { return blockDeletingService; } + /** + * Retrieves the replication server associated with this OzoneContainer. + * + * @return ReplicationServer instance managing replication tasks for this container. 
+ */ public ReplicationServer getReplicationServer() { return replicationServer; } + /** + * Initiates the compaction of databases for all storage volumes associated with this OzoneContainer. + *
+ * For each storage volume in the volume set, + * this method starts an asynchronous task that triggers the database compaction process. + * The asynchronous tasks are executed using the dbCompactionExecutorService. + *
+ * This is useful for optimizing database storage by reclaiming unused space and enhancing read/write performance. + */ public void compactDb() { for (StorageVolume volume : volumeSet.getVolumesList()) { HddsVolume hddsVolume = (HddsVolume) volume; @@ -616,5 +673,4 @@ public void compactDb() { dbCompactionExecutorService); } } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java index 72fa88b35d9..f4bf54a3d82 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java @@ -70,6 +70,10 @@ protected AbstractReplicationTask(long containerID, this.term = term; queued = Instant.now(clock); } + + protected abstract String getMetricName(); + + protected abstract String getMetricDescriptionSegment(); public long getContainerId() { return containerId; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index 5ceea125e81..92ff4b6d8d6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -26,6 +26,7 @@ import java.util.Objects; import java.util.OptionalLong; import java.util.Set; +import java.util.Collections; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.PriorityBlockingQueue; @@ -71,11 +72,17 @@ public final class ReplicationSupervisor { private final StateContext context; private final Clock clock; - private final AtomicLong requestCounter = new AtomicLong(); - private final AtomicLong successCounter = new AtomicLong(); - private final AtomicLong failureCounter = new AtomicLong(); - private final AtomicLong timeoutCounter = new AtomicLong(); - private final AtomicLong skippedCounter = new AtomicLong(); + private final Map requestCounter = new ConcurrentHashMap<>(); + private final Map successCounter = new ConcurrentHashMap<>(); + private final Map failureCounter = new ConcurrentHashMap<>(); + private final Map timeoutCounter = new ConcurrentHashMap<>(); + private final Map skippedCounter = new ConcurrentHashMap<>(); + + private static final Map METRICS_MAP; + + static { + METRICS_MAP = new HashMap<>(); + } /** * A set of container IDs that are currently being downloaded @@ -188,6 +195,10 @@ public static Builder newBuilder() { return new Builder(); } + public static Map getMetricsMap() { + return Collections.unmodifiableMap(METRICS_MAP); + } + private ReplicationSupervisor(StateContext context, ExecutorService executor, ReplicationConfig replicationConfig, DatanodeConfiguration datanodeConfig, Clock clock, IntConsumer executorThreadUpdater) { @@ -221,6 +232,19 @@ public void addTask(AbstractReplicationTask task) { return; } + if (requestCounter.get(task.getMetricName()) == null) { + synchronized (this) { + if (requestCounter.get(task.getMetricName()) == null) { + requestCounter.put(task.getMetricName(), new AtomicLong(0)); + 
successCounter.put(task.getMetricName(), new AtomicLong(0)); + failureCounter.put(task.getMetricName(), new AtomicLong(0)); + timeoutCounter.put(task.getMetricName(), new AtomicLong(0)); + skippedCounter.put(task.getMetricName(), new AtomicLong(0)); + METRICS_MAP.put(task.getMetricName(), task.getMetricDescriptionSegment()); + } + } + } + if (inFlight.add(task)) { if (task.getPriority() != ReplicationCommandPriority.LOW) { // Low priority tasks are not included in the replication queue sizes @@ -330,14 +354,14 @@ public TaskRunner(AbstractReplicationTask task) { @Override public void run() { try { - requestCounter.incrementAndGet(); + requestCounter.get(task.getMetricName()).incrementAndGet(); final long now = clock.millis(); final long deadline = task.getDeadline(); if (deadline > 0 && now > deadline) { LOG.info("Ignoring {} since the deadline has passed ({} < {})", this, Instant.ofEpochMilli(deadline), Instant.ofEpochMilli(now)); - timeoutCounter.incrementAndGet(); + timeoutCounter.get(task.getMetricName()).incrementAndGet(); return; } @@ -364,18 +388,18 @@ public void run() { task.runTask(); if (task.getStatus() == Status.FAILED) { LOG.warn("Failed {}", this); - failureCounter.incrementAndGet(); + failureCounter.get(task.getMetricName()).incrementAndGet(); } else if (task.getStatus() == Status.DONE) { LOG.info("Successful {}", this); - successCounter.incrementAndGet(); + successCounter.get(task.getMetricName()).incrementAndGet(); } else if (task.getStatus() == Status.SKIPPED) { LOG.info("Skipped {}", this); - skippedCounter.incrementAndGet(); + skippedCounter.get(task.getMetricName()).incrementAndGet(); } } catch (Exception e) { task.setStatus(Status.FAILED); LOG.warn("Failed {}", this, e); - failureCounter.incrementAndGet(); + failureCounter.get(task.getMetricName()).incrementAndGet(); } finally { inFlight.remove(task); decrementTaskCounter(task); @@ -419,7 +443,12 @@ public boolean equals(Object o) { } public long getReplicationRequestCount() { - return requestCounter.get(); + return getCount(requestCounter); + } + + public long getReplicationRequestCount(String metricsName) { + AtomicLong counter = requestCounter.get(metricsName); + return counter != null ? counter.get() : 0; } public long getQueueSize() { @@ -438,20 +467,48 @@ public long getMaxReplicationStreams() { } } + private long getCount(Map counter) { + long total = 0; + for (Map.Entry entry : counter.entrySet()) { + total += entry.getValue().get(); + } + return total; + } + public long getReplicationSuccessCount() { - return successCounter.get(); + return getCount(successCounter); + } + + public long getReplicationSuccessCount(String metricsName) { + AtomicLong counter = successCounter.get(metricsName); + return counter != null ? counter.get() : 0; } public long getReplicationFailureCount() { - return failureCounter.get(); + return getCount(failureCounter); + } + + public long getReplicationFailureCount(String metricsName) { + AtomicLong counter = failureCounter.get(metricsName); + return counter != null ? counter.get() : 0; } public long getReplicationTimeoutCount() { - return timeoutCounter.get(); + return getCount(timeoutCounter); + } + + public long getReplicationTimeoutCount(String metricsName) { + AtomicLong counter = timeoutCounter.get(metricsName); + return counter != null ? 
counter.get() : 0; } public long getReplicationSkippedCount() { - return skippedCounter.get(); + return getCount(skippedCounter); + } + + public long getReplicationSkippedCount(String metricsName) { + AtomicLong counter = skippedCounter.get(metricsName); + return counter != null ? counter.get() : 0; } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java index 671e985d7ad..a1763976af9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java @@ -71,16 +71,47 @@ public void getMetrics(MetricsCollector collector, boolean all) { .addGauge(Interns.info("numRequestedReplications", "Number of requested replications"), supervisor.getReplicationRequestCount()) + .addGauge(Interns.info("numSuccessReplications", + "Number of successful replications"), + supervisor.getReplicationSuccessCount()) + .addGauge(Interns.info("numFailureReplications", + "Number of failure replications"), + supervisor.getReplicationFailureCount()) .addGauge(Interns.info("numTimeoutReplications", "Number of replication requests timed out before being processed"), supervisor.getReplicationTimeoutCount()) .addGauge(Interns.info("numSkippedReplications", "Number of replication requests skipped as the container is " - + "already present"), supervisor.getReplicationSkippedCount()) + + "already present"), + supervisor.getReplicationSkippedCount()) .addGauge(Interns.info("maxReplicationStreams", "Maximum number of " + "concurrent replication tasks which can run simultaneously"), supervisor.getMaxReplicationStreams()); + Map metricsMap = ReplicationSupervisor.getMetricsMap(); + if (!metricsMap.isEmpty()) { + metricsMap.forEach((metricsName, descriptionSegment) -> { + if (!metricsName.equals("")) { + builder.addGauge(Interns.info("numRequested" + metricsName, + "Number of requested " + descriptionSegment), + supervisor.getReplicationRequestCount(metricsName)) + .addGauge(Interns.info("numSuccess" + metricsName, + "Number of successful " + descriptionSegment), + supervisor.getReplicationSuccessCount(metricsName)) + .addGauge(Interns.info("numFailure" + metricsName, + "Number of failure " + descriptionSegment), + supervisor.getReplicationFailureCount(metricsName)) + .addGauge(Interns.info("numTimeout" + metricsName, + "Number of " + descriptionSegment + " timed out before being processed"), + supervisor.getReplicationTimeoutCount(metricsName)) + .addGauge(Interns.info("numSkipped" + metricsName, + "Number of " + descriptionSegment + " skipped as the container is " + + "already present"), + supervisor.getReplicationSkippedCount(metricsName)); + } + }); + } + Map tasks = supervisor.getInFlightReplicationSummary(); for (Map.Entry entry : tasks.entrySet()) { builder.addGauge(Interns.info("numInflight" + entry.getKey(), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java index ca0ca98906c..2168f324c24 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java @@ -65,6 +65,16 @@ protected ReplicationTask( replicator); } + @Override + public String getMetricName() { + return "ContainerReplications"; + } + + @Override + public String getMetricDescriptionSegment() { + return "container replications"; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index bc56141fb08..0b25808cf3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; @@ -139,8 +139,8 @@ public void init() throws IOException { conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath()); datanodeUuid = UUID.randomUUID().toString(); - volumeSet = new MutableVolumeSet(datanodeUuid, scmId, conf, null, - StorageVolume.VolumeType.DATA_VOLUME, null); + volumeSet = + new MutableVolumeSet(datanodeUuid, scmId, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); createDbInstancesForTestIfNeeded(volumeSet, scmId, scmId, conf); } @@ -151,24 +151,21 @@ public void cleanup() throws IOException { } /** - * A helper method to create some blocks and put them under deletion - * state for testing. This method directly updates container.db and - * creates some fake chunk files for testing. + * A helper method to create some blocks and put them under deletion state for testing. + * This method directly updates container.db and creates some fake chunk files for testing. 
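The ReplicationSupervisor hunks above replace the single AtomicLong counters with maps keyed by the value of getMetricName() that each task type now provides. A compact sketch of the same bookkeeping, written here with ConcurrentHashMap.computeIfAbsent instead of the double-checked registration used in the patch (class and method names are illustrative, not the production code):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    final class PerTypeCounters {
      private final Map<String, AtomicLong> successCounter = new ConcurrentHashMap<>();

      void recordSuccess(String metricName) {
        // computeIfAbsent yields one atomic counter per task type without explicit locking.
        successCounter.computeIfAbsent(metricName, k -> new AtomicLong()).incrementAndGet();
      }

      /** Per-type value, or 0 if the type has not been seen yet. */
      long successCount(String metricName) {
        AtomicLong counter = successCounter.get(metricName);
        return counter != null ? counter.get() : 0;
      }

      /** Aggregate over all task types, matching the parameterless getters in the patch. */
      long totalSuccessCount() {
        return successCounter.values().stream().mapToLong(AtomicLong::get).sum();
      }
    }
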
*/ - private void createToDeleteBlocks(ContainerSet containerSet, - int numOfContainers, int numOfBlocksPerContainer, + private void createToDeleteBlocks(ContainerSet containerSet, int numOfContainers, int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException { for (int i = 0; i < numOfContainers; i++) { - createToDeleteBlocks(containerSet, numOfBlocksPerContainer, - numOfChunksPerBlock); + createToDeleteBlocks(containerSet, numOfBlocksPerContainer, numOfChunksPerBlock); } } - private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet, - int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException { + private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet, int numOfBlocksPerContainer, + int numOfChunksPerBlock) throws IOException { ChunkManager chunkManager; if (layout == FILE_PER_BLOCK) { - chunkManager = new FilePerBlockStrategy(true, null, null); + chunkManager = new FilePerBlockStrategy(true, null); } else { chunkManager = new FilePerChunkStrategy(true, null, null); } @@ -177,65 +174,70 @@ private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet, int txnID = 0; long containerID = ContainerTestHelper.getTestContainerID(); KeyValueContainerData data = - new KeyValueContainerData(containerID, layout, + new KeyValueContainerData(containerID, + layout, ContainerTestHelper.CONTAINER_MAX_SIZE, - UUID.randomUUID().toString(), datanodeUuid); + UUID.randomUUID().toString(), + datanodeUuid); data.closeContainer(); data.setSchemaVersion(schemaVersion); KeyValueContainer container = new KeyValueContainer(data, conf); - container.create(volumeSet, - new RoundRobinVolumeChoosingPolicy(), scmId); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId); containerSet.addContainer(container); - data = (KeyValueContainerData) containerSet.getContainer( - containerID).getContainerData(); + data = (KeyValueContainerData) containerSet.getContainer(containerID).getContainerData(); data.setSchemaVersion(schemaVersion); if (isSameSchemaVersion(schemaVersion, SCHEMA_V1)) { - createPendingDeleteBlocksSchema1(numOfBlocksPerContainer, data, - containerID, numOfChunksPerBlock, buffer, chunkManager, container); - } else if (isSameSchemaVersion(schemaVersion, SCHEMA_V2) - || isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { - createPendingDeleteBlocksViaTxn(numOfBlocksPerContainer, txnID, - containerID, numOfChunksPerBlock, buffer, chunkManager, - container, data); + createPendingDeleteBlocksSchema1( + numOfBlocksPerContainer, + data, + containerID, + numOfChunksPerBlock, + buffer, + chunkManager, + container); + } else if (isSameSchemaVersion(schemaVersion, SCHEMA_V2) || isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { + createPendingDeleteBlocksViaTxn( + numOfBlocksPerContainer, + txnID, + containerID, + numOfChunksPerBlock, + buffer, + chunkManager, + container, + data); } else { - throw new UnsupportedOperationException( - "Only schema version 1,2,3 are supported."); + throw new UnsupportedOperationException("Only schema version 1,2,3 are supported."); } return data; } @SuppressWarnings("checkstyle:parameternumber") - private void createPendingDeleteBlocksSchema1(int numOfBlocksPerContainer, - KeyValueContainerData data, long containerID, int numOfChunksPerBlock, - ChunkBuffer buffer, ChunkManager chunkManager, + private void createPendingDeleteBlocksSchema1(int numOfBlocksPerContainer, KeyValueContainerData data, + long containerID, int numOfChunksPerBlock, ChunkBuffer buffer, ChunkManager chunkManager, 
KeyValueContainer container) { BlockID blockID = null; try (DBHandle metadata = BlockUtils.getDB(data, conf)) { for (int j = 0; j < numOfBlocksPerContainer; j++) { blockID = ContainerTestHelper.getTestBlockID(containerID); - String deleteStateName = data.getDeletingBlockKey( - blockID.getLocalID()); + String deleteStateName = data.getDeletingBlockKey(blockID.getLocalID()); BlockData kd = new BlockData(blockID); List chunks = Lists.newArrayList(); - putChunksInBlock(numOfChunksPerBlock, j, chunks, buffer, chunkManager, - container, blockID); + putChunksInBlock(numOfChunksPerBlock, j, chunks, buffer, chunkManager, container, blockID); kd.setChunks(chunks); metadata.getStore().getBlockDataTable().put(deleteStateName, kd); container.getContainerData().incrPendingDeletionBlocks(1); } - updateMetaData(data, container, numOfBlocksPerContainer, - numOfChunksPerBlock); + updateMetaData(data, container, numOfBlocksPerContainer, numOfChunksPerBlock); } catch (IOException exception) { - LOG.info("Exception " + exception); - LOG.warn("Failed to put block: " + blockID + " in BlockDataTable."); + LOG.info("Exception ", exception); + LOG.warn("Failed to put block: {} in BlockDataTable.", blockID); } } @SuppressWarnings("checkstyle:parameternumber") - private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer, - int txnID, long containerID, int numOfChunksPerBlock, ChunkBuffer buffer, - ChunkManager chunkManager, KeyValueContainer container, + private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer, int txnID, long containerID, + int numOfChunksPerBlock, ChunkBuffer buffer, ChunkManager chunkManager, KeyValueContainer container, KeyValueContainerData data) { List containerBlocks = new ArrayList<>(); for (int i = 0; i < numOfBlocksPerContainer; i++) { @@ -243,144 +245,121 @@ private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer, BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); BlockData kd = new BlockData(blockID); List chunks = Lists.newArrayList(); - putChunksInBlock(numOfChunksPerBlock, i, chunks, buffer, chunkManager, - container, blockID); + putChunksInBlock(numOfChunksPerBlock, i, chunks, buffer, chunkManager, container, blockID); kd.setChunks(chunks); try (DBHandle metadata = BlockUtils.getDB(data, conf)) { String blockKey = data.getBlockKey(blockID.getLocalID()); metadata.getStore().getBlockDataTable().put(blockKey, kd); } catch (IOException exception) { - LOG.info("Exception = " + exception); - LOG.warn("Failed to put block: " + blockID.getLocalID() - + " in BlockDataTable."); + LOG.info("Exception = ", exception); + LOG.warn("Failed to put block: {} in BlockDataTable.", blockID.getLocalID()); } container.getContainerData().incrPendingDeletionBlocks(1); - // Below we are creating one transaction per block just for - // testing purpose + // Below we are creating one transaction per block just for testing purpose containerBlocks.add(blockID.getLocalID()); createTxn(data, containerBlocks, txnID, containerID); containerBlocks.clear(); } - updateMetaData(data, container, numOfBlocksPerContainer, - numOfChunksPerBlock); + updateMetaData(data, container, numOfBlocksPerContainer, numOfChunksPerBlock); } - private void createTxn(KeyValueContainerData data, List containerBlocks, - int txnID, long containerID) { + private void createTxn(KeyValueContainerData data, List containerBlocks, int txnID, long containerID) { try (DBHandle metadata = BlockUtils.getDB(data, conf)) { - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction dtx = 
- StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction + DeletedBlocksTransaction dtx = + DeletedBlocksTransaction .newBuilder().setTxID(txnID).setContainerID(containerID) .addAllLocalID(containerBlocks).setCount(0).build(); - try (BatchOperation batch = metadata.getStore().getBatchHandler() - .initBatchOperation()) { + try (BatchOperation batch = metadata.getStore().getBatchHandler().initBatchOperation()) { DatanodeStore ds = metadata.getStore(); if (isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { - DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = - (DatanodeStoreSchemaThreeImpl) ds; - dnStoreThreeImpl.getDeleteTransactionTable() - .putWithBatch(batch, data.getDeleteTxnKey(txnID), dtx); + DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = (DatanodeStoreSchemaThreeImpl) ds; + dnStoreThreeImpl.getDeleteTransactionTable().putWithBatch(batch, data.getDeleteTxnKey(txnID), dtx); } else { - DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = - (DatanodeStoreSchemaTwoImpl) ds; - dnStoreTwoImpl.getDeleteTransactionTable() - .putWithBatch(batch, (long) txnID, dtx); + DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds; + dnStoreTwoImpl.getDeleteTransactionTable().putWithBatch(batch, (long) txnID, dtx); } metadata.getStore().getBatchHandler().commitBatchOperation(batch); } } catch (IOException exception) { - LOG.warn("Transaction creation was not successful for txnID: " + txnID - + " consisting of " + containerBlocks.size() + " blocks."); + LOG.warn("Transaction creation was not successful for txnID: {} consisting of blocks.", + txnID, + containerBlocks.size()); } } - private void putChunksInBlock(int numOfChunksPerBlock, int i, - List chunks, ChunkBuffer buffer, - ChunkManager chunkManager, KeyValueContainer container, BlockID blockID) { + private void putChunksInBlock(int numOfChunksPerBlock, int i, List chunks, + ChunkBuffer buffer, ChunkManager chunkManager, KeyValueContainer container, BlockID blockID) { long chunkLength = 100; try { for (int k = 0; k < numOfChunksPerBlock; k++) { - // This real chunkName should be localID_chunk_chunkIndex, here is for - // explicit debug and the necessity of HDDS-7446 to detect the orphan - // chunks through the chunk file name + // This real chunkName should be localID_chunk_chunkIndex, + // here is for explicit debug and the necessity of HDDS-7446 + // to detect the orphan chunks through the chunk file name final String chunkName = String.format("%d_chunk_%d_block_%d", - blockID.getContainerBlockID().getLocalID(), k, i); + blockID.getContainerBlockID().getLocalID(), + k, + i); final long offset = k * chunkLength; - ContainerProtos.ChunkInfo info = - ContainerProtos.ChunkInfo.newBuilder().setChunkName(chunkName) - .setLen(chunkLength).setOffset(offset) - .setChecksumData(Checksum.getNoChecksumDataProto()).build(); + ContainerProtos.ChunkInfo info = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(chunkName) + .setLen(chunkLength).setOffset(offset) + .setChecksumData(Checksum.getNoChecksumDataProto()) + .build(); chunks.add(info); ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength); ChunkBuffer chunkData = buffer.duplicate(0, (int) chunkLength); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); } } catch (IOException ex) { - 
LOG.warn("Putting chunks in blocks was not successful for BlockID: " - + blockID); + LOG.warn("Putting chunks in blocks was not successful for BlockID: {}", blockID); } } - private void updateMetaData(KeyValueContainerData data, - KeyValueContainer container, int numOfBlocksPerContainer, + private void updateMetaData(KeyValueContainerData data, KeyValueContainer container, int numOfBlocksPerContainer, int numOfChunksPerBlock) { long chunkLength = 100; try (DBHandle metadata = BlockUtils.getDB(data, conf)) { container.getContainerData().setBlockCount(numOfBlocksPerContainer); // Set block count, bytes used and pending delete block count. + metadata.getStore().getMetadataTable().put(data.getBlockCountKey(), (long) numOfBlocksPerContainer); metadata.getStore().getMetadataTable() - .put(data.getBlockCountKey(), (long) numOfBlocksPerContainer); - metadata.getStore().getMetadataTable() - .put(data.getBytesUsedKey(), - chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer); - metadata.getStore().getMetadataTable() - .put(data.getPendingDeleteBlockCountKey(), - (long) numOfBlocksPerContainer); + .put(data.getBytesUsedKey(), chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer); + metadata.getStore().getMetadataTable().put(data.getPendingDeleteBlockCountKey(), (long) numOfBlocksPerContainer); } catch (IOException exception) { - LOG.warn("Meta Data update was not successful for container: " - + container); + LOG.warn("Meta Data update was not successful for container: {}", container); } } /** * Run service runDeletingTasks and wait for it's been processed. */ - private void deleteAndWait(BlockDeletingServiceTestImpl service, - int timesOfProcessed) throws TimeoutException, InterruptedException { + private void deleteAndWait(BlockDeletingServiceTestImpl service, int timesOfProcessed) + throws TimeoutException, InterruptedException { service.runDeletingTasks(); - GenericTestUtils.waitFor(() - -> service.getTimesOfProcessed() == timesOfProcessed, 100, 3000); + GenericTestUtils.waitFor(() -> service.getTimesOfProcessed() == timesOfProcessed, 100, 3000); } /** - * Get under deletion blocks count from DB, - * note this info is parsed from container.db. + * Get under deletion blocks count from DB, note this info is parsed from container.db. 
*/ - private int getUnderDeletionBlocksCount(DBHandle meta, - KeyValueContainerData data) throws IOException { + private int getUnderDeletionBlocksCount(DBHandle meta, KeyValueContainerData data) throws IOException { if (data.hasSchema(SCHEMA_V1)) { return meta.getStore().getBlockDataTable() - .getRangeKVs(null, 100, data.containerPrefix(), - data.getDeletingBlockKeyFilter()) + .getRangeKVs(null, 100, data.containerPrefix(), data.getDeletingBlockKeyFilter()) .size(); } else if (data.hasSchema(SCHEMA_V2)) { int pendingBlocks = 0; DatanodeStore ds = meta.getStore(); - DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = - (DatanodeStoreSchemaTwoImpl) ds; + DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds; try ( - TableIterator> - iter = dnStoreTwoImpl.getDeleteTransactionTable().iterator()) { + TableIterator> iter = + dnStoreTwoImpl.getDeleteTransactionTable().iterator()) { while (iter.hasNext()) { - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction - delTx = iter.next().getValue(); + DeletedBlocksTransaction delTx = iter.next().getValue(); pendingBlocks += delTx.getLocalIDList().size(); } } @@ -388,36 +367,29 @@ private int getUnderDeletionBlocksCount(DBHandle meta, } else if (data.hasSchema(SCHEMA_V3)) { int pendingBlocks = 0; DatanodeStore ds = meta.getStore(); - DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = - (DatanodeStoreSchemaThreeImpl) ds; + DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = (DatanodeStoreSchemaThreeImpl) ds; try ( - TableIterator> - iter = dnStoreThreeImpl.getDeleteTransactionTable() - .iterator(data.containerPrefix())) { + TableIterator> iter = + dnStoreThreeImpl.getDeleteTransactionTable().iterator(data.containerPrefix())) { while (iter.hasNext()) { - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction - delTx = iter.next().getValue(); + DeletedBlocksTransaction delTx = iter.next().getValue(); pendingBlocks += delTx.getLocalIDList().size(); } } return pendingBlocks; } else { - throw new UnsupportedOperationException( - "Only schema version 1,2,3 are supported."); + throw new UnsupportedOperationException("Only schema version 1,2,3 are supported."); } } - /** - * In some cases, the pending delete blocks metadata will become larger - * than the actual number of pending delete blocks in the database. If - * there are no delete transactions in the DB, this metadata counter should - * be reset to zero. + * In some cases, + * the pending delete blocks metadata will become larger than the actual number of pending delete blocks + * in the database. + * If there are no delete transactions in the DB, this metadata counter should be reset to zero. */ @ContainerTestVersionInfo.ContainerTest - public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); // This test is not relevant for schema V1. if (isSameSchemaVersion(schemaVersion, SCHEMA_V1)) { @@ -431,107 +403,86 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) conf.setFromObject(dnConf); ContainerSet containerSet = new ContainerSet(1000); - // Create one container with no actual pending delete blocks, but an - // incorrect metadata value indicating it has enough pending deletes to - // use up the whole block deleting limit. 
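Several of the test helpers above are converted from string concatenation to SLF4J parameterized logging. A brief sketch of the pattern with illustrative names; each {} placeholder consumes one argument, and a Throwable passed as the final argument is rendered as a stack trace rather than bound to a placeholder:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class LoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

      void examples(long txnID, int blockCount, Object blockID, Exception exception) {
        // One placeholder per argument; no string concatenation on the logging path.
        LOG.warn("Failed to put block: {} in BlockDataTable.", blockID);
        LOG.warn("Transaction creation was not successful for txnID: {} consisting of {} blocks.",
            txnID, blockCount);
        // A trailing Throwable is treated as the exception to log, not as a placeholder value.
        LOG.info("Exception while writing block data", exception);
      }
    }
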
- KeyValueContainerData incorrectData = - createToDeleteBlocks(containerSet, - 0, 1); + // Create one container with no actual pending delete blocks, + // but an incorrect metadata value + // indicating it has enough pending deletes to use up the whole block deleting limit. + KeyValueContainerData incorrectData = createToDeleteBlocks(containerSet, 0, 1); try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { // Check pre-create state. - assertEquals(0, getUnderDeletionBlocksCount(db, - incorrectData)); + assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData)); assertEquals(0, db.getStore().getMetadataTable() .get(incorrectData.getPendingDeleteBlockCountKey()).longValue()); - assertEquals(0, - incorrectData.getNumPendingDeletionBlocks()); + assertEquals(0, incorrectData.getNumPendingDeletionBlocks()); // Alter the pending delete value in memory and the DB. incorrectData.incrPendingDeletionBlocks(blockDeleteLimit); - db.getStore().getMetadataTable().put( - incorrectData.getPendingDeleteBlockCountKey(), - (long)blockDeleteLimit); + db.getStore().getMetadataTable().put(incorrectData.getPendingDeleteBlockCountKey(), (long)blockDeleteLimit); } // Create one container with fewer pending delete blocks than the first. int correctNumBlocksToDelete = blockDeleteLimit - 1; - KeyValueContainerData correctData = createToDeleteBlocks(containerSet, - correctNumBlocksToDelete, 1); + KeyValueContainerData correctData = createToDeleteBlocks(containerSet, correctNumBlocksToDelete, 1); // Check its metadata was set up correctly. - assertEquals(correctNumBlocksToDelete, - correctData.getNumPendingDeletionBlocks()); + assertEquals(correctNumBlocksToDelete, correctData.getNumPendingDeletionBlocks()); try (DBHandle db = BlockUtils.getDB(correctData, conf)) { - assertEquals(correctNumBlocksToDelete, - getUnderDeletionBlocksCount(db, correctData)); - assertEquals(correctNumBlocksToDelete, - db.getStore().getMetadataTable() - .get(correctData.getPendingDeleteBlockCountKey()).longValue()); + assertEquals(correctNumBlocksToDelete, getUnderDeletionBlocksCount(db, correctData)); + assertEquals(correctNumBlocksToDelete, db.getStore().getMetadataTable() + .get(correctData.getPendingDeleteBlockCountKey()).longValue()); } - // Create the deleting service instance with very large interval between - // runs so we can trigger it manually. + // Create the deleting service instance with a very large interval between runs so we can trigger it manually. ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = - new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { - }); - OzoneContainer ozoneContainer = - mockDependencies(containerSet, keyValueHandler); - BlockDeletingService svc = new BlockDeletingService(ozoneContainer, - 1_000_000, 1_000_000, TimeUnit.SECONDS, 1, conf); - - // On the first run, the container with incorrect metadata should consume - // the block deletion limit, and the correct container with fewer pending - // delete blocks will not be processed. + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); + OzoneContainer ozoneContainer = mockDependencies(containerSet, keyValueHandler); + BlockDeletingService svc = new BlockDeletingService( + ozoneContainer, + 1_000_000, + 1_000_000, + TimeUnit.SECONDS, + 1, + conf); + + // On the first run, the container with incorrect metadata should consume the block deletion limit, + // and the correct container with fewer pending delete blocks will not be processed. 
svc.runPeriodicalTaskNow(); - // Pending delete block count in the incorrect container should be fixed - // and reset to 0. + // Pending delete block count in the incorrect container should be fixed and reset to 0. assertEquals(0, incorrectData.getNumPendingDeletionBlocks()); try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { - assertEquals(0, getUnderDeletionBlocksCount(db, - incorrectData)); + assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData)); assertEquals(0, db.getStore().getMetadataTable() .get(incorrectData.getPendingDeleteBlockCountKey()).longValue()); } - // Correct container should not have been processed. - assertEquals(correctNumBlocksToDelete, - correctData.getNumPendingDeletionBlocks()); + // The Correct container should not have been processed. + assertEquals(correctNumBlocksToDelete, correctData.getNumPendingDeletionBlocks()); try (DBHandle db = BlockUtils.getDB(correctData, conf)) { - assertEquals(correctNumBlocksToDelete, - getUnderDeletionBlocksCount(db, correctData)); - assertEquals(correctNumBlocksToDelete, - db.getStore().getMetadataTable() - .get(correctData.getPendingDeleteBlockCountKey()).longValue()); + assertEquals(correctNumBlocksToDelete, getUnderDeletionBlocksCount(db, correctData)); + assertEquals(correctNumBlocksToDelete, db.getStore().getMetadataTable() + .get(correctData.getPendingDeleteBlockCountKey()).longValue()); } - // On the second run, the correct container should be picked up, because - // it now has the most pending delete blocks. + // On the second run, the correct container should be picked up, because it now has the most pending delete blocks. svc.runPeriodicalTaskNow(); - // The incorrect container should remain in the same state after being - // fixed. + // The incorrect container should remain in the same state after being fixed. assertEquals(0, incorrectData.getNumPendingDeletionBlocks()); try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { - assertEquals(0, getUnderDeletionBlocksCount(db, - incorrectData)); + assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData)); assertEquals(0, db.getStore().getMetadataTable() .get(incorrectData.getPendingDeleteBlockCountKey()).longValue()); } - // The correct container should have been processed this run and had its - // blocks deleted. + // The correct container should have been processed this run and had its blocks deleted. 
assertEquals(0, correctData.getNumPendingDeletionBlocks()); try (DBHandle db = BlockUtils.getDB(correctData, conf)) { - assertEquals(0, getUnderDeletionBlocksCount(db, - correctData)); + assertEquals(0, getUnderDeletionBlocksCount(db, correctData)); assertEquals(0, db.getStore().getMetadataTable() .get(correctData.getPendingDeleteBlockCountKey()).longValue()); } } @ContainerTestVersionInfo.ContainerTest - public void testBlockDeletion(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testBlockDeletion(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); dnConf.setBlockDeletionLimit(2); @@ -541,11 +492,8 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) createToDeleteBlocks(containerSet, 1, 3, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = - new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { - }); - BlockDeletingServiceTestImpl svc = - getBlockDeletingService(containerSet, conf, keyValueHandler); + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); + BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler); svc.start(); BlockDeletingServiceMetrics deletingServiceMetrics = svc.getMetrics(); GenericTestUtils.waitFor(svc::isStarted, 100, 3000); @@ -555,28 +503,25 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) containerSet.listContainer(0L, 1, containerData); assertEquals(1, containerData.size()); KeyValueContainerData data = (KeyValueContainerData) containerData.get(0); - KeyPrefixFilter filter = isSameSchemaVersion(schemaVersion, SCHEMA_V1) ? - data.getDeletingBlockKeyFilter() : data.getUnprefixedKeyFilter(); + KeyPrefixFilter filter = isSameSchemaVersion(schemaVersion, SCHEMA_V1) + ? data.getDeletingBlockKeyFilter() + : data.getUnprefixedKeyFilter(); try (DBHandle meta = BlockUtils.getDB(data, conf)) { Map> containerMap = containerSet.getContainerMapCopy(); assertBlockDataTableRecordCount(3, meta, filter, data.getContainerID()); - // NOTE: this test assumes that all the container is KetValueContainer and - // have DeleteTransactionId in KetValueContainerData. If other - // types is going to be added, this test should be checked. + // NOTE: this test assumes that all the container is KetValueContainer + // and have DeleteTransactionId in KetValueContainerData. + // If other types are going to be added, this test should be checked. 
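The testBlockDeletion body that follows records baseline metric values before triggering the service and then asserts on the deltas, which keeps the checks independent of anything earlier activity did to the same metrics object. A minimal sketch of that capture-then-delta idiom, with a hypothetical metrics interface:

    /** Hypothetical, monotonically increasing counters exposed by a deletion service. */
    interface DeletionMetrics {
      long getSuccessCount();
      long getTotalBlockChosenCount();
    }

    final class DeltaAssertionSketch {
      static void assertOneRunDeletes(DeletionMetrics metrics, Runnable runOneInterval, long expectedBlocks) {
        long successBefore = metrics.getSuccessCount();
        long chosenBefore = metrics.getTotalBlockChosenCount();

        runOneInterval.run();

        // Compare against the captured baselines, not against absolute values.
        if (metrics.getSuccessCount() - successBefore != expectedBlocks
            || metrics.getTotalBlockChosenCount() - chosenBefore != expectedBlocks) {
          throw new AssertionError("Expected exactly " + expectedBlocks + " blocks deleted in this run");
        }
      }
    }
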
long transactionId = ((KeyValueContainerData) containerMap .get(containerData.get(0).getContainerID()).getContainerData()) .getDeleteTransactionId(); long containerSpace = containerData.get(0).getBytesUsed(); - // Number of deleted blocks in container should be equal to 0 before - // block delete - - long deleteSuccessCount = - deletingServiceMetrics.getSuccessCount(); - long totalBlockChosenCount = - deletingServiceMetrics.getTotalBlockChosenCount(); - long totalContainerChosenCount = - deletingServiceMetrics.getTotalContainerChosenCount(); + // The Number of deleted blocks in container should be equal to 0 before block delete + + long deleteSuccessCount = deletingServiceMetrics.getSuccessCount(); + long totalBlockChosenCount = deletingServiceMetrics.getTotalBlockChosenCount(); + long totalContainerChosenCount = deletingServiceMetrics.getTotalContainerChosenCount(); assertEquals(0, transactionId); // Ensure there are 3 blocks under deletion and 0 deleted blocks @@ -584,76 +529,51 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) assertEquals(3, meta.getStore().getMetadataTable() .get(data.getPendingDeleteBlockCountKey()).longValue()); - // Container contains 3 blocks. So, space used by the container - // should be greater than zero. + // The Container contains 3 blocks. So, space used by the container should be greater than zero. assertThat(containerSpace).isGreaterThan(0); // An interval will delete 1 * 2 blocks deleteAndWait(svc, 1); - GenericTestUtils.waitFor(() -> - containerData.get(0).getBytesUsed() == containerSpace / - 3, 100, 3000); - // After first interval 2 blocks will be deleted. Hence, current space - // used by the container should be less than the space used by the - // container initially(before running deletion services). + GenericTestUtils.waitFor(() -> containerData.get(0).getBytesUsed() == containerSpace / 3, 100, 3000); + // After first interval 2 blocks will be deleted. + // Hence, the current space used by the container should be less than the space + // used by the container initially (before running deletion services). assertThat(containerData.get(0).getBytesUsed()).isLessThan(containerSpace); - assertEquals(2, - deletingServiceMetrics.getSuccessCount() - - deleteSuccessCount); - assertEquals(2, - deletingServiceMetrics.getTotalBlockChosenCount() - - totalBlockChosenCount); - assertEquals(1, - deletingServiceMetrics.getTotalContainerChosenCount() - - totalContainerChosenCount); - // The value of the getTotalPendingBlockCount Metrics is obtained - // before the deletion is processing - // So the Pending Block count will be 3 - assertEquals(3, - deletingServiceMetrics.getTotalPendingBlockCount()); + assertEquals(2, deletingServiceMetrics.getSuccessCount() - deleteSuccessCount); + assertEquals(2, deletingServiceMetrics.getTotalBlockChosenCount() - totalBlockChosenCount); + assertEquals(1, deletingServiceMetrics.getTotalContainerChosenCount() - totalContainerChosenCount); + // The value of the getTotalPendingBlockCount Metrics is obtained before the deletion is processing, + // so the Pending Block count will be 3 + assertEquals(3, deletingServiceMetrics.getTotalPendingBlockCount()); deleteAndWait(svc, 2); - // After deletion of all 3 blocks, space used by the containers - // should be zero. - GenericTestUtils.waitFor(() -> - containerData.get(0).getBytesUsed() == 0, 100, 3000); + // After deletion of all 3 blocks, space used by the containers should be zero. 
+ GenericTestUtils.waitFor(() -> containerData.get(0).getBytesUsed() == 0, 100, 3000); - // Check finally DB counters. + // Finally, check DB counters. // Not checking bytes used, as handler is a mock call. - assertEquals(0, meta.getStore().getMetadataTable() - .get(data.getPendingDeleteBlockCountKey()).longValue()); - assertEquals(0, - meta.getStore().getMetadataTable().get(data.getBlockCountKey()) - .longValue()); - assertEquals(3, - deletingServiceMetrics.getSuccessCount() - - deleteSuccessCount); - assertEquals(3, - deletingServiceMetrics.getTotalBlockChosenCount() - - totalBlockChosenCount); - assertEquals(2, - deletingServiceMetrics.getTotalContainerChosenCount() - - totalContainerChosenCount); + assertEquals(0, meta.getStore().getMetadataTable().get(data.getPendingDeleteBlockCountKey()).longValue()); + assertEquals(0, meta.getStore().getMetadataTable().get(data.getBlockCountKey()).longValue()); + assertEquals(3, deletingServiceMetrics.getSuccessCount() - deleteSuccessCount); + assertEquals(3, deletingServiceMetrics.getTotalBlockChosenCount() - totalBlockChosenCount); + assertEquals(2, deletingServiceMetrics.getTotalContainerChosenCount() - totalContainerChosenCount); // check if blockData get deleted assertBlockDataTableRecordCount(0, meta, filter, data.getContainerID()); - // The value of the getTotalPendingBlockCount Metrics is obtained - // before the deletion is processing - // So the Pending Block count will be 1 - assertEquals(1, - deletingServiceMetrics.getTotalPendingBlockCount()); + // The value of the getTotalPendingBlockCount Metrics is obtained before the deletion is processing, + // so the Pending Block count will be 1 + assertEquals(1, deletingServiceMetrics.getTotalPendingBlockCount()); } svc.shutdown(); } @ContainerTestVersionInfo.ContainerTest - public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); - // Skip schemaV1, when markBlocksForDeletionSchemaV1, the unrecorded blocks - // from received TNXs will be deleted, not in BlockDeletingService + // Skip schemaV1, when markBlocksForDeletionSchemaV1, + // the unrecorded blocks from received TNXs will be deleted, not in BlockDeletingService Assumptions.assumeFalse(isSameSchemaVersion(schemaVersion, SCHEMA_V1)); int numOfContainers = 2; @@ -665,37 +585,33 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) conf.setFromObject(dnConf); ContainerSet containerSet = new ContainerSet(1000); - createToDeleteBlocks(containerSet, numOfContainers, numOfBlocksPerContainer, - numOfChunksPerBlock); + createToDeleteBlocks(containerSet, numOfContainers, numOfBlocksPerContainer, numOfChunksPerBlock); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = - new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { - }); - BlockDeletingServiceTestImpl svc = - getBlockDeletingService(containerSet, conf, keyValueHandler); + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); + BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler); svc.start(); GenericTestUtils.waitFor(svc::isStarted, 100, 3000); - // Ensure 2 container was created + // Ensure 2 containers were created List containerData = Lists.newArrayList(); containerSet.listContainer(0L, 2, containerData); assertEquals(2, containerData.size()); 
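The GenericTestUtils.waitFor calls in these tests poll a condition at a fixed interval until it holds or a timeout expires. A pure-JDK sketch of that polling idiom for illustration (this is not the actual GenericTestUtils implementation):

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    final class PollingSketch {
      static void waitFor(BooleanSupplier condition, long pollMillis, long timeoutMillis)
          throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("Condition not met within " + timeoutMillis + " ms");
          }
          Thread.sleep(pollMillis);
        }
      }
    }
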
KeyValueContainerData ctr1 = (KeyValueContainerData) containerData.get(0); KeyValueContainerData ctr2 = (KeyValueContainerData) containerData.get(1); - KeyPrefixFilter filter = isSameSchemaVersion(schemaVersion, SCHEMA_V1) ? - ctr1.getDeletingBlockKeyFilter() : ctr1.getUnprefixedKeyFilter(); + KeyPrefixFilter filter = isSameSchemaVersion(schemaVersion, SCHEMA_V1) + ? ctr1.getDeletingBlockKeyFilter() + : ctr1.getUnprefixedKeyFilter(); - // Have two unrecorded blocks onDisk and another two not to simulate the - // possible cases + // Have two unrecorded blocks onDisk and another two not to simulate the possible cases int numUnrecordedBlocks = 4; int numExistingOnDiskUnrecordedBlocks = 2; List unrecordedBlockIds = new ArrayList<>(); Set unrecordedChunks = new HashSet<>(); try (DBHandle meta = BlockUtils.getDB(ctr1, conf)) { - // create unrecorded blocks in a new txn and update metadata, + // Create unrecorded blocks in a new txn and update metadata, // service shall first choose the top pendingDeletion container // if using the TopNOrderedContainerDeletionChoosingPolicy File chunkDir = ContainerUtils.getChunkDir(ctr1); @@ -703,8 +619,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) long localId = System.nanoTime() + i; unrecordedBlockIds.add(localId); String chunkName; - for (int indexOfChunk = 0; indexOfChunk < numOfChunksPerBlock; - indexOfChunk++) { + for (int indexOfChunk = 0; indexOfChunk < numOfChunksPerBlock; indexOfChunk++) { if (layout == FILE_PER_BLOCK) { chunkName = localId + ".block"; } else { @@ -714,7 +629,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) unrecordedChunks.add(chunkFile); } } - // create unreferenced onDisk chunks + // Create unreferenced onDisk chunks Iterator iter = unrecordedChunks.iterator(); for (int m = 0; m < numExistingOnDiskUnrecordedBlocks; m++) { File chunk = iter.next(); @@ -725,8 +640,8 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) createTxn(ctr1, unrecordedBlockIds, 100, ctr1.getContainerID()); ctr1.updateDeleteTransactionId(100); ctr1.incrPendingDeletionBlocks(numUnrecordedBlocks); - updateMetaData(ctr1, (KeyValueContainer) containerSet.getContainer( - ctr1.getContainerID()), 3, 1); + updateMetaData(ctr1, + (KeyValueContainer) containerSet.getContainer(ctr1.getContainerID()), 3, 1); // Ensure there are 3 + 4 = 7 blocks under deletion assertEquals(7, getUnderDeletionBlocksCount(meta, ctr1)); } @@ -736,15 +651,14 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) assertEquals(3, ctr2.getNumPendingDeletionBlocks()); // Totally 2 container * 3 blocks + 4 unrecorded block = 10 blocks - // So we shall experience 5 rounds to delete all blocks + // so we shall experience 5 rounds to delete all blocks. 
// Unrecorded blocks should not affect the actual NumPendingDeletionBlocks deleteAndWait(svc, 1); deleteAndWait(svc, 2); deleteAndWait(svc, 3); deleteAndWait(svc, 4); deleteAndWait(svc, 5); - GenericTestUtils.waitFor(() -> ctr2.getNumPendingDeletionBlocks() == 0, - 200, 2000); + GenericTestUtils.waitFor(() -> ctr2.getNumPendingDeletionBlocks() == 0, 200, 2000); // To make sure the container stat calculation is right assertEquals(0, ctr1.getBlockCount()); @@ -752,11 +666,11 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) assertEquals(0, ctr2.getBlockCount()); assertEquals(0, ctr2.getBytesUsed()); - // check if blockData get deleted + // Check if blockData get deleted assertBlockDataTableRecordCount(0, ctr1, filter); assertBlockDataTableRecordCount(0, ctr2, filter); - // check if all the unreferenced chunks get deleted + // Check if all the unreferenced chunks get deleted for (File f : unrecordedChunks) { assertFalse(f.exists()); } @@ -765,11 +679,9 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) } @ContainerTestVersionInfo.ContainerTest - public void testShutdownService(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testShutdownService(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, - TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10); @@ -778,11 +690,8 @@ public void testShutdownService(ContainerTestVersionInfo versionInfo) createToDeleteBlocks(containerSet, 1, 100, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = - new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { - }); - BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf, keyValueHandler); + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); + BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); GenericTestUtils.waitFor(service::isStarted, 100, 3000); @@ -796,8 +705,7 @@ public void testShutdownService(ContainerTestVersionInfo versionInfo) } @ContainerTestVersionInfo.ContainerTest - public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); dnConf.setBlockDeletionLimit(3); @@ -811,13 +719,16 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); - // set timeout value as 1ns to trigger timeout behavior + // Set timeout value as 1 ns to trigger timeout behavior long timeout = 1; - OzoneContainer ozoneContainer = - mockDependencies(containerSet, keyValueHandler); - BlockDeletingService svc = new BlockDeletingService(ozoneContainer, - TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, - 10, conf); + OzoneContainer ozoneContainer = mockDependencies(containerSet, keyValueHandler); + BlockDeletingService svc = new BlockDeletingService( + ozoneContainer, + 
TimeUnit.MILLISECONDS.toNanos(1000), + timeout, + TimeUnit.NANOSECONDS, + 10, + conf); svc.start(); LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG); @@ -833,18 +744,23 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) log.stopCapturing(); svc.shutdown(); - // test for normal case that doesn't have timeout limitation + // Test for a normal case that doesn't have a timeout limitation createToDeleteBlocks(containerSet, 1, 3, 1); timeout = 0; - svc = new BlockDeletingService(ozoneContainer, - TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS, - 10, conf, "", mock(ReconfigurationHandler.class)); + svc = new BlockDeletingService( + ozoneContainer, + TimeUnit.MILLISECONDS.toNanos(1000), + timeout, + TimeUnit.MILLISECONDS, + 10, + conf, + "", + mock(ReconfigurationHandler.class)); svc.start(); - // get container meta data - KeyValueContainer container = - (KeyValueContainer) containerSet.iterator().next(); + // Get container meta data + KeyValueContainer container = (KeyValueContainer) containerSet.iterator().next(); KeyValueContainerData data = container.getContainerData(); try (DBHandle meta = BlockUtils.getDB(data, conf)) { LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG); @@ -857,24 +773,20 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) }, 100, 1000); newLog.stopCapturing(); - // The block deleting successfully and shouldn't catch timed - // out warning log. + // The block deleting successfully and shouldn't catch timed out warning log. assertThat(newLog.getOutput()) .doesNotContain("Background task executes timed out, retrying in next interval"); } svc.shutdown(); } - private BlockDeletingServiceTestImpl getBlockDeletingService( - ContainerSet containerSet, ConfigurationSource config, + private BlockDeletingServiceTestImpl getBlockDeletingService(ContainerSet containerSet, ConfigurationSource config, KeyValueHandler keyValueHandler) { - OzoneContainer ozoneContainer = - mockDependencies(containerSet, keyValueHandler); + OzoneContainer ozoneContainer = mockDependencies(containerSet, keyValueHandler); return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, config); } - private OzoneContainer mockDependencies(ContainerSet containerSet, - KeyValueHandler keyValueHandler) { + private OzoneContainer mockDependencies(ContainerSet containerSet, KeyValueHandler keyValueHandler) { OzoneContainer ozoneContainer = mock(OzoneContainer.class); when(ozoneContainer.getContainerSet()).thenReturn(containerSet); when(ozoneContainer.getWriteChannel()).thenReturn(null); @@ -886,8 +798,7 @@ private OzoneContainer mockDependencies(ContainerSet containerSet, @Unhealthy @ContainerTestVersionInfo.ContainerTest - public void testContainerThrottle(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testContainerThrottle(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); // Properties : // - Number of containers : 2 @@ -896,8 +807,7 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) // - Container limit per interval : 1 // - Block limit per container : 1 // - // Each time only 1 container can be processed, so each time - // 1 block from 1 container can be deleted. + // Each time only 1 container can be processed, so each time 1 block from 1 container can be deleted. 
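// [Editor's sketch, not part of the patch] The comment above describes the container throttle:
// with a container limit of 1 per interval and a block limit of 1 per container, each service run
// can reclaim at most one block from one container. A minimal, self-contained illustration of that
// arithmetic, using only hypothetical local names:
final class ContainerThrottleSketch {
  static int maxBlocksDeletedPerRun(int containerLimitPerInterval, int blockLimitPerContainer) {
    return containerLimitPerInterval * blockLimitPerContainer;
  }
  public static void main(String[] args) {
    // With both limits set to 1, one run empties one container, so two runs empty both.
    System.out.println(maxBlocksDeletedPerRun(1, 1));     // 1
    System.out.println(2 * maxBlocksDeletedPerRun(1, 1)); // 2
  }
}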
// Process 1 container per interval conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, @@ -911,15 +821,13 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) int containerCount = 2; int chunksPerBlock = 10; int blocksPerContainer = 1; - createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, - chunksPerBlock); + createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, chunksPerBlock); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); - BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf, keyValueHandler); + BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); List containerData = Lists.newArrayList(); containerSet.listContainer(0L, containerCount, containerData); @@ -927,42 +835,36 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) GenericTestUtils.waitFor(service::isStarted, 100, 3000); // Deleting one of the two containers and its single block. - // Hence, space used by the container of whose block has been - // deleted should be zero. + // Hence, space used by the container of whose block has been deleted should be zero. deleteAndWait(service, 1); - GenericTestUtils.waitFor(() -> - (containerData.get(0).getBytesUsed() == 0 || - containerData.get(1).getBytesUsed() == 0), - 100, 3000); + GenericTestUtils.waitFor( + () -> (containerData.get(0).getBytesUsed() == 0 || containerData.get(1).getBytesUsed() == 0), + 100, + 3000); - assertFalse((containerData.get(0).getBytesUsed() == 0) && ( - containerData.get(1).getBytesUsed() == 0)); + assertFalse((containerData.get(0).getBytesUsed() == 0) && (containerData.get(1).getBytesUsed() == 0)); - // Deleting the second container. Hence, space used by both the - // containers should be zero. + // Deleting the second container. Hence, space used by both the containers should be zero. deleteAndWait(service, 2); - GenericTestUtils.waitFor(() -> - (containerData.get(0).getBytesUsed() == - 0 && containerData.get(1).getBytesUsed() == 0), - 100, 3000); + GenericTestUtils.waitFor( + () -> (containerData.get(0).getBytesUsed() == 0 && containerData.get(1).getBytesUsed() == 0), + 100, + 3000); } finally { service.shutdown(); } } @ContainerTestVersionInfo.ContainerTest - public void testContainerMaxLockHoldingTime( - ContainerTestVersionInfo versionInfo) throws Exception { + public void testContainerMaxLockHoldingTime(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); - GenericTestUtils.LogCapturer log = - GenericTestUtils.LogCapturer.captureLogs( - LoggerFactory.getLogger(BlockDeletingTask.class)); + LogCapturer log = LogCapturer.captureLogs(LoggerFactory.getLogger(BlockDeletingTask.class)); DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); - // Ensure that the lock holding timeout occurs every time a deletion - // transaction is executed by setting BlockDeletingMaxLockHoldingTime to -1. + // Ensure that the lock holding timeout occurs every time a deletion transaction is executed + // by setting BlockDeletingMaxLockHoldingTime to -1. 
dnConf.setBlockDeletingMaxLockHoldingTime(Duration.ofMillis(-1)); dnConf.setBlockDeletionLimit(3); conf.setFromObject(dnConf); @@ -971,40 +873,36 @@ public void testContainerMaxLockHoldingTime( int containerCount = 1; int chunksPerBlock = 10; int blocksPerContainer = 3; - createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, - chunksPerBlock); + createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, chunksPerBlock); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, ContainerMetrics.create(conf), c -> { }); - BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf, keyValueHandler); + BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); List containerData = Lists.newArrayList(); containerSet.listContainer(0L, containerCount, containerData); try { GenericTestUtils.waitFor(service::isStarted, 100, 3000); deleteAndWait(service, 1); - GenericTestUtils.waitFor(() -> - (containerData.get(0).getBytesUsed() == 0), - 100, 3000); - if (schemaVersion != null && ( - schemaVersion.equals(SCHEMA_V2) || schemaVersion.equals(SCHEMA_V3))) { - - // Since MaxLockHoldingTime is -1, every "deletion transaction" triggers - // a timeout except the last one, where a "deletion transaction" - // will be created for each Block, so it will start - // blocksPerContainer - 1 timeout. - assertEquals(blocksPerContainer - 1, - StringUtils.countMatches(log.getOutput(), "Max lock hold time")); + GenericTestUtils.waitFor( + () -> (containerData.get(0).getBytesUsed() == 0), + 100, + 3000); + if (schemaVersion != null && (schemaVersion.equals(SCHEMA_V2) || schemaVersion.equals(SCHEMA_V3))) { + + // Since MaxLockHoldingTime is -1, every "deletion transaction" + // triggers a timeout except the last one, + // where a "deletion transaction" will be created for each Block, + // so it will start blocksPerContainer - 1 timeout. + assertEquals(blocksPerContainer - 1, StringUtils.countMatches(log.getOutput(), "Max lock hold time")); } } finally { service.shutdown(); } } - public long currentBlockSpace(List containerData, - int totalContainers) { + public long currentBlockSpace(List containerData, int totalContainers) { long totalSpaceUsed = 0; for (int i = 0; i < totalContainers; i++) { totalSpaceUsed += containerData.get(i).getBytesUsed(); @@ -1016,16 +914,15 @@ public long currentBlockSpace(List containerData, public void testBlockThrottle(ContainerTestVersionInfo versionInfo) throws Exception { setLayoutAndSchemaForTest(versionInfo); - // Properties : - // - Number of containers : 5 - // - Number of blocks per container : 3 - // - Number of chunks per block : 1 - // - Container limit per interval : 10 - // - Block limit per container : 2 + // Properties: + // - Number of containers: 5 + // - Number of blocks per container: 3 + // - Number of chunks per block: 1 + // - Container limit per interval: 10 + // - Block limit per container: 2 // - // Each time containers can be all scanned, but only 10 blocks - // can be actually deleted. So it requires 2 waves - // to cleanup all the 15 blocks. + // Each time containers can be all scanned, but only 10 blocks can be actually deleted. + // So it requires 2 waves to clean up all the 15 blocks. 
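// [Editor's sketch, not part of the patch] The properties above give 5 containers * 3 blocks = 15
// blocks with at most 10 block deletions per interval, hence ceil(15 / 10) = 2 waves. The snippet
// below only illustrates that arithmetic; blockSpace is an assumed per-block size, and the real
// limit comes from dnConf.setBlockDeletionLimit(10) set just below.
final class BlockThrottleSketch {
  static int wavesNeeded(int totalBlocks, int blockLimitPerInterval) {
    return (totalBlocks + blockLimitPerInterval - 1) / blockLimitPerInterval; // ceiling division
  }
  public static void main(String[] args) {
    int totalBlocks = 5 * 3;          // containers * blocks per container
    int blockLimitPerInterval = 10;
    long blockSpace = 1024;           // assumed size of one block, for illustration only
    System.out.println(wavesNeeded(totalBlocks, blockLimitPerInterval)); // 2
    System.out.println(blockLimitPerInterval * blockSpace);              // space freed after wave 1
    System.out.println(totalBlocks * blockSpace);                        // space freed after wave 2
  }
}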
DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); dnConf.setBlockDeletionLimit(10); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); @@ -1038,50 +935,47 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) }); int containerCount = 5; int blocksPerContainer = 3; - createToDeleteBlocks(containerSet, containerCount, - blocksPerContainer, 1); + createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, 1); - BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf, keyValueHandler); + BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); List containerData = Lists.newArrayList(); containerSet.listContainer(0L, containerCount, containerData); long blockSpace = containerData.get(0).getBytesUsed() / blocksPerContainer; - long totalContainerSpace = - containerCount * containerData.get(0).getBytesUsed(); + long totalContainerSpace = containerCount * containerData.get(0).getBytesUsed(); try { GenericTestUtils.waitFor(service::isStarted, 100, 3000); // Total blocks = 3 * 5 = 15 // blockLimitPerInterval = 10 - // each interval will at most runDeletingTasks = 10 blocks - // but as per of deletion policy (random/topNorder), it will fetch all 3 - // blocks from first 3 containers and 1 block from last container. + // Each interval will at most runDeletingTasks = 10 blocks, + // but as per of deletion policy (random/topNorder), + // it will fetch all 3 blocks from first 3 containers and 1 block from last container. // C1 - 3 BLOCKS, C2 - 3 BLOCKS, C3 - 3 BLOCKS, C4 - 1 BLOCK - // Deleted space of 10 blocks should be equal to (initial total space - // of container - current total space of container). + // Deleted space of 10 blocks should be equal to + // (initial total space of container - current total space of container). deleteAndWait(service, 1); - GenericTestUtils.waitFor(() -> - blockLimitPerInterval * blockSpace == - (totalContainerSpace - - currentBlockSpace(containerData, containerCount)), - 100, 3000); + GenericTestUtils.waitFor( + () -> blockLimitPerInterval * blockSpace + == (totalContainerSpace - currentBlockSpace(containerData, containerCount)), + 100, + 3000); // There is only 5 blocks left to runDeletingTasks - // (Deleted space of previous 10 blocks + these left 5 blocks) should - // be equal to (initial total space of container - // - current total space of container(it will be zero as all blocks - // in all the containers are deleted)). + // (Deleted space of previous 10 blocks + these left 5 blocks) + // should be equal to + // (initial total space of container - + // current total space of container (it will be zero as all blocks in all the containers are deleted)). 
deleteAndWait(service, 2); long totalContainerBlocks = blocksPerContainer * containerCount; - GenericTestUtils.waitFor(() -> - totalContainerBlocks * blockSpace == - (totalContainerSpace - - currentBlockSpace(containerData, containerCount)), - 100, 3000); + GenericTestUtils.waitFor( + () -> totalContainerBlocks * blockSpace + == (totalContainerSpace - currentBlockSpace(containerData, containerCount)), + 100, + 3000); } finally { service.shutdown(); @@ -1094,15 +988,12 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) * @param expectedCount expected records in the table * @param containerData KeyValueContainerData * @param filter KeyPrefixFilter - * @throws IOException */ - private void assertBlockDataTableRecordCount(int expectedCount, - KeyValueContainerData containerData, KeyPrefixFilter filter) - throws IOException { + private void assertBlockDataTableRecordCount(int expectedCount, KeyValueContainerData containerData, + KeyPrefixFilter filter) throws IOException { try (DBHandle handle = BlockUtils.getDB(containerData, conf)) { long containerID = containerData.getContainerID(); - assertBlockDataTableRecordCount(expectedCount, handle, filter, - containerID); + assertBlockDataTableRecordCount(expectedCount, handle, filter, containerID); } } @@ -1113,23 +1004,22 @@ private void assertBlockDataTableRecordCount(int expectedCount, * @param handle DB handle * @param filter KeyPrefixFilter * @param containerID the container ID to filter results - * @throws IOException */ - private void assertBlockDataTableRecordCount(int expectedCount, - DBHandle handle, KeyPrefixFilter filter, long containerID) - throws IOException { + private void assertBlockDataTableRecordCount(int expectedCount, DBHandle handle, KeyPrefixFilter filter, + long containerID) throws IOException { long count = 0L; - try (BlockIterator iterator = handle.getStore(). 
- getBlockIterator(containerID, filter)) { + try (BlockIterator iterator = handle.getStore().getBlockIterator(containerID, filter)) { iterator.seekToFirst(); while (iterator.hasNext()) { iterator.nextBlock(); count += 1; } } - assertEquals(expectedCount, count, "Excepted: " + expectedCount - + ", but actual: " + count + " in the blockData table of container: " - + containerID + "."); + assertEquals( + expectedCount, + count, + "Excepted: " + expectedCount + + ", but actual: " + count + " in the blockData table of container: " + containerID + "."); } /** @@ -1137,14 +1027,11 @@ private void assertBlockDataTableRecordCount(int expectedCount, * * @param fileName the string of file to be created * @param dir the directory where file to be created - * @param sizeInBytes the bytes size of the file - * @throws IOException + * @param sizeInBytes the byte size of the file */ - private void createRandomContentFile(String fileName, File dir, - long sizeInBytes) throws IOException { + private void createRandomContentFile(String fileName, File dir, long sizeInBytes) throws IOException { File file = new File(dir, fileName); - try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, - "rw")) { + try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw")) { randomAccessFile.setLength(sizeInBytes); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 0c4612b79fa..6996522b322 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -82,16 +82,13 @@ /** * Tests processing of containers written with DB schema version 2, * which stores all its data in a one-db-per-container layout. - * Schema version 3 will use a one-db-per-disk layout, but it - * should still be able to read, delete data, and update metadata for schema - * version 2 containers. - * We have a switch "hdds.datanode.container.schema.v3.enabled", so we could - * create a test container with it off and turn it on later to - * test the container operations above. + * Schema version 3 will use a one-db-per-disk layout, + * but it should still be able to read, delete data, and update metadata for schema version 2 containers. + * We have a switch "hdds.datanode.container.schema.v3.enabled", + * so we could create a test container with it off and turn it on later to test the container operations above. *

- * The functionality executed by these tests assumes that all containers will - * have to be closed before an upgrade, meaning that containers written with - * schema version 2 will only ever be encountered in their closed state. + * The functionality executed by these tests assumes that all containers will have to be closed before an upgrade, + * meaning that containers written with schema version 2 will only ever be encountered in their closed state. *

*/ public class TestSchemaTwoBackwardsCompatibility { @@ -105,7 +102,6 @@ public class TestSchemaTwoBackwardsCompatibility { private BlockManager blockManager; private ChunkManager chunkManager; private ContainerSet containerSet; - private KeyValueHandler keyValueHandler; private OzoneContainer ozoneContainer; private static final int BLOCKS_PER_CONTAINER = 6; @@ -113,8 +109,7 @@ public class TestSchemaTwoBackwardsCompatibility { private static final int DELETE_TXNS_PER_CONTAINER = 2; private static final int BLOCKS_PER_TXN = 2; private static final int CHUNK_LENGTH = 1024; - private static final byte[] SAMPLE_DATA = - randomAlphanumeric(1024).getBytes(UTF_8); + private static final byte[] SAMPLE_DATA = randomAlphanumeric(1024).getBytes(UTF_8); @BeforeEach public void setup() throws Exception { @@ -128,15 +123,21 @@ public void setup() throws Exception { conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath()); - volumeSet = new MutableVolumeSet(datanodeUuid, clusterID, conf, null, - StorageVolume.VolumeType.DATA_VOLUME, null); + volumeSet = new MutableVolumeSet( + datanodeUuid, + clusterID, + conf, + null, + StorageVolume.VolumeType.DATA_VOLUME, + null); blockManager = new BlockManagerImpl(conf); - chunkManager = new FilePerBlockStrategy(true, blockManager, volumeSet); + chunkManager = new FilePerBlockStrategy(true, blockManager); containerSet = new ContainerSet(1000); - keyValueHandler = new KeyValueHandler(conf, datanodeUuid, - containerSet, volumeSet, ContainerMetrics.create(conf), c -> { }); + KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, + containerSet, volumeSet, ContainerMetrics.create(conf), c -> { + }); ozoneContainer = mock(OzoneContainer.class); when(ozoneContainer.getContainerSet()).thenReturn(containerSet); when(ozoneContainer.getWriteChannel()).thenReturn(null); @@ -152,33 +153,28 @@ public void cleanup() { @Test public void testDBFile() throws IOException { - // create a container of schema v2 + // Create a container of schema v2 KeyValueContainer container = createTestContainer(); - assertEquals(container.getContainerData().getSchemaVersion(), - OzoneConsts.SCHEMA_V2); + assertEquals(container.getContainerData().getSchemaVersion(), OzoneConsts.SCHEMA_V2); // db file should be under the container path - String containerPath = container.getContainerData().getDbFile() - .getParentFile().getParentFile().getName(); - assertEquals(containerPath, - Long.toString(container.getContainerData().getContainerID())); + String containerPath = container.getContainerData().getDbFile().getParentFile().getParentFile().getName(); + assertEquals(containerPath, Long.toString(container.getContainerData().getContainerID())); } @Test public void testBlockIteration() throws IOException { - // create a container of schema v2 + // Create a container of schema v2 KeyValueContainer container = createTestContainer(); - assertEquals(container.getContainerData().getSchemaVersion(), - OzoneConsts.SCHEMA_V2); + assertEquals(container.getContainerData().getSchemaVersion(), OzoneConsts.SCHEMA_V2); - // turn on schema v3 first, then do operations + // Turn on schema v3 first, then do operations ContainerTestUtils.enableSchemaV3(conf); try (DBHandle db = BlockUtils.getDB(container.getContainerData(), conf)) { long containerID = container.getContainerData().getContainerID(); int blockCount = 0; - try (BlockIterator iter = db.getStore() - .getBlockIterator(containerID)) { + try (BlockIterator 
iter = db.getStore().getBlockIterator(containerID)) { while (iter.hasNext()) { BlockData blockData = iter.nextBlock(); int chunkCount = 0; @@ -196,136 +192,119 @@ public void testBlockIteration() throws IOException { @Test public void testReadMetadata() throws IOException { - // create a container of schema v2 + // Create a container of schema v2 KeyValueContainer container = createTestContainer(); - assertEquals(container.getContainerData().getSchemaVersion(), - OzoneConsts.SCHEMA_V2); + assertEquals(container.getContainerData().getSchemaVersion(), OzoneConsts.SCHEMA_V2); KeyValueContainerData cData = container.getContainerData(); assertEquals(cData.getBlockCount(), BLOCKS_PER_CONTAINER); - assertEquals(cData.getNumPendingDeletionBlocks(), - DELETE_TXNS_PER_CONTAINER * BLOCKS_PER_TXN); - assertEquals(cData.getBytesUsed(), - CHUNK_LENGTH * CHUNKS_PER_BLOCK * BLOCKS_PER_CONTAINER); + assertEquals(cData.getNumPendingDeletionBlocks(), DELETE_TXNS_PER_CONTAINER * BLOCKS_PER_TXN); + assertEquals(cData.getBytesUsed(), CHUNK_LENGTH * CHUNKS_PER_BLOCK * BLOCKS_PER_CONTAINER); - // turn on schema v3 first, then do operations + // Turn on schema v3 first, then do operations ContainerTestUtils.enableSchemaV3(conf); try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table metadatatable = db.getStore().getMetadataTable(); - assertEquals((long)metadatatable.get(BLOCK_COUNT), - BLOCKS_PER_CONTAINER); - assertEquals((long)metadatatable.get(PENDING_DELETE_BLOCK_COUNT), - DELETE_TXNS_PER_CONTAINER * BLOCKS_PER_TXN); + assertEquals((long)metadatatable.get(BLOCK_COUNT), BLOCKS_PER_CONTAINER); + assertEquals((long)metadatatable.get(PENDING_DELETE_BLOCK_COUNT), DELETE_TXNS_PER_CONTAINER * BLOCKS_PER_TXN); assertEquals((long)metadatatable.get(CONTAINER_BYTES_USED), CHUNK_LENGTH * CHUNKS_PER_BLOCK * BLOCKS_PER_CONTAINER); } } @Test - public void testDeleteViaTransation() throws IOException, TimeoutException, + public void testDeleteViaTransaction() throws IOException, TimeoutException, InterruptedException { conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, - BLOCKS_PER_CONTAINER); + conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, BLOCKS_PER_CONTAINER); - // create a container of schema v2 + // Create a container of schema v2 KeyValueContainer container = createTestContainer(); - assertEquals(container.getContainerData().getSchemaVersion(), - OzoneConsts.SCHEMA_V2); - // close it + assertEquals(container.getContainerData().getSchemaVersion(), OzoneConsts.SCHEMA_V2); + // Close it container.close(); containerSet.addContainer(container); KeyValueContainerData cData = container.getContainerData(); - // turn on schema v3 first, then do operations + // Turn on schema v3 first, then do operations ContainerTestUtils.enableSchemaV3(conf); - // start block deleting service + // Start block deleting service long initialTotalSpace = cData.getBytesUsed(); - BlockDeletingServiceTestImpl service = - new BlockDeletingServiceTestImpl(ozoneContainer, 1000, conf); + BlockDeletingServiceTestImpl service = new BlockDeletingServiceTestImpl(ozoneContainer, 1000, conf); service.start(); GenericTestUtils.waitFor(service::isStarted, 100, 3000); service.runDeletingTasks(); - GenericTestUtils.waitFor(() -> service.getTimesOfProcessed() == 1, - 100, 3000); - GenericTestUtils.waitFor(() -> cData.getBytesUsed() != initialTotalSpace, - 100, 3000); + GenericTestUtils.waitFor(() -> service.getTimesOfProcessed() == 1, 100, 3000); + 
GenericTestUtils.waitFor(() -> cData.getBytesUsed() != initialTotalSpace, 100, 3000); - // check in-memory metadata after deletion + // Check in-memory metadata after deletion long blockSize = CHUNK_LENGTH * CHUNKS_PER_BLOCK; - long expectedKeyCount = BLOCKS_PER_CONTAINER - - DELETE_TXNS_PER_CONTAINER * BLOCKS_PER_TXN; + long expectedKeyCount = BLOCKS_PER_CONTAINER - DELETE_TXNS_PER_CONTAINER * BLOCKS_PER_TXN; long expectedBytesUsed = blockSize * expectedKeyCount; assertEquals(cData.getBlockCount(), expectedKeyCount); assertEquals(cData.getNumPendingDeletionBlocks(), 0); assertEquals(cData.getBytesUsed(), expectedBytesUsed); - // check db metadata after deletion + // Check db metadata after deletion try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table metadatatable = db.getStore().getMetadataTable(); assertEquals((long)metadatatable.get(BLOCK_COUNT), expectedKeyCount); assertEquals((long)metadatatable.get(PENDING_DELETE_BLOCK_COUNT), 0); - assertEquals((long)metadatatable.get(CONTAINER_BYTES_USED), - expectedBytesUsed); + assertEquals((long)metadatatable.get(CONTAINER_BYTES_USED), expectedBytesUsed); } } private KeyValueContainer createTestContainer() throws IOException { long containerID = ContainerTestHelper.getTestContainerID(); - KeyValueContainerData cData = new KeyValueContainerData(containerID, + KeyValueContainerData cData = new KeyValueContainerData( + containerID, ContainerLayoutVersion.FILE_PER_BLOCK, ContainerTestHelper.CONTAINER_MAX_SIZE, - UUID.randomUUID().toString(), datanodeUuid); + UUID.randomUUID().toString(), + datanodeUuid); cData.setSchemaVersion(OzoneConsts.SCHEMA_V2); KeyValueContainer container = new KeyValueContainer(cData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - clusterID); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), clusterID); - // populate with some blocks - // metadata will be updated here, too + // Populate with some blocks; metadata will be updated here, too for (long localID = 0; localID < BLOCKS_PER_CONTAINER; localID++) { BlockData blockData = createTestBlockData(localID, container); blockManager.putBlock(container, blockData); } - // populate with some delete txns + // Populate with some delete txns for (long txnID = 0; txnID < DELETE_TXNS_PER_CONTAINER; txnID++) { long startBlockID = txnID * DELETE_TXNS_PER_CONTAINER; List blocks = Arrays.asList(startBlockID, startBlockID + 1); - DeletedBlocksTransaction txn = - createTestDeleteTxn(txnID, blocks, containerID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - try (BatchOperation batch = db.getStore().getBatchHandler() - .initBatchOperation()) { - DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = - (DatanodeStoreSchemaTwoImpl) db.getStore(); - dnStoreTwoImpl.getDeleteTransactionTable() - .putWithBatch(batch, txnID, txn); - - // update delete related metadata - db.getStore().getMetadataTable().putWithBatch(batch, - cData.getLatestDeleteTxnKey(), txn.getTxID()); - db.getStore().getMetadataTable().putWithBatch(batch, - cData.getPendingDeleteBlockCountKey(), - cData.getNumPendingDeletionBlocks() + BLOCKS_PER_TXN); - db.getStore().getBatchHandler().commitBatchOperation(batch); - - cData.updateDeleteTransactionId(txn.getTxID()); - cData.incrPendingDeletionBlocks(BLOCKS_PER_TXN); - } + DeletedBlocksTransaction txn = createTestDeleteTxn(txnID, blocks, containerID); + try (DBHandle db = BlockUtils.getDB(cData, conf); + BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) { + DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = 
(DatanodeStoreSchemaTwoImpl) db.getStore(); + dnStoreTwoImpl.getDeleteTransactionTable().putWithBatch(batch, txnID, txn); + + // Update delete related metadata + db.getStore().getMetadataTable().putWithBatch(batch, cData.getLatestDeleteTxnKey(), txn.getTxID()); + db.getStore().getMetadataTable() + .putWithBatch( + batch, + cData.getPendingDeleteBlockCountKey(), + cData.getNumPendingDeletionBlocks() + BLOCKS_PER_TXN); + db.getStore().getBatchHandler().commitBatchOperation(batch); + + cData.updateDeleteTransactionId(txn.getTxID()); + cData.incrPendingDeletionBlocks(BLOCKS_PER_TXN); } } return container; } - private BlockData createTestBlockData(long localID, Container container) - throws StorageContainerException { + private BlockData createTestBlockData(long localID, Container container) throws StorageContainerException { long containerID = container.getContainerData().getContainerID(); BlockID blockID = new BlockID(containerID, localID); BlockData blockData = new BlockData(blockID); - // populate with some chunks + // Populate with some chunks for (int chunkIndex = 0; chunkIndex < CHUNKS_PER_BLOCK; chunkIndex++) { ChunkInfo chunk = createTestChunkData(chunkIndex, blockID, container); blockData.addChunk(chunk.getProtoBufMessage()); @@ -334,22 +313,22 @@ private BlockData createTestBlockData(long localID, Container container) return blockData; } - private ChunkInfo createTestChunkData(long chunkIndex, - BlockID blockID, Container container) throws StorageContainerException { + private ChunkInfo createTestChunkData(long chunkIndex, BlockID blockID, Container container) + throws StorageContainerException { String chunkName = blockID.getLocalID() + "_chunk_" + (chunkIndex + 1); ChunkBuffer chunkData = ChunkBuffer.wrap(ByteBuffer.wrap(SAMPLE_DATA)); - ChunkInfo chunkInfo = new ChunkInfo(chunkName, - chunkIndex * CHUNK_LENGTH, CHUNK_LENGTH); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + ChunkInfo chunkInfo = new ChunkInfo(chunkName, chunkIndex * CHUNK_LENGTH, CHUNK_LENGTH); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); return chunkInfo; } - private DeletedBlocksTransaction createTestDeleteTxn(long txnID, - List blocks, long containerID) { - return DeletedBlocksTransaction.newBuilder().setTxID(txnID) - .setContainerID(containerID).addAllLocalID(blocks).setCount(0).build(); + private DeletedBlocksTransaction createTestDeleteTxn(long txnID, List blocks, long containerID) { + return DeletedBlocksTransaction.newBuilder() + .setTxID(txnID) + .setContainerID(containerID) + .addAllLocalID(blocks) + .setCount(0) + .build(); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 1cbd6ee4706..cee90e4c298 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.common.OzoneChecksumException; -import 
org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -87,6 +86,7 @@ import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getContainerCommandResponse; +import static org.apache.hadoop.ozone.common.utils.BufferUtils.concatByteStrings; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -108,40 +108,42 @@ public class TestHddsDispatcher { @TempDir private Path tempDir; - private static final Logger LOG = LoggerFactory.getLogger( - TestHddsDispatcher.class); + private static final Logger LOG = LoggerFactory.getLogger(TestHddsDispatcher.class); @TempDir private File testDir; - public static final IncrementalReportSender NO_OP_ICR_SENDER = - c -> { - }; + public static final IncrementalReportSender NO_OP_ICR_SENDER = c -> { + }; @ContainerLayoutTestInfo.ContainerTest - public void testContainerCloseActionWhenFull( - ContainerLayoutVersion layout) throws IOException { + public void testContainerCloseActionWhenFull(ContainerLayoutVersion layout) throws IOException { String testDirPath = testDir.getPath(); OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); - MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, - null, StorageVolume.VolumeType.DATA_VOLUME, null); + MutableVolumeSet volumeSet = new MutableVolumeSet( + dd.getUuidString(), + conf, + null, + StorageVolume.VolumeType.DATA_VOLUME, + null); try { UUID scmId = UUID.randomUUID(); ContainerSet containerSet = new ContainerSet(1000); StateContext context = ContainerTestUtils.getMockContext(dd, conf); - KeyValueContainerData containerData = new KeyValueContainerData(1L, + KeyValueContainerData containerData = new KeyValueContainerData( + 1L, layout, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), + (long) StorageUnit.GB.toBytes(1), + UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - scmId.toString()); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -151,24 +153,17 @@ public void testContainerCloseActionWhenFull( context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER)); } - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics, null); + HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, handlers, context, metrics, null); hddsDispatcher.setClusterId(scmId.toString()); ContainerCommandResponseProto responseOne = hddsDispatcher 
.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); - assertEquals(ContainerProtos.Result.SUCCESS, - responseOne.getResult()); - verify(context, times(0)) - .addContainerActionIfAbsent(any(ContainerAction.class)); - containerData.setBytesUsed(Double.valueOf( - StorageUnit.MB.toBytes(950)).longValue()); + assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult()); + verify(context, times(0)).addContainerActionIfAbsent(any(ContainerAction.class)); + containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue()); ContainerCommandResponseProto responseTwo = hddsDispatcher .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null); - assertEquals(ContainerProtos.Result.SUCCESS, - responseTwo.getResult()); - verify(context, times(1)) - .addContainerActionIfAbsent(any(ContainerAction.class)); - + assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult()); + verify(context, times(1)).addContainerActionIfAbsent(any(ContainerAction.class)); } finally { volumeSet.shutdown(); ContainerMetrics.remove(); @@ -211,7 +206,7 @@ public void testWriteChunkChecksum() throws IOException { conf.setFromObject(dnConf); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - //Send a few WriteChunkRequests + // Send a few WriteChunkRequests ContainerCommandResponseProto response; ContainerCommandRequestProto writeChunkRequest0 = getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 0); hddsDispatcher.dispatch(writeChunkRequest0, null); @@ -220,21 +215,18 @@ public void testWriteChunkChecksum() throws IOException { assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Send Read Chunk request for written chunk. - response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null); + response = hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - ByteString responseData = BufferUtils.concatByteStrings( + ByteString responseData = concatByteStrings( response.getReadChunk().getDataBuffers().getBuffersList()); - assertEquals(writeChunkRequest0.getWriteChunk().getData(), - responseData); + assertEquals(writeChunkRequest0.getWriteChunk().getData(), responseData); // Test checksum on Read: final DispatcherContext context = DispatcherContext .newBuilder(DispatcherContext.Op.READ_STATE_MACHINE_DATA) .build(); - response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context); + response = hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); } finally { ContainerMetrics.remove(); @@ -242,19 +234,17 @@ public void testWriteChunkChecksum() throws IOException { } @ContainerLayoutTestInfo.ContainerTest - public void testContainerCloseActionWhenVolumeFull( - ContainerLayoutVersion layoutVersion) throws Exception { + public void testContainerCloseActionWhenVolumeFull(ContainerLayoutVersion layoutVersion) throws Exception { String testDirPath = testDir.getPath(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, - 100.0, StorageUnit.BYTES); + conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, 100.0, StorageUnit.BYTES); DatanodeDetails dd = randomDatanodeDetails(); HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(testDirPath).datanodeUuid(dd.getUuidString()) 
.conf(conf).usageCheckFactory(MockSpaceUsageCheckFactory.NONE); - // state of cluster : available (140) > 100 ,datanode volume - // utilisation threshold not yet reached. container creates are successful. + // State of cluster: available (140) > 100, datanode volume utilization threshold not yet reached. + // Container creates are successful. AtomicLong usedSpace = new AtomicLong(360); SpaceUsageSource spaceUsage = MockSpaceUsageSource.of(500, usedSpace); @@ -262,13 +252,12 @@ public void testContainerCloseActionWhenVolumeFull( spaceUsage, Duration.ZERO, inMemory(new AtomicLong(0))); volumeBuilder.usageCheckFactory(factory); MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); - when(volumeSet.getVolumesList()) - .thenReturn(Collections.singletonList(volumeBuilder.build())); + when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList(volumeBuilder.build())); try { UUID scmId = UUID.randomUUID(); ContainerSet containerSet = new ContainerSet(1000); StateContext context = ContainerTestUtils.getMockContext(dd, conf); - // create a 50 byte container + // Create a 50-byte container KeyValueContainerData containerData = new KeyValueContainerData(1L, layoutVersion, 50, UUID.randomUUID().toString(), @@ -276,8 +265,7 @@ public void testContainerCloseActionWhenVolumeFull( Container container = new KeyValueContainer(containerData, conf); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - scmId.toString()); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -287,23 +275,19 @@ public void testContainerCloseActionWhenVolumeFull( context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER)); } - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics, null); + HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, handlers, context, metrics, null); hddsDispatcher.setClusterId(scmId.toString()); - containerData.getVolume().getVolumeInfo() - .ifPresent(volumeInfo -> volumeInfo.incrementUsedSpace(50)); + containerData.getVolume().getVolumeInfo().ifPresent(volumeInfo -> volumeInfo.incrementUsedSpace(50)); usedSpace.addAndGet(50); ContainerCommandResponseProto response = hddsDispatcher .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); - assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - verify(context, times(1)) - .addContainerActionIfAbsent(any(ContainerAction.class)); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + verify(context, times(1)).addContainerActionIfAbsent(any(ContainerAction.class)); - // try creating another container now as the volume used has crossed - // threshold + // Try creating another container now as the volume used has crossed a threshold - KeyValueContainerData containerData2 = new KeyValueContainerData(1L, + KeyValueContainerData containerData2 = new KeyValueContainerData( + 1L, layoutVersion, 50, UUID.randomUUID().toString(), dd.getUuidString()); @@ -312,8 +296,7 @@ public void testContainerCloseActionWhenVolumeFull( assertThrows(StorageContainerException.class, () -> container2.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString())); - 
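// [Editor's sketch, not part of the patch] A rough model of the free-space reasoning in this test;
// the real check lives in the volume and container-create code and is more involved. Capacity 500,
// 360 bytes used and a 100-byte minimum free space leave 140 bytes available, so the first 50-byte
// container fits; once another 50 bytes are consumed only 90 remain, so the next create fails with
// "disk out of space".
final class VolumeFreeSpaceSketch {
  static boolean canCreateContainer(long capacity, long used, long minFreeSpace) {
    return capacity - used >= minFreeSpace; // simplified: enough free space must remain
  }
  public static void main(String[] args) {
    System.out.println(canCreateContainer(500, 360, 100));      // true: 140 bytes still free
    System.out.println(canCreateContainer(500, 360 + 50, 100)); // false: only 90 bytes left
  }
}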
assertEquals("Container creation failed, due to disk out of space", - scException.getMessage()); + assertEquals("Container creation failed, due to disk out of space", scException.getMessage()); } finally { volumeSet.shutdown(); ContainerMetrics.remove(); @@ -332,40 +315,33 @@ public void testCreateContainerWithWriteChunk() throws IOException { HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(dd.getUuidString(), 1L, 1L); - // send read chunk request and make sure container does not exist + // Send read chunk request and make sure the container does not exist ContainerCommandResponseProto response = hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); - assertEquals(response.getResult(), - ContainerProtos.Result.CONTAINER_NOT_FOUND); - // send write chunk request without sending create container + assertEquals(response.getResult(), ContainerProtos.Result.CONTAINER_NOT_FOUND); + // Send it write chunk request without sending create container response = hddsDispatcher.dispatch(writeChunkRequest, null); - // container should be created as part of write chunk request + // Container should be created as part of writing chunk request assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - // send read chunk request to read the chunk written above - response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); + // Send read chunk request to read the chunk written above + response = hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - ByteString responseData = BufferUtils.concatByteStrings( + ByteString responseData = concatByteStrings( response.getReadChunk().getDataBuffers().getBuffersList()); - assertEquals(writeChunkRequest.getWriteChunk().getData(), - responseData); - // put block + assertEquals(writeChunkRequest.getWriteChunk().getData(), responseData); + // Put block ContainerCommandRequestProto putBlockRequest = ContainerTestHelper.getPutBlockRequest(writeChunkRequest); response = hddsDispatcher.dispatch(putBlockRequest, null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - // send list block request - ContainerCommandRequestProto listBlockRequest = - ContainerTestHelper.getListBlockRequest(writeChunkRequest); + // Send list block request + ContainerCommandRequestProto listBlockRequest = ContainerTestHelper.getListBlockRequest(writeChunkRequest); response = hddsDispatcher.dispatch(listBlockRequest, null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); assertEquals(1, response.getListBlock().getBlockDataList().size()); - for (ContainerProtos.BlockData blockData : - response.getListBlock().getBlockDataList()) { - assertEquals(writeChunkRequest.getWriteChunk().getBlockID(), - blockData.getBlockID()); - assertEquals(writeChunkRequest.getWriteChunk().getChunkData() - .getLen(), blockData.getSize()); + for (ContainerProtos.BlockData blockData : response.getListBlock().getBlockDataList()) { + assertEquals(writeChunkRequest.getWriteChunk().getBlockID(), blockData.getBlockID()); + assertEquals(writeChunkRequest.getWriteChunk().getChunkData().getLen(), blockData.getSize()); assertEquals(1, blockData.getChunksCount()); } } finally { @@ -383,26 +359,21 @@ public void testContainerNotFoundWithCommitChunk() throws IOException { conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); 
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = - getWriteChunkRequest(dd.getUuidString(), 1L, 1L); + ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(dd.getUuidString(), 1L, 1L); - // send read chunk request and make sure container does not exist + // Send read chunk request and make sure the container does not exist ContainerCommandResponseProto response = hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); - assertEquals( - ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(HddsDispatcher.LOG); - // send write chunk request without sending create container + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(HddsDispatcher.LOG); + // Send it write chunk request without sending create container response = hddsDispatcher.dispatch(writeChunkRequest, COMMIT_STAGE); - // container should not be found - assertEquals( - ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + // Container should not be found + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); - assertThat(logCapturer.getOutput()).contains( - "ContainerID " + writeChunkRequest.getContainerID() - + " does not exist"); + assertThat(logCapturer.getOutput()) + .contains("ContainerID " + writeChunkRequest.getContainerID() + " does not exist"); } finally { ContainerMetrics.remove(); } @@ -418,33 +389,30 @@ public void testWriteChunkWithCreateContainerFailure() throws IOException { conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest( - dd.getUuidString(), 1L, 1L); + ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(dd.getUuidString(), 1L, 1L); HddsDispatcher mockDispatcher = spy(hddsDispatcher); ContainerCommandResponseProto.Builder builder = getContainerCommandResponse(writeChunkRequest, ContainerProtos.Result.DISK_OUT_OF_SPACE, ""); - // Return DISK_OUT_OF_SPACE response when writing chunk - // with container creation. - doReturn(builder.build()).when(mockDispatcher) - .createContainer(writeChunkRequest); - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(HddsDispatcher.LOG); - // send write chunk request without sending create container + // Return DISK_OUT_OF_SPACE response when writing chunk with container creation. 
+ doReturn(builder.build()).when(mockDispatcher).createContainer(writeChunkRequest); + + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(HddsDispatcher.LOG); + // Send it write chunk request without sending create container mockDispatcher.dispatch(writeChunkRequest, null); - // verify the error log + // Verify the error log assertThat(logCapturer.getOutput()) - .contains("ContainerID " + writeChunkRequest.getContainerID() - + " creation failed , Result: DISK_OUT_OF_SPACE"); + .contains("ContainerID " + + writeChunkRequest.getContainerID() + + " creation failed, Result: DISK_OUT_OF_SPACE"); } finally { ContainerMetrics.remove(); } } @Test - public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { + public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { String testDirPath = testDir.getPath(); try { UUID scmId = UUID.randomUUID(); @@ -453,9 +421,8 @@ public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest( - dd.getUuidString(), 1L, 1L); - //Send same WriteChunkRequest + ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(dd.getUuidString(), 1L, 1L); + // Send same WriteChunkRequest ContainerCommandResponseProto response; hddsDispatcher.dispatch(writeChunkRequest, null); hddsDispatcher.dispatch(writeChunkRequest, null); @@ -463,37 +430,29 @@ public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Send Read Chunk request for written chunk. 
- response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); + response = hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - ByteString responseData = BufferUtils.concatByteStrings( - response.getReadChunk().getDataBuffers().getBuffersList()); - assertEquals(writeChunkRequest.getWriteChunk().getData(), - responseData); + ByteString responseData = concatByteStrings(response.getReadChunk().getDataBuffers().getBuffersList()); + assertEquals(writeChunkRequest.getWriteChunk().getData(), responseData); // Put Block - ContainerCommandRequestProto putBlockRequest = - ContainerTestHelper.getPutBlockRequest(writeChunkRequest); + ContainerCommandRequestProto putBlockRequest = ContainerTestHelper.getPutBlockRequest(writeChunkRequest); - //Send same PutBlockRequest + // Send same PutBlockRequest hddsDispatcher.dispatch(putBlockRequest, null); hddsDispatcher.dispatch(putBlockRequest, null); response = hddsDispatcher.dispatch(putBlockRequest, null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Check PutBlock Data - ContainerCommandRequestProto listBlockRequest = - ContainerTestHelper.getListBlockRequest(writeChunkRequest); + ContainerCommandRequestProto listBlockRequest = ContainerTestHelper.getListBlockRequest(writeChunkRequest); response = hddsDispatcher.dispatch(listBlockRequest, null); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); assertEquals(1, response.getListBlock().getBlockDataList().size()); - for (ContainerProtos.BlockData blockData : - response.getListBlock().getBlockDataList()) { - assertEquals(writeChunkRequest.getWriteChunk().getBlockID(), - blockData.getBlockID()); - assertEquals(writeChunkRequest.getWriteChunk().getChunkData() - .getLen(), blockData.getSize()); + for (ContainerProtos.BlockData blockData : response.getListBlock().getBlockDataList()) { + assertEquals(writeChunkRequest.getWriteChunk().getBlockID(), blockData.getBlockID()); + assertEquals(writeChunkRequest.getWriteChunk().getChunkData().getLen(), blockData.getSize()); assertEquals(1, blockData.getChunksCount()); } } finally { @@ -503,23 +462,22 @@ public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { /** * Creates HddsDispatcher instance with given infos. - * @param dd datanode detail info. + * + * @param dd datanode detail info. * @param scmId UUID of scm id. - * @param conf configuration be used. + * @param conf configuration be used. * @return HddsDispatcher HddsDispatcher instance. 
- * @throws IOException */ - static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, - OzoneConfiguration conf) throws IOException { + static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { return createDispatcher(dd, scmId, conf, null); } - static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, - OzoneConfiguration conf, TokenVerifier tokenVerifier) throws IOException { + static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf, + TokenVerifier tokenVerifier) throws IOException { ContainerSet containerSet = new ContainerSet(1000); - VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, - StorageVolume.VolumeType.DATA_VOLUME, null); - volumeSet.getVolumesList().stream().forEach(v -> { + VolumeSet volumeSet = + new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + volumeSet.getVolumesList().forEach(v -> { try { v.format(scmId.toString()); v.createWorkingDir(scmId.toString(), null); @@ -537,21 +495,20 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, containerSet, volumeSet, metrics, NO_OP_ICR_SENDER)); } - final HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, - containerSet, volumeSet, handlers, context, metrics, tokenVerifier); + final HddsDispatcher hddsDispatcher = + new HddsDispatcher(conf, containerSet, handlers, context, metrics, tokenVerifier); hddsDispatcher.setClusterId(scmId.toString()); return hddsDispatcher; } - // This method has to be removed once we move scm/TestUtils.java - // from server-scm project to container-service or to common project. + /** + * This method has to be removed + * once we move scm/TestUtils.java from a server-scm project to container-service or to common project. 
+ */ private static DatanodeDetails randomDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, 0); + DatanodeDetails.Port restPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.REST, 0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") @@ -562,11 +519,8 @@ private static DatanodeDetails randomDatanodeDetails() { return builder.build(); } - private ContainerCommandRequestProto getWriteChunkRequest( - String datanodeId, Long containerId, Long localId) { - - ByteString data = ByteString.copyFrom( - UUID.randomUUID().toString().getBytes(UTF_8)); + private ContainerCommandRequestProto getWriteChunkRequest(String datanodeId, Long containerId, Long localId) { + ByteString data = ByteString.copyFrom(UUID.randomUUID().toString().getBytes(UTF_8)); ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo .newBuilder() .setChunkName( @@ -609,9 +563,7 @@ private ContainerCommandRequestProto getWriteChunkRequest0( ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo .newBuilder() - .setChunkName( - DigestUtils.md5Hex("dummy-key") + "_stream_" - + containerId + "_chunk_" + localId) + .setChunkName(DigestUtils.md5Hex("dummy-key") + "_stream_" + containerId + "_chunk_" + localId) .setOffset((long) chunkNum * lenOfBytes) .setLen(lenOfBytes) .setChecksumData(checksum(chunkData).getProtoBufMessage()) @@ -619,8 +571,7 @@ private ContainerCommandRequestProto getWriteChunkRequest0( WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto .newBuilder() - .setBlockID(new BlockID(containerId, localId) - .getDatanodeBlockIDProtobuf()) + .setBlockID(new BlockID(containerId, localId).getDatanodeBlockIDProtobuf()) .setChunkData(chunk) .setData(chunkData); @@ -640,29 +591,31 @@ static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long local static ContainerCommandRequestProto newPutSmallFile( BlockID blockID, ByteString data) { - final ContainerProtos.BlockData.Builder blockData - = ContainerProtos.BlockData.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()); - final ContainerProtos.PutBlockRequestProto.Builder putBlockRequest - = ContainerProtos.PutBlockRequestProto.newBuilder() - .setBlockData(blockData); - final ContainerProtos.KeyValue keyValue = ContainerProtos.KeyValue.newBuilder() - .setKey("OverWriteRequested") - .setValue("true") - .build(); - final ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk") - .setOffset(0) - .setLen(data.size()) - .addMetadata(keyValue) - .setChecksumData(checksum(data).getProtoBufMessage()) - .build(); - final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest - = ContainerProtos.PutSmallFileRequestProto.newBuilder() - .setChunkInfo(chunk) - .setBlock(putBlockRequest) - .setData(data) - .build(); + final ContainerProtos.BlockData.Builder blockData = + ContainerProtos.BlockData.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()); + final ContainerProtos.PutBlockRequestProto.Builder putBlockRequest 
= + ContainerProtos.PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + final ContainerProtos.KeyValue keyValue = + ContainerProtos.KeyValue.newBuilder() + .setKey("OverWriteRequested") + .setValue("true") + .build(); + final ContainerProtos.ChunkInfo chunk = + ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(blockID.getLocalID() + "_chunk") + .setOffset(0) + .setLen(data.size()) + .addMetadata(keyValue) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest = + ContainerProtos.PutSmallFileRequestProto.newBuilder() + .setChunkInfo(chunk) + .setBlock(putBlockRequest) + .setData(data) + .build(); return ContainerCommandRequestProto.newBuilder() .setCmdType(ContainerProtos.Type.PutSmallFile) .setContainerID(blockID.getContainerID()) @@ -672,14 +625,12 @@ static ContainerCommandRequestProto newPutSmallFile( } /** - * Creates container read chunk request using input container write chunk - * request. + * Creates container read chunk request using input container write chunk request. * * @param writeChunkRequest - Input container write chunk request * @return container read chunk request */ - private ContainerCommandRequestProto getReadChunkRequest( - ContainerCommandRequestProto writeChunkRequest) { + private ContainerCommandRequestProto getReadChunkRequest(ContainerCommandRequestProto writeChunkRequest) { WriteChunkRequestProto writeChunk = writeChunkRequest.getWriteChunk(); ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = ContainerProtos.ReadChunkRequestProto.newBuilder() @@ -712,22 +663,18 @@ private void verify() { } @Override - public void verify(ContainerCommandRequestProtoOrBuilder cmd, - String encodedToken) { + public void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) { verify(); } @Override - public void verify(Token token, - ContainerCommandRequestProtoOrBuilder cmd) { + public void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) { verify(); } }; - final ContainerCommandRequestProto request = getWriteChunkRequest( - dd.getUuidString(), 1L, 1L); - final HddsDispatcher dispatcher = createDispatcher( - dd, scmId, conf, tokenVerifier); + final ContainerCommandRequestProto request = getWriteChunkRequest(dd.getUuidString(), 1L, 1L); + final HddsDispatcher dispatcher = createDispatcher(dd, scmId, conf, tokenVerifier); final DispatcherContext[] notVerify = { newContext(Op.WRITE_STATE_MACHINE_DATA, WriteChunkStage.WRITE_DATA), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java index 8f2ad307e82..d421288722b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java @@ -60,12 +60,10 @@ public void setup() throws Exception { this.containerSet = mock(ContainerSet.class); this.volumeSet = mock(MutableVolumeSet.class); DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); - StateContext context = ContainerTestUtils.getMockContext( - datanodeDetails, conf); + StateContext context = ContainerTestUtils.getMockContext(datanodeDetails, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType 
containerType : - ContainerProtos.ContainerType.values()) { + for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) { handlers.put(containerType, Handler.getHandlerForContainerType( containerType, conf, @@ -73,8 +71,7 @@ public void setup() throws Exception { containerSet, volumeSet, metrics, TestHddsDispatcher.NO_OP_ICR_SENDER)); } - this.dispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, null, metrics, null); + this.dispatcher = new HddsDispatcher(conf, containerSet, handlers, null, metrics, null); } @AfterEach @@ -84,25 +81,19 @@ public void tearDown() { @Test public void testGetKeyValueHandler() throws Exception { - Handler kvHandler = dispatcher.getHandler( - ContainerProtos.ContainerType.KeyValueContainer); + Handler kvHandler = dispatcher.getHandler(ContainerProtos.ContainerType.KeyValueContainer); - assertInstanceOf(KeyValueHandler.class, kvHandler, - "getHandlerForContainerType returned incorrect handler"); + assertInstanceOf(KeyValueHandler.class, kvHandler, "getHandlerForContainerType returned incorrect handler"); } @Test public void testGetHandlerForInvalidContainerType() { - // When new ContainerProtos.ContainerType are added, increment the code - // for invalid enum. - ContainerProtos.ContainerType invalidContainerType = - ContainerProtos.ContainerType.forNumber(2); + // When new ContainerProtos.ContainerType are added, increment the code for invalid enum. + ContainerProtos.ContainerType invalidContainerType = ContainerProtos.ContainerType.forNumber(2); - assertNull(invalidContainerType, - "New ContainerType detected. Not an invalid containerType"); + assertNull(invalidContainerType, "New ContainerType detected. Not an invalid containerType"); Handler dispatcherHandler = dispatcher.getHandler(invalidContainerType); - assertNull(dispatcherHandler, - "Get Handler for Invalid ContainerType should return null."); + assertNull(dispatcherHandler, "Get Handler for Invalid ContainerType should return null."); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java index ab6e2c857c5..c067fea1445 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java @@ -86,7 +86,7 @@ public ContainerLayoutVersion getLayout() { FILE_PER_BLOCK { @Override public ChunkManager createChunkManager(boolean sync, BlockManager manager) { - return new FilePerBlockStrategy(sync, null, null); + return new FilePerBlockStrategy(sync, null); } @Override @@ -100,11 +100,9 @@ public ContainerLayoutVersion getLayout() { } }; - public abstract ChunkManager createChunkManager(boolean sync, - BlockManager manager); + public abstract ChunkManager createChunkManager(boolean sync, BlockManager manager); - public abstract void validateFileCount(File dir, long blockCount, - long chunkCount); + public abstract void validateFileCount(File dir, long blockCount, long chunkCount); public abstract ContainerLayoutVersion getLayout(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 
2637f1922c6..83433cd8398 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -51,7 +51,6 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.ozone.test.GenericTestUtils; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; @@ -105,7 +104,6 @@ public void setup() throws StorageContainerException { dispatcher = new HddsDispatcher( new OzoneConfiguration(), mock(ContainerSet.class), - mock(VolumeSet.class), handlers, mock(StateContext.class), mock(ContainerMetrics.class), @@ -118,7 +116,7 @@ public void setup() throws StorageContainerException { * Test that Handler handles different command types correctly. */ @Test - public void testHandlerCommandHandling() throws Exception { + public void testHandlerCommandHandling() { reset(handler); // Test Create Container Request handling ContainerCommandRequestProto createContainerRequest = @@ -126,175 +124,131 @@ public void testHandlerCommandHandling() throws Exception { .setCmdType(ContainerProtos.Type.CreateContainer) .setContainerID(DUMMY_CONTAINER_ID) .setDatanodeUuid(DATANODE_UUID) - .setCreateContainer(ContainerProtos.CreateContainerRequestProto - .getDefaultInstance()) + .setCreateContainer(ContainerProtos.CreateContainerRequestProto.getDefaultInstance()) .build(); KeyValueContainer container = mock(KeyValueContainer.class); - KeyValueHandler - .dispatchRequest(handler, createContainerRequest, container, null); - verify(handler, times(0)).handleListBlock( - any(ContainerCommandRequestProto.class), any()); + KeyValueHandler.dispatchRequest(handler, createContainerRequest, container, null); + verify(handler, times(0)).handleListBlock(any(ContainerCommandRequestProto.class), any()); // Test Read Container Request handling - ContainerCommandRequestProto readContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer); - KeyValueHandler - .dispatchRequest(handler, readContainerRequest, container, null); - verify(handler, times(1)).handleReadContainer( - any(ContainerCommandRequestProto.class), any()); + ContainerCommandRequestProto readContainerRequest = getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer); + KeyValueHandler.dispatchRequest(handler, readContainerRequest, container, null); + verify(handler, times(1)).handleReadContainer(any(ContainerCommandRequestProto.class), any()); // Test Update Container Request handling ContainerCommandRequestProto updateContainerRequest = getDummyCommandRequestProto(ContainerProtos.Type.UpdateContainer); - KeyValueHandler - .dispatchRequest(handler, updateContainerRequest, container, null); - verify(handler, times(1)).handleUpdateContainer( - any(ContainerCommandRequestProto.class), any()); + KeyValueHandler.dispatchRequest(handler, updateContainerRequest, container, null); + verify(handler, times(1)).handleUpdateContainer(any(ContainerCommandRequestProto.class), any()); // Test Delete Container Request handling ContainerCommandRequestProto deleteContainerRequest = getDummyCommandRequestProto(ContainerProtos.Type.DeleteContainer); - KeyValueHandler - .dispatchRequest(handler, deleteContainerRequest, 
container, null); - verify(handler, times(1)).handleDeleteContainer( - any(ContainerCommandRequestProto.class), any()); + KeyValueHandler.dispatchRequest(handler, deleteContainerRequest, container, null); + verify(handler, times(1)).handleDeleteContainer(any(ContainerCommandRequestProto.class), any()); // Test List Container Request handling - ContainerCommandRequestProto listContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListContainer); - KeyValueHandler - .dispatchRequest(handler, listContainerRequest, container, null); - verify(handler, times(1)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); + ContainerCommandRequestProto listContainerRequest = getDummyCommandRequestProto(ContainerProtos.Type.ListContainer); + KeyValueHandler.dispatchRequest(handler, listContainerRequest, container, null); + verify(handler, times(1)).handleUnsupportedOp(any(ContainerCommandRequestProto.class)); // Test Close Container Request handling ContainerCommandRequestProto closeContainerRequest = getDummyCommandRequestProto(ContainerProtos.Type.CloseContainer); - KeyValueHandler - .dispatchRequest(handler, closeContainerRequest, container, null); - verify(handler, times(1)).handleCloseContainer( - any(ContainerCommandRequestProto.class), any()); + KeyValueHandler.dispatchRequest(handler, closeContainerRequest, container, null); + verify(handler, times(1)).handleCloseContainer(any(ContainerCommandRequestProto.class), any()); // Test Put Block Request handling - ContainerCommandRequestProto putBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.PutBlock); - KeyValueHandler - .dispatchRequest(handler, putBlockRequest, container, null); - verify(handler, times(1)).handlePutBlock( - any(ContainerCommandRequestProto.class), any(), any()); + ContainerCommandRequestProto putBlockRequest = getDummyCommandRequestProto(ContainerProtos.Type.PutBlock); + KeyValueHandler.dispatchRequest(handler, putBlockRequest, container, null); + verify(handler, times(1)).handlePutBlock(any(ContainerCommandRequestProto.class), any(), any()); // Test Get Block Request handling - ContainerCommandRequestProto getBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.GetBlock); - KeyValueHandler - .dispatchRequest(handler, getBlockRequest, container, null); - verify(handler, times(1)).handleGetBlock( - any(ContainerCommandRequestProto.class), any()); - - // Block Deletion is handled by BlockDeletingService and need not be - // tested here. - - ContainerCommandRequestProto listBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListBlock); - KeyValueHandler - .dispatchRequest(handler, listBlockRequest, container, null); - verify(handler, times(1)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); + ContainerCommandRequestProto getBlockRequest = getDummyCommandRequestProto(ContainerProtos.Type.GetBlock); + KeyValueHandler.dispatchRequest(handler, getBlockRequest, container, null); + verify(handler, times(1)).handleGetBlock(any(ContainerCommandRequestProto.class), any()); + + // Block Deletion is handled by BlockDeletingService and need not be tested here. 
+ + ContainerCommandRequestProto listBlockRequest = getDummyCommandRequestProto(ContainerProtos.Type.ListBlock); + KeyValueHandler.dispatchRequest(handler, listBlockRequest, container, null); + verify(handler, times(1)).handleUnsupportedOp(any(ContainerCommandRequestProto.class)); // Test Read Chunk Request handling - ContainerCommandRequestProto readChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk); - KeyValueHandler - .dispatchRequest(handler, readChunkRequest, container, null); - verify(handler, times(1)).handleReadChunk( - any(ContainerCommandRequestProto.class), any(), any()); + ContainerCommandRequestProto readChunkRequest = getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk); + KeyValueHandler.dispatchRequest(handler, readChunkRequest, container, null); + verify(handler, times(1)).handleReadChunk(any(ContainerCommandRequestProto.class), any(), any()); - // Chunk Deletion is handled by BlockDeletingService and need not be - // tested here. + // Chunk Deletion is handled by BlockDeletingService and need not be tested here. // Test Write Chunk Request handling - ContainerCommandRequestProto writeChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk); - KeyValueHandler - .dispatchRequest(handler, writeChunkRequest, container, null); - verify(handler, times(1)).handleWriteChunk( - any(ContainerCommandRequestProto.class), any(), any()); + ContainerCommandRequestProto writeChunkRequest = getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk); + KeyValueHandler.dispatchRequest(handler, writeChunkRequest, container, null); + verify(handler, times(1)).handleWriteChunk(any(ContainerCommandRequestProto.class), any(), any()); // Test List Chunk Request handling - ContainerCommandRequestProto listChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListChunk); - KeyValueHandler - .dispatchRequest(handler, listChunkRequest, container, null); - verify(handler, times(2)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); + ContainerCommandRequestProto listChunkRequest = getDummyCommandRequestProto(ContainerProtos.Type.ListChunk); + KeyValueHandler.dispatchRequest(handler, listChunkRequest, container, null); + verify(handler, times(2)).handleUnsupportedOp(any(ContainerCommandRequestProto.class)); // Test Put Small File Request handling - ContainerCommandRequestProto putSmallFileRequest = - getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile); - KeyValueHandler - .dispatchRequest(handler, putSmallFileRequest, container, null); - verify(handler, times(1)).handlePutSmallFile( - any(ContainerCommandRequestProto.class), any(), any()); + ContainerCommandRequestProto putSmallFileRequest = getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile); + KeyValueHandler.dispatchRequest(handler, putSmallFileRequest, container, null); + verify(handler, times(1)).handlePutSmallFile(any(ContainerCommandRequestProto.class), any(), any()); // Test Get Small File Request handling - ContainerCommandRequestProto getSmallFileRequest = - getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile); - KeyValueHandler - .dispatchRequest(handler, getSmallFileRequest, container, null); - verify(handler, times(1)).handleGetSmallFile( - any(ContainerCommandRequestProto.class), any()); + ContainerCommandRequestProto getSmallFileRequest = getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile); + KeyValueHandler.dispatchRequest(handler, getSmallFileRequest, container, null); + verify(handler, 
times(1)).handleGetSmallFile(any(ContainerCommandRequestProto.class), any()); // Test Finalize Block Request handling - ContainerCommandRequestProto finalizeBlock = - getDummyCommandRequestProto(ContainerProtos.Type.FinalizeBlock); - KeyValueHandler - .dispatchRequest(handler, finalizeBlock, container, null); - verify(handler, times(1)).handleFinalizeBlock( - any(ContainerCommandRequestProto.class), any()); + ContainerCommandRequestProto finalizeBlock = getDummyCommandRequestProto(ContainerProtos.Type.FinalizeBlock); + KeyValueHandler.dispatchRequest(handler, finalizeBlock, container, null); + verify(handler, times(1)).handleFinalizeBlock(any(ContainerCommandRequestProto.class), any()); } @Test public void testVolumeSetInKeyValueHandler() throws Exception { - File datanodeDir = - Files.createDirectory(tempDir.resolve("datanodeDir")).toFile(); - File metadataDir = - Files.createDirectory(tempDir.resolve("metadataDir")).toFile(); + File datanodeDir = Files.createDirectory(tempDir.resolve("datanodeDir")).toFile(); + File metadataDir = Files.createDirectory(tempDir.resolve("metadataDir")).toFile(); OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, datanodeDir.getAbsolutePath()); conf.set(OZONE_METADATA_DIRS, metadataDir.getAbsolutePath()); - MutableVolumeSet - volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, - null, StorageVolume.VolumeType.DATA_VOLUME, null); + MutableVolumeSet volumeSet = new MutableVolumeSet( + UUID.randomUUID().toString(), + conf, + null, + StorageVolume.VolumeType.DATA_VOLUME, + null); try { ContainerSet cset = new ContainerSet(1000); int[] interval = new int[1]; interval[0] = 2; ContainerMetrics metrics = new ContainerMetrics(interval); DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); - StateContext context = ContainerTestUtils.getMockContext( - datanodeDetails, conf); - KeyValueHandler keyValueHandler = new KeyValueHandler(conf, - context.getParent().getDatanodeDetails().getUuidString(), cset, - volumeSet, metrics, c -> { - }); - assertEquals("org.apache.hadoop.ozone.container.common" + - ".volume.CapacityVolumeChoosingPolicy", - keyValueHandler.getVolumeChoosingPolicyForTesting() - .getClass().getName()); - - //Set a class which is not of sub class of VolumeChoosingPolicy - conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); + StateContext context = ContainerTestUtils.getMockContext(datanodeDetails, conf); + KeyValueHandler keyValueHandler = new KeyValueHandler( + conf, + context.getParent().getDatanodeDetails().getUuidString(), + cset, + volumeSet, + metrics, + c -> { }); + assertEquals("org.apache.hadoop.ozone.container.common.volume.CapacityVolumeChoosingPolicy", + keyValueHandler.getVolumeChoosingPolicyForTesting().getClass().getName()); + + // Set a class which is not of subclass of VolumeChoosingPolicy + conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); RuntimeException exception = assertThrows(RuntimeException.class, () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, metrics, c -> { })); assertThat(exception).hasMessageEndingWith( - "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher " + - "not org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy"); + "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher " + + "not 
org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy"); } finally { volumeSet.shutdown(); FileUtil.fullyDelete(datanodeDir); @@ -302,8 +256,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception { } } - private ContainerCommandRequestProto getDummyCommandRequestProto( - ContainerProtos.Type cmdType) { + private ContainerCommandRequestProto getDummyCommandRequestProto(ContainerProtos.Type cmdType) { return ContainerCommandRequestProto.newBuilder() .setCmdType(cmdType) .setContainerID(DUMMY_CONTAINER_ID) @@ -312,8 +265,7 @@ private ContainerCommandRequestProto getDummyCommandRequestProto( } @ContainerLayoutTestInfo.ContainerTest - public void testCloseInvalidContainer(ContainerLayoutVersion layoutVersion) - throws IOException { + public void testCloseInvalidContainer(ContainerLayoutVersion layoutVersion) throws IOException { long containerID = 1234L; OzoneConfiguration conf = new OzoneConfiguration(); KeyValueContainerData kvData = new KeyValueContainerData(containerID, @@ -329,15 +281,13 @@ public void testCloseInvalidContainer(ContainerLayoutVersion layoutVersion) .setCmdType(ContainerProtos.Type.CloseContainer) .setContainerID(DUMMY_CONTAINER_ID) .setDatanodeUuid(DATANODE_UUID) - .setCloseContainer(ContainerProtos.CloseContainerRequestProto - .getDefaultInstance()) + .setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance()) .build(); dispatcher.dispatch(closeContainerRequest, null); - when(handler.handleCloseContainer(any(), any())) - .thenCallRealMethod(); + when(handler.handleCloseContainer(any(), any())).thenCallRealMethod(); doCallRealMethod().when(handler).closeContainer(any()); - // Closing invalid container should return error response. + // Closing an invalid container should return error response. 
ContainerProtos.ContainerCommandResponseProto response = handler.handleCloseContainer(closeContainerRequest, container); @@ -366,11 +316,9 @@ public void testDeleteContainer() throws IOException { hddsVolume.createWorkingDir(clusterId, null); hddsVolume.createTmpDirs(clusterId); - when(volumeSet.getVolumesList()) - .thenReturn(Collections.singletonList(hddsVolume)); + when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList(hddsVolume)); - List hddsVolumeList = StorageVolumeUtil - .getHddsVolumesList(volumeSet.getVolumesList()); + List hddsVolumeList = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()); assertEquals(1, hddsVolumeList.size()); @@ -378,13 +326,16 @@ public void testDeleteContainer() throws IOException { final AtomicInteger icrReceived = new AtomicInteger(0); - final KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, metrics, + final KeyValueHandler kvHandler = new KeyValueHandler( + conf, + datanodeId, + containerSet, + volumeSet, + metrics, c -> icrReceived.incrementAndGet()); kvHandler.setClusterID(clusterId); - final ContainerCommandRequestProto createContainer = - createContainerRequest(datanodeId, containerID); + final ContainerCommandRequestProto createContainer = createContainerRequest(datanodeId, containerID); kvHandler.handleCreateContainer(createContainer, null); assertEquals(1, icrReceived.get()); @@ -394,18 +345,15 @@ public void testDeleteContainer() throws IOException { assertEquals(2, icrReceived.get()); assertNull(containerSet.getContainer(containerID)); - File[] deletedContainers = - hddsVolume.getDeletedContainerDir().listFiles(); + File[] deletedContainers = hddsVolume.getDeletedContainerDir().listFiles(); assertNotNull(deletedContainers); assertEquals(0, deletedContainers.length); - // Case 2 : failed move of container dir to tmp location should trigger - // a volume scan + // Case 2: failed move of container dir to tmp location should trigger a volume scan final long container2ID = 2L; - final ContainerCommandRequestProto createContainer2 = - createContainerRequest(datanodeId, container2ID); + final ContainerCommandRequestProto createContainer2 = createContainerRequest(datanodeId, container2ID); kvHandler.handleCreateContainer(createContainer2, null); @@ -413,7 +361,7 @@ public void testDeleteContainer() throws IOException { Container container = containerSet.getContainer(container2ID); assertNotNull(container); File deletedContainerDir = hddsVolume.getDeletedContainerDir(); - // to simulate failed move + // To simulate failed move File dummyDir = new File(DUMMY_PATH); hddsVolume.setDeletedContainerDir(dummyDir); try { @@ -422,27 +370,24 @@ public void testDeleteContainer() throws IOException { assertThat(sce.getMessage()).contains("Failed to move container"); } verify(volumeSet).checkVolumeAsync(hddsVolume); - // cleanup + // Cleanup hddsVolume.setDeletedContainerDir(deletedContainerDir); - // Case 3: Delete Container on a failed volume + // Case 3: Delete Container on a failed volume hddsVolume.failVolume(); GenericTestUtils.LogCapturer kvHandlerLogs = GenericTestUtils.LogCapturer.captureLogs(KeyValueHandler.getLogger()); - // add the container back to containerSet as removed in previous delete + // Add the container back to containerSet as removed in previous delete containerSet.addContainer(container); kvHandler.deleteContainer(container, true); - String expectedLog = - "Delete container issued on containerID 2 which is " + - "in a failed volume"; + String expectedLog = "Delete 
container issued on containerID 2 which is in a failed volume"; assertThat(kvHandlerLogs.getOutput()).contains(expectedLog); } finally { FileUtils.deleteDirectory(new File(testDir)); } } - private static ContainerCommandRequestProto createContainerRequest( - String datanodeId, long containerID) { + private static ContainerCommandRequestProto createContainerRequest(String datanodeId, long containerID) { return ContainerCommandRequestProto.newBuilder() .setCmdType(ContainerProtos.Type.CreateContainer) .setDatanodeUuid(datanodeId).setCreateContainer( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index f0c8a2077ea..369488014dc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -35,12 +35,14 @@ import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.UUID; @@ -65,12 +67,13 @@ import static org.mockito.Mockito.when; /** - * Test that KeyValueHandler fails certain operations when the - * container is unhealthy. + * Test that KeyValueHandler fails certain operations when the container is unhealthy. */ public class TestKeyValueHandlerWithUnhealthyContainer { - public static final Logger LOG = LoggerFactory.getLogger( - TestKeyValueHandlerWithUnhealthyContainer.class); + public static final Logger LOG = LoggerFactory.getLogger(TestKeyValueHandlerWithUnhealthyContainer.class); + + @TempDir + private File tempDir; private IncrementalReportSender mockIcrSender; @@ -104,7 +107,8 @@ public void testGetBlock() { } private static Stream getAllClientVersions() { - return Arrays.stream(ClientVersion.values()).flatMap(client -> IntStream.range(0, 6) + return Arrays.stream(ClientVersion.values()) + .flatMap(client -> IntStream.range(0, 6) .mapToObj(rid -> Arguments.of(client, rid))); } @@ -119,9 +123,13 @@ public void testGetBlockWithReplicaIndexMismatch(ClientVersion clientVersion, in handler.handleGetBlock( getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.GetBlock, rid), container); - assertEquals((replicaIndex > 0 && rid != replicaIndex && clientVersion.toProtoValue() >= - ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue()) ? - ContainerProtos.Result.CONTAINER_NOT_FOUND : UNKNOWN_BCSID, + assertEquals( + replicaIndex > 0 + && rid != replicaIndex + && clientVersion.toProtoValue() + >= ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue() + ? 
ContainerProtos.Result.CONTAINER_NOT_FOUND + : UNKNOWN_BCSID, response.getResult()); } @@ -134,8 +142,7 @@ public void testGetCommittedBlockLength() { ContainerProtos.ContainerCommandResponseProto response = handler.handleGetCommittedBlockLength( - getDummyCommandRequestProto( - ContainerProtos.Type.GetCommittedBlockLength), + getDummyCommandRequestProto(ContainerProtos.Type.GetCommittedBlockLength), container); assertEquals(UNKNOWN_BCSID, response.getResult()); } @@ -147,8 +154,7 @@ public void testReadChunk() { ContainerProtos.ContainerCommandResponseProto response = handler.handleReadChunk( - getDummyCommandRequestProto( - ContainerProtos.Type.ReadChunk), + getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk), container, null); assertEquals(UNKNOWN_BCSID, response.getResult()); } @@ -160,14 +166,19 @@ public void testReadChunkWithReplicaIndexMismatch(ClientVersion clientVersion, i KeyValueHandler handler = getDummyHandler(); for (int rid = 0; rid <= 5; rid++) { ContainerProtos.ContainerCommandResponseProto response = - handler.handleReadChunk(getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.ReadChunk, rid), - container, null); - assertEquals((replicaIndex > 0 && rid != replicaIndex && - clientVersion.toProtoValue() >= ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue()) ? - ContainerProtos.Result.CONTAINER_NOT_FOUND : UNKNOWN_BCSID, + handler.handleReadChunk( + getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.ReadChunk, rid), + container, + null); + assertEquals( + replicaIndex > 0 + && rid != replicaIndex + && clientVersion.toProtoValue() + >= ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue() + ? ContainerProtos.Result.CONTAINER_NOT_FOUND + : UNKNOWN_BCSID, response.getResult()); } - } @Test @@ -177,8 +188,7 @@ public void testFinalizeBlock() { ContainerProtos.ContainerCommandResponseProto response = handler.handleFinalizeBlock( - getDummyCommandRequestProto( - ContainerProtos.Type.FinalizeBlock), + getDummyCommandRequestProto(ContainerProtos.Type.FinalizeBlock), container); assertEquals(CONTAINER_UNHEALTHY, response.getResult()); } @@ -190,27 +200,21 @@ public void testGetSmallFile() { ContainerProtos.ContainerCommandResponseProto response = handler.handleGetSmallFile( - getDummyCommandRequestProto( - ContainerProtos.Type.GetSmallFile), + getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile), container); assertEquals(UNKNOWN_BCSID, response.getResult()); } @Test void testNPEFromPutBlock() throws IOException { - KeyValueContainer container = new KeyValueContainer( - mock(KeyValueContainerData.class), - new OzoneConfiguration()); + KeyValueContainer container = new KeyValueContainer(mock(KeyValueContainerData.class), new OzoneConfiguration()); KeyValueHandler subject = getDummyHandler(); BlockID blockID = getTestBlockID(1); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - getWriteChunkRequest(MockPipeline.createSingleNodePipeline(), - blockID, 123); + getWriteChunkRequest(MockPipeline.createSingleNodePipeline(), blockID, 123); ContainerProtos.ContainerCommandResponseProto response = - subject.handle( - getPutBlockRequest(writeChunkRequest), - container, null); + subject.handle(getPutBlockRequest(writeChunkRequest), container, null); assertEquals(CONTAINER_INTERNAL_ERROR, response.getResult()); } @@ -220,21 +224,17 @@ public void testMarkContainerUnhealthyInFailedVolume() throws IOException { KeyValueContainerData mockContainerData = mock(KeyValueContainerData.class); HddsVolume 
mockVolume = mock(HddsVolume.class); when(mockContainerData.getVolume()).thenReturn(mockVolume); - KeyValueContainer container = new KeyValueContainer( - mockContainerData, new OzoneConfiguration()); + when(mockContainerData.getMetadataPath()).thenReturn(tempDir.getAbsolutePath()); + KeyValueContainer container = new KeyValueContainer(mockContainerData, new OzoneConfiguration()); - // When volume is failed, the call to mark the container unhealthy should - // be ignored. + // When volume is failed, the call to mark the container unhealthy should be ignored. when(mockVolume.isFailed()).thenReturn(true); - handler.markContainerUnhealthy(container, - ContainerTestUtils.getUnhealthyScanResult()); + handler.markContainerUnhealthy(container, ContainerTestUtils.getUnhealthyScanResult()); verify(mockIcrSender, never()).send(any()); - // When volume is healthy, ICR should be sent when container is marked - // unhealthy. + // When volume is healthy, ICR should be sent when container is marked unhealthy. when(mockVolume.isFailed()).thenReturn(false); - handler.markContainerUnhealthy(container, - ContainerTestUtils.getUnhealthyScanResult()); + handler.markContainerUnhealthy(container, ContainerTestUtils.getUnhealthyScanResult()); verify(mockIcrSender, atMostOnce()).send(any()); } @@ -254,27 +254,28 @@ private KeyValueHandler getDummyHandler() { stateMachine.getDatanodeDetails().getUuidString(), mock(ContainerSet.class), mock(MutableVolumeSet.class), - mock(ContainerMetrics.class), mockIcrSender); + mock(ContainerMetrics.class), + mockIcrSender); } private KeyValueContainer getMockUnhealthyContainer() { KeyValueContainerData containerData = mock(KeyValueContainerData.class); - when(containerData.getState()).thenReturn( - ContainerProtos.ContainerDataProto.State.UNHEALTHY); + when(containerData.getState()) + .thenReturn(ContainerProtos.ContainerDataProto.State.UNHEALTHY); when(containerData.getBlockCommitSequenceId()).thenReturn(100L); - when(containerData.getProtoBufMessage()).thenReturn(ContainerProtos - .ContainerDataProto.newBuilder().setContainerID(1).build()); + when(containerData.getProtoBufMessage()) + .thenReturn(ContainerProtos.ContainerDataProto.newBuilder().setContainerID(1).build()); return new KeyValueContainer(containerData, new OzoneConfiguration()); } private KeyValueContainer getMockContainerWithReplicaIndex(int replicaIndex) { KeyValueContainerData containerData = mock(KeyValueContainerData.class); - when(containerData.getState()).thenReturn( - ContainerProtos.ContainerDataProto.State.CLOSED); + when(containerData.getState()) + .thenReturn(ContainerProtos.ContainerDataProto.State.CLOSED); when(containerData.getBlockCommitSequenceId()).thenReturn(100L); when(containerData.getReplicaIndex()).thenReturn(replicaIndex); - when(containerData.getProtoBufMessage()).thenReturn(ContainerProtos - .ContainerDataProto.newBuilder().setContainerID(1).build()); + when(containerData.getProtoBufMessage()) + .thenReturn(ContainerProtos.ContainerDataProto.newBuilder().setContainerID(1).build()); return new KeyValueContainer(containerData, new OzoneConfiguration()); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 1f69db78d62..ef37c226653 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -27,6 +27,7 @@ import java.time.Instant; import java.time.ZoneId; import java.util.List; +import java.util.SortedMap; import java.util.UUID; import java.util.concurrent.AbstractExecutorService; import java.util.concurrent.CountDownLatch; @@ -46,6 +47,8 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; @@ -55,7 +58,9 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCommandInfo; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinatorTask; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -109,6 +114,8 @@ public class TestReplicationSupervisor { }; private final AtomicReference replicatorRef = new AtomicReference<>(); + private final AtomicReference ecReplicatorRef = + new AtomicReference<>(); private ContainerSet set; @@ -135,6 +142,7 @@ public void setUp() throws Exception { @AfterEach public void cleanup() { replicatorRef.set(null); + ecReplicatorRef.set(null); } @ContainerLayoutTestInfo.ContainerTest @@ -394,6 +402,107 @@ public void taskWithObsoleteTermIsDropped(ContainerLayoutVersion layout) { assertEquals(0, supervisor.getReplicationSuccessCount()); } + @ContainerLayoutTestInfo.ContainerTest + public void testMultipleReplication(ContainerLayoutVersion layout, + @TempDir File tempFile) throws IOException { + this.layoutVersion = layout; + OzoneConfiguration conf = new OzoneConfiguration(); + // GIVEN + ReplicationSupervisor replicationSupervisor = + supervisorWithReplicator(FakeReplicator::new); + ReplicationSupervisor ecReconstructionSupervisor = supervisorWithECReconstruction(); + ReplicationSupervisorMetrics replicationMetrics = + ReplicationSupervisorMetrics.create(replicationSupervisor); + ReplicationSupervisorMetrics ecReconstructionMetrics = + ReplicationSupervisorMetrics.create(ecReconstructionSupervisor); + try { + //WHEN + replicationSupervisor.addTask(createTask(1L)); + ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(2L)); + replicationSupervisor.addTask(createTask(1L)); + replicationSupervisor.addTask(createTask(3L)); + ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(4L)); + + SimpleContainerDownloader moc = mock(SimpleContainerDownloader.class); + Path res = Paths.get("file:/tmp/no-such-file"); + when(moc.getContainerDataFromReplicas(anyLong(), anyList(), + any(Path.class), any())).thenReturn(res); + + final String testDir = 
tempFile.getPath(); + MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); + when(volumeSet.getVolumesList()).thenReturn(singletonList( + new HddsVolume.Builder(testDir).conf(conf).build())); + ContainerController mockedCC = mock(ContainerController.class); + ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet); + ContainerReplicator replicator = new DownloadAndImportReplicator( + conf, set, importer, moc); + replicatorRef.set(replicator); + replicationSupervisor.addTask(createTask(5L)); + + ReplicateContainerCommand cmd1 = createCommand(6L); + cmd1.setDeadline(clock.millis() + 10000); + ReplicationTask task1 = new ReplicationTask(cmd1, replicatorRef.get()); + clock.fastForward(15000); + replicationSupervisor.addTask(task1); + + ReconstructECContainersCommand cmd2 = createReconstructionCmd(7L); + cmd2.setDeadline(clock.millis() + 10000); + ECReconstructionCoordinatorTask task2 = new ECReconstructionCoordinatorTask( + ecReplicatorRef.get(), new ECReconstructionCommandInfo(cmd2)); + clock.fastForward(15000); + ecReconstructionSupervisor.addTask(task2); + ecReconstructionSupervisor.addTask(createECTask(8L)); + ecReconstructionSupervisor.addTask(createECTask(9L)); + + //THEN + assertEquals(2, replicationSupervisor.getReplicationSuccessCount()); + assertEquals(2, replicationSupervisor.getReplicationSuccessCount( + task1.getMetricName())); + assertEquals(1, replicationSupervisor.getReplicationFailureCount()); + assertEquals(1, replicationSupervisor.getReplicationFailureCount( + task1.getMetricName())); + assertEquals(1, replicationSupervisor.getReplicationSkippedCount()); + assertEquals(1, replicationSupervisor.getReplicationSkippedCount( + task1.getMetricName())); + assertEquals(1, replicationSupervisor.getReplicationTimeoutCount()); + assertEquals(1, replicationSupervisor.getReplicationTimeoutCount( + task1.getMetricName())); + assertEquals(5, replicationSupervisor.getReplicationRequestCount()); + assertEquals(5, replicationSupervisor.getReplicationRequestCount( + task1.getMetricName())); + assertEquals(0, replicationSupervisor.getReplicationRequestCount( + task2.getMetricName())); + + assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount()); + assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount( + task2.getMetricName())); + assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount()); + assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount( + task2.getMetricName())); + assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount()); + assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount( + task2.getMetricName())); + assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount()); + assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount( + task2.getMetricName())); + assertEquals(0, ecReconstructionSupervisor.getReplicationRequestCount( + task1.getMetricName())); + + MetricsCollectorImpl replicationMetricsCollector = new MetricsCollectorImpl(); + replicationMetrics.getMetrics(replicationMetricsCollector, true); + assertEquals(1, replicationMetricsCollector.getRecords().size()); + + MetricsCollectorImpl ecReconstructionMetricsCollector = new MetricsCollectorImpl(); + ecReconstructionMetrics.getMetrics(ecReconstructionMetricsCollector, true); + assertEquals(1, ecReconstructionMetricsCollector.getRecords().size()); + } finally { + replicationMetrics.unRegister(); + ecReconstructionMetrics.unRegister(); + replicationSupervisor.stop(); + 
ecReconstructionSupervisor.stop(); + } + } + @ContainerLayoutTestInfo.ContainerTest public void testPriorityOrdering(ContainerLayoutVersion layout) throws InterruptedException { @@ -476,6 +585,16 @@ private static class BlockingTask extends AbstractReplicationTask { this.waitForCompleteLatch = waitForCompletion; } + @Override + protected String getMetricName() { + return "Blockings"; + } + + @Override + protected String getMetricDescriptionSegment() { + return "blockings"; + } + @Override public void runTask() { runningLatch.countDown(); @@ -502,6 +621,16 @@ private static class OrderedTask extends AbstractReplicationTask { setPriority(priority); } + @Override + protected String getMetricName() { + return "Ordereds"; + } + + @Override + protected String getMetricDescriptionSegment() { + return "ordereds"; + } + @Override public void runTask() { completeList.add(name); @@ -531,6 +660,22 @@ private ReplicationSupervisor supervisorWith( return supervisor; } + private ReplicationSupervisor supervisorWithECReconstruction() throws IOException { + ConfigurationSource conf = new OzoneConfiguration(); + ExecutorService executor = newDirectExecutorService(); + ReplicationServer.ReplicationConfig repConf = + conf.getObject(ReplicationServer.ReplicationConfig.class); + ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder() + .stateContext(context).replicationConfig(repConf).executor(executor) + .clock(clock).build(); + + FakeECReconstructionCoordinator coordinator = new FakeECReconstructionCoordinator( + new OzoneConfiguration(), null, null, context, + ECReconstructionMetrics.create(), "", supervisor); + ecReplicatorRef.set(coordinator); + return supervisor; + } + private ReplicationTask createTask(long containerId) { ReplicateContainerCommand cmd = createCommand(containerId); return new ReplicationTask(cmd, replicatorRef.get()); @@ -538,7 +683,13 @@ private ReplicationTask createTask(long containerId) { private ECReconstructionCoordinatorTask createECTask(long containerId) { return new ECReconstructionCoordinatorTask(null, - createReconstructionCmd(containerId)); + createReconstructionCmdInfo(containerId)); + } + + private ECReconstructionCoordinatorTask createECTaskWithCoordinator(long containerId) { + ECReconstructionCommandInfo ecReconstructionCommandInfo = createReconstructionCmdInfo(containerId); + return new ECReconstructionCoordinatorTask(ecReplicatorRef.get(), + ecReconstructionCommandInfo); } private static ReplicateContainerCommand createCommand(long containerId) { @@ -548,18 +699,20 @@ private static ReplicateContainerCommand createCommand(long containerId) { return cmd; } - private static ECReconstructionCommandInfo createReconstructionCmd( + private static ECReconstructionCommandInfo createReconstructionCmdInfo( long containerId) { - List sources - = new ArrayList<>(); - sources.add(new ReconstructECContainersCommand - .DatanodeDetailsAndReplicaIndex( - MockDatanodeDetails.randomDatanodeDetails(), 1)); - sources.add(new ReconstructECContainersCommand - .DatanodeDetailsAndReplicaIndex( + return new ECReconstructionCommandInfo(createReconstructionCmd(containerId)); + } + + private static ReconstructECContainersCommand createReconstructionCmd( + long containerId) { + List sources = + new ArrayList<>(); + sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( + MockDatanodeDetails.randomDatanodeDetails(), 1)); + sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( MockDatanodeDetails.randomDatanodeDetails(), 2)); - 
sources.add(new ReconstructECContainersCommand - .DatanodeDetailsAndReplicaIndex( + sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( MockDatanodeDetails.randomDatanodeDetails(), 3)); byte[] missingIndexes = new byte[1]; @@ -567,14 +720,44 @@ private static ECReconstructionCommandInfo createReconstructionCmd( List target = singletonList( MockDatanodeDetails.randomDatanodeDetails()); - ReconstructECContainersCommand cmd = - new ReconstructECContainersCommand(containerId, - sources, - target, - Proto2Utils.unsafeByteString(missingIndexes), - new ECReplicationConfig(3, 2)); - - return new ECReconstructionCommandInfo(cmd); + ReconstructECContainersCommand cmd = new ReconstructECContainersCommand(containerId, sources, target, + Proto2Utils.unsafeByteString(missingIndexes), + new ECReplicationConfig(3, 2)); + cmd.setTerm(CURRENT_TERM); + return cmd; + } + + /** + * A fake coordinator that simulates successful reconstruction of ec containers. + */ + private class FakeECReconstructionCoordinator extends ECReconstructionCoordinator { + + private final OzoneConfiguration conf = new OzoneConfiguration(); + private final ReplicationSupervisor supervisor; + + FakeECReconstructionCoordinator(ConfigurationSource conf, + CertificateClient certificateClient, SecretKeySignerClient secretKeyClient, + StateContext context, ECReconstructionMetrics metrics, String threadNamePrefix, + ReplicationSupervisor supervisor) + throws IOException { + super(conf, certificateClient, secretKeyClient, context, metrics, threadNamePrefix); + this.supervisor = supervisor; + } + + @Override + public void reconstructECContainerGroup(long containerID, + ECReplicationConfig repConfig, SortedMap sourceNodeMap, + SortedMap targetNodeMap) { + assertEquals(1, supervisor.getTotalInFlightReplications()); + + KeyValueContainerData kvcd = new KeyValueContainerData( + containerID, layoutVersion, 100L, + UUID.randomUUID().toString(), UUID.randomUUID().toString()); + KeyValueContainer kvc = new KeyValueContainer(kvcd, conf); + assertDoesNotThrow(() -> { + set.addContainer(kvc); + }); + } } /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index e7c4ec4ce3d..29531f31518 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Objects; +import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.StringCodec; @@ -162,7 +163,15 @@ public String toString() { */ public static TransactionInfo readTransactionInfo( DBStoreHAManager metadataManager) throws IOException { - return metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY); + return metadataManager.getTransactionInfoTable().getSkipCache(TRANSACTION_INFO_KEY); + } + + public ByteString toByteString() throws IOException { + return ByteString.copyFrom(getCodec().toPersistedFormat(this)); + } + + public static TransactionInfo fromByteString(ByteString byteString) throws IOException { + return byteString == null ? 
null : getCodec().fromPersistedFormat(byteString.toByteArray()); } public SnapshotInfo toSnapshotInfo() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java index c47b176e93b..015cd10b8b9 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java @@ -49,7 +49,7 @@ public static boolean waitForCheckpointDirectoryExist(File file, final boolean success = RatisHelper.attemptUntilTrue(file::exists, POLL_INTERVAL_DURATION, maxWaitTimeout); if (!success) { LOG.info("Checkpoint directory: {} didn't get created in {} secs.", - maxWaitTimeout.getSeconds(), file.getAbsolutePath()); + file.getAbsolutePath(), maxWaitTimeout.getSeconds()); } return success; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java index c818c07b1ac..f153823db7c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java @@ -24,6 +24,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -354,6 +355,24 @@ public V getValue() { public String toString() { return "(key=" + key + ", value=" + value + ")"; } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof KeyValue)) { + return false; + } + KeyValue kv = (KeyValue) obj; + try { + return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public int hashCode() { + return Objects.hash(getKey(), getValue()); + } }; } diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css index e08e9c52060..389d9d78f21 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css @@ -91,3 +91,7 @@ body { .om-roles-background { background-color: #dcfbcd!important; } + +.scm-roles-background { + background-color: #dcfbcd!important; +} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html index c1f7d16aefa..9706ebdf6b3 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html @@ -21,6 +21,6 @@ Input arguments: - {{$ctrl.jmx.InputArguments}} +

[Note: the markup of this jvm.html hunk was mangled during extraction; the recoverable change replaces the raw {{$ctrl.jmx.InputArguments}} binding with a preformatted block rendering {{$ctrl.jmx.InputArguments.join('\n')}} so each JVM argument appears on its own line.]
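The new TransactionInfo#toByteString/fromByteString helpers above pair naturally with the ByteString-valued SnapshotInfo#lastTransactionInfo field added further below. A minimal, hypothetical sketch of the intended round trip; the wrapper class and method are illustrative only, not part of this patch:

import java.io.IOException;

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdds.utils.TransactionInfo;

// Illustrative only: demonstrates the encode/decode symmetry of the new helpers.
final class TransactionInfoRoundTripExample {
  private TransactionInfoRoundTripExample() { }

  static TransactionInfo roundTrip(TransactionInfo txnInfo) throws IOException {
    // toByteString() serializes through the existing codec and wraps the bytes for
    // protobuf use, e.g. as SnapshotInfo.lastTransactionInfo.
    ByteString serialized = txnInfo.toByteString();
    // fromByteString() is null-safe and decodes back through the same codec.
    return TransactionInfo.fromByteString(serialized);
  }
}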
diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto index 6cfae24d41e..798b68f1c5d 100644 --- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto +++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto @@ -33,21 +33,20 @@ package hadoop.hdds.datanode; /** * Commands that are used to manipulate the state of containers on a datanode. * - * These commands allow us to work against the datanode - from - * StorageContainer Manager as well as clients. + * These commands allow us to work against the datanode - from StorageContainer Manager as well as clients. * - * 1. CreateContainer - This call is usually made by Storage Container - * manager, when we need to create a new container on a given datanode. + * 1. CreateContainer - This call is usually made by Storage Container manager, + * when we need to create a new container on a given datanode. * - * 2. ReadContainer - Allows end user to stat a container. For example - * this allows us to return the metadata of a container. + * 2. ReadContainer - Allows end user to stat a container. + * For example this allows us to return the metadata of a container. * * 3. UpdateContainer - Updates a container metadata. - + * * 4. DeleteContainer - This call is made to delete a container. * - * 5. ListContainer - Returns the list of containers on this - * datanode. This will be used by tests and tools. + * 5. ListContainer - Returns the list of containers on this datanode. + * This will be used by tests and tools. * * 6. PutBlock - Given a valid container, creates a block. * @@ -55,8 +54,7 @@ package hadoop.hdds.datanode; * * 8. DeleteBlock - Deletes a given block. * - * 9. ListBlock - Returns a list of blocks that are present inside - * a given container. + * 9. ListBlock - Returns a list of blocks that are present inside a given container. * * 10. ReadChunk - Allows us to read a chunk. * @@ -77,6 +75,8 @@ package hadoop.hdds.datanode; * 18. CopyContainer - Copies a container from a remote machine. * * 19. FinalizeBlock - Finalize block request from client. + * + * 20. VerifyBlock - Verify block request from client. */ enum Type { @@ -108,6 +108,8 @@ enum Type { FinalizeBlock = 21; Echo = 22; + + VerifyBlock = 23; } @@ -168,7 +170,6 @@ message DatanodeBlockID { required int64 localID = 2; optional uint64 blockCommitSequenceId = 3 [default = 0]; optional int32 replicaIndex = 4; - } message KeyValue { @@ -179,17 +180,16 @@ message KeyValue { message ContainerCommandRequestProto { required Type cmdType = 1; // Type of the command - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. + // A string that identifies this command, we generate Trace ID in Ozone frontend + // and this allows us to trace that command all over ozone. optional string traceID = 2; required int64 containerID = 3; required string datanodeUuid = 4; optional string pipelineID = 5; - // One of the following command is available when the corresponding - // cmdType is set. At the protocol level we allow only - // one command in each packet. + // One of the following command is available when the corresponding cmdType is set. + // At the protocol level we allow only one command in each packet. // TODO : Upgrade to Protobuf 2.6 or later. 
optional CreateContainerRequestProto createContainer = 6; optional ReadContainerRequestProto readContainer = 7; @@ -217,6 +217,8 @@ message ContainerCommandRequestProto { optional FinalizeBlockRequestProto finalizeBlock = 25; optional EchoRequestProto echo = 26; + + optional VerifyBlockRequestProto verifyBlock = 27; } message ContainerCommandResponseProto { @@ -250,13 +252,15 @@ message ContainerCommandResponseProto { optional FinalizeBlockResponseProto finalizeBlock = 22; optional EchoResponseProto echo = 23; + + optional VerifyBlockResponseProto verifyBlock = 24; } message ContainerDataProto { enum State { OPEN = 1; CLOSING = 2; - QUASI_CLOSED =3; + QUASI_CLOSED = 3; CLOSED = 4; UNHEALTHY = 5; INVALID = 6; @@ -274,15 +278,14 @@ message ContainerDataProto { } message Container2BCSIDMapProto { - // repeated Container2BCSIDMapEntryProto container2BCSID = 1; - map container2BCSID = 1; + // repeated Container2BCSIDMapEntryProto container2BCSID = 1; + map container2BCSID = 1; } enum ContainerType { KeyValueContainer = 1; } - // Container Messages. message CreateContainerRequestProto { repeated KeyValue metadata = 2; @@ -366,7 +369,6 @@ message GetBlockResponseProto { required BlockData blockData = 1; } - message DeleteBlockRequestProto { required DatanodeBlockID blockID = 1; } @@ -386,7 +388,6 @@ message DeleteBlockResponseProto { message ListBlockRequestProto { optional int64 startLocalID = 2; required uint32 count = 3; - } message ListBlockResponseProto { @@ -411,12 +412,12 @@ message ChunkInfo { required uint64 offset = 2; required uint64 len = 3; repeated KeyValue metadata = 4; - required ChecksumData checksumData =5; + required ChecksumData checksumData = 5; optional bytes stripeChecksum = 6; } message ChunkInfoList { - repeated ChunkInfo chunks = 1; + repeated ChunkInfo chunks = 1; } message ChecksumData { @@ -426,11 +427,11 @@ message ChecksumData { } enum ChecksumType { - NONE = 1; - CRC32 = 2; - CRC32C = 3; - SHA256 = 4; - MD5 = 5; + NONE = 1; + CRC32 = 2; + CRC32C = 3; + SHA256 = 4; + MD5 = 5; } message WriteChunkRequestProto { @@ -487,8 +488,7 @@ message ListChunkResponseProto { repeated ChunkInfo chunkData = 1; } -/** For small file access combines write chunk and putBlock into a single -RPC */ +// For small file access combines write chunk and putBlock into a single RPC message PutSmallFileRequestProto { required PutBlockRequestProto block = 1; @@ -546,11 +546,23 @@ message SendContainerRequest { message SendContainerResponse { } +message VerifyBlockRequestProto { + required DatanodeBlockID blockID = 1; +} + +message VerifyBlockResponseProto { + enum Reason { + MISSING_CHUNK = 1; + CORRUPTED_CHUNK = 2; + } + required bool valid = 1; + optional Reason reason = 2; +} + service XceiverClientProtocolService { // A client-to-datanode RPC to send container commands rpc send(stream ContainerCommandRequestProto) returns - (stream ContainerCommandResponseProto) {}; - + (stream ContainerCommandResponseProto) {}; } service IntraDatanodeProtocolService { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 4555d1cf4a3..6cd4f6235ce 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -191,6 +191,7 @@ message DatanodeUsageInfoProto { optional int64 containerCount = 5; optional int64 committed = 6; optional int64 freeSpaceToSpare = 7; + optional int64 pipelineCount = 8; } /** diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 4f7df496906..1cafab3f67c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -32,6 +32,7 @@ public class DatanodeUsageInfo { private DatanodeDetails datanodeDetails; private SCMNodeStat scmNodeStat; private int containerCount; + private int pipelineCount; /** * Constructs a DatanodeUsageInfo with DatanodeDetails and SCMNodeStat. @@ -45,6 +46,7 @@ public DatanodeUsageInfo( this.datanodeDetails = datanodeDetails; this.scmNodeStat = scmNodeStat; this.containerCount = -1; + this.pipelineCount = -1; } /** @@ -145,6 +147,14 @@ public void setContainerCount(int containerCount) { this.containerCount = containerCount; } + public int getPipelineCount() { + return pipelineCount; + } + + public void setPipelineCount(int pipelineCount) { + this.pipelineCount = pipelineCount; + } + /** * Gets Comparator that compares two DatanodeUsageInfo on the basis of * their utilization values. Utilization is (capacity - remaining) divided @@ -210,6 +220,7 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { } builder.setContainerCount(containerCount); + builder.setPipelineCount(pipelineCount); return builder; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 05a68628852..7121d8f7a9d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -146,6 +146,8 @@ public class SCMNodeManager implements NodeManager { private static final String LASTHEARTBEAT = "LASTHEARTBEAT"; private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT"; private static final String TOTALCAPACITY = "CAPACITY"; + private static final String DNUUID = "UUID"; + private static final String VERSION = "VERSION"; /** * Constructs SCM machine Manager. */ @@ -447,6 +449,11 @@ public RegisteredCommand register( processNodeReport(datanodeDetails, nodeReport); LOG.info("Updated datanode to: {}", dn); scmNodeEventPublisher.fireEvent(SCMEvents.NODE_ADDRESS_UPDATE, dn); + } else if (isVersionChange(oldNode.getVersion(), datanodeDetails.getVersion())) { + LOG.info("Update the version for registered datanode = {}, " + + "oldVersion = {}, newVersion = {}.", + datanodeDetails.getUuid(), oldNode.getVersion(), datanodeDetails.getVersion()); + nodeStateManager.updateNode(datanodeDetails, layoutInfo); } } catch (NodeNotFoundException e) { LOG.error("Cannot find datanode {} from nodeStateManager", @@ -508,6 +515,18 @@ private boolean updateDnsToUuidMap( return ipChanged || hostNameChanged; } + /** + * Check if the version has been updated. + * + * @param oldVersion datanode oldVersion + * @param newVersion datanode newVersion + * @return true means replacement is needed, while false means replacement is not needed. + */ + private boolean isVersionChange(String oldVersion, String newVersion) { + final boolean versionChanged = !Objects.equals(oldVersion, newVersion); + return versionChanged; + } + /** * Send heartbeat to indicate the datanode is alive and doing well. 
* @@ -982,6 +1001,7 @@ public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) { DatanodeUsageInfo usageInfo = new DatanodeUsageInfo(dn, stat); try { usageInfo.setContainerCount(getContainerCount(dn)); + usageInfo.setPipelineCount(getPipeLineCount(dn)); } catch (NodeNotFoundException ex) { LOG.error("Unknown datanode {}.", dn, ex); } @@ -1135,6 +1155,8 @@ public Map> getNodeStatusInfo() { String nonScmUsedPerc = storagePercentage[1]; map.put(USEDSPACEPERCENT, "Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%"); + map.put(DNUUID, dni.getUuidString()); + map.put(VERSION, dni.getVersion()); nodes.put(hostName, map); } return nodes; @@ -1610,6 +1632,11 @@ public int getContainerCount(DatanodeDetails datanodeDetails) return nodeStateManager.getContainerCount(datanodeDetails.getUuid()); } + public int getPipeLineCount(DatanodeDetails datanodeDetails) + throws NodeNotFoundException { + return nodeStateManager.getPipelinesCount(datanodeDetails); + } + @Override public void addDatanodeCommand(UUID dnId, SCMCommand command) { writeLock().lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index de609356b22..75a5193116c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.server; +import java.util.List; import java.util.Map; import org.apache.hadoop.hdds.annotation.InterfaceAudience; @@ -72,7 +73,7 @@ public interface SCMMXBean extends ServiceRuntimeInfo { String getClusterId(); - String getScmRatisRoles(); + List> getScmRatisRoles(); /** * Primordial node is the node on which scm init operation is performed. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 868e54f1935..5f69d9fee2b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -171,6 +171,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ReflectionUtils; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.JvmPauseMonitor; import org.slf4j.Logger; @@ -2131,10 +2132,54 @@ public ContainerTokenGenerator getContainerTokenGenerator() { } @Override - public String getScmRatisRoles() { + public List> getScmRatisRoles() { final SCMRatisServer server = getScmHAManager().getRatisServer(); - return server != null ? - HddsUtils.format(server.getRatisRoles()) : "STANDALONE"; + + // If Ratis is disabled + if (server == null) { + return getRatisRolesException("Ratis is disabled"); + } + + // To attempt to find the SCM Leader, + // and if the Leader is not found + // return Leader is not found message. + RaftServer.Division division = server.getDivision(); + RaftPeerId leaderId = division.getInfo().getLeaderId(); + if (leaderId == null) { + return getRatisRolesException("No leader found"); + } + + // If the SCMRatisServer is stopped, return a service stopped message. 
+ if (server.isStopped()) { + return getRatisRolesException("Server is shutting down"); + } + + // Attempt to retrieve role information. + try { + List ratisRoles = server.getRatisRoles(); + List> result = new ArrayList<>(); + for (String role : ratisRoles) { + String[] roleArr = role.split(":"); + List scmInfo = new ArrayList<>(); + // Host Name + scmInfo.add(roleArr[0]); + // Node ID + scmInfo.add(roleArr[3]); + // Ratis Port + scmInfo.add(roleArr[1]); + // Role + scmInfo.add(roleArr[2]); + result.add(scmInfo); + } + return result; + } catch (Exception e) { + LOG.error("Failed to getRatisRoles.", e); + return getRatisRolesException("Exception Occurred, " + e.getMessage()); + } + } + + private static List> getRatisRolesException(String exceptionString) { + return Collections.singletonList(Collections.singletonList(exceptionString)); } /** diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 3f825d4e25f..0f233bf4ea1 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -140,6 +140,10 @@

[Note: the HTML markup in this scm-overview.html diff was mangled during extraction; the recoverable changes are summarized here.]
- The hunk above (@@ -140,6 +140,10 @@) adds "UUID" and "Version" header cells to the datanode table, after "Last Heartbeat".
- @@ -157,6 +161,8 @@ adds the matching row cells {{typestat.uuid}} and {{typestat.version}}.
- @@ -210,10 +216,6 @@ removes the old "SCM Roles (HA)" entry that rendered {{$ctrl.overview.jmx.ScmRatisRoles}} as plain text, between "Force Exit Safe Mode" and "Primordial Node (HA)".
- @@ -235,6 +237,35 @@ adds a new "SCM Roles (HA)" panel after "Meta-Data Volume Information": when the MXBean returns a single message it is shown via {{$ctrl.overview.jmx.ScmRatisRoles[0][0]}}; otherwise a table with columns Host Name, Node ID, Ratis Port and Role is rendered from {{roles[0]}}..{{roles[3]}} (two row variants, presumably so the new scm-roles-background style can highlight one of them), ahead of the existing "Safemode rules statuses" section.
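With getScmRatisRoles() now returning a nested list instead of a preformatted string, a consumer such as the overview page interprets rows rather than text. A hedged Java sketch of that interpretation; the helper class is illustrative, not part of this patch, and the row order follows the parsing in StorageContainerManager#getScmRatisRoles above:

import java.util.List;

// Illustrative only: row order is [Host Name, Node ID, Ratis Port, Role], matching
// the parsing added in StorageContainerManager#getScmRatisRoles.
final class ScmRatisRolesPrinter {
  private ScmRatisRolesPrinter() { }

  static void print(List<List<String>> roles) {
    for (List<String> row : roles) {
      if (row.size() == 1) {
        // Single-element rows carry a status message, e.g. "Ratis is disabled" or "No leader found".
        System.out.println("SCM Roles (HA): " + row.get(0));
      } else {
        System.out.printf("%-30s %-12s %-10s %-10s%n",
            row.get(0), row.get(1), row.get(2), row.get(3));
      }
    }
  }
}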
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index 6fac6849530..e00f8b8ede8 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -56,6 +56,11 @@ } } + $http.get("jmx?qry=Ratis:service=RaftServer,group=*,id=*") + .then(function (result) { + ctrl.role = result.data.beans[0]; + }); + function get_protocol(URLScheme, value, baseProto, fallbackProto) { let protocol = "unknown" let port = -1; @@ -95,6 +100,8 @@ capacity: value && value.find((element) => element.key === "CAPACITY").value, comstate: value && value.find((element) => element.key === "COMSTATE").value, lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value, + uuid: value && value.find((element) => element.key === "UUID").value, + version: value && value.find((element) => element.key === "VERSION").value, port: portSpec.port, protocol: portSpec.proto } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index b967fa0658c..2c069291a86 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,6 +155,8 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); + System.out.printf("%-13s: %d %n", "Pipeline(s)", + info.getPipelineCount()); System.out.printf("%-13s: %d %n", "Container(s)", info.getContainerCount()); System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated", @@ -192,6 +194,7 @@ private static class DatanodeUsage { private long committed = 0; private long freeSpaceToSpare = 0; private long containerCount = 0; + private long pipelineCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { if (proto.hasNode()) { @@ -212,6 +215,9 @@ private static class DatanodeUsage { if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } + if (proto.hasPipelineCount()) { + pipelineCount = proto.getPipelineCount(); + } if (proto.hasFreeSpaceToSpare()) { freeSpaceToSpare = proto.getFreeSpaceToSpare(); } @@ -277,5 +283,8 @@ public double getRemainingRatio() { return remaining / (double) capacity; } + public long getPipelineCount() { + return pipelineCount; + } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index 7c70456995b..e5392ef618d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -59,12 +59,22 @@ public void execute(ScmClient scmClient) throws IOException { List pipelineList = new ArrayList<>(); Predicate predicate = replicationFilter.orElse(null); - for (Pipeline pipeline : scmClient.listPipelines()) { - boolean filterPassed = (predicate != null) && predicate.test(pipeline); - if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { - pipelineList.add(pipeline); + List 
pipelines = scmClient.listPipelines(); + if (predicate == null) { + for (Pipeline pipeline : pipelines) { + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED) { + pipelineList.add(pipeline); + } + } + } else { + for (Pipeline pipeline : pipelines) { + boolean filterPassed = predicate.test(pipeline); + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { + pipelineList.add(pipeline); + } } } + System.out.println("Sending close command for " + pipelineList.size() + " pipelines..."); pipelineList.forEach(pipeline -> { try { diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index b3c15a46f76..46985147def 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -105,19 +105,22 @@ public void setup() throws Exception { final File metadataPath = new File(testRoot, "metadata"); assertTrue(metadataPath.mkdirs()); - CONF.set(HDDS_DATANODE_DIR_KEY, - volume1Path.getAbsolutePath() + "," + volume2Path.getAbsolutePath()); + CONF.set(HDDS_DATANODE_DIR_KEY, volume1Path.getAbsolutePath() + "," + volume2Path.getAbsolutePath()); CONF.set(OZONE_METADATA_DIRS, metadataPath.getAbsolutePath()); datanodeId = UUID.randomUUID(); - volumeSet = new MutableVolumeSet(datanodeId.toString(), SCM_ID, CONF, - null, StorageVolume.VolumeType.DATA_VOLUME, null); + volumeSet = new MutableVolumeSet( + datanodeId.toString(), + SCM_ID, + CONF, + null, + StorageVolume.VolumeType.DATA_VOLUME, + null); // create rocksdb instance in volume dir final List volumes = new ArrayList<>(); for (StorageVolume storageVolume : volumeSet.getVolumesList()) { HddsVolume hddsVolume = (HddsVolume) storageVolume; - StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, CONF, null, - null); + StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, CONF, null, null); volumes.add(hddsVolume); } @@ -136,7 +139,7 @@ public void setup() throws Exception { containerSet = new ContainerSet(1000); blockManager = new BlockManagerImpl(CONF); - chunkManager = new FilePerBlockStrategy(true, blockManager, null); + chunkManager = new FilePerBlockStrategy(true, blockManager); } @BeforeAll @@ -153,31 +156,27 @@ public void after() throws Exception { public void testUpgrade() throws IOException { int num = 2; - final Map> - keyValueContainerBlockDataMap = genSchemaV2Containers(num); + final Map> keyValueContainerBlockDataMap = genSchemaV2Containers(num); assertEquals(num, keyValueContainerBlockDataMap.size()); shutdownAllVolume(); final UpgradeManager upgradeManager = new UpgradeManager(); final List results = - upgradeManager.run(CONF, - StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); + upgradeManager.run(CONF, StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); checkV3MetaData(keyValueContainerBlockDataMap, results, upgradeManager); } - private Map putAnyBlockData(KeyValueContainerData data, - KeyValueContainer container, - int numBlocks) { + private Map putAnyBlockData(KeyValueContainerData data, KeyValueContainer container, + int numBlocks) { // v3 key ==> block data final Map containerBlockDataMap = new HashMap<>(); int txnID = 0; for (int i = 0; i < numBlocks; i++) { txnID = txnID + 1; - BlockID blockID = - 
ContainerTestHelper.getTestBlockID(data.getContainerID()); + BlockID blockID = ContainerTestHelper.getTestBlockID(data.getContainerID()); BlockData kd = new BlockData(blockID); List chunks = Lists.newArrayList(); putChunksInBlock(1, i, chunks, container, blockID); @@ -185,27 +184,25 @@ private Map putAnyBlockData(KeyValueContainerData data, try { final String localIDKey = Long.toString(blockID.getLocalID()); - final String blockKey = DatanodeSchemaThreeDBDefinition - .getContainerKeyPrefix(data.getContainerID()) + localIDKey; + final String blockKey = + DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix(data.getContainerID()) + localIDKey; blockManager.putBlock(container, kd); containerBlockDataMap.put(blockKey, kd); } catch (IOException exception) { - LOG.warn("Failed to put block: " + blockID.getLocalID() - + " in BlockDataTable.", exception); + LOG.warn("Failed to put block: {} in BlockDataTable.", blockID.getLocalID(), exception); } } return containerBlockDataMap; } - private void putChunksInBlock(int numOfChunksPerBlock, int i, - List chunks, - KeyValueContainer container, BlockID blockID) { + private void putChunksInBlock(int numOfChunksPerBlock, int i, List chunks, + KeyValueContainer container, BlockID blockID) { final long chunkLength = 100; try { for (int k = 0; k < numOfChunksPerBlock; k++) { - final String chunkName = String.format("%d_chunk_%d_block_%d", - blockID.getContainerBlockID().getLocalID(), k, i); + final String chunkName = + String.format("%d_chunk_%d_block_%d", blockID.getContainerBlockID().getLocalID(), k, i); final long offset = k * chunkLength; ContainerProtos.ChunkInfo info = ContainerProtos.ChunkInfo.newBuilder().setChunkName(chunkName) @@ -219,26 +216,26 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, } } } catch (IOException ex) { - LOG.warn("Putting chunks in blocks was not successful for BlockID: " - + blockID); + LOG.warn("Putting chunks in blocks was not successful for BlockID: {}", blockID); } } - private Map> - genSchemaV2Containers(int numContainers) throws IOException { + private Map> genSchemaV2Containers(int numContainers) + throws IOException { CONF.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false); // container id ==> blocks - final Map> checkBlockDataMap = - new HashMap<>(); + final Map> checkBlockDataMap = new HashMap<>(); // create container for (int i = 0; i < numContainers; i++) { long containerId = ContainerTestHelper.getTestContainerID(); - KeyValueContainerData data = new KeyValueContainerData(containerId, + KeyValueContainerData data = new KeyValueContainerData( + containerId, ContainerLayoutVersion.FILE_PER_BLOCK, - ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), + ContainerTestHelper.CONTAINER_MAX_SIZE, + UUID.randomUUID().toString(), datanodeId.toString()); data.setSchemaVersion(OzoneConsts.SCHEMA_V2); @@ -246,12 +243,10 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, container.create(volumeSet, volumeChoosingPolicy, SCM_ID); containerSet.addContainer(container); - data = (KeyValueContainerData) containerSet.getContainer(containerId) - .getContainerData(); + data = (KeyValueContainerData) containerSet.getContainer(containerId).getContainerData(); data.setSchemaVersion(OzoneConsts.SCHEMA_V2); - final Map blockDataMap = - putAnyBlockData(data, container, 10); + final Map blockDataMap = putAnyBlockData(data, container, 10); data.closeContainer(); container.close(); @@ -267,32 +262,26 @@ public void shutdownAllVolume() { } } - private void checkV3MetaData(Map> 
blockDataMap, List results, - UpgradeManager upgradeManager) throws IOException { + private void checkV3MetaData(Map> blockDataMap, + List results, UpgradeManager upgradeManager) throws IOException { Map resultMap = new HashMap<>(); for (UpgradeManager.Result result : results) { resultMap.putAll(result.getResultMap()); } - for (Map.Entry> entry : - blockDataMap.entrySet()) { + for (Map.Entry> entry : blockDataMap.entrySet()) { final KeyValueContainerData containerData = entry.getKey(); final Map blockKeyValue = entry.getValue(); - final UpgradeTask.UpgradeContainerResult result = - resultMap.get(containerData.getContainerID()); - final KeyValueContainerData v3ContainerData = - (KeyValueContainerData) result.getNewContainerData(); + final UpgradeTask.UpgradeContainerResult result = resultMap.get(containerData.getContainerID()); + final KeyValueContainerData v3ContainerData = (KeyValueContainerData) result.getNewContainerData(); final DatanodeStoreSchemaThreeImpl datanodeStoreSchemaThree = upgradeManager.getDBStore(v3ContainerData.getVolume()); - final Table blockDataTable = - datanodeStoreSchemaThree.getBlockDataTable(); + final Table blockDataTable = datanodeStoreSchemaThree.getBlockDataTable(); - for (Map.Entry blockDataEntry : blockKeyValue - .entrySet()) { + for (Map.Entry blockDataEntry : blockKeyValue.entrySet()) { final String v3key = blockDataEntry.getKey(); final BlockData blockData = blockDataTable.get(v3key); final BlockData originBlockData = blockDataEntry.getValue(); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index 09f6621735e..a691e754606 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -94,6 +94,7 @@ public void testCorrectJsonValuesInReport() throws IOException { assertEquals(80.00, json.get(0).get("remainingPercent").doubleValue(), 0.001); assertEquals(5, json.get(0).get("containerCount").longValue()); + assertEquals(10, json.get(0).get("pipelineCount").longValue()); } @Test @@ -122,6 +123,7 @@ public void testOutputDataFieldsAligning() throws IOException { assertThat(output).contains("Remaining :"); assertThat(output).contains("Remaining % :"); assertThat(output).contains("Container(s) :"); + assertThat(output).contains("Pipeline(s) :"); assertThat(output).contains("Container Pre-allocated :"); assertThat(output).contains("Remaining Allocatable :"); assertThat(output).contains("Free Space To Spare :"); @@ -135,6 +137,7 @@ private List getUsageProto() { .setRemaining(80) .setUsed(10) .setContainerCount(5) + .setPipelineCount(10) .build()); return result; } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java new file mode 100644 index 00000000000..013350fe871 --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.pipeline; + +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import picocli.CommandLine; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for the ClosePipelineSubcommand class. 
+ */ +class TestClosePipelinesSubCommand { + + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + private ClosePipelineSubcommand cmd; + private ScmClient scmClient; + + public static Stream values() { + return Stream.of( + arguments( + new String[]{"--all"}, + "Sending close command for 2 pipelines...\n", + "with empty parameters" + ), + arguments( + new String[]{"--all", "-ffc", "THREE"}, + "Sending close command for 1 pipelines...\n", + "by filter factor, opened" + ), + arguments( + new String[]{"--all", "-ffc", "ONE"}, + "Sending close command for 0 pipelines...\n", + "by filter factor, closed" + ), + arguments( + new String[]{"--all", "-r", "rs-3-2-1024k", "-t", "EC"}, + "Sending close command for 1 pipelines...\n", + "by replication and type, opened" + ), + arguments( + new String[]{"--all", "-r", "rs-6-3-1024k", "-t", "EC"}, + "Sending close command for 0 pipelines...\n", + "by replication and type, closed" + ), + arguments( + new String[]{"--all", "-t", "EC"}, + "Sending close command for 1 pipelines...\n", + "by type, opened" + ), + arguments( + new String[]{"--all", "-t", "RS"}, + "Sending close command for 0 pipelines...\n", + "by type, closed" + ) + ); + } + + @BeforeEach + public void setup() throws IOException { + cmd = new ClosePipelineSubcommand(); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + + scmClient = mock(ScmClient.class); + when(scmClient.listPipelines()).thenAnswer(invocation -> createPipelines()); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @ParameterizedTest(name = "{index}. 
{2}") + @MethodSource("values") + void testCloseAllPipelines(String[] commands, String expectedOutput, String testName) throws IOException { + CommandLine c = new CommandLine(cmd); + c.parseArgs(commands); + cmd.execute(scmClient); + assertEquals(expectedOutput, outContent.toString(DEFAULT_ENCODING)); + } + + private List createPipelines() { + List pipelines = new ArrayList<>(); + pipelines.add(createPipeline(StandaloneReplicationConfig.getInstance(ONE), + Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), + Pipeline.PipelineState.OPEN)); + pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), + Pipeline.PipelineState.CLOSED)); + + pipelines.add(createPipeline( + new ECReplicationConfig(3, 2), Pipeline.PipelineState.OPEN)); + pipelines.add(createPipeline( + new ECReplicationConfig(3, 2), Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline( + new ECReplicationConfig(6, 3), Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline( + RatisReplicationConfig.getInstance(THREE), Pipeline.PipelineState.CLOSED)); + return pipelines; + } + + private Pipeline createPipeline(ReplicationConfig repConfig, + Pipeline.PipelineState state) { + return new Pipeline.Builder() + .setId(PipelineID.randomId()) + .setCreateTimestamp(System.currentTimeMillis()) + .setState(state) + .setReplicationConfig(repConfig) + .setNodes(createDatanodeDetails(1)) + .build(); + } + + private List createDatanodeDetails(int count) { + List dns = new ArrayList<>(); + for (int i = 0; i < count; i++) { + HddsProtos.DatanodeDetailsProto dnd = + HddsProtos.DatanodeDetailsProto.newBuilder() + .setHostName("host" + i) + .setIpAddress("1.2.3." + i + 1) + .setNetworkLocation("/default") + .setNetworkName("host" + i) + .addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()) + .setUuid(UUID.randomUUID().toString()) + .build(); + dns.add(DatanodeDetails.getFromProtoBuf(dnd)); + } + return dns; + } +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 15babfde69d..9dc11637f3c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2316,9 +2316,16 @@ public List listStatusLight(String volumeName, String bucketName, String keyName, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) throws IOException { OmKeyArgs keyArgs = prepareOmKeyArgs(volumeName, bucketName, keyName); - return ozoneManagerClient - .listStatusLight(keyArgs, recursive, startKey, numEntries, - allowPartialPrefixes); + if (omVersion.compareTo(OzoneManagerVersion.LIGHTWEIGHT_LIST_STATUS) >= 0) { + return ozoneManagerClient.listStatusLight(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); + } else { + return ozoneManagerClient.listStatus(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes) + .stream() + .map(OzoneFileStatusLight::fromOzoneFileStatus) + .collect(Collectors.toList()); + } } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 8fa8921cc9a..b70ea51fde5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -333,6 +333,7 @@ public static boolean isReadOnly( case DeleteSnapshot: case RenameSnapshot: case SnapshotMoveDeletedKeys: + case SnapshotMoveTableKeys: case SnapshotPurge: case RecoverLease: case SetTimes: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index aa7d06e2a9f..bf4ffa9d8de 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -317,9 +317,11 @@ public static boolean canEnableHsync(ConfigurationSource conf, boolean isClient) if (confHBaseEnhancementsAllowed) { return confHsyncEnabled; } else { - LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. To enable it, set {} = true as well.", - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, confHsyncEnabled, - confKey); + if (confHsyncEnabled) { + LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. To enable it, set {} = true as well.", + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true, + confKey); + } return false; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 47a48c37e8e..8584796c2e9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -19,6 +19,7 @@ */ import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -124,6 +125,7 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long exclusiveSize; private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; + private ByteString lastTransactionInfo; private SnapshotInfo(Builder b) { this.snapshotId = b.snapshotId; @@ -145,6 +147,7 @@ private SnapshotInfo(Builder b) { this.exclusiveSize = b.exclusiveSize; this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; + this.lastTransactionInfo = b.lastTransactionInfo; } public void setName(String name) { @@ -261,13 +264,15 @@ public SnapshotInfo.Builder toBuilder() { .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) .setSnapshotPath(snapshotPath) .setCheckpointDir(checkpointDir) + .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean) .setSstFiltered(sstFiltered) .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir); + .setDeepCleanedDeletedDir(deepCleanedDeletedDir) + .setLastTransactionInfo(lastTransactionInfo); } /** @@ -293,6 +298,7 @@ public static class Builder { private long exclusiveSize; private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; + private ByteString lastTransactionInfo; public Builder() { // default values @@ -411,6 +417,11 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { return this; } + public Builder setLastTransactionInfo(ByteString 
lastTransactionInfo) { + this.lastTransactionInfo = lastTransactionInfo; + return this; + } + public SnapshotInfo build() { Preconditions.checkNotNull(name); return new SnapshotInfo(this); @@ -445,6 +456,10 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { sib.setGlobalPreviousSnapshotID(toProtobuf(globalPreviousSnapshotId)); } + if (lastTransactionInfo != null) { + sib.setLastTransactionInfo(lastTransactionInfo); + } + sib.setSnapshotPath(snapshotPath) .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) @@ -513,6 +528,10 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getDeepCleanedDeletedDir()); } + if (snapshotInfoProto.hasLastTransactionInfo()) { + osib.setLastTransactionInfo(snapshotInfoProto.getLastTransactionInfo()); + } + osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); @@ -605,6 +624,14 @@ public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { this.deepCleanedDeletedDir = deepCleanedDeletedDir; } + public ByteString getLastTransactionInfo() { + return lastTransactionInfo; + } + + public void setLastTransactionInfo(ByteString lastTransactionInfo) { + this.lastTransactionInfo = lastTransactionInfo; + } + /** * Generate default name of snapshot, (used if user doesn't provide one). */ @@ -673,7 +700,8 @@ public boolean equals(Object o) { referencedReplicatedSize == that.referencedReplicatedSize && exclusiveSize == that.exclusiveSize && exclusiveReplicatedSize == that.exclusiveReplicatedSize && - deepCleanedDeletedDir == that.deepCleanedDeletedDir; + deepCleanedDeletedDir == that.deepCleanedDeletedDir && + Objects.equals(lastTransactionInfo, that.lastTransactionInfo); } @Override @@ -684,7 +712,7 @@ public int hashCode() { globalPreviousSnapshotId, snapshotPath, checkpointDir, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, - exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir); + exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir, lastTransactionInfo); } /** @@ -692,27 +720,7 @@ public int hashCode() { */ @Override public SnapshotInfo copyObject() { - return new Builder() - .setSnapshotId(snapshotId) - .setName(name) - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setSnapshotStatus(snapshotStatus) - .setCreationTime(creationTime) - .setDeletionTime(deletionTime) - .setPathPreviousSnapshotId(pathPreviousSnapshotId) - .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) - .setSnapshotPath(snapshotPath) - .setCheckpointDir(checkpointDir) - .setDbTxSequenceNumber(dbTxSequenceNumber) - .setDeepClean(deepClean) - .setSstFiltered(sstFiltered) - .setReferencedSize(referencedSize) - .setReferencedReplicatedSize(referencedReplicatedSize) - .setExclusiveSize(exclusiveSize) - .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir) - .build(); + return this.toBuilder().build(); } @Override @@ -737,6 +745,7 @@ public String toString() { ", exclusiveSize: '" + exclusiveSize + '\'' + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + + ", lastTransactionInfo: '" + lastTransactionInfo + '\'' + '}'; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index bdd1428b16d..432b55051da 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -2117,12 +2117,8 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { .setGetFileStatusRequest(req) .build(); - final GetFileStatusResponse resp; - try { - resp = handleError(submitRequest(omRequest)).getGetFileStatusResponse(); - } catch (IOException e) { - throw e; - } + final GetFileStatusResponse resp = handleError(submitRequest(omRequest)) + .getGetFileStatusResponse(); return OzoneFileStatus.getFromProtobuf(resp.getStatus()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index ccb2080a875..e28c9477f29 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -242,11 +242,13 @@ public static MD5MD5Crc32FileChecksumProto convert( DataOutputBuffer buf = new DataOutputBuffer(); checksum.write(buf); byte[] bytes = buf.getData(); - DataInputBuffer buffer = new DataInputBuffer(); - buffer.reset(bytes, 0, bytes.length); - int bytesPerCRC = buffer.readInt(); - long crcPerBlock = buffer.readLong(); - buffer.close(); + int bytesPerCRC; + long crcPerBlock; + try (DataInputBuffer buffer = new DataInputBuffer()) { + buffer.reset(bytes, 0, bytes.length); + bytesPerCRC = buffer.readInt(); + crcPerBlock = buffer.readLong(); + } int offset = Integer.BYTES + Long.BYTES; ByteString byteString = ByteString.copyFrom( diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index db517a7f7c6..38cc5b71a18 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -33,6 +33,7 @@ OZONE-SITE.XML_ozone.om.http-address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice +OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2.org diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index c28483c6735..9b846ca722e 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -407,6 +407,7 @@ Apache License 2.0 org.apache.ratis:ratis-proto org.apache.ratis:ratis-server org.apache.ratis:ratis-server-api + org.apache.ratis:ratis-shell org.apache.ratis:ratis-thirdparty-misc org.apache.ratis:ratis-tools org.apache.thrift:libthrift @@ -458,6 +459,7 @@ MIT org.kohsuke.metainf-services:metainf-services org.slf4j:slf4j-api org.slf4j:slf4j-reload4j + org.slf4j:slf4j-simple Public Domain diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 042c9380e4a..22139ae86e8 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ 
b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -252,6 +252,7 @@ share/ozone/lib/ratis-netty.jar share/ozone/lib/ratis-proto.jar share/ozone/lib/ratis-server-api.jar share/ozone/lib/ratis-server.jar +share/ozone/lib/ratis-shell.jar share/ozone/lib/ratis-thirdparty-misc.jar share/ozone/lib/ratis-tools.jar share/ozone/lib/re2j.jar @@ -264,6 +265,7 @@ share/ozone/lib/simpleclient_dropwizard.jar share/ozone/lib/simpleclient.jar share/ozone/lib/slf4j-api.jar share/ozone/lib/slf4j-reload4j.jar +share/ozone/lib/slf4j-simple.jar share/ozone/lib/snakeyaml.jar share/ozone/lib/snappy-java.jar share/ozone/lib/spring-beans.jar diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot index 511679c56f4..57715cda95f 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot @@ -30,6 +30,10 @@ Key Can Be Read Dir Can Be Listed Execute ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX} +Dir Can Be Listed Using Shell + ${result} = Execute ozone sh key list /vol1/bucket1 + Should Contain ${result} key-${SUFFIX} + File Can Be Get Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/ Execute diff -q ${TESTFILE} /tmp/file-${SUFFIX} diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot new file mode 100644 index 00000000000..e006e154af1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot @@ -0,0 +1,93 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone debug ldb CLI +Library OperatingSystem +Resource ../lib/os.robot +Test Timeout 5 minute +Suite Setup Write keys + +*** Variables *** +${PREFIX} ${EMPTY} +${VOLUME} cli-debug-volume${PREFIX} +${BUCKET} cli-debug-bucket +${DEBUGKEY} debugKey +${TESTFILE} testfile + +*** Keywords *** +Write keys + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Execute ozone sh volume create ${VOLUME} + Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE} bs=100000 count=15 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE} + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE} + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE} + Execute ozone sh key addacl -a user:systest:a ${VOLUME}/${BUCKET}/${TESTFILE}3 + +*** Test Cases *** +Test ozone debug ldb ls + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db ls + Should contain ${output} keyTable + +Test ozone debug ldb scan + # test count option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --count + Should Not Be Equal ${output} 0 + # test valid json for scan command + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable | jq -r '.' + Should contain ${output} keyName + Should contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test startkey option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --startkey="/cli-debug-volume/cli-debug-bucket/testfile2" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test endkey option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --endkey="/cli-debug-volume/cli-debug-bucket/testfile2" + Should contain ${output} testfile1 + Should contain ${output} testfile2 + Should not contain ${output} testfile3 + # test fields option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --fields="volumeName,bucketName,keyName" + Should contain ${output} volumeName + Should contain ${output} bucketName + Should contain ${output} keyName + Should not contain ${output} objectID + Should not contain ${output} dataSize + Should not contain ${output} keyLocationVersions + # test filter option with one filter + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile2" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with one multi-level filter + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option with multiple filter + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile3,acls.name:equals:systest" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option with no records match both filters + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable 
--filter="acls.name:equals:systest,keyName:equals:testfile2" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot index 22805efcb1b..651cda016f2 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot @@ -56,3 +56,11 @@ Compare Key With Local File with Different File Compare Key With Local File if File Does Not Exist ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /no-such-file Should Be Equal ${matches} ${FALSE} + +Rejects Put Key With Zero Expected Generation + ${output} = Execute and checkrc ozone sh key put --expectedGeneration 0 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255 + Should Contain ${output} must be positive + +Rejects Put Key With Negative Expected Generation + ${output} = Execute and checkrc ozone sh key put --expectedGeneration -1 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255 + Should Contain ${output} must be positive diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index dd06d55f75f..d62a217e606 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -107,6 +107,11 @@ Test Multipart Upload Complete ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' Should Be Equal As Strings ${eTag2} ${part2Md5Sum} +#complete multipart upload without any parts + ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255 + Should contain ${result} InvalidRequest + Should contain ${result} must specify at least one part + #complete multipart upload ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' Should contain ${result} ${BUCKET} diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 22ceed9ed3c..51e2b4e965d 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -61,6 +61,7 @@ function ozone_usage ozone_add_subcommand "debug" client "Ozone debug tool" ozone_add_subcommand "repair" client "Ozone repair tool" ozone_add_subcommand "checknative" client "checks if native libraries are loaded" + ozone_add_subcommand "ratis" client "Ozone ratis tool" ozone_generate_usage "${OZONE_SHELL_EXECNAME}" false } @@ -98,8 +99,8 @@ function ozonecmd_case echo "Usage: ozone classpath " echo "Where the artifact name is one of:" echo "" - ls -1 ${OZONE_HOME}/share/ozone/classpath/ | sed 's/.classpath//' - exit -1 + find ${OZONE_HOME}/share/ozone/classpath/ -type f -exec basename {} \; | sed 's/.classpath//' | sort + exit 1 fi ;; datanode) @@ -228,8 +229,12 @@ function ozonecmd_case OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; checknative) - OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative - OZONE_RUN_ARTIFACT_NAME="ozone-tools" + OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative + OZONE_RUN_ARTIFACT_NAME="ozone-tools" + ;; + ratis) + 
OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.OzoneRatis + OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; *) OZONE_CLASSNAME="${subcmd}" @@ -282,7 +287,6 @@ fi OZONE_SUBCMD=$1 shift - if ozone_need_reexec ozone "${OZONE_SUBCMD}"; then ozone_uservar_su ozone "${OZONE_SUBCMD}" \ "${MYNAME}" \ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index c274d8fea30..6f79839cd02 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -61,6 +61,7 @@ import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.SecretKeyTestClient; +import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; import org.apache.hadoop.ozone.client.io.InsufficientLocationsException; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -83,6 +84,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; @@ -99,6 +101,7 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; @@ -117,6 +120,7 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * This class tests container commands on EC containers. 
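The import additions above (JUnit 5 Arguments, CompletableFuture, the static arguments helper) prepare TestContainerCommandsEC for the new triggerRetry dimension that the hunks below thread through the reconstruction tests. As a sketch only, assuming the ImmutableList, IntStream, Stream and Arguments imports already present in this class, the expanded recoverableMissingIndexes() could also be expressed by crossing each recoverable index set with both flag values; it yields the same argument sets, just in a different order, which does not matter for a @MethodSource:

  // Sketch: equivalent test data for the parameterized tests below, assuming the
  // imports already in TestContainerCommandsEC (ImmutableList, IntStream, Stream,
  // Arguments, and the static arguments(...) factory).
  static Stream<Arguments> recoverableMissingIndexes() {
    Stream<List<Integer>> indexSets = Stream.concat(
        IntStream.rangeClosed(1, 5).mapToObj(i -> ImmutableList.of(i)),
        Stream.of(ImmutableList.of(2, 3), ImmutableList.of(2, 4), ImmutableList.of(3, 5)));
    // Pair every recoverable index combination with triggerRetry = true and false.
    return indexSets.flatMap(indexes ->
        Stream.of(arguments(indexes, true), arguments(indexes, false)));
  }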
@@ -613,30 +617,33 @@ private static byte[] getBytesWith(int singleDigitNumber, int total) { @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWith(List missingIndexes) + void testECReconstructionCoordinatorWith(List missingIndexes, boolean triggerRetry) throws Exception { - testECReconstructionCoordinator(missingIndexes, 3); + testECReconstructionCoordinator(missingIndexes, 3, triggerRetry); } @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes) - throws Exception { - testECReconstructionCoordinator(missingIndexes, 1); + void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes, + boolean triggerRetry) throws Exception { + testECReconstructionCoordinator(missingIndexes, 1, triggerRetry); } @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes) - throws Exception { - testECReconstructionCoordinator(missingIndexes, 4); + void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes, + boolean triggerRetry) throws Exception { + testECReconstructionCoordinator(missingIndexes, 4, triggerRetry); } - static Stream> recoverableMissingIndexes() { - return Stream - .concat(IntStream.rangeClosed(1, 5).mapToObj(ImmutableList::of), Stream - .of(ImmutableList.of(2, 3), ImmutableList.of(2, 4), - ImmutableList.of(3, 5), ImmutableList.of(4, 5))); + static Stream recoverableMissingIndexes() { + Stream args = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), true)); + Stream args1 = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), false)); + Stream args2 = Stream.of(arguments(ImmutableList.of(2, 3), true), + arguments(ImmutableList.of(2, 4), true), arguments(ImmutableList.of(3, 5), true)); + Stream args3 = Stream.of(arguments(ImmutableList.of(2, 3), false), + arguments(ImmutableList.of(2, 4), false), arguments(ImmutableList.of(3, 5), false)); + return Stream.concat(Stream.concat(args, args1), Stream.concat(args2, args3)); } /** @@ -647,7 +654,7 @@ static Stream> recoverableMissingIndexes() { public void testECReconstructionCoordinatorWithMissingIndexes135() { InsufficientLocationsException exception = assertThrows(InsufficientLocationsException.class, () -> { - testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3); + testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3, false); }); String expectedMessage = @@ -658,7 +665,7 @@ public void testECReconstructionCoordinatorWithMissingIndexes135() { } private void testECReconstructionCoordinator(List missingIndexes, - int numInputChunks) throws Exception { + int numInputChunks, boolean triggerRetry) throws Exception { ObjectStore objectStore = rpcClient.getObjectStore(); String keyString = UUID.randomUUID().toString(); String volumeName = UUID.randomUUID().toString(); @@ -667,7 +674,7 @@ private void testECReconstructionCoordinator(List missingIndexes, objectStore.getVolume(volumeName).createBucket(bucketName); OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); - createKeyAndWriteData(keyString, bucket, numInputChunks); + createKeyAndWriteData(keyString, bucket, numInputChunks, triggerRetry); try ( XceiverClientManager xceiverClientManager = @@ -779,7 +786,7 @@ private void testECReconstructionCoordinator(List missingIndexes, .getReplicationConfig(), cToken); 
assertEquals(blockDataArrList.get(i).length, reconstructedBlockData.length); - checkBlockData(blockDataArrList.get(i), reconstructedBlockData); + checkBlockDataWithRetry(blockDataArrList.get(i), reconstructedBlockData, triggerRetry); XceiverClientSpi client = xceiverClientManager.acquireClient( newTargetPipeline); try { @@ -800,7 +807,7 @@ private void testECReconstructionCoordinator(List missingIndexes, } private void createKeyAndWriteData(String keyString, OzoneBucket bucket, - int numChunks) throws IOException { + int numChunks, boolean triggerRetry) throws IOException { for (int i = 0; i < numChunks; i++) { inputChunks[i] = getBytesWith(i + 1, EC_CHUNK_SIZE); } @@ -809,11 +816,48 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket, new HashMap<>())) { assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); for (int i = 0; i < numChunks; i++) { + // We generally wait until the data is written to the last chunk + // before attempting to trigger CloseContainer. + // We use an asynchronous approach for this trigger, + // aiming to ensure that closing the container does not interfere with the write operation. + // However, this process often needs to be executed multiple times before it takes effect. + if (i == numChunks - 1 && triggerRetry) { + triggerRetryByCloseContainer(out); + } out.write(inputChunks[i]); } } } + private void triggerRetryByCloseContainer(OzoneOutputStream out) { + CompletableFuture.runAsync(() -> { + BlockOutputStreamEntry blockOutputStreamEntry = out.getKeyOutputStream().getStreamEntries().get(0); + BlockID entryBlockID = blockOutputStreamEntry.getBlockID(); + long entryContainerID = entryBlockID.getContainerID(); + Pipeline entryPipeline = blockOutputStreamEntry.getPipeline(); + Map replicaIndexes = entryPipeline.getReplicaIndexes(); + try { + for (Map.Entry entry : replicaIndexes.entrySet()) { + DatanodeDetails key = entry.getKey(); + Integer value = entry.getValue(); + XceiverClientManager xceiverClientManager = new XceiverClientManager(config); + Token cToken = containerTokenGenerator + .generateToken(ANY_USER, ContainerID.valueOf(entryContainerID)); + XceiverClientSpi client = xceiverClientManager.acquireClient( + createSingleNodePipeline(entryPipeline, key, value)); + try { + ContainerProtocolCalls.closeContainer(client, entryContainerID, cToken.encodeToUrlString()); + } finally { + xceiverClientManager.releaseClient(client, false); + } + break; + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + @Test public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() throws Exception { @@ -826,7 +870,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() objectStore.getVolume(volumeName).createBucket(bucketName); OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); - createKeyAndWriteData(keyString, bucket, 3); + createKeyAndWriteData(keyString, bucket, 3, false); OzoneKeyDetails key = bucket.getKey(keyString); long conID = key.getOzoneKeyLocations().get(0).getContainerID(); @@ -900,6 +944,25 @@ private void closeContainer(long conID) HddsProtos.LifeCycleEvent.CLOSE); } + private void checkBlockDataWithRetry( + org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData, + org.apache.hadoop.ozone.container.common.helpers.BlockData[] + reconstructedBlockData, boolean triggerRetry) { + if (triggerRetry) { + for (int i = 0; i < reconstructedBlockData.length; i++) { + 
assertEquals(blockData[i].getBlockID(), reconstructedBlockData[i].getBlockID()); + List oldBlockDataChunks = blockData[i].getChunks(); + List newBlockDataChunks = reconstructedBlockData[i].getChunks(); + for (int j = 0; j < newBlockDataChunks.size(); j++) { + ContainerProtos.ChunkInfo chunkInfo = oldBlockDataChunks.get(j); + assertEquals(chunkInfo, newBlockDataChunks.get(j)); + } + } + return; + } + checkBlockData(blockData, reconstructedBlockData); + } + private void checkBlockData( org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData, org.apache.hadoop.ozone.container.common.helpers.BlockData[] @@ -967,8 +1030,7 @@ public static void prepareData(int[][] ranges) throws Exception { out.write(values[i]); } } -// List containerIDs = -// new ArrayList<>(scm.getContainerManager().getContainerIDs()); + List containerIDs = scm.getContainerManager().getContainers() .stream() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java index ca3733588aa..4d3fe400ed3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java @@ -77,6 +77,8 @@ import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.params.provider.Arguments.arguments; @@ -91,8 +93,8 @@ public class TestFinalizeBlock { private MiniOzoneCluster cluster; private OzoneConfiguration conf; private ObjectStore objectStore; - private static String volumeName = UUID.randomUUID().toString(); - private static String bucketName = UUID.randomUUID().toString(); + private static final String VOLUME_NAME = UUID.randomUUID().toString(); + private static final String BUCKET_NAME = UUID.randomUUID().toString(); public static Stream dnLayoutParams() { return Stream.of( @@ -106,8 +108,7 @@ public static Stream dnLayoutParams() { private void setup(boolean enableSchemaV3, ContainerLayoutVersion version) throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); - conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, - 0, StorageUnit.MB); + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); @@ -118,23 +119,21 @@ private void setup(boolean enableSchemaV3, ContainerLayoutVersion version) throw conf.setBoolean(CONTAINER_SCHEMA_V3_ENABLED, enableSchemaV3); conf.setEnum(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, version); - DatanodeConfiguration datanodeConfiguration = 
conf.getObject( - DatanodeConfiguration.class); + DatanodeConfiguration datanodeConfiguration = conf.getObject(DatanodeConfiguration.class); datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(100)); conf.setFromObject(datanodeConfiguration); ScmConfig scmConfig = conf.getObject(ScmConfig.class); scmConfig.setBlockDeletionInterval(Duration.ofMillis(100)); conf.setFromObject(scmConfig); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).build(); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(ONE, 30000); client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); + objectStore.createVolume(VOLUME_NAME); + objectStore.getVolume(VOLUME_NAME).createBucket(BUCKET_NAME); } @AfterEach @@ -151,36 +150,40 @@ public void shutdown() { @ParameterizedTest @MethodSource("dnLayoutParams") - public void testFinalizeBlock(boolean enableSchemaV3, ContainerLayoutVersion version) - throws Exception { + public void testFinalizeBlock(boolean enableSchemaV3, ContainerLayoutVersion version) throws Exception { setup(enableSchemaV3, version); String keyName = UUID.randomUUID().toString(); // create key createKey(keyName); - ContainerID containerId = cluster.getStorageContainerManager() - .getContainerManager().getContainers().get(0).containerID(); + ContainerID containerId = + cluster.getStorageContainerManager().getContainerManager().getContainers().get(0).containerID(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(keyName).setDataSize(0) + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(VOLUME_NAME) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setDataSize(0) .build(); List omKeyLocationInfoGroupList = cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(containerId); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); + ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(containerId); + Pipeline pipeline = + cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID()); XceiverClientManager xceiverClientManager = new XceiverClientManager(conf); - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); + XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline); // Before finalize block WRITE chunk on the same block should pass through ContainerProtos.ContainerCommandRequestProto request = - ContainerTestHelper.getWriteChunkRequest(pipeline, ( - new BlockID(containerId.getId(), omKeyLocationInfoGroupList.get(0) - .getLocationList().get(0).getLocalID())), 100); + ContainerTestHelper + .getWriteChunkRequest( + pipeline, + new BlockID( + containerId.getId(), + omKeyLocationInfoGroupList.get(0).getLocationList().get(0).getLocalID()), + 100); xceiverClient.sendCommand(request); // Before finalize block PUT block on the same block should pass through @@ -189,18 +192,18 @@ public void testFinalizeBlock(boolean enableSchemaV3, ContainerLayoutVersion ver // Now Finalize Block request = getFinalizeBlockRequest(omKeyLocationInfoGroupList, container); - 
ContainerProtos.ContainerCommandResponseProto response = - xceiverClient.sendCommand(request); + ContainerProtos.ContainerCommandResponseProto response = xceiverClient.sendCommand(request); - assertTrue(response.getFinalizeBlock() - .getBlockData().getBlockID().getLocalID() - == omKeyLocationInfoGroupList.get(0) - .getLocationList().get(0).getLocalID()); + assertEquals( + response.getFinalizeBlock().getBlockData().getBlockID().getLocalID(), + omKeyLocationInfoGroupList.get(0).getLocationList().get(0).getLocalID()); - assertTrue(((KeyValueContainerData)getContainerfromDN( - cluster.getHddsDatanodes().get(0), - containerId.getId()).getContainerData()) - .getFinalizedBlockSet().size() == 1); + assertEquals( + 1, + ((KeyValueContainerData) getContainerFromDN( + cluster.getHddsDatanodes().get(0), + containerId.getId()).getContainerData() + ).getFinalizedBlockSet().size()); testRejectPutAndWriteChunkAfterFinalizeBlock(containerId, pipeline, xceiverClient, omKeyLocationInfoGroupList); testFinalizeBlockReloadAfterDNRestart(containerId); @@ -215,22 +218,27 @@ private void testFinalizeBlockReloadAfterDNRestart(ContainerID containerId) { } // After restart DN, finalizeBlock should be loaded into memory - assertTrue(((KeyValueContainerData) - getContainerfromDN(cluster.getHddsDatanodes().get(0), - containerId.getId()).getContainerData()) - .getFinalizedBlockSet().size() == 1); + assertEquals(1, + ((KeyValueContainerData) getContainerFromDN( + cluster.getHddsDatanodes().get(0), + containerId.getId()).getContainerData() + ).getFinalizedBlockSet().size()); } private void testFinalizeBlockClearAfterCloseContainer(ContainerID containerId) throws InterruptedException, TimeoutException { - OzoneTestUtils.closeAllContainers(cluster.getStorageContainerManager().getEventQueue(), + OzoneTestUtils.closeAllContainers( + cluster.getStorageContainerManager().getEventQueue(), cluster.getStorageContainerManager()); // Finalize Block should be cleared from container data. 
- GenericTestUtils.waitFor(() -> ( - (KeyValueContainerData) getContainerfromDN(cluster.getHddsDatanodes().get(0), - containerId.getId()).getContainerData()).getFinalizedBlockSet().size() == 0, - 100, 10 * 1000); + GenericTestUtils.waitFor( + () -> ((KeyValueContainerData) getContainerFromDN( + cluster.getHddsDatanodes().get(0), + containerId.getId() + ).getContainerData()).getFinalizedBlockSet().isEmpty(), + 100, + 10 * 1000); try { // Restart DataNode cluster.restartHddsDatanode(0, true); @@ -239,27 +247,26 @@ private void testFinalizeBlockClearAfterCloseContainer(ContainerID containerId) } // After DN restart also there should not be any finalizeBlock - assertTrue(((KeyValueContainerData)getContainerfromDN( + assertTrue(((KeyValueContainerData) getContainerFromDN( cluster.getHddsDatanodes().get(0), - containerId.getId()).getContainerData()) - .getFinalizedBlockSet().size() == 0); + containerId.getId()).getContainerData() + ).getFinalizedBlockSet().isEmpty()); } private void testRejectPutAndWriteChunkAfterFinalizeBlock(ContainerID containerId, Pipeline pipeline, - XceiverClientSpi xceiverClient, List omKeyLocationInfoGroupList) - throws IOException { + XceiverClientSpi xceiverClient, List omKeyLocationInfoGroupList) throws IOException { // Try doing WRITE chunk on the already finalized block ContainerProtos.ContainerCommandRequestProto request = - ContainerTestHelper.getWriteChunkRequest(pipeline, - (new BlockID(containerId.getId(), omKeyLocationInfoGroupList.get(0) - .getLocationList().get(0).getLocalID())), 100); + ContainerTestHelper.getWriteChunkRequest( + pipeline, + (new BlockID(containerId.getId(), omKeyLocationInfoGroupList.get(0).getLocationList().get(0).getLocalID())), + 100); try { xceiverClient.sendCommand(request); fail("Write chunk should fail."); } catch (IOException e) { - assertTrue(e.getCause().getMessage() - .contains("Block already finalized")); + assertThat(e.getCause().getMessage()).contains("Block already finalized"); } // Try doing PUT block on the already finalized block @@ -268,8 +275,7 @@ private void testRejectPutAndWriteChunkAfterFinalizeBlock(ContainerID containerI xceiverClient.sendCommand(request); fail("Put block should fail."); } catch (IOException e) { - assertTrue(e.getCause().getMessage() - .contains("Block already finalized")); + assertThat(e.getCause().getMessage()).contains("Block already finalized"); } } @@ -283,15 +289,12 @@ private ContainerProtos.ContainerCommandRequestProto getFinalizeBlockRequest( } /** - * create a key with specified name. - * @param keyName - * @throws IOException + * Create a key with specified name. */ private void createKey(String keyName) throws IOException { - OzoneOutputStream key = objectStore.getVolume(volumeName) - .getBucket(bucketName) - .createKey(keyName, 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + OzoneOutputStream key = objectStore.getVolume(VOLUME_NAME) + .getBucket(BUCKET_NAME) + .createKey(keyName, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); key.write("test".getBytes(UTF_8)); key.close(); } @@ -299,9 +302,10 @@ private void createKey(String keyName) throws IOException { /** * Return the container for the given containerID from the given DN. 
*/ - private Container getContainerfromDN(HddsDatanodeService hddsDatanodeService, - long containerID) { - return hddsDatanodeService.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); + private Container getContainerFromDN(HddsDatanodeService hddsDatanodeService, long containerID) { + return hddsDatanodeService.getDatanodeStateMachine() + .getContainer() + .getContainerSet() + .getContainer(containerID); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index e6264cd3e11..48efa7b130d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -85,6 +85,7 @@ @Timeout(300) public class TestContainerMetrics { static final String TEST_DIR = GenericTestUtils.getRandomizedTempPath() + File.separator; + @TempDir private Path tempDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -93,8 +94,7 @@ public class TestContainerMetrics { @BeforeAll public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); - CONF.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, - DFS_METRICS_PERCENTILES_INTERVALS); + CONF.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, DFS_METRICS_PERCENTILES_INTERVALS); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, TEST_DIR); @@ -110,18 +110,18 @@ public static void cleanup() { } @AfterEach - public void cleanUp() throws IOException { + public void cleanUp() { FileUtils.deleteQuietly(new File(CONF.get(ScmConfigKeys.HDDS_DATANODE_DIR_KEY))); - FileUtils.deleteQuietly(CONF.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) == null ? - null : new File(CONF.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR))); + FileUtils.deleteQuietly(CONF.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) == null + ? 
null + : new File(CONF.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR))); } @Test public void testContainerMetrics() throws Exception { runTestClientServer(pipeline -> CONF .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), pipeline -> new XceiverClientGrpc(pipeline, CONF), (dn, volumeSet) -> new XceiverServerGrpc(dn, CONF, createDispatcher(dn, volumeSet), null), (dn, p) -> { @@ -140,48 +140,44 @@ public void testContainerMetricsRatis() throws Exception { private static MutableVolumeSet createVolumeSet(DatanodeDetails dn, String path) throws IOException { CONF.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path); return new MutableVolumeSet( - dn.getUuidString(), CONF, - null, StorageVolume.VolumeType.DATA_VOLUME, null); + dn.getUuidString(), + CONF, + null, + StorageVolume.VolumeType.DATA_VOLUME, + null); } private HddsDispatcher createDispatcher(DatanodeDetails dd, VolumeSet volumeSet) { ContainerSet containerSet = new ContainerSet(1000); - StateContext context = ContainerTestUtils.getMockContext( - dd, CONF); + StateContext context = ContainerTestUtils.getMockContext(dd, CONF); ContainerMetrics metrics = ContainerMetrics.create(CONF); Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { + for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) { handlers.put(containerType, Handler.getHandlerForContainerType(containerType, CONF, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, c -> { })); } - HddsDispatcher dispatcher = new HddsDispatcher(CONF, containerSet, - volumeSet, handlers, context, metrics, null); + HddsDispatcher dispatcher = new HddsDispatcher(CONF, containerSet, handlers, context, metrics, null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); dispatcher.setClusterId(UUID.randomUUID().toString()); return dispatcher; } - static void runTestClientServer( - CheckedConsumer initConf, - CheckedFunction createClient, - CheckedBiFunction createServer, - CheckedBiConsumer initServer) - throws Exception { + static void runTestClientServer(CheckedConsumer initConf, + CheckedFunction createClient, + CheckedBiFunction createServer, + CheckedBiConsumer initServer) throws Exception { + XceiverServerSpi server = null; XceiverClientSpi client = null; long containerID = ContainerTestHelper.getTestContainerID(); MutableVolumeSet volumeSet = null; try { - final Pipeline pipeline = - MockPipeline.createSingleNodePipeline(); + final Pipeline pipeline = MockPipeline.createSingleNodePipeline(); initConf.accept(pipeline); DatanodeDetails dn = pipeline.getFirstNode(); @@ -196,21 +192,17 @@ static void runTestClientServer( // Write Chunk BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); final ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper.getWriteChunkRequest( - pipeline, blockID, 1024); + ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024); ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest); - assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); //Read Chunk final 
ContainerProtos.ContainerCommandRequestProto readChunkRequest = - ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest - .getWriteChunk()); + ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk()); response = client.sendCommand(readChunkRequest); assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - MetricsRecordBuilder containerMetrics = getMetrics( - "StorageContainerMetrics"); + MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics"); assertCounter("NumOps", 3L, containerMetrics); assertCounter("numCreateContainer", 1L, containerMetrics); assertCounter("numWriteChunk", 1L, containerMetrics); @@ -222,11 +214,9 @@ static void runTestClientServer( Thread.sleep((DFS_METRICS_PERCENTILES_INTERVALS + 1) * 1000); assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics); - List volumes = - StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()); + List volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()); HddsVolume hddsVolume = volumes.get(0); - MetricsRecordBuilder volumeIOMetrics = - getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName()); + MetricsRecordBuilder volumeIOMetrics = getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName()); assertCounter("ReadBytes", 1024L, volumeIOMetrics); assertCounter("ReadOpCount", 1L, volumeIOMetrics); assertCounter("WriteBytes", 1024L, volumeIOMetrics); @@ -247,14 +237,17 @@ static void runTestClientServer( private XceiverServerSpi newXceiverServerRatis(DatanodeDetails dn, MutableVolumeSet volumeSet) throws IOException { - CONF.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + CONF.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); CONF.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); - final ContainerDispatcher dispatcher = createDispatcher(dn, - volumeSet); - return XceiverServerRatis.newXceiverServerRatis(null, dn, CONF, dispatcher, + final ContainerDispatcher dispatcher = createDispatcher(dn, volumeSet); + return XceiverServerRatis.newXceiverServerRatis( + null, + dn, + CONF, + dispatcher, new ContainerController(new ContainerSet(1000), Maps.newHashMap()), - null, null); + null, + null); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 8db7b137472..c7a185bd1a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -83,8 +83,7 @@ * Test Containers. 
*/ public class TestContainerServer { - static final String TEST_DIR = GenericTestUtils.getTestDir("dfs") - .getAbsolutePath() + File.separator; + static final String TEST_DIR = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; @TempDir @@ -96,8 +95,7 @@ public static void setup() { CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); - caClient = new DNCertificateClient(new SecurityConfig(CONF), null, - dn, null, null, null); + caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); } @AfterAll @@ -108,14 +106,13 @@ public static void tearDown() throws Exception { @Test public void testClientServer() throws Exception { DatanodeDetails datanodeDetails = randomDatanodeDetails(); - runTestClientServer(1, (pipeline, conf) -> conf + runTestClientServer(1, + (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, - (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf, - new TestContainerDispatcher(), caClient), (dn, p) -> { - }); + (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf, new TestContainerDispatcher(), caClient), + (dn, p) -> { }); } @Test @@ -124,21 +121,23 @@ public void testClientServerRatisGrpc() throws Exception { runTestClientServerRatis(GRPC, 3); } - static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + static XceiverServerRatis newXceiverServerRatis(DatanodeDetails dn, OzoneConfiguration conf) throws IOException { + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); - return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, + return XceiverServerRatis.newXceiverServerRatis( + null, + dn, + conf, + dispatcher, new ContainerController(new ContainerSet(1000), Maps.newHashMap()), - caClient, null); + caClient, + null); } - static void runTestClientServerRatis(RpcType rpc, int numNodes) - throws Exception { + static void runTestClientServerRatis(RpcType rpc, int numNodes) throws Exception { runTestClientServer(numNodes, (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), XceiverClientRatis::newXceiverClientRatis, @@ -149,17 +148,14 @@ static void runTestClientServerRatis(RpcType rpc, int numNodes) static void runTestClientServer( int numDatanodes, CheckedBiConsumer initConf, - CheckedBiFunction createClient, - CheckedBiFunction createServer, - CheckedBiConsumer initServer) - throws Exception { + CheckedBiFunction createClient, + CheckedBiFunction createServer, + CheckedBiConsumer initServer) throws Exception { + final List servers = new ArrayList<>(); XceiverClientSpi client = null; try { - final Pipeline pipeline = - MockPipeline.createPipeline(numDatanodes); + final Pipeline 
pipeline = MockPipeline.createPipeline(numDatanodes); initConf.accept(pipeline, CONF); for (DatanodeDetails dn : pipeline.getNodes()) { @@ -173,9 +169,7 @@ static void runTestClientServer( client.connect(); final ContainerCommandRequestProto request = - ContainerTestHelper - .getCreateContainerRequest( - ContainerTestHelper.getTestContainerID(), pipeline); + ContainerTestHelper.getCreateContainerRequest(ContainerTestHelper.getTestContainerID(), pipeline); assertNotNull(request.getTraceID()); client.sendCommand(request); @@ -187,32 +181,32 @@ static void runTestClientServer( } } - private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, - OzoneConfiguration conf) - throws IOException { + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, - Paths.get(TEST_DIR, "dfs", "data", "hdds", - RandomStringUtils.randomAlphabetic(4)).toString()); + Paths.get(TEST_DIR, "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); conf.set(OZONE_METADATA_DIRS, TEST_DIR); - VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, - StorageVolume.VolumeType.DATA_VOLUME, null); + VolumeSet volumeSet = new MutableVolumeSet( + dd.getUuidString(), conf, + null, + StorageVolume.VolumeType.DATA_VOLUME, + null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { + for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) { handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, + Handler.getHandlerForContainerType(containerType, + conf, dd.getUuid().toString(), - containerSet, volumeSet, metrics, - c -> { - })); + containerSet, + volumeSet, + metrics, + c -> { })); } - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics, null); + HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, handlers, context, metrics, null); hddsDispatcher.setClusterId(scmId.toString()); return hddsDispatcher; } @@ -220,9 +214,10 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, @Test public void testClientServerWithContainerDispatcher() throws Exception { DatanodeDetails dd = MockDatanodeDetails.randomDatanodeDetails(); - HddsDispatcher hddsDispatcher = createDispatcher(dd, - UUID.randomUUID(), CONF); - runTestClientServer(1, (pipeline, conf) -> conf + HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); + runTestClientServer( + 1, + (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), @@ -240,9 +235,7 @@ private static class TestContainerDispatcher implements ContainerDispatcher { * @return Command Response */ @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, - DispatcherContext context) { + public ContainerCommandResponseProto dispatch(ContainerCommandRequestProto msg, DispatcherContext context) { return ContainerTestHelper.getCreateContainerResponse(msg); } @@ -258,6 
+251,7 @@ public void validateContainerCommand( @Override public void shutdown() { } + @Override public Handler getHandler(ContainerProtos.ContainerType containerType) { return null; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 0bdf61b3bd5..f16197b97b4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -120,8 +120,7 @@ public class TestSecureContainerServer { @TempDir private Path tempDir; - private static final String TEST_DIR - = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; + private static final String TEST_DIR = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClientTestImpl caClient; private static SecretKeyClient secretKeyClient; @@ -140,11 +139,9 @@ public static void setup() throws Exception { long tokenLifetime = TimeUnit.HOURS.toMillis(1); - blockTokenSecretManager = new OzoneBlockTokenSecretManager(tokenLifetime, - secretKeyClient); + blockTokenSecretManager = new OzoneBlockTokenSecretManager(tokenLifetime, secretKeyClient); - containerTokenSecretManager = new ContainerTokenSecretManager( - tokenLifetime, secretKeyClient); + containerTokenSecretManager = new ContainerTokenSecretManager(tokenLifetime, secretKeyClient); } @AfterAll @@ -160,41 +157,47 @@ public void cleanUp() throws IOException { @Test public void testClientServer() throws Exception { DatanodeDetails dd = MockDatanodeDetails.randomDatanodeDetails(); - HddsDispatcher hddsDispatcher = createDispatcher(dd, - UUID.randomUUID(), CONF); - runTestClientServer(1, (pipeline, conf) -> conf + HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); + runTestClientServer( + 1, + (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, - (dn, conf) -> new XceiverServerGrpc(dd, conf, - hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); + (dn, conf) -> new XceiverServerGrpc(dd, conf, hddsDispatcher, caClient), + (dn, p) -> { }, + (p) -> { }); } - private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, - OzoneConfiguration conf) throws IOException { + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, - Paths.get(TEST_DIR, "dfs", "data", "hdds", - RandomStringUtils.randomAlphabetic(4)).toString()); + Paths.get(TEST_DIR, "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); conf.set(OZONE_METADATA_DIRS, TEST_DIR); - VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, - StorageVolume.VolumeType.DATA_VOLUME, null); + VolumeSet volumeSet = + new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> 
hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { + for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) { handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, + Handler.getHandlerForContainerType( + containerType, + conf, dd.getUuid().toString(), - containerSet, volumeSet, metrics, + containerSet, + volumeSet, + metrics, c -> { })); } HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics, + conf, + containerSet, + handlers, + context, + metrics, TokenVerifier.create(new SecurityConfig(conf), secretKeyClient)); hddsDispatcher.setClusterId(scmId.toString()); return hddsDispatcher; @@ -206,25 +209,24 @@ public void testClientServerRatisGrpc() throws Exception { runTestClientServerRatis(GRPC, 3); } - XceiverServerRatis newXceiverServerRatis( - DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, - true); - conf.setBoolean( - OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + XceiverServerRatis newXceiverServerRatis(DatanodeDetails dn, OzoneConfiguration conf) throws IOException { + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); final String dir = TEST_DIR + dn.getUuid(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); - final ContainerDispatcher dispatcher = createDispatcher(dn, - UUID.randomUUID(), conf); - return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, + final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); + return XceiverServerRatis.newXceiverServerRatis( + null, + dn, + conf, + dispatcher, new ContainerController(new ContainerSet(1000), Maps.newHashMap()), - caClient, null); + caClient, + null); } - private void runTestClientServerRatis(RpcType rpc, int numNodes) - throws Exception { + private void runTestClientServerRatis(RpcType rpc, int numNodes) throws Exception { runTestClientServer(numNodes, (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), XceiverClientRatis::newXceiverClientRatis, @@ -236,16 +238,13 @@ private void runTestClientServerRatis(RpcType rpc, int numNodes) private static void runTestClientServer( int numDatanodes, CheckedBiConsumer initConf, - CheckedBiFunction createClient, - CheckedBiFunction createServer, + CheckedBiFunction createClient, + CheckedBiFunction createServer, CheckedBiConsumer initServer, - Consumer stopServer) - throws Exception { + Consumer stopServer) throws Exception { + final List servers = new ArrayList<>(); - final Pipeline pipeline = - MockPipeline.createPipeline(numDatanodes); + final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes); initConf.accept(pipeline, CONF); @@ -262,32 +261,26 @@ private static void runTestClientServer( long containerID = getTestContainerID(); BlockID blockID = 
getTestBlockID(containerID); - assertFailsTokenVerification(client, - getCreateContainerRequest(containerID, pipeline)); + assertFailsTokenVerification(client, getCreateContainerRequest(containerID, pipeline)); //create the container - ContainerProtocolCalls.createContainer(client, containerID, - getToken(ContainerID.valueOf(containerID))); + ContainerProtocolCalls.createContainer(client, containerID, getToken(ContainerID.valueOf(containerID))); Token token = blockTokenSecretManager.generateToken(blockID, EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong()); String encodedToken = token.encodeToUrlString(); - ContainerCommandRequestProto.Builder writeChunk = - newWriteChunkRequestBuilder(pipeline, blockID, 1024); + ContainerCommandRequestProto.Builder writeChunk = newWriteChunkRequestBuilder(pipeline, blockID, 1024); assertRequiresToken(client, encodedToken, writeChunk); - ContainerCommandRequestProto.Builder putBlock = - newPutBlockRequestBuilder(pipeline, writeChunk.getWriteChunk()); + ContainerCommandRequestProto.Builder putBlock = newPutBlockRequestBuilder(pipeline, writeChunk.getWriteChunk()); assertRequiresToken(client, encodedToken, putBlock); - ContainerCommandRequestProto.Builder readChunk = - newReadChunkRequestBuilder(pipeline, writeChunk.getWriteChunk()); + ContainerCommandRequestProto.Builder readChunk = newReadChunkRequestBuilder(pipeline, writeChunk.getWriteChunk()); assertRequiresToken(client, encodedToken, readChunk); - ContainerCommandRequestProto.Builder getBlock = - newGetBlockRequestBuilder(pipeline, putBlock.getPutBlock()); + ContainerCommandRequestProto.Builder getBlock = newGetBlockRequestBuilder(pipeline, putBlock.getPutBlock()); assertRequiresToken(client, encodedToken, getBlock); ContainerCommandRequestProto.Builder getCommittedBlockLength = @@ -299,9 +292,8 @@ private static void runTestClientServer( } } - private static void assertRequiresToken(XceiverClientSpi client, - String encodedToken, ContainerCommandRequestProto.Builder requestBuilder) - throws Exception { + private static void assertRequiresToken(XceiverClientSpi client, String encodedToken, + ContainerCommandRequestProto.Builder requestBuilder) throws Exception { requestBuilder.setEncodedToken(""); assertFailsTokenVerification(client, requestBuilder.build()); @@ -310,23 +302,20 @@ private static void assertRequiresToken(XceiverClientSpi client, assertSucceeds(client, requestBuilder.build()); } - private static void assertSucceeds( - XceiverClientSpi client, ContainerCommandRequestProto req) - throws IOException { + private static void assertSucceeds(XceiverClientSpi client, ContainerCommandRequestProto req) throws IOException { ContainerCommandResponseProto response = client.sendCommand(req); assertEquals(SUCCESS, response.getResult()); } - private static void assertFailsTokenVerification(XceiverClientSpi client, - ContainerCommandRequestProto request) throws Exception { + private static void assertFailsTokenVerification(XceiverClientSpi client, ContainerCommandRequestProto request) + throws Exception { if (client instanceof XceiverClientGrpc || isReadOnly(request)) { ContainerCommandResponseProto response = client.sendCommand(request); assertNotEquals(response.getResult(), ContainerProtos.Result.SUCCESS); String msg = response.getMessage(); assertThat(msg).contains(BLOCK_TOKEN_VERIFICATION_FAILED.name()); } else { - final Throwable t = assertThrows(Throwable.class, - () -> client.sendCommand(request)); + final Throwable t = assertThrows(Throwable.class, () -> client.sendCommand(request)); 
assertRootCauseMessage(BLOCK_TOKEN_VERIFICATION_FAILED.name(), t); } } @@ -341,8 +330,8 @@ private static void assertRootCauseMessage(String contained, Throwable t) { private static String getToken(ContainerID containerID) throws IOException { String username = ""; - return containerTokenSecretManager.generateToken( - containerTokenSecretManager.createIdentifier(username, containerID) - ).encodeToUrlString(); + return containerTokenSecretManager + .generateToken(containerTokenSecretManager.createIdentifier(username, containerID)) + .encodeToUrlString(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index 7af0b5f9aa1..a4327a49bfa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -170,6 +170,18 @@ private static Stream scanTestCases() { Named.of("Invalid EndKey key9", Arrays.asList("--endkey", "key9")), Named.of("Expect key1-key5", Pair.of("key1", "key6")) ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter key3", Arrays.asList("--filter", "keyName:equals:key3")), + Named.of("Expect key3", Pair.of("key3", "key4")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter invalid key", Arrays.asList("--filter", "keyName:equals:key9")), + Named.of("Expect key1-key3", null) + ), Arguments.of( Named.of(BLOCK_DATA + " V3", Pair.of(BLOCK_DATA, true)), Named.of("Default", Pair.of(0, "")), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index abc21ed4351..a173bd9222e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -80,6 +80,7 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -233,7 +234,7 @@ public void setUp() throws IOException { ozoneManager.getMetadataManager().getMetaTable().put( OzoneConsts.RANGER_OZONE_SERVICE_VERSION_KEY, String.valueOf(v)); return null; - }).when(omRatisServer).submitRequest(any(), any()); + }).when(omRatisServer).submitRequest(any(), any(), anyLong()); } catch (ServiceException e) { throw new RuntimeException(e); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java similarity index 52% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java 
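The TestLDBCli cases added above exercise the same scan --filter syntax as the smoketest earlier in this patch: a record is kept only when the named field satisfies the operator and value, so "keyName:equals:key3" keeps key3 while "keyName:equals:key9" matches nothing. As a rough illustration only, with a hypothetical helper that is not the ldb implementation, evaluating a single-level filter of that shape against a flat record could look like this:

  import java.util.Map;

  // Hypothetical sketch of "field:operator:value" evaluation; only the equals
  // operator used in the tests above is shown.
  public final class FilterSketch {
    static boolean matches(Map<String, String> record, String filter) {
      String[] parts = filter.split(":", 3);   // field, operator, value
      String actual = record.get(parts[0]);
      if (actual == null) {
        return false;                          // a missing field never matches
      }
      if ("equals".equals(parts[1])) {
        return actual.equals(parts[2]);
      }
      throw new IllegalArgumentException("unsupported operator: " + parts[1]);
    }

    public static void main(String[] args) {
      Map<String, String> record = Map.of("keyName", "key3");
      System.out.println(matches(record, "keyName:equals:key3"));  // true
      System.out.println(matches(record, "keyName:equals:key9"));  // false
    }
  }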
index be4ea69095b..d77f9bf9d8d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -32,20 +33,26 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; +import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.Order; @@ -53,17 +60,27 @@ import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Random; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -72,6 +89,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; /** * Test Snapshot Deleting Service. 
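The renamed integration test pulls in Mockito along with the key, directory and snapshot deleting services; the hunks that follow build spies of those services and gate their real behaviour, in the same style as the doAnswer(...).when(omRatisServer).submitRequest(...) stub adjusted in TestRangerBGSyncService above. A minimal sketch of that spy-and-delegate idiom, using a hypothetical Worker class rather than any Ozone type:

  import static org.mockito.ArgumentMatchers.anyInt;
  import static org.mockito.Mockito.doAnswer;
  import static org.mockito.Mockito.spy;

  class Worker {
    int process(int batchSize) {
      return batchSize;                 // stands in for the real work
    }
  }

  public final class SpyGateSketch {
    public static void main(String[] args) {
      Worker worker = spy(new Worker());
      // Intercept the call, run extra coordination logic, then fall through to the real method.
      doAnswer(invocation -> {
        System.out.println("gate reached before real processing");
        return invocation.callRealMethod();
      }).when(worker).process(anyInt());

      System.out.println(worker.process(5));   // prints the gate message, then 5
    }
  }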
@@ -80,10 +102,10 @@ @Timeout(300) @TestInstance(TestInstance.Lifecycle.PER_CLASS) @TestMethodOrder(OrderAnnotation.class) -public class TestSnapshotDeletingService { +public class TestSnapshotDeletingServiceIntegrationTest { private static final Logger LOG = - LoggerFactory.getLogger(TestSnapshotDeletingService.class); + LoggerFactory.getLogger(TestSnapshotDeletingServiceIntegrationTest.class); private static boolean omRatisEnabled = true; private static final ByteBuffer CONTENT = ByteBuffer.allocate(1024 * 1024 * 16); @@ -147,7 +169,7 @@ public void testSnapshotSplitAndMove() throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); - createSnapshotDataForBucket1(); + createSnapshotDataForBucket(bucket1); assertTableRowCount(snapshotInfoTable, 2); GenericTestUtils.waitFor(() -> snapshotDeletingService @@ -174,7 +196,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { om.getMetadataManager().getSnapshotInfoTable(); runIndividualTest = false; - createSnapshotDataForBucket1(); + createSnapshotDataForBucket(bucket1); BucketArgs bucketArgs = new BucketArgs.Builder() .setBucketLayout(BucketLayout.LEGACY) @@ -454,6 +476,228 @@ public void testSnapshotWithFSO() throws Exception { rcSnap1.close(); } + private DirectoryDeletingService getMockedDirectoryDeletingService(AtomicBoolean dirDeletionWaitStarted, + AtomicBoolean dirDeletionStarted) + throws InterruptedException, TimeoutException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getDirDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getDirDeletingService().getThreadCount() == 0, 1000, + 100000); + DirectoryDeletingService directoryDeletingService = Mockito.spy(new DirectoryDeletingService(10000, + TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf())); + directoryDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> directoryDeletingService.getThreadCount() == 0, 1000, + 100000); + when(ozoneManager.getMetadataManager()).thenAnswer(i -> { + // Wait for SDS to reach DDS wait block before processing any deleted directories. + GenericTestUtils.waitFor(dirDeletionWaitStarted::get, 1000, 100000); + dirDeletionStarted.set(true); + return i.callRealMethod(); + }); + return directoryDeletingService; + } + + private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletionWaitStarted, + AtomicBoolean keyDeletionStarted) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getDeletingService().getThreadCount() == 0, 1000, + 100000); + KeyManager keyManager = Mockito.spy(om.getKeyManager()); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); + KeyDeletingService keyDeletingService = Mockito.spy(new KeyDeletingService(ozoneManager, + ozoneManager.getScmClient().getBlockClient(), keyManager, 10000, + 100000, cluster.getConf())); + keyDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000, + 100000); + when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> { + // wait for SDS to reach the KDS wait block before processing any key. 
+ GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000); + keyDeletionStarted.set(true); + return i.callRealMethod(); + }); + return keyDeletingService; + } + + @SuppressWarnings("checkstyle:parameternumber") + private SnapshotDeletingService getMockedSnapshotDeletingService(KeyDeletingService keyDeletingService, + DirectoryDeletingService directoryDeletingService, + AtomicBoolean snapshotDeletionStarted, + AtomicBoolean keyDeletionWaitStarted, + AtomicBoolean dirDeletionWaitStarted, + AtomicBoolean keyDeletionStarted, + AtomicBoolean dirDeletionStarted, + OzoneBucket testBucket) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getSnapshotDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getSnapshotDeletingService().getThreadCount() == 0, 1000, + 100000); + KeyManager keyManager = Mockito.spy(om.getKeyManager()); + OmMetadataManagerImpl omMetadataManager = Mockito.spy((OmMetadataManagerImpl)om.getMetadataManager()); + SnapshotChainManager unMockedSnapshotChainManager = + ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager(); + SnapshotChainManager snapshotChainManager = Mockito.spy(unMockedSnapshotChainManager); + OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager); + when(keyManager.getDeletingService()).thenReturn(keyDeletingService); + when(keyManager.getDirDeletingService()).thenReturn(directoryDeletingService); + SnapshotDeletingService snapshotDeletingService = Mockito.spy(new SnapshotDeletingService(10000, + 100000, ozoneManager)); + snapshotDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> snapshotDeletingService.getThreadCount() == 0, 1000, + 100000); + when(snapshotChainManager.iterator(anyBoolean())).thenAnswer(i -> { + Iterator itr = (Iterator) i.callRealMethod(); + return Lists.newArrayList(itr).stream().filter(uuid -> { + try { + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(om, snapshotChainManager, uuid); + return snapshotInfo.getBucketName().equals(testBucket.getName()) && + snapshotInfo.getVolumeName().equals(testBucket.getVolumeName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).iterator(); + }); + when(snapshotChainManager.getLatestGlobalSnapshotId()) + .thenAnswer(i -> unMockedSnapshotChainManager.getLatestGlobalSnapshotId()); + when(snapshotChainManager.getOldestGlobalSnapshotId()) + .thenAnswer(i -> unMockedSnapshotChainManager.getOldestGlobalSnapshotId()); + doAnswer(i -> { + // KDS wait block reached in SDS. + GenericTestUtils.waitFor(() -> { + return keyDeletingService.isRunningOnAOS(); + }, 1000, 100000); + keyDeletionWaitStarted.set(true); + return i.callRealMethod(); + }).when(snapshotDeletingService).waitForKeyDeletingService(); + doAnswer(i -> { + // DDS wait block reached in SDS. 
+ GenericTestUtils.waitFor(directoryDeletingService::isRunningOnAOS, 1000, 100000); + dirDeletionWaitStarted.set(true); + return i.callRealMethod(); + }).when(snapshotDeletingService).waitForDirDeletingService(); + doAnswer(i -> { + // Assert KDS & DDS is not running when SDS starts moving entries & assert all wait block, KDS processing + // AOS block & DDS AOS block have been executed. + Assertions.assertTrue(keyDeletionWaitStarted.get()); + Assertions.assertTrue(dirDeletionWaitStarted.get()); + Assertions.assertTrue(keyDeletionStarted.get()); + Assertions.assertTrue(dirDeletionStarted.get()); + Assertions.assertFalse(keyDeletingService.isRunningOnAOS()); + Assertions.assertFalse(directoryDeletingService.isRunningOnAOS()); + snapshotDeletionStarted.set(true); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(anyString(), anyString(), anyString()); + return snapshotDeletingService; + } + + @Test + @Order(4) + public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exception { + AtomicBoolean keyDeletionWaitStarted = new AtomicBoolean(false); + AtomicBoolean dirDeletionWaitStarted = new AtomicBoolean(false); + AtomicBoolean keyDeletionStarted = new AtomicBoolean(false); + AtomicBoolean dirDeletionStarted = new AtomicBoolean(false); + AtomicBoolean snapshotDeletionStarted = new AtomicBoolean(false); + Random random = new Random(); + String bucketName = "bucket" + random.nextInt(); + BucketArgs bucketArgs = new BucketArgs.Builder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + OzoneBucket testBucket = TestDataUtil.createBucket( + client, VOLUME_NAME, bucketArgs, bucketName); + // mock keyDeletingService + KeyDeletingService keyDeletingService = getMockedKeyDeletingService(keyDeletionWaitStarted, keyDeletionStarted); + + // mock dirDeletingService + DirectoryDeletingService directoryDeletingService = getMockedDirectoryDeletingService(dirDeletionWaitStarted, + dirDeletionStarted); + + // mock snapshotDeletingService. 
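Annotation: the mocked services above rely on Mockito spies whose stubbed calls block on AtomicBoolean "gates" (keyDeletionWaitStarted, dirDeletionWaitStarted, ...) so the test can force one specific interleaving between the deleting services and the snapshot deleting service. The following is a minimal, JDK-only sketch of that gating pattern under stated assumptions: the worker/coordinator names and the awaitTrue helper are hypothetical stand-ins for the spied services and GenericTestUtils.waitFor, not the real classes.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public final class GatedInterleavingSketch {
  // Poll a flag until it becomes true or the timeout expires (simplified analogue of GenericTestUtils.waitFor).
  static void awaitTrue(AtomicBoolean flag, long timeoutMillis) throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!flag.get()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("flag not set within " + timeoutMillis + " ms");
      }
      Thread.sleep(10);
    }
  }

  public static void main(String[] args) throws Exception {
    AtomicBoolean waiterReached = new AtomicBoolean(false);   // "wait block reached" signal
    AtomicBoolean workerStarted = new AtomicBoolean(false);   // "worker began processing" signal

    // Worker thread: refuses to start real work until the coordinator reaches its wait block.
    Thread worker = new Thread(() -> {
      try {
        awaitTrue(waiterReached, 10_000);
        workerStarted.set(true);
        // ... real processing would happen here ...
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    });

    // Coordinator thread: flips its flag first, then waits until the worker has actually started.
    Thread coordinator = new Thread(() -> {
      try {
        waiterReached.set(true);
        awaitTrue(workerStarted, 10_000);
        // ... the coordinator can now assert the worker ran before it proceeds ...
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    });

    worker.start();
    coordinator.start();
    worker.join();
    coordinator.join();
    System.out.println("deterministic interleaving completed");
  }
}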
+ SnapshotDeletingService snapshotDeletingService = getMockedSnapshotDeletingService(keyDeletingService, + directoryDeletingService, snapshotDeletionStarted, keyDeletionWaitStarted, dirDeletionWaitStarted, + keyDeletionStarted, dirDeletionStarted, testBucket); + createSnapshotFSODataForBucket(testBucket); + List> renamesKeyEntries; + List>> deletedKeyEntries; + List> deletedDirEntries; + try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")) { + renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000); + } + Thread keyDeletingThread = new Thread(() -> { + try { + keyDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Thread directoryDeletingThread = new Thread(() -> { + try { + directoryDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + ExecutorService snapshotDeletingThread = Executors.newFixedThreadPool(1); + Runnable snapshotDeletionRunnable = () -> { + try { + snapshotDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + keyDeletingThread.start(); + directoryDeletingThread.start(); + Future future = snapshotDeletingThread.submit(snapshotDeletionRunnable); + GenericTestUtils.waitFor(snapshotDeletionStarted::get, 1000, 30000); + future.get(); + try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")) { + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000)); + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000)); + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000)); + } + List> aosRenamesKeyEntries = + om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + List>> aosDeletedKeyEntries = + om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + List> aosDeletedDirEntries = + om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000); + renamesKeyEntries.forEach(entry -> Assertions.assertTrue(aosRenamesKeyEntries.contains(entry))); + deletedKeyEntries.forEach(entry -> Assertions.assertTrue(aosDeletedKeyEntries.contains(entry))); + deletedDirEntries.forEach(entry -> Assertions.assertTrue(aosDeletedDirEntries.contains(entry))); + Mockito.reset(snapshotDeletingService); + SnapshotInfo snap2 = SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2"); + Assertions.assertEquals(snap2.getSnapshotStatus(), SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); + future = snapshotDeletingThread.submit(snapshotDeletionRunnable); + future.get(); + 
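Annotation: the remainder of the test captures the snapshot's renamed key, deleted key and deleted directory entries up front, runs the three services concurrently, and then verifies that the snapshot's tables are drained while every captured entry reappears in the active object store (AOS). A small, self-contained sketch of that capture-then-verify assertion, using plain strings in place of Table.KeyValue entries (the lists here are illustrative, not the real table contents):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public final class SnapshotDrainAssertionSketch {
  public static void main(String[] args) {
    // Entries captured from the snapshot's tables before the services run
    // (stand-ins for renamesKeyEntries / deletedKeyEntries / deletedDirEntries above).
    List<String> capturedFromSnapshot = Arrays.asList("renamed/key1", "deleted/key2", "deletedDir/dir2");

    // State observed after the run: the snapshot's tables are empty and the AOS now holds the moved entries.
    List<String> snapshotAfterRun = Collections.emptyList();
    List<String> aosAfterRun = Arrays.asList("renamed/key1", "deleted/key2", "deletedDir/dir2", "other/key");

    if (!snapshotAfterRun.isEmpty()) {
      throw new AssertionError("snapshot tables should be drained after the purge");
    }
    if (!aosAfterRun.containsAll(capturedFromSnapshot)) {
      throw new AssertionError("every entry captured from the snapshot must reappear in the AOS tables");
    }
    System.out.println("snapshot entries were moved to the active object store");
  }
}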
Assertions.assertThrows(IOException.class, () -> SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")); + cluster.restartOzoneManager(); + } + /* Flow ---- @@ -472,7 +716,7 @@ public void testSnapshotWithFSO() throws Exception { create snapshot3 delete snapshot2 */ - private void createSnapshotDataForBucket1() throws Exception { + private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); Table deletedTable = @@ -482,70 +726,147 @@ private void createSnapshotDataForBucket1() throws Exception { OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) om.getMetadataManager(); - TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key1", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); assertTableRowCount(keyTable, 2); // Create Snapshot 1. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap1"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap1"); assertTableRowCount(snapshotInfoTable, 1); // Overwrite bucket1key0, This is a newer version of the key which should // reclaimed as this is a different version of the key. - TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key2", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); // Key 1 cannot be reclaimed as it is still referenced by Snapshot 1. - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key1", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key1", false); // Key 2 is deleted here, which will be reclaimed here as // it is not being referenced by previous snapshot. - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key2", false); - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key0", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key2", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key0", false); assertTableRowCount(keyTable, 0); // one copy of bucket1key0 should also be reclaimed as it not same // but original deleted key created during overwrite should not be deleted assertTableRowCount(deletedTable, 2); // Create Snapshot 2. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap2"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); assertTableRowCount(snapshotInfoTable, 2); // Key 2 is removed from the active Db's // deletedTable when Snapshot 2 is taken. 
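Annotation: the generalized createSnapshotDataForBucket helper above derives the snapshotInfoTable key from the bucket under test instead of hard-coding "/vol1/bucket1/bucket1snap2". A tiny sketch of the key layout those String.format calls assume ("/volume/bucket/snapshotName"); the helper name is hypothetical.

public final class SnapshotTableKeySketch {
  // Builds the snapshotInfoTable key used above: "/" + volume + "/" + bucket + "/" + snapshotName.
  static String snapshotTableKey(String volume, String bucket, String snapshotName) {
    return String.format("/%s/%s/%s", volume, bucket, snapshotName);
  }

  public static void main(String[] args) {
    String bucket = "bucket-1234";
    // For snapshot "<bucket>snap2" of /vol/<bucket> this yields "/vol/bucket-1234/bucket-1234snap2".
    System.out.println(snapshotTableKey("vol", bucket, bucket + "snap2"));
  }
}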
assertTableRowCount(deletedTable, 0); - TestDataUtil.createKey(bucket1, "bucket1key3", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key4", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key4", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key4", false); assertTableRowCount(keyTable, 1); assertTableRowCount(deletedTable, 0); // Create Snapshot 3. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap3"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap3"); assertTableRowCount(snapshotInfoTable, 3); SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable() - .get("/vol1/bucket1/bucket1snap2"); + .get(String.format("/%s/%s/%ssnap2", bucket.getVolumeName(), bucket.getName(), bucket.getName())); // Delete Snapshot 2. - client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap2"); + client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); assertTableRowCount(snapshotInfoTable, 2); - verifySnapshotChain(snapshotInfo, "/vol1/bucket1/bucket1snap3"); + verifySnapshotChain(snapshotInfo, String.format("/%s/%s/%ssnap3", bucket.getVolumeName(), bucket.getName(), + bucket.getName())); + } + + + /* + Flow + ---- + create dir0/key0 + create dir1/key1 + overwrite dir0/key0 + create dir2/key2 + create snap1 + rename dir1/key1 -> dir1/key10 + delete dir1/key10 + delete dir2 + create snap2 + delete snap2 + */ + private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table deletedDirTable = + om.getMetadataManager().getDeletedDirTable(); + Table keyTable = + om.getMetadataManager().getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table dirTable = + om.getMetadataManager().getDirectoryTable(); + Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); + OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) + om.getMetadataManager(); + Map countMap = + metadataManager.listTables().entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> { + try { + return (int)metadataManager.countRowsInTable(e.getValue()); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + })); + TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 2); + assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 2); + + // Overwrite bucket1key0, This is a newer version of the key which should + // reclaimed as this is a different version of the key. 
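Annotation: unlike the original helper, createSnapshotFSODataForBucket does not assume empty tables; it records a baseline row count per table (countMap) and asserts deltas against that baseline, so it stays correct when earlier tests leave rows behind. A minimal sketch of that baseline/delta pattern, with a hypothetical countRows lookup in place of OmMetadataManagerImpl.countRowsInTable:

import java.util.HashMap;
import java.util.Map;
import java.util.function.ToLongFunction;

public final class BaselineCountSketch {
  public static void main(String[] args) {
    // Pretend per-table row counts; in the test these come from countRowsInTable(table).
    Map<String, Long> liveCounts = new HashMap<>();
    liveCounts.put("keyTable", 7L);
    liveCounts.put("deletedTable", 2L);

    ToLongFunction<String> countRows = liveCounts::get;   // hypothetical stand-in

    // 1. Capture the baseline before the test mutates anything.
    Map<String, Long> baseline = new HashMap<>();
    for (String table : liveCounts.keySet()) {
      baseline.put(table, countRows.applyAsLong(table));
    }

    // 2. Simulate the test adding two keys.
    liveCounts.merge("keyTable", 2L, Long::sum);

    // 3. Assert relative growth, not an absolute size.
    long expected = baseline.get("keyTable") + 2;
    long actual = countRows.applyAsLong("keyTable");
    if (actual != expected) {
      throw new AssertionError("expected " + expected + " rows but found " + actual);
    }
    System.out.println("keyTable grew by exactly 2 rows");
  }
}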
+ TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 3); + assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 3); + assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); + // create snap1 + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap1"); + bucket.renameKey("dir1/" + bucket.getName() + "key1", "dir1/" + bucket.getName() + "key10"); + bucket.renameKey("dir1/", "dir10/"); + assertTableRowCount(renameTable, countMap.get(renameTable.getName()) + 2); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + "dir10/" + bucket.getName() + "key10", false); + assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); + // Key 2 is deleted here, which will be reclaimed here as + // it is not being referenced by previous snapshot. + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), "dir2", true); + assertTableRowCount(deletedDirTable, countMap.get(deletedDirTable.getName()) + 1); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); + // Delete Snapshot 2. + client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); + assertTableRowCount(snapshotInfoTable, countMap.get(snapshotInfoTable.getName()) + 2); } + private void verifySnapshotChain(SnapshotInfo deletedSnapshot, String nextSnapshot) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index cba7311b3b4..4476cbc3e38 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -19,6 +19,7 @@ import java.time.Duration; import java.util.List; +import java.util.Map; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -51,6 +52,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode; +import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -236,6 +238,8 @@ public void testEmptyMissingContainerDownNode() throws Exception { // Bring down the Datanode that had the container replica. cluster.shutdownHddsDatanode(pipeline.getFirstNode()); + // Since we no longer add EMPTY_MISSING containers to the table, we should + // have zero EMPTY_MISSING containers in the DB but their information will be logged. LambdaTestUtils.await(25000, 1000, () -> { List allEmptyMissingContainers = reconContainerManager.getContainerSchemaManager() @@ -243,9 +247,18 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. 
EMPTY_MISSING, 0, 1000); - return (allEmptyMissingContainers.size() == 1); - }); + // Check if EMPTY_MISSING containers are not added to the DB and their count is logged + Map> + unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() + .getUnhealthyContainerStateStatsMap(); + + // Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state + return allEmptyMissingContainers.size() == 0 && + unhealthyContainerStateStatsMap.get( + ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L) == 1; + }); // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. @@ -272,7 +285,17 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - return (allEmptyMissingContainers.isEmpty()); + + + Map> + unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() + .getUnhealthyContainerStateStatsMap(); + + // Return true if the size of the fetched containers is 0 and the log shows 0 for EMPTY_MISSING state + return allEmptyMissingContainers.size() == 0 && + unhealthyContainerStateStatsMap.get( + ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L) == 0; }); // Now remove keys from container. This data is used to @@ -283,8 +306,8 @@ public void testEmptyMissingContainerDownNode() throws Exception { reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); } - // Check existing container state in UNHEALTHY_CONTAINER table - // will be updated as EMPTY_MISSING + // Since we no longer add EMPTY_MISSING containers to the table, we should + // have zero EMPTY_MISSING containers in the DB but their information will be logged. LambdaTestUtils.await(25000, 1000, () -> { List allEmptyMissingContainers = reconContainerManager.getContainerSchemaManager() @@ -292,7 +315,16 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - return (allEmptyMissingContainers.size() == 1); + + Map> + unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() + .getUnhealthyContainerStateStatsMap(); + + // Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state + return allEmptyMissingContainers.size() == 0 && + unhealthyContainerStateStatsMap.get( + ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L) == 1; }); // Now restart the cluster and verify the container is no longer missing. 
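Annotation: the Recon assertions above use LambdaTestUtils.await to poll until the EMPTY_MISSING count reported by the health task's stats map matches expectations, since that state is updated asynchronously. Below is a JDK-only sketch of the poll-until-true idiom those checks rely on; the helper name and timings are illustrative, not the Hadoop test utility itself.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class PollUntilTrueSketch {
  // Simplified analogue of LambdaTestUtils.await(timeout, interval, check).
  static void await(long timeoutMillis, long intervalMillis, BooleanSupplier check)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!check.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(intervalMillis);
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // The condition becomes true ~200 ms after start, mimicking an asynchronously updated stats map.
    await(25_000, 100, () -> System.currentTimeMillis() - start > 200);
    System.out.println("condition observed");
  }
}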
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index e79797993c1..126adbdc51e 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -153,6 +153,7 @@ enum Type { GetServerDefaults = 134; GetQuotaRepairStatus = 135; StartQuotaRepair = 136; + SnapshotMoveTableKeys = 137; } enum SafeMode { @@ -295,6 +296,7 @@ message OMRequest { optional ServerDefaultsRequest ServerDefaultsRequest = 132; optional GetQuotaRepairStatusRequest GetQuotaRepairStatusRequest = 133; optional StartQuotaRepairRequest StartQuotaRepairRequest = 134; + optional SnapshotMoveTableKeysRequest SnapshotMoveTableKeysRequest = 135; } message OMResponse { @@ -886,6 +888,7 @@ message SnapshotInfo { optional uint64 exclusiveReplicatedSize = 18; // note: shared sizes can be calculated from: referenced - exclusive optional bool deepCleanedDeletedDir = 19; + optional bytes lastTransactionInfo = 20; } message SnapshotDiffJobProto { @@ -1980,6 +1983,13 @@ message SnapshotMoveDeletedKeysRequest { repeated string deletedDirsToMove = 5; } +message SnapshotMoveTableKeysRequest { + optional hadoop.hdds.UUID fromSnapshotID = 1; + repeated SnapshotMoveKeyInfos deletedKeys = 2; + repeated SnapshotMoveKeyInfos deletedDirs = 3; + repeated hadoop.hdds.KeyValue renamedKeys = 4; +} + message SnapshotMoveKeyInfos { optional string key = 1; repeated KeyInfo keyInfos = 2; diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index cf0819ca527..67f7ce2f07c 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -116,6 +116,22 @@ public interface OMMetadataManager extends DBStoreHAManager { */ String getBucketKey(String volume, String bucket); + /** + * Given a volume and bucket, return the corresponding DB key prefix. + * + * @param volume - Volume name + * @param bucket - Bucket name + */ + String getBucketKeyPrefix(String volume, String bucket); + + /** + * Given a volume and bucket, return the corresponding DB key prefix for FSO buckets. + * + * @param volume - Volume name + * @param bucket - Bucket name + */ + String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException; + /** * Given a volume, bucket and a key, return the corresponding DB key. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index b7fa5d746fb..068ba9aa4ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,6 +18,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -28,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.hdds.utils.BackgroundService; +import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; @@ -35,6 +37,7 @@ import java.io.IOException; import java.time.Duration; +import java.util.ArrayList; import java.util.List; /** @@ -119,6 +122,29 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey, */ PendingKeysDeletion getPendingDeletionKeys(int count) throws IOException; + /** + * Returns a list rename entries from the snapshotRenamedTable. + * + * @param size max number of keys to return. + * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the + * underlying metadataManager. + * @throws IOException + */ + List> getRenamesKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException; + + + /** + * Returns a list deleted entries from the deletedTable. + * + * @param size max number of keys to return. + * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the + * underlying metadataManager. + * @throws IOException + */ + List>> getDeletedKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException; + /** * Returns the names of up to {@code count} open keys whose age is * greater than or equal to {@code expireThreshold}. @@ -216,6 +242,26 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, */ Table.KeyValue getPendingDeletionDir() throws IOException; + /** + * Returns an iterator for pending deleted directories. + * @throws IOException + */ + TableIterator> getDeletedDirEntries( + String volume, String bucket) throws IOException; + + default List> getDeletedDirEntries(String volume, String bucket, int size) + throws IOException { + List> deletedDirEntries = new ArrayList<>(size); + try (TableIterator> iterator = + getDeletedDirEntries(volume, bucket)) { + while (deletedDirEntries.size() < size && iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + deletedDirEntries.add(Table.newKeyValue(kv.getKey(), kv.getValue())); + } + return deletedDirEntries; + } + } + /** * Returns all sub directories under the given parent directory. * @@ -243,7 +289,7 @@ List getPendingDeletionSubFiles(long volumeId, * Returns the instance of Directory Deleting Service. * @return Background service. 
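Annotation: the new getDeletedDirEntries default method above drains a TableIterator into a list of at most `size` entries and relies on try-with-resources to close the iterator. A minimal sketch of that bounded-drain pattern over a plain java.util Iterator; Map.Entry is used here as a stand-in for Table.KeyValue.

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public final class BoundedDrainSketch {
  // Copies at most 'size' entries out of the iterator, mirroring the default method above.
  static <K, V> List<Map.Entry<K, V>> drain(Iterator<Map.Entry<K, V>> iterator, int size) {
    List<Map.Entry<K, V>> out = new ArrayList<>(size);
    while (out.size() < size && iterator.hasNext()) {
      Map.Entry<K, V> kv = iterator.next();
      out.add(new SimpleImmutableEntry<>(kv.getKey(), kv.getValue()));
    }
    return out;
  }

  public static void main(String[] args) {
    List<Map.Entry<String, String>> rows = Arrays.asList(
        new SimpleImmutableEntry<>("/vol/bucket/dir1", "info1"),
        new SimpleImmutableEntry<>("/vol/bucket/dir2", "info2"),
        new SimpleImmutableEntry<>("/vol/bucket/dir3", "info3"));
    // Only the first two entries are copied; the caller controls the batch size.
    System.out.println(drain(rows.iterator(), 2));
  }
}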
*/ - BackgroundService getDirDeletingService(); + DirectoryDeletingService getDirDeletingService(); /** * Returns the instance of Open Key Cleanup Service. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 6d276d95284..e3f56f2deaf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -37,6 +37,7 @@ import java.util.Stack; import java.util.TreeMap; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -86,6 +87,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; @@ -189,7 +191,7 @@ public class KeyManagerImpl implements KeyManager { private final KeyProviderCryptoExtension kmsProvider; private final boolean enableFileSystemPaths; - private BackgroundService dirDeletingService; + private DirectoryDeletingService dirDeletingService; private final OMPerformanceMetrics metrics; private BackgroundService openKeyCleanupService; @@ -305,7 +307,7 @@ public void start(OzoneConfiguration configuration) { try { snapshotDeletingService = new SnapshotDeletingService( snapshotServiceInterval, snapshotServiceTimeout, - ozoneManager, scmClient.getBlockClient()); + ozoneManager); snapshotDeletingService.start(); } catch (IOException e) { LOG.error("Error starting Snapshot Deleting Service", e); @@ -662,6 +664,60 @@ public PendingKeysDeletion getPendingDeletionKeys(final int count) .getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager()); } + private List> getTableEntries(String startKey, + TableIterator> tableIterator, + Function valueFunction, int size) throws IOException { + List> entries = new ArrayList<>(); + /* Seek to the start key if it not null. The next key in queue is ensured to start with the bucket + prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this. + */ + if (startKey != null) { + tableIterator.seek(startKey); + tableIterator.seekToFirst(); + } + int currentCount = 0; + while (tableIterator.hasNext() && currentCount < size) { + Table.KeyValue kv = tableIterator.next(); + if (kv != null) { + entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue()))); + currentCount++; + } + } + return entries; + } + + private Optional getBucketPrefix(String volumeName, String bucketName, boolean isFSO) throws IOException { + // Bucket prefix would be empty if both volume & bucket is empty i.e. either null or "". + if (StringUtils.isEmpty(volumeName) && StringUtils.isEmpty(bucketName)) { + return Optional.empty(); + } else if (StringUtils.isEmpty(bucketName) || StringUtils.isEmpty(volumeName)) { + throw new IOException("One of volume : " + volumeName + ", bucket: " + bucketName + " is empty." + + " Either both should be empty or none of the arguments should be empty"); + } + return isFSO ? 
Optional.of(metadataManager.getBucketKeyPrefixFSO(volumeName, bucketName)) : + Optional.of(metadataManager.getBucketKeyPrefix(volumeName, bucketName)); + } + + @Override + public List> getRenamesKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + try (TableIterator> + renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) { + return getTableEntries(startKey, renamedKeyIter, Function.identity(), size); + } + } + + @Override + public List>> getDeletedKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + try (TableIterator> + delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { + return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, size); + } + } + @Override public ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, BucketLayout bucketLayout, Duration leaseThreshold) throws IOException { @@ -688,7 +744,7 @@ public KeyDeletingService getDeletingService() { } @Override - public BackgroundService getDirDeletingService() { + public DirectoryDeletingService getDirDeletingService() { return dirDeletingService; } @@ -723,8 +779,7 @@ public boolean isSstFilteringSvcEnabled() { TimeUnit.MILLISECONDS); return serviceInterval != DISABLE_VALUE; } - - + @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { @@ -1325,7 +1380,6 @@ private OmKeyInfo createFakeDirIfShould(String volume, String bucket, return null; } - private OzoneFileStatus getOzoneFileStatusFSO(OmKeyArgs args, String clientAddress, boolean skipFileNotFoundError) throws IOException { final String volumeName = args.getVolumeName(); @@ -1784,17 +1838,13 @@ private List buildFinalStatusList( } fileStatusFinalList.add(fileStatus); } - return sortPipelineInfo(fileStatusFinalList, keyInfoList, omKeyArgs, clientAddress); } - private List sortPipelineInfo( List fileStatusFinalList, List keyInfoList, OmKeyArgs omKeyArgs, String clientAddress) throws IOException { - - if (omKeyArgs.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -1976,6 +2026,13 @@ public Table.KeyValue getPendingDeletionDir() return null; } + @Override + public TableIterator> getDeletedDirEntries( + String volume, String bucket) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, true); + return metadataManager.getDeletedDirTable().iterator(bucketPrefix.orElse("")); + } + @Override public List getPendingDeletionSubDirs(long volumeId, long bucketId, OmKeyInfo parentInfo, long numEntries) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index ee92dbc2fde..2e85fef162c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -824,7 +824,7 @@ public String getUserKey(String user) { /** * Given a volume and bucket, return the corresponding DB key. 
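Annotation: getBucketPrefix above treats "both volume and bucket empty" as a full-table scan (Optional.empty), rejects "only one empty" as invalid input, and otherwise builds either the object-store or the FSO bucket prefix. A minimal sketch of that guard; the prefix layouts shown are hypothetical stand-ins for getBucketKeyPrefix / getBucketKeyPrefixFSO.

import java.io.IOException;
import java.util.Optional;

public final class BucketPrefixGuardSketch {
  static boolean isEmpty(String s) {
    return s == null || s.isEmpty();
  }

  // Mirrors the guard in getBucketPrefix: empty+empty -> no prefix, one empty -> error, else a prefix.
  static Optional<String> bucketPrefix(String volume, String bucket, boolean fso) throws IOException {
    if (isEmpty(volume) && isEmpty(bucket)) {
      return Optional.empty();                       // iterate the whole table
    }
    if (isEmpty(volume) || isEmpty(bucket)) {
      throw new IOException("Either both volume and bucket must be set, or neither");
    }
    // Hypothetical prefix layouts; the real ones come from OMMetadataManager.
    return Optional.of(fso ? "/volumeId/bucketId/" : "/" + volume + "/" + bucket + "/");
  }

  public static void main(String[] args) throws IOException {
    System.out.println(bucketPrefix("vol", "bucket1", false).orElse(""));  // "/vol/bucket1/"
    System.out.println(bucketPrefix(null, null, false).isPresent());       // false -> full scan
  }
}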
* - * @param volume - User name + * @param volume - Volume name * @param bucket - Bucket name */ @Override @@ -838,6 +838,22 @@ public String getBucketKey(String volume, String bucket) { return builder.toString(); } + /** + * {@inheritDoc} + */ + @Override + public String getBucketKeyPrefix(String volume, String bucket) { + return getOzoneKey(volume, bucket, OM_KEY_PREFIX); + } + + /** + * {@inheritDoc} + */ + @Override + public String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException { + return getOzoneKeyFSO(volume, bucket, OM_KEY_PREFIX); + } + @Override public String getOzoneKey(String volume, String bucket, String key) { StringBuilder builder = new StringBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 18b29118182..dde5b22e793 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; @@ -674,6 +675,41 @@ private ReferenceCounted getSnapshot(String snapshotTableKey, boolea return snapshotCache.get(snapshotInfo.getSnapshotId()); } + /** + * Checks if the last transaction performed on the snapshot has been flushed to disk. + * @param metadataManager Metadatamanager of Active OM. + * @param snapshotTableKey table key corresponding to snapshot in snapshotInfoTable. + * @return True if the changes have been flushed to DB otherwise false + * @throws IOException + */ + public static boolean areSnapshotChangesFlushedToDB(OMMetadataManager metadataManager, String snapshotTableKey) + throws IOException { + // Need this info from cache since the snapshot could have been updated only on cache and not on disk. + SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotTableKey); + return areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo); + } + + /** + * Checks if the last transaction performed on the snapshot has been flushed to disk. + * @param metadataManager Metadatamanager of Active OM. + * @param snapshotInfo SnapshotInfo value. + * @return True if the changes have been flushed to DB otherwise false. It would return true if the snapshot + * provided is null meaning the snapshot doesn't exist. + * @throws IOException + */ + public static boolean areSnapshotChangesFlushedToDB(OMMetadataManager metadataManager, SnapshotInfo snapshotInfo) + throws IOException { + if (snapshotInfo != null) { + TransactionInfo snapshotTransactionInfo = snapshotInfo.getLastTransactionInfo() != null ? + TransactionInfo.fromByteString(snapshotInfo.getLastTransactionInfo()) : null; + TransactionInfo omTransactionInfo = TransactionInfo.readTransactionInfo(metadataManager); + // If transactionInfo field is null then return true to keep things backward compatible. + return snapshotTransactionInfo == null || omTransactionInfo.compareTo(snapshotTransactionInfo) >= 0; + } + return true; + } + + /** * Returns OmSnapshot object and skips active check. 
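Annotation: areSnapshotChangesFlushedToDB above compares the snapshot's recorded lastTransactionInfo (stamped into SnapshotInfo by the purge/move requests later in this patch) against the OM's persisted TransactionInfo, and a missing marker is treated as already flushed for backward compatibility. The sketch below shows only that comparison; TxnMarker is a hypothetical stand-in for TransactionInfo, and ordering by (term, index) is an assumption about how those markers compare.

public final class FlushCheckSketch {
  // Hypothetical stand-in for TransactionInfo: ordered by (term, index).
  static final class TxnMarker implements Comparable<TxnMarker> {
    final long term;
    final long index;

    TxnMarker(long term, long index) {
      this.term = term;
      this.index = index;
    }

    @Override
    public int compareTo(TxnMarker other) {
      return term != other.term ? Long.compare(term, other.term) : Long.compare(index, other.index);
    }
  }

  // Mirrors the check: a null snapshot marker keeps the old behaviour and counts as flushed.
  static boolean changesFlushed(TxnMarker snapshotMarker, TxnMarker persistedOmMarker) {
    return snapshotMarker == null || persistedOmMarker.compareTo(snapshotMarker) >= 0;
  }

  public static void main(String[] args) {
    TxnMarker snapshotTxn = new TxnMarker(3, 120);
    System.out.println(changesFlushed(snapshotTxn, new TxnMarker(3, 119))); // false: not yet flushed
    System.out.println(changesFlushed(snapshotTxn, new TxnMarker(3, 120))); // true
    System.out.println(changesFlushed(null, new TxnMarker(3, 1)));          // true: backward compatible
  }
}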
* This should only be used for API calls initiated by background service e.g. purgeKeys, purgeSnapshot, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index b6903ca9e91..0038bca2e32 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2065,6 +2065,7 @@ private void addOMNodeToPeers(String newOMNodeId) throws IOException { } catch (IOException e) { LOG.error("{}: Couldn't add OM {} to peer list.", getOMNodeId(), newOMNodeId); + return; } if (omRatisSnapshotProvider == null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java index 5a4ff643157..2301bbbdbf2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java @@ -164,12 +164,8 @@ private static OmBucketInfo resolveBucketInfoLink( * buck-src has the actual BucketLayout that will be used by the * links. */ - try { - return resolveBucketInfoLink(metadataManager, - buckInfo.getSourceVolume(), buckInfo.getSourceBucket(), visited); - } catch (IOException e) { - throw e; - } + return resolveBucketInfoLink(metadataManager, buckInfo.getSourceVolume(), + buckInfo.getSourceBucket(), visited); } return buckInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index b069a174cd0..e4102665d62 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -24,8 +24,10 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.NoSuchElementException; @@ -56,6 +58,7 @@ public class SnapshotChainManager { private final ConcurrentMap snapshotIdToTableKey; private UUID latestGlobalSnapshotId; private final boolean snapshotChainCorrupted; + private UUID oldestGlobalSnapshotId; public SnapshotChainManager(OMMetadataManager metadataManager) { globalSnapshotChain = Collections.synchronizedMap(new LinkedHashMap<>()); @@ -104,6 +107,8 @@ private void addSnapshotGlobal(UUID snapshotID, UUID prevGlobalID) // On add snapshot, set previous snapshot entry nextSnapshotID = // snapshotID globalSnapshotChain.get(prevGlobalID).setNextSnapshotId(snapshotID); + } else { + oldestGlobalSnapshotId = snapshotID; } globalSnapshotChain.put(snapshotID, @@ -171,7 +176,6 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { // for node removal UUID next = globalSnapshotChain.get(snapshotID).getNextSnapshotId(); UUID prev = globalSnapshotChain.get(snapshotID).getPreviousSnapshotId(); - if (prev != null && !globalSnapshotChain.containsKey(prev)) { throw new IOException(String.format( "Global snapshot chain corruption. 
" + @@ -197,6 +201,9 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { if (latestGlobalSnapshotId.equals(snapshotID)) { latestGlobalSnapshotId = prev; } + if (snapshotID.equals(oldestGlobalSnapshotId)) { + oldestGlobalSnapshotId = next; + } return true; } else { // snapshotID not found in snapshot chain, log warning and return @@ -382,6 +389,42 @@ public UUID getLatestGlobalSnapshotId() throws IOException { return latestGlobalSnapshotId; } + /** + * Get oldest of global snapshot in snapshot chain. + */ + public UUID getOldestGlobalSnapshotId() throws IOException { + validateSnapshotChain(); + return oldestGlobalSnapshotId; + } + + public Iterator iterator(final boolean reverse) throws IOException { + validateSnapshotChain(); + return new Iterator() { + private UUID currentSnapshotId = reverse ? getLatestGlobalSnapshotId() : getOldestGlobalSnapshotId(); + @Override + public boolean hasNext() { + return currentSnapshotId != null; + } + + @Override + public UUID next() { + try { + UUID prevSnapshotId = currentSnapshotId; + if (reverse && hasPreviousGlobalSnapshot(currentSnapshotId) || + !reverse && hasNextGlobalSnapshot(currentSnapshotId)) { + currentSnapshotId = + reverse ? previousGlobalSnapshot(currentSnapshotId) : nextGlobalSnapshot(currentSnapshotId); + } else { + currentSnapshotId = null; + } + return prevSnapshotId; + } catch (IOException e) { + throw new UncheckedIOException("Error while getting next snapshot for " + currentSnapshotId, e); + } + } + }; + } + /** * Get latest path snapshot in snapshot chain. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index 6e1c9da34cb..bd462224e9d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om; import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -35,15 +34,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,8 +65,6 @@ */ public class TrashOzoneFileSystem extends FileSystem { - private static final RpcController NULL_RPC_CONTROLLER = null; - private static final int OZONE_FS_ITERATE_BATCH_SIZE = 100; private static final int OZONE_MAX_LIST_KEYS_SIZE = 10000; @@ -97,34 +91,15 @@ public TrashOzoneFileSystem(OzoneManager ozoneManager) throws IOException { ozoneConfiguration = OzoneConfiguration.of(getConf()); } - private RaftClientRequest getRatisRequest( - OzoneManagerProtocolProtos.OMRequest 
omRequest) { - return RaftClientRequest.newBuilder() - .setClientId(CLIENT_ID) - .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) - .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.getAndIncrement()) - .setMessage( - Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - } - private void submitRequest(OzoneManagerProtocolProtos.OMRequest omRequest) throws Exception { ozoneManager.getMetrics().incNumTrashWriteRequests(); if (ozoneManager.isRatisEnabled()) { - OMClientRequest omClientRequest = - OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager); + // perform preExecute as ratis submit do no perform preExecute + OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager); omRequest = omClientRequest.preExecute(ozoneManager); - RaftClientRequest req = getRatisRequest(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, req); - } else { - ozoneManager.getOmServerProtocol(). - submitRequest(NULL_RPC_CONTROLLER, omRequest); } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.getAndIncrement()); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index a6fcc40dda1..8e4cc9fbf4d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; @@ -426,8 +427,12 @@ private String addToBatch(Queue buffer, BatchOperation batchOperation) { * in RocksDB callback flush. If multiple operations are flushed in one * specific batch, we are not sure at the flush of which specific operation * the callback is coming. - * There could be a possibility of race condition that is exposed to rocksDB - * behaviour for the batch. + * PurgeSnapshot is also considered a barrier, since purgeSnapshot transaction on a standalone basis is an + * idempotent operation. Once the directory gets deleted the previous transactions that have been performed on the + * snapshotted rocksdb would start failing on replay since those transactions have not been committed but the + * directory could have been partially deleted/ fully deleted. This could also lead to inconsistencies in the DB + * reads from the purged rocksdb if operations are not performed consciously. + * There could be a possibility of race condition that is exposed to rocksDB behaviour for the batch. * Hence, we treat createSnapshot as separate batch flush. *

* e.g. requestBuffer = [request1, request2, snapshotRequest1, @@ -435,19 +440,17 @@ private String addToBatch(Queue buffer, BatchOperation batchOperation) { * response = [[request1, request2], [snapshotRequest1], [request3], * [snapshotRequest2], [request4]] */ - private List> splitReadyBufferAtCreateSnapshot() { + private synchronized List> splitReadyBufferAtCreateSnapshot() { final List> response = new ArrayList<>(); - OMResponse previousOmResponse = null; for (final Entry entry : readyBuffer) { OMResponse omResponse = entry.getResponse().getOMResponse(); // New queue gets created in three conditions: // 1. It is first element in the response, - // 2. Current request is createSnapshot request. - // 3. Previous request was createSnapshot request. - if (response.isEmpty() || omResponse.hasCreateSnapshotResponse() - || (previousOmResponse != null && - previousOmResponse.hasCreateSnapshotResponse())) { + // 2. Current request is createSnapshot/purgeSnapshot request. + // 3. Previous request was createSnapshot/purgeSnapshot request. + if (response.isEmpty() || isStandaloneBatchCmdTypes(omResponse) + || isStandaloneBatchCmdTypes(previousOmResponse)) { response.add(new LinkedList<>()); } @@ -458,6 +461,15 @@ private List> splitReadyBufferAtCreateSnapshot() { return response; } + private static boolean isStandaloneBatchCmdTypes(OMResponse response) { + if (response == null) { + return false; + } + final OzoneManagerProtocolProtos.Type type = response.getCmdType(); + return type == OzoneManagerProtocolProtos.Type.SnapshotPurge + || type == OzoneManagerProtocolProtos.Type.CreateSnapshot; + } + private void addCleanupEntry(Entry entry, Map> cleanupEpochs) { Class responseClass = entry.getResponse().getClass(); @@ -612,7 +624,7 @@ int getCurrentBufferSize() { return currentBuffer.size(); } - int getReadyBufferSize() { + synchronized int getReadyBufferSize() { return readyBuffer.size(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 78d6ed89d2d..af4d42ad68a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -301,15 +301,23 @@ private RaftClientRequest createRaftRequest(OMRequest omRequest) { } /** - * API used internally from OzoneManager Server when requests needs to be - * submitted to ratis, where the crafted RaftClientRequest is passed along. + * API used internally from OzoneManager Server when requests need to be submitted. 
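Annotation: with this change, splitReadyBufferAtCreateSnapshot starts a new sub-batch whenever the current or the previous response is a CreateSnapshot or SnapshotPurge, so those "barrier" transactions are always flushed in a batch of their own. The following is a minimal sketch of that splitting rule over plain strings, reproducing the javadoc example above; the barrier predicate stands in for isStandaloneBatchCmdTypes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.function.Predicate;

public final class BarrierSplitSketch {
  // Splits ops into batches; a new batch starts when the current or the previous op is a barrier.
  static List<Queue<String>> split(List<String> ops, Predicate<String> isBarrier) {
    List<Queue<String>> batches = new ArrayList<>();
    String previous = null;
    for (String op : ops) {
      if (batches.isEmpty() || isBarrier.test(op) || (previous != null && isBarrier.test(previous))) {
        batches.add(new LinkedList<>());
      }
      batches.get(batches.size() - 1).add(op);
      previous = op;
    }
    return batches;
  }

  public static void main(String[] args) {
    List<String> ops = Arrays.asList("req1", "req2", "createSnapshot", "req3", "purgeSnapshot", "req4");
    Predicate<String> barrier = op -> op.equals("createSnapshot") || op.equals("purgeSnapshot");
    // Expected: [[req1, req2], [createSnapshot], [req3], [purgeSnapshot], [req4]]
    System.out.println(split(ops, barrier));
  }
}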
* @param omRequest - * @param raftClientRequest + * @param cliId + * @param callId * @return OMResponse * @throws ServiceException */ - public OMResponse submitRequest(OMRequest omRequest, - RaftClientRequest raftClientRequest) throws ServiceException { + public OMResponse submitRequest(OMRequest omRequest, ClientId cliId, long callId) throws ServiceException { + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(cliId) + .setServerId(getRaftPeerId()) + .setGroupId(getRaftGroupId()) + .setCallId(callId) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); RaftClientReply raftClientReply = submitRequestToRatis(raftClientRequest); return createOmResponse(omRequest, raftClientReply); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 5dc640c742c..5e324b376fb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -19,6 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import java.io.File; import java.nio.file.InvalidPathException; @@ -77,6 +78,7 @@ import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest; @@ -98,6 +100,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.ClientId; import org.rocksdb.RocksDBException; import java.io.IOException; @@ -117,6 +120,7 @@ public final class OzoneManagerRatisUtils { private static final Logger LOG = LoggerFactory .getLogger(OzoneManagerRatisUtils.class); + private static final RpcController NULL_RPC_CONTROLLER = null; private OzoneManagerRatisUtils() { } @@ -229,6 +233,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new OMSnapshotMoveDeletedKeysRequest(omRequest); + case SnapshotMoveTableKeys: + return new OMSnapshotMoveTableKeysRequest(omRequest); case SnapshotPurge: return new OMSnapshotPurgeRequest(omRequest); case SetSnapshotProperty: @@ -502,4 +508,13 @@ public static GrpcTlsConfig createServerTlsConfig(SecurityConfig conf, return null; } + + public static OzoneManagerProtocolProtos.OMResponse submitRequest( + OzoneManager om, OMRequest omRequest, ClientId clientId, long callId) throws ServiceException { + if (om.isRatisEnabled()) { + return om.getOmRatisServer().submitRequest(omRequest, clientId, callId); + } else { + return 
om.getOmServerProtocol().submitRequest(NULL_RPC_CONTROLLER, omRequest); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index cb10c0d2e40..dd08ff17165 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -25,6 +25,7 @@ import java.util.Map; import java.util.Set; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -149,6 +150,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } } + if (fromSnapshotInfo != null) { + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + } } catch (IOException ex) { // Case of IOException for fromProtobuf will not happen // as this is created and send within OM diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 5738d7945bf..14c80bb7a93 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -21,6 +21,10 @@ import java.io.IOException; import java.util.ArrayList; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; @@ -61,6 +65,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? purgeKeysRequest.getSnapshotTableKey() : null; List keysToBePurgedList = new ArrayList<>(); + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); @@ -71,17 +76,27 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keysToBePurgedList.add(deletedKey); } } + final SnapshotInfo fromSnapshotInfo; try { - SnapshotInfo fromSnapshotInfo = null; - if (fromSnapshot != null) { - fromSnapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot); - } - omClientResponse = new OMKeyPurgeResponse(omResponse.build(), - keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); + fromSnapshotInfo = fromSnapshot == null ? 
null : SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot); } catch (IOException ex) { - omClientResponse = new OMKeyPurgeResponse(createErrorOMResponse(omResponse, ex)); + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, ex)); + } + + // Setting transaction info for snapshot, this is to prevent duplicate purge requests to OM from background + // services. + try { + if (fromSnapshotInfo != null) { + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + } + } catch (IOException e) { + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); } + omClientResponse = new OMKeyPurgeResponse(omResponse.build(), keysToBePurgedList, fromSnapshotInfo, + keysToUpdateList); return omClientResponse; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 3aa4151cea3..2ded4f6a83e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -166,7 +167,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ((RDBStore) omMetadataManager.getStore()).getDb() .getLatestSequenceNumber(); snapshotInfo.setDbTxSequenceNumber(dbLatestSequenceNumber); - + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); // Snapshot referenced size should be bucket's used bytes OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index df4781bb0ca..2ddf308bb50 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java @@ -20,6 +20,9 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; @@ -79,18 +82,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Check the snapshot exists. 
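+    // The returned SnapshotInfo is not used here; the lookup only verifies that the snapshot entry still exists.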
SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot.getTableKey()); - nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, ozoneManager); + nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); // Get next non-deleted snapshot. - List nextDBKeysList = - moveDeletedKeysRequest.getNextDBKeysList(); - List reclaimKeysList = - moveDeletedKeysRequest.getReclaimKeysList(); - List renamedKeysList = - moveDeletedKeysRequest.getRenamedKeysList(); - List movedDirs = - moveDeletedKeysRequest.getDeletedDirsToMoveList(); - + List nextDBKeysList = moveDeletedKeysRequest.getNextDBKeysList(); + List reclaimKeysList = moveDeletedKeysRequest.getReclaimKeysList(); + List renamedKeysList = moveDeletedKeysRequest.getRenamedKeysList(); + List movedDirs = moveDeletedKeysRequest.getDeletedDirsToMoveList(); + + // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshot)); + if (nextSnapshot != null) { + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), nextSnapshot)); + } omClientResponse = new OMSnapshotMoveDeletedKeysResponse( omResponse.build(), fromSnapshot, nextSnapshot, nextDBKeysList, reclaimKeysList, renamedKeysList, movedDirs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java new file mode 100644 index 00000000000..0eb0d3cd166 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveTableKeysResponse; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; + +/** + * Handles OMSnapshotMoveTableKeysRequest Request. + * This is an OM internal request. Does not need @RequireSnapshotFeatureState. + */ +public class OMSnapshotMoveTableKeysRequest extends OMClientRequest { + + private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotMoveTableKeysRequest.class); + + public OMSnapshotMoveTableKeysRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); + SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); + SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, + snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); + String bucketKeyPrefix = omMetadataManager.getBucketKeyPrefix(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + String bucketKeyPrefixFSO = omMetadataManager.getBucketKeyPrefixFSO(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + + Set keys = new HashSet<>(); + List deletedKeys = new ArrayList<>(moveTableKeysRequest.getDeletedKeysList().size()); + + //validate deleted key starts with bucket prefix.[///] + for (SnapshotMoveKeyInfos deletedKey : moveTableKeysRequest.getDeletedKeysList()) { + // Filter only deleted keys with at least one keyInfo per key. 
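+      // Entries with an empty keyInfo list are silently dropped from the request; the prefix and duplicate checks below only apply to the entries that are kept.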
+ if (!deletedKey.getKeyInfosList().isEmpty()) { + deletedKeys.add(deletedKey); + if (!deletedKey.getKey().startsWith(bucketKeyPrefix)) { + throw new OMException("Deleted Key: " + deletedKey + " doesn't start with prefix " + bucketKeyPrefix, + OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(deletedKey.getKey())) { + throw new OMException("Duplicate Deleted Key: " + deletedKey + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(deletedKey.getKey()); + } + } + } + + keys.clear(); + List renamedKeysList = new ArrayList<>(moveTableKeysRequest.getRenamedKeysList().size()); + //validate rename key starts with bucket prefix.[///] + for (HddsProtos.KeyValue renamedKey : moveTableKeysRequest.getRenamedKeysList()) { + if (renamedKey.hasKey() && renamedKey.hasValue()) { + renamedKeysList.add(renamedKey); + if (!renamedKey.getKey().startsWith(bucketKeyPrefix)) { + throw new OMException("Rename Key: " + renamedKey + " doesn't start with prefix " + bucketKeyPrefix, + OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(renamedKey.getKey())) { + throw new OMException("Duplicate rename Key: " + renamedKey + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(renamedKey.getKey()); + } + } + } + keys.clear(); + + // Filter only deleted dirs with only one keyInfo per key. + List deletedDirs = new ArrayList<>(moveTableKeysRequest.getDeletedDirsList().size()); + //validate deleted key starts with bucket FSO path prefix.[///] + for (SnapshotMoveKeyInfos deletedDir : moveTableKeysRequest.getDeletedDirsList()) { + // Filter deleted directories with exactly one keyInfo per key. + if (deletedDir.getKeyInfosList().size() == 1) { + deletedDirs.add(deletedDir); + if (!deletedDir.getKey().startsWith(bucketKeyPrefixFSO)) { + throw new OMException("Deleted dir: " + deletedDir + " doesn't start with prefix " + + bucketKeyPrefixFSO, OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(deletedDir.getKey())) { + throw new OMException("Duplicate deleted dir Key: " + deletedDir + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(deletedDir.getKey()); + } + } + } + return getOmRequest().toBuilder().setSnapshotMoveTableKeysRequest( + moveTableKeysRequest.toBuilder().clearDeletedDirs().clearDeletedKeys().clearRenamedKeys() + .addAllDeletedKeys(deletedKeys).addAllDeletedDirs(deletedDirs) + .addAllRenamedKeys(renamedKeysList).build()).build(); + } + + @Override + @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); + + SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); + + OMClientResponse omClientResponse; + OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); + try { + SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, + snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); + // If there is no snapshot in the chain after the current snapshot move the keys to Active Object Store. + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); + + // If next snapshot is not active then ignore move. 
This could be a redundant operation. + if (nextSnapshot != null && nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { + throw new OMException("Next snapshot: " + nextSnapshot + " in chain is not active.", + OMException.ResultCodes.INVALID_SNAPSHOT_ERROR); + } + + // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshot)); + if (nextSnapshot != null) { + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), nextSnapshot)); + } + omClientResponse = new OMSnapshotMoveTableKeysResponse(omResponse.build(), fromSnapshot, nextSnapshot, + moveTableKeysRequest.getDeletedKeysList(), moveTableKeysRequest.getDeletedDirsList(), + moveTableKeysRequest.getRenamedKeysList()); + } catch (IOException ex) { + omClientResponse = new OMSnapshotMoveTableKeysResponse(createErrorOMResponse(omResponse, ex)); + } + return omClientResponse; + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 47304b416ae..ca29d4e112b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; @@ -104,15 +105,22 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } SnapshotInfo nextSnapshot = - SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, ozoneManager); + SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); // Step 1: Update the deep clean flag for the next active snapshot updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex); // Step 2: Update the snapshot chain. updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex); - // Step 3: Purge the snapshot from SnapshotInfoTable cache. + // Step 3: Purge the snapshot from SnapshotInfoTable cache and also remove from the map. 
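+    // CacheValue.get(trxnLogIndex) carries no value, which marks the snapshot entry as deleted in the SnapshotInfoTable cache.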
omMetadataManager.getSnapshotInfoTable() .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + updatedSnapshotInfos.remove(fromSnapshot.getTableKey()); + } + + for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), + CacheValue.get(termIndex.getIndex(), snapshotInfo)); } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index 138e942e2b6..28c3e3d758e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -48,12 +48,13 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; /** * Response for {@link OMDirectoriesPurgeRequestWithFSO} request. */ @CleanupTableInfo(cleanupTables = {DELETED_TABLE, DELETED_DIR_TABLE, - DIRECTORY_TABLE, FILE_TABLE}) + DIRECTORY_TABLE, FILE_TABLE, SNAPSHOT_INFO_TABLE}) public class OMDirectoriesPurgeResponseWithFSO extends OmKeyResponse { private static final Logger LOG = LoggerFactory.getLogger(OMDirectoriesPurgeResponseWithFSO.class); @@ -97,6 +98,7 @@ public void addToDBBatch(OMMetadataManager metadataManager, fromSnapshotStore.commitBatchOperation(writeBatch); } } + metadataManager.getSnapshotInfoTable().putWithBatch(batchOp, fromSnapshotInfo.getTableKey(), fromSnapshotInfo); } else { processPaths(metadataManager, batchOp); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index b59c7d18d40..cd2f7d190f4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -39,12 +39,13 @@ import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; import static org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveDeletedKeysResponse.createRepeatedOmKeyInfo; /** * Response for {@link OMKeyPurgeRequest} request. 
*/ -@CleanupTableInfo(cleanupTables = {DELETED_TABLE}) +@CleanupTableInfo(cleanupTables = {DELETED_TABLE, SNAPSHOT_INFO_TABLE}) public class OMKeyPurgeResponse extends OmKeyResponse { private List purgeKeyList; private SnapshotInfo fromSnapshot; @@ -90,6 +91,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, fromSnapshotStore.commitBatchOperation(writeBatch); } } + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); } else { processKeys(batchOperation, omMetadataManager); processKeysToUpdate(batchOperation, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 3b5a7454f9d..7d1b7f237b2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -40,6 +40,7 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; /** * Response for OMSnapshotMoveDeletedKeysRequest. @@ -133,6 +134,11 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, } } + // Flush snapshot info to rocksDB. + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); + if (nextSnapshot != null) { + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); + } } private void deleteDirsFromSnapshot(BatchOperation batchOp, @@ -194,8 +200,7 @@ private void processKeys(BatchOperation batchOp, } for (SnapshotMoveKeyInfos dBKey : nextDBKeysList) { - RepeatedOmKeyInfo omKeyInfos = - createRepeatedOmKeyInfo(dBKey, metadataManager); + RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(dBKey, metadataManager); if (omKeyInfos == null) { continue; } @@ -218,36 +223,5 @@ public static RepeatedOmKeyInfo createRepeatedOmKeyInfo( return result; } - - private RepeatedOmKeyInfo createRepeatedOmKeyInfo( - SnapshotMoveKeyInfos snapshotMoveKeyInfos, - OMMetadataManager metadataManager) throws IOException { - String dbKey = snapshotMoveKeyInfos.getKey(); - List keyInfoList = snapshotMoveKeyInfos.getKeyInfosList(); - // When older version of keys are moved to the next snapshot's deletedTable - // The newer version might also be in the next snapshot's deletedTable and - // it might overwrite. This is to avoid that and also avoid having - // orphans blocks. 
- RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); - - for (KeyInfo keyInfo : keyInfoList) { - OmKeyInfo omKeyInfo = OmKeyInfo.getFromProtobuf(keyInfo); - if (result == null) { - result = new RepeatedOmKeyInfo(omKeyInfo); - } else if (!isSameAsLatestOmKeyInfo(omKeyInfo, result)) { - result.addOmKeyInfo(omKeyInfo); - } - } - - return result; - } - - private boolean isSameAsLatestOmKeyInfo(OmKeyInfo omKeyInfo, - RepeatedOmKeyInfo result) { - int size = result.getOmKeyInfoList().size(); - assert size > 0; - OmKeyInfo keyInfoFromRepeated = result.getOmKeyInfoList().get(size - 1); - return omKeyInfo.equals(keyInfoFromRepeated); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java new file mode 100644 index 00000000000..b06570afb14 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; + +/** + * Response for OMSnapshotMoveTableKeysRequest. 
+ */ +@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) +public class OMSnapshotMoveTableKeysResponse extends OMClientResponse { + + private SnapshotInfo fromSnapshot; + private SnapshotInfo nextSnapshot; + private List deletedKeys; + private List renameKeysList; + private List deletedDirs; + + public OMSnapshotMoveTableKeysResponse(OMResponse omResponse, + @Nonnull SnapshotInfo fromSnapshot, SnapshotInfo nextSnapshot, + List deletedKeys, + List deletedDirs, + List renamedKeys) { + super(omResponse); + this.fromSnapshot = fromSnapshot; + this.nextSnapshot = nextSnapshot; + this.deletedKeys = deletedKeys; + this.renameKeysList = renamedKeys; + this.deletedDirs = deletedDirs; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public OMSnapshotMoveTableKeysResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager) + .getOzoneManager().getOmSnapshotManager(); + + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { + + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); + + if (nextSnapshot != null) { + try (ReferenceCounted + rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { + + OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); + RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); + // Init Batch Operation for snapshot db. + try (BatchOperation writeBatch = nextSnapshotStore.initBatchOperation()) { + addKeysToNextSnapshot(writeBatch, nextOmSnapshot.getMetadataManager()); + nextSnapshotStore.commitBatchOperation(writeBatch); + nextSnapshotStore.getDb().flushWal(true); + nextSnapshotStore.getDb().flush(); + } + } + } else { + // Handle the case where there is no next Snapshot. + addKeysToNextSnapshot(batchOperation, omMetadataManager); + } + + // Update From Snapshot Deleted Table. + RDBStore fromSnapshotStore = (RDBStore) fromOmSnapshot.getMetadataManager().getStore(); + try (BatchOperation fromSnapshotBatchOp = fromSnapshotStore.initBatchOperation()) { + deleteKeysFromSnapshot(fromSnapshotBatchOp, fromOmSnapshot.getMetadataManager()); + fromSnapshotStore.commitBatchOperation(fromSnapshotBatchOp); + fromSnapshotStore.getDb().flushWal(true); + fromSnapshotStore.getDb().flush(); + } + } + + // Flush snapshot info to rocksDB. + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); + if (nextSnapshot != null) { + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); + } + } + + private void deleteKeysFromSnapshot(BatchOperation batchOp, OMMetadataManager fromSnapshotMetadataManager) + throws IOException { + for (SnapshotMoveKeyInfos deletedOmKeyInfo : deletedKeys) { + // Delete keys from current snapshot that are moved to next snapshot. + fromSnapshotMetadataManager.getDeletedTable().deleteWithBatch(batchOp, deletedOmKeyInfo.getKey()); + } + + // Delete rename keys from current snapshot that are moved to next snapshot. 
+ for (HddsProtos.KeyValue renameEntry : renameKeysList) { + fromSnapshotMetadataManager.getSnapshotRenamedTable().deleteWithBatch(batchOp, renameEntry.getKey()); + } + + // Delete deletedDir from current snapshot that are moved to next snapshot. + for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { + fromSnapshotMetadataManager.getDeletedDirTable().deleteWithBatch(batchOp, deletedDirInfo.getKey()); + } + + } + + private void addKeysToNextSnapshot(BatchOperation batchOp, OMMetadataManager metadataManager) throws IOException { + + // Add renamed keys to the next snapshot or active DB. + for (HddsProtos.KeyValue renameEntry : renameKeysList) { + metadataManager.getSnapshotRenamedTable().putWithBatch(batchOp, renameEntry.getKey(), renameEntry.getValue()); + } + // Add deleted keys to the next snapshot or active DB. + for (SnapshotMoveKeyInfos deletedKeyInfo : deletedKeys) { + RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(deletedKeyInfo, + metadataManager); + metadataManager.getDeletedTable().putWithBatch(batchOp, deletedKeyInfo.getKey(), omKeyInfos); + } + // Add deleted dir keys to the next snapshot or active DB. + for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { + metadataManager.getDeletedDirTable().putWithBatch(batchOp, deletedDirInfo.getKey(), + OmKeyInfo.getFromProtobuf(deletedDirInfo.getKeyInfosList().get(0))); + } + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 154bd474b6d..2c2d16bf14c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -33,11 +33,11 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; @@ -48,8 +48,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.util.Preconditions; import java.io.IOException; @@ -247,10 +245,7 @@ private int submitPurgeKeysRequest(List results, // Submit PurgeKeys request to OM try { - RaftClientRequest raftClientRequest = - createRaftClientRequestForPurge(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, - raftClientRequest); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { LOG.error("PurgeKey request failed. 
Will retry at next run."); return 0; @@ -259,20 +254,6 @@ private int submitPurgeKeysRequest(List results, return deletedCount; } - protected RaftClientRequest createRaftClientRequestForPurge( - OMRequest omRequest) { - return RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) - .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage( - Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - } - /** * Parse Volume and Bucket Name from ObjectKey and add it to given map of * keys to be purged per bucket. @@ -311,15 +292,7 @@ protected void submitPurgePaths(List requests, // Submit Purge paths request to OM try { - if (isRatisEnabled()) { - RaftClientRequest raftClientRequest = - createRaftClientRequestForPurge(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, - raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { LOG.error("PurgePaths request failed. Will retry at next run."); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index c8703c3c4c6..ad16c49d5e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -82,6 +82,7 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { private final long pathLimitPerTask; private final int ratisByteLimit; private final AtomicBoolean suspended; + private AtomicBoolean isRunningOnAOS; public DirectoryDeletingService(long interval, TimeUnit unit, long serviceTimeout, OzoneManager ozoneManager, @@ -98,6 +99,7 @@ public DirectoryDeletingService(long interval, TimeUnit unit, // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); this.suspended = new AtomicBoolean(false); + this.isRunningOnAOS = new AtomicBoolean(false); } private boolean shouldRun() { @@ -108,6 +110,10 @@ private boolean shouldRun() { return getOzoneManager().isLeaderReady() && !suspended.get(); } + public boolean isRunningOnAOS() { + return isRunningOnAOS.get(); + } + /** * Suspend the service. 
*/ @@ -127,11 +133,16 @@ public void resume() { @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DirectoryDeletingService.DirDeletingTask()); + queue.add(new DirectoryDeletingService.DirDeletingTask(this)); return queue; } - private class DirDeletingTask implements BackgroundTask { + private final class DirDeletingTask implements BackgroundTask { + private final DirectoryDeletingService directoryDeletingService; + + private DirDeletingTask(DirectoryDeletingService service) { + this.directoryDeletingService = service; + } @Override public int getPriority() { @@ -144,6 +155,7 @@ public BackgroundTaskResult call() { if (LOG.isDebugEnabled()) { LOG.debug("Running DirectoryDeletingService"); } + isRunningOnAOS.set(true); getRunCount().incrementAndGet(); long dirNum = 0L; long subDirNum = 0L; @@ -210,8 +222,11 @@ public BackgroundTaskResult call() { LOG.error("Error while running delete directories and files " + "background task. Will retry at next run.", e); } + isRunningOnAOS.set(false); + synchronized (directoryDeletingService) { + this.directoryDeletingService.notify(); + } } - // place holder by returning empty results of this call back. return BackgroundTaskResult.EmptyTaskResult.newResult(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index aa2eb6720a3..e7553004edf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -42,8 +42,7 @@ import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; @@ -67,8 +66,6 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,6 +92,7 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private final Map exclusiveReplicatedSizeMap; private final Set completedExclusiveSizeSet; private final Map snapshotSeekMap; + private AtomicBoolean isRunningOnAOS; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, @@ -114,6 +112,7 @@ public KeyDeletingService(OzoneManager ozoneManager, this.exclusiveReplicatedSizeMap = new HashMap<>(); this.completedExclusiveSizeSet = new HashSet<>(); this.snapshotSeekMap = new HashMap<>(); + this.isRunningOnAOS = new AtomicBoolean(false); } /** @@ -126,10 +125,14 @@ public AtomicLong getDeletedKeyCount() { return deletedKeyCount; } + public boolean isRunningOnAOS() { + return isRunningOnAOS.get(); + } + @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - 
queue.add(new KeyDeletingTask()); + queue.add(new KeyDeletingTask(this)); return queue; } @@ -172,7 +175,12 @@ public void setKeyLimitPerTask(int keyLimitPerTask) { * the blocks info in its deletedBlockLog), it removes these keys from the * DB. */ - private class KeyDeletingTask implements BackgroundTask { + private final class KeyDeletingTask implements BackgroundTask { + private final KeyDeletingService deletingService; + + private KeyDeletingTask(KeyDeletingService service) { + this.deletingService = service; + } @Override public int getPriority() { @@ -186,7 +194,7 @@ public BackgroundTaskResult call() { if (shouldRun()) { final long run = getRunCount().incrementAndGet(); LOG.debug("Running KeyDeletingService {}", run); - + isRunningOnAOS.set(true); int delCount = 0; try { // TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in @@ -220,6 +228,11 @@ public BackgroundTaskResult call() { } } + isRunningOnAOS.set(false); + synchronized (deletingService) { + this.deletingService.notify(); + } + // By design, no one cares about the results of this call back. return EmptyTaskResult.newResult(); } @@ -481,24 +494,7 @@ private void updateDeepCleanedSnapshots(List deepCleanedSnapshots) { public void submitRequest(OMRequest omRequest, ClientId clientId) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = getOzoneManager().getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { LOG.error("Snapshot deep cleaning request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java index 1199a0c6506..f1084155e98 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java @@ -29,16 +29,13 @@ import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -208,24 +205,7 @@ private OMRequest createRequest(List private void submitRequest(OMRequest omRequest) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - ozoneManager.getOmServerProtocol().submitRequest(null, - omRequest); - } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { LOG.error("Expired multipart info delete request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java index 45112037c1b..768c77ad16e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java @@ -47,7 +47,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo; import org.apache.hadoop.ozone.om.helpers.OmDBTenantState; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; @@ -55,12 +54,11 @@ import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Policy; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Role; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetRangerServiceVersionRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -375,19 +373,6 @@ long getRangerOzoneServicePolicyVersion() throws IOException { return policyVersion; } - private RaftClientRequest newRaftClientRequest(OMRequest omRequest) { - return RaftClientRequest.newBuilder() - .setClientId(CLIENT_ID) - .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) - .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage( - Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - } - public void setOMDBRangerServiceVersion(long version) throws ServiceException { // OM DB update goes through Ratis @@ -402,9 +387,7 @@ public void setOMDBRangerServiceVersion(long version) .build(); try { - RaftClientRequest raftClientRequest = newRaftClientRequest(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, - raftClientRequest); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.get()); } catch (ServiceException e) { LOG.error("SetRangerServiceVersion request failed. 
" + "Will retry at next run."); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java index ab556230194..c0d958f6121 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java @@ -31,8 +31,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteOpenKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -41,8 +40,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -268,24 +265,7 @@ private OMRequest createDeleteOpenKeysRequest( private OMResponse submitRequest(OMRequest omRequest) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - return server.submitRequest(omRequest, raftClientRequest); - } else { - return ozoneManager.getOmServerProtocol().submitRequest( - null, omRequest); - } + return OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { LOG.error("Open key " + omRequest.getCmdType() + " request failed. 
Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java index c043a6a72f2..1a29ee8d96b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java @@ -51,14 +51,11 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.codehaus.jackson.map.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -194,22 +191,7 @@ private void repairActiveDb( private OzoneManagerProtocolProtos.OMResponse submitRequest( OzoneManagerProtocolProtos.OMRequest omRequest, ClientId clientId) throws Exception { try { - if (om.isRatisEnabled()) { - OzoneManagerRatisServer server = om.getOmRatisServer(); - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(om.getOmRatisServer().getRaftPeerId()) - .setGroupId(om.getOmRatisServer().getRaftGroupId()) - .setCallId(RUN_CNT.getAndIncrement()) - .setMessage(Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - return server.submitRequest(omRequest, raftClientRequest); - } else { - RUN_CNT.getAndIncrement(); - return om.getOmServerProtocol().submitRequest( - null, omRequest); - } + return OzoneManagerRatisUtils.submitRequest(om, omRequest, clientId, RUN_CNT.getAndIncrement()); } catch (ServiceException e) { LOG.error("repair quota count " + omRequest.getCmdType() + " request failed.", e); throw e; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index a98081c63a1..edc6c7a1629 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -20,57 +20,49 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.ClientVersion; 
-import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; @@ -96,16 +88,17 @@ public class SnapshotDeletingService extends AbstractKeyDeletingService { private final AtomicBoolean suspended; private final OzoneConfiguration conf; private final AtomicLong successRunCount; - private final long snapshotDeletionPerTask; - private final int keyLimitPerSnapshot; + private final int keyLimitPerTask; + private final int snapshotDeletionPerTask; private final int ratisByteLimit; + private final long serviceTimeout; public SnapshotDeletingService(long interval, long serviceTimeout, - OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient) + OzoneManager ozoneManager) throws IOException { super(SnapshotDeletingService.class.getSimpleName(), interval, 
TimeUnit.MILLISECONDS, SNAPSHOT_DELETING_CORE_POOL_SIZE, - serviceTimeout, ozoneManager, scmClient); + serviceTimeout, ozoneManager, null); this.ozoneManager = ozoneManager; this.omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) @@ -114,8 +107,7 @@ public SnapshotDeletingService(long interval, long serviceTimeout, this.successRunCount = new AtomicLong(0); this.suspended = new AtomicBoolean(false); this.conf = ozoneManager.getConfiguration(); - this.snapshotDeletionPerTask = conf - .getLong(SNAPSHOT_DELETING_LIMIT_PER_TASK, + this.snapshotDeletionPerTask = conf.getInt(SNAPSHOT_DELETING_LIMIT_PER_TASK, SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT); int limit = (int) conf.getStorageSize( OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, @@ -123,9 +115,35 @@ public SnapshotDeletingService(long interval, long serviceTimeout, StorageUnit.BYTES); // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); - this.keyLimitPerSnapshot = conf.getInt( + this.keyLimitPerTask = conf.getInt( OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK, OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); + this.serviceTimeout = serviceTimeout; + } + + // Wait for a notification from KeyDeletingService if the key deletion is running. This is to ensure that merging of + // entries does not start while the AOS is still processing the deleted keys. + @VisibleForTesting + public void waitForKeyDeletingService() throws InterruptedException { + KeyDeletingService keyDeletingService = getOzoneManager().getKeyManager().getDeletingService(); + synchronized (keyDeletingService) { + while (keyDeletingService.isRunningOnAOS()) { + keyDeletingService.wait(serviceTimeout); + } + } + } + + // Wait for a notification from DirectoryDeletingService if the directory deletion is running. This is to ensure that + // merging of entries does not start while the AOS is still processing the deleted keys. + @VisibleForTesting + public void waitForDirDeletingService() throws InterruptedException { + DirectoryDeletingService directoryDeletingService = getOzoneManager().getKeyManager() + .getDirDeletingService(); + synchronized (directoryDeletingService) { + while (directoryDeletingService.isRunningOnAOS()) { + directoryDeletingService.wait(serviceTimeout); + } + } } private class SnapshotDeletingTask implements BackgroundTask { @@ -139,316 +157,89 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - ReferenceCounted rcOmSnapshot = null; - ReferenceCounted rcOmPreviousSnapshot = null; - - Table snapshotInfoTable = - ozoneManager.getMetadataManager().getSnapshotInfoTable(); - List purgeSnapshotKeys = new ArrayList<>(); - try (TableIterator> iterator = snapshotInfoTable.iterator()) { - + try { + int remaining = keyLimitPerTask; + Iterator iterator = chainManager.iterator(true); + List snapshotsToBePurged = new ArrayList<>(); long snapshotLimit = snapshotDeletionPerTask; - - while (iterator.hasNext() && snapshotLimit > 0) { - SnapshotInfo snapInfo = iterator.next().getValue(); - - // Only Iterate in deleted snapshot + while (iterator.hasNext() && snapshotLimit > 0 && remaining > 0) { + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, chainManager, iterator.next()); if (shouldIgnoreSnapshot(snapInfo)) { continue; } - - // Note: Can refactor this to use try-with-resources. - // Handling RC decrements manually for now to minimize conflicts. 
- rcOmSnapshot = omSnapshotManager.getSnapshot( - snapInfo.getVolumeName(), - snapInfo.getBucketName(), - snapInfo.getName()); - OmSnapshot omSnapshot = rcOmSnapshot.get(); - - Table snapshotDeletedTable = - omSnapshot.getMetadataManager().getDeletedTable(); - Table snapshotDeletedDirTable = - omSnapshot.getMetadataManager().getDeletedDirTable(); - - Table renamedTable = - omSnapshot.getMetadataManager().getSnapshotRenamedTable(); - - long volumeId = ozoneManager.getMetadataManager() - .getVolumeId(snapInfo.getVolumeName()); - // Get bucketInfo for the snapshot bucket to get bucket layout. - String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey( - snapInfo.getVolumeName(), snapInfo.getBucketName()); - OmBucketInfo bucketInfo = ozoneManager.getMetadataManager() - .getBucketTable().get(dbBucketKey); - - if (bucketInfo == null) { - // Decrement ref count - rcOmSnapshot.close(); - rcOmSnapshot = null; - throw new IllegalStateException("Bucket " + "/" + - snapInfo.getVolumeName() + "/" + snapInfo.getBucketName() + - " is not found. BucketInfo should not be null for snapshotted" + - " bucket. The OM is in unexpected state."); - } - - String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - String dbBucketKeyForDir = ozoneManager.getMetadataManager() - .getBucketKey(Long.toString(volumeId), - Long.toString(bucketInfo.getObjectID())) + OM_KEY_PREFIX; - - if (isSnapshotReclaimable(snapshotDeletedTable, - snapshotDeletedDirTable, snapshotBucketKey, dbBucketKeyForDir)) { - purgeSnapshotKeys.add(snapInfo.getTableKey()); - // Decrement ref count - rcOmSnapshot.close(); - rcOmSnapshot = null; + LOG.info("Started Snapshot Deletion Processing for snapshot : {}", snapInfo.getTableKey()); + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, chainManager, snapInfo); + // Continue if the next snapshot is not active. This is to avoid unnecessary copies from one snapshot to + // another. + if (nextSnapshot != null && + nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { continue; } - //TODO: [SNAPSHOT] Add lock to deletedTable and Active DB. - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(snapInfo, chainManager); - Table previousKeyTable = null; - Table previousDirTable = null; - OmSnapshot omPreviousSnapshot = null; - - // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable - // and next snapshot deletedKeyTable. - if (previousSnapshot != null) { - rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( - previousSnapshot.getVolumeName(), - previousSnapshot.getBucketName(), - previousSnapshot.getName()); - omPreviousSnapshot = rcOmPreviousSnapshot.get(); - - previousKeyTable = omPreviousSnapshot - .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); - previousDirTable = omPreviousSnapshot - .getMetadataManager().getDirectoryTable(); + // nextSnapshot = null means entries would be moved to AOS. + if (nextSnapshot == null) { + waitForKeyDeletingService(); + waitForDirDeletingService(); } - - // Move key to either next non deleted snapshot's deletedTable - // or keep it in current snapshot deleted table. 
- List toReclaimList = new ArrayList<>(); - List toNextDBList = new ArrayList<>(); - // A list of renamed keys/files/dirs - List renamedList = new ArrayList<>(); - List dirsToMove = new ArrayList<>(); - - long remainNum = handleDirectoryCleanUp(snapshotDeletedDirTable, - previousDirTable, renamedTable, dbBucketKeyForDir, snapInfo, - omSnapshot, dirsToMove, renamedList); - int deletionCount = 0; - - try (TableIterator> deletedIterator = snapshotDeletedTable - .iterator()) { - - List keysToPurge = new ArrayList<>(); - deletedIterator.seek(snapshotBucketKey); - - while (deletedIterator.hasNext() && - deletionCount < remainNum) { - Table.KeyValue - deletedKeyValue = deletedIterator.next(); - String deletedKey = deletedKeyValue.getKey(); - - // Exit if it is out of the bucket scope. - if (!deletedKey.startsWith(snapshotBucketKey)) { - // If snapshot deletedKeyTable doesn't have any - // entry in the snapshot scope it can be reclaimed - break; - } - - RepeatedOmKeyInfo repeatedOmKeyInfo = deletedKeyValue.getValue(); - - SnapshotMoveKeyInfos.Builder toReclaim = SnapshotMoveKeyInfos - .newBuilder() - .setKey(deletedKey); - SnapshotMoveKeyInfos.Builder toNextDb = SnapshotMoveKeyInfos - .newBuilder() - .setKey(deletedKey); - HddsProtos.KeyValue.Builder renamedKey = HddsProtos.KeyValue - .newBuilder(); - - for (OmKeyInfo keyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { - splitRepeatedOmKeyInfo(toReclaim, toNextDb, renamedKey, - keyInfo, previousKeyTable, renamedTable, - bucketInfo, volumeId); + try (ReferenceCounted snapshot = omSnapshotManager.getSnapshot( + snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName())) { + KeyManager snapshotKeyManager = snapshot.get().getKeyManager(); + int moveCount = 0; + // Get all entries from deletedKeyTable. + List>> deletedKeyEntries = + snapshotKeyManager.getDeletedKeyEntries(snapInfo.getVolumeName(), snapInfo.getBucketName(), + null, remaining); + moveCount += deletedKeyEntries.size(); + // Get all entries from deletedDirTable. + List> deletedDirEntries = snapshotKeyManager.getDeletedDirEntries( + snapInfo.getVolumeName(), snapInfo.getBucketName(), remaining - moveCount); + moveCount += deletedDirEntries.size(); + // Get all entries from snapshotRenamedTable. + List> renameEntries = snapshotKeyManager.getRenamesKeyEntries( + snapInfo.getVolumeName(), snapInfo.getBucketName(), null, remaining - moveCount); + moveCount += renameEntries.size(); + if (moveCount > 0) { + List deletedKeys = new ArrayList<>(deletedKeyEntries.size()); + List deletedDirs = new ArrayList<>(deletedDirEntries.size()); + List renameKeys = new ArrayList<>(renameEntries.size()); + + // Convert deletedKeyEntries to SnapshotMoveKeyInfos. + for (Table.KeyValue> deletedEntry : deletedKeyEntries) { + deletedKeys.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedEntry.getKey()) + .addAllKeyInfos(deletedEntry.getValue() + .stream().map(val -> val.getProtobuf(ClientVersion.CURRENT_VERSION)) + .collect(Collectors.toList())).build()); } - // If all the KeyInfos are reclaimable in RepeatedOmKeyInfo - // then no need to update current snapshot deletedKeyTable. - if (!(toReclaim.getKeyInfosCount() == - repeatedOmKeyInfo.getOmKeyInfoList().size())) { - toReclaimList.add(toReclaim.build()); - toNextDBList.add(toNextDb.build()); - } else { - // The key can be reclaimed here. 
- List blocksForKeyDelete = omSnapshot - .getMetadataManager() - .getBlocksForKeyDelete(deletedKey); - if (blocksForKeyDelete != null) { - keysToPurge.addAll(blocksForKeyDelete); - } + // Convert deletedDirEntries to SnapshotMoveKeyInfos. + for (Table.KeyValue deletedDirEntry : deletedDirEntries) { + deletedDirs.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedDirEntry.getKey()) + .addKeyInfos(deletedDirEntry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); } - if (renamedKey.hasKey() && renamedKey.hasValue()) { - renamedList.add(renamedKey.build()); + // Convert renamedEntries to KeyValue. + for (Table.KeyValue renameEntry : renameEntries) { + renameKeys.add(HddsProtos.KeyValue.newBuilder().setKey(renameEntry.getKey()) + .setValue(renameEntry.getValue()).build()); } - deletionCount++; + submitSnapshotMoveDeletedKeys(snapInfo, deletedKeys, renameKeys, deletedDirs); + remaining -= moveCount; + } else { + snapshotsToBePurged.add(snapInfo.getTableKey()); } - - // Delete keys From deletedTable - processKeyDeletes(keysToPurge, omSnapshot.getKeyManager(), - null, snapInfo.getTableKey()); - successRunCount.incrementAndGet(); - } catch (IOException ex) { - LOG.error("Error while running Snapshot Deleting Service for " + - "snapshot " + snapInfo.getTableKey() + " with snapshotId " + - snapInfo.getSnapshotId() + ". Processed " + deletionCount + - " keys and " + (keyLimitPerSnapshot - remainNum) + - " directories and files", ex); } + successRunCount.incrementAndGet(); snapshotLimit--; - // Submit Move request to OM. - submitSnapshotMoveDeletedKeys(snapInfo, toReclaimList, - toNextDBList, renamedList, dirsToMove); - - // Properly decrement ref count for rcOmPreviousSnapshot - if (rcOmPreviousSnapshot != null) { - rcOmPreviousSnapshot.close(); - rcOmPreviousSnapshot = null; - } + } + if (!snapshotsToBePurged.isEmpty()) { + submitSnapshotPurgeRequest(snapshotsToBePurged); } } catch (IOException e) { LOG.error("Error while running Snapshot Deleting Service", e); - } finally { - // Decrement ref counts - if (rcOmPreviousSnapshot != null) { - rcOmPreviousSnapshot.close(); - } - if (rcOmSnapshot != null) { - rcOmSnapshot.close(); - } } - submitSnapshotPurgeRequest(purgeSnapshotKeys); - return BackgroundTaskResult.EmptyTaskResult.newResult(); } - private boolean isSnapshotReclaimable( - Table snapshotDeletedTable, - Table snapshotDeletedDirTable, - String snapshotBucketKey, String dbBucketKeyForDir) throws IOException { - - boolean isDirTableCleanedUp = false; - boolean isKeyTableCleanedUp = false; - try (TableIterator> iterator = snapshotDeletedTable.iterator();) { - iterator.seek(snapshotBucketKey); - // If the next entry doesn't start with snapshotBucketKey then - // deletedKeyTable is already cleaned up. - isKeyTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() - .startsWith(snapshotBucketKey); - } - - try (TableIterator> - iterator = snapshotDeletedDirTable.iterator()) { - iterator.seek(dbBucketKeyForDir); - // If the next entry doesn't start with dbBucketKeyForDir then - // deletedDirTable is already cleaned up. 
- isDirTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() - .startsWith(dbBucketKeyForDir); - } - - return (isDirTableCleanedUp || snapshotDeletedDirTable.isEmpty()) && - (isKeyTableCleanedUp || snapshotDeletedTable.isEmpty()); - } - - @SuppressWarnings("checkstyle:ParameterNumber") - private long handleDirectoryCleanUp( - Table snapshotDeletedDirTable, - Table previousDirTable, - Table renamedTable, - String dbBucketKeyForDir, SnapshotInfo snapInfo, - OmSnapshot omSnapshot, List dirsToMove, - List renamedList) { - - long dirNum = 0L; - long subDirNum = 0L; - long subFileNum = 0L; - long remainNum = keyLimitPerSnapshot; - int consumedSize = 0; - List purgePathRequestList = new ArrayList<>(); - List> allSubDirList - = new ArrayList<>(keyLimitPerSnapshot); - try (TableIterator> deletedDirIterator = - snapshotDeletedDirTable.iterator()) { - - long startTime = Time.monotonicNow(); - deletedDirIterator.seek(dbBucketKeyForDir); - - while (deletedDirIterator.hasNext()) { - Table.KeyValue deletedDir = - deletedDirIterator.next(); - String deletedDirKey = deletedDir.getKey(); - - // Exit for dirs out of snapshot scope. - if (!deletedDirKey.startsWith(dbBucketKeyForDir)) { - break; - } - - if (isDirReclaimable(deletedDir, previousDirTable, - renamedTable, renamedList)) { - // Reclaim here - PurgePathRequest request = prepareDeleteDirRequest( - remainNum, deletedDir.getValue(), deletedDir.getKey(), - allSubDirList, omSnapshot.getKeyManager()); - if (isBufferLimitCrossed(ratisByteLimit, consumedSize, - request.getSerializedSize())) { - if (purgePathRequestList.size() != 0) { - // if message buffer reaches max limit, avoid sending further - remainNum = 0; - break; - } - // if directory itself is having a lot of keys / files, - // reduce capacity to minimum level - remainNum = MIN_ERR_LIMIT_PER_TASK; - request = prepareDeleteDirRequest( - remainNum, deletedDir.getValue(), deletedDir.getKey(), - allSubDirList, omSnapshot.getKeyManager()); - } - consumedSize += request.getSerializedSize(); - purgePathRequestList.add(request); - remainNum = remainNum - request.getDeletedSubFilesCount(); - remainNum = remainNum - request.getMarkDeletedSubDirsCount(); - // Count up the purgeDeletedDir, subDirs and subFiles - if (request.getDeletedDir() != null - && !request.getDeletedDir().isEmpty()) { - dirNum++; - } - subDirNum += request.getMarkDeletedSubDirsCount(); - subFileNum += request.getDeletedSubFilesCount(); - } else { - dirsToMove.add(deletedDir.getKey()); - } - } - - remainNum = optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, - subDirNum, subFileNum, allSubDirList, purgePathRequestList, - snapInfo.getTableKey(), startTime, ratisByteLimit - consumedSize, - omSnapshot.getKeyManager()); - } catch (IOException e) { - LOG.error("Error while running delete directories and files for " + - "snapshot " + snapInfo.getTableKey() + " in snapshot deleting " + - "background task. 
Will retry at next run.", e); - } - - return remainNum; - } - private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { if (!purgeSnapshotKeys.isEmpty()) { SnapshotPurgeRequest snapshotPurgeRequest = SnapshotPurgeRequest @@ -466,92 +257,36 @@ private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { } } - @SuppressWarnings("checkstyle:ParameterNumber") - private void splitRepeatedOmKeyInfo(SnapshotMoveKeyInfos.Builder toReclaim, - SnapshotMoveKeyInfos.Builder toNextDb, - HddsProtos.KeyValue.Builder renamedKey, OmKeyInfo keyInfo, - Table previousKeyTable, - Table renamedTable, - OmBucketInfo bucketInfo, long volumeId) throws IOException { - - if (isKeyReclaimable(previousKeyTable, renamedTable, - keyInfo, bucketInfo, volumeId, renamedKey)) { - // Update in current db's deletedKeyTable - toReclaim.addKeyInfos(keyInfo - .getProtobuf(ClientVersion.CURRENT_VERSION)); - } else { - // Move to next non deleted snapshot's deleted table - toNextDb.addKeyInfos(keyInfo.getProtobuf( - ClientVersion.CURRENT_VERSION)); - } - } - - private boolean isDirReclaimable( - Table.KeyValue deletedDir, - Table previousDirTable, - Table renamedTable, - List renamedList) throws IOException { + private void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, + List deletedKeys, + List renamedList, + List dirsToMove) { - if (previousDirTable == null) { - return true; - } - - String deletedDirDbKey = deletedDir.getKey(); - OmKeyInfo deletedDirInfo = deletedDir.getValue(); - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( - deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - deletedDirInfo.getObjectID()); - - /* - snapshotRenamedTable: /volumeName/bucketName/objectID -> - /volumeId/bucketId/parentId/dirName - */ - String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); - String prevDbKey = null; - - if (dbKeyBeforeRename != null) { - prevDbKey = dbKeyBeforeRename; - HddsProtos.KeyValue renamedDir = HddsProtos.KeyValue - .newBuilder() - .setKey(dbRenameKey) - .setValue(dbKeyBeforeRename) - .build(); - renamedList.add(renamedDir); - } else { - // In OMKeyDeleteResponseWithFSO OzonePathKey is converted to - // OzoneDeletePathKey. Changing it back to check the previous DirTable. 
- prevDbKey = ozoneManager.getMetadataManager() - .getOzoneDeletePathDirKey(deletedDirDbKey); - } - - OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); - if (prevDirectoryInfo == null) { - return true; - } - - return prevDirectoryInfo.getObjectID() != deletedDirInfo.getObjectID(); - } - - public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, - List toReclaimList, - List toNextDBList, - List renamedList, - List dirsToMove) throws InterruptedException { + SnapshotMoveTableKeysRequest.Builder moveDeletedKeysBuilder = SnapshotMoveTableKeysRequest.newBuilder() + .setFromSnapshotID(toProtobuf(snapInfo.getSnapshotId())); - SnapshotMoveDeletedKeysRequest.Builder moveDeletedKeysBuilder = - SnapshotMoveDeletedKeysRequest.newBuilder() - .setFromSnapshot(snapInfo.getProtobuf()); - - SnapshotMoveDeletedKeysRequest moveDeletedKeys = moveDeletedKeysBuilder - .addAllReclaimKeys(toReclaimList) - .addAllNextDBKeys(toNextDBList) + SnapshotMoveTableKeysRequest moveDeletedKeys = moveDeletedKeysBuilder + .addAllDeletedKeys(deletedKeys) .addAllRenamedKeys(renamedList) - .addAllDeletedDirsToMove(dirsToMove) + .addAllDeletedDirs(dirsToMove) .build(); + if (isBufferLimitCrossed(ratisByteLimit, 0, moveDeletedKeys.getSerializedSize())) { + int remaining = MIN_ERR_LIMIT_PER_TASK; + deletedKeys = deletedKeys.subList(0, Math.min(remaining, deletedKeys.size())); + remaining -= deletedKeys.size(); + renamedList = renamedList.subList(0, Math.min(remaining, renamedList.size())); + remaining -= renamedList.size(); + dirsToMove = dirsToMove.subList(0, Math.min(remaining, dirsToMove.size())); + moveDeletedKeys = moveDeletedKeysBuilder + .addAllDeletedKeys(deletedKeys) + .addAllRenamedKeys(renamedList) + .addAllDeletedDirs(dirsToMove) + .build(); + } OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SnapshotMoveDeletedKeys) - .setSnapshotMoveDeletedKeysRequest(moveDeletedKeys) + .setCmdType(Type.SnapshotMoveTableKeys) + .setSnapshotMoveTableKeysRequest(moveDeletedKeys) .setClientId(clientId.toString()) .build(); @@ -560,36 +295,26 @@ public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, } } - public void submitRequest(OMRequest omRequest) { + private void submitRequest(OMRequest omRequest) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - ozoneManager.getOmServerProtocol().submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { - LOG.error("Snapshot Deleting request failed. " + - "Will retry at next run.", e); + LOG.error("Request: {} fired by SnapshotDeletingService failed. Will retry in the next run", omRequest, e); } } } + /** + * Checks if a given snapshot has been deleted and all the changes made to snapshot have been flushed to disk. + * @param snapInfo SnapshotInfo corresponding to the snapshot. + * @return true if the snapshot is still active or changes to snapshot have not been flushed to disk otherwise false. 
+ * @throws IOException + */ @VisibleForTesting - boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) { + boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) throws IOException { SnapshotInfo.SnapshotStatus snapshotStatus = snapInfo.getSnapshotStatus(); - return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; + return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo); } // TODO: Move this util class. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index 9746b4421b7..26d5d24a8a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -34,13 +34,12 @@ import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -48,8 +47,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import java.io.IOException; import java.util.ArrayList; @@ -436,25 +433,7 @@ private void updateDeepCleanSnapshotDir(String snapshotKeyTable) { public void submitRequest(OMRequest omRequest, ClientId clientId) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = - getOzoneManager().getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { LOG.error("Snapshot deep cleaning request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index e0f40dabd8a..7af6d085137 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -24,8 +24,11 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; import org.slf4j.Logger; @@ -33,6 +36,8 @@ import java.io.File; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.NoSuchElementException; import java.util.HashMap; import java.util.Map; @@ -86,6 +91,13 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } + public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + UUID snapshotId) throws IOException { + String tableKey = chainManager.getTableKey(snapshotId); + return SnapshotUtils.getSnapshotInfo(ozoneManager, tableKey); + } + public static void dropColumnFamilyHandle( final ManagedRocksDB rocksDB, final ColumnFamilyHandle columnFamilyHandle) { @@ -138,37 +150,25 @@ public static void checkSnapshotActive(SnapshotInfo snapInfo, } } + /** - * Get the next non deleted snapshot in the snapshot chain. + * Get the next snapshot in the snapshot chain. */ - public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, - SnapshotChainManager chainManager, OzoneManager ozoneManager) + public static SnapshotInfo getNextSnapshot(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + SnapshotInfo snapInfo) throws IOException { - // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. if (snapInfo == null) { throw new OMException("Snapshot Info is null. 
Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); } - try { - while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), + if (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { - - UUID nextPathSnapshot = - chainManager.nextPathSnapshot( - snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String tableKey = chainManager.getTableKey(nextPathSnapshot); - SnapshotInfo nextSnapshotInfo = getSnapshotInfo(ozoneManager, tableKey); - - if (nextSnapshotInfo.getSnapshotStatus().equals( - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE)) { - return nextSnapshotInfo; - } - - snapInfo = nextSnapshotInfo; + UUID nextPathSnapshot = chainManager.nextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); + return getSnapshotInfo(ozoneManager, chainManager, nextPathSnapshot); } } catch (NoSuchElementException ex) { LOG.error("The snapshot {} is not longer in snapshot chain, It " + @@ -242,4 +242,44 @@ public static String getOzonePathKeyForFso(OMMetadataManager metadataManager, final long bucketId = metadataManager.getBucketId(volumeName, bucketName); return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; } + + /** + * Returns merged repeatedKeyInfo entry with the existing deleted entry in the table. + * @param snapshotMoveKeyInfos keyInfos to be added. + * @param metadataManager metadataManager for a store. + * @return + * @throws IOException + */ + public static RepeatedOmKeyInfo createMergedRepeatedOmKeyInfoFromDeletedTableEntry( + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos, OMMetadataManager metadataManager) throws + IOException { + String dbKey = snapshotMoveKeyInfos.getKey(); + List keyInfoList = new ArrayList<>(); + for (OzoneManagerProtocolProtos.KeyInfo info : snapshotMoveKeyInfos.getKeyInfosList()) { + OmKeyInfo fromProtobuf = OmKeyInfo.getFromProtobuf(info); + keyInfoList.add(fromProtobuf); + } + // When older version of keys are moved to the next snapshot's deletedTable + // The newer version might also be in the next snapshot's deletedTable and + // it might overwrite the existing value which inturn could lead to orphan block in the system. + // Checking the keyInfoList with the last n versions of the omKeyInfo versions would ensure all versions are + // present in the list and would also avoid redundant additions to the list if the last n versions match, which + // can happen on om transaction replay on snapshotted rocksdb. 
+ RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); + if (result == null) { + result = new RepeatedOmKeyInfo(keyInfoList); + } else if (!isSameAsLatestOmKeyInfo(keyInfoList, result)) { + keyInfoList.forEach(result::addOmKeyInfo); + } + return result; + } + + private static boolean isSameAsLatestOmKeyInfo(List omKeyInfos, + RepeatedOmKeyInfo result) { + int size = result.getOmKeyInfoList().size(); + if (size >= omKeyInfos.size()) { + return omKeyInfos.equals(result.getOmKeyInfoList().subList(size - omKeyInfos.size(), size)); + } + return false; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 125c9efcaf2..6e24c9ff93f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -44,9 +44,9 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.s3.S3SecretCacheProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; @@ -81,12 +81,12 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer doubleBuffer; private OzoneManager ozoneManager; private S3SecretLockedManager secretManager; - private final CreateSnapshotResponse snapshotResponse1 = mock(CreateSnapshotResponse.class); - private final CreateSnapshotResponse snapshotResponse2 = mock(CreateSnapshotResponse.class); private final OMResponse omKeyResponse = mock(OMResponse.class); private final OMResponse omBucketResponse = mock(OMResponse.class); private final OMResponse omSnapshotResponse1 = mock(OMResponse.class); private final OMResponse omSnapshotResponse2 = mock(OMResponse.class); + private final OMResponse omSnapshotPurgeResponseProto1 = mock(OMResponse.class); + private final OMResponse omSnapshotPurgeResponseProto2 = mock(OMResponse.class); private static OMClientResponse omKeyCreateResponse = mock(OMKeyCreateResponse.class); private static OMClientResponse omBucketCreateResponse = @@ -95,6 +95,9 @@ class TestOzoneManagerDoubleBuffer { mock(OMSnapshotCreateResponse.class); private static OMClientResponse omSnapshotCreateResponse2 = mock(OMSnapshotCreateResponse.class); + private static OMClientResponse omSnapshotPurgeResponse1 = mock(OMSnapshotPurgeResponse.class); + private static OMClientResponse omSnapshotPurgeResponse2 = mock(OMSnapshotPurgeResponse.class); + @TempDir private File tempDir; private OzoneManagerDoubleBuffer.FlushNotifier flushNotifier; @@ -143,19 +146,22 @@ public void setup() throws IOException { doNothing().when(omBucketCreateResponse).checkAndUpdateDB(any(), any()); doNothing().when(omSnapshotCreateResponse1).checkAndUpdateDB(any(), any()); doNothing().when(omSnapshotCreateResponse2).checkAndUpdateDB(any(), 
any()); + doNothing().when(omSnapshotPurgeResponse1).checkAndUpdateDB(any(), any()); + doNothing().when(omSnapshotPurgeResponse2).checkAndUpdateDB(any(), any()); when(omKeyResponse.getTraceID()).thenReturn("keyTraceId"); when(omBucketResponse.getTraceID()).thenReturn("bucketTraceId"); when(omSnapshotResponse1.getTraceID()).thenReturn("snapshotTraceId-1"); when(omSnapshotResponse2.getTraceID()).thenReturn("snapshotTraceId-2"); - when(omSnapshotResponse1.hasCreateSnapshotResponse()) - .thenReturn(true); - when(omSnapshotResponse2.hasCreateSnapshotResponse()) - .thenReturn(true); - when(omSnapshotResponse1.getCreateSnapshotResponse()) - .thenReturn(snapshotResponse1); - when(omSnapshotResponse2.getCreateSnapshotResponse()) - .thenReturn(snapshotResponse2); + when(omSnapshotPurgeResponseProto1.getTraceID()).thenReturn("snapshotPurgeTraceId-1"); + when(omSnapshotPurgeResponseProto2.getTraceID()).thenReturn("snapshotPurgeTraceId-2"); + + when(omKeyResponse.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.CreateKey); + when(omBucketResponse.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.CreateBucket); + when(omSnapshotPurgeResponseProto1.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotPurgeResponseProto2.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotResponse1.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotResponse2.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); when(omKeyCreateResponse.getOMResponse()).thenReturn(omKeyResponse); when(omBucketCreateResponse.getOMResponse()).thenReturn(omBucketResponse); @@ -163,6 +169,10 @@ public void setup() throws IOException { .thenReturn(omSnapshotResponse1); when(omSnapshotCreateResponse2.getOMResponse()) .thenReturn(omSnapshotResponse2); + when(omSnapshotPurgeResponse1.getOMResponse()) + .thenReturn(omSnapshotPurgeResponseProto1); + when(omSnapshotPurgeResponse2.getOMResponse()) + .thenReturn(omSnapshotPurgeResponseProto2); } @AfterEach @@ -194,8 +204,35 @@ private static Stream doubleBufferFlushCases() { omSnapshotCreateResponse1, omSnapshotCreateResponse2, omBucketCreateResponse), - 4L, 4L, 14L, 16L, 1L, 1.142F) - ); + 4L, 4L, 14L, 16L, 1L, 1.142F), + Arguments.of(Arrays.asList(omSnapshotPurgeResponse1, + omSnapshotPurgeResponse2), + 2L, 2L, 16L, 18L, 1L, 1.125F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omBucketCreateResponse, + omSnapshotPurgeResponse1, + omSnapshotPurgeResponse2), + 3L, 4L, 19L, 22L, 2L, 1.157F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omSnapshotPurgeResponse1, + omBucketCreateResponse, + omSnapshotPurgeResponse2), + 4L, 4L, 23L, 26L, 1L, 1.1300F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omSnapshotPurgeResponse1, + omSnapshotPurgeResponse2, + omBucketCreateResponse), + 4L, 4L, 27L, 30L, 1L, 1.111F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omBucketCreateResponse, + omSnapshotPurgeResponse1, + omSnapshotCreateResponse1, + omSnapshotPurgeResponse2, + omBucketCreateResponse, + omSnapshotCreateResponse2), + 6L, 7L, 33L, 37L, 2L, 1.121F) + + ); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index c807c04688d..eff23a18e6e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -30,9 +30,11 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -40,6 +42,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfigValidator; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.ClientVersion; @@ -109,6 +112,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.logging.log4j.util.Strings; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; @@ -749,17 +753,17 @@ public static OMRequest.Builder newCreateBucketRequest( .setClientId(UUID.randomUUID().toString()); } - public static List< HddsProtos.KeyValue> getMetadataList() { - List metadataList = new ArrayList<>(); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue( + public static List< KeyValue> getMetadataList() { + List metadataList = new ArrayList<>(); + metadataList.add(KeyValue.newBuilder().setKey("key1").setValue( "value1").build()); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue( + metadataList.add(KeyValue.newBuilder().setKey("key2").setValue( "value2").build()); return metadataList; } - public static HddsProtos.KeyValue fsoMetadata() { - return HddsProtos.KeyValue.newBuilder() + public static KeyValue fsoMetadata() { + return KeyValue.newBuilder() .setKey(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS) .setValue(Boolean.FALSE.toString()) .build(); @@ -1050,7 +1054,7 @@ public static OMRequest createCommitPartMPURequest(String volumeName, .setMultipartNumber(partNumber) .setMultipartUploadID(multipartUploadID) .addAllKeyLocations(new ArrayList<>()) - .addMetadata(HddsProtos.KeyValue.newBuilder() + .addMetadata(KeyValue.newBuilder() .setKey(OzoneConsts.ETAG) .setValue(DatatypeConverter.printHexBinary( new DigestInputStream( @@ -1321,6 +1325,69 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } + public static OMRequest moveSnapshotTableKeyRequest(UUID snapshotId, + List>> deletedKeys, + List>> deletedDirs, + List> renameKeys) { + List deletedMoveKeys = new ArrayList<>(); + for (Pair> deletedKey : deletedKeys) { + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() + .setKey(deletedKey.getKey()) + .addAllKeyInfos( + deletedKey.getValue().stream() + .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())) + .build(); + deletedMoveKeys.add(snapshotMoveKeyInfos); + } + + List deletedDirMoveKeys = new ArrayList<>(); + for (Pair> deletedKey : deletedDirs) { + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() + .setKey(deletedKey.getKey()) + .addAllKeyInfos( + deletedKey.getValue().stream() + 
.map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)) + .collect(Collectors.toList())) + .build(); + deletedDirMoveKeys.add(snapshotMoveKeyInfos); + } + + List renameKeyList = new ArrayList<>(); + for (Pair renameKey : renameKeys) { + KeyValue.Builder keyValue = KeyValue.newBuilder(); + keyValue.setKey(renameKey.getKey()); + if (!Strings.isBlank(renameKey.getValue())) { + keyValue.setValue(renameKey.getValue()); + } + renameKeyList.add(keyValue.build()); + } + + + OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest snapshotMoveTableKeysRequest = + OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest.newBuilder() + .setFromSnapshotID(HddsUtils.toProtobuf(snapshotId)) + .addAllDeletedKeys(deletedMoveKeys) + .addAllDeletedDirs(deletedDirMoveKeys) + .addAllRenamedKeys(renameKeyList) + .build(); + + OzoneManagerProtocolProtos.UserInfo userInfo = + OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName("user") + .setHostName("host") + .setRemoteAddress("remote-address") + .build(); + + return OMRequest.newBuilder() + .setSnapshotMoveTableKeysRequest(snapshotMoveTableKeysRequest) + .setCmdType(Type.SnapshotMoveTableKeys) + .setClientId(UUID.randomUUID().toString()) + .setUserInfo(userInfo) + .build(); + } + /** * Create OMRequest for Rename Snapshot. * diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index cbb782e184f..9eb8738b9d4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -26,18 +26,23 @@ import java.util.List; import java.util.UUID; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.key.OMDirectoriesPurgeResponseWithFSO; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import jakarta.annotation.Nonnull; @@ -109,7 +114,7 @@ private void updateBlockInfo(OmKeyInfo omKeyInfo) throws IOException { * Create OMRequest which encapsulates DeleteKeyRequest. 
* @return OMRequest */ - private OMRequest createPurgeKeysRequest(String purgeDeletedDir, + private OMRequest createPurgeKeysRequest(String fromSnapshot, String purgeDeletedDir, List keyList, OmBucketInfo bucketInfo) throws IOException { List purgePathRequestList = new ArrayList<>(); @@ -127,7 +132,9 @@ private OMRequest createPurgeKeysRequest(String purgeDeletedDir, OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); purgeDirRequest.addAllDeletedPath(purgePathRequestList); - + if (fromSnapshot != null) { + purgeDirRequest.setSnapshotTableKey(fromSnapshot); + } OzoneManagerProtocolProtos.OMRequest omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.PurgeDirectories) @@ -138,8 +145,7 @@ private OMRequest createPurgeKeysRequest(String purgeDeletedDir, } private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest( final long volumeId, final long bucketId, final String purgeDeletedDir, - final List purgeDeletedFiles, - final List markDirsAsDeleted) { + final List purgeDeletedFiles, final List markDirsAsDeleted) { // Put all keys to be purged in a list OzoneManagerProtocolProtos.PurgePathRequest.Builder purgePathsRequest = OzoneManagerProtocolProtos.PurgePathRequest.newBuilder(); @@ -182,13 +188,13 @@ public void testValidateAndUpdateCacheCheckQuota() throws Exception { // Create and Delete keys. The keys should be moved to DeletedKeys table List deletedKeyInfos = createAndDeleteKeys(1, null); // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(deletedKeyInfos); + List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); // Create PurgeKeysRequest to purge the deleted keys String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); - OMRequest omRequest = createPurgeKeysRequest( + OMRequest omRequest = createPurgeKeysRequest(null, null, deletedKeyInfos, omBucketInfo); OMRequest preExecutedRequest = preExecute(omRequest); OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = @@ -205,7 +211,59 @@ public void testValidateAndUpdateCacheCheckQuota() throws Exception { performBatchOperationCommit(omClientResponse); // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(deletedKeyNames); + validateDeletedKeys(omMetadataManager, deletedKeyNames); + } + + @Test + public void testValidateAndUpdateCacheSnapshotLastTransactionInfoUpdated() throws Exception { + // Create and Delete keys. 
The keys should be moved to DeletedKeys table + List deletedKeyInfos = createAndDeleteKeys(1, null); + // The keys should be present in the DeletedKeys table before purging + List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); + + String snapshotName = "snap1"; + SnapshotInfo snapshotInfo = createSnapshot(snapshotName); + ReferenceCounted rcOmSnapshot = ozoneManager.getOmSnapshotManager() + .getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), snapshotInfo.getName()); + // Keys should be present in snapshot + validateDeletedKeysTable(rcOmSnapshot.get().getMetadataManager(), deletedKeyInfos, true); + // keys should have been moved from AOS + validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, false); + + // Create PurgeKeysRequest to purge the deleted keys + assertEquals(snapshotInfo.getLastTransactionInfo(), + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + bucketKey); + OMRequest omRequest = createPurgeKeysRequest(snapshotInfo.getTableKey(), + null, deletedKeyInfos, omBucketInfo); + OMRequest preExecutedRequest = preExecute(omRequest); + OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = + new OMDirectoriesPurgeRequestWithFSO(preExecutedRequest); + + assertEquals(1000L * deletedKeyNames.size(), omBucketInfo.getUsedBytes()); + OMDirectoriesPurgeResponseWithFSO omClientResponse + = (OMDirectoriesPurgeResponseWithFSO) omKeyPurgeRequest + .validateAndUpdateCache(ozoneManager, 100L); + + SnapshotInfo snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotInfo.getTableKey()); + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotInfo.getTableKey()); + + assertEquals(snapshotInfoOnDisk, snapshotInfo); + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(TransactionInfo.getTermIndex(100L)) + .toByteString()); + assertEquals(snapshotInfo, updatedSnapshotInfo); + omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + assertEquals(0L * deletedKeyNames.size(), omBucketInfo.getUsedBytes()); + + performBatchOperationCommit(omClientResponse); + + // The keys should exist in the DeletedKeys table after dir delete + validateDeletedKeys(rcOmSnapshot.get().getMetadataManager(), deletedKeyNames); + snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotInfo.getTableKey()); + assertEquals(snapshotInfo, snapshotInfoOnDisk); + rcOmSnapshot.close(); } @Test @@ -214,13 +272,13 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() // Create and Delete keys. 
The keys should be moved to DeletedKeys table List deletedKeyInfos = createAndDeleteKeys(1, null); // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(deletedKeyInfos); + List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); // Create PurgeKeysRequest to purge the deleted keys String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); - OMRequest omRequest = createPurgeKeysRequest( + OMRequest omRequest = createPurgeKeysRequest(null, null, deletedKeyInfos, omBucketInfo); OMRequest preExecutedRequest = preExecute(omRequest); OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = @@ -258,35 +316,32 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() performBatchOperationCommit(omClientResponse); // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(deletedKeyNames); + validateDeletedKeys(omMetadataManager, deletedKeyNames); } - private void performBatchOperationCommit( - OMDirectoriesPurgeResponseWithFSO omClientResponse) throws IOException { + private void performBatchOperationCommit(OMDirectoriesPurgeResponseWithFSO omClientResponse) throws IOException { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - // Do manual commit and see whether addToBatch is successful or not. omMetadataManager.getStore().commitBatchOperation(batchOperation); } } @Nonnull - private List validateDeletedKeysTable( - List deletedKeyInfos) throws IOException { + private List validateDeletedKeysTable(OMMetadataManager omMetadataManager, + List deletedKeyInfos, boolean keyExists) throws IOException { List deletedKeyNames = new ArrayList<>(); for (OmKeyInfo deletedKey : deletedKeyInfos) { String keyName = omMetadataManager.getOzoneKey(deletedKey.getVolumeName(), deletedKey.getBucketName(), deletedKey.getKeyName()); - assertTrue(omMetadataManager.getDeletedTable().isExist(keyName)); + assertEquals(omMetadataManager.getDeletedTable().isExist(keyName), keyExists); deletedKeyNames.add(keyName); } return deletedKeyNames; } - private void validateDeletedKeys( + private void validateDeletedKeys(OMMetadataManager omMetadataManager, List deletedKeyNames) throws IOException { for (String deletedKey : deletedKeyNames) { assertTrue(omMetadataManager.getDeletedTable().isExist( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index 2cd0de920be..c323fecd501 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,12 +23,10 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; -import 
org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.junit.jupiter.api.Test; @@ -42,12 +40,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.db.BatchOperation; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.when; /** * Tests {@link OMKeyPurgeRequest} and {@link OMKeyPurgeResponse}. @@ -115,35 +111,6 @@ private OMRequest createPurgeKeysRequest(List deletedKeys, .build(); } - /** - * Create snapshot and checkpoint directory. - */ - private SnapshotInfo createSnapshot(String snapshotName) throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volumeName, bucketName, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); - // Add to batch and commit to DB. - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - batchOperation.close(); - - String key = SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - return snapshotInfo; - } - private OMRequest preExecute(OMRequest originalOmRequest) throws IOException { OMKeyPurgeRequest omKeyPurgeRequest = new OMKeyPurgeRequest(originalOmRequest); @@ -205,6 +172,8 @@ public void testKeyPurgeInSnapshot() throws Exception { List deletedKeyNames = createAndDeleteKeys(1, null); SnapshotInfo snapInfo = createSnapshot("snap1"); + assertEquals(snapInfo.getLastTransactionInfo(), + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); // The keys should be not present in the active Db's deletedTable for (String deletedKey : deletedKeyNames) { assertFalse(omMetadataManager.getDeletedTable().isExist(deletedKey)); @@ -230,6 +199,12 @@ public void testKeyPurgeInSnapshot() throws Exception { omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L); + SnapshotInfo snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); + assertEquals(snapshotInfoOnDisk, snapInfo); + snapInfo.setLastTransactionInfo(TransactionInfo.valueOf(TransactionInfo.getTermIndex(100L)) + .toByteString()); + assertEquals(snapInfo, updatedSnapshotInfo); OMResponse omResponse = OMResponse.newBuilder() .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance()) .setCmdType(Type.PurgeKeys) @@ -245,7 +220,8 @@ public void testKeyPurgeInSnapshot() throws Exception { // Do manual commit and see whether addToBatch is successful or not. 
omMetadataManager.getStore().commitBatchOperation(batchOperation); } - + snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); + assertEquals(snapshotInfoOnDisk, snapInfo); // The keys should not exist in the DeletedKeys table for (String deletedKey : deletedKeyNames) { assertFalse(omSnapshot.getMetadataManager() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index f636152c35c..e2219d5fcc1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMPerformanceMetrics; @@ -43,9 +44,15 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; import org.apache.hadoop.security.UserGroupInformation; @@ -237,7 +244,7 @@ public void setup() throws Exception { .thenReturn(bucket); when(ozoneManager.resolveBucketLink(any(Pair.class))) .thenReturn(bucket); - OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); + OmSnapshotManager omSnapshotManager = Mockito.spy(new OmSnapshotManager(ozoneManager)); when(ozoneManager.getOmSnapshotManager()) .thenReturn(omSnapshotManager); @@ -285,4 +292,34 @@ public void stop() { omMetrics.unRegister(); framework().clearInlineMocks(); } + + /** + * Create snapshot and checkpoint directory. + */ + protected SnapshotInfo createSnapshot(String snapshotName) throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + BatchOperation batchOperation = omMetadataManager.getStore() + .initBatchOperation(); + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volumeName, bucketName, snapshotName); + // Pre-Execute OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. 
+ OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); + // Add to batch and commit to DB. + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + batchOperation.close(); + + String key = SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + return snapshotInfo; + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 3997f39d7bd..af904382256 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -19,14 +19,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -37,18 +31,15 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseWithFSO; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.io.IOException; import java.util.UUID; @@ -64,69 +55,19 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotCreateRequest class, which handles CreateSnapshot request. 
*/ -public class TestOMSnapshotCreateRequest { - @TempDir - private File anotherTempDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotCreateRequest extends TestSnapshotRequestAndResponse { private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - anotherTempDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if (batchOperation != null) { - batchOperation.close(); - } } @ValueSource(strings = { @@ -139,9 +80,9 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String snapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); doPreExecute(omRequest); } @@ -157,9 +98,9 @@ public void testPreExecute(String snapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String snapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -169,8 +110,8 @@ public void testPreExecuteFailure(String snapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. 
- OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName1); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -180,29 +121,29 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName1); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - String key = getTableKey(volumeName, bucketName, snapshotName1); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Run validateAndUpdateCache. OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); assertNotNull(omClientResponse.getOMResponse()); @@ -226,20 +167,21 @@ public void testValidateAndUpdateCache() throws Exception { // Get value from cache SnapshotInfo snapshotInfoInCache = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfoInCache); assertEquals(snapshotInfoFromProto, snapshotInfoInCache); - - assertEquals(0, omMetrics.getNumSnapshotCreateFails()); - assertEquals(1, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotCreates()); + assertEquals(snapshotInfoInCache.getLastTransactionInfo(), + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); + assertEquals(0, getOmMetrics().getNumSnapshotCreateFails()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotCreates()); } @Test public void testEntryRenamedKeyTable() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); Table snapshotRenamedTable = - omMetadataManager.getSnapshotRenamedTable(); + getOmMetadataManager().getSnapshotRenamedTable(); renameKey("key1", "key2", 0); renameDir("dir1", "dir2", 5); @@ -249,17 +191,17 @@ public void testEntryRenamedKeyTable() throws Exception { // Create snapshot createSnapshot(snapshotName1); - String snapKey = getTableKey(volumeName, - bucketName, snapshotName1); + String snapKey = getTableKey(getVolumeName(), + getBucketName(), snapshotName1); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(snapKey); + 
getOmMetadataManager().getSnapshotInfoTable().get(snapKey); assertNotNull(snapshotInfo); renameKey("key3", "key4", 10); renameDir("dir3", "dir4", 15); // Rename table should have two entries as rename is within snapshot scope. - assertEquals(2, omMetadataManager + assertEquals(2, getOmMetadataManager() .countRowsInTable(snapshotRenamedTable)); // Create snapshot to clear snapshotRenamedTable @@ -269,33 +211,33 @@ public void testEntryRenamedKeyTable() throws Exception { @Test public void testEntryExists() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String key = getTableKey(volumeName, bucketName, snapshotName1); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); OMRequest omRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName1); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Now try to create again to verify error - omRequest = createSnapshotRequest(volumeName, bucketName, snapshotName1); + omRequest = createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); omSnapshotCreateRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getCreateSnapshotResponse()); assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, omResponse.getStatus()); - assertEquals(1, omMetrics.getNumSnapshotCreateFails()); - assertEquals(1, omMetrics.getNumSnapshotActive()); - assertEquals(2, omMetrics.getNumSnapshotCreates()); + assertEquals(1, getOmMetrics().getNumSnapshotCreateFails()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(2, getOmMetrics().getNumSnapshotCreates()); } private void renameKey(String fromKey, String toKey, long offset) @@ -314,15 +256,15 @@ private void renameKey(String fromKey, String toKey, long offset) new OMKeyRenameResponse(omResponse, fromKeyInfo.getKeyName(), toKeyInfo.getKeyName(), toKeyInfo); - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } private void renameDir(String fromKey, String toKey, long offset) throws Exception { String fromKeyParentName = UUID.randomUUID().toString(); - OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(getVolumeName(), + getBucketName(), fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) .setObjectID(100L) .build(); @@ -340,32 +282,32 @@ private void renameDir(String fromKey, String toKey, long offset) new 
OMKeyRenameResponseWithFSO(omResponse, getDBKeyName(fromKeyInfo), getDBKeyName(toKeyInfo), fromKeyParent, null, toKeyInfo, null, true, BucketLayout.FILE_SYSTEM_OPTIMIZED); - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } protected String getDBKeyName(OmKeyInfo keyInfo) throws IOException { - return omMetadataManager.getOzonePathKey( - omMetadataManager.getVolumeId(volumeName), - omMetadataManager.getBucketId(volumeName, bucketName), + return getOmMetadataManager().getOzonePathKey( + getOmMetadataManager().getVolumeId(getVolumeName()), + getOmMetadataManager().getBucketId(getVolumeName(), getBucketName()), keyInfo.getParentObjectID(), keyInfo.getKeyName()); } private void createSnapshot(String snapName) throws Exception { OMRequest omRequest = createSnapshotRequest( - volumeName, bucketName, snapName); + getVolumeName(), getBucketName(), snapName); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); //create entry OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); + omClientResponse.checkAndUpdateDB(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } private OMSnapshotCreateRequest doPreExecute( OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, ozoneManager); + return doPreExecute(originalRequest, getOzoneManager()); } /** @@ -382,15 +324,15 @@ public static OMSnapshotCreateRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - omMetadataManager); - return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + getOmMetadataManager()); + return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index 5a8bb5d7c0d..4c5dc2e77f0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -19,33 +19,21 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import 
org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.util.UUID; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; @@ -61,10 +49,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -72,60 +56,15 @@ * Mostly mirrors TestOMSnapshotCreateRequest. * testEntryNotExist() and testEntryExists() are unique. 
*/ -public class TestOMSnapshotDeleteRequest { - @TempDir - private File folder; +public class TestOMSnapshotDeleteRequest extends TestSnapshotRequestAndResponse { - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - - private String volumeName; - private String bucketName; private String snapshotName; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - - OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB( - volumeName, bucketName, omMetadataManager); - } - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - } - - @ValueSource(strings = { // '-' is allowed. "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", @@ -136,9 +75,9 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String deleteSnapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, deleteSnapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); doPreExecute(omRequest); } @@ -154,9 +93,9 @@ public void testPreExecute(String deleteSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String deleteSnapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, deleteSnapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -166,8 +105,8 @@ public void testPreExecuteFailure(String deleteSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. 
- OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, snapshotName); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -177,27 +116,27 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); OMRequest omRequest = - deleteSnapshotRequest(volumeName, bucketName, snapshotName); + deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); // As we have not still called validateAndUpdateCache, get() should // return null. - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // add key to cache - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), snapshotName, null, Time.now()); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry( + getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Trigger validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -207,14 +146,14 @@ public void testValidateAndUpdateCache() throws Exception { assertEquals(OK, omResponse.getStatus()); // check cache - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, omMetrics.getNumSnapshotCreates()); + assertEquals(0, getOmMetrics().getNumSnapshotCreates()); // Expected -1 because no snapshot was created before. 
- assertEquals(-1, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotDeleted()); - assertEquals(0, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(-1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleted()); + assertEquals(0, getOmMetrics().getNumSnapshotDeleteFails()); } /** @@ -222,25 +161,25 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testEntryNotExist() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); OMRequest omRequest = deleteSnapshotRequest( - volumeName, bucketName, snapshotName); + getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); // Entry does not exist - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Trigger delete snapshot validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 1L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 1L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); - assertEquals(0, omMetrics.getNumSnapshotActive()); - assertEquals(0, omMetrics.getNumSnapshotDeleted()); - assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(0, getOmMetrics().getNumSnapshotDeleted()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); } /** @@ -249,50 +188,50 @@ public void testEntryNotExist() throws Exception { */ @Test public void testEntryExist() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); OMRequest omRequest1 = - createSnapshotRequest(volumeName, bucketName, snapshotName); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest1, ozoneManager); + TestOMSnapshotCreateRequest.doPreExecute(omRequest1, getOzoneManager()); - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Create snapshot entry - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1L); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - assertEquals(1, omMetrics.getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); OMRequest omRequest2 = - deleteSnapshotRequest(volumeName, bucketName, snapshotName); + deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest2); // Delete 
snapshot entry OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); // Response should be successful OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(OK, omResponse.getStatus()); - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); // The snapshot entry should still exist in the table, // but marked as DELETED. assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); assertThat(snapshotInfo.getDeletionTime()).isGreaterThan(0L); - assertEquals(0, omMetrics.getNumSnapshotActive()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); // Now delete snapshot entry again, expect error. - omRequest2 = deleteSnapshotRequest(volumeName, bucketName, snapshotName); + omRequest2 = deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); omSnapshotDeleteRequest = doPreExecute(omRequest2); omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 3L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 3L); omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -300,11 +239,11 @@ public void testEntryExist() throws Exception { assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); // Snapshot entry should still be there. - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); } private OMSnapshotDeleteRequest doPreExecute( @@ -313,7 +252,7 @@ private OMSnapshotDeleteRequest doPreExecute( new OMSnapshotDeleteRequest(originalRequest); OMRequest modifiedRequest = - omSnapshotDeleteRequest.preExecute(ozoneManager); + omSnapshotDeleteRequest.preExecute(getOzoneManager()); return new OMSnapshotDeleteRequest(modifiedRequest); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java new file mode 100644 index 00000000000..247f322dfcf --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.deleteSnapshotRequest; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.moveSnapshotTableKeyRequest; + +/** + * Class to test OMSnapshotMoveTableKeysRequest.
+ */ +public class TestOMSnapshotMoveTableKeysRequest extends TestSnapshotRequestAndResponse { + + private String snapshotName1; + private String snapshotName2; + private SnapshotInfo snapshotInfo1; + private SnapshotInfo snapshotInfo2; + + @BeforeEach + public void setup() throws Exception { + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + } + + public TestOMSnapshotMoveTableKeysRequest() { + super(true); + } + + private void createSnapshots(boolean createSecondSnapshot) throws Exception { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); + snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); + if (createSecondSnapshot) { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); + snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); + } + } + + private SnapshotInfo deleteSnapshot(SnapshotInfo snapshotInfo, long transactionIndex) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest = deleteSnapshotRequest(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), snapshotInfo.getName()); + OMSnapshotDeleteRequest omSnapshotDeleteRequest = new OMSnapshotDeleteRequest(omRequest); + omSnapshotDeleteRequest.preExecute(getOzoneManager()); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), transactionIndex); + return SnapshotUtils.getSnapshotInfo(getOzoneManager(), snapshotInfo.getTableKey()); + } + + @Test + public void testValidateAndUpdateCacheWithNextSnapshotInactive() throws Exception { + createSnapshots(true); + snapshotInfo2 = deleteSnapshot(snapshotInfo2, 0); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( + omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); + Assertions.assertFalse(omClientResponse.getOMResponse().getSuccess()); + Assertions.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_SNAPSHOT_ERROR, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testPreExecuteWithInvalidDeletedKeyPrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 10, 0)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + 
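The preExecute tests in this class all come down to two validations over the flattened DB keys carried by a move-table-keys request. The sketch below is an illustration only, not the actual OMSnapshotMoveTableKeysRequest.preExecute logic; the class name, method name, and bucketDbPrefix parameter are invented for the example. It only mirrors what the tests assert: keys outside the snapshot bucket's DB prefix are rejected (the tests expect an OMException with INVALID_KEY_NAME), and duplicate keys are rejected (INVALID_REQUEST).

// Self-contained illustrative sketch (not Ozone code) of the prefix and duplicate checks
// exercised by the preExecute tests in this class.
final class MoveTableKeysValidationSketch {

  private MoveTableKeysValidationSketch() {
  }

  // Throws if a key lies outside the expected bucket DB prefix or appears more than once.
  static void validate(String bucketDbPrefix, java.util.List<String> dbKeys) {
    java.util.Set<String> seen = new java.util.HashSet<>();
    for (String dbKey : dbKeys) {
      if (!dbKey.startsWith(bucketDbPrefix)) {
        throw new IllegalArgumentException(
            "Key " + dbKey + " does not belong to bucket prefix " + bucketDbPrefix);
      }
      if (!seen.add(dbKey)) {
        throw new IllegalArgumentException("Duplicate key in move request: " + dbKey);
      }
    }
  }
}

For instance, validate("/vol1/buck1/", java.util.Arrays.asList("/vol1/buck1/k1", "/other/buck/k2")) fails on the foreign prefix, analogous to testPreExecuteWithInvalidDeletedKeyPrefix, while passing the same key twice fails the way the duplicate-key tests below do.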
@Test + public void testPreExecuteWithInvalidDeletedDirPrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 1)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), deletedDirs, Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidNumberKeys() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 10)) + .flatMap(List::stream).collect(Collectors.toList()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 0, 0)) + .flatMap(List::stream).collect(Collectors.toList()); + List> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); + renameKeys.add(Pair.of(getOmMetadataManager().getRenameKey(getVolumeName(), getBucketName(), 11), null)); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, deletedDirs, renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + omRequest = omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager()); + for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedDir : + omRequest.getSnapshotMoveTableKeysRequest().getDeletedDirsList()) { + Assertions.assertEquals(1, deletedDir.getKeyInfosList().size()); + } + + for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedKey : + omRequest.getSnapshotMoveTableKeysRequest().getDeletedKeysList()) { + Assertions.assertNotEquals(0, deletedKey.getKeyInfosList().size()); + } + + for (HddsProtos.KeyValue renameKey : omRequest.getSnapshotMoveTableKeysRequest().getRenamedKeysList()) { + Assertions.assertTrue(renameKey.hasKey() && renameKey.hasValue()); + } + + } + + @Test + public void testPreExecuteWithInvalidRenamePrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List> renameKeys = + Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), + getRenameKeys(invalidVolumeName, invalidBucketName, 0, 10, snapshotName2)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = 
moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0); + List>> deletedDirs = getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1); + List> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, deletedDirs, renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + // perform preExecute. + omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( + omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); + Assertions.assertTrue(omClientResponse.getOMResponse().getSuccess()); + Assertions.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateDeletedKey() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateDeletedDir() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), deletedDirs, Collections.emptyList()); + OMSnapshotMoveTableKeysRequest 
omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateRenameKey() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List> renameKeys = + Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), + getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 8edd096e766..1c44decdfda 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -19,44 +19,32 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import com.google.protobuf.ByteString; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; @@ -68,10 +56,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -79,49 +65,16 @@ /** * Tests OMSnapshotPurgeRequest class. */ -public class TestOMSnapshotPurgeRequestAndResponse { - private List checkpointPaths = new ArrayList<>(); - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OMMetadataManager omMetadataManager; - private OmSnapshotManager omSnapshotManager; - private AuditLogger auditLogger; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotPurgeRequestAndResponse extends TestSnapshotRequestAndResponse { + private final List checkpointPaths = new ArrayList<>(); private String keyName; + public TestOMSnapshotPurgeRequestAndResponse() { + super(true); + } + @BeforeEach - void setup(@TempDir File testDir) throws Exception { - ozoneManager = mock(OzoneManager.class); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - when(ozoneManager.isAdmin(any())).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - - ReferenceCounted rcOmMetadataReader = - mock(ReferenceCounted.class); - when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); - omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); + public void setup() throws Exception { keyName = UUID.randomUUID().toString(); } @@ -132,17 +85,14 @@ private List 
createSnapshots(int numSnapshotKeys) throws Exception { Random random = new Random(); - // Add volume, bucket and key entries to OM DB. - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); // Create Snapshot and CheckpointDir List purgeSnapshots = new ArrayList<>(numSnapshotKeys); for (int i = 1; i <= numSnapshotKeys; i++) { String snapshotName = keyName + "-" + random.nextLong(); createSnapshotCheckpoint(snapshotName); - purgeSnapshots.add(SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName)); + purgeSnapshots.add(SnapshotInfo.getTableKey(getVolumeName(), + getBucketName(), snapshotName)); } return purgeSnapshots; @@ -172,39 +122,7 @@ private OMRequest createPurgeKeysRequest(List purgeSnapshotKeys) { * Create snapshot and checkpoint directory. */ private void createSnapshotCheckpoint(String snapshotName) throws Exception { - createSnapshotCheckpoint(volumeName, bucketName, snapshotName); - } - - private void createSnapshotCheckpoint(String volume, - String bucket, - String snapshotName) throws Exception { - OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volume, bucket, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - // Add to batch and commit to DB. - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - } - - String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); - // Check the DB is still there - assertTrue(Files.exists(snapshotDirPath)); - checkpointPaths.add(snapshotDirPath); + checkpointPaths.add(createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName)); } private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) @@ -212,7 +130,7 @@ private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) OMSnapshotPurgeRequest omSnapshotPurgeRequest = new OMSnapshotPurgeRequest(originalOmRequest); OMRequest modifiedOmRequest = omSnapshotPurgeRequest - .preExecute(ozoneManager); + .preExecute(getOzoneManager()); return new OMSnapshotPurgeRequest(modifiedOmRequest); } @@ -224,48 +142,48 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) // validateAndUpdateCache for OMSnapshotPurgeRequest. OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); // Commit to DB. 
- try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); } } @Test public void testValidateAndUpdateCache() throws Exception { - long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); for (String snapshotTableKey: snapshotDbKeysToPurge) { - assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey)); } - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); } // Check if the entries are deleted. - assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertTrue(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); // Check if all the checkpoints are cleared. 
for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } - assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount + 1, getOmMetrics().getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, getOmMetrics().getNumSnapshotPurgeFails()); } /** @@ -273,8 +191,8 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testValidateAndUpdateCacheFailure() throws Exception { - long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); @@ -283,17 +201,17 @@ public void testValidateAndUpdateCacheFailure() throws Exception { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); - assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount, getOmMetrics().getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, getOmMetrics().getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after @@ -306,7 +224,7 @@ public void testSnapshotChainCleanup(int index) throws Exception { // Before purge, check snapshot chain OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) omMetadataManager; + (OmMetadataManagerImpl) getOmMetadataManager(); SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); SnapshotInfo snapInfo = metadataManager.getSnapshotInfoTable() @@ -340,8 +258,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { snapInfo.getSnapshotId()); } - long rowsInTableBeforePurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long rowsInTableBeforePurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); // Purge Snapshot of the given index. 
List toPurgeList = Collections.singletonList(snapShotToPurge); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( @@ -364,8 +282,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { .getGlobalPreviousSnapshotId(), prevGlobalSnapId); } - assertNotEquals(rowsInTableBeforePurge, omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); + assertNotEquals(rowsInTableBeforePurge, getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable())); } private static Stream snapshotPurgeCases() { @@ -419,14 +337,14 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int toIndex, boolean createInBucketOrder) throws Exception { SnapshotChainManager chainManager = - ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager(); + ((OmMetadataManagerImpl) getOmMetadataManager()).getSnapshotChainManager(); int totalKeys = numberOfBuckets * numberOfKeysPerBucket; List buckets = new ArrayList<>(); for (int i = 0; i < numberOfBuckets; i++) { String bucketNameLocal = "bucket-" + UUID.randomUUID(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketNameLocal, - omMetadataManager); + OMRequestTestUtils.addVolumeAndBucketToDB(getVolumeName(), bucketNameLocal, + getOmMetadataManager()); buckets.add(bucketNameLocal); } @@ -437,26 +355,43 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int bucketIndex = createInBucketOrder ? i : j; String bucket = buckets.get(bucketIndex % numberOfBuckets); String snapshotName = UUID.randomUUID().toString(); - createSnapshotCheckpoint(volumeName, bucket, snapshotName); + createSnapshotCheckpoint(getVolumeName(), bucket, snapshotName); String snapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucket, snapshotName); + SnapshotInfo.getTableKey(getVolumeName(), bucket, snapshotName); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey); snapshotInfoList.add(snapshotInfo); } } - long numberOfSnapshotBeforePurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long numberOfSnapshotBeforePurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); assertEquals(totalKeys, numberOfSnapshotBeforePurge); assertEquals(totalKeys, chainManager.getGlobalSnapshotChain().size()); - - validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoList); - + Map expectedTransactionInfos = new HashMap<>(); + // Ratis transaction uses term index 1 while creating snapshot. + ByteString expectedLastTransactionVal = TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)) + .toByteString(); + for (SnapshotInfo snapshotInfo : snapshotInfoList) { + expectedTransactionInfos.put(snapshotInfo.getSnapshotId(), expectedLastTransactionVal); + } + validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoList, expectedTransactionInfos); + // Ratis transaction uses term index 200 while purging snapshot. + expectedLastTransactionVal = TransactionInfo.valueOf(TransactionInfo.getTermIndex(200L)) + .toByteString(); List purgeSnapshotKeys = new ArrayList<>(); for (int i = fromIndex; i <= toIndex; i++) { SnapshotInfo purgeSnapshotInfo = snapshotInfoList.get(i); - String purgeSnapshotKey = SnapshotInfo.getTableKey(volumeName, + UUID snapId = purgeSnapshotInfo.getSnapshotId(); + // expecting nextPathSnapshot & nextGlobalSnapshot in chain gets updated. 
+ if (chainManager.hasNextGlobalSnapshot(snapId)) { + expectedTransactionInfos.put(chainManager.nextGlobalSnapshot(snapId), expectedLastTransactionVal); + } + if (chainManager.hasNextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId)) { + expectedTransactionInfos.put(chainManager.nextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId), + expectedLastTransactionVal); + } + String purgeSnapshotKey = SnapshotInfo.getTableKey(getVolumeName(), purgeSnapshotInfo.getBucketName(), purgeSnapshotInfo.getName()); purgeSnapshotKeys.add(purgeSnapshotKey); @@ -469,34 +404,34 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( for (int i = 0; i < totalKeys; i++) { if (i < fromIndex || i > toIndex) { SnapshotInfo info = snapshotInfoList.get(i); - String snapshotKey = SnapshotInfo.getTableKey(volumeName, + String snapshotKey = SnapshotInfo.getTableKey(getVolumeName(), info.getBucketName(), info.getName()); snapshotInfoListAfterPurge.add( - omMetadataManager.getSnapshotInfoTable().get(snapshotKey)); + getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey)); } } long expectNumberOfSnapshotAfterPurge = totalKeys - (toIndex - fromIndex + 1); - long actualNumberOfSnapshotAfterPurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long actualNumberOfSnapshotAfterPurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); assertEquals(expectNumberOfSnapshotAfterPurge, actualNumberOfSnapshotAfterPurge); assertEquals(expectNumberOfSnapshotAfterPurge, chainManager .getGlobalSnapshotChain().size()); - validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( - snapshotInfoListAfterPurge); + validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoListAfterPurge, expectedTransactionInfos); } private void validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( - List snapshotInfoList - ) throws IOException { + List snapshotInfoList, Map expectedTransactionInfos) throws IOException { if (snapshotInfoList.isEmpty()) { return; } - + for (SnapshotInfo snapshotInfo : snapshotInfoList) { + assertEquals(snapshotInfo.getLastTransactionInfo(), expectedTransactionInfos.get(snapshotInfo.getSnapshotId())); + } OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) omMetadataManager; + (OmMetadataManagerImpl) getOmMetadataManager(); SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java index ab2bac1bd0e..a746597288a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -17,17 +17,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import 
org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -35,17 +26,14 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; @@ -62,75 +50,19 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotRenameRequest class, which handles RenameSnapshot request. */ -public class TestOMSnapshotRenameRequest { - - @TempDir - private File anotherTempDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotRenameRequest extends TestSnapshotRequestAndResponse { private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - anotherTempDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - anotherTempDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - 
when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if (batchOperation != null) { - batchOperation.close(); - } } @ValueSource(strings = { @@ -143,11 +75,11 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String toSnapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); doPreExecute(omRequest); } @@ -167,10 +99,10 @@ public void testPreExecute(String toSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String toSnapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage().contains("Invalid snapshot name: " + toSnapshotName)); @@ -179,8 +111,8 @@ public void testPreExecuteFailure(String toSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. 
- OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, snapshotName1, snapshotName2); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1, snapshotName2); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -190,39 +122,39 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, snapshotName1, snapshotName2); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); - String key = getTableKey(volumeName, bucketName, snapshotName1); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Add key to cache. - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), snapshotName1, UUID.randomUUID(), Time.now()); snapshotInfo.setReferencedSize(1000L); snapshotInfo.setReferencedReplicatedSize(3 * 1000L); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry( + getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Run validateAndUpdateCache. 
OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 2L); assertNotNull(omClientResponse.getOMResponse()); @@ -244,56 +176,56 @@ public void testValidateAndUpdateCache() throws Exception { SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); - String key2 = getTableKey(volumeName, bucketName, snapshotName2); + String key2 = getTableKey(getVolumeName(), getBucketName(), snapshotName2); // Get value from cache SnapshotInfo snapshotInfoNewInCache = - omMetadataManager.getSnapshotInfoTable().get(key2); + getOmMetadataManager().getSnapshotInfoTable().get(key2); assertNotNull(snapshotInfoNewInCache); assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); SnapshotInfo snapshotInfoOldInCache = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNull(snapshotInfoOldInCache); } @Test public void testEntryExists() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); - String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // First make sure we have two snapshots. OzoneManagerProtocolProtos.OMRequest createOmRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName1); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); createOmRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName2); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName2); omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -303,24 +235,24 @@ public void testEntryExists() throws Exception { @Test public void testEntryNotFound() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); - String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -330,7 +262,7 @@ public void testEntryNotFound() throws Exception { private OMSnapshotRenameRequest doPreExecute( OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, ozoneManager); + return doPreExecute(originalRequest, getOzoneManager()); } public static OMSnapshotRenameRequest doPreExecute( @@ -344,15 +276,15 @@ public static OMSnapshotRenameRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - omMetadataManager); - return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + getOmMetadataManager()); + return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index b5bfc2714b0..380922f9e22 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -18,32 +18,23 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse; -import 
org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.io.TempDir; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -60,37 +51,13 @@ * Tests TestOMSnapshotSetPropertyRequest * TestOMSnapshotSetPropertyResponse class. */ -public class TestOMSnapshotSetPropertyRequestAndResponse { - private BatchOperation batchOperation; - private OzoneManager ozoneManager; - private OMMetadataManager omMetadataManager; - private OMMetrics omMetrics; - private String volumeName; - private String bucketName; +public class TestOMSnapshotSetPropertyRequestAndResponse extends TestSnapshotRequestAndResponse { private String snapName; private long exclusiveSize; private long exclusiveSizeAfterRepl; @BeforeEach - void setup(@TempDir File testDir) throws Exception { - omMetrics = OMMetrics.create(); - ozoneManager = mock(OzoneManager.class); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); + void setup() { snapName = UUID.randomUUID().toString(); exclusiveSize = 2000L; exclusiveSizeAfterRepl = 6000L; @@ -98,11 +65,11 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { - long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); @@ -111,28 +78,27 @@ public void testValidateAndUpdateCache() throws IOException { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(request); OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest - .preExecute(ozoneManager); + .preExecute(getOzoneManager()); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = 
(OMSnapshotSetPropertyResponse) omSnapshotSetPropertyRequest - .validateAndUpdateCache(ozoneManager, 200L); + .validateAndUpdateCache(getOzoneManager(), 200L); // Commit to DB. - batchOperation = omMetadataManager.getStore().initBatchOperation(); - omSnapshotSetPropertyResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omSnapshotSetPropertyResponse.checkAndUpdateDB(getOmMetadataManager(), + getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), - omMetrics.getNumSnapshotSetProperties()); - assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); + getOmMetrics().getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, getOmMetrics().getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> - iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { + iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); assertCacheValues(snapshotEntry.getKey()); @@ -149,11 +115,11 @@ public void testValidateAndUpdateCache() throws IOException { */ @Test public void testValidateAndUpdateCacheFailure() throws IOException { - long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); @@ -161,27 +127,27 @@ public void testValidateAndUpdateCacheFailure() throws IOException { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); for (OMRequest omRequest: snapshotUpdateSizeRequests) { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); - OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(getOzoneManager()); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) - omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotSetPropertyRequest.validateAndUpdateCache(getOzoneManager(), 200L); assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); } - assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyCount, getOmMetrics().getNumSnapshotSetProperties()); 
assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), - omMetrics.getNumSnapshotSetPropertyFails()); + getOmMetrics().getNumSnapshotSetPropertyFails()); } private void assertCacheValues(String dbKey) { - CacheValue cacheValue = omMetadataManager + CacheValue cacheValue = getOmMetadataManager() .getSnapshotInfoTable() .getCacheValue(new CacheKey<>(dbKey)); assertEquals(exclusiveSize, cacheValue.getCacheValue().getExclusiveSize()); @@ -193,7 +159,7 @@ private List createSnapshotUpdateSizeRequest() throws IOException { List omRequests = new ArrayList<>(); try (TableIterator> - iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { + iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { String snapDbKey = iterator.next().getKey(); SnapshotSize snapshotSize = SnapshotSize.newBuilder() @@ -220,8 +186,8 @@ private List createSnapshotUpdateSizeRequest() private void createSnapshotDataForTest() throws IOException { // Create 10 Snapshots for (int i = 0; i < 10; i++) { - OMRequestTestUtils.addSnapshotToTableCache(volumeName, bucketName, - snapName + i, omMetadataManager); + OMRequestTestUtils.addSnapshotToTableCache(getVolumeName(), getBucketName(), + snapName + i, getOmMetadataManager()); } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 7f74f3d17ec..a370c20ad1b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -27,6 +27,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -98,7 +99,8 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception { snapshotName, snapshotId, Time.now()); - + snapshotInfo.setLastTransactionInfo( + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); // confirm table is empty assertEquals(0, omMetadataManager .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java new file mode 100644 index 00000000000..d2e2d94ec73 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** + * Test class to test OMSnapshotMoveTableKeysResponse. 
+ */ +public class TestOMSnapshotMoveTableKeysResponse extends TestSnapshotRequestAndResponse { + + private String snapshotName1; + private String snapshotName2; + private SnapshotInfo snapshotInfo1; + private SnapshotInfo snapshotInfo2; + + @BeforeEach + public void setup() throws Exception { + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + } + + public TestOMSnapshotMoveTableKeysResponse() { + super(true); + } + + private void createSnapshots(boolean createSecondSnapshot) throws Exception { + addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 0, + 10, snapshotName1)); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 0, + 10, 10, 0).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedDirTable(), + getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1).stream() + .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); + snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); + addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 5, + 15, snapshotName2)); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 5, + 8, 10, 10).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 8, + 15, 10, 0).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedDirTable(), + getDeletedDirKeys(getVolumeName(), getBucketName(), 5, 15, 1).stream() + .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); + if (createSecondSnapshot) { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); + snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); + } + } + + private void addDataToTable(Table table, List> vals) throws IOException { + for (Pair pair : vals) { + table.put(pair.getKey(), pair.getValue()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws Exception { + createSnapshots(nextSnapshotExists); + + try (ReferenceCounted snapshot1 = getOmSnapshotManager().getSnapshot(getVolumeName(), getBucketName(), + snapshotName1); + ReferenceCounted snapshot2 = nextSnapshotExists ? 
getOmSnapshotManager().getSnapshot( + getVolumeName(), getBucketName(), snapshotName2) : null) { + OmSnapshot snapshot = snapshot1.get(); + List deletedTable = new ArrayList<>(); + List deletedDirTable = new ArrayList<>(); + List renamedTable = new ArrayList<>(); + Map renameEntries = new HashMap<>(); + snapshot.getMetadataManager().getDeletedTable().iterator() + .forEachRemaining(entry -> { + try { + deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) + .addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> omKeyInfo.getProtobuf( + ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + snapshot.getMetadataManager().getDeletedDirTable().iterator() + .forEachRemaining(entry -> { + try { + deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) + .addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + snapshot.getMetadataManager().getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { + try { + renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()); + renameEntries.put(entry.getKey(), entry.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + OMSnapshotMoveTableKeysResponse response = new OMSnapshotMoveTableKeysResponse( + OzoneManagerProtocolProtos.OMResponse.newBuilder().setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.SnapshotMoveTableKeys).build(), + snapshotInfo1, nextSnapshotExists ? snapshotInfo2 : null, deletedTable, deletedDirTable, renamedTable); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + response.addToDBBatch(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); + } + Assertions.assertTrue(snapshot.getMetadataManager().getDeletedTable().isEmpty()); + Assertions.assertTrue(snapshot.getMetadataManager().getDeletedDirTable().isEmpty()); + Assertions.assertTrue(snapshot.getMetadataManager().getSnapshotRenamedTable().isEmpty()); + OMMetadataManager nextMetadataManager = + nextSnapshotExists ? snapshot2.get().getMetadataManager() : getOmMetadataManager(); + AtomicInteger count = new AtomicInteger(); + nextMetadataManager.getDeletedTable().iterator().forEachRemaining(entry -> { + count.getAndIncrement(); + try { + int maxCount = count.get() >= 6 && count.get() <= 8 ? 
20 : 10; + Assertions.assertEquals(maxCount, entry.getValue().getOmKeyInfoList().size()); + List versions = entry.getValue().getOmKeyInfoList().stream().map(OmKeyInfo::getKeyLocationVersions) + .map(omKeyInfo -> omKeyInfo.get(0).getVersion()).collect(Collectors.toList()); + List expectedVersions = new ArrayList<>(); + if (maxCount == 20) { + expectedVersions.addAll(LongStream.range(10, 20).boxed().collect(Collectors.toList())); + } + expectedVersions.addAll(LongStream.range(0, 10).boxed().collect(Collectors.toList())); + Assertions.assertEquals(expectedVersions, versions); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + Assertions.assertEquals(15, count.get()); + count.set(0); + + nextMetadataManager.getDeletedDirTable().iterator().forEachRemaining(entry -> count.getAndIncrement()); + Assertions.assertEquals(15, count.get()); + count.set(0); + nextMetadataManager.getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { + try { + String expectedValue = renameEntries.getOrDefault(entry.getKey(), entry.getValue()); + Assertions.assertEquals(expectedValue, entry.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + count.getAndIncrement(); + }); + Assertions.assertEquals(15, count.get()); + } + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java index 3948f4fab80..e04891da83a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java @@ -20,7 +20,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotManager; @@ -56,25 +57,26 @@ public class TestSnapshotDeletingService { private SnapshotChainManager chainManager; @Mock private OmMetadataManagerImpl omMetadataManager; - @Mock - private ScmBlockLocationProtocol scmClient; private final OzoneConfiguration conf = new OzoneConfiguration();; private final long sdsRunInterval = Duration.ofMillis(1000).toMillis(); private final long sdsServiceTimeout = Duration.ofSeconds(10).toMillis(); - private static Stream testCasesForIgnoreSnapshotGc() { - SnapshotInfo filteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true).setName("snap1").build(); - SnapshotInfo unFilteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1").build(); + private static Stream testCasesForIgnoreSnapshotGc() throws IOException { + SnapshotInfo flushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true) + .setLastTransactionInfo(TransactionInfo.valueOf(1, 1).toByteString()) + .setName("snap1").build(); + SnapshotInfo unFlushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1") + .setLastTransactionInfo(TransactionInfo.valueOf(0, 0).toByteString()).build(); return Stream.of( - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(unFilteredSnapshot, 
SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); } @ParameterizedTest @@ -87,9 +89,15 @@ public void testProcessSnapshotLogicInSDS(SnapshotInfo snapshotInfo, Mockito.when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); Mockito.when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf); + if (status == SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED) { + Table transactionInfoTable = Mockito.mock(Table.class); + Mockito.when(omMetadataManager.getTransactionInfoTable()).thenReturn(transactionInfoTable); + Mockito.when(transactionInfoTable.getSkipCache(Mockito.anyString())) + .thenReturn(TransactionInfo.valueOf(1, 1)); + } SnapshotDeletingService snapshotDeletingService = - new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager, scmClient); + new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager); snapshotInfo.setSnapshotStatus(status); assertEquals(expectedOutcome, snapshotDeletingService.shouldIgnoreSnapshot(snapshotInfo)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index c5ae809718e..f49bfc33976 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -38,6 +39,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -168,6 +170,7 @@ public void testAddSnapshot() throws Exception { } assertEquals(snapshotID3, chainManager.getLatestGlobalSnapshotId()); + assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID3, chainManager.getLatestPathSnapshotId( String.join("/", "vol1", "bucket1"))); @@ -285,6 +288,7 @@ public void 
testChainFromLoadFromTable(boolean increasingTIme) assertFalse(chainManager.isSnapshotChainCorrupted()); // check if snapshots loaded correctly from snapshotInfoTable assertEquals(snapshotID2, chainManager.getLatestGlobalSnapshotId()); + assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID2, chainManager.nextGlobalSnapshot(snapshotID1)); assertEquals(snapshotID1, chainManager.previousPathSnapshot(String .join("/", "vol1", "bucket1"), snapshotID2)); @@ -305,6 +309,34 @@ public void testChainFromLoadFromTable(boolean increasingTIme) () -> chainManager.nextGlobalSnapshot(snapshotID1)); } + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 5, 10}) + public void testSnapshotChainIterator(int numberOfSnapshots) throws IOException { + Table snapshotInfo = omMetadataManager.getSnapshotInfoTable(); + List snapshotInfoList = new ArrayList<>(); + + UUID prevSnapshotID = null; + long time = System.currentTimeMillis(); + for (int i = 0; i < numberOfSnapshots; i++) { + UUID snapshotID = UUID.randomUUID(); + SnapshotInfo snapInfo = createSnapshotInfo(snapshotID, prevSnapshotID, + prevSnapshotID, time++); + snapshotInfo.put(snapshotID.toString(), snapInfo); + prevSnapshotID = snapshotID; + snapshotInfoList.add(snapInfo); + } + chainManager = new SnapshotChainManager(omMetadataManager); + assertFalse(chainManager.isSnapshotChainCorrupted()); + List reverseChain = Lists.newArrayList(chainManager.iterator(true)); + Collections.reverse(reverseChain); + List forwardChain = Lists.newArrayList(chainManager.iterator(false)); + List expectedChain = snapshotInfoList.stream().map(SnapshotInfo::getSnapshotId).collect(Collectors.toList()); + assertEquals(expectedChain, reverseChain); + assertEquals(expectedChain, forwardChain); + assertEquals(forwardChain, reverseChain); + + } + private static Stream invalidSnapshotChain() { List nodes = IntStream.range(0, 5) .mapToObj(i -> UUID.randomUUID()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index dc00433e179..29e0115861f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -19,12 +19,18 @@ package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; import org.apache.hadoop.util.Time; +import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -35,6 +41,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; import 
static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -114,4 +121,44 @@ public void testSnapshotSSTFilteredFlag() throws Exception { snapshotInfo.put(EXPECTED_SNAPSHOT_KEY, info); assertTrue(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).isSstFiltered()); } + + @Test + public void testLastTransactionInfo() throws Exception { + Table snapshotInfo = + omMetadataManager.getSnapshotInfoTable(); + SnapshotInfo info = createSnapshotInfo(); + snapshotInfo.put(EXPECTED_SNAPSHOT_KEY, info); + assertNull(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).getLastTransactionInfo()); + // checking if true value is returned when snapshot is null. + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, (SnapshotInfo)null)); + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(0, 0)); + // Checking if changes have been flushed when lastTransactionInfo is null + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, info)); + TermIndex termIndex = TermIndex.valueOf(1, 1); + info.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + // Checking if changes to snapshot object has been updated but not updated on cache or disk. + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + snapshotInfo.addCacheEntry(new CacheKey<>(EXPECTED_SNAPSHOT_KEY), CacheValue.get(termIndex.getIndex(), info)); + + assertEquals(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).getLastTransactionInfo(), info.getLastTransactionInfo()); + + // Checking if changes have not been flushed when snapshot last transaction info is behind OmTransactionTable value. + assertFalse(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + omMetadataManager.getTransactionInfoTable().addCacheEntry(new CacheKey<>(OzoneConsts.TRANSACTION_INFO_KEY), + CacheValue.get(termIndex.getIndex(), TransactionInfo.valueOf(1, 1))); + assertFalse(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + + // Checking changes are flushed when transaction is equal. + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, + TransactionInfo.valueOf(1, 1)); + + + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + // Checking changes are flushed when transactionIndex is greater . + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, 2)); + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + // Checking changes are flushed when both term & transactionIndex is greater. + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(2, 2)); + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java new file mode 100644 index 00000000000..e60e23de22a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Base class to test snapshot functionalities. 
+ */ +public class TestSnapshotRequestAndResponse { + @TempDir + private File testDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + private OmSnapshotManager omSnapshotManager; + + private String volumeName; + private String bucketName; + private boolean isAdmin; + + public BatchOperation getBatchOperation() { + return batchOperation; + } + + public String getBucketName() { + return bucketName; + } + + public boolean isAdmin() { + return isAdmin; + } + + public OmMetadataManagerImpl getOmMetadataManager() { + return omMetadataManager; + } + + public OMMetrics getOmMetrics() { + return omMetrics; + } + + public OmSnapshotManager getOmSnapshotManager() { + return omSnapshotManager; + } + + public OzoneManager getOzoneManager() { + return ozoneManager; + } + + public File getTestDir() { + return testDir; + } + + public String getVolumeName() { + return volumeName; + } + + protected TestSnapshotRequestAndResponse() { + this.isAdmin = false; + } + + protected TestSnapshotRequestAndResponse(boolean isAdmin) { + this.isAdmin = isAdmin; + } + + @BeforeEach + public void baseSetup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + testDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + testDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(isAdmin); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + IAccessAuthorizer accessAuthorizer = mock(IAccessAuthorizer.class); + when(ozoneManager.getAccessAuthorizer()).thenReturn(accessAuthorizer); + when(accessAuthorizer.isNative()).thenReturn(false); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } + } + + protected Path createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volume, bucket, snapshotName); + // Pre-Execute 
OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. + OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + // Add to batch and commit to DB. + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } + + String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), + checkpointPrefix + snapshotInfo.getCheckpointDir()); + // Check the DB is still there + assertTrue(Files.exists(snapshotDirPath)); + return snapshotDirPath; + } + + protected List>> getDeletedKeys(String volume, String bucket, + int startRange, int endRange, + int numberOfKeys, + int minVersion) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> Pair.of(omMetadataManager.getOzoneDeletePathKey(i, + omMetadataManager.getOzoneKey(volume, bucket, "key" + String.format("%010d", i))), + IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, + ReplicationConfig.getDefault(ozoneManager.getConfiguration()), + new OmKeyLocationInfoGroup(minVersion + cnt, new ArrayList<>(), false)) + .setCreationTime(0).setModificationTime(0).build()) + .collect(Collectors.toList()))) + .collect(Collectors.toList()); + } + + protected List> getRenameKeys(String volume, String bucket, + int startRange, int endRange, + String renameKeyPrefix) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> { + try { + return Pair.of(omMetadataManager.getRenameKey(volume, bucket, i), + omMetadataManager.getOzoneKeyFSO(volume, bucket, renameKeyPrefix + i)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + } + + protected List>> getDeletedDirKeys(String volume, String bucket, + int startRange, int endRange, int numberOfKeys) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> { + try { + return Pair.of(omMetadataManager.getOzoneDeletePathKey(i, + omMetadataManager.getOzoneKeyFSO(volume, bucket, "1/key" + i)), + IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, + ReplicationConfig.getDefault(ozoneManager.getConfiguration())).build()) + .collect(Collectors.toList())); + } catch (IOException e) { + throw new RuntimeException(e); + } + }) + .collect(Collectors.toList()); + } + +} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 7c293ff1861..0882de3bf4f 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -31,6 +31,7 @@ import javax.sql.DataSource; import java.sql.Connection; import java.sql.SQLException; +import java.util.Arrays; /** * 
Class used to create tables that are required for tracking containers. @@ -69,11 +70,39 @@ public enum UnHealthyContainerStates { public void initializeSchema() throws SQLException { Connection conn = dataSource.getConnection(); dslContext = DSL.using(conn); - if (!TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) { + + if (TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) { + // Drop the existing constraint if it exists + String constraintName = UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1"; + dslContext.alterTable(UNHEALTHY_CONTAINERS_TABLE_NAME) + .dropConstraint(constraintName) + .execute(); + + // Add the updated constraint with all enum states + addUpdatedConstraint(); + } else { + // Create the table if it does not exist createUnhealthyContainersTable(); } } + /** + * Add the updated constraint to the table. + */ + private void addUpdatedConstraint() { + // Get all enum values as a list of strings + String[] enumStates = Arrays.stream(UnHealthyContainerStates.values()) + .map(Enum::name) + .toArray(String[]::new); + + // Alter the table to add the updated constraint + dslContext.alterTable(UNHEALTHY_CONTAINERS_TABLE_NAME) + .add(DSL.constraint(UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1") + .check(field(name("container_state")) + .in(enumStates))) + .execute(); + } + /** * Create the Missing Containers table. */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 86ef6c022d5..cbdc198f8aa 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -408,13 +408,18 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); - List emptyMissingFiltered = containers.stream() - .filter( - container -> !container.getContainerState() - .equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) - .collect( - Collectors.toList()); - for (UnhealthyContainers c : emptyMissingFiltered) { + + // Filtering out EMPTY_MISSING and NEGATIVE_SIZE containers from the response. + // These container states are not being inserted into the database as they represent + // edge cases that are not critical to track as unhealthy containers. 
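The stream in the next hunk applies the exclusion described in the comment above using string comparisons. As an illustration only (not part of this patch), the same rule can be expressed once as an EnumSet-backed predicate; UnHealthyContainerStates and UnhealthyContainers#getContainerState() are the types used elsewhere in this diff, everything else here is assumed:

    import java.util.EnumSet;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    /** Illustrative sketch only; not part of this patch. */
    final class ReportableContainerFilter {
      // States that are counted in statistics but never returned by the endpoint.
      private static final Set<String> EXCLUDED_STATES =
          EnumSet.of(UnHealthyContainerStates.EMPTY_MISSING,
                     UnHealthyContainerStates.NEGATIVE_SIZE)
              .stream()
              .map(Enum::name)
              .collect(Collectors.toSet());

      static List<UnhealthyContainers> reportable(List<UnhealthyContainers> containers) {
        return containers.stream()
            .filter(c -> !EXCLUDED_STATES.contains(c.getContainerState()))
            .collect(Collectors.toList());
      }
    }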
+ List filteredContainers = containers.stream() + .filter(container -> !container.getContainerState() + .equals(UnHealthyContainerStates.EMPTY_MISSING.toString()) + && !container.getContainerState() + .equals(UnHealthyContainerStates.NEGATIVE_SIZE.toString())) + .collect(Collectors.toList()); + + for (UnhealthyContainers c : filteredContainers) { long containerID = c.getContainerId(); ContainerInfo containerInfo = containerManager.getContainer(ContainerID.valueOf(containerID)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java index 070b7e1ccd4..3ce4fc7f837 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java @@ -32,6 +32,7 @@ */ @Path("/triggerdbsync") @Produces(MediaType.APPLICATION_JSON) +@AdminOnly public class TriggerDBSyncEndpoint { private OzoneManagerServiceProvider ozoneManagerServiceProvider; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index ba03ec61f14..eaf08d9ca83 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -50,12 +50,6 @@ public class UnhealthyContainersResponse { @JsonProperty("misReplicatedCount") private long misReplicatedCount = 0; - /** - * Total count of containers with negative size. - */ - @JsonProperty("negativeSizeCount") - private long negativeSizeCount = 0; - /** * A collection of unhealthy containers. 
*/ @@ -83,9 +77,6 @@ public void setSummaryCount(String state, long count) { } else if (state.equals( UnHealthyContainerStates.MIS_REPLICATED.toString())) { this.misReplicatedCount = count; - } else if (state.equals( - UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { - this.negativeSizeCount = count; } } @@ -105,10 +96,6 @@ public long getMisReplicatedCount() { return misReplicatedCount; } - public long getNegativeSizeCount() { - return negativeSizeCount; - } - public Collection getContainers() { return containers; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 639047d37bd..11af6eaff53 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -29,6 +29,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -78,6 +79,8 @@ public class ContainerHealthTask extends ReconScmTask { private final ReconContainerMetadataManager reconContainerMetadataManager; private final PlacementPolicy placementPolicy; private final long interval; + private Map> + unhealthyContainerStateStatsMapForTesting; private final Set processedContainers = new HashSet<>(); @@ -185,10 +188,12 @@ private void checkAndProcessContainers( private void logUnhealthyContainerStats( Map> unhealthyContainerStateStatsMap) { + unhealthyContainerStateStatsMapForTesting = + new HashMap<>(unhealthyContainerStateStatsMap); // If any EMPTY_MISSING containers, then it is possible that such // containers got stuck in the closing state which never got // any replicas created on the datanodes. In this case, we log it as - // EMPTY, and insert as EMPTY_MISSING in UNHEALTHY_CONTAINERS table. + // EMPTY_MISSING in unhealthy container statistics but do not add it to the table. unhealthyContainerStateStatsMap.entrySet().forEach(stateEntry -> { UnHealthyContainerStates unhealthyContainerState = stateEntry.getKey(); Map containerStateStatsMap = stateEntry.getValue(); @@ -256,6 +261,11 @@ private void completeProcessingContainer( * completeProcessingContainer is called. This will check to see if any * additional records need to be added to the database. * + * If a container is identified as missing, empty-missing, under-replicated, + * over-replicated or mis-replicated, the method checks with SCM to determine + * if it has been deleted, using {@code containerDeletedInSCM}. If the container is + * deleted in SCM, the corresponding record is removed from Recon. 
+ * * @param currentTime Timestamp to place on all records generated by this run * @param unhealthyContainerStateCountMap * @return Count of records processed @@ -273,9 +283,11 @@ private long processExistingDBRecords(long currentTime, recordCount++; UnhealthyContainersRecord rec = cursor.fetchNext(); try { + // Set the current container if it's not already set if (currentContainer == null) { currentContainer = setCurrentContainer(rec.getContainerId()); } + // If the container ID has changed, finish processing the previous one if (currentContainer.getContainerID() != rec.getContainerId()) { completeProcessingContainer( currentContainer, existingRecords, currentTime, @@ -283,24 +295,29 @@ private long processExistingDBRecords(long currentTime, existingRecords.clear(); currentContainer = setCurrentContainer(rec.getContainerId()); } - if (ContainerHealthRecords - .retainOrUpdateRecord(currentContainer, rec - )) { - // Check if the missing container is deleted in SCM - if (currentContainer.isMissing() && - containerDeletedInSCM(currentContainer.getContainer())) { - rec.delete(); - } - existingRecords.add(rec.getContainerState()); - if (rec.changed()) { - rec.update(); - } - } else { + + // Unhealthy Containers such as MISSING, UNDER_REPLICATED, + // OVER_REPLICATED, MIS_REPLICATED can have their unhealthy states changed or retained. + if (!ContainerHealthRecords.retainOrUpdateRecord(currentContainer, rec)) { + rec.delete(); LOG.info("DELETED existing unhealthy container record...for Container: {}", currentContainer.getContainerID()); + } + + // If the container is marked as MISSING and it's deleted in SCM, remove the record + if (currentContainer.isMissing() && containerDeletedInSCM(currentContainer.getContainer())) { rec.delete(); + LOG.info("DELETED existing unhealthy container record...for Container: {}", + currentContainer.getContainerID()); + } + + existingRecords.add(rec.getContainerState()); + // If the record was changed, update it + if (rec.changed()) { + rec.update(); } } catch (ContainerNotFoundException cnf) { + // If the container is not found, delete the record and reset currentContainer rec.delete(); currentContainer = null; } @@ -326,13 +343,6 @@ private void processContainer(ContainerInfo container, long currentTime, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); - // Handle negative sized containers separately - if (h.getContainer().getUsedBytes() < 0) { - handleNegativeSizedContainers(h, currentTime, - unhealthyContainerStateStatsMap); - return; - } - if (h.isHealthilyReplicated() || h.isDeleted()) { return; } @@ -349,6 +359,18 @@ private void processContainer(ContainerInfo container, long currentTime, } } + /** + * Ensures the container's state in Recon is updated to match its state in SCM. + * + * If SCM reports the container as DELETED, this method attempts to transition + * the container's state in Recon from CLOSED to DELETING, or from DELETING to + * DELETED, based on the current state in Recon. It logs each transition attempt + * and handles any exceptions that may occur. + * + * @param containerInfo the container whose state is being checked and potentially updated. + * @return {@code true} if the container was found to be DELETED in SCM and the + * state transition was attempted in Recon; {@code false} otherwise. 
+ */ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { try { ContainerWithPipeline containerWithPipeline = @@ -358,6 +380,8 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { if (containerInfo.getState() == HddsProtos.LifeCycleState.CLOSED) { containerManager.updateContainerState(containerInfo.containerID(), HddsProtos.LifeCycleEvent.DELETE); + LOG.debug("Successfully changed container {} state from CLOSED to DELETING.", + containerInfo.containerID()); } if (containerInfo.getState() == HddsProtos.LifeCycleState.DELETING && containerManager.getContainerReplicas(containerInfo.containerID()) @@ -365,6 +389,7 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { ) { containerManager.updateContainerState(containerInfo.containerID(), HddsProtos.LifeCycleEvent.CLEANUP); + LOG.info("Successfully Deleted container {} from Recon.", containerInfo.containerID()); } return true; } @@ -380,28 +405,50 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { /** * This method is used to handle containers with negative sizes. It logs an - * error message and inserts a record into the UNHEALTHY_CONTAINERS table. + * error message. * @param containerHealthStatus * @param currentTime * @param unhealthyContainerStateStatsMap */ - private void handleNegativeSizedContainers( + private static void handleNegativeSizedContainers( ContainerHealthStatus containerHealthStatus, long currentTime, Map> unhealthyContainerStateStatsMap) { + // NEGATIVE_SIZE containers are also not inserted into the database. + // This condition usually arises due to corrupted or invalid metadata, where + // the container's size is inaccurately recorded as negative. Since this does not + // represent a typical unhealthy scenario and may not have any meaningful + // impact on system health, such containers are logged for investigation but + // excluded from the UNHEALTHY_CONTAINERS table to maintain data integrity. ContainerInfo container = containerHealthStatus.getContainer(); - LOG.error( - "Container {} has negative size. Please visit Recon's unhealthy " + - "container endpoint for more details.", - container.getContainerID()); - UnhealthyContainers record = - ContainerHealthRecords.recordForState(containerHealthStatus, - UnHealthyContainerStates.NEGATIVE_SIZE, currentTime); - List records = Collections.singletonList(record); - populateContainerStats(containerHealthStatus, - UnHealthyContainerStates.NEGATIVE_SIZE, + LOG.error("Container {} has negative size.", container.getContainerID()); + populateContainerStats(containerHealthStatus, UnHealthyContainerStates.NEGATIVE_SIZE, + unhealthyContainerStateStatsMap); + } + + /** + * This method is used to handle containers that are empty and missing. It logs + * a debug message. + * @param containerHealthStatus + * @param currentTime + * @param unhealthyContainerStateStatsMap + */ + private static void handleEmptyMissingContainers( + ContainerHealthStatus containerHealthStatus, long currentTime, + Map> + unhealthyContainerStateStatsMap) { + // EMPTY_MISSING containers are not inserted into the database. + // These containers typically represent those that were never written to + // or remain in an incomplete state. Tracking such containers as unhealthy + // would not provide valuable insights since they don't pose a risk or issue + // to the system. Instead, they are logged for awareness, but not stored in + // the UNHEALTHY_CONTAINERS table to avoid unnecessary entries. 
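Because EMPTY_MISSING and NEGATIVE_SIZE containers are now reflected only in the in-memory statistics (exposed later in this diff through the @VisibleForTesting accessor getUnhealthyContainerStateStatsMap()), a test could assert the new behaviour roughly as follows. This is a hypothetical fragment: the containerHealthTask/containerHealthSchemaManager wiring and the stats map's generic types are assumptions, while the method names are taken from this diff:

    // Hypothetical assertion sketch; containerHealthTask and containerHealthSchemaManager
    // are assumed to be set up by the surrounding test.
    Map<UnHealthyContainerStates, Map<String, Long>> stats =
        containerHealthTask.getUnhealthyContainerStateStatsMap();

    // The state is counted in the statistics map...
    assertTrue(stats.containsKey(UnHealthyContainerStates.EMPTY_MISSING));

    // ...but no UNHEALTHY_CONTAINERS row is written for it.
    assertTrue(containerHealthSchemaManager
        .getUnhealthyContainers(UnHealthyContainerStates.EMPTY_MISSING, 0, 10)
        .isEmpty());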
+ ContainerInfo container = containerHealthStatus.getContainer(); + LOG.debug("Empty container {} is missing. It will be logged in the " + + "unhealthy container statistics, but no record will be created in the " + + "UNHEALTHY_CONTAINERS table.", container.getContainerID()); + populateContainerStats(containerHealthStatus, EMPTY_MISSING, unhealthyContainerStateStatsMap); - containerHealthSchemaManager.insertUnhealthyContainerRecords(records); } /** @@ -492,22 +539,21 @@ public static List generateUnhealthyRecords( populateContainerStats(container, UnHealthyContainerStates.MISSING, unhealthyContainerStateStatsMap); } else { - - LOG.debug("Empty container {} is missing. Kindly check the " + - "consolidated container stats per UNHEALTHY state logged as " + - "starting with **Container State Stats:**"); - - records.add( - recordForState(container, EMPTY_MISSING, - time)); - populateContainerStats(container, - EMPTY_MISSING, + handleEmptyMissingContainers(container, time, unhealthyContainerStateStatsMap); } // A container cannot have any other records if it is missing so return return records; } + // For Negative sized containers we only log but not insert into DB + if (container.getContainer().getUsedBytes() < 0 + && !recordForStateExists.contains( + UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { + handleNegativeSizedContainers(container, time, + unhealthyContainerStateStatsMap); + } + if (container.isUnderReplicated() && !recordForStateExists.contains( UnHealthyContainerStates.UNDER_REPLICATED.toString())) { @@ -650,4 +696,23 @@ private static void populateContainerStats( (value + container.getContainer().getUsedBytes())); } } + + /** + * Expose the logger for testing purposes. + * + * @return the logger instance + */ + @VisibleForTesting + public Logger getLogger() { + return LOG; + } + + /** + * Expose the unhealthyContainerStateStatsMap for testing purposes. 
+ */ + @VisibleForTesting + public Map> getUnhealthyContainerStateStatsMap() { + return unhealthyContainerStateStatsMapForTesting; + } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index a7f486ea5ac..c773187c4b1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -155,6 +155,7 @@ public class ReconStorageContainerManagerFacade private final SCMNodeDetails reconNodeDetails; private final SCMHAManager scmhaManager; private final SequenceIdGenerator sequenceIdGen; + private final ContainerHealthTask containerHealthTask; private DBStore dbStore; private ReconNodeManager nodeManager; @@ -272,7 +273,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, scmServiceProvider, reconTaskStatusDao, reconTaskConfig); - ContainerHealthTask containerHealthTask = new ContainerHealthTask( + containerHealthTask = new ContainerHealthTask( containerManager, scmServiceProvider, reconTaskStatusDao, containerHealthSchemaManager, containerPlacementPolicy, reconTaskConfig, reconContainerMetadataManager, conf); @@ -741,6 +742,12 @@ public StorageContainerServiceProvider getScmServiceProvider() { public ContainerSizeCountTask getContainerSizeCountTask() { return containerSizeCountTask; } + + @VisibleForTesting + public ContainerHealthTask getContainerHealthTask() { + return containerHealthTask; + } + @VisibleForTesting public ContainerCountBySizeDao getContainerCountBySizeDao() { return containerCountBySizeDao; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx index 0230d4dd61d..6b2bab246b7 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx @@ -76,10 +76,11 @@ class AutoReloadPanel extends React.Component { ); const lastUpdatedDeltaFullText = lastUpdatedOMDBDelta === 0 || lastUpdatedOMDBDelta === undefined || lastUpdatedOMDBFull === 0 || lastUpdatedOMDBFull === undefined ? '' : + //omSyncLoad should be clickable at all times. If the response from the dbsync is false it will show DB update is already running else show triggered sync ( <>   | DB Synced at {lastUpdatedDeltaFullToolTip} -  

+ + ) +} + +export default BucketsTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx new file mode 100644 index 00000000000..494d898509b --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import moment from 'moment'; +import { Popover, Tooltip } from 'antd' +import { + CheckCircleFilled, + CloseCircleFilled, + HourglassFilled, + InfoCircleOutlined, + WarningFilled +} from '@ant-design/icons'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { TableRowSelection } from 'antd/es/table/interface'; + +import StorageBar from '@/v2/components/storageBar/storageBar'; +import DecommissionSummary from '@/v2/components/decommissioningSummary/decommissioningSummary'; + +import { ReplicationIcon } from '@/utils/themeIcons'; +import { getTimeDiffFromTimestamp } from '@/v2/utils/momentUtils'; + +import { + Datanode, + DatanodeOpState, + DatanodeOpStateList, + DatanodeState, + DatanodeStateList, + DatanodeTableProps +} from '@/v2/types/datanode.types'; +import { Pipeline } from '@/v2/types/pipelines.types'; + + +let decommissioningUuids: string | string[] = []; + +const headerIconStyles: React.CSSProperties = { + display: 'flex', + alignItems: 'center' +} + +const renderDatanodeState = (state: DatanodeState) => { + const stateIconMap = { + HEALTHY: , + STALE: , + DEAD: + }; + const icon = state in stateIconMap ? stateIconMap[state] : ''; + return {icon} {state}; +}; + +const renderDatanodeOpState = (opState: DatanodeOpState) => { + const opStateIconMap = { + IN_SERVICE: , + DECOMMISSIONING: , + DECOMMISSIONED: , + ENTERING_MAINTENANCE: , + IN_MAINTENANCE: + }; + const icon = opState in opStateIconMap ? 
opStateIconMap[opState] : ''; + return {icon} {opState}; +}; + +export const COLUMNS: ColumnsType = [ + { + title: 'Hostname', + dataIndex: 'hostname', + key: 'hostname', + sorter: (a: Datanode, b: Datanode) => a.hostname.localeCompare( + b.hostname, undefined, { numeric: true } + ), + defaultSortOrder: 'ascend' as const + }, + { + title: 'State', + dataIndex: 'state', + key: 'state', + filterMultiple: true, + filters: DatanodeStateList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Datanode) => record.state === value, + render: (text: DatanodeState) => renderDatanodeState(text), + sorter: (a: Datanode, b: Datanode) => a.state.localeCompare(b.state) + }, + { + title: 'Operational State', + dataIndex: 'opState', + key: 'opState', + filterMultiple: true, + filters: DatanodeOpStateList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Datanode) => record.opState === value, + render: (text: DatanodeOpState) => renderDatanodeOpState(text), + sorter: (a: Datanode, b: Datanode) => a.opState.localeCompare(b.opState) + }, + { + title: 'UUID', + dataIndex: 'uuid', + key: 'uuid', + sorter: (a: Datanode, b: Datanode) => a.uuid.localeCompare(b.uuid), + defaultSortOrder: 'ascend' as const, + render: (uuid: string, record: Datanode) => { + return ( + //1. Compare Decommission Api's UUID with all UUID in table and show Decommission Summary + (decommissioningUuids && decommissioningUuids.includes(record.uuid) && record.opState !== 'DECOMMISSIONED') ? + : {uuid} + ); + } + }, + { + title: 'Storage Capacity', + dataIndex: 'storageUsed', + key: 'storageUsed', + sorter: (a: Datanode, b: Datanode) => a.storageRemaining - b.storageRemaining, + render: (_: string, record: Datanode) => ( + + ) + }, + { + title: 'Last Heartbeat', + dataIndex: 'lastHeartbeat', + key: 'lastHeartbeat', + sorter: (a: Datanode, b: Datanode) => moment(a.lastHeartbeat).unix() - moment(b.lastHeartbeat).unix(), + render: (heartbeat: number) => { + return heartbeat > 0 ? getTimeDiffFromTimestamp(heartbeat) : 'NA'; + } + }, + { + title: 'Pipeline ID(s)', + dataIndex: 'pipelines', + key: 'pipelines', + render: (pipelines: Pipeline[], record: Datanode) => { + const renderPipelineIds = (pipelineIds: Pipeline[]) => { + return pipelineIds?.map((pipeline: any, index: any) => ( +
+ + {pipeline.pipelineID} +
+ )) + } + + return ( + + {pipelines.length} pipelines + + ); + } + }, + { + title: () => ( + + Leader Count + + + + + ), + dataIndex: 'leaderCount', + key: 'leaderCount', + sorter: (a: Datanode, b: Datanode) => a.leaderCount - b.leaderCount + }, + { + title: 'Containers', + dataIndex: 'containers', + key: 'containers', + sorter: (a: Datanode, b: Datanode) => a.containers - b.containers + }, + { + title: () => ( + + Open Container + + + + + ), + dataIndex: 'openContainers', + key: 'openContainers', + sorter: (a: Datanode, b: Datanode) => a.openContainers - b.openContainers + }, + { + title: 'Version', + dataIndex: 'version', + key: 'version', + sorter: (a: Datanode, b: Datanode) => a.version.localeCompare(b.version), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Setup Time', + dataIndex: 'setupTime', + key: 'setupTime', + sorter: (a: Datanode, b: Datanode) => a.setupTime - b.setupTime, + render: (uptime: number) => { + return uptime > 0 ? moment(uptime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Revision', + dataIndex: 'revision', + key: 'revision', + sorter: (a: Datanode, b: Datanode) => a.revision.localeCompare(b.revision), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Build Date', + dataIndex: 'buildDate', + key: 'buildDate', + sorter: (a: Datanode, b: Datanode) => a.buildDate.localeCompare(b.buildDate), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Network Location', + dataIndex: 'networkLocation', + key: 'networkLocation', + sorter: (a: Datanode, b: Datanode) => a.networkLocation.localeCompare(b.networkLocation), + defaultSortOrder: 'ascend' as const + } +]; + +const DatanodesTable: React.FC = ({ + data, + handleSelectionChange, + decommissionUuids, + selectedColumns, + loading = false, + selectedRows = [], + searchColumn = 'hostname', + searchTerm = '' +}) => { + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ); + } + + function getFilteredData(data: Datanode[]) { + return data?.filter( + (datanode: Datanode) => datanode[searchColumn].includes(searchTerm) + ) ?? []; + } + + function isSelectable(record: Datanode) { + // Disable checkbox for any datanode which is not DEAD to prevent removal + return record.state !== 'DEAD' && true; + } + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => ( + `${range[0]}-${range[1]} of ${total} Datanodes` + ), + showSizeChanger: true + }; + + const rowSelection: TableRowSelection = { + selectedRowKeys: selectedRows, + onChange: (rows: React.Key[]) => { handleSelectionChange(rows) }, + getCheckboxProps: (record: Datanode) => ({ + disabled: isSelectable(record) + }), + }; + + React.useEffect(() => { + decommissioningUuids = decommissionUuids; + }, [decommissionUuids]) + + return ( +
+
+ + ); +} + +export default DatanodesTable; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx new file mode 100644 index 00000000000..6c07749436d --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import Tooltip from 'antd/es/tooltip'; +import { InfoCircleOutlined } from '@ant-design/icons'; + +import { ReplicationIcon } from '@/utils/themeIcons'; +import { getDurationFromTimestamp, getTimeDiffFromTimestamp } from '@/v2/utils/momentUtils'; +import { Pipeline, PipelinesTableProps, PipelineStatusList } from '@/v2/types/pipelines.types'; + + +// TODO: When Datanodes PR gets merged remove these declarations +// And import from datanodes.types + +type SummaryDatanodeDetails = { + level: number; + parent: unknown | null; + cost: number; + uuid: string; + uuidString: string; + ipAddress: string; + hostName: string; + ports: { + name: string; + value: number + }[]; + certSerialId: null, + version: string | null; + setupTime: number; + revision: string | null; + buildDate: string; + persistedOpState: string; + persistedOpStateExpiryEpochSec: number; + initialVersion: number; + currentVersion: number; + signature: number; + decommissioned: boolean; + networkName: string; + networkLocation: string; + networkFullPath: string; + numOfLeaves: number; +} + +export const COLUMNS: ColumnsType = [ + { + title: 'Pipeline ID', + dataIndex: 'pipelineId', + key: 'pipelineId', + sorter: (a: Pipeline, b: Pipeline) => a.pipelineId.localeCompare(b.pipelineId), + + }, + { + title: 'Replication Type & Factor', + dataIndex: 'replicationType', + key: 'replicationType', + render: (replicationType: string, record: Pipeline) => { + const replicationFactor = record.replicationFactor; + return ( + + + {replicationType} ({replicationFactor}) + + ); + }, + sorter: (a: Pipeline, b: Pipeline) => + (a.replicationType + a.replicationFactor.toString()).localeCompare(b.replicationType + b.replicationFactor.toString()), + defaultSortOrder: 'descend' as const + }, + { + title: 'Status', + dataIndex: 'status', + key: 'status', + filterMultiple: true, + filters: PipelineStatusList.map(status => ({ text: status, value: status })), + onFilter: (value, record: Pipeline) => record.status === value, + sorter: (a: Pipeline, b: Pipeline) => a.status.localeCompare(b.status) + }, + { + title: 'Containers', + dataIndex: 'containers', + key: 
'containers', + sorter: (a: Pipeline, b: Pipeline) => a.containers - b.containers + }, + { + title: 'Datanodes', + dataIndex: 'datanodes', + key: 'datanodes', + render: (datanodes: SummaryDatanodeDetails[]) => ( +
+ {datanodes.map(datanode => ( +
+ triggerNode}> + {datanode?.hostName ?? 'N/A'} + +
+ ))} +
+ ) + }, + { + title: 'Leader', + dataIndex: 'leaderNode', + key: 'leaderNode', + sorter: (a: Pipeline, b: Pipeline) => a.leaderNode.localeCompare(b.leaderNode) + }, + { + title: () => ( + + Last Leader Election  + + + + + ), + dataIndex: 'lastLeaderElection', + key: 'lastLeaderElection', + render: (lastLeaderElection: number) => lastLeaderElection > 0 ? + getTimeDiffFromTimestamp(lastLeaderElection) : 'NA', + sorter: (a: Pipeline, b: Pipeline) => a.lastLeaderElection - b.lastLeaderElection + }, + { + title: 'Lifetime', + dataIndex: 'duration', + key: 'duration', + render: (duration: number) => getDurationFromTimestamp(duration), + sorter: (a: Pipeline, b: Pipeline) => a.duration - b.duration + }, + { + title: () => ( + + No. of Elections  + + + + + ), + dataIndex: 'leaderElections', + key: 'leaderElections', + render: (leaderElections: number) => leaderElections > 0 ? + leaderElections : 'NA', + sorter: (a: Pipeline, b: Pipeline) => a.leaderElections - b.leaderElections + } +]; + +const PipelinesTable: React.FC = ({ + loading = false, + data, + selectedColumns, + searchTerm = '' +}) => { + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} pipelines`, + showSizeChanger: true, + }; + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ) + } + + function getFilteredData(data: Pipeline[]) { + return data.filter( + (pipeline: Pipeline) => pipeline['pipelineId'].includes(searchTerm) + ) + } + + return ( +
+
+ + ) +} + +export default PipelinesTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx new file mode 100644 index 00000000000..4de0d713fce --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import QuotaBar from '@/components/quotaBar/quotaBar'; +import { byteToSize } from '@/utils/common'; +import { Volume, VolumesTableProps } from '@/v2/types/volume.types'; +import Table, { ColumnsType, ColumnType, TablePaginationConfig } from 'antd/es/table'; +import moment from 'moment'; +import React from 'react'; +import { Link } from 'react-router-dom'; + +export const COLUMNS: ColumnsType = [ + { + title: 'Volume', + dataIndex: 'volume', + key: 'volume', + sorter: (a: Volume, b: Volume) => a.volume.localeCompare(b.volume), + defaultSortOrder: 'ascend' as const, + width: '15%' + }, + { + title: 'Owner', + dataIndex: 'owner', + key: 'owner', + sorter: (a: Volume, b: Volume) => a.owner.localeCompare(b.owner) + }, + { + title: 'Admin', + dataIndex: 'admin', + key: 'admin', + sorter: (a: Volume, b: Volume) => a.admin.localeCompare(b.admin) + }, + { + title: 'Creation Time', + dataIndex: 'creationTime', + key: 'creationTime', + sorter: (a: Volume, b: Volume) => a.creationTime - b.creationTime, + render: (creationTime: number) => { + return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Modification Time', + dataIndex: 'modificationTime', + key: 'modificationTime', + sorter: (a: Volume, b: Volume) => a.modificationTime - b.modificationTime, + render: (modificationTime: number) => { + return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Quota (Size)', + dataIndex: 'quotaInBytes', + key: 'quotaInBytes', + render: (quotaInBytes: number) => { + return quotaInBytes && quotaInBytes !== -1 ? 
byteToSize(quotaInBytes, 3) : 'NA'; + } + }, + { + title: 'Namespace Capacity', + key: 'namespaceCapacity', + sorter: (a: Volume, b: Volume) => a.usedNamespace - b.usedNamespace, + render: (text: string, record: Volume) => ( + + ) + }, +]; + +const VolumesTable: React.FC = ({ + loading = false, + data, + handleAclClick, + selectedColumns, + searchColumn = 'volume', + searchTerm = '' +}) => { + + React.useEffect(() => { + // On table mount add the actions column + console.log("Adding new column"); + const actionsColumn: ColumnType = { + title: 'Actions', + key: 'actions', + render: (_: any, record: Volume) => { + const searchParams = new URLSearchParams(); + searchParams.append('volume', record.volume); + + return ( + <> + + Show buckets + + handleAclClick(record)}> + Show ACL + + + ); + } + } + + if (COLUMNS.length > 0 && COLUMNS[COLUMNS.length - 1].key !== 'actions') { + // Push the ACL column for initial + COLUMNS.push(actionsColumn); + selectedColumns.push({ + label: actionsColumn.title as string, + value: actionsColumn.key as string + }); + } else { + // Replace old ACL column with new ACL column with correct reference + // e.g. After page is reloaded / redirect from other page + COLUMNS[COLUMNS.length - 1] = actionsColumn; + selectedColumns[selectedColumns.length - 1] = { + label: actionsColumn.title as string, + value: actionsColumn.key as string + } + } + + }, []); + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ) + } + + function getFilteredData(data: Volume[]) { + return data.filter( + (volume: Volume) => volume[searchColumn].includes(searchTerm) + ); + } + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} volumes`, + showSizeChanger: true + }; + + return ( +
+
+ + ) +} + +export default VolumesTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx index bd8950e54c8..1e2de307b17 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx @@ -16,27 +16,11 @@ * limitations under the License. */ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useRef, useState } from 'react'; import moment from 'moment'; -import { Table, Tag } from 'antd'; -import { - ColumnProps, - ColumnsType, - TablePaginationConfig -} from 'antd/es/table'; -import { - CheckCircleOutlined, - CloseCircleOutlined, - CloudServerOutlined, - FileUnknownOutlined, - HddOutlined, - LaptopOutlined, - SaveOutlined -} from '@ant-design/icons'; import { ValueType } from 'react-select'; import { useLocation } from 'react-router-dom'; -import QuotaBar from '@/components/quotaBar/quotaBar'; import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; import Search from '@/v2/components/search/search'; @@ -44,21 +28,18 @@ import MultiSelect from '@/v2/components/select/multiSelect'; import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; -import { AxiosGetHelper } from "@/utils/axiosRequestHelper"; -import { nullAwareLocaleCompare, showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; +import { showDataFetchError } from '@/utils/common'; import { useDebounce } from '@/v2/hooks/debounce.hook'; import { Bucket, - BucketLayout, - BucketLayoutTypeList, BucketResponse, BucketsState, - BucketStorage, - BucketStorageTypeList } from '@/v2/types/bucket.types'; import './buckets.less'; +import BucketsTable, { COLUMNS } from '@/v2/components/tables/bucketsTable'; const LIMIT_OPTIONS: Option[] = [ @@ -80,38 +61,6 @@ const LIMIT_OPTIONS: Option[] = [ } ] -const renderIsVersionEnabled = (isVersionEnabled: boolean) => { - return isVersionEnabled - ? - : -}; - -const renderStorageType = (bucketStorage: BucketStorage) => { - const bucketStorageIconMap: Record = { - RAM_DISK: , - SSD: , - DISK: , - ARCHIVE: - }; - const icon = bucketStorage in bucketStorageIconMap - ? bucketStorageIconMap[bucketStorage] - : ; - return {icon} {bucketStorage}; -}; - -const renderBucketLayout = (bucketLayout: BucketLayout) => { - const bucketLayoutColorMap = { - FILE_SYSTEM_OPTIMIZED: 'green', - OBJECT_STORE: 'orange', - LEGACY: 'blue' - }; - const color = bucketLayout in bucketLayoutColorMap ? 
- bucketLayoutColorMap[bucketLayout] : ''; - return {bucketLayout}; -}; - const SearchableColumnOpts = [{ label: 'Bucket', value: 'name' @@ -120,113 +69,6 @@ const SearchableColumnOpts = [{ value: 'volumeName' }] -const COLUMNS: ColumnsType = [ - { - title: 'Bucket', - dataIndex: 'name', - key: 'name', - sorter: (a: Bucket, b: Bucket) => a.name.localeCompare(b.name), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Volume', - dataIndex: 'volumeName', - key: 'volumeName', - sorter: (a: Bucket, b: Bucket) => a.volumeName.localeCompare(b.volumeName), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Owner', - dataIndex: 'owner', - key: 'owner', - sorter: (a: Bucket, b: Bucket) => nullAwareLocaleCompare(a.owner, b.owner) - }, - { - title: 'Versioning', - dataIndex: 'versioning', - key: 'isVersionEnabled', - render: (isVersionEnabled: boolean) => renderIsVersionEnabled(isVersionEnabled) - }, - { - title: 'Storage Type', - dataIndex: 'storageType', - key: 'storageType', - filterMultiple: true, - filters: BucketStorageTypeList.map(state => ({ text: state, value: state })), - onFilter: (value, record: Bucket) => record.storageType === value, - sorter: (a: Bucket, b: Bucket) => a.storageType.localeCompare(b.storageType), - render: (storageType: BucketStorage) => renderStorageType(storageType) - }, - { - title: 'Bucket Layout', - dataIndex: 'bucketLayout', - key: 'bucketLayout', - filterMultiple: true, - filters: BucketLayoutTypeList.map(state => ({ text: state, value: state })), - onFilter: (value, record: Bucket) => record.bucketLayout === value, - sorter: (a: Bucket, b: Bucket) => a.bucketLayout.localeCompare(b.bucketLayout), - render: (bucketLayout: BucketLayout) => renderBucketLayout(bucketLayout) - }, - { - title: 'Creation Time', - dataIndex: 'creationTime', - key: 'creationTime', - sorter: (a: Bucket, b: Bucket) => a.creationTime - b.creationTime, - render: (creationTime: number) => { - return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Modification Time', - dataIndex: 'modificationTime', - key: 'modificationTime', - sorter: (a: Bucket, b: Bucket) => a.modificationTime - b.modificationTime, - render: (modificationTime: number) => { - return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Storage Capacity', - key: 'quotaCapacityBytes', - sorter: (a: Bucket, b: Bucket) => a.usedBytes - b.usedBytes, - render: (text: string, record: Bucket) => ( - - ) - }, - { - title: 'Namespace Capacity', - key: 'namespaceCapacity', - sorter: (a: Bucket, b: Bucket) => a.usedNamespace - b.usedNamespace, - render: (text: string, record: Bucket) => ( - - ) - }, - { - title: 'Source Volume', - dataIndex: 'sourceVolume', - key: 'sourceVolume', - render: (sourceVolume: string) => { - return sourceVolume ? sourceVolume : 'NA'; - } - }, - { - title: 'Source Bucket', - dataIndex: 'sourceBucket', - key: 'sourceBucket', - render: (sourceBucket: string) => { - return sourceBucket ? 
sourceBucket : 'NA'; - } - } -]; - const defaultColumns = COLUMNS.map(column => ({ label: column.title as string, value: column.key as string @@ -269,7 +111,7 @@ function getFilteredBuckets( const Buckets: React.FC<{}> = () => { - let cancelSignal: AbortController; + const cancelSignal = useRef(); const [state, setState] = useState({ totalCount: 0, @@ -291,21 +133,10 @@ const Buckets: React.FC<{}> = () => { const debouncedSearch = useDebounce(searchTerm, 300); const { search } = useLocation(); - const paginationConfig: TablePaginationConfig = { - showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} buckets`, - showSizeChanger: true - }; - function getVolumeSearchParam() { return new URLSearchParams(search).get('volume'); }; - function getFilteredData(data: Bucket[]) { - return data.filter( - (bucket: Bucket) => bucket[searchColumn].includes(debouncedSearch) - ); - } - function handleVolumeChange(selected: ValueType) { const { volumeBucketMap } = state; const volumeSelections = (selected as Option[]); @@ -327,50 +158,6 @@ const Buckets: React.FC<{}> = () => { setShowPanel(true); } - function filterSelectedColumns() { - const columnKeys = selectedColumns.map((column) => column.value); - return COLUMNS.filter( - (column) => columnKeys.indexOf(column.key as string) >= 0 - ) - } - - function addAclColumn() { - // Inside the class component to access the React internal state - const aclLinkColumn: ColumnProps = { - title: 'ACLs', - dataIndex: 'acls', - key: 'acls', - render: (_: any, record: Bucket) => { - return ( - { - handleAclLinkClick(record); - }} - > - Show ACL - - ); - } - }; - - if (COLUMNS.length > 0 && COLUMNS[COLUMNS.length - 1].key !== 'acls') { - // Push the ACL column for initial - COLUMNS.push(aclLinkColumn); - } else { - // Replace old ACL column with new ACL column with correct reference - // e.g. After page is reloaded / redirect from other page - COLUMNS[COLUMNS.length - 1] = aclLinkColumn; - } - - if (defaultColumns.length > 0 && defaultColumns[defaultColumns.length - 1].label !== 'acls') { - defaultColumns.push({ - label: aclLinkColumn.title as string, - value: aclLinkColumn.key as string - }); - } - }; - function handleColumnChange(selected: ValueType) { setSelectedColumns(selected as Option[]); } @@ -383,11 +170,11 @@ const Buckets: React.FC<{}> = () => { setLoading(true); const { request, controller } = AxiosGetHelper( '/api/v1/buckets', - cancelSignal, + cancelSignal.current, '', { limit: selectedLimit.value } ); - cancelSignal = controller; + cancelSignal.current = controller; request.then(response => { const bucketsResponse: BucketResponse = response.data; const totalCount = bucketsResponse.totalCount; @@ -443,11 +230,10 @@ const Buckets: React.FC<{}> = () => { }); } - let autoReloadHelper: AutoReloadHelper = new AutoReloadHelper(loadData); + const autoReloadHelper: AutoReloadHelper = new AutoReloadHelper(loadData); useEffect(() => { autoReloadHelper.startPolling(); - addAclColumn(); const initialVolume = getVolumeSearchParam(); if (initialVolume) { setSelectedVolumes([{ @@ -459,7 +245,7 @@ const Buckets: React.FC<{}> = () => { return (() => { autoReloadHelper.stopPolling(); - cancelSignal && cancelSignal.abort(); + cancelRequests([cancelSignal.current!]); }) }, []); @@ -537,17 +323,13 @@ const Buckets: React.FC<{}> = () => { setSearchColumn(value as 'name' | 'volumeName'); }} /> -
-
- + ({ + label: (typeof column.title === 'string') + ? column.title + : (column.title as Function)().props.children[0], + value: column.key as string +})); + +const SearchableColumnOpts = [{ + label: 'Hostname', + value: 'hostname' +}, { + label: 'UUID', + value: 'uuid' +}, { + label: 'Version', + value: 'version' +}]; + +let decommissionUuids: string | string[] = []; +const COLUMN_UPDATE_DECOMMISSIONING = 'DECOMMISSIONING'; + +const Datanodes: React.FC<{}> = () => { + + const cancelSignal = useRef(); + const cancelDecommissionSignal = useRef(); + + const [state, setState] = useState({ + lastUpdated: 0, + columnOptions: defaultColumns, + dataSource: [] + }); + const [loading, setLoading] = useState(false); + const [selectedColumns, setSelectedColumns] = useState(defaultColumns); + const [selectedRows, setSelectedRows] = useState([]); + const [searchTerm, setSearchTerm] = useState(''); + const [searchColumn, setSearchColumn] = useState<'hostname' | 'uuid' | 'version' | 'revision'>('hostname'); + const [modalOpen, setModalOpen] = useState(false); + + const debouncedSearch = useDebounce(searchTerm, 300); + + function handleColumnChange(selected: ValueType) { + setSelectedColumns(selected as Option[]); + } + + async function loadDecommisionAPI() { + decommissionUuids = []; + const { request, controller } = await AxiosGetHelper( + '/api/v1/datanodes/decommission/info', + cancelDecommissionSignal.current + ); + cancelDecommissionSignal.current = controller; + return request + }; + + async function loadDataNodeAPI() { + const { request, controller } = await AxiosGetHelper( + '/api/v1/datanodes', + cancelSignal.current + ); + cancelSignal.current = controller; + return request; + }; + + async function removeDatanode(selectedRowKeys: string[]) { + setLoading(true); + const { request, controller } = await AxiosPutHelper( + '/api/v1/datanodes/remove', + selectedRowKeys, + cancelSignal.current + ); + cancelSignal.current = controller; + request.then(() => { + loadData(); + }).catch((error) => { + showDataFetchError(error.toString()); + }).finally(() => { + setLoading(false); + setSelectedRows([]); + }); + } + + const loadData = async () => { + setLoading(true); + // Need to call decommission API on each interval to get updated status + // before datanode API call to compare UUID's + // update 'Operation State' column in table manually before rendering + try { + let decomissionResponse = await loadDecommisionAPI(); + decommissionUuids = decomissionResponse.data?.DatanodesDecommissionInfo?.map( + (item: DatanodeDecomissionInfo) => item.datanodeDetails.uuid + ); + } catch (error) { + decommissionUuids = []; + showDataFetchError((error as AxiosError).toString()); + } + + try { + const datanodesAPIResponse = await loadDataNodeAPI(); + const datanodesResponse: DatanodesResponse = datanodesAPIResponse.data; + const datanodes: DatanodeResponse[] = datanodesResponse.datanodes; + const dataSource: Datanode[] = datanodes?.map( + (datanode) => ({ + hostname: datanode.hostname, + uuid: datanode.uuid, + state: datanode.state, + opState: (decommissionUuids?.includes(datanode.uuid) && datanode.opState !== 'DECOMMISSIONED') + ? 
COLUMN_UPDATE_DECOMMISSIONING + : datanode.opState, + lastHeartbeat: datanode.lastHeartbeat, + storageUsed: datanode.storageReport.used, + storageTotal: datanode.storageReport.capacity, + storageCommitted: datanode.storageReport.committed, + storageRemaining: datanode.storageReport.remaining, + pipelines: datanode.pipelines, + containers: datanode.containers, + openContainers: datanode.openContainers, + leaderCount: datanode.leaderCount, + version: datanode.version, + setupTime: datanode.setupTime, + revision: datanode.revision, + buildDate: datanode.buildDate, + networkLocation: datanode.networkLocation + }) + ); + setLoading(false); + setState({ + ...state, + dataSource: dataSource, + lastUpdated: Number(moment()) + }); + } catch (error) { + setLoading(false); + showDataFetchError((error as AxiosError).toString()) + } + } + + const autoReloadHelper: AutoReloadHelper = new AutoReloadHelper(loadData); + + useEffect(() => { + autoReloadHelper.startPolling(); + loadData(); + + return (() => { + autoReloadHelper.stopPolling(); + cancelRequests([ + cancelSignal.current!, + cancelDecommissionSignal.current! + ]); + }); + }, []); + + function handleSelectionChange(rows: React.Key[]) { + setSelectedRows(rows); + } + + function handleModalOk() { + setModalOpen(false); + removeDatanode(selectedRows as string[]) + }; + + function handleModalCancel() { + setModalOpen(false); + setSelectedRows([]); + }; + + const { dataSource, lastUpdated, columnOptions } = state; + + return ( + <> +
+ Datanodes + +
+
+
+
+
+ { }} + fixedColumn='hostname' + columnLength={columnOptions.length} /> + {selectedRows.length > 0 && + + } +
+ ) => setSearchTerm(e.target.value) + } + onChange={(value) => { + setSearchTerm(''); + setSearchColumn(value as 'hostname' | 'uuid' | 'version' | 'revision') + }} /> +
+ +
+
+ +
+ + Stop Tracking Datanode +
+ Are you sure you want Recon to stop tracking the selected {selectedRows.length} datanode(s)?
+ + ); +} + +export default Datanodes; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less new file mode 100644 index 00000000000..a2fb93f7dad --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less @@ -0,0 +1,48 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +.content-div { + min-height: unset; + + .table-header-section { + display: flex; + justify-content: space-between; + align-items: center; + + .table-filter-section { + font-size: 14px; + font-weight: normal; + display: flex; + column-gap: 8px; + padding: 16px 8px; + } + } + + .uuid-tooltip { + cursor: pointer; + .ant-tooltip-inner { + width: max-content; + } + } + + .tag-block { + display: flex; + column-gap: 8px; + padding: 0px 8px 16px 8px; + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx new file mode 100644 index 00000000000..9059da91f91 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React, { + useEffect, + useRef, + useState +} from 'react'; +import moment from 'moment'; +import { ValueType } from 'react-select'; + +import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; +import Search from '@/v2/components/search/search'; +import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; +import PipelinesTable, { COLUMNS } from '@/v2/components/tables/pipelinesTable'; +import { showDataFetchError } from '@/utils/common'; +import { AutoReloadHelper } from '@/utils/autoReloadHelper'; +import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; + +import { + Pipeline, + PipelinesResponse, + PipelinesState +} from '@/v2/types/pipelines.types'; + +import './pipelines.less'; + + +const defaultColumns = COLUMNS.map(column => ({ + label: (typeof column.title === 'string') + ? column.title + : (column.title as Function)().props.children[0], + value: column.key as string, +})); + +const Pipelines: React.FC<{}> = () => { + const cancelSignal = useRef(); + + const [state, setState] = useState({ + activeDataSource: [], + columnOptions: defaultColumns, + lastUpdated: 0, + }); + const [loading, setLoading] = useState(false); + const [selectedColumns, setSelectedColumns] = useState(defaultColumns); + const [searchTerm, setSearchTerm] = useState(''); + + const debouncedSearch = useDebounce(searchTerm, 300); + + const loadData = () => { + setLoading(true); + //Cancel any previous requests + cancelRequests([cancelSignal.current!]); + + const { request, controller } = AxiosGetHelper( + '/api/v1/pipelines', + cancelSignal.current + ); + + cancelSignal.current = controller; + request.then(response => { + const pipelinesResponse: PipelinesResponse = response.data; + const pipelines: Pipeline[] = pipelinesResponse?.pipelines ?? {}; + setState({ + ...state, + activeDataSource: pipelines, + lastUpdated: Number(moment()) + }) + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError(error.toString()); + }) + } + + const autoReloadHelper: AutoReloadHelper = new AutoReloadHelper(loadData); + + useEffect(() => { + autoReloadHelper.startPolling(); + loadData(); + return (() => { + autoReloadHelper.stopPolling(); + cancelRequests([cancelSignal.current!]); + }) + }, []); + + function handleColumnChange(selected: ValueType) { + setSelectedColumns(selected as Option[]); + } + + const { + activeDataSource, + columnOptions, + lastUpdated + } = state; + + return ( + <> +
+ Pipelines + +
+
+
+
+
+ { }} + fixedColumn='pipelineId' + columnLength={COLUMNS.length} /> +
+ ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
+ +
+
+ + ); +} +export default Pipelines; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx index 605883caff9..cb25cedbcec 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx @@ -18,22 +18,16 @@ import React, { useEffect, useRef, useState } from 'react'; import moment from 'moment'; -import { Table } from 'antd'; -import { Link } from 'react-router-dom'; -import { - TablePaginationConfig, - ColumnsType -} from 'antd/es/table'; import { ValueType } from 'react-select/src/types'; -import QuotaBar from '@/components/quotaBar/quotaBar'; import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; -import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; import SingleSelect from '@/v2/components/select/singleSelect'; +import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; +import VolumesTable, { COLUMNS } from '@/v2/components/tables/volumesTable'; import Search from '@/v2/components/search/search'; -import { byteToSize, showDataFetchError } from '@/utils/common'; +import { showDataFetchError } from '@/utils/common'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; import { useDebounce } from '@/v2/hooks/debounce.hook'; @@ -72,93 +66,6 @@ const Volumes: React.FC<{}> = () => { const cancelSignal = useRef(); - const COLUMNS: ColumnsType = [ - { - title: 'Volume', - dataIndex: 'volume', - key: 'volume', - sorter: (a: Volume, b: Volume) => a.volume.localeCompare(b.volume), - defaultSortOrder: 'ascend' as const, - width: '15%' - }, - { - title: 'Owner', - dataIndex: 'owner', - key: 'owner', - sorter: (a: Volume, b: Volume) => a.owner.localeCompare(b.owner) - }, - { - title: 'Admin', - dataIndex: 'admin', - key: 'admin', - sorter: (a: Volume, b: Volume) => a.admin.localeCompare(b.admin) - }, - { - title: 'Creation Time', - dataIndex: 'creationTime', - key: 'creationTime', - sorter: (a: Volume, b: Volume) => a.creationTime - b.creationTime, - render: (creationTime: number) => { - return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Modification Time', - dataIndex: 'modificationTime', - key: 'modificationTime', - sorter: (a: Volume, b: Volume) => a.modificationTime - b.modificationTime, - render: (modificationTime: number) => { - return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Quota (Size)', - dataIndex: 'quotaInBytes', - key: 'quotaInBytes', - render: (quotaInBytes: number) => { - return quotaInBytes && quotaInBytes !== -1 ? 
byteToSize(quotaInBytes, 3) : 'NA'; - } - }, - { - title: 'Namespace Capacity', - key: 'namespaceCapacity', - sorter: (a: Volume, b: Volume) => a.usedNamespace - b.usedNamespace, - render: (text: string, record: Volume) => ( - - ) - }, - { - title: 'Actions', - key: 'actions', - render: (_: any, record: Volume) => { - const searchParams = new URLSearchParams(); - searchParams.append('volume', record.volume); - - return ( - <> - - Show buckets - - handleAclLinkClick(record)}> - Show ACL - - - ); - } - } - ]; - const defaultColumns = COLUMNS.map(column => ({ label: column.title as string, value: column.key as string, @@ -167,10 +74,10 @@ const Volumes: React.FC<{}> = () => { const [state, setState] = useState({ data: [], lastUpdated: 0, - columnOptions: defaultColumns, - currentRow: {} + columnOptions: defaultColumns }); const [loading, setLoading] = useState(false); + const [currentRow, setCurrentRow] = useState>({}); const [selectedColumns, setSelectedColumns] = useState(defaultColumns); const [selectedLimit, setSelectedLimit] = useState
- + import('@/v2/pages/overview/overview')); const Volumes = lazy(() => import('@/v2/pages/volumes/volumes')) const Buckets = lazy(() => import('@/v2/pages/buckets/buckets')); +const Datanodes = lazy(() => import('@/v2/pages/datanodes/datanodes')); +const Pipelines = lazy(() => import('@/v2/pages/pipelines/pipelines')); export const routesV2 = [ { @@ -33,5 +35,13 @@ export const routesV2 = [ { path: '/Buckets', component: Buckets + }, + { + path: '/Datanodes', + component: Datanodes + }, + { + path: '/Pipelines', + component: Pipelines } ]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts index 5cfc89d85e6..eb499dc617e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts @@ -17,7 +17,6 @@ */ import { Acl } from "@/v2/types/acl.types"; -import { Option } from "@/v2/components/select/singleSelect"; import { Option as MultiOption } from "@/v2/components/select/multiSelect"; // Corresponds to OzoneManagerProtocolProtos.StorageTypeProto @@ -68,4 +67,13 @@ export type BucketsState = { volumeBucketMap: Map>; bucketsUnderVolume: Bucket[]; volumeOptions: MultiOption[]; -} \ No newline at end of file +} + +export type BucketsTableProps = { + loading: boolean; + data: Bucket[]; + handleAclClick: (arg0: Bucket) => void; + selectedColumns: MultiOption[]; + searchColumn: 'name' | 'volumeName'; + searchTerm: string; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts new file mode 100644 index 00000000000..96a37020153 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { Pipeline } from "@/v2/types/pipelines.types"; +import { StorageReport } from "@/v2/types/overview.types"; +import { Option as MultiOption } from "@/v2/components/select/multiSelect"; + +// Corresponds to HddsProtos.NodeState +export const DatanodeStateList = ['HEALTHY', 'STALE', 'DEAD'] as const; +type DatanodeStateType = typeof DatanodeStateList; +export type DatanodeState = DatanodeStateType[number]; + +// Corresponds to HddsProtos.NodeOperationalState +export const DatanodeOpStateList = [ + 'IN_SERVICE', + 'DECOMMISSIONING', + 'DECOMMISSIONED', + 'ENTERING_MAINTENANCE', + 'IN_MAINTENANCE' +] as const; +export type DatanodeOpState = typeof DatanodeOpStateList[number]; + +export type DatanodeResponse = { + hostname: string; + state: DatanodeState; + opState: DatanodeOpState; + lastHeartbeat: string; + storageReport: StorageReport; + pipelines: Pipeline[]; + containers: number; + openContainers: number; + leaderCount: number; + uuid: string; + version: string; + setupTime: number; + revision: string; + buildDate: string; + networkLocation: string; +} + +export type DatanodesResponse = { + totalCount: number; + datanodes: DatanodeResponse[]; +} + +export type Datanode = { + hostname: string; + state: DatanodeState; + opState: DatanodeOpState; + lastHeartbeat: string; + storageUsed: number; + storageTotal: number; + storageRemaining: number; + storageCommitted: number; + pipelines: Pipeline[]; + containers: number; + openContainers: number; + leaderCount: number; + uuid: string; + version: string; + setupTime: number; + revision: string; + buildDate: string; + networkLocation: string; +} + +export type DatanodeDetails = { + uuid: string; +} + +export type DatanodeDecomissionInfo = { + datanodeDetails: DatanodeDetails +} + +export type DatanodesState = { + dataSource: Datanode[]; + lastUpdated: number; + columnOptions: MultiOption[]; +} + +// Datanode Summary endpoint types +type summaryByteString = { + string: string; + bytes: { + validUtf8: boolean; + empty: boolean; + } +} + +type SummaryPort = { + name: string; + value: number; +} + +type SummaryDatanodeDetails = { + level: number; + parent: unknown | null; + cost: number; + uuid: string; + uuidString: string; + ipAddress: string; + hostName: string; + ports: SummaryPort; + certSerialId: null, + version: string | null; + setupTime: number; + revision: string | null; + buildDate: string; + persistedOpState: string; + persistedOpStateExpiryEpochSec: number; + initialVersion: number; + currentVersion: number; + decommissioned: boolean; + maintenance: boolean; + ipAddressAsByteString: summaryByteString; + hostNameAsByteString: summaryByteString; + networkName: string; + networkLocation: string; + networkFullPath: string; + numOfLeaves: number; + networkNameAsByteString: summaryByteString; + networkLocationAsByteString: summaryByteString +} + +type SummaryMetrics = { + decommissionStartTime: string; + numOfUnclosedPipelines: number; + numOfUnderReplicatedContainers: number; + numOfUnclosedContainers: number; +} + +type SummaryContainers = { + UnderReplicated: string[]; + UnClosed: string[]; +} + +export type SummaryData = { + datanodeDetails: SummaryDatanodeDetails; + metrics: SummaryMetrics; + containers: SummaryContainers; +} + +export type DatanodeTableProps = { + loading: boolean; + selectedRows: React.Key[]; + data: Datanode[]; + decommissionUuids: string | string[]; + searchColumn: 'hostname' | 'uuid' | 'version' | 'revision'; + searchTerm: string; + selectedColumns: MultiOption[]; + handleSelectionChange: (arg0: 
React.Key[]) => void; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts new file mode 100644 index 00000000000..7c5a23bc0af --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { Option } from "@/v2/components/select/multiSelect"; + +export const PipelineStatusList = [ + 'OPEN', + 'CLOSING', + 'QUASI_CLOSED', + 'CLOSED', + 'UNHEALTHY', + 'INVALID', + 'DELETED', + 'DORMANT' +] as const; +export type PipelineStatus = typeof PipelineStatusList[number]; + +export type Pipeline = { + pipelineId: string; + status: PipelineStatus; + replicationType: string; + leaderNode: string; + datanodes: string[]; + lastLeaderElection: number; + duration: number; + leaderElections: number; + replicationFactor: string; + containers: number; +} + +export type PipelinesResponse = { + totalCount: number; + pipelines: Pipeline[]; +} + +export type PipelinesState = { + activeDataSource: Pipeline[]; + columnOptions: Option[]; + lastUpdated: number; +} + +export type PipelinesTableProps = { + loading: boolean; + data: Pipeline[]; + selectedColumns: Option[]; + searchTerm: string; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts index 67f007706a4..b808d403584 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts @@ -40,5 +40,13 @@ export type VolumesState = { data: Volume[]; lastUpdated: number; columnOptions: Option[]; - currentRow: Volume | Record; +} + +export type VolumesTableProps = { + loading: boolean; + data: Volume[]; + handleAclClick: (arg0: Volume) => void; + selectedColumns: Option[]; + searchColumn: 'volume' | 'owner' | 'admin'; + searchTerm: string; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts new file mode 100644 index 00000000000..fb553d0db3f --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import moment from "moment"; + +moment.updateLocale('en', { + relativeTime: { + past: '%s ago', + s: '%ds', + m: '1min', + mm: '%dmins', + h: '1hr', + hh: '%dhrs', + d: '1d', + dd: '%dd', + M: '1m', + MM: '%dm', + y: '1y', + yy: '%dy' + } +}); + +export function getTimeDiffFromTimestamp(timestamp: number): string { + const timestampDate = new Date(timestamp); + return moment(timestampDate).fromNow(); +} + +export function getDurationFromTimestamp(timestamp: number): string { + const duration: moment.Duration = moment.duration(timestamp, 'milliseconds'); + // return nothing when the duration is falsy or not correctly parsed (P0D) + if(!duration || duration.toISOString() === "P0D") return ''; + + let elapsedTime = []; + const durationBreakdowns: Record = { + 'y': Math.floor(duration.years()), + 'm': Math.floor(duration.months()), + 'd': Math.floor(duration.days()), + 'h': Math.floor(duration.hours()), + 'min': Math.floor(duration.minutes()), + 's': Math.floor(duration.seconds()) + } + + for (const [key, value] of Object.entries(durationBreakdowns)) { + value > 0 && elapsedTime.push(value + key); + } + + return (elapsedTime.length === 0) ? 
'Just now' : elapsedTime.join(' '); +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 82c7c1b5bef..3c39e4192d2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -886,7 +886,9 @@ public void testUnhealthyContainersFilteredResponse() throws IOException, TimeoutException { String missing = UnHealthyContainerStates.MISSING.toString(); String emptyMissing = UnHealthyContainerStates.EMPTY_MISSING.toString(); + String negativeSize = UnHealthyContainerStates.NEGATIVE_SIZE.toString(); // For NEGATIVE_SIZE state + // Initial empty response verification Response response = containerEndpoint .getUnhealthyContainers(missing, 1000, 1); @@ -899,44 +901,55 @@ public void testUnhealthyContainersFilteredResponse() assertEquals(0, responseObject.getMisReplicatedCount()); assertEquals(Collections.EMPTY_LIST, responseObject.getContainers()); + // Add unhealthy records putContainerInfos(5); uuid1 = newDatanode("host1", "127.0.0.1"); uuid2 = newDatanode("host2", "127.0.0.2"); uuid3 = newDatanode("host3", "127.0.0.3"); uuid4 = newDatanode("host4", "127.0.0.4"); createUnhealthyRecords(5, 4, 3, 2); - createEmptyMissingUnhealthyRecords(2); + createEmptyMissingUnhealthyRecords(2); // For EMPTY_MISSING state + createNegativeSizeUnhealthyRecords(2); // For NEGATIVE_SIZE state + // Check for unhealthy containers response = containerEndpoint.getUnhealthyContainers(missing, 1000, 1); responseObject = (UnhealthyContainersResponse) response.getEntity(); + // Summary should have the count for all unhealthy: assertEquals(5, responseObject.getMissingCount()); assertEquals(4, responseObject.getOverReplicatedCount()); assertEquals(3, responseObject.getUnderReplicatedCount()); assertEquals(2, responseObject.getMisReplicatedCount()); - Collection records - = responseObject.getContainers(); + Collection records = responseObject.getContainers(); assertTrue(records.stream() .flatMap(containerMetadata -> containerMetadata.getReplicas().stream() .map(ContainerHistory::getState)) .allMatch(s -> s.equals("UNHEALTHY"))); - // There should only be 5 missing containers and no others as we asked for - // only missing. + + // Verify only missing containers are returned assertEquals(5, records.size()); for (UnhealthyContainerMetadata r : records) { assertEquals(missing, r.getContainerState()); } + // Check for empty missing containers, should return zero Response filteredEmptyMissingResponse = containerEndpoint .getUnhealthyContainers(emptyMissing, 1000, 1); responseObject = (UnhealthyContainersResponse) filteredEmptyMissingResponse.getEntity(); records = responseObject.getContainers(); - // Assert for zero empty missing containers. 
+ assertEquals(0, records.size()); + + // Check for negative size containers, should return zero + Response filteredNegativeSizeResponse = containerEndpoint + .getUnhealthyContainers(negativeSize, 1000, 1); + responseObject = (UnhealthyContainersResponse) filteredNegativeSizeResponse.getEntity(); + records = responseObject.getContainers(); assertEquals(0, records.size()); } + @Test public void testUnhealthyContainersInvalidState() { WebApplicationException e = assertThrows(WebApplicationException.class, @@ -1043,6 +1056,15 @@ private void createEmptyMissingUnhealthyRecords(int emptyMissing) { } } + private void createNegativeSizeUnhealthyRecords(int negativeSize) { + int cid = 0; + for (int i = 0; i < negativeSize; i++) { + createUnhealthyRecord(++cid, UnHealthyContainerStates.NEGATIVE_SIZE.toString(), + 3, 3, 0, null); // Added for NEGATIVE_SIZE state + } + } + + private void createUnhealthyRecords(int missing, int overRep, int underRep, int misRep) { int cid = 0; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java index e30590df55e..7c874a9e299 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.recon.api.NodeEndpoint; import org.apache.hadoop.ozone.recon.api.PipelineEndpoint; import org.apache.hadoop.ozone.recon.api.TaskStatusService; -import org.apache.hadoop.ozone.recon.api.TriggerDBSyncEndpoint; import org.apache.hadoop.ozone.recon.api.UtilizationEndpoint; import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; @@ -70,8 +69,14 @@ public void testAdminOnlyEndpoints() { assertThat(allEndpoints).isNotEmpty(); - // If an endpoint is added, it must be explicitly added to this set or be - // marked with @AdminOnly for this test to pass. + // If an endpoint is added, it must either require admin privileges by being + // marked with the `@AdminOnly` annotation, or be added to this set to exclude it. + // - Any endpoint that displays information related to the filesystem namespace + // (including aggregate counts), user information, or allows modification to the + // cluster's state should be marked as `@AdminOnly`. + // - Read-only endpoints that only return information about node status or + // cluster state do not require the `@AdminOnly` annotation and can be excluded + // from admin requirements by adding them to this set. 
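Editor's note on the admin-endpoint policy described in the comment above: for most new Recon endpoints the decision comes down to a single class-level annotation. The sketch below is illustrative only; the class name, path and handler are hypothetical, and the import for @AdminOnly is omitted because its package is not shown in this diff. Only the @AdminOnly marker itself is taken from the comment and test above.

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;

// Hypothetical endpoint that exposes namespace-level aggregates, so per the
// policy above it should be annotated @AdminOnly rather than being added to
// the nonAdminEndpoints exclusion set in this test.
@Path("/exampleNamespaceSummary")
@AdminOnly
public class ExampleNamespaceSummaryEndpoint {
  @GET
  public Response getSummary() {
    return Response.ok().build();  // would return aggregate namespace counts
  }
}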
Set> nonAdminEndpoints = new HashSet<>(); nonAdminEndpoints.add(UtilizationEndpoint.class); nonAdminEndpoints.add(ClusterStateEndpoint.class); @@ -79,7 +84,6 @@ public void testAdminOnlyEndpoints() { nonAdminEndpoints.add(NodeEndpoint.class); nonAdminEndpoints.add(PipelineEndpoint.class); nonAdminEndpoints.add(TaskStatusService.class); - nonAdminEndpoints.add(TriggerDBSyncEndpoint.class); assertThat(allEndpoints).containsAll(nonAdminEndpoints); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index ae46bd8b5b5..46e4506a5ef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -22,15 +22,18 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.fail; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.IOException; import java.time.Duration; import java.util.ArrayList; @@ -105,7 +108,7 @@ public void testRun() throws Exception { // Create 7 containers. The first 5 will have various unhealthy states // defined below. 
The container with ID=6 will be healthy and - // container with ID=7 will be EMPTY_MISSING + // container with ID=7 will be EMPTY_MISSING (but not inserted into DB) List mockContainers = getMockContainers(7); when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); when(scmMock.getContainerManager()).thenReturn(containerManagerMock); @@ -132,20 +135,20 @@ public void testRun() throws Exception { when(containerManagerMock.getContainerReplicas(containerInfo2.containerID())) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); - // return 0 replicas for container ID 3 -> Empty Missing + // return 0 replicas for container ID 3 -> EMPTY_MISSING (will not be inserted into DB) ContainerInfo containerInfo3 = TestContainerInfo.newBuilderForTest().setContainerID(3).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(3L))).thenReturn(containerInfo3); when(containerManagerMock.getContainerReplicas(containerInfo3.containerID())) .thenReturn(Collections.emptySet()); - // Return 5 Healthy -> Over replicated + // Return 5 Healthy Replicas -> Over-replicated ContainerInfo containerInfo4 = TestContainerInfo.newBuilderForTest().setContainerID(4).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(4L))).thenReturn(containerInfo4); when(containerManagerMock.getContainerReplicas(containerInfo4.containerID())) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, - State.CLOSED, State.CLOSED, State.CLOSED)); + State.CLOSED, State.CLOSED, State.CLOSED)); // Mis-replicated ContainerInfo containerInfo5 = @@ -158,7 +161,7 @@ public void testRun() throws Exception { when(containerManagerMock.getContainerReplicas(containerInfo5.containerID())) .thenReturn(misReplicas); - // Return 3 Healthy -> Healthy container + // Return 3 Healthy Replicas -> Healthy container ContainerInfo containerInfo6 = TestContainerInfo.newBuilderForTest().setContainerID(6).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(6L))).thenReturn(containerInfo6); @@ -166,12 +169,14 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 7 -> MISSING + // return 0 replicas for container ID 7 -> MISSING (will later transition to EMPTY_MISSING but not inserted into DB) ContainerInfo containerInfo7 = TestContainerInfo.newBuilderForTest().setContainerID(7).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(7L))).thenReturn(containerInfo7); when(containerManagerMock.getContainerReplicas(containerInfo7.containerID())) .thenReturn(Collections.emptySet()); + when(reconContainerMetadataManager.getKeyCountForContainer( + 7L)).thenReturn(5L); // Indicates non-empty container 7 for now List all = unHealthyContainersTableHandle.findAll(); assertThat(all).isEmpty(); @@ -180,8 +185,8 @@ public void testRun() throws Exception { ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(5)); - when(reconContainerMetadataManager.getKeyCountForContainer( - 7L)).thenReturn(5L); + + // Start container health task ContainerHealthTask containerHealthTask = new ContainerHealthTask(scmMock.getContainerManager(), scmMock.getScmServiceProvider(), @@ -189,8 +194,12 @@ public void testRun() throws Exception 
{ placementMock, reconTaskConfig, reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); + + // Ensure unhealthy container count in DB matches expected LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 6)); + (unHealthyContainersTableHandle.count() == 5)); + + // Check for UNDER_REPLICATED container states UnhealthyContainers rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); @@ -200,6 +209,10 @@ public void testRun() throws Exception { assertEquals("UNDER_REPLICATED", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); + // Assert that EMPTY_MISSING state containers were never added to DB. + assertEquals(0, + unHealthyContainersTableHandle.fetchByContainerId(3L).size()); + List unhealthyContainers = containerHealthSchemaManager.getUnhealthyContainers( ALL_REPLICAS_BAD, 0, Integer.MAX_VALUE); @@ -209,10 +222,7 @@ public void testRun() throws Exception { assertEquals(0, unhealthyContainers.get(0).getActualReplicaCount().intValue()); - rec = unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); - assertEquals("EMPTY_MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); - + // Check for MISSING state in container ID 7 rec = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); assertEquals("MISSING", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); @@ -233,9 +243,7 @@ public void testRun() throws Exception { assertThat(taskStatus.getLastUpdatedTimestamp()) .isGreaterThan(currentTime); - // Now run the job again, to check that relevant records are updated or - // removed as appropriate. Need to adjust the return value for all the mocks - // Under replicated -> Delta goes from 2 to 1 + // Adjust the mock results and rerun to check for updates or removal of records when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) .thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED)); @@ -244,7 +252,7 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 3 -> Still empty Missing + // Container 3 remains EMPTY_MISSING, but no DB insertion when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); @@ -253,11 +261,16 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); - // Was mis-replicated - make it healthy now + // Convert container 7 which was MISSING to EMPTY_MISSING (not inserted into DB) + when(reconContainerMetadataManager.getKeyCountForContainer( + 7L)).thenReturn(0L); + placementMock.setMisRepWhenDnPresent(null); + // Ensure count is reduced after EMPTY_MISSING containers are not inserted LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 4)); + (unHealthyContainersTableHandle.count() == 2)); + rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); assertEquals(1, rec.getReplicaDelta().intValue()); @@ -266,36 +279,21 @@ public void testRun() throws Exception { assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(2L).size()); - rec = unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); - assertEquals("EMPTY_MISSING", rec.getContainerState()); - assertEquals(3, 
rec.getReplicaDelta().intValue()); - - rec = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); - assertEquals("MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); + // Assert that for container 7 no records exist in DB because it's now EMPTY_MISSING + assertEquals(0, + unHealthyContainersTableHandle.fetchByContainerId(7L).size()); rec = unHealthyContainersTableHandle.fetchByContainerId(4L).get(0); assertEquals("OVER_REPLICATED", rec.getContainerState()); assertEquals(-1, rec.getReplicaDelta().intValue()); - // This container is now healthy, it should not be in the table any more + // Ensure container 5 is now healthy and not in the table assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(5L).size()); - // Again make container Id 7 as empty which was missing as well, so in next - // container health task run, this container also should be deleted from - // UNHEALTHY_CONTAINERS table because we want to cleanup any existing - // EMPTY and MISSING containers from UNHEALTHY_CONTAINERS table. - when(reconContainerMetadataManager.getKeyCountForContainer(7L)).thenReturn(0L); - LambdaTestUtils.await(6000, 1000, () -> { - UnhealthyContainers emptyMissingContainer = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); - return ("EMPTY_MISSING".equals(emptyMissingContainer.getContainerState())); - }); - - // Just check once again that count doesn't change, only state of - // container 7 changes from MISSING to EMPTY_MISSING + // Just check once again that count remains consistent LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 4)); + (unHealthyContainersTableHandle.count() == 2)); } @Test @@ -370,17 +368,12 @@ public void testDeletedContainer() throws Exception { reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); LambdaTestUtils.await(6000, 1000, () -> - (unHealthyContainersTableHandle.count() == 2)); + (unHealthyContainersTableHandle.count() == 1)); UnhealthyContainers rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("MISSING", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); - rec = - unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); - assertEquals("EMPTY_MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); - ReconTaskStatus taskStatus = reconTaskStatusDao.findById(containerHealthTask.getTaskName()); assertThat(taskStatus.getLastUpdatedTimestamp()) @@ -473,64 +466,106 @@ public void testAllContainerStateInsertions() { } @Test - public void testNegativeSizeContainers() throws Exception { - // Setup mock objects and test environment - UnhealthyContainersDao unhealthyContainersDao = + public void testMissingAndEmptyMissingContainerDeletion() throws Exception { + // Setup mock DAOs and managers + UnhealthyContainersDao unHealthyContainersTableHandle = getDao(UnhealthyContainersDao.class); ContainerHealthSchemaManager containerHealthSchemaManager = new ContainerHealthSchemaManager( getSchemaDefinition(ContainerSchemaDefinition.class), - unhealthyContainersDao); + unHealthyContainersTableHandle); ReconStorageContainerManagerFacade scmMock = mock(ReconStorageContainerManagerFacade.class); + MockPlacementPolicy placementMock = new MockPlacementPolicy(); ContainerManager containerManagerMock = mock(ContainerManager.class); StorageContainerServiceProvider scmClientMock = mock(StorageContainerServiceProvider.class); ReconContainerMetadataManager 
reconContainerMetadataManager = mock(ReconContainerMetadataManager.class); - MockPlacementPolicy placementMock = new MockPlacementPolicy(); + mock(ReconContainerMetadataManager.class); - // Mock container info setup - List mockContainers = getMockContainers(3); - when(scmMock.getContainerManager()).thenReturn(containerManagerMock); + // Create 2 containers. They start in CLOSED state in Recon. + List mockContainers = getMockContainers(2); when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); + when(scmMock.getContainerManager()).thenReturn(containerManagerMock); when(containerManagerMock.getContainers(any(ContainerID.class), anyInt())).thenReturn(mockContainers); + + // Mark both containers as initially CLOSED in Recon for (ContainerInfo c : mockContainers) { - when(containerManagerMock.getContainer( - c.containerID())).thenReturn(c); - when(scmClientMock.getContainerWithPipeline( - c.getContainerID())).thenReturn(new ContainerWithPipeline(c, null)); - when(containerManagerMock.getContainer(c.containerID()) - .getUsedBytes()).thenReturn(Long.valueOf(-10)); + when(containerManagerMock.getContainer(c.containerID())).thenReturn(c); } - // Verify the table is initially empty - assertThat(unhealthyContainersDao.findAll()).isEmpty(); + // Simulate SCM reporting the containers as DELETED + ContainerInfo deletedContainer1 = getMockDeletedContainer(1); + ContainerInfo deletedContainer2 = getMockDeletedContainer(2); + + when(scmClientMock.getContainerWithPipeline(1)) + .thenReturn(new ContainerWithPipeline(deletedContainer1, null)); + when(scmClientMock.getContainerWithPipeline(2)) + .thenReturn(new ContainerWithPipeline(deletedContainer2, null)); + + // Both containers start as CLOSED in Recon (MISSING or EMPTY_MISSING) + when(containerManagerMock.getContainer(ContainerID.valueOf(1L)).getState()) + .thenReturn(HddsProtos.LifeCycleState.CLOSED); + when(containerManagerMock.getContainer(ContainerID.valueOf(2L)).getState()) + .thenReturn(HddsProtos.LifeCycleState.CLOSED); - // Setup and start the container health task + // Replicas are empty, so both containers should be considered for deletion + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) + .thenReturn(Collections.emptySet()); + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) + .thenReturn(Collections.emptySet()); + + // Initialize UnhealthyContainers in DB (MISSING and EMPTY_MISSING) + // Create and set up the first UnhealthyContainer for a MISSING container + UnhealthyContainers container1 = new UnhealthyContainers(); + container1.setContainerId(1L); + container1.setContainerState("MISSING"); + container1.setExpectedReplicaCount(3); + container1.setActualReplicaCount(0); + container1.setReplicaDelta(3); + container1.setInStateSince(System.currentTimeMillis()); + + // Create and set up the second UnhealthyContainer for an EMPTY_MISSING container + UnhealthyContainers container2 = new UnhealthyContainers(); + container2.setContainerId(2L); + container2.setContainerState("MISSING"); + container2.setExpectedReplicaCount(3); + container2.setActualReplicaCount(0); + container2.setReplicaDelta(3); + container2.setInStateSince(System.currentTimeMillis()); + + unHealthyContainersTableHandle.insert(container1); + unHealthyContainersTableHandle.insert(container2); + + when(reconContainerMetadataManager.getKeyCountForContainer(1L)).thenReturn(5L); + when(reconContainerMetadataManager.getKeyCountForContainer(2L)).thenReturn(0L); + + // Start the container health task ReconTaskStatusDao 
reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); - ContainerHealthTask containerHealthTask = new ContainerHealthTask( - scmMock.getContainerManager(), scmMock.getScmServiceProvider(), - reconTaskStatusDao, - containerHealthSchemaManager, placementMock, reconTaskConfig, - reconContainerMetadataManager, - new OzoneConfiguration()); - containerHealthTask.start(); + ContainerHealthTask containerHealthTask = + new ContainerHealthTask(scmMock.getContainerManager(), + scmMock.getScmServiceProvider(), + reconTaskStatusDao, containerHealthSchemaManager, + placementMock, reconTaskConfig, + reconContainerMetadataManager, new OzoneConfiguration()); - // Wait for the task to identify unhealthy containers - LambdaTestUtils.await(6000, 1000, - () -> unhealthyContainersDao.count() == 3); + containerHealthTask.start(); - // Assert that all unhealthy containers have been identified as NEGATIVE_SIZE states - List negativeSizeContainers = - unhealthyContainersDao.fetchByContainerState("NEGATIVE_SIZE"); - assertThat(negativeSizeContainers).hasSize(3); + // Wait for the task to complete and ensure that updateContainerState is invoked for + // container IDs 1 and 2 to mark the containers as DELETED, since they are DELETED in SCM. + LambdaTestUtils.await(60000, 1000, () -> { + verify(containerManagerMock, times(1)) + .updateContainerState(ContainerID.valueOf(1L), HddsProtos.LifeCycleEvent.DELETE); + verify(containerManagerMock, times(1)) + .updateContainerState(ContainerID.valueOf(2L), HddsProtos.LifeCycleEvent.DELETE); + return true; + }); } - private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index 7d55e612bad..4e9965638a1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -127,6 +127,58 @@ public void testMissingRecordRetained() { )); } + @Test + public void testEmptyMissingRecordNotInsertedButLogged() { + // Create a container that is in EMPTY_MISSING state + Set replicas = new HashSet<>(); + ContainerHealthStatus status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + + // Initialize stats map + Map> unhealthyContainerStateStatsMap = new HashMap<>(); + initializeUnhealthyContainerStateStatsMap(unhealthyContainerStateStatsMap); + + // Generate records for EMPTY_MISSING container + List records = ContainerHealthTask.ContainerHealthRecords.generateUnhealthyRecords( + status, (long) 345678, unhealthyContainerStateStatsMap); + + // Assert that no records are created for EMPTY_MISSING state + assertEquals(0, records.size()); + + // Assert that the EMPTY_MISSING state is logged + assertEquals(1, unhealthyContainerStateStatsMap.get(UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L)); + } + + @Test + public void testNegativeSizeRecordNotInsertedButLogged() { + // Simulate a container with NEGATIVE_SIZE state + when(container.getUsedBytes()).thenReturn(-10L); // Negative size + Set replicas = generateReplicas(container, 
CLOSED, CLOSED); + ContainerHealthStatus status = + new ContainerHealthStatus(container, replicas, placementPolicy, reconContainerMetadataManager, CONF); + + // Initialize stats map + Map<UnHealthyContainerStates, Map<String, Long>> + unhealthyContainerStateStatsMap = new HashMap<>(); + initializeUnhealthyContainerStateStatsMap(unhealthyContainerStateStatsMap); + + // Generate records for NEGATIVE_SIZE container + List<UnhealthyContainers> records = + ContainerHealthTask.ContainerHealthRecords.generateUnhealthyRecords( + status, (long) 123456, unhealthyContainerStateStatsMap); + + // Assert that none of the records are for the NEGATIVE_SIZE state. + records.forEach(record -> assertFalse(record.getContainerState() + .equals(UnHealthyContainerStates.NEGATIVE_SIZE.toString()))); + + + // Assert that the NEGATIVE_SIZE state is logged + assertEquals(1, unhealthyContainerStateStatsMap.get( + UnHealthyContainerStates.NEGATIVE_SIZE).getOrDefault(CONTAINER_COUNT, 0L)); + } + + + @Test public void testUnderReplicatedRecordRetainedAndUpdated() { // under replicated container @@ -396,13 +448,9 @@ public void testCorrectRecordsGenerated() { status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, reconContainerMetadataManager, CONF); - records = ContainerHealthTask.ContainerHealthRecords + ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 345678, unhealthyContainerStateStatsMap); - assertEquals(1, records.size()); - rec = records.get(0); - assertEquals(UnHealthyContainerStates.EMPTY_MISSING.toString(), - rec.getContainerState()); assertEquals(3, rec.getExpectedReplicaCount().intValue()); assertEquals(0, rec.getActualReplicaCount().intValue()); @@ -582,6 +630,8 @@ private void initializeUnhealthyContainerStateStatsMap( UnHealthyContainerStates.OVER_REPLICATED, new HashMap<>()); unhealthyContainerStateStatsMap.put( UnHealthyContainerStates.MIS_REPLICATED, new HashMap<>()); + unhealthyContainerStateStatsMap.put( + UnHealthyContainerStates.NEGATIVE_SIZE, new HashMap<>()); } private void logUnhealthyContainerStats( @@ -590,7 +640,7 @@ private void logUnhealthyContainerStats( // If any EMPTY_MISSING containers, then it is possible that such // containers got stuck in the closing state which never got // any replicas created on the datanodes. In this case, we log it as - // EMPTY, and insert as EMPTY_MISSING in UNHEALTHY_CONTAINERS table. + // EMPTY_MISSING containers, but don't add them to the unhealthy container table.
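The record-generator tests above pin down the new behaviour: EMPTY_MISSING and NEGATIVE_SIZE containers are only tallied in the per-state stats map (and later logged), while no row is produced for the UNHEALTHY_CONTAINERS table. A minimal sketch of that skip-and-count pattern follows; the class and method names and the containerCountKey parameter are illustrative, but UnHealthyContainerStates and its EMPTY_MISSING / NEGATIVE_SIZE constants are the real enum values exercised by the tests.

import java.util.HashMap;
import java.util.Map;
import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates;

// Sketch only: tally EMPTY_MISSING / NEGATIVE_SIZE containers for logging and
// tell the caller to skip record generation for them.
final class UnhealthyRecordSkipSketch {
  static boolean countAndSkip(UnHealthyContainerStates state,
      Map<UnHealthyContainerStates, Map<String, Long>> stats,
      String containerCountKey) {
    if (state == UnHealthyContainerStates.EMPTY_MISSING
        || state == UnHealthyContainerStates.NEGATIVE_SIZE) {
      stats.computeIfAbsent(state, s -> new HashMap<>())
          .merge(containerCountKey, 1L, Long::sum);
      return true;   // no UNHEALTHY_CONTAINERS record is emitted for this state
    }
    return false;    // all other states still produce table records
  }
}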
unhealthyContainerStateStatsMap.entrySet().forEach(stateEntry -> { UnHealthyContainerStates unhealthyContainerState = stateEntry.getKey(); Map containerStateStatsMap = stateEntry.getValue(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index d49ff17f3bf..cc63663bf22 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -19,11 +19,9 @@ import javax.annotation.Priority; import javax.inject.Inject; -import javax.ws.rs.WebApplicationException; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerRequestFilter; import javax.ws.rs.container.PreMatching; -import javax.ws.rs.core.Response; import javax.ws.rs.ext.Provider; import com.google.common.annotations.VisibleForTesting; @@ -41,6 +39,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Filter used to construct string to sign from unfiltered request. @@ -116,10 +115,4 @@ public SignatureInfo getSignatureInfo() { return signatureInfo; } - private WebApplicationException wrapOS3Exception(OS3Exception os3Exception) { - return new WebApplicationException(os3Exception.getErrorMessage(), - os3Exception, - Response.status(os3Exception.getHttpCode()) - .entity(os3Exception.toXml()).build()); - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java index 4f08527668c..7614c4933a8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java @@ -154,8 +154,6 @@ private void setCertificate(String omServiceID, } } catch (CertificateException ce) { throw new IOException(ce); - } catch (IOException e) { - throw e; } finally { if (certClient != null) { certClient.close(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java index cdaaa228ecd..5881baa174b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java @@ -34,7 +34,9 @@ import java.lang.reflect.Type; import javax.ws.rs.ext.Provider; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read CompleteMultipartUploadRequest wo namespace. 
@@ -69,6 +71,10 @@ public CompleteMultipartUploadRequest readFrom( MultivaluedMap multivaluedMap, InputStream inputStream) throws IOException, WebApplicationException { try { + if (inputStream.available() == 0) { + throw wrapOS3Exception(INVALID_REQUEST.withMessage("You must specify at least one part")); + } + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); UnmarshallerHandler unmarshallerHandler = context.createUnmarshaller().getUnmarshallerHandler(); @@ -78,8 +84,11 @@ public CompleteMultipartUploadRequest readFrom( filter.setParent(xmlReader); filter.parse(new InputSource(inputStream)); return (CompleteMultipartUploadRequest) unmarshallerHandler.getResult(); + } catch (WebApplicationException e) { + throw e; } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); + throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); } } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java index 0c34c08091a..775ec789f38 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.s3.endpoint; import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.ext.MessageBodyReader; @@ -34,6 +33,9 @@ import org.xml.sax.InputSource; import org.xml.sax.XMLReader; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; + /** * Custom unmarshaller to read MultiDeleteRequest w/wo namespace. */ @@ -78,7 +80,7 @@ public MultiDeleteRequest readFrom(Class type, filter.parse(new InputSource(entityStream)); return (MultiDeleteRequest) unmarshallerHandler.getResult(); } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); + throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); } } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java index 3fa6149815e..c832915176b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java @@ -34,7 +34,9 @@ import java.lang.annotation.Annotation; import java.lang.reflect.Type; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read PutBucketAclRequest wo namespace. 
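Taken together, these unmarshaller changes replace the old generic "Can't parse request body to XML." failure with a proper S3 error document. A minimal sketch of the shared pattern is below; the class, method and parseXmlBody are placeholders, while wrapOS3Exception, INVALID_REQUEST and withMessage are the helpers introduced by this change (see the S3Utils and OS3Exception hunks further down).

import java.io.InputStream;
import javax.ws.rs.WebApplicationException;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST;
import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception;

// Illustrative only: the shared pattern now used by the S3 request-body
// unmarshallers. parseXmlBody stands in for the real SAX/JAXB parsing.
final class BodyParsingSketch {
  Object readBody(InputStream inputStream) {
    try {
      return parseXmlBody(inputStream);
    } catch (WebApplicationException e) {
      throw e;                 // already carries a well-formed S3 error document
    } catch (Exception e) {
      // Surface as an InvalidRequest S3 error XML instead of an opaque failure.
      throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage()));
    }
  }

  private Object parseXmlBody(InputStream in) throws Exception {
    throw new UnsupportedOperationException("placeholder for the real parser");
  }
}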
@@ -79,7 +81,7 @@ public S3BucketAcl readFrom( filter.parse(new InputSource(inputStream)); return (S3BucketAcl)(unmarshallerHandler.getResult()); } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); + throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); } } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java index 810aa2085f4..3660457146f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java @@ -158,4 +158,9 @@ public String toXml() { this.getErrorMessage(), this.getResource(), this.getRequestId()); } + + /** Create a copy with specific message. */ + public OS3Exception withMessage(String message) { + return new OS3Exception(code, message, httpCode); + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java index d644162a8ec..fda298f27dc 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; @@ -116,4 +118,11 @@ public static S3StorageType toS3StorageType(String storageType) throw newError(INVALID_ARGUMENT, storageType, ex); } } + + public static WebApplicationException wrapOS3Exception(OS3Exception ex) { + return new WebApplicationException(ex.getErrorMessage(), ex, + Response.status(ex.getHttpCode()) + .entity(ex.toXml()) + .build()); + } } diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 04c1c8602cb..d8c5599f304 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -160,6 +160,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-tools + + org.apache.ratis + ratis-shell + info.picocli diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 4653aa3eeb3..5e1207519ab 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.utils.Filter; import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -128,6 +129,14 @@ public class DBScanner implements Callable, SubcommandWithParent { "eg.) 
\"name,acls.type\" for showing name and type under acls.") private String fieldsFilter; + @CommandLine.Option(names = {"--filter"}, + description = "Comma-separated list of \"::\" where " + + " is any valid field of the record, " + + " is (EQUALS,MAX or MIN) and " + + " is the value of the field. " + + "eg.) \"dataSize:equals:1000\" for showing records having the value 1000 for dataSize") + private String filter; + @CommandLine.Option(names = {"--dnSchema", "--dn-schema", "-d"}, description = "Datanode DB Schema Version: V1/V2/V3", defaultValue = "V3") @@ -298,7 +307,7 @@ private void processRecords(ManagedRocksIterator iterator, } Future future = threadPool.submit( new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, - withKey, schemaV3, fieldsFilter)); + withKey, schemaV3, fieldsFilter, filter)); futures.add(future); batch = new ArrayList<>(batchSize); sequenceId++; @@ -306,7 +315,7 @@ private void processRecords(ManagedRocksIterator iterator, } if (!batch.isEmpty()) { Future future = threadPool.submit(new Task(dbColumnFamilyDef, - batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter)); + batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter, filter)); futures.add(future); } @@ -473,10 +482,12 @@ private static class Task implements Callable { private final boolean withKey; private final boolean schemaV3; private String valueFields; + private String valueFilter; + @SuppressWarnings("checkstyle:parameternumber") Task(DBColumnFamilyDefinition dbColumnFamilyDefinition, ArrayList batch, LogWriter logWriter, - long sequenceId, boolean withKey, boolean schemaV3, String valueFields) { + long sequenceId, boolean withKey, boolean schemaV3, String valueFields, String filter) { this.dbColumnFamilyDefinition = dbColumnFamilyDefinition; this.batch = batch; this.logWriter = logWriter; @@ -484,6 +495,7 @@ private static class Task implements Callable { this.withKey = withKey; this.schemaV3 = schemaV3; this.valueFields = valueFields; + this.valueFilter = filter; } Map getFieldSplit(List fields, Map fieldMap) { @@ -504,6 +516,31 @@ Map getFieldSplit(List fields, Map field return fieldMap; } + void getFilterSplit(List fields, Map fieldMap, Filter leafValue) throws IOException { + int len = fields.size(); + if (len == 1) { + Filter currentValue = fieldMap.get(fields.get(0)); + if (currentValue != null) { + err().println("Cannot pass multiple values for the same field and " + + "cannot have filter for both parent and child"); + throw new IOException("Invalid filter passed"); + } + fieldMap.put(fields.get(0), leafValue); + } else { + Filter fieldMapGet = fieldMap.computeIfAbsent(fields.get(0), k -> new Filter()); + if (fieldMapGet.getValue() != null) { + err().println("Cannot pass multiple values for the same field and " + + "cannot have filter for both parent and child"); + throw new IOException("Invalid filter passed"); + } + Map nextLevel = fieldMapGet.getNextLevel(); + if (nextLevel == null) { + fieldMapGet.setNextLevel(new HashMap<>()); + } + getFilterSplit(fields.subList(1, len), fieldMapGet.getNextLevel(), leafValue); + } + } + @Override public Void call() { try { @@ -517,6 +554,26 @@ public Void call() { } } + Map fieldsFilterSplitMap = new HashMap<>(); + if (valueFilter != null) { + for (String field : valueFilter.split(",")) { + String[] fieldValue = field.split(":"); + if (fieldValue.length != 3) { + err().println("Error: Invalid format for filter \"" + field + + "\". Usage: ::. 
Ignoring filter passed"); + } else { + Filter filter = new Filter(fieldValue[1], fieldValue[2]); + if (filter.getOperator() == null) { + err().println("Error: Invalid format for filter \"" + filter + + "\". <operator> can be one of [EQUALS,MIN,MAX]. Ignoring filter passed"); + } else { + String[] subfields = fieldValue[0].split("\\."); + getFilterSplit(Arrays.asList(subfields), fieldsFilterSplitMap, filter); + } + } + } + } + for (ByteArrayKeyValue byteArrayKeyValue : batch) { StringBuilder sb = new StringBuilder(); if (!(sequenceId == FIRST_SEQUENCE_ID && results.isEmpty())) { @@ -552,9 +609,14 @@ public Void call() { Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(byteArrayKeyValue.getValue()); + if (valueFilter != null && + !checkFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsFilterSplitMap)) { + // the record doesn't pass the filter + continue; + } if (valueFields != null) { Map filteredValue = new HashMap<>(); - filteredValue.putAll(getFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); + filteredValue.putAll(getFieldsFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); sb.append(WRITER.writeValueAsString(filteredValue)); } else { sb.append(WRITER.writeValueAsString(o)); @@ -570,7 +632,92 @@ public Void call() { return null; } - Map getFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { + boolean checkFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) + throws IOException { + for (Map.Entry field : fieldsSplitMap.entrySet()) { + try { + Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); + Object valueObject = valueClassField.get(obj); + Filter fieldValue = field.getValue(); + + if (valueObject == null) { + // there is no such field in the record. This filter will be ignored for the current record. + continue; + } + if (fieldValue == null) { + err().println("Malformed filter. Check input"); + throw new IOException("Invalid filter passed"); + } else if (fieldValue.getNextLevel() == null) { + // reached the end of fields hierarchy, check if they match the filter + // Currently, only equals operation is supported + if (Filter.FilterOperator.EQUALS.equals(fieldValue.getOperator()) && + !String.valueOf(valueObject).equals(fieldValue.getValue())) { + return false; + } else if (!Filter.FilterOperator.EQUALS.equals(fieldValue.getOperator())) { + err().println("Only EQUALS operator is supported currently."); + throw new IOException("Invalid filter passed"); + } + } else { + Map subfields = fieldValue.getNextLevel(); + if (Collection.class.isAssignableFrom(valueObject.getClass())) { + if (!checkFilteredObjectCollection((Collection) valueObject, subfields)) { + return false; + } + } else if (Map.class.isAssignableFrom(valueObject.getClass())) { + Map valueObjectMap = (Map) valueObject; + boolean flag = false; + for (Map.Entry ob : valueObjectMap.entrySet()) { + boolean subflag; + if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { + subflag = checkFilteredObjectCollection((Collection)ob.getValue(), subfields); + } else { + subflag = checkFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); + } + if (subflag) { + // at least one item in the map/list of the record has matched the filter, + // so record passes the filter.
+ flag = true; + break; + } + } + if (!flag) { + // none of the items in the map/list passed the filter => record doesn't pass the filter + return false; + } + } else { + if (!checkFilteredObject(valueObject, valueClassField.getType(), subfields)) { + return false; + } + } + } + } catch (NoSuchFieldException ex) { + err().println("ERROR: no such field: " + field); + exception = true; + return false; + } catch (IllegalAccessException e) { + err().println("ERROR: Cannot get field from object: " + field); + exception = true; + return false; + } catch (Exception ex) { + err().println("ERROR: field: " + field + ", ex: " + ex); + exception = true; + return false; + } + } + return true; + } + + boolean checkFilteredObjectCollection(Collection valueObject, Map fields) + throws NoSuchFieldException, IllegalAccessException, IOException { + for (Object ob : valueObject) { + if (checkFilteredObject(ob, ob.getClass(), fields)) { + return true; + } + } + return false; + } + + Map getFieldsFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { Map valueMap = new HashMap<>(); for (Map.Entry field : fieldsSplitMap.entrySet()) { try { @@ -583,7 +730,7 @@ Map getFilteredObject(Object obj, Class clazz, Map subfieldObjectsList = - getFilteredObjectCollection((Collection) valueObject, subfields); + getFieldsFilteredObjectCollection((Collection) valueObject, subfields); valueMap.put(field.getKey(), subfieldObjectsList); } else if (Map.class.isAssignableFrom(valueObject.getClass())) { Map subfieldObjectsMap = new HashMap<>(); @@ -591,16 +738,16 @@ Map getFilteredObject(Object obj, Class clazz, Map ob : valueObjectMap.entrySet()) { Object subfieldValue; if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { - subfieldValue = getFilteredObjectCollection((Collection)ob.getValue(), subfields); + subfieldValue = getFieldsFilteredObjectCollection((Collection)ob.getValue(), subfields); } else { - subfieldValue = getFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); + subfieldValue = getFieldsFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); } subfieldObjectsMap.put(ob.getKey(), subfieldValue); } valueMap.put(field.getKey(), subfieldObjectsMap); } else { valueMap.put(field.getKey(), - getFilteredObject(valueObject, valueClassField.getType(), subfields)); + getFieldsFilteredObject(valueObject, valueClassField.getType(), subfields)); } } } catch (NoSuchFieldException ex) { @@ -612,11 +759,11 @@ Map getFilteredObject(Object obj, Class clazz, Map getFilteredObjectCollection(Collection valueObject, Map fields) + List getFieldsFilteredObjectCollection(Collection valueObject, Map fields) throws NoSuchFieldException, IllegalAccessException { List subfieldObjectsList = new ArrayList<>(); for (Object ob : valueObject) { - Object subfieldValue = getFilteredObject(ob, ob.getClass(), fields); + Object subfieldValue = getFieldsFilteredObject(ob, ob.getClass(), fields); subfieldObjectsList.add(subfieldValue); } return subfieldObjectsList; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index c964676f266..58b62d22b98 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -355,11 +355,7 @@ public Void call() throws Exception { // wait until all keys are added or exception occurred. 
while ((numberOfKeysAdded.get() != totalKeyCount) && exception == null) { - try { - Thread.sleep(CHECK_INTERVAL_MILLIS); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(CHECK_INTERVAL_MILLIS); } executor.shutdown(); executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); @@ -373,11 +369,7 @@ public Void call() throws Exception { if (validateExecutor != null) { while (!validationQueue.isEmpty()) { - try { - Thread.sleep(CHECK_INTERVAL_MILLIS); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(CHECK_INTERVAL_MILLIS); } validateExecutor.shutdown(); validateExecutor.awaitTermination(Integer.MAX_VALUE, @@ -421,11 +413,7 @@ private void doCleanObjects() throws InterruptedException { // wait until all Buckets are cleaned or exception occurred. while ((numberOfBucketsCleaned.get() != totalBucketCount) && exception == null) { - try { - Thread.sleep(CHECK_INTERVAL_MILLIS); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(CHECK_INTERVAL_MILLIS); } } catch (InterruptedException e) { LOG.error("Failed to wait until all Buckets are cleaned", e); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java new file mode 100644 index 00000000000..aca41844a18 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.repair; + +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; +import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient; +import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.debug.DBDefinitionFactory; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import java.security.cert.CertificateFactory; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import picocli.CommandLine; + +import java.io.IOException; +import java.io.PrintWriter; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Paths; +import java.security.cert.CertPath; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Map; +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; +import java.util.Optional; +import java.util.Arrays; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.VALID_SCM_CERTS; +import static org.apache.hadoop.hdds.security.x509.certificate.client.DefaultCertificateClient.CERT_FILE_NAME_FORMAT; +import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.removeTrailingSlashIfNeeded; + +/** + * In case of accidental deletion of SCM certificates from local storage, + * this tool restores the certs that are persisted into the SCM DB. + * Note that this will only work if the SCM has persisted certs in its RocksDB + * and private keys of the SCM are intact. 
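+ *
+ * <p>For illustration only (the launcher name and the DB path below are assumptions and may differ
+ * per deployment), the tool would be invoked through the Ozone repair entry point, for example:
+ * <pre>
+ *   ozone repair cert-recover --db=/data/metadata/scm.db
+ * </pre>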
+ */ +@CommandLine.Command( + name = "cert-recover", + description = "Recover Deleted SCM Certificate from RocksDB") +@MetaInfServices(SubcommandWithParent.class) +public class RecoverSCMCertificate implements Callable, SubcommandWithParent { + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "SCM DB Path") + private String dbPath; + + @CommandLine.ParentCommand + private OzoneRepair parent; + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @Override + public Class getParentType() { + return OzoneRepair.class; + } + + private PrintWriter err() { + return spec.commandLine().getErr(); + } + + private PrintWriter out() { + return spec.commandLine().getOut(); + } + + @Override + public Void call() throws Exception { + dbPath = removeTrailingSlashIfNeeded(dbPath); + String tableName = VALID_SCM_CERTS.getName(); + DBDefinition dbDefinition = + DBDefinitionFactory.getDefinition(Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new Exception("Error: Incorrect DB Path"); + } + DBColumnFamilyDefinition columnFamilyDefinition = + getDbColumnFamilyDefinition(tableName, dbDefinition); + + try { + List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List cfHandleList = new ArrayList<>(); + byte[] tableNameBytes = tableName.getBytes(StandardCharsets.UTF_8); + ColumnFamilyHandle cfHandle = null; + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, cfDescList, + cfHandleList)) { + cfHandle = getColumnFamilyHandle(cfHandleList, tableNameBytes); + SecurityConfig securityConfig = new SecurityConfig(parent.getOzoneConf()); + + Map allCerts = getAllCerts(columnFamilyDefinition, cfHandle, db); + out().println("All Certs in DB : " + allCerts.keySet()); + String hostName = InetAddress.getLocalHost().getHostName(); + out().println("Host: " + hostName); + + X509Certificate subCertificate = getSubCertificate(allCerts, hostName); + X509Certificate rootCertificate = getRootCertificate(allCerts); + + out().println("Sub cert serialID for this host: " + subCertificate.getSerialNumber().toString()); + out().println("Root cert serialID: " + rootCertificate.getSerialNumber().toString()); + + boolean isRootCA = false; + + String caPrincipal = rootCertificate.getSubjectDN().getName(); + if (caPrincipal.contains(hostName)) { + isRootCA = true; + } + storeCerts(subCertificate, rootCertificate, isRootCA, securityConfig); + } + } catch (RocksDBException | CertificateException exception) { + err().print("Failed to recover scm cert"); + } + return null; + } + + private static ColumnFamilyHandle getColumnFamilyHandle( + List cfHandleList, byte[] tableNameBytes) throws Exception { + ColumnFamilyHandle cfHandle = null; + for (ColumnFamilyHandle cf : cfHandleList) { + if (Arrays.equals(cf.getName(), tableNameBytes)) { + cfHandle = cf; + break; + } + } + if (cfHandle == null) { + throw new Exception("Error: VALID_SCM_CERTS table not found in DB"); + } + return cfHandle; + } + + private static X509Certificate getRootCertificate( + Map allCerts) throws Exception { + Optional cert = allCerts.values().stream().filter( + c -> c.getSubjectDN().getName() + .contains(OzoneConsts.SCM_ROOT_CA_PREFIX)).findFirst(); + if (!cert.isPresent()) { + throw new Exception("Root CA Cert not found in the DB for this host, Certs in the DB : " + allCerts.keySet()); + } + return cert.get(); + } + + + private static X509Certificate getSubCertificate( + Map allCerts, String hostName) throws Exception { + Optional cert = 
allCerts.values().stream().filter( + c -> c.getSubjectDN().getName() + .contains(OzoneConsts.SCM_SUB_CA_PREFIX) && c.getSubjectDN() + .getName().contains(hostName)).findFirst(); + if (!cert.isPresent()) { + throw new Exception("Sub CA Cert not found in the DB for this host, Certs in the DB : " + allCerts.keySet()); + } + return cert.get(); + } + + private static Map getAllCerts( + DBColumnFamilyDefinition columnFamilyDefinition, + ColumnFamilyHandle cfHandle, ManagedRocksDB db) throws IOException, RocksDBException { + Map allCerts = new HashMap<>(); + ManagedRocksIterator rocksIterator = ManagedRocksIterator.managed(db.get().newIterator(cfHandle)); + rocksIterator.get().seekToFirst(); + while (rocksIterator.get().isValid()) { + BigInteger id = (BigInteger) columnFamilyDefinition.getKeyCodec() + .fromPersistedFormat(rocksIterator.get().key()); + X509Certificate certificate = + (X509Certificate) columnFamilyDefinition.getValueCodec() + .fromPersistedFormat(rocksIterator.get().value()); + allCerts.put(id, certificate); + rocksIterator.get().next(); + } + return allCerts; + } + + private static DBColumnFamilyDefinition getDbColumnFamilyDefinition( + String tableName, DBDefinition dbDefinition) throws Exception { + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(tableName); + if (columnFamilyDefinition == null) { + throw new Exception( + "Error: VALID_SCM_CERTS table no found in Definition"); + } + return columnFamilyDefinition; + } + + private void storeCerts(X509Certificate scmCertificate, + X509Certificate rootCertificate, boolean isRootCA, SecurityConfig securityConfig) + throws CertificateException, IOException { + CertificateCodec certCodec = + new CertificateCodec(securityConfig, SCMCertificateClient.COMPONENT_NAME); + + out().println("Writing certs to path : " + certCodec.getLocation().toString()); + + CertPath certPath = addRootCertInPath(scmCertificate, rootCertificate); + CertPath rootCertPath = getRootCertPath(rootCertificate); + String encodedCert = CertificateCodec.getPEMEncodedString(certPath); + String certName = String.format(CERT_FILE_NAME_FORMAT, + CAType.NONE.getFileNamePrefix() + scmCertificate.getSerialNumber().toString()); + certCodec.writeCertificate(certName, encodedCert); + + String rootCertName = String.format(CERT_FILE_NAME_FORMAT, + CAType.SUBORDINATE.getFileNamePrefix() + rootCertificate.getSerialNumber().toString()); + String encodedRootCert = CertificateCodec.getPEMEncodedString(rootCertPath); + certCodec.writeCertificate(rootCertName, encodedRootCert); + + certCodec.writeCertificate(certCodec.getLocation().toAbsolutePath(), + securityConfig.getCertificateFileName(), encodedCert); + + if (isRootCA) { + CertificateCodec rootCertCodec = + new CertificateCodec(securityConfig, OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME); + out().println("Writing root certs to path : " + rootCertCodec.getLocation().toString()); + rootCertCodec.writeCertificate(rootCertCodec.getLocation().toAbsolutePath(), + securityConfig.getCertificateFileName(), encodedRootCert); + } + } + + public CertPath addRootCertInPath(X509Certificate scmCert, + X509Certificate rootCert) throws CertificateException { + ArrayList updatedList = new ArrayList<>(); + updatedList.add(scmCert); + updatedList.add(rootCert); + CertificateFactory certFactory = + CertificateCodec.getCertFactory(); + return certFactory.generateCertPath(updatedList); + } + + public CertPath getRootCertPath(X509Certificate rootCert) + throws CertificateException { + ArrayList updatedList = new ArrayList<>(); + 
updatedList.add(rootCert); + CertificateFactory factory = CertificateCodec.getCertFactory(); + return factory.generateCertPath(updatedList); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java new file mode 100644 index 00000000000..5bc98268064 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.ratis.shell.cli.sh.RatisShell; + +import picocli.CommandLine; + +/** + * Ozone Ratis Command line tool. + */ +@CommandLine.Command(name = "ozone ratis", + description = "Shell for running Ratis commands", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneRatis extends Shell { + + public OzoneRatis() { + super(OzoneRatis.class); + } + + /** + * Main for the OzoneRatis Command handling. + * + * @param argv - System Args Strings[] + */ + public static void main(String[] argv) throws Exception { + new OzoneRatis().run(argv); + } + + @Override + public int execute(String[] argv) { + TracingUtil.initTracing("shell", createOzoneConfiguration()); + String spanName = "ozone ratis" + String.join(" ", argv); + return TracingUtil.executeInNewSpan(spanName, () -> { + // TODO: When Ozone has RATIS-2155, update this line to use the RatisShell.Builder + // in order to setup TLS and other confs. + final RatisShell shell = new RatisShell(System.out); + return shell.run(argv); + }); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckCommand.java new file mode 100644 index 00000000000..ba53b21b98d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckCommand.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.shell.fsck; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.OzoneShell; +import org.apache.hadoop.ozone.shell.Shell; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.Writer; + +/** + * The {@link OzoneFsckCommand} class is a command-line tool for performing file system checks within Ozone. + * This tool supports various options to fine-tune the file system check process, + * including filters for volumes, buckets, and keys. + * It can also produce verbose output detailing keys, containers, blocks, and chunks as well as delete corrupted keys. + *

+ * Options: + *

+ * <ul>
+ *   <li>{@code --volume-prefix}: Specifies the prefix for volumes that should be included in the check.</li>
+ *   <li>{@code --bucket-prefix}: Specifies the prefix for buckets that should be included in the check.</li>
+ *   <li>{@code --key-prefix}: Specifies the prefix for keys that should be included in the check.</li>
+ *   <li>{@code --delete}: Deletes the corrupted keys.</li>
+ *   <li>{@code --keys}: Displays information about good and healthy keys.</li>
+ *   <li>{@code --containers}: Includes information about containers.</li>
+ *   <li>{@code --blocks}: Includes information about blocks.</li>
+ *   <li>{@code --chunks}: Includes information about chunks.</li>
+ *   <li>{@code --verbose} or {@code -v}: Provides full verbose output, ignoring the --keys, --containers, --blocks,
+ *     and --chunks options.</li>
+ *   <li>{@code --output} or {@code -o}: Specifies the file to output information about the scan process.
+ *     If not specified, the information will be printed to the system output.</li>
+ * </ul>
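+ *
+ * <p>Illustrative invocation (the prefixes and output path are made-up examples, and it is assumed
+ * that the command is registered under the Ozone shell entry point):
+ * <pre>
+ *   ozone sh fscheck --volume-prefix=vol1 --bucket-prefix=bucket1 --containers --blocks --output=/tmp/fscheck.out
+ * </pre>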
+ */ +@CommandLine.Command(name = "fscheck", + description = "Operational tool to run system-wide file check in Ozone", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +@MetaInfServices(SubcommandWithParent.class) +public class OzoneFsckCommand extends Handler implements SubcommandWithParent { + + @CommandLine.Option(names = {"--volume-prefix"}, + description = "Specifies the prefix for volumes that should be included in the check") + private String volumePrefix; + + @CommandLine.Option(names = {"--bucket-prefix"}, + description = "Specifies the prefix for buckets that should be included in the check") + private String bucketPrefix; + + @CommandLine.Option(names = {"--key-prefix"}, + description = "Specifies the prefix for keys that should be included in the check") + private String keyPrefix; + + @CommandLine.Option(names = {"--delete"}, + description = "Deletes the corrupted keys") + private boolean delete; + + @CommandLine.Option(names = {"--keys"}, + description = "Specifies whether to display information about good and healthy keys") + private boolean keys; + + @CommandLine.Option(names = {"--containers"}, + description = "Specifies whether to include information about containers") + private boolean containers; + + @CommandLine.Option(names = {"--blocks"}, + description = "Specifies whether to include information about blocks") + private boolean blocks; + + @CommandLine.Option(names = {"--chunks"}, + description = "Specifies whether to include information about chunks") + private boolean chunks; + + @CommandLine.Option(names = {"--verbose", "-v"}, + description = "Full verbose output; ignores --keys, --containers, --blocks, --chunks options") + private boolean verbose; + + @CommandLine.Option(names = {"--output", "-o"}, + description = "Specifies the file to output information about the scan process." + + " If not specified, the information will be printed to the system output") + private String output; + + @CommandLine.ParentCommand + private Shell shell; + + @Override + public boolean isVerbose() { + return shell.isVerbose(); + } + + @Override + public OzoneConfiguration createOzoneConfiguration() { + return shell.createOzoneConfiguration(); + } + + @Override + protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + OzoneFsckVerboseSettings verboseSettings = new OzoneFsckVerboseSettings(); + + OzoneConfiguration ozoneConfiguration = getConf(); + + try (Writer writer = new PrintWriter(System.out); + OzoneFsckHandler handler = + new OzoneFsckHandler(address, verboseSettings, writer, delete, client, ozoneConfiguration)) { + try { + handler.scan(); + } finally { + writer.flush(); + } + } catch (Exception e) { + throw new IOException("Can't execute fscheck command", e); + } + } + + @Override + public Class getParentType() { + return OzoneShell.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckHandler.java new file mode 100644 index 00000000000..70e5de917a3 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckHandler.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.shell.fsck; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.VerifyBlockResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ContainerMultinodeApi; +import org.apache.hadoop.hdds.scm.storage.ContainerMultinodeApiImpl; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.shell.OzoneAddress; + +import java.io.IOException; +import java.io.Writer; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; + +/** + * OzoneFsckHandler is responsible for checking the integrity of keys in an Ozone filesystem. + * It traverses volumes, buckets, and keys to detect and optionally delete corrupted keys. 
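+ *
+ * <p>Abridged sketch of the per-location check performed in {@code scanKey} below (the response map key is
+ * assumed to be {@code DatanodeDetails}; error handling and output formatting are omitted):
+ * <pre>{@code
+ *   Map<DatanodeDetails, VerifyBlockResponseProto> responses = containerClient.verifyBlock(
+ *       location.getBlockID().getDatanodeBlockIDProtobuf(), location.getToken());
+ *   for (VerifyBlockResponseProto response : responses.values()) {
+ *     if (response.hasValid() && !response.getValid()) {
+ *       // this block replica failed verification on that datanode
+ *     }
+ *   }
+ * }</pre>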
+ */ +public class OzoneFsckHandler implements AutoCloseable { + private final OzoneAddress address; + + private final OzoneFsckVerboseSettings verboseSettings; + + private final Writer writer; + + private final boolean deleteCorruptedKeys; + + private final OzoneClient client; + + private final OzoneManagerProtocol omClient; + + private final ContainerOperationClient containerOperationClient; + + private final XceiverClientManager xceiverClientManager; + + public OzoneFsckHandler(OzoneAddress address, OzoneFsckVerboseSettings verboseSettings, Writer writer, + boolean deleteCorruptedKeys, OzoneClient client, OzoneConfiguration ozoneConfiguration) throws IOException { + this.address = address; + this.verboseSettings = verboseSettings; + this.writer = writer; + this.deleteCorruptedKeys = deleteCorruptedKeys; + this.client = client; + this.omClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); + this.containerOperationClient = new ContainerOperationClient(ozoneConfiguration); + this.xceiverClientManager = containerOperationClient.getXceiverClientManager(); + } + + /** + * Initiates a scan operation that traverses through the volumes, buckets, and keys to perform file system checks. + * + * @throws IOException if an I/O error occurs during the scan process. + */ + public void scan() throws IOException { + scanVolumes(); + } + + private void scanVolumes() throws IOException { + Iterator volumes = client.getObjectStore().listVolumes(address.getVolumeName()); + + writer.write("Scanning volumes\n"); + + while (volumes.hasNext()) { + scanBuckets(volumes.next()); + } + } + + private void scanBuckets(OzoneVolume volume) throws IOException { + Iterator buckets = volume.listBuckets(address.getBucketName()); + + writer.write("Scanning buckets for volume " + volume.getName() + "\n"); + + while (buckets.hasNext()) { + scanKeys(buckets.next()); + } + } + + private void scanKeys(OzoneBucket bucket) throws IOException { + Iterator keys = bucket.listKeys(address.getKeyName()); + + writer.write("Scanning keys for bucket " + bucket.getName() + "\n"); + + while (keys.hasNext()) { + scanKey(keys.next()); + } + } + + private void scanKey(OzoneKey key) throws IOException { + OmKeyArgs keyArgs = createKeyArgs(key); + + KeyInfoWithVolumeContext keyInfoWithContext = omClient.getKeyInfo(keyArgs, false); + + OmKeyInfo keyInfo = keyInfoWithContext.getKeyInfo(); + + List locations = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); + + for (OmKeyLocationInfo location : locations) { + Pipeline pipeline = getKeyPipeline(location.getPipeline()); + + XceiverClientSpi xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline); + + try (ContainerMultinodeApi containerClient = new ContainerMultinodeApiImpl(xceiverClient)) { + Map responses = containerClient.verifyBlock( + location.getBlockID().getDatanodeBlockIDProtobuf(), + location.getToken() + ); + + for (VerifyBlockResponseProto response : responses.values()) { + if (response.hasValid() && !response.getValid()) { + writer.write(String.format("Block %s is damaged", location.getBlockID())); + } + } + } catch (Exception e) { + throw new IOException("Can't sent request to Datanode.", e); + } + } + } + + private Pipeline getKeyPipeline(Pipeline keyPipeline) { + boolean isECKey = keyPipeline.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.EC; + if (!isECKey && keyPipeline.getType() != STAND_ALONE) { + return Pipeline.newBuilder(keyPipeline) + 
.setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .build(); + } else { + return keyPipeline; + } + } + + private OmKeyArgs createKeyArgs(OzoneKey key) { + return new OmKeyArgs.Builder() + .setVolumeName(key.getVolumeName()) + .setBucketName(key.getBucketName()) + .setKeyName(key.getName()) + .build(); + } + + @Override + public void close() throws Exception { + this.xceiverClientManager.close(); + this.containerOperationClient.close(); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckVerboseSettings.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckVerboseSettings.java new file mode 100644 index 00000000000..2998ef95814 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/OzoneFsckVerboseSettings.java @@ -0,0 +1,5 @@ +package org.apache.hadoop.ozone.shell.fsck; + +public class OzoneFsckVerboseSettings { + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/package-info.java new file mode 100644 index 00000000000..b324f89f09e --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/fsck/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * FS check commands for Ozone. + */ +package org.apache.hadoop.ozone.shell.fsck; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java index 833f4f7e779..35095dd7ff2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java @@ -71,7 +71,7 @@ public class PutKeyHandler extends KeyHandler { @Option(names = "--expectedGeneration", description = "Store key only if it already exists and its generation matches the value provided") - private long expectedGeneration; + private Long expectedGeneration; @Override protected void execute(OzoneClient client, OzoneAddress address) @@ -131,9 +131,14 @@ private void async( private OzoneOutputStream createOrReplaceKey(OzoneBucket bucket, String keyName, long size, Map keyMetadata, ReplicationConfig replicationConfig ) throws IOException { - return expectedGeneration > 0 - ? 
bucket.rewriteKey(keyName, size, expectedGeneration, replicationConfig, keyMetadata) - : bucket.createKey(keyName, size, replicationConfig, keyMetadata); + if (expectedGeneration != null) { + final long existingGeneration = expectedGeneration; + Preconditions.checkArgument(existingGeneration > 0, + "expectedGeneration must be positive, but was %s", existingGeneration); + return bucket.rewriteKey(keyName, size, existingGeneration, replicationConfig, keyMetadata); + } + + return bucket.createKey(keyName, size, replicationConfig, keyMetadata); } private void stream( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java index 8cc80502386..00270310737 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java @@ -227,11 +227,7 @@ private void doCleanBuckets() throws InterruptedException { // wait until all Buckets are cleaned or exception occurred. while (numberOfBucketsCleaned.get() != totalBucketCount && exception == null) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(100); } } catch (InterruptedException e) { LOG.error("Failed to wait until all Buckets are cleaned", e); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java new file mode 100644 index 00000000000..129e1a6158d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.utils; + +import java.util.Map; + +/** + * Represent class which has info of what operation and value a set of records should be filtered with. 
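+ *
+ * <p>For illustration (the field names are made up), a CLI filter such as {@code "keyInfo.dataSize:equals:1000"}
+ * can be represented as a parent {@code Filter} whose {@code nextLevel} maps the child field to a leaf filter:
+ * <pre>{@code
+ *   Map<String, Filter> next = new HashMap<>();
+ *   next.put("dataSize", new Filter("equals", "1000"));
+ *   Filter parent = new Filter();
+ *   parent.setNextLevel(next);
+ * }</pre>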
+ */ +public class Filter { + private FilterOperator operator; + private Object value; + private Map nextLevel = null; + + public Filter() { + this.operator = null; + this.value = null; + } + + public Filter(FilterOperator operator, Object value) { + this.operator = operator; + this.value = value; + } + + public Filter(String op, Object value) { + this.operator = getFilterOperator(op); + this.value = value; + } + + public Filter(FilterOperator operator, Object value, Map next) { + this.operator = operator; + this.value = value; + this.nextLevel = next; + } + + public Filter(String op, Object value, Map next) { + this.operator = getFilterOperator(op); + this.value = value; + this.nextLevel = next; + } + + public FilterOperator getOperator() { + return operator; + } + + public void setOperator(FilterOperator operator) { + this.operator = operator; + } + + public Object getValue() { + return value; + } + + public void setValue(Object value) { + this.value = value; + } + + public Map getNextLevel() { + return nextLevel; + } + + public void setNextLevel(Map nextLevel) { + this.nextLevel = nextLevel; + } + + public FilterOperator getFilterOperator(String op) { + if (op.equalsIgnoreCase("equals")) { + return FilterOperator.EQUALS; + } else if (op.equalsIgnoreCase("max")) { + return FilterOperator.MAX; + } else if (op.equalsIgnoreCase("min")) { + return FilterOperator.MIN; + } else { + return null; + } + } + + @Override + public String toString() { + return "(" + operator + "," + value + "," + nextLevel + ")"; + } + + /** + * Operation of the filter. + */ + public enum FilterOperator { + EQUALS, + MAX, + MIN; + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java new file mode 100644 index 00000000000..9c27bedcf7d --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java @@ -0,0 +1,172 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell; + +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.charset.StandardCharsets; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for OzoneRatis. + */ +public class TestOzoneRatis { + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + private OzoneRatis ozoneRatis; + + @BeforeEach + public void setUp() throws UnsupportedEncodingException { + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + ozoneRatis = new OzoneRatis(); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + /** + * Execute method to invoke the OzoneRatis class and capture output. 
+ * + * @param args command line arguments to pass + * @return the output from OzoneRatis + */ + private String execute(String[] args) throws IOException { + ozoneRatis.execute(args); + return outContent.toString(StandardCharsets.UTF_8.name()); + } + + @Test + public void testBasicOzoneRatisCommand() throws IOException { + String[] args = {""}; + String output = execute(args); + assertTrue(output.contains("Usage: ratis sh [generic options]")); + } + + @Test + public void testLocalRaftMetaConfSubcommand(@TempDir Path tempDir) throws IOException { + // Set up temporary directory and files + Path metadataDir = tempDir.resolve("data/metadata/ratis/test-cluster/current/"); + Files.createDirectories(metadataDir); + + // Create a dummy raft-meta.conf file using protobuf + Path raftMetaConfFile = metadataDir.resolve("raft-meta.conf"); + + // Create a LogEntryProto with a dummy index and peer + RaftProtos.RaftPeerProto raftPeerProto = RaftProtos.RaftPeerProto.newBuilder() + .setId(ByteString.copyFromUtf8("peer1")) + .setAddress("localhost:8000") + .setStartupRole(RaftProtos.RaftPeerRole.FOLLOWER) + .build(); + + RaftProtos.LogEntryProto logEntryProto = RaftProtos.LogEntryProto.newBuilder() + .setConfigurationEntry(RaftProtos.RaftConfigurationProto.newBuilder() + .addPeers(raftPeerProto).build()) + .setIndex(0) + .build(); + + // Write the logEntryProto to the raft-meta.conf file + try (OutputStream out = Files.newOutputStream(raftMetaConfFile)) { + logEntryProto.writeTo(out); + } + + + String[] args = {"local", "raftMetaConf", "-peers", "peer1|localhost:8080", "-path", metadataDir.toString()}; + String output = execute(args); + + assertTrue(output.contains("Index in the original file is: 0")); + assertTrue(output.contains("Generate new LogEntryProto info is:")); + + // Verify that the new raft-meta.conf is generated + Path newRaftMetaConfFile = metadataDir.resolve("new-raft-meta.conf"); + assertTrue(Files.exists(newRaftMetaConfFile), "New raft-meta.conf file should be created."); + + // Verify content of the newly generated file + try (InputStream in = Files.newInputStream(newRaftMetaConfFile)) { + RaftProtos.LogEntryProto newLogEntryProto = RaftProtos.LogEntryProto.parseFrom(in); + assertEquals(1, newLogEntryProto.getIndex()); + RaftProtos.RaftPeerProto peerProto = newLogEntryProto.getConfigurationEntry().getPeers(0); + assertEquals("peer1", peerProto.getId().toStringUtf8()); + assertEquals("localhost:8080", peerProto.getAddress()); + assertEquals(RaftProtos.RaftPeerRole.FOLLOWER, peerProto.getStartupRole()); + } + } + + @Test + public void testMissingRequiredArguments() throws IOException { + String[] args = {"local", "raftMetaConf"}; + String output = execute(args); + assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required options: peers, path")); + } + + @Test + public void testMissingPeerArgument() throws IOException { + String[] args = {"local", "raftMetaConf", "-path", "/path"}; + String output = execute(args); + assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required option: peers")); + } + + @Test + public void testMissingPathArgument() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "localhost:8080"}; + String output = execute(args); + assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required option: path")); + } + + @Test + public void testInvalidPeersFormat() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "localhost8080", "-path", "/path"}; + String 
output = execute(args); + assertTrue(output.contains("Failed to parse the server address parameter \"localhost8080\".")); + } + + @Test + public void testDuplicatePeersAddress() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "localhost:8080,localhost:8080", "-path", "/path"}; + String output = execute(args); + assertTrue(output.contains("Found duplicated address: localhost:8080.")); + } + + @Test + public void testDuplicatePeersId() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "peer1|localhost:8080,peer1|localhost:8081", "-path", "/path"}; + String output = execute(args); + assertTrue(output.contains("Found duplicated ID: peer1.")); + } +} diff --git a/pom.xml b/pom.xml index c3834286870..b607372b0ee 100644 --- a/pom.xml +++ b/pom.xml @@ -183,7 +183,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.0.1 1.9.25 1.11 - 4.7.6 + 4.7.5 0.16.0 0.10.2 @@ -758,6 +758,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ratis-common ${ratis.version} + + org.apache.ratis + ratis-shell + ${ratis.version} + io.netty