From 4d2ea7888d7cc87e0934313d17ca64ae5cc98689 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Wed, 6 Nov 2024 19:53:32 -0800
Subject: [PATCH 01/37] HDDS-11650. ContainerId list to track all containers
 created in a datanode

Change-Id: I94fd413a2d778ac5d86a7da5126cf3d1cac8113a
---
 .../hadoop/hdds/utils/db/Proto2EnumCodec.java | 98 +++++++++
 .../org/apache/hadoop/ozone/OzoneConsts.java | 1 +
 hadoop-hdds/container-service/pom.xml | 6 +
 .../container/common/impl/ContainerSet.java | 56 ++++-
 .../container/common/impl/HddsDispatcher.java | 2 +-
 .../container/common/interfaces/DBHandle.java | 18 +-
 .../statemachine/DatanodeStateMachine.java | 11 +-
 .../DeleteBlocksCommandHandler.java | 5 +-
 .../common/utils/ContainerCache.java | 16 +-
 .../ozone/container/common/utils/RawDB.java | 2 +-
 .../common/utils/ReferenceCountedDB.java | 6 +-
 .../container/keyvalue/KeyValueContainer.java | 3 +-
 .../keyvalue/KeyValueContainerCheck.java | 7 +-
 .../keyvalue/KeyValueContainerData.java | 7 +-
 .../container/keyvalue/KeyValueHandler.java | 3 +-
 .../keyvalue/helpers/BlockUtils.java | 6 +-
 .../helpers/KeyValueContainerUtil.java | 7 +-
 .../keyvalue/impl/BlockManagerImpl.java | 15 +-
 .../background/BlockDeletingTask.java | 11 +-
 .../metadata/AbstractDatanodeStore.java | 202 +++++-------------
 .../container/metadata/AbstractRDBStore.java | 135 ++++++++++++
 .../container/metadata/AbstractStore.java | 73 +++++++
 .../container/metadata/DatanodeStore.java | 49 +----
 .../metadata/MasterVolumeDBDefinition.java | 72 +++++++
 .../metadata/MasterVolumeMetadataStore.java | 71 ++++++
 .../container/metadata/MetadataStore.java | 35 +++
 .../container/ozoneimpl/OzoneContainer.java | 43 +++-
 .../replication/ContainerImporter.java | 2 +-
 .../container/common/ContainerTestUtils.java | 3 +-
 .../common/TestBlockDeletingService.java | 49 ++---
 .../TestSchemaOneBackwardsCompatibility.java | 27 +--
 .../TestSchemaTwoBackwardsCompatibility.java | 12 +-
 ...leRecoveringContainerScrubbingService.java | 3 +-
 .../TestContainerDeletionChoosingPolicy.java | 5 +-
 .../common/impl/TestContainerPersistence.java | 3 +-
 .../common/impl/TestContainerSet.java | 9 +-
 .../common/impl/TestHddsDispatcher.java | 7 +-
 .../TestCloseContainerCommandHandler.java | 3 +-
 .../TestDeleteBlocksCommandHandler.java | 3 +-
 .../volume/TestVolumeSetDiskChecks.java | 3 +-
 .../keyvalue/TestKeyValueBlockIterator.java | 5 +-
 .../keyvalue/TestKeyValueContainer.java | 12 +-
 .../keyvalue/TestKeyValueContainerCheck.java | 5 +-
 .../TestKeyValueContainerIntegrityChecks.java | 3 +-
 ...estKeyValueContainerMetadataInspector.java | 4 +-
 .../keyvalue/TestKeyValueHandler.java | 5 +-
 .../ozoneimpl/TestContainerReader.java | 18 +-
 .../ozoneimpl/TestOzoneContainer.java | 3 +-
 .../replication/TestContainerImporter.java | 7 +-
 .../TestGrpcReplicationService.java | 3 +-
 .../TestReplicationSupervisor.java | 3 +-
 .../TestSendContainerRequestHandler.java | 5 +-
 .../hadoop/hdds/utils/db/DBTestUtils.java | 142 ++++++++++++
 hadoop-hdds/pom.xml | 8 +
 hadoop-hdds/tools/pom.xml | 6 +
 .../container/upgrade/TestUpgradeManager.java | 3 +-
 .../hdds/scm/TestStorageContainerManager.java | 4 +-
 .../hdds/utils/ClusterContainersUtil.java | 3 +-
 .../ozone/client/rpc/OzoneRpcClientTests.java | 5 +-
 .../rpc/TestFailureHandlingByClient.java | 5 +-
 .../rpc/TestValidateBCSIDOnRestart.java | 3 +-
 .../commandhandler/TestBlockDeletion.java | 5 +-
 .../TestDeleteContainerHandler.java | 5 +-
 .../server/ratis/TestCSMMetrics.java | 3 +-
 .../metrics/TestContainerMetrics.java | 5 +-
 .../container/server/TestContainerServer.java | 5 +-
 .../server/TestSecureContainerServer.java | 5 +-
 .../ozone/debug/DBDefinitionFactory.java | 4 +-
 .../debug/container/ContainerCommands.java | 2 +-
 .../freon/ClosedContainerReplicator.java | 91 ++++----
 70 files changed, 1055 insertions(+), 411 deletions(-)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractStore.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java
 create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java
new file mode 100644
index 00000000000..9e5ee487318
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import org.apache.ratis.thirdparty.com.google.protobuf.ProtocolMessageEnum;
+import jakarta.annotation.Nonnull;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Codecs to serialize/deserialize Protobuf v2 enums.
+ */
+public final class Proto2EnumCodec<M extends ProtocolMessageEnum>
+    implements Codec<M> {
+  private static final ConcurrentMap<Class<? extends ProtocolMessageEnum>,
+      Codec<? extends ProtocolMessageEnum>> CODECS
+      = new ConcurrentHashMap<>();
+  private static final IntegerCodec INTEGER_CODEC = IntegerCodec.get();
+
+  /**
+   * @return the {@link Codec} for the given class.
+   */
+  public static <T extends ProtocolMessageEnum> Codec<T> get(T t) {
+    final Codec<? extends ProtocolMessageEnum> codec =
+        CODECS.computeIfAbsent(t.getClass(),
+            key -> new Proto2EnumCodec<>(t));
+    return (Codec<T>) codec;
+  }
+
+  private final Class<M> clazz;
+
+  private Proto2EnumCodec(M m) {
+    this.clazz = (Class<M>) m.getClass();
+  }
+
+  @Override
+  public Class<M> getTypeClass() {
+    return clazz;
+  }
+
+  @Override
+  public boolean supportCodecBuffer() {
+    return INTEGER_CODEC.supportCodecBuffer();
+  }
+
+  @Override
+  public CodecBuffer toCodecBuffer(@Nonnull M value,
+      CodecBuffer.Allocator allocator) throws IOException {
+    return INTEGER_CODEC.toCodecBuffer(value.getNumber(), allocator);
+  }
+
+  private M parseFrom(Integer value) throws IOException {
+    try {
+      // valueOf(int) is a static factory on proto2 enums: it takes a
+      // primitive int and must be invoked with a null receiver.
+      return (M) this.clazz.getMethod("valueOf", int.class).invoke(null, value);
+    } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public M fromCodecBuffer(@Nonnull CodecBuffer buffer)
+      throws IOException {
+    return parseFrom(INTEGER_CODEC.fromCodecBuffer(buffer));
+  }
+
+  @Override
+  public byte[] toPersistedFormat(M value) {
+    return INTEGER_CODEC.toPersistedFormat(value.getNumber());
+  }
+
+  @Override
+  public M fromPersistedFormat(byte[] bytes) throws IOException {
+    return parseFrom(INTEGER_CODEC.fromPersistedFormat(bytes));
+  }
+
+  @Override
+  public M copyObject(M message) {
+    // proto messages are immutable
+    return message;
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index e483feba98d..0eeedce1c64 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -122,6 +122,7 @@ public final class OzoneConsts {
   public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
   public static final String SCM_DB_BACKUP_PREFIX = "scm.db.backup.";
   public static final String CONTAINER_DB_NAME = "container.db";
+  public static final String CONTAINER_META_DB_NAME = "container_meta.db";
   public static final String STORAGE_DIR_CHUNKS = "chunks";
   public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH =
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index c21ca8203b5..d756ad1b066 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -71,6 +71,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-server-framework</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-server-framework</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-client</artifactId>
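For orientation, a round-trip through the new codec looks roughly like the sketch below. It is illustrative only: it assumes the proto2 enum ContainerProtos.ContainerDataProto.State, which the new MasterVolumeDBDefinition (added later in this patch) stores as its table value type, and it must run in a scope that may throw IOException.

    // Hedged sketch: resolve the shared codec instance for a proto2 enum and
    // round-trip a value through its persisted (integer) form.
    Codec<State> codec = Proto2EnumCodec.get(State.OPEN);
    byte[] persisted = codec.toPersistedFormat(State.CLOSED); // serialized as getNumber()
    State decoded = codec.fromPersistedFormat(persisted);     // State.CLOSED again

Because CODECS is keyed by the enum class, every table storing the same enum type shares a single codec instance.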
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 15cc6245ddb..3e69707e5d6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -23,12 +23,16 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.protobuf.Message;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
+
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.utils.ContainerLogger;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -65,10 +69,14 @@ public class ContainerSet implements Iterable<Container<?>> {
       new ConcurrentSkipListMap<>();
   private Clock clock;
   private long recoveringTimeout;
+  private final Table<Long, State> containerIdsTable;
+  private final boolean readOnly;

-  public ContainerSet(long recoveringTimeout) {
+  public ContainerSet(Table<Long, State> containerIdsTable, long recoveringTimeout) {
     this.clock = Clock.system(ZoneOffset.UTC);
+    this.containerIdsTable = containerIdsTable;
     this.recoveringTimeout = recoveringTimeout;
+    this.readOnly = containerIdsTable == null;
   }

   public long getCurrentTime() {
@@ -85,22 +93,41 @@ public void setRecoveringTimeout(long recoveringTimeout) {
     this.recoveringTimeout = recoveringTimeout;
   }

+  public boolean addContainer(Container<?> container) throws StorageContainerException {
+    return addContainer(container, false);
+  }
+
   /**
    * Add Container to container map.
    * @param container container to be added
    * @return If container is added to containerMap returns true, otherwise
    * false
    */
-  public boolean addContainer(Container container) throws
+  public boolean addContainer(Container<?> container, boolean overwriteMissingContainers) throws
       StorageContainerException {
+    Preconditions.checkState(!readOnly, "Container Set is read-only.");
     Preconditions.checkNotNull(container, "container cannot be null");

     long containerId = container.getContainerData().getContainerID();
+    State containerState = container.getContainerData().getState();
+    if (!overwriteMissingContainers && missingContainerSet.contains(containerId)
+        && containerState != State.RECOVERING) {
+      throw new StorageContainerException(String.format("Container with container Id %d is missing in the DN " +
+          "and creation of containers with state %s is not allowed. Only recreation of container in RECOVERING state " +
+          "is allowed.", containerId, containerState.toString()), ContainerProtos.Result.CONTAINER_MISSING);
+    }
     if (containerMap.putIfAbsent(containerId, container) == null) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Container with container Id {} is added to containerMap",
             containerId);
       }
+      try {
+        containerIdsTable.put(containerId, containerState);
+      } catch (IOException e) {
+        throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
+      }
+      missingContainerSet.remove(containerId);
       // wish we could have done this from ContainerData.setState
       container.getContainerData().commitSpace();
       if (container.getContainerData().getState() == RECOVERING) {
@@ -122,21 +149,30 @@ public boolean addContainer(Container container) throws
    * @return Container
    */
   public Container<?> getContainer(long containerId) {
-    Preconditions.checkState(containerId >= 0,
-        "Container Id cannot be negative.");
+    Preconditions.checkState(containerId >= 0, "Container Id cannot be negative.");
     return containerMap.get(containerId);
   }

+  public boolean removeContainer(long containerId) {
+    return removeContainer(containerId, false);
+  }
+
   /**
    * Removes the Container matching with specified containerId.
    * @param containerId ID of the container to remove
    * @return If container is removed from containerMap returns true, otherwise
    * false
    */
-  public boolean removeContainer(long containerId) {
+  public boolean removeContainer(long containerId, boolean markMissing) {
+    Preconditions.checkState(!readOnly, "Container Set is read-only.");
     Preconditions.checkState(containerId >= 0,
         "Container Id cannot be negative.");
-    Container removed = containerMap.remove(containerId);
+    // Mark the replica missing before vacating the map slot, so there is no
+    // window in which the container is tracked in neither structure.
+    if (markMissing) {
+      missingContainerSet.add(containerId);
+    }
+    Container<?> removed = containerMap.remove(containerId);
     if (removed == null) {
       LOG.debug("Container with containerId {} is not present in " +
           "containerMap", containerId);
@@ -155,6 +191,8 @@ public boolean removeContainer(long containerId) {
    * otherwise false.
    */
   public boolean removeRecoveringContainer(long containerId) {
+    Preconditions.checkState(!readOnly,
+        "Container Set is read-only.");
     Preconditions.checkState(containerId >= 0,
         "Container Id cannot be negative.");
     //it might take a little long time to iterate all the entries
@@ -196,7 +234,7 @@ public void handleVolumeFailures(StateContext context) {
     containerMap.values().forEach(c -> {
       ContainerData data = c.getContainerData();
       if (data.getVolume().isFailed()) {
-        removeContainer(data.getContainerID());
+        removeContainer(data.getContainerID(), true);
         LOG.debug("Removing Container {} as the Volume {} " +
             "has failed", data.getContainerID(), data.getVolume());
         failedVolume.set(true);
@@ -347,6 +385,10 @@ public Set<Long> getMissingContainerSet() {
     return missingContainerSet;
   }

+  public Table<Long, State> getContainerIdsTable() {
+    return containerIdsTable;
+  }
+
   /**
    * Builds the missing container set by taking a diff between total no
    * containers actually found and number of containers which actually
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 5fc97184155..d2117097cbd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -276,7 +276,7 @@ private ContainerCommandResponseProto dispatchRequest(
         getMissingContainerSet().remove(containerID);
       }
     }
-    if (getMissingContainerSet().contains(containerID)) {
+    if (cmdType != Type.CreateContainer && getMissingContainerSet().contains(containerID)) {
       StorageContainerException sce = new StorageContainerException(
           "ContainerID " + containerID
               + " has been lost and cannot be recreated on this DataNode",
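Taken together, the new bookkeeping behaves as in the hedged sketch below. The in-memory table factory name is an assumption based on the new DBTestUtils test helper in the diffstat; imports are elided.

    // Sketch: a ContainerSet backed by an in-memory stand-in for the
    // container_meta.db containerIds table.
    Table<Long, State> idsTable = DBTestUtils.getInMemoryTableForTest(); // assumed helper
    ContainerSet set = new ContainerSet(idsTable, 1000);

    set.addContainer(container);        // also persists (containerId -> state) in idsTable
    set.removeContainer(id, true);      // drops it from the map and marks the replica missing

    // A plain re-create of a missing container now fails with CONTAINER_MISSING;
    // it succeeds only with overwriteMissingContainers == true, or when the
    // replica is re-created in RECOVERING state (the EC recovery path).
    set.addContainer(container, true);  // explicit overwrite

Passing a null table constructs a read-only view: addContainer/removeContainer then fail their precondition, which suits read-only consumers such as offline tooling.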
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java
index 839a112ed9b..9f611af511b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java
@@ -17,24 +17,24 @@
  */
 package org.apache.hadoop.ozone.container.common.interfaces;

-import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.AbstractStore;

 import java.io.Closeable;

 /**
  * DB handle abstract class.
  */
-public abstract class DBHandle implements Closeable {
+public abstract class DBHandle<STORE extends AbstractStore> implements Closeable {

-  private final DatanodeStore store;
+  private final STORE store;
   private final String containerDBPath;

-  public DBHandle(DatanodeStore store, String containerDBPath) {
+  public DBHandle(STORE store, String containerDBPath) {
     this.store = store;
     this.containerDBPath = containerDBPath;
   }

-  public DatanodeStore getStore() {
+  public STORE getStore() {
     return this.store;
   }

@@ -45,4 +45,12 @@ public String getContainerDBPath() {
   public boolean cleanup() {
     return true;
   }
+
+  @Override
+  public String toString() {
+    return "DBHandle{" +
+        "containerDBPath='" + containerDBPath + '\'' +
+        ", store=" + store +
+        '}';
+  }
 }
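The STORE type parameter lets the same handle abstraction wrap stores with different schemas while keeping call sites typed. A typical access pattern, sketched with helper names that appear elsewhere in this patch:

    // Sketch: the handle's type parameter documents which store family it wraps.
    try (DBHandle<DatanodeStore> handle = BlockUtils.getDB(containerData, conf)) {
      Table<String, BlockData> blocks = handle.getStore().getBlockDataTable();
      // ... read or write block metadata while the handle is open ...
    }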
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index ae01c6da756..46532314d0b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -59,8 +59,10 @@
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.RefreshVolumeUsageCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.SetNodeOperationalStateCommandHandler;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics;
+import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.container.replication.ContainerImporter;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
@@ -131,6 +133,8 @@ public class DatanodeStateMachine implements Closeable {
   private final DatanodeQueueMetrics queueMetrics;
   private final ReconfigurationHandler reconfigurationHandler;

+  private final ReferenceCountedDB<MasterVolumeMetadataStore> masterVolumeMetadataStore;
+
   /**
    * Constructs a datanode state machine.
    * @param datanodeDetails - DatanodeDetails used to identify a datanode
@@ -178,10 +182,11 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService,
     // OzoneContainer instance is used in a non-thread safe way by the context
     // passed to its constructor, so we must synchronize its access. See
     // HDDS-3116 for more details.
+    this.masterVolumeMetadataStore = MasterVolumeMetadataStore.get(conf);
     constructionLock.writeLock().lock();
     try {
       container = new OzoneContainer(hddsDatanodeService, this.datanodeDetails,
-          conf, context, certClient, secretKeyClient);
+          conf, context, certClient, secretKeyClient, masterVolumeMetadataStore);
     } finally {
       constructionLock.writeLock().unlock();
     }
@@ -447,6 +452,10 @@ public void close() throws IOException {
     if (nettyMetrics != null) {
       nettyMetrics.unregister();
     }
+
+    if (masterVolumeMetadataStore != null) {
+      masterVolumeMetadataStore.cleanup();
+    }
   }

   private void executorServiceShutdownGraceful(ExecutorService executor) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 136c5805821..444ad9237a1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.ozone.container.common.statemachine
     .SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
@@ -549,7 +550,7 @@ private void markBlocksForDeletionSchemaV1(
       return;
     }
     int newDeletionBlocks = 0;
-    try (DBHandle containerDB = BlockUtils.getDB(containerData, conf)) {
+    try (DBHandle<DatanodeStore> containerDB = BlockUtils.getDB(containerData, conf)) {
       Table<String, BlockData> blockDataTable =
           containerDB.getStore().getBlockDataTable();
       Table<String, ChunkInfoList> deletedBlocksTable =
@@ -607,7 +608,7 @@ private void markBlocksForDeletionSchemaV1(

   private void updateMetaData(KeyValueContainerData containerData,
       DeletedBlocksTransaction delTX, int newDeletionBlocks,
-      DBHandle containerDB, BatchOperation batchOperation)
+      DBHandle<DatanodeStore> containerDB, BatchOperation batchOperation)
       throws IOException {
     if (newDeletionBlocks > 0) {
       // Finally commit the DB counters.
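The shared store's life cycle mirrors the state machine's own: one reference is acquired in the constructor and released in close(). A hedged sketch of the pattern (cleanup() is the DBHandle release hook; the container construction is elided):

    static void lifecycleSketch(ConfigurationSource conf) throws IOException {
      // Acquired once per state machine; the handle is shared with OzoneContainer.
      ReferenceCountedDB<MasterVolumeMetadataStore> store = MasterVolumeMetadataStore.get(conf);
      try {
        // ... pass `store` to OzoneContainer so its ContainerSet can load and
        // persist container IDs (table accessor name assumed) ...
      } finally {
        store.cleanup(); // drop this holder's reference on shutdown
      }
    }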
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index 741c65e130a..2e68a4b1ef1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -108,7 +108,7 @@ public void shutdownCache() {
    */
   @Override
   protected boolean removeLRU(LinkEntry entry) {
-    ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue();
+    ReferenceCountedDB<DatanodeStore> db = (ReferenceCountedDB<DatanodeStore>) entry.getValue();
     lock.lock();
     try {
       metrics.incNumCacheEvictions();
@@ -128,21 +128,21 @@ protected boolean removeLRU(LinkEntry entry) {
    * @param conf - Hadoop Configuration.
    * @return ReferenceCountedDB.
    */
-  public ReferenceCountedDB getDB(long containerID, String containerDBType,
+  public ReferenceCountedDB<DatanodeStore> getDB(long containerID, String containerDBType,
       String containerDBPath, String schemaVersion,
      ConfigurationSource conf)
      throws IOException {
     Preconditions.checkState(containerID >= 0,
         "Container ID cannot be negative.");
-    ReferenceCountedDB db;
+    ReferenceCountedDB<DatanodeStore> db;
     Lock containerLock = rocksDBLock.get(containerDBPath);
     containerLock.lock();
     metrics.incNumDbGetOps();
     try {
       lock.lock();
       try {
-        db = (ReferenceCountedDB) this.get(containerDBPath);
+        db = (ReferenceCountedDB<DatanodeStore>) this.get(containerDBPath);
         if (db != null && !db.isClosed()) {
           metrics.incNumCacheHits();
           db.incrementReference();
@@ -170,8 +170,8 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType,

       lock.lock();
       try {
-        ReferenceCountedDB currentDB =
-            (ReferenceCountedDB) this.get(containerDBPath);
+        ReferenceCountedDB<DatanodeStore> currentDB =
+            (ReferenceCountedDB<DatanodeStore>) this.get(containerDBPath);
         if (currentDB != null && !currentDB.isClosed()) {
           // increment the reference before returning the object
           currentDB.incrementReference();
@@ -201,7 +201,7 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType,
   public void removeDB(String containerDBPath) {
     lock.lock();
     try {
-      ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath);
+      ReferenceCountedDB<DatanodeStore> db = (ReferenceCountedDB<DatanodeStore>)this.get(containerDBPath);
       if (db != null) {
         boolean cleaned = cleanupDb(db);
         if (!db.isClosed()) {
@@ -230,7 +230,7 @@ private boolean cleanupDb(ReferenceCountedDB db) {
    * @param containerDBPath - DB path of the container.
    * @param db - DB handler
    */
-  public void addDB(String containerDBPath, ReferenceCountedDB db) {
+  public void addDB(String containerDBPath, ReferenceCountedDB<DatanodeStore> db) {
     lock.lock();
     try {
       this.putIfAbsent(containerDBPath, db);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java
index 54849a6f8cd..873ce75ff58 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java
@@ -26,7 +26,7 @@
  * Just a wrapper for DatanodeStore.
  * This is for container schema v3 which has one rocksdb instance per disk.
  */
-public class RawDB extends DBHandle {
+public class RawDB extends DBHandle<DatanodeStore> {

   public RawDB(DatanodeStore store, String containerDBPath) {
     super(store, containerDBPath);
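In use, the cache's reference counting pairs each getDB with exactly one close (sketch; variable names illustrative):

    // Acquire: increments the handle's reference count (cache hit or fresh open).
    ReferenceCountedDB<DatanodeStore> db = ContainerCache.getInstance(conf)
        .getDB(containerId, dbType, containerDBPath, schemaVersion, conf);
    try {
      db.getStore().getMetadataTable(); // safe while a reference is held
    } finally {
      db.close();                       // release: decrements the count
    }

Eviction (removeLRU) and removeDB only free the underlying RocksDB handle once all outstanding references have been released.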
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
index 2b73042ae7e..8c238abcf75 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
@@ -22,7 +22,7 @@

 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.AbstractStore;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -36,12 +36,12 @@
  * from caller stack. When JDK9 StackWalker is available, we can switch to
  * StackWalker instead of new Exception().printStackTrace().
  */
-public class ReferenceCountedDB extends DBHandle {
+public class ReferenceCountedDB<STORE extends AbstractStore> extends DBHandle<STORE> {
   private static final Logger LOG =
       LoggerFactory.getLogger(ReferenceCountedDB.class);
   private final AtomicInteger referenceCount;

-  public ReferenceCountedDB(DatanodeStore store, String containerDBPath) {
+  public ReferenceCountedDB(STORE store, String containerDBPath) {
     super(store, containerDBPath);
     this.referenceCount = new AtomicInteger(0);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index ae3288a3e98..00900bd9f2c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.replication.ContainerImporter;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
@@ -366,7 +367,7 @@ public void delete() throws StorageContainerException {

   @Override
   public boolean hasBlocks() throws IOException {
-    try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, config)) {
       return !KeyValueContainerUtil.noBlocksInContainer(db.getStore(),
           containerData, bCheckChunksFilePath);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index c235109f2cb..43118d42cca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -47,6 +47,7 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;

+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.slf4j.Logger;
@@ -246,7 +247,7 @@ private ScanResult scanData(DataTransferThrottler throttler,
     onDiskContainerData.setDbFile(dbFile);

     try {
-      try (DBHandle db = BlockUtils.getDB(onDiskContainerData, checkConfig);
+      try (DBHandle<DatanodeStore> db = BlockUtils.getDB(onDiskContainerData, checkConfig);
           BlockIterator<BlockData> kvIter = db.getStore().getBlockIterator(
               onDiskContainerData.getContainerID(),
               onDiskContainerData.getUnprefixedKeyFilter())) {
@@ -312,7 +313,7 @@ private ScanResult scanData(DataTransferThrottler throttler,
    * @return blockData in DB
    * @throws IOException
    */
-  private BlockData getBlockDataFromDB(DBHandle db, BlockData block)
+  private BlockData getBlockDataFromDB(DBHandle<DatanodeStore> db, BlockData block)
       throws IOException {
     String blockKey =
         onDiskContainerData.getBlockKey(block.getBlockID().getLocalID());
@@ -329,7 +330,7 @@ private BlockData getBlockDataFromDB(DBHandle db, BlockData block)
    * @return blockData in DB
    * @throws IOException
    */
-  private BlockData getBlockDataFromDBWithLock(DBHandle db, BlockData block)
+  private BlockData getBlockDataFromDBWithLock(DBHandle<DatanodeStore> db, BlockData block)
       throws IOException {
     container.readLock();
     try {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 4ea8552e780..4908cda8f9a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;

 import org.yaml.snakeyaml.nodes.Tag;

@@ -296,7 +297,7 @@ public boolean isFinalizedBlockExist(long localID) {
     return finalizedBlockSet.contains(localID);
   }

-  public void clearFinalizedBlock(DBHandle db) throws IOException {
+  public void clearFinalizedBlock(DBHandle<DatanodeStore> db) throws IOException {
     if (!finalizedBlockSet.isEmpty()) {
       // delete from db and clear memory
       // Should never fail.
@@ -353,7 +354,7 @@ public static List getYamlFields() {
    * @param releasedBytes - Number of bytes released.
    * @throws IOException
    */
-  public void updateAndCommitDBCounters(DBHandle db,
+  public void updateAndCommitDBCounters(DBHandle<DatanodeStore> db,
       BatchOperation batchOperation, int deletedBlockCount,
       long releasedBytes) throws IOException {
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
@@ -370,7 +371,7 @@ public void updateAndCommitDBCounters(DBHandle db,
     db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
   }

-  public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
+  public void resetPendingDeleteBlockCount(DBHandle<DatanodeStore> db) throws IOException {
     // Reset the in memory metadata.
     numPendingDeletionBlocks.set(0);
     // Reset the metadata on disk.
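The counter-update helper above commits its batch internally, so callers stage block deletes and counter updates into one atomic write (hedged sketch; variable names are illustrative):

    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, conf)) {
      try (BatchOperation batch =
          db.getStore().getBatchHandler().initBatchOperation()) {
        // ... stage per-block deletes into `batch` ...
        // Writes the updated block count / bytes-used values and commits `batch`.
        containerData.updateAndCommitDBCounters(db, batch, deletedBlockCount,
            releasedBytes);
      }
    }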
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index aa9c4bd953c..dece7bf49d4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -88,6 +88,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;

 import com.google.common.annotations.VisibleForTesting;
@@ -1333,7 +1334,7 @@ private String[] getFilesWithPrefix(String prefix, File chunkDir) {
   private boolean logBlocksIfNonZero(Container container)
       throws IOException {
     boolean nonZero = false;
-    try (DBHandle dbHandle
+    try (DBHandle<DatanodeStore> dbHandle
              = BlockUtils.getDB(
         (KeyValueContainerData) container.getContainerData(),
         conf)) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 8bbc2478004..ad4bdd8cfbb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -120,7 +120,7 @@ public static DatanodeStore getUncachedDatanodeStore(
    * @return DB handle.
    * @throws StorageContainerException
    */
-  public static DBHandle getDB(KeyValueContainerData containerData,
+  public static DBHandle<DatanodeStore> getDB(KeyValueContainerData containerData,
       ConfigurationSource conf) throws StorageContainerException {
     Preconditions.checkNotNull(containerData);
     Preconditions.checkNotNull(containerData.getDbFile());
@@ -191,7 +191,7 @@ public static void addDB(DatanodeStore store, String containerDBPath,
       ContainerCache cache = ContainerCache.getInstance(conf);
       Preconditions.checkNotNull(cache);
       cache.addDB(containerDBPath,
-          new ReferenceCountedDB(store, containerDBPath));
+          new ReferenceCountedDB<>(store, containerDBPath));
     }
   }

@@ -283,7 +283,7 @@ public static void removeContainerFromDB(KeyValueContainerData containerData,
   public static void dumpKVContainerDataToFiles(
       KeyValueContainerData containerData, ConfigurationSource conf)
       throws IOException {
-    try (DBHandle db = getDB(containerData, conf)) {
+    try (DBHandle<DatanodeStore> db = getDB(containerData, conf)) {
       Preconditions.checkState(db.getStore()
           instanceof DatanodeStoreSchemaThreeImpl);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 691ccaa630d..091b244f418 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -234,14 +234,13 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
     boolean bCheckChunksFilePath = dnConf.getCheckEmptyContainerDir();

     if (kvContainerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
-      try (DBHandle db = BlockUtils.getDB(kvContainerData, config)) {
-        populateContainerMetadata(kvContainerData,
-            db.getStore(), bCheckChunksFilePath);
+      try (DBHandle<DatanodeStore> db = BlockUtils.getDB(kvContainerData, config)) {
+        populateContainerMetadata(kvContainerData, db.getStore(), bCheckChunksFilePath);
       }
       return;
     }

-    DBHandle cachedDB = null;
+    DBHandle<DatanodeStore> cachedDB = null;
     DatanodeStore store = null;
     try {
       try {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 6232b843567..2f3146a1940 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;

 import com.google.common.base.Preconditions;
@@ -110,7 +111,7 @@ public long persistPutBlock(KeyValueContainer container,
     // We are not locking the key manager since LevelDb serializes all actions
     // against a single DB. We rely on DB level locking to avoid conflicts.
-    try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -233,7 +234,7 @@ public void finalizeBlock(Container container, BlockID blockId)

     kvContainer.removeFromPendingPutBlockCache(localID);

-    try (DBHandle db = BlockUtils.getDB(kvContainer.getContainerData(),
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(kvContainer.getContainerData(),
         config)) {
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -250,7 +251,7 @@ public void finalizeBlock(Container container, BlockID blockId)
     }
   }

-  private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db,
+  private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle<DatanodeStore> db,
       KeyValueContainer kvContainer, BatchOperation batch,
       long localID) throws IOException {
     // if the chunk list of the block to be finalized was written incremental,
@@ -270,7 +271,7 @@ public BlockData getBlock(Container container, BlockID blockID) throws IOException {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
     long bcsId = blockID.getBlockCommitSequenceId();
-    try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -290,7 +291,7 @@ public long getCommittedBlockLength(Container container, BlockID blockID)
       throws IOException {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
-    try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -343,7 +344,7 @@ public List<BlockData> listBlock(Container container, long startLocalID, int
     List<BlockData> result = null;
     KeyValueContainerData cData =
         (KeyValueContainerData) container.getContainerData();
-    try (DBHandle db = BlockUtils.getDB(cData, config)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, config)) {
       result = new ArrayList<>();
       String startKey = (startLocalID == -1) ?
           cData.startKeyEmpty() : cData.getBlockKey(startLocalID);
@@ -369,7 +370,7 @@ public void shutdown() {
     BlockUtils.shutdownCache(config);
   }

-  private BlockData getBlockByID(DBHandle db, BlockID blockID,
+  private BlockData getBlockByID(DBHandle<DatanodeStore> db, BlockID blockID,
       KeyValueContainerData containerData) throws IOException {
     String blockKey = containerData.getBlockKey(blockID.getLocalID());
     return db.getStore().getBlockByID(blockID, blockKey);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
index eb7b6e7378a..feb1d39366f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.util.Time;
@@ -146,7 +147,7 @@ private ContainerBackgroundTaskResult handleDeleteTask() throws Exception {
       File dataDir = new File(containerData.getChunksPath());
       long startTime = Time.monotonicNow();
       // Scan container's db and get list of under deletion blocks
-      try (DBHandle meta = BlockUtils.getDB(containerData, conf)) {
+      try (DBHandle<DatanodeStore> meta = BlockUtils.getDB(containerData, conf)) {
         if (containerData.hasSchema(SCHEMA_V1)) {
           crr = deleteViaSchema1(meta, container, dataDir, startTime);
         } else if (containerData.hasSchema(SCHEMA_V2)) {
@@ -174,7 +175,7 @@ public boolean checkDataDir(File dataDir) {
   }

   public ContainerBackgroundTaskResult deleteViaSchema1(
-      DBHandle meta, Container container, File dataDir,
+      DBHandle<DatanodeStore> meta, Container container, File dataDir,
       long startTime) throws IOException {
     ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
     if (!checkDataDir(dataDir)) {
@@ -276,7 +277,7 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
   }

   public ContainerBackgroundTaskResult deleteViaSchema2(
-      DBHandle meta, Container container, File dataDir,
+      DBHandle<DatanodeStore> meta, Container container, File dataDir,
       long startTime) throws IOException {
     Deleter schema2Deleter = (table, batch, tid) -> {
       Table<Long, DeletedBlocksTransaction> delTxTable =
@@ -296,7 +297,7 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
   }

   public ContainerBackgroundTaskResult deleteViaSchema3(
-      DBHandle meta, Container container, File dataDir,
+      DBHandle<DatanodeStore> meta, Container container, File dataDir,
       long startTime) throws IOException {
     Deleter schema3Deleter = (table, batch, tid) -> {
       Table<String, DeletedBlocksTransaction> delTxTable =
@@ -318,7 +319,7 @@ public ContainerBackgroundTaskResult deleteViaSchema3(

   private ContainerBackgroundTaskResult deleteViaTransactionStore(
       TableIterator<Long, ? extends Table.KeyValue<Long, DeletedBlocksTransaction>>
-          iter, DBHandle meta, Container container, File dataDir,
+          iter, DBHandle<DatanodeStore> meta, Container container, File dataDir,
       long startTime, Deleter deleter) throws IOException {
     ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
     if (!checkDataDir(dataDir)) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 88aeb3c174d..d9edd6d4cb0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -17,27 +17,22 @@
  */
 package org.apache.hadoop.ozone.container.metadata;

-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
 import org.apache.hadoop.hdds.utils.db.DBProfile;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
-import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
-import org.rocksdb.InfoLogLevel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -45,14 +40,11 @@
 import java.io.IOException;
 import java.util.NoSuchElementException;

-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
-import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
-
 /**
  * Implementation of the {@link DatanodeStore} interface that contains
  * functionality common to all more derived datanode store implementations.
  */
-public abstract class AbstractDatanodeStore implements DatanodeStore {
+public class AbstractDatanodeStore extends AbstractRDBStore<AbstractDatanodeDBDefinition> implements DatanodeStore {

   private Table<String, Long> metadataTable;

@@ -68,12 +60,6 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
   public static final Logger LOG =
       LoggerFactory.getLogger(AbstractDatanodeStore.class);

-  private volatile DBStore store;
-  private final AbstractDatanodeDBDefinition dbDef;
-  private final ManagedColumnFamilyOptions cfOptions;
-
-  private static DatanodeDBProfile dbProfile;
-  private final boolean openReadOnly;

   /**
    * Constructs the metadata store and starts the DB services.
@@ -84,114 +70,64 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
   protected AbstractDatanodeStore(ConfigurationSource config,
       AbstractDatanodeDBDefinition dbDef, boolean openReadOnly)
       throws IOException {
-
-    dbProfile = DatanodeDBProfile
-        .getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE));
-
-    // The same config instance is used on each datanode, so we can share the
-    // corresponding column family options, providing a single shared cache
-    // for all containers on a datanode.
-    cfOptions = dbProfile.getColumnFamilyOptions(config);
-
-    this.dbDef = dbDef;
-    this.openReadOnly = openReadOnly;
-    start(config);
+    super(dbDef, config, openReadOnly);
   }

   @Override
-  public void start(ConfigurationSource config)
+  protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config)
       throws IOException {
-    if (this.store == null) {
-      ManagedDBOptions options = dbProfile.getDBOptions();
-      options.setCreateIfMissing(true);
-      options.setCreateMissingColumnFamilies(true);
-
-      if (this.dbDef instanceof DatanodeSchemaOneDBDefinition ||
-          this.dbDef instanceof DatanodeSchemaTwoDBDefinition) {
-        long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2));
-        options.setMaxTotalWalSize(maxWalSize);
-      }
-
-      DatanodeConfiguration dc =
-          config.getObject(DatanodeConfiguration.class);
-      // Config user log files
-      InfoLogLevel level = InfoLogLevel.valueOf(
-          dc.getRocksdbLogLevel() + "_LEVEL");
-      options.setInfoLogLevel(level);
-      options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
-      options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
-
-      if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) {
-        options.setDeleteObsoleteFilesPeriodMicros(
-            dc.getRocksdbDeleteObsoleteFilesPeriod());
-
-        // For V3, all Rocksdb dir has the same "container.db" name. So use
-        // parentDirName(storage UUID)-dbDirName as db metrics name
-        this.store = DBStoreBuilder.newBuilder(config, dbDef)
-            .setDBOptions(options)
-            .setDefaultCFOptions(cfOptions)
-            .setOpenReadOnly(openReadOnly)
-            .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" +
-                dbDef.getName())
-            .build();
-      } else {
-        this.store = DBStoreBuilder.newBuilder(config, dbDef)
-            .setDBOptions(options)
-            .setDefaultCFOptions(cfOptions)
-            .setOpenReadOnly(openReadOnly)
-            .build();
-      }
+    AbstractDatanodeDBDefinition dbDefinition = this.getDbDef();
+    if (dbDefinition instanceof DatanodeSchemaOneDBDefinition ||
+        dbDefinition instanceof DatanodeSchemaTwoDBDefinition) {
+      long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2));
+      options.setMaxTotalWalSize(maxWalSize);
+    }
+    DatanodeConfiguration dc =
+        config.getObject(DatanodeConfiguration.class);

-      // Use the DatanodeTable wrapper to disable the table iterator on
-      // existing Table implementations retrieved from the DBDefinition.
-      // See the DatanodeTable's Javadoc for an explanation of why this is
-      // necessary.
-      metadataTable = new DatanodeTable<>(
-          dbDef.getMetadataColumnFamily().getTable(this.store));
-      checkTableStatus(metadataTable, metadataTable.getName());
-
-      // The block iterator this class returns will need to use the table
-      // iterator internally, so construct a block data table instance
-      // that does not have the iterator disabled by DatanodeTable.
-      blockDataTableWithIterator =
-          dbDef.getBlockDataColumnFamily().getTable(this.store);
-
-      blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
-      checkTableStatus(blockDataTable, blockDataTable.getName());
-
-      if (dbDef.getFinalizeBlocksColumnFamily() != null) {
-        finalizeBlocksTableWithIterator =
-            dbDef.getFinalizeBlocksColumnFamily().getTable(this.store);
-
-        finalizeBlocksTable = new DatanodeTable<>(
-            finalizeBlocksTableWithIterator);
-        checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName());
-      }
+    if (dbDefinition instanceof DatanodeSchemaThreeDBDefinition) {
+      options.setDeleteObsoleteFilesPeriodMicros(
+          dc.getRocksdbDeleteObsoleteFilesPeriod());

-      if (dbDef.getLastChunkInfoColumnFamily() != null) {
-        lastChunkInfoTable = new DatanodeTable<>(
-            dbDef.getLastChunkInfoColumnFamily().getTable(this.store));
-        checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName());
-      }
+      // For V3, all Rocksdb dir has the same "container.db" name. So use
+      // parentDirName(storage UUID)-dbDirName as db metrics name
+      dbStoreBuilder.setDBJmxBeanNameName(dbDefinition.getDBLocation(config).getName() + "-" +
+          dbDefinition.getName());
     }
-  }
-
-  @Override
-  public synchronized void stop() throws Exception {
-    if (store != null) {
-      store.close();
-      store = null;
+    DBStore dbStore = dbStoreBuilder.setDBOptions(options).build();
+
+    // Use the DatanodeTable wrapper to disable the table iterator on
+    // existing Table implementations retrieved from the DBDefinition.
+    // See the DatanodeTable's Javadoc for an explanation of why this is
+    // necessary.
+    metadataTable = new DatanodeTable<>(
+        dbDefinition.getMetadataColumnFamily().getTable(dbStore));
+    checkTableStatus(metadataTable, metadataTable.getName());
+
+    // The block iterator this class returns will need to use the table
+    // iterator internally, so construct a block data table instance
+    // that does not have the iterator disabled by DatanodeTable.
+    blockDataTableWithIterator =
+        dbDefinition.getBlockDataColumnFamily().getTable(dbStore);
+
+    blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
+    checkTableStatus(blockDataTable, blockDataTable.getName());
+
+    if (dbDefinition.getFinalizeBlocksColumnFamily() != null) {
+      finalizeBlocksTableWithIterator =
+          dbDefinition.getFinalizeBlocksColumnFamily().getTable(dbStore);
+
+      finalizeBlocksTable = new DatanodeTable<>(
+          finalizeBlocksTableWithIterator);
+      checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName());
     }
-  }

-  @Override
-  public DBStore getStore() {
-    return this.store;
-  }
-
-  @Override
-  public BatchOperationHandler getBatchHandler() {
-    return this.store;
+    if (dbDefinition.getLastChunkInfoColumnFamily() != null) {
+      lastChunkInfoTable = new DatanodeTable<>(
+          dbDefinition.getLastChunkInfoColumnFamily().getTable(dbStore));
+      checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName());
+    }
+    return dbStore;
   }

   @Override
@@ -240,44 +176,6 @@ public BlockIterator<Long> getFinalizeBlockIterator(long containerID,
         finalizeBlocksTableWithIterator.iterator(), filter);
   }

-  @Override
-  public synchronized boolean isClosed() {
-    if (this.store == null) {
-      return true;
-    }
-    return this.store.isClosed();
-  }
-
-  @Override
-  public void close() throws IOException {
-    this.store.close();
-    this.cfOptions.close();
-  }
-
-  @Override
-  public void flushDB() throws IOException {
-    store.flushDB();
-  }
-
-  @Override
-  public void flushLog(boolean sync) throws IOException {
-    store.flushLog(sync);
-  }
-
-  @Override
-  public void compactDB() throws IOException {
-    store.compactDB();
-  }
-
-  @VisibleForTesting
-  public DatanodeDBProfile getDbProfile() {
-    return dbProfile;
-  }
-
-  protected AbstractDatanodeDBDefinition getDbDef() {
-    return this.dbDef;
-  }
-
   protected Table<String, BlockData> getBlockDataTableWithIterator() {
     return this.blockDataTableWithIterator;
   }
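Before the new files, it helps to see where they sit. A sketch of the resulting hierarchy; MasterVolumeMetadataStore's position is inferred from the diffstat and its use in DatanodeStateMachine, so treat it as an assumption:

    // AbstractStore                       - lifecycle, flush/compact, batch handler (interface)
    //   └── AbstractRDBStore<DEF>         - shared RocksDB setup: options, profile, start/stop
    //         ├── AbstractDatanodeStore       - per-container block/metadata tables
    //         └── MasterVolumeMetadataStore   - new containerIds table in container_meta.db
    // DatanodeStore (interface) now extends AbstractStore instead of Closeable.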
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
new file mode 100644
index 00000000000..6f51a1cb4a1
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
@@ -0,0 +1,135 @@
+package org.apache.hadoop.ozone.container.metadata;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
+import org.apache.hadoop.hdds.utils.db.DBDefinition;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
+import org.rocksdb.InfoLogLevel;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
+import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
+
+/**
+ * Abstract class defining the way to interact with any rocksDB in the datanode.
+ * @param <DEF> Generic parameter defining the schema for the DB.
+ */
+public abstract class AbstractRDBStore<DEF extends DBDefinition> implements AbstractStore {
+  private final DEF dbDef;
+  private final ManagedColumnFamilyOptions cfOptions;
+  private static DatanodeDBProfile dbProfile;
+  private final boolean openReadOnly;
+  private volatile DBStore store;
+
+  protected AbstractRDBStore(DEF dbDef, ConfigurationSource config, boolean openReadOnly) throws IOException {
+    dbProfile = DatanodeDBProfile.getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE));
+
+    // The same config instance is used on each datanode, so we can share the
+    // corresponding column family options, providing a single shared cache
+    // for all containers on a datanode.
+    cfOptions = dbProfile.getColumnFamilyOptions(config);
+    this.dbDef = dbDef;
+    this.openReadOnly = openReadOnly;
+    start(config);
+  }
+
+  public void start(ConfigurationSource config)
+      throws IOException {
+    if (this.store == null) {
+      ManagedDBOptions options = dbProfile.getDBOptions();
+      options.setCreateIfMissing(true);
+      options.setCreateMissingColumnFamilies(true);
+
+      DatanodeConfiguration dc =
+          config.getObject(DatanodeConfiguration.class);
+      // Config user log files
+      InfoLogLevel level = InfoLogLevel.valueOf(
+          dc.getRocksdbLogLevel() + "_LEVEL");
+      options.setInfoLogLevel(level);
+      options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
+      options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
+      this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef)
+          .setDBOptions(options)
+          .setDefaultCFOptions(cfOptions)
+          .setOpenReadOnly(openReadOnly), options, config);
+    }
+  }
+
+  protected abstract DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options,
+      ConfigurationSource config) throws IOException;
+
+  public synchronized void stop() throws Exception {
+    if (store != null) {
+      store.close();
+      store = null;
+    }
+  }
+
+  public DBStore getStore() {
+    return this.store;
+  }
+
+  public synchronized boolean isClosed() {
+    if (this.store == null) {
+      return true;
+    }
+    return this.store.isClosed();
+  }
+
+  public BatchOperationHandler getBatchHandler() {
+    return this.store;
+  }
+
+  public void close() throws IOException {
+    this.store.close();
+    this.cfOptions.close();
+  }
+
+  public void flushDB() throws IOException {
+    store.flushDB();
+  }
+
+  public void flushLog(boolean sync) throws IOException {
+    store.flushLog(sync);
+  }
+
+  public void compactDB() throws IOException {
+    store.compactDB();
+  }
+
+  @VisibleForTesting
+  public DatanodeDBProfile getDbProfile() {
+    return dbProfile;
+  }
+
+  protected DEF getDbDef() {
+    return this.dbDef;
+  }
+
+}
+ */
+  boolean isClosed();
+
+  default void compactionIfNeeded() throws Exception {
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
index d791d9bbeab..b8283494ef5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
@@ -17,22 +17,16 @@
  */
 package org.apache.hadoop.ozone.container.metadata;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
-import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 
-import java.io.Closeable;
 import java.io.IOException;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
@@ -40,31 +34,10 @@
 /**
  * Interface for interacting with datanode databases.
  */
-public interface DatanodeStore extends Closeable {
+public interface DatanodeStore extends AbstractStore {
   String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block.";
 
-  /**
-   * Start datanode manager.
-   *
-   * @param configuration - Configuration
-   * @throws IOException - Unable to start datanode store.
-   */
-  void start(ConfigurationSource configuration) throws IOException;
-
-  /**
-   * Stop datanode manager.
-   */
-  void stop() throws Exception;
-
-  /**
-   * Get datanode store.
-   *
-   * @return datanode store.
-   */
-  @VisibleForTesting
-  DBStore getStore();
-
   /**
    * A Table that keeps the block data.
    *
    * @return Table
    */
@@ -100,17 +73,6 @@ public interface DatanodeStore extends Closeable {
    */
   Table<String, BlockData> getLastChunkInfoTable();
 
-  /**
-   * Helper to create and write batch transactions.
-   */
-  BatchOperationHandler getBatchHandler();
-
-  void flushLog(boolean sync) throws IOException;
-
-  void flushDB() throws IOException;
-
-  void compactDB() throws IOException;
-
   BlockIterator<BlockData> getBlockIterator(long containerID)
       throws IOException;
 
@@ -120,15 +82,6 @@ BlockIterator<BlockData> getBlockIterator(long containerID,
   BlockIterator<Long> getFinalizeBlockIterator(long containerID,
       KeyPrefixFilter filter) throws IOException;
 
-  /**
-   * Returns if the underlying DB is closed. This call is thread safe.
-   * @return true if the DB is closed.
- */ - boolean isClosed(); - - default void compactionIfNeeded() throws Exception { - } - default BlockData getBlockByID(BlockID blockID, String blockKey) throws IOException { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java new file mode 100644 index 00000000000..633561bc812 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java @@ -0,0 +1,72 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.LongCodec; +import org.apache.hadoop.hdds.utils.db.Proto2EnumCodec; +import org.apache.hadoop.ozone.OzoneConsts; + +import java.util.Map; + +/** + * Class for defining the schema for master volume in a datanode. 
+ */
+public final class MasterVolumeDBDefinition extends DBDefinition.WithMap {
+
+  private static final String CONTAINER_IDS_TABLE_NAME = "containerIds";
+
+  public static final DBColumnFamilyDefinition<Long, State>
+      CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>(
+      CONTAINER_IDS_TABLE_NAME,
+      LongCodec.get(),
+      Proto2EnumCodec.get(State.OPEN));
+
+  private static final Map<String, DBColumnFamilyDefinition<?, ?>>
+      COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap(
+      CONTAINER_IDS_TABLE);
+
+  private static final MasterVolumeDBDefinition INSTANCE = new MasterVolumeDBDefinition();
+
+  public static MasterVolumeDBDefinition get() {
+    return INSTANCE;
+  }
+
+  private MasterVolumeDBDefinition() {
+    super(COLUMN_FAMILIES);
+  }
+
+  @Override
+  public String getName() {
+    return OzoneConsts.CONTAINER_META_DB_NAME;
+  }
+
+  @Override
+  public String getLocationConfigKey() {
+    return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR;
+  }
+
+  public DBColumnFamilyDefinition<Long, State> getContainerIdsTable() {
+    return CONTAINER_IDS_TABLE;
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java
new file mode 100644
index 00000000000..bf9e34a1e26
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java
@@ -0,0 +1,71 @@
+package org.apache.hadoop.ozone.container.metadata;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+
+import java.io.IOException;
+
+/**
+ * Singleton class for interacting with the database in the master volume of a datanode.
+ */
+public final class MasterVolumeMetadataStore extends AbstractRDBStore<MasterVolumeDBDefinition>
+    implements MetadataStore {
+
+  private Table<Long, ContainerProtos.ContainerDataProto.State> containerIdsTable;
+
+  private static volatile ReferenceCountedDB<MasterVolumeMetadataStore> instance = null;
+
+  public static ReferenceCountedDB<MasterVolumeMetadataStore> get(ConfigurationSource conf) throws IOException {
+    if (instance == null || instance.isClosed()) {
+      synchronized (MasterVolumeMetadataStore.class) {
+        if (instance == null || instance.isClosed()) {
+          MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false);
+          instance = new ReferenceCountedDB<>(masterVolumeMetadataStore,
+              masterVolumeMetadataStore.getDbDef().getDBLocation(conf).getAbsolutePath());
+        }
+      }
+    }
+    return instance;
+  }
+
+  private MasterVolumeMetadataStore(ConfigurationSource config, boolean openReadOnly) throws IOException {
+    super(MasterVolumeDBDefinition.get(), config, openReadOnly);
+  }
+
+  @Override
+  protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config)
+      throws IOException {
+    DBStore dbStore = dbStoreBuilder.build();
+    this.containerIdsTable = this.getDbDef().getContainerIdsTable().getTable(dbStore);
+    return dbStore;
+  }
+
+  @Override
+  public Table<Long, ContainerProtos.ContainerDataProto.State> getContainerIdsTable() {
+    return containerIdsTable;
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java
new file mode 100644
index 00000000000..e21ee4b4321
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java
@@ -0,0 +1,35 @@
+package org.apache.hadoop.ozone.container.metadata;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.db.Table;
+
+/**
+ * Interface for interacting with the database in the master volume of a datanode.
+ */
+public interface MetadataStore extends AbstractStore {
+  /**
+   * A Table that keeps the containerIds in a datanode.
+ *
+ * @return Table
+ */
+  Table<Long, ContainerProtos.ContainerDataProto.State> getContainerIdsTable();
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index e40fa635c12..e5ff5e08b48 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -34,6 +35,8 @@
 import org.apache.hadoop.hdds.security.token.TokenVerifier;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService;
@@ -51,12 +54,14 @@
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
 import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume.VolumeType;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolumeChecker;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.StaleRecoveringContainerScrubbingService;
+import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore;
 import org.apache.hadoop.ozone.container.replication.ContainerImporter;
 import org.apache.hadoop.ozone.container.replication.ReplicationServer;
 import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
@@ -70,6 +75,7 @@
 import java.io.IOException;
 import java.time.Duration;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -128,6 +134,7 @@ public class OzoneContainer {
   private ScheduledExecutorService dbCompactionExecutorService;
 
   private final ContainerMetrics metrics;
+  private final ReferenceCountedDB<MasterVolumeMetadataStore> masterVolumeMetadataStore;
 
   enum InitializingStatus {
     UNINITIALIZED, INITIALIZING, INITIALIZED
@@ -143,9 +150,10 @@ enum InitializingStatus {
    * @throws IOException
    */
   public OzoneContainer(HddsDatanodeService hddsDatanodeService,
-      DatanodeDetails datanodeDetails, ConfigurationSource conf,
-      StateContext context, CertificateClient certClient,
-      SecretKeyVerifierClient secretKeyClient) throws IOException {
+      DatanodeDetails datanodeDetails, ConfigurationSource conf,
+      StateContext context, CertificateClient certClient,
+      SecretKeyVerifierClient secretKeyClient,
+      ReferenceCountedDB<MasterVolumeMetadataStore> masterVolumeMetadataStore) throws IOException {
     config = conf;
     this.datanodeDetails = datanodeDetails;
     this.context = context;
@@ -178,12 +186,13 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService,
             TimeUnit.MINUTES);
       }
     }
-
     long recoveringContainerTimeout = config.getTimeDuration(
         OZONE_RECOVERING_CONTAINER_TIMEOUT,
         OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-
-    containerSet = new ContainerSet(recoveringContainerTimeout);
+    this.masterVolumeMetadataStore = masterVolumeMetadataStore;
+    this.masterVolumeMetadataStore.incrementReference();
+    containerSet = new ContainerSet(masterVolumeMetadataStore.getStore().getContainerIdsTable(),
+        recoveringContainerTimeout);
     metadataScanner = null;
 
     metrics = ContainerMetrics.create(conf);
@@ -296,6 +305,14 @@ public OzoneContainer(
     this(null, datanodeDetails, conf, context, null, null);
   }
 
+  public OzoneContainer(HddsDatanodeService hddsDatanodeService,
+      DatanodeDetails datanodeDetails, ConfigurationSource conf,
+      StateContext context, CertificateClient certClient,
+      SecretKeyVerifierClient secretKeyClient) throws IOException {
+    this(hddsDatanodeService, datanodeDetails, conf, context, certClient, secretKeyClient,
+        MasterVolumeMetadataStore.get(conf));
+  }
+
   public GrpcTlsConfig getTlsClientConfig() {
     return tlsClientConfig;
   }
@@ -304,7 +321,7 @@ public GrpcTlsConfig getTlsClientConfig() {
    * Build's container map after volume format.
    */
   @VisibleForTesting
-  public void buildContainerSet() {
+  public void buildContainerSet() throws IOException {
     Iterator<StorageVolume> volumeSetIterator = volumeSet.getVolumesList()
         .iterator();
     ArrayList<Thread> volumeThreads = new ArrayList<>();
@@ -327,6 +344,14 @@ public void buildContainerSet() {
       thread.start();
       volumeThreads.add(thread);
     }
+    try (TableIterator<Long, ? extends Table.KeyValue<Long, ContainerProtos.ContainerDataProto.State>> itr =
+        containerSet.getContainerIdsTable().iterator()) {
+      Map<Long, Long> containerIds = new HashMap<>();
+      while (itr.hasNext()) {
+        containerIds.put(itr.next().getKey(), 0L);
+      }
+      containerSet.buildMissingContainerSetAndValidate(containerIds);
+    }
 
     try {
       for (int i = 0; i < volumeThreads.size(); i++) {
@@ -513,6 +538,10 @@ public void stop() {
     blockDeletingService.shutdown();
     recoveringContainerScrubbingService.shutdown();
     ContainerMetrics.remove();
+    if (this.masterVolumeMetadataStore != null) {
+      this.masterVolumeMetadataStore.decrementReference();
+      this.masterVolumeMetadataStore.cleanup();
+    }
   }
 
   public void handleVolumeFailures() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index f20094079c9..aaeaa21e583 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -128,7 +128,7 @@ public void importContainer(long containerID, Path tarFilePath,
       try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) {
         Container container = controller.importContainer(
             containerData, input, packer);
-        containerSet.addContainer(container);
+        containerSet.addContainer(container, true);
       }
     } finally {
       importContainerProgress.remove(containerID);
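NOTE (illustrative sketch, not part of the patch): the buildContainerSet() hunk above is the core of
this change. On startup the datanode now replays the persisted containerIds table and asks
ContainerSet to flag every ID that the volume scan did not produce. A condensed sketch of that flow,
using only types and methods introduced by this patch (error handling elided; State abbreviates
ContainerProtos.ContainerDataProto.State):

    // Collect every container ID recorded in the master-volume DB. The table
    // value is the container state; only the key matters here, and 0L is a
    // placeholder BCSID expected by the validation call.
    Map<Long, Long> persistedIds = new HashMap<>();
    try (TableIterator<Long, ? extends Table.KeyValue<Long, State>> itr =
             containerSet.getContainerIdsTable().iterator()) {
      while (itr.hasNext()) {
        persistedIds.put(itr.next().getKey(), 0L);
      }
    }
    // Any ID present in the DB but absent from the volume scan is registered
    // as a missing container.
    containerSet.buildMissingContainerSetAndValidate(persistedIds);

diff --git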
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 53ba8b68578..2e6985d46ac 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.retry.RetryPolicies;
@@ -336,7 +337,7 @@ public static ContainerDispatcher getNoopContainerDispatcher() {
   }
 
   private static final ContainerController EMPTY_CONTAINER_CONTROLLER
-      = new ContainerController(new ContainerSet(1000), Collections.emptyMap());
+      = new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Collections.emptyMap());
 
   public static ContainerController getEmptyContainerController() {
     return EMPTY_CONTAINER_CONTROLLER;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index bc56141fb08..6edb84cc691 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.common.Checksum;
@@ -211,7 +212,7 @@ private void createPendingDeleteBlocksSchema1(int numOfBlocksPerContainer,
       ChunkBuffer buffer, ChunkManager chunkManager,
       KeyValueContainer container) {
     BlockID blockID = null;
-    try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
+    try (DBHandle<DatanodeStore> metadata = BlockUtils.getDB(data, conf)) {
       for (int j = 0; j < numOfBlocksPerContainer; j++) {
         blockID = ContainerTestHelper.getTestBlockID(containerID);
         String deleteStateName = data.getDeletingBlockKey(
@@ -246,7 +247,7 @@ private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer,
       putChunksInBlock(numOfChunksPerBlock, i, chunks, buffer, chunkManager,
          container, blockID);
       kd.setChunks(chunks);
-      try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
+      try (DBHandle<DatanodeStore> metadata = BlockUtils.getDB(data, conf)) {
        String blockKey = data.getBlockKey(blockID.getLocalID());
        metadata.getStore().getBlockDataTable().put(blockKey, kd);
      } catch (IOException exception) {
@@ -269,7 +270,7 @@ private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer,
 
   private void createTxn(KeyValueContainerData data, List<Long> containerBlocks,
       int txnID, long containerID) {
-    try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
+    try (DBHandle<DatanodeStore> metadata = BlockUtils.getDB(data, conf)) {
       StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction dtx =
           StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction
               .newBuilder().setTxID(txnID).setContainerID(containerID)
@@ -331,7 +332,7 @@ private void updateMetaData(KeyValueContainerData data,
       KeyValueContainer container, int numOfBlocksPerContainer,
       int numOfChunksPerBlock) {
     long chunkLength = 100;
-    try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
+    try (DBHandle<DatanodeStore> metadata = BlockUtils.getDB(data, conf)) {
       container.getContainerData().setBlockCount(numOfBlocksPerContainer);
       // Set block count, bytes used and pending delete block count.
       metadata.getStore().getMetadataTable()
@@ -362,7 +363,7 @@ private void deleteAndWait(BlockDeletingServiceTestImpl service,
    * Get under deletion blocks count from DB,
    * note this info is parsed from container.db.
    */
-  private int getUnderDeletionBlocksCount(DBHandle meta,
+  private int getUnderDeletionBlocksCount(DBHandle<DatanodeStore> meta,
       KeyValueContainerData data) throws IOException {
     if (data.hasSchema(SCHEMA_V1)) {
       return meta.getStore().getBlockDataTable()
@@ -429,7 +430,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
     dnConf.setBlockDeletionLimit(blockDeleteLimit);
     this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
 
     // Create one container with no actual pending delete blocks, but an
     // incorrect metadata value indicating it has enough pending deletes to
@@ -437,7 +438,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
     KeyValueContainerData incorrectData = createToDeleteBlocks(containerSet,
         0, 1);
-    try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(incorrectData, conf)) {
       // Check pre-create state.
       assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData));
 
@@ -460,7 +461,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
     // Check its metadata was set up correctly.
     assertEquals(correctNumBlocksToDelete,
         correctData.getNumPendingDeletionBlocks());
-    try (DBHandle db = BlockUtils.getDB(correctData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(correctData, conf)) {
       assertEquals(correctNumBlocksToDelete,
           getUnderDeletionBlocksCount(db, correctData));
       assertEquals(correctNumBlocksToDelete,
@@ -488,7 +489,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
     // Pending delete block count in the incorrect container should be fixed
     // and reset to 0.
     assertEquals(0, incorrectData.getNumPendingDeletionBlocks());
-    try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(incorrectData, conf)) {
       assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData));
       assertEquals(0,
           db.getStore().getMetadataTable()
@@ -497,7 +498,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
 
     // Correct container should not have been processed.
     assertEquals(correctNumBlocksToDelete,
         correctData.getNumPendingDeletionBlocks());
-    try (DBHandle db = BlockUtils.getDB(correctData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(correctData, conf)) {
       assertEquals(correctNumBlocksToDelete,
           getUnderDeletionBlocksCount(db, correctData));
       assertEquals(correctNumBlocksToDelete,
@@ -512,7 +513,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
     // The incorrect container should remain in the same state after being
     // fixed.
     assertEquals(0, incorrectData.getNumPendingDeletionBlocks());
-    try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(incorrectData, conf)) {
       assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData));
       assertEquals(0,
           db.getStore().getMetadataTable()
@@ -521,7 +522,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
     // The correct container should have been processed this run and had its
     // blocks deleted.
     assertEquals(0, correctData.getNumPendingDeletionBlocks());
-    try (DBHandle db = BlockUtils.getDB(correctData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(correctData, conf)) {
       assertEquals(0, getUnderDeletionBlocksCount(db, correctData));
       assertEquals(0,
           db.getStore().getMetadataTable()
@@ -537,7 +538,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo)
     dnConf.setBlockDeletionLimit(2);
     this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     createToDeleteBlocks(containerSet, 1, 3, 1);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
     KeyValueHandler keyValueHandler =
@@ -558,7 +559,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo)
     KeyPrefixFilter filter = isSameSchemaVersion(schemaVersion, SCHEMA_V1) ?
         data.getDeletingBlockKeyFilter() : data.getUnprefixedKeyFilter();
-    try (DBHandle meta = BlockUtils.getDB(data, conf)) {
+    try (DBHandle<DatanodeStore> meta = BlockUtils.getDB(data, conf)) {
       Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
       assertBlockDataTableRecordCount(3, meta, filter, data.getContainerID());
       // NOTE: this test assumes that all the container is KetValueContainer and
@@ -663,7 +664,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo)
     dnConf.setBlockDeletionLimit(2);
     this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     createToDeleteBlocks(containerSet, numOfContainers,
         numOfBlocksPerContainer, numOfChunksPerBlock);
 
@@ -694,7 +695,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo)
     List<Long> unrecordedBlockIds = new ArrayList<>();
     Set<File> unrecordedChunks = new HashSet<>();
-    try (DBHandle meta = BlockUtils.getDB(ctr1, conf)) {
+    try (DBHandle<DatanodeStore> meta = BlockUtils.getDB(ctr1, conf)) {
       // create unrecorded blocks in a new txn and update metadata,
       // service shall first choose the top pendingDeletion container
       // if using the TopNOrderedContainerDeletionChoosingPolicy
@@ -773,7 +774,7 @@ public void testShutdownService(ContainerTestVersionInfo versionInfo)
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     // Create 1 container with 100 blocks
     createToDeleteBlocks(containerSet, 1, 100, 1);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
@@ -804,7 +805,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo)
     blockLimitPerInterval = dnConf.getBlockDeletionLimit();
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     createToDeleteBlocks(containerSet, 1, 3, 1);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
     KeyValueHandler keyValueHandler =
@@ -846,7 +847,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo)
     KeyValueContainer container =
         (KeyValueContainer) containerSet.iterator().next();
     KeyValueContainerData data = container.getContainerData();
-    try (DBHandle meta = BlockUtils.getDB(data, conf)) {
+    try (DBHandle<DatanodeStore> meta = BlockUtils.getDB(data, conf)) {
       LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);
       GenericTestUtils.waitFor(() -> {
         try {
@@ -906,7 +907,7 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo)
     dnConf.setBlockDeletionLimit(1);
     this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
 
     int containerCount = 2;
     int chunksPerBlock = 10;
@@ -966,7 +967,7 @@ public void testContainerMaxLockHoldingTime(
     dnConf.setBlockDeletingMaxLockHoldingTime(Duration.ofMillis(-1));
     dnConf.setBlockDeletionLimit(3);
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
 
     int containerCount = 1;
     int chunksPerBlock = 10;
@@ -1030,7 +1031,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo)
     dnConf.setBlockDeletionLimit(10);
     this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
     conf.setFromObject(dnConf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
     KeyValueHandler keyValueHandler =
         new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
@@ -1099,7 +1100,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo)
   private void assertBlockDataTableRecordCount(int expectedCount,
       KeyValueContainerData containerData, KeyPrefixFilter filter)
       throws IOException {
-    try (DBHandle handle = BlockUtils.getDB(containerData, conf)) {
+    try (DBHandle<DatanodeStore> handle = BlockUtils.getDB(containerData, conf)) {
       long containerID = containerData.getContainerID();
       assertBlockDataTableRecordCount(expectedCount, handle, filter,
           containerID);
@@ -1116,7 +1117,7 @@ private void assertBlockDataTableRecordCount(int expectedCount,
    * @throws IOException
    */
   private void assertBlockDataTableRecordCount(int expectedCount,
-      DBHandle handle, KeyPrefixFilter filter, long containerID)
+      DBHandle<DatanodeStore> handle, KeyPrefixFilter filter, long containerID)
       throws IOException {
     long count = 0L;
     try (BlockIterator<BlockData> iterator = handle.getStore().
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 2235b23ce88..a020c6bc9f8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -152,7 +153,7 @@ public void cleanup() {
   public void testDirectTableIterationDisabled(String schemaVersion)
       throws Exception {
     setup(schemaVersion);
-    try (DBHandle refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       DatanodeStore store = refCountedDB.getStore();
 
       assertTableIteratorUnsupported(store.getMetadataTable());
@@ -178,7 +179,7 @@ private void assertTableIteratorUnsupported(Table<?, ?> table) {
   public void testBlockIteration(String schemaVersion) throws Exception {
     setup(schemaVersion);
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       assertEquals(TestDB.NUM_DELETED_BLOCKS,
           countDeletedBlocks(refCountedDB, cData));
 
@@ -245,7 +246,7 @@ public void testReadWithoutMetadata(String schemaVersion) throws Exception {
     // This simulates them not being there to start with.
     setup(schemaVersion);
     KeyValueContainerData cData = newKvData();
-    try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
 
       metadataTable.delete(cData.getBlockCountKey());
@@ -311,7 +312,7 @@ public void testDelete(String schemaVersion) throws Exception {
         TestDB.KEY_COUNT - numBlocksToDelete;
 
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       // Test results via block iteration.
 
       assertEquals(expectedDeletingBlocks,
@@ -358,7 +359,7 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion)
         metrics, c -> {
         });
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       // Read blocks that were already deleted before the upgrade.
       List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks =
           refCountedDB.getStore().getDeletedBlocksTable()
@@ -407,7 +408,7 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion)
   public void testReadBlockData(String schemaVersion) throws Exception {
     setup(schemaVersion);
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       Table<String, BlockData> blockDataTable =
           refCountedDB.getStore().getBlockDataTable();
 
@@ -453,7 +454,7 @@ public void testReadBlockData(String schemaVersion) throws Exception {
   public void testReadDeletingBlockData(String schemaVersion) throws Exception {
     setup(schemaVersion);
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       Table<String, BlockData> blockDataTable =
           refCountedDB.getStore().getBlockDataTable();
 
@@ -508,7 +509,7 @@ public void testReadDeletingBlockData(String schemaVersion) throws Exception {
   public void testReadMetadata(String schemaVersion) throws Exception {
     setup(schemaVersion);
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       Table<String, Long> metadataTable =
           refCountedDB.getStore().getMetadataTable();
 
@@ -527,7 +528,7 @@ public void testReadMetadata(String schemaVersion) throws Exception {
   public void testReadDeletedBlocks(String schemaVersion) throws Exception {
     setup(schemaVersion);
     KeyValueContainerData cData = newKvData();
-    try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> refCountedDB = BlockUtils.getDB(cData, conf)) {
       Table<String, ChunkInfoList> deletedBlocksTable =
           refCountedDB.getStore().getDeletedBlocksTable();
 
@@ -571,7 +572,7 @@ private void runBlockDeletingService(KeyValueHandler keyValueHandler)
   }
 
   private ContainerSet makeContainerSet() throws Exception {
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     KeyValueContainer container = new KeyValueContainer(newKvData(), conf);
     containerSet.addContainer(container);
 
@@ -634,7 +635,7 @@ private void checkContainerData(KeyValueContainerData kvData) {
         kvData.getNumPendingDeletionBlocks());
   }
 
-  private int countDeletedBlocks(DBHandle refCountedDB,
+  private int countDeletedBlocks(DBHandle<DatanodeStore> refCountedDB,
       KeyValueContainerData cData) throws IOException {
     return refCountedDB.getStore().getDeletedBlocksTable()
@@ -643,7 +644,7 @@ private int countDeletedBlocks(DBHandle refCountedDB,
             cData.getUnprefixedKeyFilter()).size();
   }
 
-  private int countDeletingBlocks(DBHandle refCountedDB,
+  private int countDeletingBlocks(DBHandle<DatanodeStore> refCountedDB,
       KeyValueContainerData cData) throws IOException {
     return refCountedDB.getStore().getBlockDataTable()
@@ -652,7 +653,7 @@ private int countDeletingBlocks(DBHandle refCountedDB,
             cData.getDeletingBlockKeyFilter()).size();
   }
 
-  private int countUnprefixedBlocks(DBHandle refCountedDB,
+  private int countUnprefixedBlocks(DBHandle<DatanodeStore> refCountedDB,
       KeyValueContainerData cData) throws IOException {
     return refCountedDB.getStore().getBlockDataTable()
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index 0c4612b79fa..8d6c9b4a233 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -50,6 +51,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.ozone.test.GenericTestUtils;
@@ -134,7 +136,7 @@ public void setup() throws Exception {
     blockManager = new BlockManagerImpl(conf);
     chunkManager = new FilePerBlockStrategy(true, blockManager, volumeSet);
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet,
         volumeSet, ContainerMetrics.create(conf), c -> { });
     ozoneContainer = mock(OzoneContainer.class);
@@ -174,7 +176,7 @@ public void testBlockIteration() throws IOException {
 
     // turn on schema v3 first, then do operations
     ContainerTestUtils.enableSchemaV3(conf);
-    try (DBHandle db = BlockUtils.getDB(container.getContainerData(), conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(container.getContainerData(), conf)) {
       long containerID = container.getContainerData().getContainerID();
       int blockCount = 0;
       try (BlockIterator<BlockData> iter = db.getStore()
@@ -210,7 +212,7 @@ public void testReadMetadata() throws IOException {
 
     // turn on schema v3 first, then do operations
     ContainerTestUtils.enableSchemaV3(conf);
-    try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
       Table<String, Long> metadatatable = db.getStore().getMetadataTable();
       assertEquals((long)metadatatable.get(BLOCK_COUNT), BLOCKS_PER_CONTAINER);
@@ -262,7 +264,7 @@ public void testDeleteViaTransation() throws IOException, TimeoutException,
     assertEquals(cData.getBytesUsed(), expectedBytesUsed);
 
     // check db metadata after deletion
-    try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
       Table<String, Long> metadatatable = db.getStore().getMetadataTable();
       assertEquals((long)metadatatable.get(BLOCK_COUNT), expectedKeyCount);
       assertEquals((long)metadatatable.get(PENDING_DELETE_BLOCK_COUNT), 0);
@@ -295,7 +297,7 @@ private KeyValueContainer createTestContainer() throws IOException {
     List<Long> blocks = Arrays.asList(startBlockID, startBlockID + 1);
     DeletedBlocksTransaction txn = createTestDeleteTxn(txnID, blocks, containerID);
-    try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
       try (BatchOperation batch = db.getStore().getBatchHandler()
           .initBatchOperation()) {
         DatanodeStoreSchemaTwoImpl dnStoreTwoImpl =
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
index 644ee014e9f..027bc598048 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -146,7 +147,7 @@ private List createTestContainers(
   public void testScrubbingStaleRecoveringContainers(
       ContainerTestVersionInfo versionInfo) throws Exception {
     initVersionInfo(versionInfo);
-    ContainerSet containerSet = new ContainerSet(10);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 10);
     containerSet.setClock(testClock);
     StaleRecoveringContainerScrubbingService srcss =
         new StaleRecoveringContainerScrubbingService(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 890bca18cb1..c3932a6286e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -30,6 +30,7 @@
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -85,7 +86,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout)
         RandomContainerDeletionChoosingPolicy.class.getName());
     List<StorageLocation> pathLists = new LinkedList<>();
     pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
 
     int numContainers = 10;
     for (int i = 0; i < numContainers; i++) {
@@ -148,7 +149,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout)
         TopNOrderedContainerDeletionChoosingPolicy.class.getName());
     List<StorageLocation> pathLists = new LinkedList<>();
     pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
 
     int numContainers = 10;
     Random random = new Random();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 3ff8f9e625d..d27728fdbb1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -152,7 +153,7 @@ public static void shutdown() throws IOException {
   @BeforeEach
   public void setupPaths() throws IOException {
-    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     volumeSet = new MutableVolumeSet(DATANODE_UUID, conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     // Initialize volume directories.
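NOTE (illustrative sketch, not part of the patch): every test-side change in the files above and
below follows one mechanical pattern -- new ContainerSet(timeout) becomes
new ContainerSet(table, timeout), with DBTestUtils.getInMemoryTableForTest() (added by this patch)
supplying an in-memory stand-in for the RocksDB-backed containerIds table. A minimal self-contained
sketch of the pattern; the wrapper class name is hypothetical:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
    import org.apache.hadoop.hdds.utils.db.DBTestUtils;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;

    class ContainerSetConstructionSketch {
      ContainerSet newTestContainerSet() {
        // In-memory Table<Long, State>, so unit tests never open RocksDB.
        Table<Long, State> ids = DBTestUtils.getInMemoryTableForTest();
        // 1000 is the recovering-container timeout (ms) used across these tests.
        return new ContainerSet(ids, 1000);
      }
    }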
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index 1f1d24bcad9..98d9e7b7719 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; @@ -68,7 +69,7 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) { public void testAddGetRemoveContainer(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); long containerId = 100L; ContainerProtos.ContainerDataProto.State state = ContainerProtos .ContainerDataProto.State.CLOSED; @@ -157,7 +158,7 @@ public void testIteratorPerVolume(ContainerLayoutVersion layout) HddsVolume vol2 = mock(HddsVolume.class); when(vol2.getStorageID()).thenReturn("uuid-2"); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); for (int i = 0; i < 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layout, @@ -200,7 +201,7 @@ public void iteratorIsOrderedByScanTime(ContainerLayoutVersion layout) HddsVolume vol = mock(HddsVolume.class); when(vol.getStorageID()).thenReturn("uuid-1"); Random random = new Random(); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); int containerCount = 50; for (int i = 0; i < containerCount; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, @@ -298,7 +299,7 @@ private static void assertContainerIds(int startId, int count, } private ContainerSet createContainerSet() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); for (int i = FIRST_ID; i < FIRST_ID + 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layoutVersion, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 1cbd6ee4706..4bcb56d4302 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import 
org.apache.hadoop.hdds.security.token.TokenVerifier;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.ChecksumData;
@@ -131,7 +132,7 @@ public void testContainerCloseActionWhenFull(
     try {
       UUID scmId = UUID.randomUUID();
-      ContainerSet containerSet = new ContainerSet(1000);
+      ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
       StateContext context = ContainerTestUtils.getMockContext(dd, conf);
       KeyValueContainerData containerData = new KeyValueContainerData(1L,
           layout,
@@ -266,7 +267,7 @@ public void testContainerCloseActionWhenVolumeFull(
         .thenReturn(Collections.singletonList(volumeBuilder.build()));
     try {
       UUID scmId = UUID.randomUUID();
-      ContainerSet containerSet = new ContainerSet(1000);
+      ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
       StateContext context = ContainerTestUtils.getMockContext(dd, conf);
       // create a 50 byte container
       KeyValueContainerData containerData = new KeyValueContainerData(1L,
@@ -516,7 +517,7 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
   static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       OzoneConfiguration conf, TokenVerifier tokenVerifier)
       throws IOException {
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     volumeSet.getVolumesList().stream().forEach(v -> {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index a3b60aa36da..4c35202be1e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -90,7 +91,7 @@ private void init() throws Exception {
         pipelineID.getId().toString(), null);
     container = new KeyValueContainer(data, conf);
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     containerSet.addContainer(container);
     containerHandler = mock(Handler.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
index dcabad46ac5..7b20f856f17 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;
@@ -111,7 +112,7 @@ private void setup() throws Exception {
     conf = new OzoneConfiguration();
     layout = ContainerLayoutVersion.FILE_PER_BLOCK;
     ozoneContainer = mock(OzoneContainer.class);
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     volume1 = mock(HddsVolume.class);
     when(volume1.getStorageID()).thenReturn("uuid-1");
     for (int i = 0; i <= 10; i++) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 55df5f43b6b..8dc7679d77e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -268,7 +269,7 @@ public void testVolumeFailure() throws IOException {
         new DummyChecker(conf, new Timer(), 0);
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
-    ContainerSet conSet = new ContainerSet(20);
+    ContainerSet conSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 20);
     when(ozoneContainer.getContainerSet()).thenReturn(conSet);
     String path = dir.getPath();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 49ddd5f674d..40d9df999b4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -49,6 +49,7 @@
 import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.junit.jupiter.api.AfterEach;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded;
@@ -76,7 +77,7 @@ public class TestKeyValueBlockIterator {
   private OzoneConfiguration conf;
   @TempDir
   private File testRoot;
-  private DBHandle db;
+  private DBHandle<DatanodeStore> db;
   private ContainerLayoutVersion layout;
   private String schemaVersion;
   private String datanodeID = UUID.randomUUID().toString();
@@ -398,7 +399,7 @@ private Map<String, List<Long>> createContainerWithBlocks(long containerId,
       Map<String, Integer> prefixCounts) throws Exception {
     // Create required block data.
     Map<String, List<Long>> blockIDs = new HashMap<>();
-    try (DBHandle metadataStore = BlockUtils.getDB(containerData, conf)) {
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(containerData, conf)) {
       List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
       ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
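Note on the pattern in the test diffs above and below: DBHandle is made generic over its store type in this series, so raw DBHandle declarations become DBHandle<DatanodeStore> wherever the datanode RocksDB is used (hence the new DatanodeStore imports). A minimal sketch of the resulting call site, assuming the generic DBHandle<STORE> introduced here; containerData, conf and localId stand for test fixtures:

    // Open a typed handle to the container's datanode store and read one
    // block from its block-data table (both accessors are existing Ozone
    // APIs; only the type parameter is new in this series).
    try (DBHandle<DatanodeStore> handle = BlockUtils.getDB(containerData, conf)) {
      Table<String, BlockData> blocks = handle.getStore().getBlockDataTable();
      BlockData block = blocks.get(containerData.getBlockKey(localId));
      // assertions on 'block' would go here
    }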
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 584db675d93..cfbac195697 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -455,7 +455,7 @@ private void createContainer() throws StorageContainerException {
   private void populate(KeyValueContainer container, long numberOfKeysToWrite)
       throws IOException {
     KeyValueContainerData cData = container.getContainerData();
-    try (DBHandle metadataStore = BlockUtils.getDB(cData, CONF)) {
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(cData, CONF)) {
       Table<String, BlockData> blockDataTable =
           metadataStore.getStore().getBlockDataTable();
@@ -486,7 +486,7 @@ private void populateWithoutBlock(KeyValueContainer container,
       long numberOfKeysToWrite)
       throws IOException {
     KeyValueContainerData cData = container.getContainerData();
-    try (DBHandle metadataStore = BlockUtils.getDB(cData, CONF)) {
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(cData, CONF)) {
       // Just update metadata, and don't insert in block table
       // As for test, we are doing manually so adding key count to DB.
       metadataStore.getStore().getMetadataTable()
@@ -687,7 +687,7 @@ public void testContainerRocksDB(ContainerTestVersionInfo versionInfo)
         keyValueContainerData, CONF);
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    try (DBHandle db = BlockUtils.getDB(keyValueContainerData, CONF)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(keyValueContainerData, CONF)) {
       RDBStore store = (RDBStore) db.getStore().getStore();
       long defaultCacheSize = OzoneConsts.GB;
       long cacheSize = Long.parseLong(store
@@ -742,7 +742,7 @@ public void testDBProfileAffectsDBOptions(
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
     DatanodeDBProfile outProfile1;
-    try (DBHandle db1 =
+    try (DBHandle<DatanodeStore> db1 =
         BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
       DatanodeStore store1 = db1.getStore();
       assertInstanceOf(AbstractDatanodeStore.class, store1);
@@ -763,7 +763,7 @@ public void testDBProfileAffectsDBOptions(
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
     DatanodeDBProfile outProfile2;
-    try (DBHandle db2 =
+    try (DBHandle<DatanodeStore> db2 =
        BlockUtils.getDB(keyValueContainer.getContainerData(), otherConf)) {
       DatanodeStore store2 = db2.getStore();
       assertInstanceOf(AbstractDatanodeStore.class, store2);
@@ -1055,7 +1055,7 @@ private void testMixedSchemaImport(String dir,
     KeyValueContainer container = new KeyValueContainer(data, conf);
     container.create(volumeSet, volumeChoosingPolicy, scmId);
     long pendingDeleteBlockCount = 20;
-    try (DBHandle meta = BlockUtils.getDB(data, conf)) {
+    try (DBHandle<DatanodeStore> meta = BlockUtils.getDB(data, conf)) {
       Table<String, Long> metadataTable = meta.getStore().getMetadataTable();
       metadataTable.put(data.getPendingDeleteBlockCountKey(),
           pendingDeleteBlockCount);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index b24a6f04c48..891cfbd6bc7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration;
 import java.io.File;
@@ -115,8 +116,8 @@ public void testKeyValueContainerCheckCorruption(
     File dbFile = KeyValueContainerLocationUtil
         .getContainerDBFile(containerData);
     containerData.setDbFile(dbFile);
-    try (DBHandle ignored = BlockUtils.getDB(containerData, conf);
-        BlockIterator kvIter =
+    try (DBHandle<DatanodeStore> ignored = BlockUtils.getDB(containerData, conf);
+        BlockIterator<BlockData> kvIter =
             ignored.getStore().getBlockIterator(containerID)) {
       BlockData block = kvIter.nextBlock();
       assertFalse(block.getChunks().isEmpty());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 9c531069e9c..116bff84367 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
@@ -135,7 +136,7 @@ protected KeyValueContainer createContainerWithBlocks(long containerId,
     KeyValueContainer container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         clusterID);
-    try (DBHandle metadataStore = BlockUtils.getDB(containerData,
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(containerData,
         conf)) {
       assertNotNull(containerData.getChunksPath());
       File chunksPath = new File(containerData.getChunksPath());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
index 12a659b7e44..d6b9f631b3a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -425,7 +425,7 @@ public void setDB(KeyValueContainerData containerData,
       long blockCount, long byteCount, long dbDeleteCount,
       List<DeletedBlocksTransaction> deleteTransactions) throws Exception {
-    try (DBHandle db = BlockUtils.getDB(containerData, getConf())) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, getConf())) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
       // Don't care about in memory state. Just change the DB values.
       metadataTable.put(containerData.getBlockCountKey(), blockCount);
@@ -470,7 +470,7 @@ public void setDB(KeyValueContainerData containerData,
   void checkDbCounts(KeyValueContainerData containerData,
       long expectedBlockCount, long expectedBytesUsed,
       long expectedDeletedCount) throws Exception {
-    try (DBHandle db = BlockUtils.getDB(containerData, getConf())) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, getConf())) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
       long bytesUsed = metadataTable.get(containerData.getBytesUsedKey());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 2637f1922c6..2e84fd2cdc8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.security.token.TokenVerifier;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -269,7 +270,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception {
     volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     try {
-      ContainerSet cset = new ContainerSet(1000);
+      ContainerSet cset = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
       int[] interval = new int[1];
       interval[0] = 2;
       ContainerMetrics metrics = new ContainerMetrics(interval);
@@ -355,7 +356,7 @@ public void testDeleteContainer() throws IOException {
     final String clusterId = UUID.randomUUID().toString();
     final String datanodeId = UUID.randomUUID().toString();
     final ConfigurationSource conf = new OzoneConfiguration();
-    final ContainerSet containerSet = new ContainerSet(1000);
+    final ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
     HddsVolume hddsVolume = new HddsVolume.Builder(testDir).conf(conf)
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 8fd7b6280b6..f1fdf66ef94 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -44,6 +45,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ratis.util.FileUtils;
@@ -104,7 +106,7 @@ private void setup(ContainerTestVersionInfo versionInfo) throws Exception {
         Files.createDirectory(tempDir.resolve("volumeDir")).toFile();
     this.conf = new OzoneConfiguration();
     volumeSet = mock(MutableVolumeSet.class);
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     datanodeId = UUID.randomUUID();
     hddsVolume = new HddsVolume.Builder(volumeDir
@@ -152,7 +154,7 @@ public void cleanup() {
   private void markBlocksForDelete(KeyValueContainer keyValueContainer,
       boolean setMetaData, List<String> blockNames, int count) throws Exception {
     KeyValueContainerData cData = keyValueContainer.getContainerData();
-    try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(cData, conf)) {
       for (int i = 0; i < count; i++) {
         Table<String, BlockData> blockDataTable =
@@ -183,7 +185,7 @@ private List<String> addBlocks(KeyValueContainer keyValueContainer,
     long containerId = keyValueContainer.getContainerData().getContainerID();
     KeyValueContainerData cData = keyValueContainer.getContainerData();
     List<String> blkNames = new ArrayList<>();
-    try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(cData, conf)) {
       for (int i = 0; i < blockCount; i++) {
         // Creating BlockData
@@ -268,7 +270,7 @@ public void testContainerReaderWithLoadException(
     setup(versionInfo);
     MutableVolumeSet volumeSet1;
     HddsVolume hddsVolume1;
-    ContainerSet containerSet1 = new ContainerSet(1000);
+    ContainerSet containerSet1 = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     File volumeDir1 =
         Files.createDirectory(tempDir.resolve("volumeDir" + 1)).toFile();
     RoundRobinVolumeChoosingPolicy volumeChoosingPolicy1;
@@ -318,7 +320,7 @@ public void testContainerReaderWithInvalidDbPath(
     setup(versionInfo);
     MutableVolumeSet volumeSet1;
     HddsVolume hddsVolume1;
-    ContainerSet containerSet1 = new ContainerSet(1000);
+    ContainerSet containerSet1 = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     File volumeDir1 =
         Files.createDirectory(tempDir.resolve("volumeDirDbDelete")).toFile();
     RoundRobinVolumeChoosingPolicy volumeChoosingPolicy1;
@@ -530,7 +532,7 @@ private KeyValueContainer createContainerWithId(int id, VolumeSet volSet,
   private void setBlockCommitSequence(KeyValueContainerData cData, long val)
       throws IOException {
-    try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> metadataStore = BlockUtils.getDB(cData, conf)) {
       metadataStore.getStore().getMetadataTable()
          .put(cData.getBcsIdKey(), val);
      metadataStore.getStore().flushDB();
@@ -575,7 +577,7 @@ public void testMarkedDeletedContainerCleared(
     if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
       // verify if newly added container is not present as added
-      try (DBHandle dbHandle = BlockUtils.getDB(
+      try (DBHandle<DatanodeStore> dbHandle = BlockUtils.getDB(
           kvContainer.getContainerData(), conf)) {
         DatanodeStoreSchemaThreeImpl store = (DatanodeStoreSchemaThreeImpl)
             dbHandle.getStore();
@@ -587,7 +589,7 @@ private long addDbEntry(KeyValueContainerData containerData)
       throws Exception {
-    try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
+    try (DBHandle<DatanodeStore> dbHandle = BlockUtils.getDB(containerData, conf)) {
       DatanodeStoreSchemaThreeImpl store = (DatanodeStoreSchemaThreeImpl)
           dbHandle.getStore();
       Table<String, Long> metadataTable = store.getMetadataTable();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 07804c2a20b..2586625d349 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.io.TempDir;
@@ -256,7 +257,7 @@ private long addBlocks(KeyValueContainer container,
     long freeBytes = container.getContainerData().getMaxSize();
     long containerId = container.getContainerData().getContainerID();
     KeyValueContainerData cData = container.getContainerData();
-    try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
index 1b989e6bc7f..d6b91cb35c1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
@@ -86,7 +87,7 @@ void importSameContainerWhenAlreadyImport() throws Exception {
     KeyValueContainer container = new KeyValueContainer(containerData, conf);
     ContainerController controllerMock = mock(ContainerController.class);
     // create containerImporter object
-    ContainerSet containerSet = new ContainerSet(0);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0);
     containerSet.addContainer(container);
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
@@ -117,7 +118,7 @@ void importSameContainerWhenFirstInProgress() throws Exception {
       return container;
     });
     // create containerImporter object
-    ContainerSet containerSet = new ContainerSet(0);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0);
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = new ContainerImporter(conf,
@@ -156,7 +157,7 @@ public void testInconsistentChecksumContainerShouldThrowError() throws Exception
     doNothing().when(containerData).setChecksumTo0ByteArray();
     // create containerImporter object
     ContainerController controllerMock = mock(ContainerController.class);
-    ContainerSet containerSet = new ContainerSet(0);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0);
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = spy(new ContainerImporter(conf,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java
index 03901b99be3..ebf3d33ebd5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.security.SecurityConfig;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -96,7 +97,7 @@ public void init(boolean isZeroCopy) throws Exception {
     SecurityConfig secConf = new SecurityConfig(conf);
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     DatanodeDetails.Builder dn =
         DatanodeDetails.newBuilder().setUuid(UUID.randomUUID())
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 315e0c0253b..f3e2cd66080 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority;
 import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -129,7 +130,7 @@ public class TestReplicationSupervisor {
   @BeforeEach
   public void setUp() throws Exception {
     clock = new TestClock(Instant.now(), ZoneId.systemDefault());
-    set = new ContainerSet(1000);
+    set = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     DatanodeStateMachine stateMachine =
         mock(DatanodeStateMachine.class);
     context = new StateContext(
         new OzoneConfiguration(),
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
index baaf296f02b..7615301849d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -55,11 +56,11 @@ void setup() {
   void testReceiveDataForExistingContainer() throws Exception {
     long containerId = 1;
     // create containerImporter
-    ContainerSet containerSet = new ContainerSet(0);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0);
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = new ContainerImporter(conf,
-        new ContainerSet(0), mock(ContainerController.class), volumeSet);
+        new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0), mock(ContainerController.class), volumeSet);
     KeyValueContainerData containerData = new KeyValueContainerData(containerId,
         ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test");
     // add container to container set
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java
new file mode 100644
index 00000000000..0eed13cbe30
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java
@@ -0,0 +1,142 @@
+package org.apache.hadoop.hdds.utils.db;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Util class for mocking DB interactions happening in various tests.
+ */
+public final class DBTestUtils {
+
+  private DBTestUtils() {
+
+  }
+
+  public static <KEY, VALUE> Table<KEY, VALUE> getInMemoryTableForTest() {
+    return new Table<KEY, VALUE>() {
+      private final Map<KEY, VALUE> map = new ConcurrentHashMap<>();
+
+      @Override
+      public void close() {
+      }
+
+      @Override
+      public void put(KEY key, VALUE value) {
+        map.put(key, value);
+      }
+
+      @Override
+      public void putWithBatch(BatchOperation batch, KEY key, VALUE value) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public boolean isEmpty() {
+        return map.isEmpty();
+      }
+
+      @Override
+      public boolean isExist(KEY key) {
+        return map.containsKey(key);
+      }
+
+      @Override
+      public VALUE get(KEY key) {
+        return map.get(key);
+      }
+
+      @Override
+      public VALUE getIfExist(KEY key) {
+        return map.get(key);
+      }
+
+      @Override
+      public void delete(KEY key) {
+        map.remove(key);
+      }
+
+      @Override
+      public void deleteWithBatch(BatchOperation batch, KEY key) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public void deleteRange(KEY beginKey, KEY endKey) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator() {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator(KEY prefix) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public String getName() {
+        return "";
+      }
+
+      @Override
+      public long getEstimatedKeyCount() {
+        return map.size();
+      }
+
+      @Override
+      public List<? extends KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey, int count, KEY prefix,
+          MetadataKeyFilters.MetadataKeyFilter... filters)
+          throws IOException, IllegalArgumentException {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public List<? extends KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey, int count, KEY prefix,
+          MetadataKeyFilters.MetadataKeyFilter... filters)
+          throws IOException, IllegalArgumentException {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public void dumpToFileWithPrefix(File externalFile, KEY prefix) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public void loadFromFile(File externalFile) {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+}
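DBTestUtils.getInMemoryTableForTest() above backs a Table with a plain ConcurrentHashMap, which is what lets the ContainerSet constructor calls in these test diffs avoid a real RocksDB instance. A minimal usage sketch follows; the expectation that addContainer records the ID in the table is an assumption based on this commit's purpose (tracking created container IDs), and the container object stands for a test fixture:

    // Back a ContainerSet with the in-memory table and verify that adding
    // a container leaves a trace in the ID table.
    Table<Long, Long> containerIdsTable = DBTestUtils.getInMemoryTableForTest();
    ContainerSet containerSet = new ContainerSet(containerIdsTable, 1000);
    containerSet.addContainer(container);
    assertFalse(containerIdsTable.isEmpty());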
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 0237210d2fc..d08daac9bdf 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -194,6 +194,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
         <scope>test</scope>
       </dependency>
+      <dependency>
+        <groupId>org.apache.ozone</groupId>
+        <artifactId>hdds-server-framework</artifactId>
+        <version>${hdds.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+
       <dependency>
         <groupId>org.apache.ozone</groupId>
         <artifactId>hdds-container-service</artifactId>
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 583c801bcd4..700bff59403 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -57,6 +57,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-server-framework</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-server-framework</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-client</artifactId>
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
index b3c15a46f76..3db6e4ffa87 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.utils.db.CodecBuffer;
 import org.apache.hadoop.hdds.utils.db.CodecTestUtil;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Checksum;
@@ -133,7 +134,7 @@ public void setup() throws Exception {
       return volumes.get(ii);
     });
-    containerSet = new ContainerSet(1000);
+    containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     blockManager = new BlockManagerImpl(CONF);
     chunkManager = new FilePerBlockStrategy(true, blockManager, null);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index 95d7faa9174..81f95b14fb4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -1043,7 +1043,7 @@ public List<Long> getAllBlocks(MiniOzoneCluster cluster, OzoneConfiguration conf,
       Long containerID) throws IOException {
     List<Long> allBlocks = Lists.newArrayList();
     KeyValueContainerData cData = getContainerMetadata(cluster, containerID);
-    try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
       List<? extends Table.KeyValue<String, BlockData>> kvs =
           db.getStore().getBlockDataTable()
@@ -1063,7 +1063,7 @@ public boolean verifyBlocksWithTxnTable(MiniOzoneCluster cluster, OzoneConfigura
       throws IOException {
     for (Map.Entry<Long, List<Long>> entry : containerBlocks.entrySet()) {
       KeyValueContainerData cData = getContainerMetadata(cluster, entry.getKey());
-      try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+      try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
        DatanodeStore ds = db.getStore();
        DatanodeStoreSchemaThreeImpl dnStoreImpl =
            (DatanodeStoreSchemaThreeImpl) ds;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java
index bf20b4ecc0b..9153f417792 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 
 import java.io.File;
 import java.io.IOException;
@@ -65,7 +66,7 @@ public static File getChunksLocationPath(MiniOzoneCluster cluster, Container con
     // the container.
     KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
-    try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf())) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, cluster.getConf())) {
       BlockID blockID = new BlockID(containerID, localID);
       String blockKey = containerData.getBlockKey(localID);
       BlockData blockData = db.getStore().getBlockByID(blockID, blockKey);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index 6edef789b17..37da4783d1f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -109,6 +109,7 @@
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmFailoverProxyUtil;
@@ -2114,7 +2115,7 @@ public void testGetKeyDetails() throws IOException {
         (KeyValueContainerData)(datanodeService.getDatanodeStateMachine()
             .getContainer().getContainerSet().getContainer(containerID)
             .getContainerData());
-    try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf());
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(containerData, cluster.getConf());
         BlockIterator<BlockData> keyValueBlockIterator =
             db.getStore().getBlockIterator(containerID)) {
       while (keyValueBlockIterator.hasNext()) {
@@ -2247,7 +2248,7 @@ void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
     long newBCSID = container.getBlockCommitSequenceId() - 1;
     KeyValueContainerData cData =
         (KeyValueContainerData) container.getContainerData();
-    try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, cluster.getConf())) {
       db.getStore().getMetadataTable().put(cData.getBcsIdKey(), newBCSID);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 805a3a86eba..ae181fa2cc7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -58,6 +58,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -280,7 +281,7 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
         ((KeyValueContainer) cluster.getHddsDatanode(block1DNs.get(2))
             .getDatanodeStateMachine().getContainer().getContainerSet()
             .getContainer(containerId1)).getContainerData();
-    try (DBHandle containerDb1 = BlockUtils.getDB(containerData1, conf)) {
+    try (DBHandle<DatanodeStore> containerDb1 = BlockUtils.getDB(containerData1, conf)) {
       BlockData blockData1 = containerDb1.getStore().getBlockDataTable().get(
           containerData1.getBlockKey(locationList.get(0).getBlockID()
               .getLocalID()));
@@ -298,7 +299,7 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
         ((KeyValueContainer) cluster.getHddsDatanode(block2DNs.get(0))
             .getDatanodeStateMachine().getContainer().getContainerSet()
             .getContainer(containerId2)).getContainerData();
-    try (DBHandle containerDb2 = BlockUtils.getDB(containerData2, conf)) {
+    try (DBHandle<DatanodeStore> containerDb2 = BlockUtils.getDB(containerData2, conf)) {
       BlockData blockData2 = containerDb2.getStore().getBlockDataTable().get(
           containerData2.getBlockKey(locationList.get(1).getBlockID()
               .getLocalID()));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 1e22613f929..141ce16aebb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -230,7 +231,7 @@ public void testValidateBCSIDOnDnRestart() throws Exception {
         .getContainer(omKeyLocationInfo.getContainerID())
         .getContainerData();
     keyValueContainerData = assertInstanceOf(KeyValueContainerData.class, containerData);
-    try (DBHandle db = BlockUtils.getDB(keyValueContainerData, conf)) {
+    try (DBHandle<DatanodeStore> db = BlockUtils.getDB(keyValueContainerData, conf)) {
       // modify the bcsid for the container in the ROCKS DB thereby inducing
       // corruption
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 719715ac8b3..f68ccfd13fe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -73,6 +73,7 @@
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -743,7 +744,7 @@ private void verifyBlocksCreated(
     OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
       KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet
          .getContainer(blockID.getContainerID()).getContainerData();
-      try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+      try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
        assertNotNull(db.getStore().getBlockDataTable()
            .get(cData.getBlockKey(blockID.getLocalID())));
      }
@@ -759,7 +760,7 @@ private void verifyBlocksDeleted(
     OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
       KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet
          .getContainer(blockID.getContainerID()).getContainerData();
-      try (DBHandle db = BlockUtils.getDB(cData, conf)) {
+      try (DBHandle<DatanodeStore> db = BlockUtils.getDB(cData, conf)) {
        Table<String, BlockData> blockDataTable =
            db.getStore().getBlockDataTable();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 192c933f53c..c9659ecaa82 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
@@ -553,7 +554,7 @@ public void testContainerDeleteWithInvalidBlockCount()
   private void clearBlocksTable(Container container) throws IOException {
-    try (DBHandle dbHandle
+    try (DBHandle<DatanodeStore> dbHandle
             = BlockUtils.getDB(
                 (KeyValueContainerData) container.getContainerData(),
                 conf)) {
@@ -565,7 +566,7 @@ private void clearBlocksTable(Container container) throws IOException {
     }
   }
-  private void clearTable(DBHandle dbHandle, Table<String, BlockData> table, Container container)
+  private void clearTable(DBHandle<DatanodeStore> dbHandle, Table<String, BlockData> table, Container container)
       throws IOException {
     List<? extends Table.KeyValue<String, BlockData>> blocks = table.getRangeKVs(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index e68831b494f..59141b9ed3a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.RatisTestHelper;
@@ -190,7 +191,7 @@ static XceiverServerRatis newXceiverServerRatis(
     final ContainerDispatcher dispatcher = new TestContainerDispatcher();
     return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher,
-        new ContainerController(new ContainerSet(1000), Maps.newHashMap()),
+        new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()),
         null, null);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index e6264cd3e11..bff3cfde919 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -145,7 +146,7 @@ private static MutableVolumeSet createVolumeSet(DatanodeDetails dn, String path)
   }
   private HddsDispatcher createDispatcher(DatanodeDetails dd, VolumeSet volumeSet) {
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     StateContext context = ContainerTestUtils.getMockContext(
         dd, CONF);
     ContainerMetrics metrics = ContainerMetrics.create(CONF);
@@ -254,7 +255,7 @@ private XceiverServerSpi newXceiverServerRatis(DatanodeDetails dn, MutableVolume
     final ContainerDispatcher dispatcher = createDispatcher(dn, volumeSet);
     return XceiverServerRatis.newXceiverServerRatis(null, dn, CONF, dispatcher,
-        new ContainerController(new ContainerSet(1000), Maps.newHashMap()),
+        new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()),
         null, null);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 8db7b137472..b891d3b3309 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.RatisTestHelper;
@@ -133,7 +134,7 @@ static XceiverServerRatis newXceiverServerRatis(
     final ContainerDispatcher dispatcher = new TestContainerDispatcher();
     return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher,
-        new ContainerController(new ContainerSet(1000), Maps.newHashMap()),
+        new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()),
         caClient, null);
   }
@@ -190,7 +191,7 @@ static void runTestClientServer(
   private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       OzoneConfiguration conf) throws IOException {
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     conf.set(HDDS_DATANODE_DIR_KEY,
         Paths.get(TEST_DIR, "dfs", "data", "hdds",
             RandomStringUtils.randomAlphabetic(4)).toString());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 0bdf61b3bd5..d2f2e0cefa7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdds.security.token.TokenVerifier;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
+import org.apache.hadoop.hdds.utils.db.DBTestUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.RatisTestHelper;
@@ -173,7 +174,7 @@ public void testClientServer() throws Exception {
   private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       OzoneConfiguration conf) throws IOException {
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);
     conf.set(HDDS_DATANODE_DIR_KEY,
         Paths.get(TEST_DIR, "dfs", "data", "hdds",
             RandomStringUtils.randomAlphabetic(4)).toString());
@@ -219,7 +220,7 @@ XceiverServerRatis newXceiverServerRatis(
     final ContainerDispatcher dispatcher = createDispatcher(dn,
         UUID.randomUUID(), conf);
     return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher,
-        new ContainerController(new ContainerSet(1000), Maps.newHashMap()),
+        new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()),
         caClient, null);
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
index 87482cb549b..7209c734104 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.metadata.MasterVolumeDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
@@ -56,7 +57,8 @@ private DBDefinitionFactory() {
   static {
     final Map<String, DBDefinition> map = new HashMap<>();
-    Arrays.asList(SCMDBDefinition.get(), OMDBDefinition.get(), ReconSCMDBDefinition.get())
+    Arrays.asList(SCMDBDefinition.get(), OMDBDefinition.get(), ReconSCMDBDefinition.get(),
+        MasterVolumeDBDefinition.get())
         .forEach(dbDefinition -> map.put(dbDefinition.getName(), dbDefinition));
     DB_MAP = Collections.unmodifiableMap(map);
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
index 5592926bf88..e1402bb80e1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
@@ -115,7 +115,7 @@ OzoneConfiguration getOzoneConf() {
   public void loadContainersFromVolumes() throws IOException {
     OzoneConfiguration conf = parent.getOzoneConf();
-    ContainerSet containerSet = new ContainerSet(1000);
+    ContainerSet containerSet = new ContainerSet(null, 1000);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
index 393c7e599c5..4db4231373d 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
@@ -32,8 +32,10 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.replication.ContainerImporter;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
@@ -82,66 +84,75 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements
   private ContainerReplicator replicator;
   private Timer timer;
+  private ReferenceCountedDB masterVolumeMetadataStoreReferenceCountedDB;
   private List<ReplicationTask> replicationTasks;
   @Override
   public Void call() throws Exception {
-    OzoneConfiguration conf = createOzoneConfiguration();
+    try {
+      OzoneConfiguration conf = createOzoneConfiguration();
-    final Collection<String> datanodeStorageDirs =
-        HddsServerUtil.getDatanodeStorageDirs(conf);
+      final Collection<String> datanodeStorageDirs =
+          HddsServerUtil.getDatanodeStorageDirs(conf);
-    for (String dir : datanodeStorageDirs) {
-      checkDestinationDirectory(dir);
-    }
+      for (String dir : datanodeStorageDirs) {
+        checkDestinationDirectory(dir);
+      }
-    final ContainerOperationClient containerOperationClient =
-        new ContainerOperationClient(conf);
+      final ContainerOperationClient containerOperationClient =
+          new ContainerOperationClient(conf);
-    final List<ContainerInfo> containerInfos =
-        containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList();
+      final List<ContainerInfo> containerInfos =
+          containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList();
-    //logic same as the download+import on the destination datanode
-    initializeReplicationSupervisor(conf, containerInfos.size() * 2);
+      //logic same as the download+import on the destination datanode
+      initializeReplicationSupervisor(conf, containerInfos.size() * 2);
-    replicationTasks = new ArrayList<>();
+      replicationTasks = new ArrayList<>();
-    for (ContainerInfo container : containerInfos) {
+      for (ContainerInfo container : containerInfos) {
-      final ContainerWithPipeline containerWithPipeline =
-          containerOperationClient
-              .getContainerWithPipeline(container.getContainerID());
+        final ContainerWithPipeline containerWithPipeline =
+            containerOperationClient
+                .getContainerWithPipeline(container.getContainerID());
-      if (container.getState() == LifeCycleState.CLOSED) {
+        if (container.getState() == LifeCycleState.CLOSED) {
-        final List<DatanodeDetails> datanodesWithContainer =
-            containerWithPipeline.getPipeline().getNodes();
+          final List<DatanodeDetails> datanodesWithContainer =
+              containerWithPipeline.getPipeline().getNodes();
-        final List<String> datanodeUUIDs =
-            datanodesWithContainer
-                .stream().map(DatanodeDetails::getUuidString)
-                .collect(Collectors.toList());
+          final List<String> datanodeUUIDs =
+              datanodesWithContainer
+                  .stream().map(DatanodeDetails::getUuidString)
+                  .collect(Collectors.toList());
-        //if datanode is specified, replicate only container if it has a
-        //replica.
-        if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) {
-          replicationTasks.add(new ReplicationTask(
-              ReplicateContainerCommand.fromSources(container.getContainerID(),
-                  datanodesWithContainer), replicator));
+          //if datanode is specified, replicate only container if it has a
+          //replica.
+          if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) {
+            replicationTasks.add(new ReplicationTask(
+                ReplicateContainerCommand.fromSources(container.getContainerID(),
+                    datanodesWithContainer), replicator));
+          }
         }
-      }
+      }
-    //important: override the max number of tasks.
-    setTestNo(replicationTasks.size());
+      //important: override the max number of tasks.
+      setTestNo(replicationTasks.size());
-    init();
+      init();
-    timer = getMetrics().timer("replicate-container");
-    runTests(this::replicateContainer);
+      timer = getMetrics().timer("replicate-container");
+      runTests(this::replicateContainer);
+    } finally {
+      if (masterVolumeMetadataStoreReferenceCountedDB != null) {
+        masterVolumeMetadataStoreReferenceCountedDB.close();
+        masterVolumeMetadataStoreReferenceCountedDB.cleanup();
+      }
+
+    }
     return null;
   }
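The rewrap of call() above exists so that the reference-counted master-volume store is released on every exit path, not just on success. Reduced to its core, the discipline is the following sketch (names from this patch; that close() releases the reference taken by incrementReference() is an assumption drawn from the finally block above):

    ReferenceCountedDB ref = MasterVolumeMetadataStore.get(conf);
    ref.incrementReference();
    try {
      // ... do the replication work using ref.getStore() ...
    } finally {
      ref.close();    // release this user's reference
      ref.cleanup();  // tear down the store once it is unreferenced
    }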
@@ -173,8 +184,12 @@ private void initializeReplicationSupervisor(
     if (fakeDatanodeUuid.isEmpty()) {
       fakeDatanodeUuid = UUID.randomUUID().toString();
     }
-
-    ContainerSet containerSet = new ContainerSet(1000);
+    ReferenceCountedDB referenceCountedDS =
+        MasterVolumeMetadataStore.get(conf);
+    referenceCountedDS.incrementReference();
+    this.masterVolumeMetadataStoreReferenceCountedDB = referenceCountedDS;
+    ContainerSet containerSet = new ContainerSet(referenceCountedDS.getStore()
+        .getContainerIdsTable(), 1000);
     ContainerMetrics metrics = ContainerMetrics.create(conf);

From 87a480905dc6dfb5a4b8f9b301c3415ab82b5be7 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Wed, 6 Nov 2024 21:49:58 -0800
Subject: [PATCH 02/37] HDDS-11650. Fix test cases

Change-Id: I22654091edbd3a11c585aa95ca2b554eba0f9d95
---
 .../hadoop/hdds/utils/db/Proto2EnumCodec.java |  2 +-
 .../container/common/impl/ContainerSet.java   |  9 ++++++---
 .../container/ozoneimpl/OzoneContainer.java   | 19 ++++++++++---------
 .../TestDeleteBlocksCommandHandler.java       |  2 ++
 .../volume/TestStorageVolumeChecker.java      |  3 ++-
 .../ozoneimpl/TestOzoneContainer.java         |  3 +++
 .../upgrade/TestDatanodeUpgradeToScmHA.java   | 11 +++++++++--
 7 files changed, 33 insertions(+), 16 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java
index 9e5ee487318..d206b17a9b1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java
@@ -68,7 +68,7 @@ public CodecBuffer toCodecBuffer(@Nonnull M value,
   private M parseFrom(Integer value) throws IOException {
     try {
-      return (M) this.clazz.getMethod("valueOf", Integer.class).invoke(value);
+      return (M) this.clazz.getDeclaredMethod("forNumber", int.class).invoke(null, value);
     } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
       throw new IOException(e);
     }
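The one-line codec fix above is worth spelling out: protobuf-java enums expose the static forNumber(int) for mapping a stored wire number back to a constant, so the reflective call needs a null receiver; the old code looked up valueOf(Integer) and invoked it with the value as the receiver, which cannot resolve a static lookup. A direct, non-reflective illustration (the enum choice is arbitrary, and it assumes a protobuf version that generates forNumber, which the fix itself relies on):

    // forNumber maps a stored integer back to the enum constant and
    // returns null for numbers the enum does not define.
    HddsProtos.LifeCycleState state = HddsProtos.LifeCycleState.forNumber(1);
    HddsProtos.LifeCycleState unknown = HddsProtos.LifeCycleState.forNumber(-1); // null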
>= 0, "Container Id cannot be negative."); - Container removed = containerMap.compute(containerId, (cid, value) -> { + AtomicReference> removed = new AtomicReference<>(); + containerMap.compute(containerId, (cid, value) -> { if (markMissing) { missingContainerSet.add(containerId); } - return value; + removed.set(value); + return null; }); - if (removed == null) { + if (removed.get() == null) { LOG.debug("Container with containerId {} is not present in " + "containerMap", containerId); return false; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index e5ff5e08b48..1febe3a8b14 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -134,7 +134,7 @@ public class OzoneContainer { private ScheduledExecutorService dbCompactionExecutorService; private final ContainerMetrics metrics; - private final ReferenceCountedDB masterVolumeMetadataStore; + private ReferenceCountedDB masterVolumeMetadataStore; enum InitializingStatus { UNINITIALIZED, INITIALIZING, INITIALIZED @@ -344,19 +344,19 @@ public void buildContainerSet() throws IOException { thread.start(); volumeThreads.add(thread); } - try (TableIterator> itr = - containerSet.getContainerIdsTable().iterator()) { - Map containerIds = new HashMap<>(); - while (itr.hasNext()) { - containerIds.put(itr.next().getKey(), 0L); - } - containerSet.buildMissingContainerSetAndValidate(containerIds); - } try { for (int i = 0; i < volumeThreads.size(); i++) { volumeThreads.get(i).join(); } + try (TableIterator> itr = + containerSet.getContainerIdsTable().iterator()) { + Map containerIds = new HashMap<>(); + while (itr.hasNext()) { + containerIds.put(itr.next().getKey(), 0L); + } + containerSet.buildMissingContainerSetAndValidate(containerIds); + } } catch (InterruptedException ex) { LOG.error("Volume Threads Interrupted exception", ex); Thread.currentThread().interrupt(); @@ -541,6 +541,7 @@ public void stop() { if (this.masterVolumeMetadataStore != null) { this.masterVolumeMetadataStore.decrementReference(); this.masterVolumeMetadataStore.cleanup(); + this.masterVolumeMetadataStore = null; } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 7b20f856f17..c2c8faa25b7 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -64,6 +64,7 @@ import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL; import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT; @@ -304,6 +305,7 @@ public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull() // Setting up the test environment OzoneConfiguration configuration = new OzoneConfiguration(); configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.toString()); + configuration.set(OZONE_SCM_DATANODE_ID_DIR, folder.toString()); DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails(); DatanodeConfiguration dnConf = configuration.getObject(DatanodeConfiguration.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java index 6900131caa3..52121c5a43a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java @@ -218,7 +218,8 @@ public void testVolumeDeletion(VolumeCheckResult checkResult, ContainerLayoutVersion layout, TestInfo testInfo) throws Exception { initTest(checkResult, layout); LOG.info("Executing {}", testInfo.getTestMethod()); - + conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, new File(folder.toString(), UUID.randomUUID().toString()) + .toString()); DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); dnConf.setDiskCheckMinGap(Duration.ofMillis(0)); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 2586625d349..2b19128fa94 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -59,6 +59,7 @@ import java.util.ArrayList; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -179,6 +180,8 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) } conf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR, dbDirString.toString()); + conf.set(OZONE_SCM_DATANODE_ID_DIR, + new File(folder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); ContainerTestUtils.enableSchemaV3(conf); OzoneContainer ozoneContainer = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index d4a27e74cda..10620900598 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -56,6 +56,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -112,6 +113,8 @@ public void testReadsDuringFinalization(boolean enableSCMHA) setScmHAEnabled(enableSCMHA); // start DN and SCM startScmServer(); + conf.set(OZONE_SCM_DATANODE_ID_DIR, + new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); UpgradeTestHelper.addHddsVolume(conf, tempFolder); dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); @@ -150,6 +153,8 @@ public void testImportContainer(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); // start DN and SCM startScmServer(); + conf.set(OZONE_SCM_DATANODE_ID_DIR, + new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); UpgradeTestHelper.addHddsVolume(conf, tempFolder); dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); @@ -225,7 +230,8 @@ public void testFailedVolumeDuringFinalization(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); /// SETUP /// - + conf.set(OZONE_SCM_DATANODE_ID_DIR, + new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); startScmServer(); String originalScmID = scmServerImpl.getScmId(); File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); @@ -319,7 +325,8 @@ public void testFailedVolumeDuringFinalization(boolean enableSCMHA) public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); /// SETUP /// - + conf.set(OZONE_SCM_DATANODE_ID_DIR, + new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); startScmServer(); String originalScmID = scmServerImpl.getScmId(); File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); From c048a5ec1bbd4bb581ca9755d9719d5c3173e631 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 6 Nov 2024 22:14:48 -0800 Subject: [PATCH 03/37] HDDS-11650. 
Add comments Change-Id: Icaa5ae0b29ec0ffccf5914bec0fd6ed6ae117219 --- .../hadoop/ozone/container/common/impl/ContainerSet.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 246195af54e..27df69af744 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -169,6 +169,9 @@ public boolean removeContainer(long containerId, boolean markMissing) { Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); AtomicReference> removed = new AtomicReference<>(); + //We need to add to missing container set before removing containerMap since there could be write chunk operation + // that could recreate the container in another volume if we remove it from the map before adding to missing + // container. containerMap.compute(containerId, (cid, value) -> { if (markMissing) { missingContainerSet.add(containerId); From f4c538f03b89c29b3aad430e0fcf458ca324844a Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 07:39:08 -0800 Subject: [PATCH 04/37] HDDS-11650. Fix tests Change-Id: I995fc25b93f16aa859eeb8f0418aa774e3719330 --- .../container/metadata/MasterVolumeMetadataStore.java | 2 +- .../commandhandler/TestDeleteBlocksCommandHandler.java | 2 -- .../common/volume/TestStorageVolumeChecker.java | 2 -- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 3 --- .../container/upgrade/TestDatanodeUpgradeToScmHA.java | 9 --------- .../hadoop/ozone/client/TestOzoneClientFactory.java | 1 + 6 files changed, 2 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java index bf9e34a1e26..6d5c1acd1d2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java @@ -45,7 +45,7 @@ public static ReferenceCountedDB get(ConfigurationSou if (instance == null || instance.isClosed()) { MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); instance = new ReferenceCountedDB<>(masterVolumeMetadataStore, - masterVolumeMetadataStore.getDbDef().getDBLocation(conf).getAbsolutePath()); + masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index c2c8faa25b7..7b20f856f17 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -64,7 +64,6 @@ 
import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT; @@ -305,7 +304,6 @@ public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull() // Setting up the test environment OzoneConfiguration configuration = new OzoneConfiguration(); configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.toString()); - configuration.set(OZONE_SCM_DATANODE_ID_DIR, folder.toString()); DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails(); DatanodeConfiguration dnConf = configuration.getObject(DatanodeConfiguration.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java index 52121c5a43a..0311be1c0d2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java @@ -218,8 +218,6 @@ public void testVolumeDeletion(VolumeCheckResult checkResult, ContainerLayoutVersion layout, TestInfo testInfo) throws Exception { initTest(checkResult, layout); LOG.info("Executing {}", testInfo.getTestMethod()); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, new File(folder.toString(), UUID.randomUUID().toString()) - .toString()); DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); dnConf.setDiskCheckMinGap(Duration.ofMillis(0)); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 2b19128fa94..2586625d349 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -59,7 +59,6 @@ import java.util.ArrayList; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -180,8 +179,6 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) } conf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR, dbDirString.toString()); - conf.set(OZONE_SCM_DATANODE_ID_DIR, - new File(folder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); ContainerTestUtils.enableSchemaV3(conf); OzoneContainer ozoneContainer = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 10620900598..0ed7d195bf3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -56,7 +56,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -113,8 +112,6 @@ public void testReadsDuringFinalization(boolean enableSCMHA) setScmHAEnabled(enableSCMHA); // start DN and SCM startScmServer(); - conf.set(OZONE_SCM_DATANODE_ID_DIR, - new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); UpgradeTestHelper.addHddsVolume(conf, tempFolder); dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); @@ -153,8 +150,6 @@ public void testImportContainer(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); // start DN and SCM startScmServer(); - conf.set(OZONE_SCM_DATANODE_ID_DIR, - new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); UpgradeTestHelper.addHddsVolume(conf, tempFolder); dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); @@ -230,8 +225,6 @@ public void testFailedVolumeDuringFinalization(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); /// SETUP /// - conf.set(OZONE_SCM_DATANODE_ID_DIR, - new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); startScmServer(); String originalScmID = scmServerImpl.getScmId(); File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); @@ -325,8 +318,6 @@ public void testFailedVolumeDuringFinalization(boolean enableSCMHA) public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); /// SETUP /// - conf.set(OZONE_SCM_DATANODE_ID_DIR, - new File(tempFolder.toFile(), UUID.randomUUID().toString()).getAbsoluteFile().toString()); startScmServer(); String originalScmID = scmServerImpl.getScmId(); File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index e2a15595b55..42835925f87 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -61,6 +61,7 @@ public Void run() throws IOException { } }); }); + e.printStackTrace(); assertInstanceOf(AccessControlException.class, e); } From f22f0d172b552359f9cfbd60ddde3a4bdd66c8fd Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 09:13:20 -0800 Subject: [PATCH 05/37] HDDS-11650. 
Remove from rocksdb Change-Id: Ibeadc9330185f699e4cf1d9c1c8631d1af52683e --- .../container/common/impl/ContainerSet.java | 25 +++++++++++-------- .../container/ozoneimpl/ContainerReader.java | 2 +- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 27df69af744..58ea326af48 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -155,7 +155,7 @@ public Container getContainer(long containerId) { } public boolean removeContainer(long containerId) { - return removeContainer(containerId, false); + return removeContainer(containerId, false, true); } /** @@ -164,22 +164,25 @@ public boolean removeContainer(long containerId) { * @return If container is removed from containerMap returns true, otherwise * false */ - public boolean removeContainer(long containerId, boolean markMissing) { + public boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) throws StorageContainerException { Preconditions.checkState(!readOnly, "Container Set is read-only."); Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); - AtomicReference> removed = new AtomicReference<>(); //We need to add to missing container set before removing containerMap since there could be write chunk operation // that could recreate the container in another volume if we remove it from the map before adding to missing // container. - containerMap.compute(containerId, (cid, value) -> { - if (markMissing) { - missingContainerSet.add(containerId); + if (markMissing) { + missingContainerSet.add(containerId); + } + Container removed = containerMap.remove(containerId); + if (removeFromDB) { + try { + containerIdsTable.delete(containerId); + } catch (IOException e) { + throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } - removed.set(value); - return null; - }); - if (removed.get() == null) { + } + if (removed == null) { LOG.debug("Container with containerId {} is not present in " + "containerMap", containerId); return false; @@ -240,7 +243,7 @@ public void handleVolumeFailures(StateContext context) { containerMap.values().forEach(c -> { ContainerData data = c.getContainerData(); if (data.getVolume().isFailed()) { - removeContainer(data.getContainerID(), true); + removeContainer(data.getContainerID(), true, false); LOG.debug("Removing Container {} as the Volume {} " + "has failed", data.getContainerID(), data.getVolume()); failedVolume.set(true); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 1685d1c5fe2..198ae8f65f2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -321,7 +321,7 @@ private void resolveDuplicate(KeyValueContainer existing, private void swapAndRemoveContainer(KeyValueContainer existing, KeyValueContainer toAdd) throws IOException { containerSet.removeContainer( -
existing.getContainerData().getContainerID()); + existing.getContainerData().getContainerID(), false, false); containerSet.addContainer(toAdd); KeyValueContainerUtil.removeContainer(existing.getContainerData(), hddsVolume.getConf()); From c94734ae7b975552f8372e199594ee71efe3807d Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 09:14:30 -0800 Subject: [PATCH 06/37] HDDS-11650. Fix checkstyle Change-Id: I5ac6a685a49e79be5ea43717294dd649383433f2 --- .../hadoop/ozone/container/common/impl/ContainerSet.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 58ea326af48..274a3ede2aa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -50,7 +50,6 @@ import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; @@ -164,7 +163,8 @@ public boolean removeContainer(long containerId) { * @return If container is removed from containerMap returns true, otherwise * false */ - public boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) throws StorageContainerException { + public boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) + throws StorageContainerException { Preconditions.checkState(!readOnly, "Container Set is read-only."); Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); From e579d0e726b9db02cc5a0d42c14d23626636aa24 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 09:31:11 -0800 Subject: [PATCH 07/37] HDDS-11650. 
Fix Issues Change-Id: I18f48f9d97b0cc16a3c97a3137ee01ebda4fcbec --- .../ozone/container/common/impl/ContainerSet.java | 11 ++++++----- .../container/common/volume/MutableVolumeSet.java | 8 +++++--- .../ozone/container/ozoneimpl/OzoneContainer.java | 3 ++- .../org/apache/hadoop/hdds/utils/VoidCallable.java | 9 +++++++++ 4 files changed, 22 insertions(+), 9 deletions(-) create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 274a3ede2aa..67bdbf97c02 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.UncheckedIOException; import java.time.Clock; import java.time.ZoneOffset; import java.util.ArrayList; @@ -153,7 +154,7 @@ public Container getContainer(long containerId) { return containerMap.get(containerId); } - public boolean removeContainer(long containerId) { + public boolean removeContainer(long containerId) throws StorageContainerException { return removeContainer(containerId, false, true); } @@ -237,20 +238,20 @@ public int containerCount() { * * @param context StateContext */ - public void handleVolumeFailures(StateContext context) { + public void handleVolumeFailures(StateContext context) throws StorageContainerException { AtomicBoolean failedVolume = new AtomicBoolean(false); AtomicInteger containerCount = new AtomicInteger(0); - containerMap.values().forEach(c -> { + for (Container c : containerMap.values()) { ContainerData data = c.getContainerData(); if (data.getVolume().isFailed()) { removeContainer(data.getContainerID(), true, false); LOG.debug("Removing Container {} as the Volume {} " + - "has failed", data.getContainerID(), data.getVolume()); + "has failed", data.getContainerID(), data.getVolume()); failedVolume.set(true); containerCount.incrementAndGet(); ContainerLogger.logLost(data, "Volume failure"); } - }); + } if (failedVolume.get()) { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index e195b127d49..fd1f721a4c4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -34,6 +35,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.VoidCallable; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -84,7 +86,7 @@ public class MutableVolumeSet implements VolumeSet { private String clusterID; private final StorageVolumeChecker volumeChecker; - private Runnable failedVolumeListener; + private VoidCallable failedVolumeListener; private StateContext context; private final StorageVolumeFactory volumeFactory; private final StorageVolume.VolumeType volumeType; @@ -132,7 +134,7 @@ public MutableVolumeSet(String dnUuid, String clusterID, initializeVolumeSet(); } - public void setFailedVolumeListener(Runnable runnable) { + public void setFailedVolumeListener(VoidCallable runnable) { failedVolumeListener = runnable; } @@ -255,7 +257,7 @@ private void handleVolumeFailures( } if (failedVolumeListener != null) { - failedVolumeListener.run(); + failedVolumeListener.call(); } // TODO: // 1. Consider stopping IO on open containers and tearing down diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index abf45afd06c..ac0837a4b58 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.token.TokenVerifier; @@ -547,7 +548,7 @@ public void stop() { } } - public void handleVolumeFailures() { + public void handleVolumeFailures() throws StorageContainerException { if (containerSet != null) { containerSet.handleVolumeFailures(context); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java new file mode 100644 index 00000000000..9c2917cab40 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java @@ -0,0 +1,9 @@ +package org.apache.hadoop.hdds.utils; + +/** + * Defines a functional interface to call void returning function. + */ +@FunctionalInterface +public interface VoidCallable { + void call() throws EXCEPTION_TYPE; + } \ No newline at end of file From 2c376bcadd82eaed5bdc70def203f943d3e0834e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 13:11:40 -0800 Subject: [PATCH 08/37] HDDS-11650. 
Fix checkstyle & rat Change-Id: I16cec52ea1c2853c80ee9a6e3279a23408d05651 --- .../container/common/impl/ContainerSet.java | 1 - .../common/volume/MutableVolumeSet.java | 1 - .../hadoop/hdds/utils/VoidCallable.java | 21 +++++++++++++++++-- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 67bdbf97c02..72d9a74beee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -37,7 +37,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.io.UncheckedIOException; import java.time.Clock; import java.time.ZoneOffset; import java.util.ArrayList; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index fd1f721a4c4..426012f3765 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantReadWriteLock; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java index 9c2917cab40..5d54b3f1077 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hdds.utils; /** @@ -5,5 +22,5 @@ */ @FunctionalInterface public interface VoidCallable { - void call() throws EXCEPTION_TYPE; - } \ No newline at end of file + void call() throws EXCEPTION_TYPE; +} \ No newline at end of file From 4b7748175bd2b7afc1c4e60aa7ee99e24f8866ee Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 13:26:40 -0800 Subject: [PATCH 09/37] HDDS-11650. Fix checkstyle & rat Change-Id: Icf779e2bff8ace1721b529e3c89edbe9effa9989 --- .../main/java/org/apache/hadoop/hdds/utils/VoidCallable.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java index 5d54b3f1077..5f0d1704abb 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java @@ -23,4 +23,4 @@ @FunctionalInterface public interface VoidCallable { void call() throws EXCEPTION_TYPE; -} \ No newline at end of file +} From 502702970a322a7cbbdc4bf9a243e3bf14be89f3 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 16:50:42 -0800 Subject: [PATCH 10/37] HDDS-11650. Fix tests failures Change-Id: I485646e86105a8a1bab6b638262669fc5f92d94d --- .../hadoop/ozone/container/common/impl/HddsDispatcher.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index d2117097cbd..63af0becf2a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -276,7 +276,7 @@ private ContainerCommandResponseProto dispatchRequest( getMissingContainerSet().remove(containerID); } } - if (cmdType != Type.CreateContainer && getMissingContainerSet().contains(containerID)) { + if (cmdType != Type.CreateContainer && !HddsUtils.isReadOnly(msg) && getMissingContainerSet().contains(containerID)) { StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", From c5392d0d4565ab071ab17943634adbc4e31bc9db Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 19:42:59 -0800 Subject: [PATCH 11/37] HDDS-11650. 
Fix tests failures Change-Id: I03ab7dd188ae39248ca889f40b9490eb2870579f --- .../container/common/impl/HddsDispatcher.java | 3 ++- .../metadata/MasterVolumeMetadataStore.java | 17 ++++------------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 63af0becf2a..130a6e34330 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -276,7 +276,8 @@ private ContainerCommandResponseProto dispatchRequest( getMissingContainerSet().remove(containerID); } } - if (cmdType != Type.CreateContainer && !HddsUtils.isReadOnly(msg) && getMissingContainerSet().contains(containerID)) { + if (cmdType != Type.CreateContainer && !HddsUtils.isReadOnly(msg) + && getMissingContainerSet().contains(containerID)) { StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java index 6d5c1acd1d2..e40fe1b5d1b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java @@ -30,26 +30,17 @@ import java.io.IOException; /** - * Singleton class for interacting with database in the master volume of a datanode. + * Class for interacting with database in the master volume of a datanode. */ public final class MasterVolumeMetadataStore extends AbstractRDBStore implements MetadataStore { private Table containerIdsTable; - private static ReferenceCountedDB instance = null; - public static ReferenceCountedDB get(ConfigurationSource conf) throws IOException { - if (instance == null || instance.isClosed()) { - synchronized (MasterVolumeMetadataStore.class) { - if (instance == null || instance.isClosed()) { - MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); - instance = new ReferenceCountedDB<>(masterVolumeMetadataStore, - masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); - } - } - } - return instance; + MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); + return new ReferenceCountedDB<>(masterVolumeMetadataStore, + masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); } private MasterVolumeMetadataStore(ConfigurationSource config, boolean openReadOnly) throws IOException { From 7c4837a60f29c309c46d15a29f0b6c0350f865b2 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 7 Nov 2024 21:57:04 -0800 Subject: [PATCH 12/37] HDDS-11650. 
Fix MasterVolumeMetaStore cache Change-Id: I82647ef09edc6fd9432652d911bf2ff4bccf25a5 --- .../metadata/MasterVolumeMetadataStore.java | 25 ++++++++++++++++--- .../hadoop/hdds/utils/db/DBStoreBuilder.java | 15 ++++++++--- hadoop-hdds/server-scm/pom.xml | 6 +++++ 3 files changed, 39 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java index e40fe1b5d1b..5ab6f5e8990 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java @@ -28,6 +28,9 @@ import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; /** * Class for interacting with database in the master volume of a datanode. @@ -36,11 +39,27 @@ public final class MasterVolumeMetadataStore extends AbstractRDBStore containerIdsTable; + private static final ConcurrentMap> INSTANCES = + new ConcurrentHashMap<>(); public static ReferenceCountedDB get(ConfigurationSource conf) throws IOException { - MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); - return new ReferenceCountedDB<>(masterVolumeMetadataStore, - masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); + String dbDirPath = DBStoreBuilder.getDBDirPath(MasterVolumeDBDefinition.get(), conf).getAbsolutePath(); + try { + return INSTANCES.compute(dbDirPath, (k, v) -> { + if (v == null || v.isClosed()) { + try { + MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); + return new ReferenceCountedDB<>(masterVolumeMetadataStore, + masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return v; + }); + } catch (UncheckedIOException e) { + throw e.getCause(); + } } private MasterVolumeMetadataStore(ConfigurationSource config, boolean openReadOnly) throws IOException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index ed8d145b666..9db581ee292 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -163,17 +163,24 @@ private DBStoreBuilder(ConfigurationSource configuration, OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT, StorageUnit.BYTES); } - private void applyDBDefinition(DBDefinition definition) { + public static File getDBDirPath(DBDefinition dbDefinition, + ConfigurationSource conf) { // Set metadata dirs. - File metadataDir = definition.getDBLocation(configuration); + File metadataDir = dbDefinition.getDBLocation(conf); if (metadataDir == null) { LOG.warn("{} is not configured. We recommend adding this setting. 
" + "Falling back to {} instead.", - definition.getLocationConfigKey(), + dbDefinition.getLocationConfigKey(), HddsConfigKeys.OZONE_METADATA_DIRS); - metadataDir = getOzoneMetaDirPath(configuration); + metadataDir = getOzoneMetaDirPath(conf); } + return metadataDir; + } + + private void applyDBDefinition(DBDefinition definition) { + // Set metadata dirs. + File metadataDir = getDBDirPath(definition, configuration); setName(definition.getName()); setPath(Paths.get(metadataDir.getPath())); diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 4c2e40c3759..a338f6352a5 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -76,6 +76,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-framework + + org.apache.ozone + hdds-server-framework + test + test-jar + org.apache.ozone From 1ae494bb792f1078eacfffd7b4c7d0ecdd399adf Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 8 Nov 2024 07:33:46 -0800 Subject: [PATCH 13/37] HDDS-11650. Fix MasterVolumeMetaStore cache Change-Id: I73091fc280dea5ad447b9df8bb0a1877d8f1ff35 --- .../statemachine/DatanodeStateMachine.java | 11 +--------- .../metadata/MasterVolumeMetadataStore.java | 3 +++ .../container/ozoneimpl/OzoneContainer.java | 20 +++++-------------- .../freon/ClosedContainerReplicator.java | 7 ++----- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 46532314d0b..ae01c6da756 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -59,10 +59,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.RefreshVolumeUsageCommandHandler; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.SetNodeOperationalStateCommandHandler; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; -import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicator; @@ -133,8 +131,6 @@ public class DatanodeStateMachine implements Closeable { private final DatanodeQueueMetrics queueMetrics; private final ReconfigurationHandler reconfigurationHandler; - private final ReferenceCountedDB masterVolumeMetadataStore; - /** * Constructs a datanode state machine. * @param datanodeDetails - DatanodeDetails used to identify a datanode @@ -182,11 +178,10 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, // OzoneContainer instance is used in a non-thread safe way by the context // past to its constructor, so we much synchronize its access. See // HDDS-3116 for more details. 
- this.masterVolumeMetadataStore = MasterVolumeMetadataStore.get(conf); constructionLock.writeLock().lock(); try { container = new OzoneContainer(hddsDatanodeService, this.datanodeDetails, - conf, context, certClient, secretKeyClient, masterVolumeMetadataStore); + conf, context, certClient, secretKeyClient); } finally { constructionLock.writeLock().unlock(); } @@ -452,10 +447,6 @@ public void close() throws IOException { if (nettyMetrics != null) { nettyMetrics.unregister(); } - - if (masterVolumeMetadataStore != null) { - masterVolumeMetadataStore.cleanup(); - } } private void executorServiceShutdownGraceful(ExecutorService executor) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java index 5ab6f5e8990..690a07dc44f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java @@ -46,6 +46,9 @@ public static ReferenceCountedDB get(ConfigurationSou String dbDirPath = DBStoreBuilder.getDBDirPath(MasterVolumeDBDefinition.get(), conf).getAbsolutePath(); try { return INSTANCES.compute(dbDirPath, (k, v) -> { + if (v != null) { + v.incrementReference(); + } if (v == null || v.isClosed()) { try { MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index ac0837a4b58..35cffc6ef83 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -152,10 +152,9 @@ enum InitializingStatus { * @throws IOException */ public OzoneContainer(HddsDatanodeService hddsDatanodeService, - DatanodeDetails datanodeDetails, ConfigurationSource conf, - StateContext context, CertificateClient certClient, - SecretKeyVerifierClient secretKeyClient, - ReferenceCountedDB masterVolumeMetadataStore) throws IOException { + DatanodeDetails datanodeDetails, ConfigurationSource conf, + StateContext context, CertificateClient certClient, + SecretKeyVerifierClient secretKeyClient) throws IOException { config = conf; this.datanodeDetails = datanodeDetails; this.context = context; @@ -191,8 +190,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, long recoveringContainerTimeout = config.getTimeDuration( OZONE_RECOVERING_CONTAINER_TIMEOUT, OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - this.masterVolumeMetadataStore = masterVolumeMetadataStore; - this.masterVolumeMetadataStore.incrementReference(); + this.masterVolumeMetadataStore = MasterVolumeMetadataStore.get(conf); containerSet = new ContainerSet(masterVolumeMetadataStore.getStore().getContainerIdsTable(), recoveringContainerTimeout); metadataScanner = null; @@ -307,14 +305,6 @@ public OzoneContainer( this(null, datanodeDetails, conf, context, null, null); } - public OzoneContainer(HddsDatanodeService hddsDatanodeService, - DatanodeDetails datanodeDetails, ConfigurationSource conf, - StateContext context, 
CertificateClient certClient, - SecretKeyVerifierClient secretKeyClient) throws IOException { - this(hddsDatanodeService, datanodeDetails, conf, context, certClient, secretKeyClient, - MasterVolumeMetadataStore.get(conf)); - } - public GrpcTlsConfig getTlsClientConfig() { return tlsClientConfig; } @@ -541,7 +531,7 @@ public void stop() { recoveringContainerScrubbingService.shutdown(); IOUtils.closeQuietly(metrics); ContainerMetrics.remove(); - if (this.masterVolumeMetadataStore != null) { + if (this.masterVolumeMetadataStore != null && !this.masterVolumeMetadataStore.isClosed()) { this.masterVolumeMetadataStore.decrementReference(); this.masterVolumeMetadataStore.cleanup(); this.masterVolumeMetadataStore = null; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 4db4231373d..7baa501aa55 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -184,12 +184,9 @@ private void initializeReplicationSupervisor( if (fakeDatanodeUuid.isEmpty()) { fakeDatanodeUuid = UUID.randomUUID().toString(); } - ReferenceCountedDB referenceCountedDS = - MasterVolumeMetadataStore.get(conf); - referenceCountedDS.incrementReference(); + ReferenceCountedDB referenceCountedDS = MasterVolumeMetadataStore.get(conf); this.masterVolumeMetadataStoreReferenceCountedDB = referenceCountedDS; - ContainerSet containerSet = new ContainerSet(referenceCountedDS.getStore() - .getContainerIdsTable(), 1000); + ContainerSet containerSet = new ContainerSet(referenceCountedDS.getStore().getContainerIdsTable(), 1000); ContainerMetrics metrics = ContainerMetrics.create(conf); From bdc2e502f2bed5ccb08a3029e84d554de0485e46 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 8 Nov 2024 08:04:00 -0800 Subject: [PATCH 14/37] HDDS-11650. Fix MasterVolumeMetaStore cache Change-Id: Ife63a4ab2a69869cce9d1c407bfdeba2540d2482 --- .../common/utils/ReferenceCountedHandle.java | 56 +++++++++++++++++++ .../metadata/MasterVolumeMetadataStore.java | 14 +++-- .../container/ozoneimpl/OzoneContainer.java | 9 ++- .../freon/ClosedContainerReplicator.java | 7 +-- 4 files changed, 72 insertions(+), 14 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java new file mode 100644 index 00000000000..e93d974aafe --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java @@ -0,0 +1,56 @@ +package org.apache.hadoop.ozone.container.common.utils; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +import org.apache.hadoop.ozone.container.metadata.AbstractStore; + +import java.io.Closeable; + +/** + * Class enclosing a reference counted handle to DBStore. + */ +public class ReferenceCountedHandle implements Closeable { + private final ReferenceCountedDB dbHandle; + private volatile boolean isClosed; + + //Provide a handle with an already incremented reference. + public ReferenceCountedHandle(ReferenceCountedDB dbHandle) { + this.dbHandle = dbHandle; + this.isClosed = false; + } + + public STORE getStore() { + return dbHandle.getStore(); + } + + @Override + public void close() { + if (!isClosed) { + synchronized (this) { + if (!isClosed) { + if (!dbHandle.isClosed()) { + dbHandle.decrementReference(); + dbHandle.cleanup(); + } + this.isClosed = true; + } + } + } + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java index 690a07dc44f..3965f94449a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import java.io.IOException; import java.io.UncheckedIOException; @@ -42,24 +43,27 @@ public final class MasterVolumeMetadataStore extends AbstractRDBStore> INSTANCES = new ConcurrentHashMap<>(); - public static ReferenceCountedDB get(ConfigurationSource conf) throws IOException { + public static ReferenceCountedHandle get(ConfigurationSource conf) throws IOException { String dbDirPath = DBStoreBuilder.getDBDirPath(MasterVolumeDBDefinition.get(), conf).getAbsolutePath(); try { - return INSTANCES.compute(dbDirPath, (k, v) -> { + return new ReferenceCountedHandle<>(INSTANCES.compute(dbDirPath, (k, v) -> { if (v != null) { v.incrementReference(); } if (v == null || v.isClosed()) { try { MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); - return new ReferenceCountedDB<>(masterVolumeMetadataStore, - masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); + ReferenceCountedDB referenceCountedDB = + new ReferenceCountedDB<>(masterVolumeMetadataStore, + masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); + referenceCountedDB.incrementReference(); + return referenceCountedDB; } catch (IOException e) { throw new UncheckedIOException(e); } } return v; - }); + })); } catch (UncheckedIOException e) { throw e.getCause(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 35cffc6ef83..1eaaf5fcbab 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -56,7 +56,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; @@ -136,7 +136,7 @@ public class OzoneContainer { private ScheduledExecutorService dbCompactionExecutorService; private final ContainerMetrics metrics; - private ReferenceCountedDB<MasterVolumeMetadataStore> masterVolumeMetadataStore; + private ReferenceCountedHandle<MasterVolumeMetadataStore> masterVolumeMetadataStore; enum InitializingStatus { UNINITIALIZED, INITIALIZING, INITIALIZED @@ -531,9 +531,8 @@ public void stop() { recoveringContainerScrubbingService.shutdown(); IOUtils.closeQuietly(metrics); ContainerMetrics.remove(); - if (this.masterVolumeMetadataStore != null && !this.masterVolumeMetadataStore.isClosed()) { - this.masterVolumeMetadataStore.decrementReference(); - this.masterVolumeMetadataStore.cleanup(); + if (this.masterVolumeMetadataStore != null) { + this.masterVolumeMetadataStore.close(); this.masterVolumeMetadataStore = null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 7baa501aa55..2c4beb048c6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore; @@ -84,7 +84,7 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements private ContainerReplicator replicator; private Timer timer; - private ReferenceCountedDB<MasterVolumeMetadataStore> masterVolumeMetadataStoreReferenceCountedDB; + private ReferenceCountedHandle<MasterVolumeMetadataStore> masterVolumeMetadataStoreReferenceCountedDB; private List<ReplicationTask> replicationTasks; @@ -149,7 +149,6 @@ public Void call() throws Exception { } finally { if (masterVolumeMetadataStoreReferenceCountedDB != null) { masterVolumeMetadataStoreReferenceCountedDB.close(); - masterVolumeMetadataStoreReferenceCountedDB.cleanup(); } } @@ -184,7 +183,7 @@ private void initializeReplicationSupervisor( if (fakeDatanodeUuid.isEmpty()) {
fakeDatanodeUuid = UUID.randomUUID().toString(); } - ReferenceCountedDB<MasterVolumeMetadataStore> referenceCountedDS = MasterVolumeMetadataStore.get(conf); + ReferenceCountedHandle<MasterVolumeMetadataStore> referenceCountedDS = MasterVolumeMetadataStore.get(conf); this.masterVolumeMetadataStoreReferenceCountedDB = referenceCountedDS; ContainerSet containerSet = new ContainerSet(referenceCountedDS.getStore().getContainerIdsTable(), 1000); From 06ca3479d7227d263962ea060da614c8050fe817 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 8 Nov 2024 08:29:56 -0800 Subject: [PATCH 15/37] HDDS-11650. Fix acceptance tests Change-Id: Ic9fe75b9efe885080e3ad440f132eb0100c41a17 --- hadoop-ozone/dist/src/main/compose/compatibility/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config | 2 +- .../dist/src/main/compose/ozone-om-prepare/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozone/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/restart/docker-config | 2 +- .../dist/src/main/compose/upgrade/compose/ha/docker-config | 2 +- .../dist/src/main/compose/upgrade/compose/non-ha/docker-config | 2 +- .../dist/src/main/compose/upgrade/compose/om-ha/docker-config | 2 +- hadoop-ozone/dist/src/main/compose/xcompat/docker-config | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config index d3984110d8d..f7f1c24b8a0 100644 --- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config +++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config @@ -21,7 +21,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config index 10d9f5c8cf5..f4866c4240d 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ -34,7 +34,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=100MB OZONE-SITE.XML_ozone.scm.block.size=20MB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB diff --git
a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config index 623f9595583..ba4d80a9d05 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index 08c490ea51f..ebf2ce532bd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -34,7 +34,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index 65834455eaa..ae2fb092be6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -23,7 +23,7 @@ OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config index 79d2e5285fb..f0ec8fcaa1a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config @@ -24,7 +24,7 @@ OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config index 8239aad2a5d..59b1fcf8cab 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config @@ -24,7 +24,7 @@ OZONE-SITE.XML_ozone.ozone.scm.block.size=64MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s 
OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index a657f22340e..f2a9e044793 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -29,7 +29,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config index 06696a0e413..87b0cb50537 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config @@ -19,7 +19,7 @@ CORE-SITE.XML_fs.defaultFS=ofs://om OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config index 66f4cf151ec..adfaeb287d0 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config @@ -17,7 +17,7 @@ CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.block.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index 38cc5b71a18..1495e89813a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -47,7 +47,7 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index 12a7819d1ad..2a58ffcf384 100644 --- 
a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -22,7 +22,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index 4f13d624969..387a1c8517e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/restart/docker-config b/hadoop-ozone/dist/src/main/compose/restart/docker-config index 161af7a2975..852eb6647c3 100644 --- a/hadoop-ozone/dist/src/main/compose/restart/docker-config +++ b/hadoop-ozone/dist/src/main/compose/restart/docker-config @@ -21,7 +21,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index a1b6da80c4b..d06d3279dc9 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -35,7 +35,7 @@ OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config index 88126ddf2cb..ce4a8807e54 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config @@ -25,7 +25,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm 
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config index 77fa2b40ee4..a049ba5f012 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config index 1a61aaf4f7e..746b2b6e943 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config +++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config @@ -32,7 +32,7 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 From 8f98ab9a242d8cd00fa1cf1a7818b7793292c7c4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 8 Nov 2024 16:03:05 -0800 Subject: [PATCH 16/37] HDDS-11650. 
Fix acceptance tests Change-Id: I5a8e092d8fb751a2ca69256740df59edd59b9b95 --- .../container/common/impl/ContainerSet.java | 21 ++++++++++++------- .../debug/container/ContainerCommands.java | 2 +- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 72d9a74beee..ce40b79f223 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -70,13 +70,18 @@ public class ContainerSet implements Iterable<Container<?>> { private Clock clock; private long recoveringTimeout; private final Table<Long, State> containerIdsTable; - private final boolean readOnly; public ContainerSet(Table<Long, State> continerIdsTable, long recoveringTimeout) { + this(continerIdsTable, recoveringTimeout, false); + } + + public ContainerSet(Table<Long, State> continerIdsTable, long recoveringTimeout, boolean readOnly) { this.clock = Clock.system(ZoneOffset.UTC); this.containerIdsTable = continerIdsTable; this.recoveringTimeout = recoveringTimeout; - this.readOnly = containerIdsTable == null; + if (!readOnly && containerIdsTable == null) { + throw new IllegalArgumentException("Container table cannot be null when container set is not read only"); + } } public long getCurrentTime() { @@ -105,7 +110,6 @@ public boolean addContainer(Container<?> container) throws StorageContainerExcep */ public boolean addContainer(Container<?> container, boolean overwriteMissingContainers) throws StorageContainerException { - Preconditions.checkState(!readOnly, "Container Set is read-only."); Preconditions.checkNotNull(container, "container cannot be null"); long containerId = container.getContainerData().getContainerID(); @@ -123,7 +127,9 @@ public boolean addContainer(Container<?> container, boolean overwriteMissingCont containerId); } try { - containerIdsTable.put(containerId, containerState); + if (containerIdsTable != null) { + containerIdsTable.put(containerId, containerState); + } } catch (IOException e) { throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } @@ -165,7 +171,6 @@ public boolean removeContainer(long containerId) throws StorageContainerExceptio */ public boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) throws StorageContainerException { - Preconditions.checkState(!readOnly, "Container Set is read-only."); Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); // We need to add to missing container set before removing containerMap since there could be write chunk operation @@ -177,7 +182,9 @@ public boolean removeContainer(long containerId, boolean markMissing, boolean re Container<?> removed = containerMap.remove(containerId); if (removeFromDB) { try { - containerIdsTable.delete(containerId); + if (containerIdsTable != null) { + containerIdsTable.delete(containerId); + } } catch (IOException e) { throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } @@ -200,8 +207,6 @@ public boolean removeContainer(long containerId, boolean markMissing, boolean re * otherwise false.
*/ public boolean removeRecoveringContainer(long containerId) { - Preconditions.checkState(!readOnly, - "Container Set is read-only."); Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); // it might take a little long time to iterate all the entries diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java index e1402bb80e1..47260d62f73 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java @@ -115,7 +115,7 @@ OzoneConfiguration getOzoneConf() { public void loadContainersFromVolumes() throws IOException { OzoneConfiguration conf = parent.getOzoneConf(); - ContainerSet containerSet = new ContainerSet(null, 1000); + ContainerSet containerSet = new ContainerSet(null, 1000, true); ContainerMetrics metrics = ContainerMetrics.create(conf); From 7d7f07842ae307374a525a6fc0ad052f306bfd88 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 8 Nov 2024 18:52:51 -0800 Subject: [PATCH 17/37] HDDS-11650. Add an integration test to test dn restarts with missing containers Change-Id: Ic67537ed852920d8945430665e22eeddc7350d6e --- .../container/common/impl/ContainerSet.java | 3 +- .../container/common/impl/HddsDispatcher.java | 3 +- .../container/keyvalue/KeyValueHandler.java | 17 ++- .../OnDemandContainerDataScanner.java | 3 + .../volume/TestVolumeSetDiskChecks.java | 1 + .../ozoneimpl/TestOzoneContainer.java | 139 +++++++++++++++++- 6 files changed, 158 insertions(+), 8 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index ce40b79f223..56a77f90379 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -114,8 +114,7 @@ public boolean addContainer(Container<?> container, boolean overwriteMissingCont long containerId = container.getContainerData().getContainerID(); State containerState = container.getContainerData().getState(); - if (!overwriteMissingContainers && missingContainerSet.contains(containerId) - && containerState != State.RECOVERING) { + if (!overwriteMissingContainers && missingContainerSet.contains(containerId)) { throw new StorageContainerException(String.format("Container with container Id %d is missing in the DN " + "and creation of containers with state %s is not allowed. 
Only recreation of container in RECOVERING state " + "is allowed.", containerId, containerState.toString()), ContainerProtos.Result.CONTAINER_MISSING); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 130a6e34330..d86cf4db163 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -175,7 +175,8 @@ private boolean canIgnoreException(Result result) { case CONTAINER_UNHEALTHY: case CLOSED_CONTAINER_IO: case DELETE_ON_OPEN_CONTAINER: - case UNSUPPORTED_REQUEST: // Blame client for sending unsupported request. + case UNSUPPORTED_REQUEST: + case CONTAINER_MISSING: // Blame client for sending unsupported request. return true; default: return false; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index dece7bf49d4..45666b76074 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -93,6 +93,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; + +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CLOSED_CONTAINER_IO; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR; @@ -120,8 +122,6 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; @@ -355,6 +355,15 @@ ContainerCommandResponseProto handleCreateContainer( } long containerID = request.getContainerID(); + State containerState = request.getCreateContainer().getState(); + + if (containerSet.getMissingContainerSet().contains(containerID) && containerState != RECOVERING) { + return ContainerUtils.logAndReturnError(LOG, + new StorageContainerException(String.format("Container with " + "container Id %d is " + + "missing in the DN and creation of containers with state %s is not allowed. 
Only recreation of container " + + "in RECOVERING state is allowed.", containerID, containerState.toString()), + ContainerProtos.Result.CONTAINER_MISSING), request); + } ContainerLayoutVersion layoutVersion = ContainerLayoutVersion.getConfiguredVersion(conf); @@ -379,7 +388,7 @@ ContainerCommandResponseProto handleCreateContainer( try { if (containerSet.getContainer(containerID) == null) { newContainer.create(volumeSet, volumeChoosingPolicy, clusterId); - created = containerSet.addContainer(newContainer); + created = containerSet.addContainer(newContainer, RECOVERING == newContainer.getContainerState()); } else { // The create container request for an already existing container can // arrive in case the ContainerStateMachine reapplies the transaction @@ -1071,7 +1080,7 @@ private void checkContainerOpen(KeyValueContainer kvContainer) * might already be in closing state here. */ if (containerState == State.OPEN || containerState == State.CLOSING - || containerState == State.RECOVERING) { + || containerState == RECOVERING) { return; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java index 44884c5c290..edac2f596ea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java @@ -80,6 +80,9 @@ public static synchronized void init( } private static boolean shouldScan(Container<?> container) { + if (container == null) { + return false; + } long containerID = container.getContainerData().getContainerID(); if (instance == null) { LOG.debug("Skipping on demand scan for container {} since scanner was " + diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 8dc7679d77e..3f790814cf9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -341,6 +341,7 @@ public void testVolumeFailure() throws IOException { conSet.handleVolumeFailures(stateContext); // ContainerID1 should be removed belonging to failed volume assertNull(conSet.getContainer(containerID1)); + assertTrue(conSet.getMissingContainerSet().contains(containerID1)); // ContainerID should exist belonging to normal volume assertNotNull(conSet.getContainer(containerID)); expectedReportCount.put( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1c5da04c0a3..8ba5a727ff5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl; +import org.apache.commons.io.FileUtils; import
org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -31,11 +32,13 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.io.IOException; import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; @@ -160,6 +163,134 @@ public void testOzoneContainerViaDataNode() throws Exception { } } + @Test + public void testOzoneContainerWithMissingContainer() throws Exception { + MiniOzoneCluster cluster = null; + try { + long containerID = + ContainerTestHelper.getTestContainerID(); + OzoneConfiguration conf = newOzoneConfiguration(); + + // Start ozone container Via Datanode create. + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build(); + cluster.waitForClusterToBeReady(); + + runTestOzoneContainerWithMissingContainer(cluster, containerID); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + private void runTestOzoneContainerWithMissingContainer( + MiniOzoneCluster cluster, long testContainerID) throws Exception { + ContainerProtos.ContainerCommandRequestProto + request, writeChunkRequest, putBlockRequest, + updateRequest1, updateRequest2; + ContainerProtos.ContainerCommandResponseProto response, + updateResponse1, updateResponse2; + XceiverClientGrpc client = null; + try { + // This client talks to ozone container via datanode. + client = createClientForTesting(cluster); + client.connect(); + Pipeline pipeline = client.getPipeline(); + createContainerForTesting(client, testContainerID); + writeChunkRequest = writeChunkForContainer(client, testContainerID, + 1024); + + DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0).getDatanodeDetails(); + File containerPath = + new File(cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(testContainerID) + .getContainerData().getContainerPath()); + cluster.getHddsDatanode(datanodeDetails).stop(); + FileUtils.deleteDirectory(containerPath); + + // Restart & Check if the container has been marked as missing, since the container directory has been deleted. 
+ cluster.restartHddsDatanode(datanodeDetails, false); + GenericTestUtils.waitFor(() -> { + try { + return cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getMissingContainerSet().contains(testContainerID); + } catch (IOException e) { + return false; + } + }, 1000, 30000); + + // Read Chunk + request = ContainerTestHelper.getReadChunkRequest( + pipeline, writeChunkRequest.getWriteChunk()); + + response = client.sendCommand(request); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + + response = createContainerForTesting(client, testContainerID); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Put Block + putBlockRequest = ContainerTestHelper.getPutBlockRequest( + pipeline, writeChunkRequest.getWriteChunk()); + + + response = client.sendCommand(putBlockRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Get Block + request = ContainerTestHelper. + getBlockRequest(pipeline, putBlockRequest.getPutBlock()); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + + // Delete Block and Delete Chunk are handled by BlockDeletingService + // ContainerCommandRequestProto DeleteBlock and DeleteChunk requests + // are deprecated + + // Update an existing container + Map<String, String> containerUpdate = new HashMap<>(); + containerUpdate.put("container_updated_key", "container_updated_value"); + updateRequest1 = ContainerTestHelper.getUpdateContainerRequest( + testContainerID, containerUpdate); + updateResponse1 = client.sendCommand(updateRequest1); + assertNotNull(updateResponse1); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, + updateResponse1.getResult()); + + // Update a non-existing container + long nonExistingContainerID = + ContainerTestHelper.getTestContainerID(); + updateRequest2 = ContainerTestHelper.getUpdateContainerRequest( + nonExistingContainerID, containerUpdate); + updateResponse2 = client.sendCommand(updateRequest2); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, + updateResponse2.getResult()); + + // Restarting again & checking if the container is still not present on disk and marked as missing, this is to + // ensure the previous write request didn't inadvertently create the container data.
+ cluster.restartHddsDatanode(datanodeDetails, false); + GenericTestUtils.waitFor(() -> { + try { + return cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getMissingContainerSet().contains(testContainerID); + } catch (IOException e) { + return false; + } + }, 1000, 30000); + + } finally { + if (client != null) { + client.close(); + } + } + } + public static void runTestOzoneContainerViaDataNode( long testContainerID, XceiverClientSpi client) throws Exception { ContainerProtos.ContainerCommandRequestProto @@ -175,6 +306,7 @@ public static void runTestOzoneContainerViaDataNode( writeChunkRequest = writeChunkForContainer(client, testContainerID, 1024); + // Read Chunk request = ContainerTestHelper.getReadChunkRequest( pipeline, writeChunkRequest.getWriteChunk()); @@ -506,10 +638,14 @@ private static XceiverClientGrpc createClientForTesting( MiniOzoneCluster cluster) { Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipelines().iterator().next(); + return createClientForTesting(pipeline, cluster); + } + + private static XceiverClientGrpc createClientForTesting(Pipeline pipeline, MiniOzoneCluster cluster) { return new XceiverClientGrpc(pipeline, cluster.getConf()); } - public static void createContainerForTesting(XceiverClientSpi client, + public static ContainerProtos.ContainerCommandResponseProto createContainerForTesting(XceiverClientSpi client, long containerID) throws Exception { // Create container ContainerProtos.ContainerCommandRequestProto request = @@ -518,6 +654,7 @@ public static void createContainerForTesting(XceiverClientSpi client, ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(request); assertNotNull(response); + return response; } public static ContainerProtos.ContainerCommandRequestProto From 108bf82d82821383f773c9c4e595bdda18e0eac1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 15 Nov 2024 14:55:45 -0800 Subject: [PATCH 18/37] HDDS-11650. Address review comments Change-Id: Icf8b45e0c2de6d353f3f880c441de7d7a6138009 --- .../ozone/container/common/impl/ContainerSet.java | 15 ++++++++++----- .../ozone/container/keyvalue/KeyValueHandler.java | 12 ++++++------ 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 56a77f90379..62e2216eb69 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -102,6 +102,14 @@ public boolean addContainer(Container<?> container) throws StorageContainerExcep return addContainer(container, false); } + public void validateContainerIsMissing(long containerId, State state) throws StorageContainerException { + if (missingContainerSet.contains(containerId)) { + throw new StorageContainerException(String.format("Container with container Id %d with state : %s is missing in" + + " the DN.", containerId, state), + ContainerProtos.Result.CONTAINER_MISSING); + } + } + /** * Add Container to container map.
* @param container container to be added @@ -114,11 +122,8 @@ public boolean addContainer(Container<?> container, boolean overwriteMissingCont long containerId = container.getContainerData().getContainerID(); State containerState = container.getContainerData().getState(); - if (!overwriteMissingContainers && missingContainerSet.contains(containerId)) { - throw new StorageContainerException(String.format("Container with container Id %d is missing in the DN " + - "and creation of containers with state %s is not allowed. Only recreation of container in RECOVERING state " + - "is allowed.", containerId, containerState.toString()), ContainerProtos.Result.CONTAINER_MISSING); - + if (!overwriteMissingContainers) { + validateContainerIsMissing(containerId, containerState); } if (containerMap.putIfAbsent(containerId, container) == null) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 45666b76074..198b8d5cf16 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -357,12 +357,12 @@ ContainerCommandResponseProto handleCreateContainer( long containerID = request.getContainerID(); State containerState = request.getCreateContainer().getState(); - if (containerSet.getMissingContainerSet().contains(containerID) && containerState != RECOVERING) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException(String.format("Container with " + "container Id %d is " + - "missing in the DN and creation of containers with state %s is not allowed. Only recreation of container " + - "in RECOVERING state is allowed.", containerID, containerState.toString()), - ContainerProtos.Result.CONTAINER_MISSING), request); + if (containerState != RECOVERING) { + try { + containerSet.validateContainerIsMissing(containerID, containerState); + } catch (StorageContainerException ex) { + return ContainerUtils.logAndReturnError(LOG, ex, request); + } } ContainerLayoutVersion layoutVersion = From af0f757622a82843864bf5aaa93b2470a398f442 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 17 Nov 2024 14:00:02 -0800 Subject: [PATCH 19/37] HDDS-11650.
Address review comments Change-Id: I7ead428f7ff82968a0f1e5058bbf65f3b807bdb9 --- hadoop-hdds/container-service/pom.xml | 6 ------ .../container/common/impl/ContainerSet.java | 7 ++++++- .../container/common/ContainerTestUtils.java | 3 +-- .../common/TestBlockDeletingService.java | 17 ++++++++--------- .../TestSchemaOneBackwardsCompatibility.java | 3 +-- .../TestSchemaTwoBackwardsCompatibility.java | 3 +-- ...taleRecoveringContainerScrubbingService.java | 3 +-- .../TestContainerDeletionChoosingPolicy.java | 5 ++--- .../common/impl/TestContainerPersistence.java | 3 +-- .../container/common/impl/TestContainerSet.java | 9 ++++----- .../common/impl/TestHddsDispatcher.java | 7 +++---- .../TestCloseContainerCommandHandler.java | 3 +-- .../TestDeleteBlocksCommandHandler.java | 3 +-- .../common/volume/TestStorageVolumeChecker.java | 1 + .../common/volume/TestVolumeSetDiskChecks.java | 3 +-- .../container/keyvalue/TestKeyValueHandler.java | 5 ++--- .../ozoneimpl/TestContainerReader.java | 7 +++---- .../replication/TestContainerImporter.java | 7 +++---- .../replication/TestGrpcReplicationService.java | 3 +-- .../replication/TestReplicationSupervisor.java | 3 +-- .../TestSendContainerRequestHandler.java | 5 ++--- .../upgrade/TestDatanodeUpgradeToScmHA.java | 2 ++ .../hadoop/hdds/utils/db/DBTestUtils.java | 0 hadoop-hdds/pom.xml | 8 -------- hadoop-hdds/server-scm/pom.xml | 6 ------ hadoop-hdds/tools/pom.xml | 6 ------ .../container/upgrade/TestUpgradeManager.java | 3 +-- .../ozone/client/TestOzoneClientFactory.java | 1 - .../transport/server/ratis/TestCSMMetrics.java | 3 +-- .../container/metrics/TestContainerMetrics.java | 5 ++--- .../container/ozoneimpl/TestOzoneContainer.java | 1 - .../container/server/TestContainerServer.java | 5 ++--- .../server/TestSecureContainerServer.java | 5 ++--- 33 files changed, 54 insertions(+), 97 deletions(-) rename hadoop-hdds/framework/src/{test => main}/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java (100%) diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index d756ad1b066..c21ca8203b5 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -71,12 +71,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-framework - - org.apache.ozone - hdds-server-framework - test-jar - test - org.apache.ozone hdds-client diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index cc52098465d..a49f429e056 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -27,12 +27,12 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.ContainerLogger; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,6 +71,11 
@@ public class ContainerSet implements Iterable<Container<?>> { private long recoveringTimeout; private final Table<Long, State> containerIdsTable; + @VisibleForTesting + public ContainerSet(long recoveringTimeout) { + this(DBTestUtils.getInMemoryTableForTest(), recoveringTimeout); + } + public ContainerSet(Table<Long, State> continerIdsTable, long recoveringTimeout) { this(continerIdsTable, recoveringTimeout, false); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 4c6b53d4139..4347e0013ac 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.retry.RetryPolicies; @@ -337,7 +336,7 @@ public static ContainerDispatcher getNoopContainerDispatcher() { } private static final ContainerController EMPTY_CONTAINER_CONTROLLER - = new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Collections.emptyMap()); + = new ContainerController(new ContainerSet(1000), Collections.emptyMap()); public static ContainerController getEmptyContainerController() { return EMPTY_CONTAINER_CONTROLLER; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 6edb84cc691..744b310923e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.CodecBuffer; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.Checksum; @@ -430,7 +429,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(blockDeleteLimit); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); // Create one container with no actual pending delete blocks, but an // incorrect metadata value indicating it has enough pending deletes to @@ -538,7 +537,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(2); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); +
ContainerSet containerSet = new ContainerSet(1000); createToDeleteBlocks(containerSet, 1, 3, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = @@ -664,7 +663,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(2); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); createToDeleteBlocks(containerSet, numOfContainers, numOfBlocksPerContainer, numOfChunksPerBlock); @@ -774,7 +773,7 @@ public void testShutdownService(ContainerTestVersionInfo versionInfo) conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); // Create 1 container with 100 blocks createToDeleteBlocks(containerSet, 1, 100, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); @@ -805,7 +804,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); createToDeleteBlocks(containerSet, 1, 3, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = @@ -907,7 +906,7 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(1); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); int containerCount = 2; int chunksPerBlock = 10; @@ -967,7 +966,7 @@ public void testContainerMaxLockHoldingTime( dnConf.setBlockDeletingMaxLockHoldingTime(Duration.ofMillis(-1)); dnConf.setBlockDeletionLimit(3); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); int containerCount = 1; int chunksPerBlock = 10; @@ -1031,7 +1030,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(10); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index a020c6bc9f8..a6cb9b3e10b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -23,7 +23,6 @@ import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -572,7 +571,7 @@ private void runBlockDeletingService(KeyValueHandler keyValueHandler) } private ContainerSet makeContainerSet() throws Exception { - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); KeyValueContainer container = new KeyValueContainer(newKvData(), conf); containerSet.addContainer(container); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 8d6c9b4a233..4bd32eda94e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -136,7 +135,7 @@ public void setup() throws Exception { blockManager = new BlockManagerImpl(conf); chunkManager = new FilePerBlockStrategy(true, blockManager, volumeSet); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, ContainerMetrics.create(conf), c -> { }); ozoneContainer = mock(OzoneContainer.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java index 027bc598048..644ee014e9f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -147,7 +146,7 @@ private List createTestContainers( public void testScrubbingStaleRecoveringContainers( ContainerTestVersionInfo versionInfo) throws Exception { initVersionInfo(versionInfo); - ContainerSet containerSet = new 
ContainerSet(DBTestUtils.getInMemoryTableForTest(), 10); + ContainerSet containerSet = new ContainerSet(10); containerSet.setClock(testClock); StaleRecoveringContainerScrubbingService srcss = new StaleRecoveringContainerScrubbingService( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index c3932a6286e..890bca18cb1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -30,7 +30,6 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -86,7 +85,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) RandomContainerDeletionChoosingPolicy.class.getName()); List pathLists = new LinkedList<>(); pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath())); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); int numContainers = 10; for (int i = 0; i < numContainers; i++) { @@ -149,7 +148,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) TopNOrderedContainerDeletionChoosingPolicy.class.getName()); List pathLists = new LinkedList<>(); pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath())); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); int numContainers = 10; Random random = new Random(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index d27728fdbb1..3ff8f9e625d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -153,7 +152,7 @@ public static void shutdown() throws IOException { @BeforeEach public void setupPaths() throws IOException { - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); volumeSet = new MutableVolumeSet(DATANODE_UUID, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); // Initialize volume directories. 
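The test hunks in this patch all make the same mechanical change: the two-argument ContainerSet construction, which threads a test-only in-memory table through every call site, is reverted to the original single-argument form. A minimal sketch of the two shapes, assuming only the constructor signatures implied by the hunks (the numeric argument appears to be the recovering-container timeout, and the generic parameters of the test table are not visible in this flattened patch):

    // After this patch: tests pass only the recovering-container timeout.
    ContainerSet containerSet = new ContainerSet(1000);

    // Before this patch: an in-memory table from DBTestUtils.getInMemoryTableForTest()
    // stood in for the per-datanode container list so tests needed no real RocksDB.
    ContainerSet tableBackedSet =
        new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000);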
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index 98d9e7b7719..1f1d24bcad9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; @@ -69,7 +68,7 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) { public void testAddGetRemoveContainer(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); long containerId = 100L; ContainerProtos.ContainerDataProto.State state = ContainerProtos .ContainerDataProto.State.CLOSED; @@ -158,7 +157,7 @@ public void testIteratorPerVolume(ContainerLayoutVersion layout) HddsVolume vol2 = mock(HddsVolume.class); when(vol2.getStorageID()).thenReturn("uuid-2"); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); for (int i = 0; i < 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layout, @@ -201,7 +200,7 @@ public void iteratorIsOrderedByScanTime(ContainerLayoutVersion layout) HddsVolume vol = mock(HddsVolume.class); when(vol.getStorageID()).thenReturn("uuid-1"); Random random = new Random(); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); int containerCount = 50; for (int i = 0; i < containerCount; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, @@ -299,7 +298,7 @@ private static void assertContainerIds(int startId, int count, } private ContainerSet createContainerSet() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); for (int i = FIRST_ID; i < FIRST_ID + 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layoutVersion, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 7d466fb6695..a15db18eba1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import 
org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; @@ -132,7 +131,7 @@ public void testContainerCloseActionWhenFull( try { UUID scmId = UUID.randomUUID(); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); StateContext context = ContainerTestUtils.getMockContext(dd, conf); KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, @@ -267,7 +266,7 @@ public void testContainerCloseActionWhenVolumeFull( .thenReturn(Collections.singletonList(volumeBuilder.build())); try { UUID scmId = UUID.randomUUID(); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); StateContext context = ContainerTestUtils.getMockContext(dd, conf); // create a 50 byte container KeyValueContainerData containerData = new KeyValueContainerData(1L, @@ -517,7 +516,7 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf, TokenVerifier tokenVerifier) throws IOException { - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); volumeSet.getVolumesList().stream().forEach(v -> { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 7bb18d37e0b..2e1e0eafd01 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; @@ -91,7 +90,7 @@ private void init() throws Exception { pipelineID.getId().toString(), null); container = new KeyValueContainer(data, conf); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); containerSet.addContainer(container); containerHandler = mock(Handler.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 7b20f856f17..dcabad46ac5 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics; @@ -112,7 +111,7 @@ private void setup() throws Exception { conf = new OzoneConfiguration(); layout = ContainerLayoutVersion.FILE_PER_BLOCK; ozoneContainer = mock(OzoneContainer.class); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); volume1 = mock(HddsVolume.class); when(volume1.getStorageID()).thenReturn("uuid-1"); for (int i = 0; i <= 10; i++) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java index 0311be1c0d2..6900131caa3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java @@ -218,6 +218,7 @@ public void testVolumeDeletion(VolumeCheckResult checkResult, ContainerLayoutVersion layout, TestInfo testInfo) throws Exception { initTest(checkResult, layout); LOG.info("Executing {}", testInfo.getTestMethod()); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); dnConf.setDiskCheckMinGap(Duration.ofMillis(0)); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 3f790814cf9..0b24161aadb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; @@ -269,7 +268,7 @@ public void testVolumeFailure() throws IOException { new DummyChecker(conf, new Timer(), 0); OzoneContainer ozoneContainer = mock(OzoneContainer.class); - ContainerSet conSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 20); + ContainerSet conSet = new ContainerSet(20); when(ozoneContainer.getContainerSet()).thenReturn(conSet); String path = 
dir.getPath(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 2e84fd2cdc8..2637f1922c6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; @@ -270,7 +269,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception { volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); try { - ContainerSet cset = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet cset = new ContainerSet(1000); int[] interval = new int[1]; interval[0] = 2; ContainerMetrics metrics = new ContainerMetrics(interval); @@ -356,7 +355,7 @@ public void testDeleteContainer() throws IOException { final String clusterId = UUID.randomUUID().toString(); final String datanodeId = UUID.randomUUID().toString(); final ConfigurationSource conf = new OzoneConfiguration(); - final ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + final ContainerSet containerSet = new ContainerSet(1000); final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); HddsVolume hddsVolume = new HddsVolume.Builder(testDir).conf(conf) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index f1fdf66ef94..0ad73342725 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -106,7 +105,7 @@ private void setup(ContainerTestVersionInfo versionInfo) throws Exception { Files.createDirectory(tempDir.resolve("volumeDir")).toFile(); this.conf = new OzoneConfiguration(); volumeSet = mock(MutableVolumeSet.class); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); datanodeId = UUID.randomUUID(); hddsVolume = new HddsVolume.Builder(volumeDir @@ -270,7 +269,7 @@ public void testContainerReaderWithLoadException( setup(versionInfo); MutableVolumeSet volumeSet1; HddsVolume hddsVolume1; - ContainerSet containerSet1 = new 
ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet1 = new ContainerSet(1000); File volumeDir1 = Files.createDirectory(tempDir.resolve("volumeDir" + 1)).toFile(); RoundRobinVolumeChoosingPolicy volumeChoosingPolicy1; @@ -320,7 +319,7 @@ public void testContainerReaderWithInvalidDbPath( setup(versionInfo); MutableVolumeSet volumeSet1; HddsVolume hddsVolume1; - ContainerSet containerSet1 = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet1 = new ContainerSet(1000); File volumeDir1 = Files.createDirectory(tempDir.resolve("volumeDirDbDelete")).toFile(); RoundRobinVolumeChoosingPolicy volumeChoosingPolicy1; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java index d6b91cb35c1..1b989e6bc7f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; @@ -87,7 +86,7 @@ void importSameContainerWhenAlreadyImport() throws Exception { KeyValueContainer container = new KeyValueContainer(containerData, conf); ContainerController controllerMock = mock(ContainerController.class); // create containerImporter object - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0); + ContainerSet containerSet = new ContainerSet(0); containerSet.addContainer(container); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); @@ -118,7 +117,7 @@ void importSameContainerWhenFirstInProgress() throws Exception { return container; }); // create containerImporter object - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0); + ContainerSet containerSet = new ContainerSet(0); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, @@ -157,7 +156,7 @@ public void testInconsistentChecksumContainerShouldThrowError() throws Exception doNothing().when(containerData).setChecksumTo0ByteArray(); // create containerImporter object ContainerController controllerMock = mock(ContainerController.class); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0); + ContainerSet containerSet = new ContainerSet(0); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = spy(new ContainerImporter(conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index df7a36c0200..dda87cff0df 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.security.SecurityConfig; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; @@ -95,7 +94,7 @@ public void init() throws Exception { SecurityConfig secConf = new SecurityConfig(conf); - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); DatanodeDetails.Builder dn = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index f3e2cd66080..315e0c0253b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; @@ -130,7 +129,7 @@ public class TestReplicationSupervisor { @BeforeEach public void setUp() throws Exception { clock = new TestClock(Instant.now(), ZoneId.systemDefault()); - set = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + set = new ContainerSet(1000); DatanodeStateMachine stateMachine = mock(DatanodeStateMachine.class); context = new StateContext( new OzoneConfiguration(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index 7615301849d..baaf296f02b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import 
org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -56,11 +55,11 @@ void setup() { void testReceiveDataForExistingContainer() throws Exception { long containerId = 1; // create containerImporter - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0); + ContainerSet containerSet = new ContainerSet(0); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, - new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 0), mock(ContainerController.class), volumeSet); + new ContainerSet(0), mock(ContainerController.class), volumeSet); KeyValueContainerData containerData = new KeyValueContainerData(containerId, ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test"); // add container to container set diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 0ed7d195bf3..d4a27e74cda 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -225,6 +225,7 @@ public void testFailedVolumeDuringFinalization(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); /// SETUP /// + startScmServer(); String originalScmID = scmServerImpl.getScmId(); File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); @@ -318,6 +319,7 @@ public void testFailedVolumeDuringFinalization(boolean enableSCMHA) public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception { setScmHAEnabled(enableSCMHA); /// SETUP /// + startScmServer(); String originalScmID = scmServerImpl.getScmId(); File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java similarity index 100% rename from hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java rename to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index d08daac9bdf..0237210d2fc 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -194,14 +194,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test - - org.apache.ozone - hdds-server-framework - ${hdds.version} - test-jar - test - - org.apache.ozone hdds-container-service diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index a338f6352a5..4c2e40c3759 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -76,12 +76,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-framework - - org.apache.ozone - hdds-server-framework - test - test-jar - org.apache.ozone diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 700bff59403..583c801bcd4 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -57,12 +57,6 
@@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-framework - - org.apache.ozone - hdds-server-framework - test-jar - test - org.apache.ozone hdds-client diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index 3db6e4ffa87..b3c15a46f76 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.CodecTestUtil; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; @@ -134,7 +133,7 @@ public void setup() throws Exception { return volumes.get(ii); }); - containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + containerSet = new ContainerSet(1000); blockManager = new BlockManagerImpl(CONF); chunkManager = new FilePerBlockStrategy(true, blockManager, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index 42835925f87..e2a15595b55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -61,7 +61,6 @@ public Void run() throws IOException { } }); }); - e.printStackTrace(); assertInstanceOf(AccessControlException.class, e); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index e7570f29229..95aeb649024 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.RatisTestHelper; @@ -190,7 +189,7 @@ static XceiverServerRatis newXceiverServerRatis( final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, - new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()), + new ContainerController(new ContainerSet(1000), Maps.newHashMap()), null, null); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index aecb8250e41..bc2dcda8e54 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -135,7 +134,7 @@ private static MutableVolumeSet createVolumeSet(DatanodeDetails dn, String path) } private HddsDispatcher createDispatcher(DatanodeDetails dd, VolumeSet volumeSet) { - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); StateContext context = ContainerTestUtils.getMockContext( dd, CONF); ContainerMetrics metrics = ContainerMetrics.create(CONF); @@ -244,7 +243,7 @@ private XceiverServerSpi newXceiverServerRatis(DatanodeDetails dn, MutableVolume final ContainerDispatcher dispatcher = createDispatcher(dn, volumeSet); return XceiverServerRatis.newXceiverServerRatis(null, dn, CONF, dispatcher, - new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()), + new ContainerController(new ContainerSet(1000), Maps.newHashMap()), null, null); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 2ded82dc5dc..99c3786233c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -304,7 +304,6 @@ public static void runTestOzoneContainerViaDataNode( writeChunkRequest = writeChunkForContainer(client, testContainerID, 1024); - // Read Chunk request = ContainerTestHelper.getReadChunkRequest( pipeline, writeChunkRequest.getWriteChunk()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 1387d8accc3..dae2d50bcd6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.RatisTestHelper; @@ -132,7 +131,7 @@ static XceiverServerRatis newXceiverServerRatis( final ContainerDispatcher dispatcher = new 
TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, - new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()), + new ContainerController(new ContainerSet(1000), Maps.newHashMap()), caClient, null); } @@ -189,7 +188,7 @@ static void runTestClientServer( private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 13b3f4d43aa..af72dae912b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.RatisTestHelper; @@ -167,7 +166,7 @@ public void testClientServer() throws Exception { private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { - ContainerSet containerSet = new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000); + ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); @@ -213,7 +212,7 @@ XceiverServerRatis newXceiverServerRatis( final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, - new ContainerController(new ContainerSet(DBTestUtils.getInMemoryTableForTest(), 1000), Maps.newHashMap()), + new ContainerController(new ContainerSet(1000), Maps.newHashMap()), caClient, null); } From b97d874de3f1cf3e6e4ac134e0393215bfaa443b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 17 Nov 2024 15:16:33 -0800 Subject: [PATCH 20/37] HDDS-11650. 
Reduce number of files changed Change-Id: I4476c42d2c8c11af64fc5808eafcecc9046bd2e8 --- .../common/interfaces/BaseDBHandle.java | 56 +++++++++++ .../container/common/interfaces/DBHandle.java | 37 +------- .../DeleteBlocksCommandHandler.java | 5 +- .../common/utils/BaseReferenceCountedDB.java | 93 +++++++++++++++++++ .../common/utils/ContainerCache.java | 16 ++-- .../ozone/container/common/utils/RawDB.java | 2 +- .../common/utils/ReferenceCountedDB.java | 6 +- .../common/utils/ReferenceCountedHandle.java | 4 +- .../container/keyvalue/KeyValueContainer.java | 3 +- .../keyvalue/KeyValueContainerCheck.java | 7 +- .../keyvalue/KeyValueContainerData.java | 7 +- .../container/keyvalue/KeyValueHandler.java | 2 +- .../keyvalue/helpers/BlockUtils.java | 6 +- .../helpers/KeyValueContainerUtil.java | 7 +- .../keyvalue/impl/BlockManagerImpl.java | 15 ++- .../background/BlockDeletingTask.java | 11 +-- .../metadata/MasterVolumeMetadataStore.java | 8 +- .../common/TestBlockDeletingService.java | 32 +++---- .../TestSchemaOneBackwardsCompatibility.java | 24 ++--- .../TestSchemaTwoBackwardsCompatibility.java | 9 +- .../keyvalue/TestKeyValueBlockIterator.java | 5 +- .../keyvalue/TestKeyValueContainer.java | 12 +-- .../keyvalue/TestKeyValueContainerCheck.java | 5 +- .../TestKeyValueContainerIntegrityChecks.java | 3 +- ...estKeyValueContainerMetadataInspector.java | 4 +- .../ozoneimpl/TestContainerReader.java | 11 +-- .../ozoneimpl/TestOzoneContainer.java | 3 +- .../hdds/scm/TestStorageContainerManager.java | 4 +- .../hdds/utils/ClusterContainersUtil.java | 3 +- .../ozone/client/rpc/OzoneRpcClientTests.java | 5 +- .../rpc/TestFailureHandlingByClient.java | 5 +- .../rpc/TestValidateBCSIDOnRestart.java | 3 +- .../commandhandler/TestBlockDeletion.java | 5 +- .../TestDeleteContainerHandler.java | 5 +- 34 files changed, 264 insertions(+), 159 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java new file mode 100644 index 00000000000..aaaa0128109 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.container.common.interfaces; + +import org.apache.hadoop.ozone.container.metadata.AbstractStore; + +import java.io.Closeable; + +/** + * DB handle abstract class. + */ +public abstract class BaseDBHandle<STORE extends AbstractStore> implements Closeable { + + private final STORE store; + private final String containerDBPath; + + public BaseDBHandle(STORE store, String containerDBPath) { + this.store = store; + this.containerDBPath = containerDBPath; + } + + public STORE getStore() { + return this.store; + } + + public String getContainerDBPath() { + return this.containerDBPath; + } + + public boolean cleanup() { + return true; + } + + @Override + public String toString() { + return "DBHandle{" + + "containerDBPath='" + containerDBPath + '\'' + + ", store=" + store + + '}'; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java index 9f611af511b..aea67917b2f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java @@ -17,40 +17,13 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.ozone.container.metadata.AbstractStore; - -import java.io.Closeable; +import org.apache.hadoop.ozone.container.metadata.DatanodeStore; /** - * DB handle abstract class. + * DB handle abstract class for datanode store. */ -public abstract class DBHandle<STORE extends AbstractStore> implements Closeable { - - private final STORE store; - private final String containerDBPath; - - public DBHandle(STORE store, String containerDBPath) { - this.store = store; - this.containerDBPath = containerDBPath; - } - - public STORE getStore() { - return this.store; - } - - public String getContainerDBPath() { - return this.containerDBPath; - } - - public boolean cleanup() { - return true; - } - - @Override - public String toString() { - return "DBHandle{" + - "containerDBPath='" + containerDBPath + '\'' + - ", store=" + store + - '}'; +public abstract class DBHandle extends BaseDBHandle<DatanodeStore> { + public DBHandle(DatanodeStore store, String containerDBPath) { + super(store, containerDBPath); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 444ad9237a1..136c5805821 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -49,7 +49,6 @@ import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CommandStatus; @@ -550,7 +549,7 @@ private void markBlocksForDeletionSchemaV1( return; } int
newDeletionBlocks = 0; - try (DBHandle<DatanodeStore> containerDB = BlockUtils.getDB(containerData, conf)) { + try (DBHandle containerDB = BlockUtils.getDB(containerData, conf)) { Table blockDataTable = containerDB.getStore().getBlockDataTable(); Table deletedBlocksTable = @@ -608,7 +607,7 @@ private void markBlocksForDeletionSchemaV1( private void updateMetaData(KeyValueContainerData containerData, DeletedBlocksTransaction delTX, int newDeletionBlocks, - DBHandle<DatanodeStore> containerDB, BatchOperation batchOperation) + DBHandle containerDB, BatchOperation batchOperation) throws IOException { if (newDeletionBlocks > 0) { // Finally commit the DB counters. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java new file mode 100644 index 00000000000..e34e7b07c93 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.utils; + +import com.google.common.base.Preconditions; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.apache.hadoop.ozone.container.common.interfaces.BaseDBHandle; +import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.metadata.AbstractStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +
import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Class to implement reference counting over instances of a db handle.
+ */ +public class BaseReferenceCountedDB<STORE extends AbstractStore> extends BaseDBHandle<STORE> { + private static final Logger LOG = + LoggerFactory.getLogger(BaseReferenceCountedDB.class); + private final AtomicInteger referenceCount; + + public BaseReferenceCountedDB(STORE store, String containerDBPath) { + super(store, containerDBPath); + this.referenceCount = new AtomicInteger(0); + } + + public void incrementReference() { + this.referenceCount.incrementAndGet(); + if (LOG.isTraceEnabled()) { + LOG.trace("IncRef {} to refCnt {}, stackTrace: {}", getContainerDBPath(), + referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); + } + } + + public void decrementReference() { + int refCount = this.referenceCount.decrementAndGet(); + Preconditions.checkArgument(refCount >= 0, "refCount:", refCount); + if (LOG.isTraceEnabled()) { + LOG.trace("DecRef {} to refCnt {}, stackTrace: {}", getContainerDBPath(), + referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); + } + } + + public boolean cleanup() { + if (getStore() != null && getStore().isClosed() + || referenceCount.get() == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug("Close {} refCnt {}", getContainerDBPath(), + referenceCount.get()); + } + try { + getStore().stop(); + return true; + } catch (Exception e) { + LOG.error("Error closing DB. Container: " + getContainerDBPath(), e); + return false; + } + } else { + return false; + } + } + + @Override + public void close() throws IOException { + decrementReference(); + } + + /** + * Returns if the underlying DB is closed. This call is threadsafe. + * @return true if the DB is closed. + */ + public boolean isClosed() { + return getStore().isClosed(); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index 2e68a4b1ef1..741c65e130a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -108,7 +108,7 @@ public void shutdownCache() { */ @Override protected boolean removeLRU(LinkEntry entry) { - ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue(); + ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue(); lock.lock(); try { metrics.incNumCacheEvictions(); @@ -128,21 +128,21 @@ protected boolean removeLRU(LinkEntry entry) { * @param conf - Hadoop Configuration. * @return ReferenceCountedDB.
*/ - public ReferenceCountedDB getDB(long containerID, String containerDBType, + public ReferenceCountedDB getDB(long containerID, String containerDBType, String containerDBPath, String schemaVersion, ConfigurationSource conf) throws IOException { Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); - ReferenceCountedDB db; + ReferenceCountedDB db; Lock containerLock = rocksDBLock.get(containerDBPath); containerLock.lock(); metrics.incNumDbGetOps(); try { lock.lock(); try { - db = (ReferenceCountedDB) this.get(containerDBPath); + db = (ReferenceCountedDB) this.get(containerDBPath); if (db != null && !db.isClosed()) { metrics.incNumCacheHits(); db.incrementReference(); @@ -170,8 +170,8 @@ public ReferenceCountedDB getDB(long containerID, String containe lock.lock(); try { - ReferenceCountedDB currentDB = - (ReferenceCountedDB) this.get(containerDBPath); + ReferenceCountedDB currentDB = + (ReferenceCountedDB) this.get(containerDBPath); if (currentDB != null && !currentDB.isClosed()) { // increment the reference before returning the object currentDB.incrementReference(); @@ -201,7 +201,7 @@ public ReferenceCountedDB getDB(long containerID, String containe public void removeDB(String containerDBPath) { lock.lock(); try { - ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath); + ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath); if (db != null) { boolean cleaned = cleanupDb(db); if (!db.isClosed()) { @@ -230,7 +230,7 @@ private boolean cleanupDb(ReferenceCountedDB db) { * @param containerDBPath - DB path of the container. * @param db - DB handler */ - public void addDB(String containerDBPath, ReferenceCountedDB db) { + public void addDB(String containerDBPath, ReferenceCountedDB db) { lock.lock(); try { this.putIfAbsent(containerDBPath, db); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java index 873ce75ff58..54849a6f8cd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java @@ -26,7 +26,7 @@ * Just a wrapper for DatanodeStore. * This is for container schema v3 which has one rocksdb instance per disk. */ -public class RawDB extends DBHandle { +public class RawDB extends DBHandle { public RawDB(DatanodeStore store, String containerDBPath) { super(store, containerDBPath); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java index 8c238abcf75..2b73042ae7e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java @@ -22,7 +22,7 @@ import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; -import org.apache.hadoop.ozone.container.metadata.AbstractStore; +import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,12 +36,12 @@ * from caller stack. 
When JDK9 StackWalker is available, we can switch to
 * StackWalker instead of new Exception().printStackTrace().
 */
-public class ReferenceCountedDB extends DBHandle {
+public class ReferenceCountedDB extends DBHandle {
   private static final Logger LOG =
       LoggerFactory.getLogger(ReferenceCountedDB.class);
   private final AtomicInteger referenceCount;
 
-  public ReferenceCountedDB(STORE store, String containerDBPath) {
+  public ReferenceCountedDB(DatanodeStore store, String containerDBPath) {
     super(store, containerDBPath);
     this.referenceCount = new AtomicInteger(0);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java
index e93d974aafe..ca24c99ab08 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java
@@ -26,11 +26,11 @@
  * Class enclosing a reference counted handle to DBStore.
  */
 public class ReferenceCountedHandle implements Closeable {
-  private final ReferenceCountedDB dbHandle;
+  private final BaseReferenceCountedDB dbHandle;
   private volatile boolean isClosed;
 
   //Provide a handle with an already incremented reference.
-  public ReferenceCountedHandle(ReferenceCountedDB dbHandle) {
+  public ReferenceCountedHandle(BaseReferenceCountedDB dbHandle) {
     this.dbHandle = dbHandle;
     this.isClosed = false;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 00900bd9f2c..ae3288a3e98 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -62,7 +62,6 @@
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.replication.ContainerImporter;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
@@ -367,7 +366,7 @@ public void delete() throws StorageContainerException {
 
   @Override
   public boolean hasBlocks() throws IOException {
-    try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+    try (DBHandle db = BlockUtils.getDB(containerData, config)) {
       return !KeyValueContainerUtil.noBlocksInContainer(db.getStore(),
           containerData, bCheckChunksFilePath);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 43118d42cca..c235109f2cb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -47,7 +47,6 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.slf4j.Logger;
@@ -247,7 +246,7 @@ private ScanResult scanData(DataTransferThrottler throttler,
     onDiskContainerData.setDbFile(dbFile);
 
     try {
-      try (DBHandle db = BlockUtils.getDB(onDiskContainerData, checkConfig);
+      try (DBHandle db = BlockUtils.getDB(onDiskContainerData, checkConfig);
           BlockIterator kvIter = db.getStore().getBlockIterator(
               onDiskContainerData.getContainerID(),
               onDiskContainerData.getUnprefixedKeyFilter())) {
@@ -313,7 +312,7 @@ private ScanResult scanData(DataTransferThrottler throttler,
    * @return blockData in DB
    * @throws IOException
    */
-  private BlockData getBlockDataFromDB(DBHandle db, BlockData block)
+  private BlockData getBlockDataFromDB(DBHandle db, BlockData block)
       throws IOException {
     String blockKey =
         onDiskContainerData.getBlockKey(block.getBlockID().getLocalID());
@@ -330,7 +329,7 @@ private BlockData getBlockDataFromDB(DBHandle db, BlockData block
    * @return blockData in DB
    * @throws IOException
    */
-  private BlockData getBlockDataFromDBWithLock(DBHandle db, BlockData block)
+  private BlockData getBlockDataFromDBWithLock(DBHandle db, BlockData block)
       throws IOException {
     container.readLock();
     try {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 4908cda8f9a..4ea8552e780 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -35,7 +35,6 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.yaml.snakeyaml.nodes.Tag;
@@ -297,7 +296,7 @@ public boolean isFinalizedBlockExist(long localID) {
     return finalizedBlockSet.contains(localID);
   }
 
-  public void clearFinalizedBlock(DBHandle db) throws IOException {
+  public void clearFinalizedBlock(DBHandle db) throws IOException {
     if (!finalizedBlockSet.isEmpty()) {
       // delete from db and clear memory
       // Should never fail.
@@ -354,7 +353,7 @@ public static List getYamlFields() {
    * @param releasedBytes - Number of bytes released.
    * @throws IOException
    */
-  public void updateAndCommitDBCounters(DBHandle db,
+  public void updateAndCommitDBCounters(DBHandle db,
       BatchOperation batchOperation, int deletedBlockCount,
       long releasedBytes) throws IOException {
     Table metadataTable = db.getStore().getMetadataTable();
@@ -371,7 +370,7 @@ public void updateAndCommitDBCounters(DBHandle db,
     db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
   }
 
-  public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
+  public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
     // Reset the in memory metadata.
     numPendingDeletionBlocks.set(0);
     // Reset the metadata on disk.
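
A note on the pattern in KeyValueContainerData above: updateAndCommitDBCounters and resetPendingDeleteBlockCount funnel every counter update through a single batch commit, so the per-container metadata keys cannot disagree with each other after a crash. A minimal sketch of that idea follows; CounterStore, Batch, and the key names are simplified stand-ins for the real RocksDB-backed Table and BatchOperation APIs, not the actual Ozone classes.

    import java.util.AbstractMap.SimpleEntry;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    /** Sketch: stage related metadata updates, then apply them atomically. */
    public final class CounterStore {
      /** Staged writes, standing in for a RocksDB BatchOperation. */
      public static final class Batch {
        private final List<Map.Entry<String, Long>> staged = new ArrayList<>();

        public void put(String key, long value) {
          staged.add(new SimpleEntry<>(key, value));
        }
      }

      private final Map<String, Long> metadataTable = new HashMap<>();

      /** Nothing staged in the batch is visible until commit. */
      public synchronized void commit(Batch batch) {
        for (Map.Entry<String, Long> e : batch.staged) {
          metadataTable.put(e.getKey(), e.getValue());
        }
      }

      public synchronized long get(String key) {
        return metadataTable.getOrDefault(key, 0L);
      }

      public static void main(String[] args) {
        CounterStore store = new CounterStore();
        Batch batch = new Batch();
        // Mirror updateAndCommitDBCounters: both counters move in one commit,
        // so a crash cannot leave the pending-delete count out of sync with
        // the released bytes.
        batch.put("pendingDeleteBlockCount",
            store.get("pendingDeleteBlockCount") - 2);
        batch.put("bytesUsed", store.get("bytesUsed") - 4096);
        store.commit(batch);
        System.out.println(store.get("bytesUsed")); // -4096
      }
    }
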
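The ReferenceCountedDB/BaseReferenceCountedDB split above implements the idiom of closing a cached DB only after its last borrower has returned it, which is what lets the many try-with-resources DBHandle blocks in this series stay cheap. A stripped-down sketch of that idiom, with simplified names rather than the real Ozone classes:

    import java.util.concurrent.atomic.AtomicInteger;

    /** Sketch: close the underlying store only when the last reference drops. */
    class RefCountedHandle implements AutoCloseable {
      private final AutoCloseable store;        // e.g. a RocksDB-backed store
      private final AtomicInteger refs = new AtomicInteger();

      RefCountedHandle(AutoCloseable store) {
        this.store = store;
      }

      /** Each borrower (cache hit) takes one reference. */
      RefCountedHandle acquire() {
        refs.incrementAndGet();
        return this;
      }

      /** try-with-resources lands here; only the last close touches the store. */
      @Override
      public void close() throws Exception {
        if (refs.decrementAndGet() == 0) {
          store.close();
        }
      }

      public static void main(String[] args) throws Exception {
        RefCountedHandle cached =
            new RefCountedHandle(() -> System.out.println("store closed"));
        cached.acquire();                       // the cache holds one reference
        try (RefCountedHandle borrowed = cached.acquire()) {
          // read or write through the store; closing only drops our reference
        }
        cached.close();                         // last reference: store closes
      }
    }
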
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 198b8d5cf16..a5690fa19a7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -1343,7 +1343,7 @@ private String[] getFilesWithPrefix(String prefix, File chunkDir) { private boolean logBlocksIfNonZero(Container container) throws IOException { boolean nonZero = false; - try (DBHandle dbHandle + try (DBHandle dbHandle = BlockUtils.getDB( (KeyValueContainerData) container.getContainerData(), conf)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index ad4bdd8cfbb..8bbc2478004 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -120,7 +120,7 @@ public static DatanodeStore getUncachedDatanodeStore( * @return DB handle. * @throws StorageContainerException */ - public static DBHandle getDB(KeyValueContainerData containerData, + public static DBHandle getDB(KeyValueContainerData containerData, ConfigurationSource conf) throws StorageContainerException { Preconditions.checkNotNull(containerData); Preconditions.checkNotNull(containerData.getDbFile()); @@ -191,7 +191,7 @@ public static void addDB(DatanodeStore store, String containerDBPath, ContainerCache cache = ContainerCache.getInstance(conf); Preconditions.checkNotNull(cache); cache.addDB(containerDBPath, - new ReferenceCountedDB<>(store, containerDBPath)); + new ReferenceCountedDB(store, containerDBPath)); } } @@ -283,7 +283,7 @@ public static void removeContainerFromDB(KeyValueContainerData containerData, public static void dumpKVContainerDataToFiles( KeyValueContainerData containerData, ConfigurationSource conf) throws IOException { - try (DBHandle db = getDB(containerData, conf)) { + try (DBHandle db = getDB(containerData, conf)) { Preconditions.checkState(db.getStore() instanceof DatanodeStoreSchemaThreeImpl); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 091b244f418..691ccaa630d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -234,13 +234,14 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, boolean bCheckChunksFilePath = dnConf.getCheckEmptyContainerDir(); if (kvContainerData.hasSchema(OzoneConsts.SCHEMA_V3)) { - try (DBHandle db = BlockUtils.getDB(kvContainerData, config)) { - populateContainerMetadata(kvContainerData, db.getStore(), bCheckChunksFilePath); + try (DBHandle db = BlockUtils.getDB(kvContainerData, config)) { + populateContainerMetadata(kvContainerData, + db.getStore(), 
bCheckChunksFilePath); } return; } - DBHandle cachedDB = null; + DBHandle cachedDB = null; DatanodeStore store = null; try { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 2f3146a1940..6232b843567 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import com.google.common.base.Preconditions; @@ -111,7 +110,7 @@ public long persistPutBlock(KeyValueContainer container, // We are not locking the key manager since LevelDb serializes all actions // against a single DB. We rely on DB level locking to avoid conflicts. - try (DBHandle db = BlockUtils.getDB(containerData, config)) { + try (DBHandle db = BlockUtils.getDB(containerData, config)) { // This is a post condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); @@ -234,7 +233,7 @@ public void finalizeBlock(Container container, BlockID blockId) kvContainer.removeFromPendingPutBlockCache(localID); - try (DBHandle db = BlockUtils.getDB(kvContainer.getContainerData(), + try (DBHandle db = BlockUtils.getDB(kvContainer.getContainerData(), config)) { // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); @@ -251,7 +250,7 @@ public void finalizeBlock(Container container, BlockID blockId) } } - private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db, + private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db, KeyValueContainer kvContainer, BatchOperation batch, long localID) throws IOException { // if the chunk list of the block to be finalized was written incremental, @@ -271,7 +270,7 @@ public BlockData getBlock(Container container, BlockID blockID) throws IOExcepti KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); long bcsId = blockID.getBlockCommitSequenceId(); - try (DBHandle db = BlockUtils.getDB(containerData, config)) { + try (DBHandle db = BlockUtils.getDB(containerData, config)) { // This is a post condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); @@ -291,7 +290,7 @@ public long getCommittedBlockLength(Container container, BlockID blockID) throws IOException { KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - try (DBHandle db = BlockUtils.getDB(containerData, config)) { + try (DBHandle db = BlockUtils.getDB(containerData, config)) { // This is a post condition that acts as a hint to the user. // Should never fail. 
Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); @@ -344,7 +343,7 @@ public List listBlock(Container container, long startLocalID, int List result = null; KeyValueContainerData cData = (KeyValueContainerData) container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(cData, config)) { + try (DBHandle db = BlockUtils.getDB(cData, config)) { result = new ArrayList<>(); String startKey = (startLocalID == -1) ? cData.startKeyEmpty() : cData.getBlockKey(startLocalID); @@ -370,7 +369,7 @@ public void shutdown() { BlockUtils.shutdownCache(config); } - private BlockData getBlockByID(DBHandle db, BlockID blockID, + private BlockData getBlockByID(DBHandle db, BlockID blockID, KeyValueContainerData containerData) throws IOException { String blockKey = containerData.getBlockKey(blockID.getLocalID()); return db.getStore().getBlockByID(blockID, blockKey); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java index feb1d39366f..eb7b6e7378a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.util.Time; @@ -147,7 +146,7 @@ private ContainerBackgroundTaskResult handleDeleteTask() throws Exception { File dataDir = new File(containerData.getChunksPath()); long startTime = Time.monotonicNow(); // Scan container's db and get list of under deletion blocks - try (DBHandle meta = BlockUtils.getDB(containerData, conf)) { + try (DBHandle meta = BlockUtils.getDB(containerData, conf)) { if (containerData.hasSchema(SCHEMA_V1)) { crr = deleteViaSchema1(meta, container, dataDir, startTime); } else if (containerData.hasSchema(SCHEMA_V2)) { @@ -175,7 +174,7 @@ public boolean checkDataDir(File dataDir) { } public ContainerBackgroundTaskResult deleteViaSchema1( - DBHandle meta, Container container, File dataDir, + DBHandle meta, Container container, File dataDir, long startTime) throws IOException { ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult(); if (!checkDataDir(dataDir)) { @@ -277,7 +276,7 @@ public ContainerBackgroundTaskResult deleteViaSchema1( } public ContainerBackgroundTaskResult deleteViaSchema2( - DBHandle meta, Container container, File dataDir, + DBHandle meta, Container container, File dataDir, long startTime) throws IOException { Deleter schema2Deleter = (table, batch, tid) -> { Table delTxTable = @@ -297,7 +296,7 @@ public ContainerBackgroundTaskResult deleteViaSchema2( } public ContainerBackgroundTaskResult deleteViaSchema3( - DBHandle meta, Container container, File dataDir, + DBHandle meta, Container container, File dataDir, long startTime) throws IOException { Deleter schema3Deleter = (table, batch, tid) -> { Table delTxTable = @@ -319,7 +318,7 @@ 
public ContainerBackgroundTaskResult deleteViaSchema3( private ContainerBackgroundTaskResult deleteViaTransactionStore( TableIterator> - iter, DBHandle meta, Container container, File dataDir, + iter, DBHandle meta, Container container, File dataDir, long startTime, Deleter deleter) throws IOException { ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult(); if (!checkDataDir(dataDir)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java index 3965f94449a..f8f7a80a706 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.BaseReferenceCountedDB; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import java.io.IOException; @@ -40,7 +40,7 @@ public final class MasterVolumeMetadataStore extends AbstractRDBStore containerIdsTable; - private static final ConcurrentMap> INSTANCES = + private static final ConcurrentMap> INSTANCES = new ConcurrentHashMap<>(); public static ReferenceCountedHandle get(ConfigurationSource conf) throws IOException { @@ -53,8 +53,8 @@ public static ReferenceCountedHandle get(Configuratio if (v == null || v.isClosed()) { try { MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); - ReferenceCountedDB referenceCountedDB = - new ReferenceCountedDB<>(masterVolumeMetadataStore, + BaseReferenceCountedDB referenceCountedDB = + new BaseReferenceCountedDB<>(masterVolumeMetadataStore, masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); referenceCountedDB.incrementReference(); return referenceCountedDB; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 744b310923e..bc56141fb08 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -211,7 +211,7 @@ private void createPendingDeleteBlocksSchema1(int numOfBlocksPerContainer, ChunkBuffer buffer, ChunkManager chunkManager, KeyValueContainer container) { BlockID blockID = null; - try (DBHandle metadata = BlockUtils.getDB(data, conf)) { + try (DBHandle metadata = BlockUtils.getDB(data, conf)) { for (int j = 0; j < numOfBlocksPerContainer; j++) { blockID = ContainerTestHelper.getTestBlockID(containerID); String deleteStateName = data.getDeletingBlockKey( @@ -246,7 +246,7 @@ private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer, putChunksInBlock(numOfChunksPerBlock, i, chunks, buffer, chunkManager, container, blockID); kd.setChunks(chunks); - try (DBHandle metadata = BlockUtils.getDB(data, conf)) { + try (DBHandle metadata 
= BlockUtils.getDB(data, conf)) { String blockKey = data.getBlockKey(blockID.getLocalID()); metadata.getStore().getBlockDataTable().put(blockKey, kd); } catch (IOException exception) { @@ -269,7 +269,7 @@ private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer, private void createTxn(KeyValueContainerData data, List containerBlocks, int txnID, long containerID) { - try (DBHandle metadata = BlockUtils.getDB(data, conf)) { + try (DBHandle metadata = BlockUtils.getDB(data, conf)) { StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction dtx = StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction .newBuilder().setTxID(txnID).setContainerID(containerID) @@ -331,7 +331,7 @@ private void updateMetaData(KeyValueContainerData data, KeyValueContainer container, int numOfBlocksPerContainer, int numOfChunksPerBlock) { long chunkLength = 100; - try (DBHandle metadata = BlockUtils.getDB(data, conf)) { + try (DBHandle metadata = BlockUtils.getDB(data, conf)) { container.getContainerData().setBlockCount(numOfBlocksPerContainer); // Set block count, bytes used and pending delete block count. metadata.getStore().getMetadataTable() @@ -362,7 +362,7 @@ private void deleteAndWait(BlockDeletingServiceTestImpl service, * Get under deletion blocks count from DB, * note this info is parsed from container.db. */ - private int getUnderDeletionBlocksCount(DBHandle meta, + private int getUnderDeletionBlocksCount(DBHandle meta, KeyValueContainerData data) throws IOException { if (data.hasSchema(SCHEMA_V1)) { return meta.getStore().getBlockDataTable() @@ -437,7 +437,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) KeyValueContainerData incorrectData = createToDeleteBlocks(containerSet, 0, 1); - try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { + try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { // Check pre-create state. assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData)); @@ -460,7 +460,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) // Check its metadata was set up correctly. assertEquals(correctNumBlocksToDelete, correctData.getNumPendingDeletionBlocks()); - try (DBHandle db = BlockUtils.getDB(correctData, conf)) { + try (DBHandle db = BlockUtils.getDB(correctData, conf)) { assertEquals(correctNumBlocksToDelete, getUnderDeletionBlocksCount(db, correctData)); assertEquals(correctNumBlocksToDelete, @@ -488,7 +488,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) // Pending delete block count in the incorrect container should be fixed // and reset to 0. assertEquals(0, incorrectData.getNumPendingDeletionBlocks()); - try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { + try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData)); assertEquals(0, db.getStore().getMetadataTable() @@ -497,7 +497,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) // Correct container should not have been processed. 
assertEquals(correctNumBlocksToDelete, correctData.getNumPendingDeletionBlocks()); - try (DBHandle db = BlockUtils.getDB(correctData, conf)) { + try (DBHandle db = BlockUtils.getDB(correctData, conf)) { assertEquals(correctNumBlocksToDelete, getUnderDeletionBlocksCount(db, correctData)); assertEquals(correctNumBlocksToDelete, @@ -512,7 +512,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) // The incorrect container should remain in the same state after being // fixed. assertEquals(0, incorrectData.getNumPendingDeletionBlocks()); - try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { + try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) { assertEquals(0, getUnderDeletionBlocksCount(db, incorrectData)); assertEquals(0, db.getStore().getMetadataTable() @@ -521,7 +521,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) // The correct container should have been processed this run and had its // blocks deleted. assertEquals(0, correctData.getNumPendingDeletionBlocks()); - try (DBHandle db = BlockUtils.getDB(correctData, conf)) { + try (DBHandle db = BlockUtils.getDB(correctData, conf)) { assertEquals(0, getUnderDeletionBlocksCount(db, correctData)); assertEquals(0, db.getStore().getMetadataTable() @@ -558,7 +558,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) KeyPrefixFilter filter = isSameSchemaVersion(schemaVersion, SCHEMA_V1) ? data.getDeletingBlockKeyFilter() : data.getUnprefixedKeyFilter(); - try (DBHandle meta = BlockUtils.getDB(data, conf)) { + try (DBHandle meta = BlockUtils.getDB(data, conf)) { Map> containerMap = containerSet.getContainerMapCopy(); assertBlockDataTableRecordCount(3, meta, filter, data.getContainerID()); // NOTE: this test assumes that all the container is KetValueContainer and @@ -694,7 +694,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) List unrecordedBlockIds = new ArrayList<>(); Set unrecordedChunks = new HashSet<>(); - try (DBHandle meta = BlockUtils.getDB(ctr1, conf)) { + try (DBHandle meta = BlockUtils.getDB(ctr1, conf)) { // create unrecorded blocks in a new txn and update metadata, // service shall first choose the top pendingDeletion container // if using the TopNOrderedContainerDeletionChoosingPolicy @@ -846,7 +846,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) KeyValueContainer container = (KeyValueContainer) containerSet.iterator().next(); KeyValueContainerData data = container.getContainerData(); - try (DBHandle meta = BlockUtils.getDB(data, conf)) { + try (DBHandle meta = BlockUtils.getDB(data, conf)) { LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG); GenericTestUtils.waitFor(() -> { try { @@ -1099,7 +1099,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) private void assertBlockDataTableRecordCount(int expectedCount, KeyValueContainerData containerData, KeyPrefixFilter filter) throws IOException { - try (DBHandle handle = BlockUtils.getDB(containerData, conf)) { + try (DBHandle handle = BlockUtils.getDB(containerData, conf)) { long containerID = containerData.getContainerID(); assertBlockDataTableRecordCount(expectedCount, handle, filter, containerID); @@ -1116,7 +1116,7 @@ private void assertBlockDataTableRecordCount(int expectedCount, * @throws IOException */ private void assertBlockDataTableRecordCount(int expectedCount, - DBHandle handle, KeyPrefixFilter filter, long containerID) + DBHandle handle, KeyPrefixFilter filter, long containerID) 
throws IOException { long count = 0L; try (BlockIterator iterator = handle.getStore(). diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index a6cb9b3e10b..2235b23ce88 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -152,7 +152,7 @@ public void cleanup() { public void testDirectTableIterationDisabled(String schemaVersion) throws Exception { setup(schemaVersion); - try (DBHandle refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(newKvData(), conf)) { DatanodeStore store = refCountedDB.getStore(); assertTableIteratorUnsupported(store.getMetadataTable()); @@ -178,7 +178,7 @@ private void assertTableIteratorUnsupported(Table table) { public void testBlockIteration(String schemaVersion) throws Exception { setup(schemaVersion); KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB, cData)); @@ -245,7 +245,7 @@ public void testReadWithoutMetadata(String schemaVersion) throws Exception { // This simulates them not being there to start with. setup(schemaVersion); KeyValueContainerData cData = newKvData(); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table metadataTable = db.getStore().getMetadataTable(); metadataTable.delete(cData.getBlockCountKey()); @@ -311,7 +311,7 @@ public void testDelete(String schemaVersion) throws Exception { TestDB.KEY_COUNT - numBlocksToDelete; KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { // Test results via block iteration. assertEquals(expectedDeletingBlocks, @@ -358,7 +358,7 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion) metrics, c -> { }); KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { // Read blocks that were already deleted before the upgrade. 
List> deletedBlocks = refCountedDB.getStore().getDeletedBlocksTable() @@ -407,7 +407,7 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion) public void testReadBlockData(String schemaVersion) throws Exception { setup(schemaVersion); KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { Table blockDataTable = refCountedDB.getStore().getBlockDataTable(); @@ -453,7 +453,7 @@ public void testReadBlockData(String schemaVersion) throws Exception { public void testReadDeletingBlockData(String schemaVersion) throws Exception { setup(schemaVersion); KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { Table blockDataTable = refCountedDB.getStore().getBlockDataTable(); @@ -508,7 +508,7 @@ public void testReadDeletingBlockData(String schemaVersion) throws Exception { public void testReadMetadata(String schemaVersion) throws Exception { setup(schemaVersion); KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { Table metadataTable = refCountedDB.getStore().getMetadataTable(); @@ -527,7 +527,7 @@ public void testReadMetadata(String schemaVersion) throws Exception { public void testReadDeletedBlocks(String schemaVersion) throws Exception { setup(schemaVersion); KeyValueContainerData cData = newKvData(); - try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { + try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { Table deletedBlocksTable = refCountedDB.getStore().getDeletedBlocksTable(); @@ -634,7 +634,7 @@ private void checkContainerData(KeyValueContainerData kvData) { kvData.getNumPendingDeletionBlocks()); } - private int countDeletedBlocks(DBHandle refCountedDB, + private int countDeletedBlocks(DBHandle refCountedDB, KeyValueContainerData cData) throws IOException { return refCountedDB.getStore().getDeletedBlocksTable() @@ -643,7 +643,7 @@ private int countDeletedBlocks(DBHandle refCountedDB, cData.getUnprefixedKeyFilter()).size(); } - private int countDeletingBlocks(DBHandle refCountedDB, + private int countDeletingBlocks(DBHandle refCountedDB, KeyValueContainerData cData) throws IOException { return refCountedDB.getStore().getBlockDataTable() @@ -652,7 +652,7 @@ private int countDeletingBlocks(DBHandle refCountedDB, cData.getDeletingBlockKeyFilter()).size(); } - private int countUnprefixedBlocks(DBHandle refCountedDB, + private int countUnprefixedBlocks(DBHandle refCountedDB, KeyValueContainerData cData) throws IOException { return refCountedDB.getStore().getBlockDataTable() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 4bd32eda94e..0c4612b79fa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -50,7 +50,6 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import 
org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.ozone.test.GenericTestUtils; @@ -175,7 +174,7 @@ public void testBlockIteration() throws IOException { // turn on schema v3 first, then do operations ContainerTestUtils.enableSchemaV3(conf); - try (DBHandle db = BlockUtils.getDB(container.getContainerData(), conf)) { + try (DBHandle db = BlockUtils.getDB(container.getContainerData(), conf)) { long containerID = container.getContainerData().getContainerID(); int blockCount = 0; try (BlockIterator iter = db.getStore() @@ -211,7 +210,7 @@ public void testReadMetadata() throws IOException { // turn on schema v3 first, then do operations ContainerTestUtils.enableSchemaV3(conf); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table metadatatable = db.getStore().getMetadataTable(); assertEquals((long)metadatatable.get(BLOCK_COUNT), BLOCKS_PER_CONTAINER); @@ -263,7 +262,7 @@ public void testDeleteViaTransation() throws IOException, TimeoutException, assertEquals(cData.getBytesUsed(), expectedBytesUsed); // check db metadata after deletion - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table metadatatable = db.getStore().getMetadataTable(); assertEquals((long)metadatatable.get(BLOCK_COUNT), expectedKeyCount); assertEquals((long)metadatatable.get(PENDING_DELETE_BLOCK_COUNT), 0); @@ -296,7 +295,7 @@ private KeyValueContainer createTestContainer() throws IOException { List blocks = Arrays.asList(startBlockID, startBlockID + 1); DeletedBlocksTransaction txn = createTestDeleteTxn(txnID, blocks, containerID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { try (BatchOperation batch = db.getStore().getBatchHandler() .initBatchOperation()) { DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 40d9df999b4..49ddd5f674d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -49,7 +49,6 @@ import static java.util.stream.Collectors.toList; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.junit.jupiter.api.AfterEach; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; @@ -77,7 +76,7 @@ public class TestKeyValueBlockIterator { private OzoneConfiguration conf; @TempDir private File testRoot; - private DBHandle db; + private DBHandle db; private ContainerLayoutVersion layout; private String schemaVersion; private String datanodeID = UUID.randomUUID().toString(); @@ -399,7 +398,7 @@ private Map> createContainerWithBlocks(long containerId, Map prefixCounts) throws Exception { // Create required block data. 
Map> blockIDs = new HashMap<>(); - try (DBHandle metadataStore = BlockUtils.getDB(containerData, conf)) { + try (DBHandle metadataStore = BlockUtils.getDB(containerData, conf)) { List chunkList = new ArrayList<>(); ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index cfbac195697..584db675d93 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -455,7 +455,7 @@ private void createContainer() throws StorageContainerException { private void populate(KeyValueContainer container, long numberOfKeysToWrite) throws IOException { KeyValueContainerData cData = container.getContainerData(); - try (DBHandle metadataStore = BlockUtils.getDB(cData, CONF)) { + try (DBHandle metadataStore = BlockUtils.getDB(cData, CONF)) { Table blockDataTable = metadataStore.getStore().getBlockDataTable(); @@ -486,7 +486,7 @@ private void populateWithoutBlock(KeyValueContainer container, long numberOfKeysToWrite) throws IOException { KeyValueContainerData cData = container.getContainerData(); - try (DBHandle metadataStore = BlockUtils.getDB(cData, CONF)) { + try (DBHandle metadataStore = BlockUtils.getDB(cData, CONF)) { // Just update metdata, and don't insert in block table // As for test, we are doing manually so adding key count to DB. metadataStore.getStore().getMetadataTable() @@ -687,7 +687,7 @@ public void testContainerRocksDB(ContainerTestVersionInfo versionInfo) keyValueContainerData, CONF); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - try (DBHandle db = BlockUtils.getDB(keyValueContainerData, CONF)) { + try (DBHandle db = BlockUtils.getDB(keyValueContainerData, CONF)) { RDBStore store = (RDBStore) db.getStore().getStore(); long defaultCacheSize = OzoneConsts.GB; long cacheSize = Long.parseLong(store @@ -742,7 +742,7 @@ public void testDBProfileAffectsDBOptions( keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); DatanodeDBProfile outProfile1; - try (DBHandle db1 = + try (DBHandle db1 = BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) { DatanodeStore store1 = db1.getStore(); assertInstanceOf(AbstractDatanodeStore.class, store1); @@ -763,7 +763,7 @@ public void testDBProfileAffectsDBOptions( keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); DatanodeDBProfile outProfile2; - try (DBHandle db2 = + try (DBHandle db2 = BlockUtils.getDB(keyValueContainer.getContainerData(), otherConf)) { DatanodeStore store2 = db2.getStore(); assertInstanceOf(AbstractDatanodeStore.class, store2); @@ -1055,7 +1055,7 @@ private void testMixedSchemaImport(String dir, KeyValueContainer container = new KeyValueContainer(data, conf); container.create(volumeSet, volumeChoosingPolicy, scmId); long pendingDeleteBlockCount = 20; - try (DBHandle meta = BlockUtils.getDB(data, conf)) { + try (DBHandle meta = BlockUtils.getDB(data, conf)) { Table metadataTable = meta.getStore().getMetadataTable(); metadataTable.put(data.getPendingDeleteBlockCountKey(), pendingDeleteBlockCount); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 891cfbd6bc7..b24a6f04c48 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration; import java.io.File; @@ -116,8 +115,8 @@ public void testKeyValueContainerCheckCorruption( File dbFile = KeyValueContainerLocationUtil .getContainerDBFile(containerData); containerData.setDbFile(dbFile); - try (DBHandle ignored = BlockUtils.getDB(containerData, conf); - BlockIterator kvIter = + try (DBHandle ignored = BlockUtils.getDB(containerData, conf); + BlockIterator kvIter = ignored.getStore().getBlockIterator(containerID)) { BlockData block = kvIter.nextBlock(); assertFalse(block.getChunks().isEmpty()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java index 116bff84367..9c531069e9c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; @@ -136,7 +135,7 @@ protected KeyValueContainer createContainerWithBlocks(long containerId, KeyValueContainer container = new KeyValueContainer(containerData, conf); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), clusterID); - try (DBHandle metadataStore = BlockUtils.getDB(containerData, + try (DBHandle metadataStore = BlockUtils.getDB(containerData, conf)) { assertNotNull(containerData.getChunksPath()); File chunksPath = new File(containerData.getChunksPath()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java index d6b9f631b3a..12a659b7e44 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java @@ -425,7 +425,7 @@ public void setDB(KeyValueContainerData containerData, long blockCount, long byteCount, long dbDeleteCount, List deleteTransactions) 
throws Exception { - try (DBHandle db = BlockUtils.getDB(containerData, getConf())) { + try (DBHandle db = BlockUtils.getDB(containerData, getConf())) { Table metadataTable = db.getStore().getMetadataTable(); // Don't care about in memory state. Just change the DB values. metadataTable.put(containerData.getBlockCountKey(), blockCount); @@ -470,7 +470,7 @@ public void setDB(KeyValueContainerData containerData, void checkDbCounts(KeyValueContainerData containerData, long expectedBlockCount, long expectedBytesUsed, long expectedDeletedCount) throws Exception { - try (DBHandle db = BlockUtils.getDB(containerData, getConf())) { + try (DBHandle db = BlockUtils.getDB(containerData, getConf())) { Table metadataTable = db.getStore().getMetadataTable(); long bytesUsed = metadataTable.get(containerData.getBytesUsedKey()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 0ad73342725..8fd7b6280b6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.FileUtils; @@ -153,7 +152,7 @@ public void cleanup() { private void markBlocksForDelete(KeyValueContainer keyValueContainer, boolean setMetaData, List blockNames, int count) throws Exception { KeyValueContainerData cData = keyValueContainer.getContainerData(); - try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { + try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { for (int i = 0; i < count; i++) { Table blockDataTable = @@ -184,7 +183,7 @@ private List addBlocks(KeyValueContainer keyValueContainer, long containerId = keyValueContainer.getContainerData().getContainerID(); KeyValueContainerData cData = keyValueContainer.getContainerData(); List blkNames = new ArrayList<>(); - try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { + try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { for (int i = 0; i < blockCount; i++) { // Creating BlockData @@ -531,7 +530,7 @@ private KeyValueContainer createContainerWithId(int id, VolumeSet volSet, private void setBlockCommitSequence(KeyValueContainerData cData, long val) throws IOException { - try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { + try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { metadataStore.getStore().getMetadataTable() .put(cData.getBcsIdKey(), val); metadataStore.getStore().flushDB(); @@ -576,7 +575,7 @@ public void testMarkedDeletedContainerCleared( if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { // verify if newly added container is not present as added - try (DBHandle dbHandle = BlockUtils.getDB( + try (DBHandle dbHandle = BlockUtils.getDB( kvContainer.getContainerData(), conf)) { DatanodeStoreSchemaThreeImpl store = (DatanodeStoreSchemaThreeImpl) dbHandle.getStore(); @@ -588,7 +587,7 @@ public void 
testMarkedDeletedContainerCleared( private long addDbEntry(KeyValueContainerData containerData) throws Exception { - try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) { + try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) { DatanodeStoreSchemaThreeImpl store = (DatanodeStoreSchemaThreeImpl) dbHandle.getStore(); Table metadataTable = store.getMetadataTable(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 00a52fb8b29..60552e7cc9d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -45,7 +45,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.io.TempDir; @@ -257,7 +256,7 @@ private long addBlocks(KeyValueContainer container, long freeBytes = container.getContainerData().getMaxSize(); long containerId = container.getContainerData().getContainerID(); KeyValueContainerData cData = container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table metadataTable = db.getStore().getMetadataTable(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 81f95b14fb4..95d7faa9174 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -1043,7 +1043,7 @@ public List getAllBlocks(MiniOzoneCluster cluster, OzoneConfiguration conf, Long containerID) throws IOException { List allBlocks = Lists.newArrayList(); KeyValueContainerData cData = getContainerMetadata(cluster, containerID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { List> kvs = db.getStore().getBlockDataTable() @@ -1063,7 +1063,7 @@ public boolean verifyBlocksWithTxnTable(MiniOzoneCluster cluster, OzoneConfigura throws IOException { for (Map.Entry> entry : containerBlocks.entrySet()) { KeyValueContainerData cData = getContainerMetadata(cluster, entry.getKey()); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { DatanodeStore ds = db.getStore(); DatanodeStoreSchemaThreeImpl dnStoreImpl = (DatanodeStoreSchemaThreeImpl) ds; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java index 9153f417792..bf20b4ecc0b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java @@ -30,7 +30,6 @@ import 
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import java.io.File; import java.io.IOException; @@ -66,7 +65,7 @@ public static File getChunksLocationPath(MiniOzoneCluster cluster, Container con // the container. KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf())) { + try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf())) { BlockID blockID = new BlockID(containerID, localID); String blockKey = containerData.getBlockKey(localID); BlockData blockData = db.getStore().getBlockByID(blockID, blockKey); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index d85bbdbff91..9ed0b182d33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -109,7 +109,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; @@ -2116,7 +2115,7 @@ public void testGetKeyDetails() throws IOException { (KeyValueContainerData)(datanodeService.getDatanodeStateMachine() .getContainer().getContainerSet().getContainer(containerID) .getContainerData()); - try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); + try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator(containerID)) { while (keyValueBlockIterator.hasNext()) { @@ -2249,7 +2248,7 @@ void testZReadKeyWithUnhealthyContainerReplica() throws Exception { long newBCSID = container.getBlockCommitSequenceId() - 1; KeyValueContainerData cData = (KeyValueContainerData) container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) { + try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) { db.getStore().getMetadataTable().put(cData.getBcsIdKey(), newBCSID); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index ae181fa2cc7..805a3a86eba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -58,7 +58,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; 
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -281,7 +280,7 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception { ((KeyValueContainer) cluster.getHddsDatanode(block1DNs.get(2)) .getDatanodeStateMachine().getContainer().getContainerSet() .getContainer(containerId1)).getContainerData(); - try (DBHandle containerDb1 = BlockUtils.getDB(containerData1, conf)) { + try (DBHandle containerDb1 = BlockUtils.getDB(containerData1, conf)) { BlockData blockData1 = containerDb1.getStore().getBlockDataTable().get( containerData1.getBlockKey(locationList.get(0).getBlockID() .getLocalID())); @@ -299,7 +298,7 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception { ((KeyValueContainer) cluster.getHddsDatanode(block2DNs.get(0)) .getDatanodeStateMachine().getContainer().getContainerSet() .getContainer(containerId2)).getContainerData(); - try (DBHandle containerDb2 = BlockUtils.getDB(containerData2, conf)) { + try (DBHandle containerDb2 = BlockUtils.getDB(containerData2, conf)) { BlockData blockData2 = containerDb2.getStore().getBlockDataTable().get( containerData2.getBlockKey(locationList.get(1).getBlockID() .getLocalID())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index 141ce16aebb..1e22613f929 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -51,7 +51,6 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -231,7 +230,7 @@ public void testValidateBCSIDOnDnRestart() throws Exception { .getContainer(omKeyLocationInfo.getContainerID()) .getContainerData(); keyValueContainerData = assertInstanceOf(KeyValueContainerData.class, containerData); - try (DBHandle db = BlockUtils.getDB(keyValueContainerData, conf)) { + try (DBHandle db = BlockUtils.getDB(keyValueContainerData, conf)) { // modify the bcsid for the container in the ROCKS DB thereby inducing // corruption diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index f68ccfd13fe..719715ac8b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -73,7 +73,6 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import 
org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -744,7 +743,7 @@ private void verifyBlocksCreated( OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet .getContainer(blockID.getContainerID()).getContainerData(); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { assertNotNull(db.getStore().getBlockDataTable() .get(cData.getBlockKey(blockID.getLocalID()))); } @@ -760,7 +759,7 @@ private void verifyBlocksDeleted( OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet .getContainer(blockID.getContainerID()).getContainerData(); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, conf)) { Table blockDataTable = db.getStore().getBlockDataTable(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index c9659ecaa82..192c933f53c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -50,7 +50,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; @@ -554,7 +553,7 @@ public void testContainerDeleteWithInvalidBlockCount() private void clearBlocksTable(Container container) throws IOException { - try (DBHandle dbHandle + try (DBHandle dbHandle = BlockUtils.getDB( (KeyValueContainerData) container.getContainerData(), conf)) { @@ -566,7 +565,7 @@ private void clearBlocksTable(Container container) throws IOException { } } - private void clearTable(DBHandle dbHandle, Table table, Container container) + private void clearTable(DBHandle dbHandle, Table table, Container container) throws IOException { List> blocks = table.getRangeKVs( From 09b2dfe1fda091072252fece0864a0319c52f725 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 17 Nov 2024 15:22:29 -0800 Subject: [PATCH 21/37] HDDS-11650. 
Fix checkstyle Change-Id: I33f2a74daa2812d227e985b1a0738bc0be99e6e7 --- .../container/common/utils/BaseReferenceCountedDB.java | 1 - .../ozone/container/keyvalue/KeyValueHandler.java | 1 - .../apache/hadoop/hdds/utils/db/DBStoreBuilder.java | 10 +++++----- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java index e34e7b07c93..b2d9fbda4ba 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java @@ -21,7 +21,6 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.ozone.container.common.interfaces.BaseDBHandle; -import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.metadata.AbstractStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index a5690fa19a7..4cd3c8cd0a5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -88,7 +88,6 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index 9db581ee292..1e42241ee43 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -163,17 +163,17 @@ private DBStoreBuilder(ConfigurationSource configuration, OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT, StorageUnit.BYTES); } - public static File getDBDirPath(DBDefinition dbDefinition, - ConfigurationSource conf) { + public static File getDBDirPath(DBDefinition definition, + ConfigurationSource configuration) { // Set metadata dirs. - File metadataDir = dbDefinition.getDBLocation(conf); + File metadataDir = definition.getDBLocation(configuration); if (metadataDir == null) { LOG.warn("{} is not configured. We recommend adding this setting.
" + "Falling back to {} instead.", - dbDefinition.getLocationConfigKey(), + definition.getLocationConfigKey(), HddsConfigKeys.OZONE_METADATA_DIRS); - metadataDir = getOzoneMetaDirPath(conf); + metadataDir = getOzoneMetaDirPath(configuration); } return metadataDir; } From 3766bc330aa3e3e6795b19767c8ea496737968e4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 18 Nov 2024 08:27:04 -0800 Subject: [PATCH 22/37] HDDS-11650. Address review comments; make methods more descriptive Change-Id: Id021b079fb9bb023a5297a458a3ac703f5d75668 --- .../container/common/impl/ContainerSet.java | 37 ++++++++++++++++++- .../container/keyvalue/KeyValueHandler.java | 6 ++- .../container/ozoneimpl/ContainerReader.java | 3 +- .../replication/ContainerImporter.java | 2 +- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index a49f429e056..b4627fed8ef 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -107,6 +107,10 @@ public boolean addContainer(Container container) throws StorageContainerExcep return addContainer(container, false); } + public boolean addContainerByOverwriteMissingContainer(Container container) throws StorageContainerException { + return addContainer(container, true); + } + public void validateContainerIsMissing(long containerId, State state) throws StorageContainerException { if (missingContainerSet.contains(containerId)) { throw new StorageContainerException(String.format("Container with container Id %d with state : %s is missing in" + @@ -168,17 +172,46 @@ public Container getContainer(long containerId) { return containerMap.get(containerId); } + /** + * Removes the container from both memory and the database. This is used when the containerData on disk has been + * removed completely from the node. + * @param containerId ID of the container to remove + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ public boolean removeContainer(long containerId) throws StorageContainerException { return removeContainer(containerId, false, true); } + /** + * Removes the container from memory only. This is used when the container is still present on disk and only the + * in-memory state of the container needs to be updated. + * @param containerId ID of the container to remove + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeContainerOnlyFromMemory(long containerId) throws StorageContainerException { + return removeContainer(containerId, false, false); + } + + /** + * Marks a container as missing: removes it from the in-memory containerMap and records the id in the + * missing container set. + * @param containerId ID of the container to remove + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeMissingContainer(long containerId) throws StorageContainerException { + return removeContainer(containerId, true, false); + } + /** * Removes the Container matching the specified containerId.
* @param containerId ID of the container to remove * @return If container is removed from containerMap returns true, otherwise * false */ - public boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) + private boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) throws StorageContainerException { Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); @@ -257,7 +290,7 @@ public void handleVolumeFailures(StateContext context) throws StorageContainerEx for (Container c : containerMap.values()) { ContainerData data = c.getContainerData(); if (data.getVolume().isFailed()) { - removeContainer(data.getContainerID(), true, false); + removeMissingContainer(data.getContainerID()); LOG.debug("Removing Container {} as the Volume {} " + "has failed", data.getContainerID(), data.getVolume()); failedVolume.set(true); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 4cd3c8cd0a5..39c33feecec 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -387,7 +387,11 @@ ContainerCommandResponseProto handleCreateContainer( try { if (containerSet.getContainer(containerID) == null) { newContainer.create(volumeSet, volumeChoosingPolicy, clusterId); - created = containerSet.addContainer(newContainer, RECOVERING == newContainer.getContainerState()); + if (RECOVERING == newContainer.getContainerState()) { + created = containerSet.addContainerByOverwriteMissingContainer(newContainer); + } else { + created = containerSet.addContainer(newContainer); + } } else { // The create container request for an already existing container can // arrive in case the ContainerStateMachine reapplies the transaction diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 198ae8f65f2..027fbff89c8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -320,8 +320,7 @@ private void resolveDuplicate(KeyValueContainer existing, private void swapAndRemoveContainer(KeyValueContainer existing, KeyValueContainer toAdd) throws IOException { - containerSet.removeContainer( - existing.getContainerData().getContainerID(), false, false); + containerSet.removeContainerOnlyFromMemory(existing.getContainerData().getContainerID()); containerSet.addContainer(toAdd); KeyValueContainerUtil.removeContainer(existing.getContainerData(), hddsVolume.getConf()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index aaeaa21e583..58a5d674639 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -128,7 +128,7 @@ public void importContainer(long containerID, Path tarFilePath, try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) { Container container = controller.importContainer( containerData, input, packer); - containerSet.addContainer(container, true); + containerSet.addContainerByOverwriteMissingContainer(container); } } finally { importContainerProgress.remove(containerID); From 46ee375009f41b5feb091c2e003933bf371b40d3 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 18 Nov 2024 08:27:28 -0800 Subject: [PATCH 23/37] HDDS-11650. Address review comments; make methods more descriptive Change-Id: I06873c6e1b4aa0d78ace0d203c1fadb887127d14 --- .../ozone/container/common/impl/ContainerSet.java | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index b4627fed8ef..8ca2b1d2e5e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -103,10 +103,23 @@ public void setRecoveringTimeout(long recoveringTimeout) { this.recoveringTimeout = recoveringTimeout; } + /** + * Adds a container to the container map. This fails if the container is already present or has been marked as missing. + * @param container container to be added + * @return If container is added to containerMap returns true, otherwise + * false + */ public boolean addContainer(Container container) throws StorageContainerException { return addContainer(container, false); } + /** + * Adds a container to the container map, overwriting an entry previously marked as missing. It still fails if the + * container is already present. + * @param container container to be added + * @return If container is added to containerMap returns true, otherwise + * false + */ public boolean addContainerByOverwriteMissingContainer(Container container) throws StorageContainerException { return addContainer(container, true); } @@ -125,7 +138,7 @@ public void validateContainerIsMissing(long containerId, State state) throws Sto * @return If container is added to containerMap returns true, otherwise * false */ - public boolean addContainer(Container container, boolean overwriteMissingContainers) throws + private boolean addContainer(Container container, boolean overwriteMissingContainers) throws StorageContainerException { Preconditions.checkNotNull(container, "container cannot be null"); From 564ae170a1ace6ab08ee724a393da88812542fcc Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 18 Nov 2024 12:06:14 -0800 Subject: [PATCH 24/37] HDDS-11650.
Address review comments Change-Id: Iead70ac8119ac0bd970a8575267e7becb354fb7b --- .../container/common/impl/ContainerSet.java | 4 +- .../container/common/impl/HddsDispatcher.java | 4 +- .../common/interfaces/BaseDBHandle.java | 4 +- .../common/utils/BaseReferenceCountedDB.java | 4 +- .../common/utils/ReferenceCountedHandle.java | 4 +- .../common/volume/MutableVolumeSet.java | 8 +- .../container/metadata/AbstractRDBStore.java | 2 +- .../{AbstractStore.java => BaseStore.java} | 6 +- .../container/metadata/DatanodeStore.java | 2 +- .../container/metadata/MetadataStore.java | 2 +- .../hadoop/hdds/utils/VoidCallable.java | 26 ---- .../hadoop/hdds/utils/db/DBTestUtils.java | 142 ------------------ .../hdds/utils/db/InMemoryTestTable.java | 133 ++++++++++++++++ .../freon/ClosedContainerReplicator.java | 90 +++++------ 14 files changed, 199 insertions(+), 232 deletions(-) rename hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/{AbstractStore.java => BaseStore.java} (91%) delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 8ca2b1d2e5e..8be48e285ee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.utils.db.DBTestUtils; +import org.apache.hadoop.hdds.utils.db.InMemoryTestTable; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -73,7 +73,7 @@ public class ContainerSet implements Iterable> { @VisibleForTesting public ContainerSet(long recoveringTimeout) { - this(DBTestUtils.getInMemoryTableForTest(), recoveringTimeout); + this(new InMemoryTestTable<>(), recoveringTimeout); } public ContainerSet(Table continerIdsTable, long recoveringTimeout) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index d86cf4db163..d1ea73fbfd8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -175,8 +175,8 @@ private boolean canIgnoreException(Result result) { case CONTAINER_UNHEALTHY: case CLOSED_CONTAINER_IO: case DELETE_ON_OPEN_CONTAINER: - case UNSUPPORTED_REQUEST: - case CONTAINER_MISSING:// Blame client for sending unsupported request. + case UNSUPPORTED_REQUEST:// Blame client for sending unsupported request. 
+ case CONTAINER_MISSING: return true; default: return false; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java index aaaa0128109..3d64f4433cf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.ozone.container.metadata.AbstractStore; +import org.apache.hadoop.ozone.container.metadata.BaseStore; import java.io.Closeable; /** * DB handle abstract class. */ -public abstract class BaseDBHandle implements Closeable { +public abstract class BaseDBHandle implements Closeable { private final STORE store; private final String containerDBPath; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java index b2d9fbda4ba..d1ebd03f5f7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.ozone.container.common.interfaces.BaseDBHandle; -import org.apache.hadoop.ozone.container.metadata.AbstractStore; +import org.apache.hadoop.ozone.container.metadata.BaseStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,7 @@ /** * Class to implement reference counting over instances of a db handle. */ -public class BaseReferenceCountedDB extends BaseDBHandle { +public class BaseReferenceCountedDB extends BaseDBHandle { private static final Logger LOG = LoggerFactory.getLogger(BaseReferenceCountedDB.class); private final AtomicInteger referenceCount; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java index ca24c99ab08..b91c5fe6d63 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java @@ -18,14 +18,14 @@ * limitations under the License. * */ -import org.apache.hadoop.ozone.container.metadata.AbstractStore; +import org.apache.hadoop.ozone.container.metadata.BaseStore; import java.io.Closeable; /** * Class enclosing a reference counted handle to DBStore. 
*/ -public class ReferenceCountedHandle implements Closeable { +public class ReferenceCountedHandle implements Closeable { private final BaseReferenceCountedDB dbHandle; private volatile boolean isClosed; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 426012f3765..9afea8e6b0c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.utils.HddsServerUtil; -import org.apache.hadoop.hdds.utils.VoidCallable; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -45,6 +44,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.apache.ratis.util.function.CheckedRunnable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,7 +85,7 @@ public class MutableVolumeSet implements VolumeSet { private String clusterID; private final StorageVolumeChecker volumeChecker; - private VoidCallable failedVolumeListener; + private CheckedRunnable failedVolumeListener; private StateContext context; private final StorageVolumeFactory volumeFactory; private final StorageVolume.VolumeType volumeType; @@ -133,7 +133,7 @@ public MutableVolumeSet(String dnUuid, String clusterID, initializeVolumeSet(); } - public void setFailedVolumeListener(VoidCallable runnable) { + public void setFailedVolumeListener(CheckedRunnable runnable) { failedVolumeListener = runnable; } @@ -256,7 +256,7 @@ private void handleVolumeFailures( } if (failedVolumeListener != null) { - failedVolumeListener.call(); + failedVolumeListener.run(); } // TODO: // 1. Consider stopping IO on open containers and tearing down diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java index 6f51a1cb4a1..93ac25a3bbd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java @@ -40,7 +40,7 @@ * Abstract Interface defining the way to interact with any rocksDB in the datanode. * @param Generic parameter defining the schema for the DB. 
*/ -public abstract class AbstractRDBStore implements AbstractStore { +public abstract class AbstractRDBStore implements BaseStore { private final DEF dbDef; private final ManagedColumnFamilyOptions cfOptions; private static DatanodeDBProfile dbProfile; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BaseStore.java similarity index 91% rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractStore.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BaseStore.java index 4e8c83f79da..f66c1b04e73 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BaseStore.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.container.metadata; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -26,9 +25,9 @@ import java.io.IOException; /** - * Abstract Interface for interacting with datanode databases. + * Interface for interacting with datanode databases. */ -public interface AbstractStore extends Closeable { +public interface BaseStore extends Closeable { /** * Start datanode manager. @@ -48,7 +47,6 @@ public interface AbstractStore extends Closeable { * * @return datanode store. */ - @VisibleForTesting DBStore getStore(); /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java index b8283494ef5..2dffc141875 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java @@ -34,7 +34,7 @@ /** * Interface for interacting with datanode databases. */ -public interface DatanodeStore extends AbstractStore { +public interface DatanodeStore extends BaseStore { String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block."; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java index e21ee4b4321..fa234720fe1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java @@ -25,7 +25,7 @@ /** * Interface for interacting with database in the master volume of a datanode. */ -public interface MetadataStore extends AbstractStore { +public interface MetadataStore extends BaseStore { /** * A Table that keeps the containerIds in a datanode. 
* diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java deleted file mode 100644 index 5f0d1704abb..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/VoidCallable.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -/** - * Defines a functional interface to call void returning function. - */ -@FunctionalInterface -public interface VoidCallable { - void call() throws EXCEPTION_TYPE; -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java deleted file mode 100644 index 0eed13cbe30..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBTestUtils.java +++ /dev/null @@ -1,142 +0,0 @@ -package org.apache.hadoop.hdds.utils.db; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Util class for mocking DB interactions happening in various tests. 
- */ -public final class DBTestUtils { - - private DBTestUtils() { - - } - - public static Table getInMemoryTableForTest() { - return new Table() { - private final Map map = new ConcurrentHashMap<>(); - - @Override - public void close() { - } - - @Override - public void put(KEY key, VALUE value) { - map.put(key, value); - } - - @Override - public void putWithBatch(BatchOperation batch, KEY key, VALUE value) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isEmpty() { - return map.isEmpty(); - } - - @Override - public boolean isExist(KEY key) { - return map.containsKey(key); - } - - @Override - public VALUE get(KEY key) { - return map.get(key); - } - - @Override - public VALUE getIfExist(KEY key) { - return map.get(key); - } - - @Override - public void delete(KEY key) { - map.remove(key); - } - - @Override - public void deleteWithBatch(BatchOperation batch, KEY key) { - throw new UnsupportedOperationException(); - } - - @Override - public void deleteRange(KEY beginKey, KEY endKey) { - throw new UnsupportedOperationException(); - } - - @Override - public TableIterator> iterator() { - throw new UnsupportedOperationException(); - } - - @Override - public TableIterator> iterator(KEY prefix) { - throw new UnsupportedOperationException(); - } - - @Override - public String getName() { - return ""; - } - - @Override - public long getEstimatedKeyCount() { - return map.size(); - } - - @Override - public List> getRangeKVs(KEY startKey, int count, KEY prefix, - MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - throw new UnsupportedOperationException(); - } - - @Override - public List> getSequentialRangeKVs(KEY startKey, int count, KEY prefix, - MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - throw new UnsupportedOperationException(); - } - - @Override - public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) { - throw new UnsupportedOperationException(); - } - - @Override - public void dumpToFileWithPrefix(File externalFile, KEY prefix) { - throw new UnsupportedOperationException(); - } - - @Override - public void loadFromFile(File externalFile) { - throw new UnsupportedOperationException(); - } - }; - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java new file mode 100644 index 00000000000..9cc1695298c --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java @@ -0,0 +1,133 @@ +package org.apache.hadoop.hdds.utils.db; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * InMemory Table implementation for tests. + */ +public final class InMemoryTestTable implements Table { + private final Map map = new ConcurrentHashMap<>(); + + @Override + public void close() { + } + + @Override + public void put(KEY key, VALUE value) { + map.put(key, value); + } + + @Override + public void putWithBatch(BatchOperation batch, KEY key, VALUE value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public boolean isExist(KEY key) { + return map.containsKey(key); + } + + @Override + public VALUE get(KEY key) { + return map.get(key); + } + + @Override + public VALUE getIfExist(KEY key) { + return map.get(key); + } + + @Override + public void delete(KEY key) { + map.remove(key); + } + + @Override + public void deleteWithBatch(BatchOperation batch, KEY key) { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteRange(KEY beginKey, KEY endKey) { + throw new UnsupportedOperationException(); + } + + @Override + public TableIterator> iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public TableIterator> iterator(KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public String getName() { + return ""; + } + + @Override + public long getEstimatedKeyCount() { + return map.size(); + } + + @Override + public List> getRangeKVs(KEY startKey, int count, KEY prefix, + MetadataKeyFilters.MetadataKeyFilter... filters) + throws IOException, IllegalArgumentException { + throw new UnsupportedOperationException(); + } + + @Override + public List> getSequentialRangeKVs(KEY startKey, int count, KEY prefix, + MetadataKeyFilters.MetadataKeyFilter... 
filters) + throws IOException, IllegalArgumentException { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public void dumpToFileWithPrefix(File externalFile, KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public void loadFromFile(File externalFile) { + throw new UnsupportedOperationException(); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 2c4beb048c6..ffb817a1f40 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -90,68 +90,72 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements @Override public Void call() throws Exception { - try { - OzoneConfiguration conf = createOzoneConfiguration(); - - final Collection datanodeStorageDirs = - HddsServerUtil.getDatanodeStorageDirs(conf); - - for (String dir : datanodeStorageDirs) { - checkDestinationDirectory(dir); + return replicate(); + } finally { + if (masterVolumeMetadataStoreReferenceCountedDB != null) { + masterVolumeMetadataStoreReferenceCountedDB.close(); } + } + } - final ContainerOperationClient containerOperationClient = - new ContainerOperationClient(conf); - final List containerInfos = - containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList(); + public Void replicate() throws Exception { - //logic same as the download+import on the destination datanode - initializeReplicationSupervisor(conf, containerInfos.size() * 2); + OzoneConfiguration conf = createOzoneConfiguration(); - replicationTasks = new ArrayList<>(); + final Collection datanodeStorageDirs = + HddsServerUtil.getDatanodeStorageDirs(conf); - for (ContainerInfo container : containerInfos) { + for (String dir : datanodeStorageDirs) { + checkDestinationDirectory(dir); + } - final ContainerWithPipeline containerWithPipeline = - containerOperationClient - .getContainerWithPipeline(container.getContainerID()); + final ContainerOperationClient containerOperationClient = + new ContainerOperationClient(conf); - if (container.getState() == LifeCycleState.CLOSED) { + final List containerInfos = + containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList(); - final List datanodesWithContainer = - containerWithPipeline.getPipeline().getNodes(); + //logic same as the download+import on the destination datanode + initializeReplicationSupervisor(conf, containerInfos.size() * 2); - final List datanodeUUIDs = - datanodesWithContainer - .stream().map(DatanodeDetails::getUuidString) - .collect(Collectors.toList()); + replicationTasks = new ArrayList<>(); - //if datanode is specified, replicate only container if it has a - //replica. - if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) { - replicationTasks.add(new ReplicationTask( - ReplicateContainerCommand.fromSources(container.getContainerID(), - datanodesWithContainer), replicator)); - } - } + for (ContainerInfo container : containerInfos) { - } + final ContainerWithPipeline containerWithPipeline = + containerOperationClient + .getContainerWithPipeline(container.getContainerID()); - //important: override the max number of tasks. 
- setTestNo(replicationTasks.size()); + if (container.getState() == LifeCycleState.CLOSED) { - init(); + final List datanodesWithContainer = + containerWithPipeline.getPipeline().getNodes(); - timer = getMetrics().timer("replicate-container"); - runTests(this::replicateContainer); - } finally { - if (masterVolumeMetadataStoreReferenceCountedDB != null) { - masterVolumeMetadataStoreReferenceCountedDB.close(); + final List datanodeUUIDs = + datanodesWithContainer + .stream().map(DatanodeDetails::getUuidString) + .collect(Collectors.toList()); + + //if datanode is specified, replicate only container if it has a + //replica. + if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) { + replicationTasks.add(new ReplicationTask( + ReplicateContainerCommand.fromSources(container.getContainerID(), + datanodesWithContainer), replicator)); + } } } + + //important: override the max number of tasks. + setTestNo(replicationTasks.size()); + + init(); + + timer = getMetrics().timer("replicate-container"); + runTests(this::replicateContainer); return null; } From af144a2536e341453c77798b7fd4e9df7bf1fda5 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 18 Nov 2024 12:11:16 -0800 Subject: [PATCH 25/37] HDDS-11650. Address review comments Change-Id: I4d42a3b04b84ce6544cf5549aedcb1bbe37b2b86 --- .../ozone/container/common/interfaces/BaseDBHandle.java | 4 ++-- .../ozone/container/common/utils/BaseReferenceCountedDB.java | 4 ++-- .../ozone/container/common/utils/ReferenceCountedHandle.java | 4 ++-- .../hadoop/ozone/container/metadata/AbstractRDBStore.java | 2 +- .../metadata/{BaseStore.java => DBStoreManager.java} | 2 +- .../apache/hadoop/ozone/container/metadata/DatanodeStore.java | 2 +- .../apache/hadoop/ozone/container/metadata/MetadataStore.java | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) rename hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/{BaseStore.java => DBStoreManager.java} (97%) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java index 3d64f4433cf..b8b785b9a11 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.ozone.container.metadata.BaseStore; +import org.apache.hadoop.ozone.container.metadata.DBStoreManager; import java.io.Closeable; /** * DB handle abstract class. 
*/ -public abstract class BaseDBHandle implements Closeable { +public abstract class BaseDBHandle implements Closeable { private final STORE store; private final String containerDBPath; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java index d1ebd03f5f7..b482488cc62 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.ozone.container.common.interfaces.BaseDBHandle; -import org.apache.hadoop.ozone.container.metadata.BaseStore; +import org.apache.hadoop.ozone.container.metadata.DBStoreManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,7 @@ /** * Class to implement reference counting over instances of a db handle. */ -public class BaseReferenceCountedDB extends BaseDBHandle { +public class BaseReferenceCountedDB extends BaseDBHandle { private static final Logger LOG = LoggerFactory.getLogger(BaseReferenceCountedDB.class); private final AtomicInteger referenceCount; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java index b91c5fe6d63..60b2bf95bd4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java @@ -18,14 +18,14 @@ * limitations under the License. * */ -import org.apache.hadoop.ozone.container.metadata.BaseStore; +import org.apache.hadoop.ozone.container.metadata.DBStoreManager; import java.io.Closeable; /** * Class enclosing a reference counted handle to DBStore. */ -public class ReferenceCountedHandle implements Closeable { +public class ReferenceCountedHandle implements Closeable { private final BaseReferenceCountedDB dbHandle; private volatile boolean isClosed; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java index 93ac25a3bbd..5ce1a85b388 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java @@ -40,7 +40,7 @@ * Abstract Interface defining the way to interact with any rocksDB in the datanode. * @param Generic parameter defining the schema for the DB. 
*/ -public abstract class AbstractRDBStore implements BaseStore { +public abstract class AbstractRDBStore implements DBStoreManager { private final DEF dbDef; private final ManagedColumnFamilyOptions cfOptions; private static DatanodeDBProfile dbProfile; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BaseStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java similarity index 97% rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BaseStore.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java index f66c1b04e73..ec9849950a0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BaseStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java @@ -27,7 +27,7 @@ /** * Interface for interacting with datanode databases. */ -public interface BaseStore extends Closeable { +public interface DBStoreManager extends Closeable { /** * Start datanode manager. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java index 2dffc141875..3ebdc3f6295 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java @@ -34,7 +34,7 @@ /** * Interface for interacting with datanode databases. */ -public interface DatanodeStore extends BaseStore { +public interface DatanodeStore extends DBStoreManager { String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block."; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java index fa234720fe1..3f47b9985a1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java @@ -25,7 +25,7 @@ /** * Interface for interacting with database in the master volume of a datanode. */ -public interface MetadataStore extends BaseStore { +public interface MetadataStore extends DBStoreManager { /** * A Table that keeps the containerIds in a datanode. * From 79e5de8fb62c4c89face4e1419db5eff0342cba1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 18 Nov 2024 12:14:52 -0800 Subject: [PATCH 26/37] HDDS-11650. 
Remove extra line Change-Id: Icd905015d020fad583a49454e4847e8ed03e7785 --- .../org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index ffb817a1f40..b062d6272c9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -99,7 +99,6 @@ public Void call() throws Exception { } } - public Void replicate() throws Exception { OzoneConfiguration conf = createOzoneConfiguration(); From b0ffe5d3d1f46e535b723cc92180f9d666937a0f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 19 Nov 2024 08:55:25 -0800 Subject: [PATCH 27/37] HDDS-11650. Address review comment and rename metadata store class name Change-Id: I24693b81d1409120537f37289aec886bad845588 --- .../org/apache/hadoop/ozone/OzoneConsts.java | 2 +- ...va => WitnessedContainerDBDefinition.java} | 10 ++++---- ...a => WitnessedContainerMetadataStore.java} | 2 +- ... WitnessedContainerMetadataStoreImpl.java} | 24 ++++++++++--------- .../container/ozoneimpl/OzoneContainer.java | 15 ++++++------ .../ozone/debug/DBDefinitionFactory.java | 4 ++-- .../freon/ClosedContainerReplicator.java | 14 ++++++----- 7 files changed, 38 insertions(+), 33 deletions(-) rename hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/{MasterVolumeDBDefinition.java => WitnessedContainerDBDefinition.java} (86%) rename hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/{MetadataStore.java => WitnessedContainerMetadataStore.java} (94%) rename hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/{MasterVolumeMetadataStore.java => WitnessedContainerMetadataStoreImpl.java} (72%) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 0eeedce1c64..49bfa1eae21 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -122,7 +122,7 @@ public final class OzoneConsts { public static final String OM_DB_BACKUP_PREFIX = "om.db.backup."; public static final String SCM_DB_BACKUP_PREFIX = "scm.db.backup."; public static final String CONTAINER_DB_NAME = "container.db"; - public static final String CONTAINER_META_DB_NAME = "container_meta.db"; + public static final String WITNESSED_CONTAINER_DB_NAME = "witnessed_container.db"; public static final String STORAGE_DIR_CHUNKS = "chunks"; public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java similarity index 86% rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java index 633561bc812..587da1608e5 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java @@ -32,7 +32,7 @@ /** * Class for defining the schema for master volume in a datanode. */ -public final class MasterVolumeDBDefinition extends DBDefinition.WithMap { +public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap { private static final String CONTAINER_IDS_TABLE_NAME = "containerIds"; @@ -46,19 +46,19 @@ public final class MasterVolumeDBDefinition extends DBDefinition.WithMap { COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( CONTAINER_IDS_TABLE); - private static final MasterVolumeDBDefinition INSTANCE = new MasterVolumeDBDefinition(); + private static final WitnessedContainerDBDefinition INSTANCE = new WitnessedContainerDBDefinition(); - public static MasterVolumeDBDefinition get() { + public static WitnessedContainerDBDefinition get() { return INSTANCE; } - private MasterVolumeDBDefinition() { + private WitnessedContainerDBDefinition() { super(COLUMN_FAMILIES); } @Override public String getName() { - return OzoneConsts.CONTAINER_META_DB_NAME; + return OzoneConsts.WITNESSED_CONTAINER_DB_NAME; } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java similarity index 94% rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java index 3f47b9985a1..c46ca66c571 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java @@ -25,7 +25,7 @@ /** * Interface for interacting with database in the master volume of a datanode. */ -public interface MetadataStore extends DBStoreManager { +public interface WitnessedContainerMetadataStore extends DBStoreManager { /** * A Table that keeps the containerIds in a datanode. * diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java similarity index 72% rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java index f8f7a80a706..395a44593a3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/MasterVolumeMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java @@ -36,15 +36,16 @@ /** * Class for interacting with database in the master volume of a datanode. 
*/ -public final class MasterVolumeMetadataStore extends AbstractRDBStore - implements MetadataStore { +public final class WitnessedContainerMetadataStoreImpl extends AbstractRDBStore + implements WitnessedContainerMetadataStore { private Table containerIdsTable; - private static final ConcurrentMap> INSTANCES = + private static final ConcurrentMap> INSTANCES = new ConcurrentHashMap<>(); - public static ReferenceCountedHandle get(ConfigurationSource conf) throws IOException { - String dbDirPath = DBStoreBuilder.getDBDirPath(MasterVolumeDBDefinition.get(), conf).getAbsolutePath(); + public static ReferenceCountedHandle get(ConfigurationSource conf) + throws IOException { + String dbDirPath = DBStoreBuilder.getDBDirPath(WitnessedContainerDBDefinition.get(), conf).getAbsolutePath(); try { return new ReferenceCountedHandle<>(INSTANCES.compute(dbDirPath, (k, v) -> { if (v != null) { @@ -52,10 +53,11 @@ public static ReferenceCountedHandle get(Configuratio } if (v == null || v.isClosed()) { try { - MasterVolumeMetadataStore masterVolumeMetadataStore = new MasterVolumeMetadataStore(conf, false); - BaseReferenceCountedDB referenceCountedDB = - new BaseReferenceCountedDB<>(masterVolumeMetadataStore, - masterVolumeMetadataStore.getStore().getDbLocation().getAbsolutePath()); + WitnessedContainerMetadataStore + witnessedContainerMetadataStore = new WitnessedContainerMetadataStoreImpl(conf, false); + BaseReferenceCountedDB referenceCountedDB = + new BaseReferenceCountedDB<>(witnessedContainerMetadataStore, + witnessedContainerMetadataStore.getStore().getDbLocation().getAbsolutePath()); referenceCountedDB.incrementReference(); return referenceCountedDB; } catch (IOException e) { @@ -69,8 +71,8 @@ public static ReferenceCountedHandle get(Configuratio } } - private MasterVolumeMetadataStore(ConfigurationSource config, boolean openReadOnly) throws IOException { - super(MasterVolumeDBDefinition.get(), config, openReadOnly); + private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean openReadOnly) throws IOException { + super(WitnessedContainerDBDefinition.get(), config, openReadOnly); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index dfa5c7ff59a..06018b748e5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -63,7 +63,8 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume.VolumeType; import org.apache.hadoop.ozone.container.common.volume.StorageVolumeChecker; import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.StaleRecoveringContainerScrubbingService; -import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ReplicationServer; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; @@ -136,7 +137,7 @@ public class OzoneContainer { private ScheduledExecutorService dbCompactionExecutorService; private final ContainerMetrics 
metrics; - private ReferenceCountedHandle masterVolumeMetadataStore; + private ReferenceCountedHandle witnessedContainerMetadataStore; enum InitializingStatus { UNINITIALIZED, INITIALIZING, INITIALIZED @@ -190,8 +191,8 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, long recoveringContainerTimeout = config.getTimeDuration( OZONE_RECOVERING_CONTAINER_TIMEOUT, OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - this.masterVolumeMetadataStore = MasterVolumeMetadataStore.get(conf); - containerSet = new ContainerSet(masterVolumeMetadataStore.getStore().getContainerIdsTable(), + this.witnessedContainerMetadataStore = WitnessedContainerMetadataStoreImpl.get(conf); + containerSet = new ContainerSet(witnessedContainerMetadataStore.getStore().getContainerIdsTable(), recoveringContainerTimeout); metadataScanner = null; @@ -545,9 +546,9 @@ public void stop() { recoveringContainerScrubbingService.shutdown(); IOUtils.closeQuietly(metrics); ContainerMetrics.remove(); - if (this.masterVolumeMetadataStore != null) { - this.masterVolumeMetadataStore.close(); - this.masterVolumeMetadataStore = null; + if (this.witnessedContainerMetadataStore != null) { + this.witnessedContainerMetadataStore.close(); + this.witnessedContainerMetadataStore = null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java index 7209c734104..ca79aa41fa4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.metadata.MasterVolumeDBDefinition; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition; @@ -58,7 +58,7 @@ private DBDefinitionFactory() { static { final Map map = new HashMap<>(); Arrays.asList(SCMDBDefinition.get(), OMDBDefinition.get(), ReconSCMDBDefinition.get(), - MasterVolumeDBDefinition.get()) + WitnessedContainerDBDefinition.get()) .forEach(dbDefinition -> map.put(dbDefinition.getName(), dbDefinition)); DB_MAP = Collections.unmodifiableMap(map); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index b062d6272c9..7d64ca4a9a6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -35,7 +35,8 @@ import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.metadata.MasterVolumeMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; +import 
org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicator; @@ -84,7 +85,7 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements private ContainerReplicator replicator; private Timer timer; - private ReferenceCountedHandle masterVolumeMetadataStoreReferenceCountedDB; + private ReferenceCountedHandle witnessedContainerMetadataStore; private List replicationTasks; @@ -93,8 +94,8 @@ public Void call() throws Exception { try { return replicate(); } finally { - if (masterVolumeMetadataStoreReferenceCountedDB != null) { - masterVolumeMetadataStoreReferenceCountedDB.close(); + if (witnessedContainerMetadataStore != null) { + witnessedContainerMetadataStore.close(); } } } @@ -186,8 +187,9 @@ private void initializeReplicationSupervisor( if (fakeDatanodeUuid.isEmpty()) { fakeDatanodeUuid = UUID.randomUUID().toString(); } - ReferenceCountedHandle referenceCountedDS = MasterVolumeMetadataStore.get(conf); - this.masterVolumeMetadataStoreReferenceCountedDB = referenceCountedDS; + ReferenceCountedHandle referenceCountedDS = + WitnessedContainerMetadataStoreImpl.get(conf); + this.witnessedContainerMetadataStore = referenceCountedDS; ContainerSet containerSet = new ContainerSet(referenceCountedDS.getStore().getContainerIdsTable(), 1000); ContainerMetrics metrics = ContainerMetrics.create(conf); From 7a0e34167def03df64823d85fc2e3a6de8a9b52f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 19 Nov 2024 15:40:28 -0800 Subject: [PATCH 28/37] HDDS-11650. Address review comments Change-Id: Ie1d1f7163a04990f0447517b0e4e2f89fc8bc48b --- .../hadoop/hdds/utils/db/Proto2EnumCodec.java | 98 ------------------- .../container/common/impl/ContainerSet.java | 10 +- .../common/utils/BaseReferenceCountedDB.java | 92 ----------------- .../common/utils/ReferenceCountedHandle.java | 56 ----------- .../WitnessedContainerDBDefinition.java | 9 +- .../WitnessedContainerMetadataStore.java | 3 +- .../WitnessedContainerMetadataStoreImpl.java | 26 ++--- .../container/ozoneimpl/OzoneContainer.java | 11 +-- .../freon/ClosedContainerReplicator.java | 7 +- 9 files changed, 24 insertions(+), 288 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java deleted file mode 100644 index d206b17a9b1..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2EnumCodec.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils.db; - -import org.apache.ratis.thirdparty.com.google.protobuf.ProtocolMessageEnum; -import jakarta.annotation.Nonnull; - -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -/** - * Codecs to serialize/deserialize Protobuf v2 enums. - */ -public final class Proto2EnumCodec - implements Codec { - private static final ConcurrentMap, - Codec> CODECS - = new ConcurrentHashMap<>(); - private static final IntegerCodec INTEGER_CODEC = IntegerCodec.get(); - - /** - * @return the {@link Codec} for the given class. - */ - public static Codec get(T t) { - final Codec codec = CODECS.computeIfAbsent(t.getClass(), - key -> new Proto2EnumCodec<>(t)); - return (Codec) codec; - } - - private final Class clazz; - - private Proto2EnumCodec(M m) { - this.clazz = (Class) m.getClass(); - } - - @Override - public Class getTypeClass() { - return clazz; - } - - @Override - public boolean supportCodecBuffer() { - return INTEGER_CODEC.supportCodecBuffer(); - } - - @Override - public CodecBuffer toCodecBuffer(@Nonnull M value, - CodecBuffer.Allocator allocator) throws IOException { - return INTEGER_CODEC.toCodecBuffer(value.getNumber(), allocator); - } - - private M parseFrom(Integer value) throws IOException { - try { - return (M) this.clazz.getDeclaredMethod("forNumber", int.class).invoke(null, value); - } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) { - throw new IOException(e); - } - } - - @Override - public M fromCodecBuffer(@Nonnull CodecBuffer buffer) - throws IOException { - return parseFrom(INTEGER_CODEC.fromCodecBuffer(buffer)); - } - - @Override - public byte[] toPersistedFormat(M value) { - return INTEGER_CODEC.toPersistedFormat(value.getNumber()); - } - - @Override - public M fromPersistedFormat(byte[] bytes) throws IOException { - return parseFrom(INTEGER_CODEC.fromPersistedFormat(bytes)); - } - - @Override - public M copyObject(M message) { - // proto messages are immutable - return message; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 8be48e285ee..d3453141a55 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -69,18 +69,18 @@ public class ContainerSet implements Iterable> { new ConcurrentSkipListMap<>(); private Clock clock; private long recoveringTimeout; - private final Table containerIdsTable; + private final Table containerIdsTable; @VisibleForTesting public ContainerSet(long recoveringTimeout) { this(new InMemoryTestTable<>(), recoveringTimeout); } - public ContainerSet(Table continerIdsTable, long recoveringTimeout) { + public ContainerSet(Table continerIdsTable, long recoveringTimeout) { 
this(continerIdsTable, recoveringTimeout, false); } - public ContainerSet(Table continerIdsTable, long recoveringTimeout, boolean readOnly) { + public ContainerSet(Table continerIdsTable, long recoveringTimeout, boolean readOnly) { this.clock = Clock.system(ZoneOffset.UTC); this.containerIdsTable = continerIdsTable; this.recoveringTimeout = recoveringTimeout; @@ -154,7 +154,7 @@ private boolean addContainer(Container container, boolean overwriteMissingCon } try { if (containerIdsTable != null) { - containerIdsTable.put(containerId, containerState); + containerIdsTable.put(containerId, containerState.toString()); } } catch (IOException e) { throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); @@ -469,7 +469,7 @@ public Set getMissingContainerSet() { return missingContainerSet; } - public Table getContainerIdsTable() { + public Table getContainerIdsTable() { return containerIdsTable; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java deleted file mode 100644 index b482488cc62..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/BaseReferenceCountedDB.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.hadoop.ozone.container.common.interfaces.BaseDBHandle; -import org.apache.hadoop.ozone.container.metadata.DBStoreManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Class to implement reference counting over instances of a db handle. 
- */ -public class BaseReferenceCountedDB extends BaseDBHandle { - private static final Logger LOG = - LoggerFactory.getLogger(BaseReferenceCountedDB.class); - private final AtomicInteger referenceCount; - - public BaseReferenceCountedDB(STORE store, String containerDBPath) { - super(store, containerDBPath); - this.referenceCount = new AtomicInteger(0); - } - - public void incrementReference() { - this.referenceCount.incrementAndGet(); - if (LOG.isTraceEnabled()) { - LOG.trace("IncRef {} to refCnt {}, stackTrace: {}", getContainerDBPath(), - referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); - } - } - - public void decrementReference() { - int refCount = this.referenceCount.decrementAndGet(); - Preconditions.checkArgument(refCount >= 0, "refCount:", refCount); - if (LOG.isTraceEnabled()) { - LOG.trace("DecRef {} to refCnt {}, stackTrace: {}", getContainerDBPath(), - referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); - } - } - - public boolean cleanup() { - if (getStore() != null && getStore().isClosed() - || referenceCount.get() == 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Close {} refCnt {}", getContainerDBPath(), - referenceCount.get()); - } - try { - getStore().stop(); - return true; - } catch (Exception e) { - LOG.error("Error closing DB. Container: " + getContainerDBPath(), e); - return false; - } - } else { - return false; - } - } - - @Override - public void close() throws IOException { - decrementReference(); - } - - /** - * Returns if the underlying DB is closed. This call is threadsafe. - * @return true if the DB is closed. - */ - public boolean isClosed() { - return getStore().isClosed(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java deleted file mode 100644 index 60b2bf95bd4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedHandle.java +++ /dev/null @@ -1,56 +0,0 @@ -package org.apache.hadoop.ozone.container.common.utils; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -import org.apache.hadoop.ozone.container.metadata.DBStoreManager; - -import java.io.Closeable; - -/** - * Class enclosing a reference counted handle to DBStore. - */ -public class ReferenceCountedHandle implements Closeable { - private final BaseReferenceCountedDB dbHandle; - private volatile boolean isClosed; - - //Provide a handle with an already incremented reference. 
- public ReferenceCountedHandle(BaseReferenceCountedDB dbHandle) { - this.dbHandle = dbHandle; - this.isClosed = false; - } - - public STORE getStore() { - return dbHandle.getStore(); - } - - @Override - public void close() { - if (!isClosed) { - synchronized (this) { - if (!isClosed) { - if (!dbHandle.isClosed()) { - dbHandle.decrementReference(); - dbHandle.cleanup(); - } - this.isClosed = true; - } - } - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java index 587da1608e5..a15ab27a69d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java @@ -19,12 +19,11 @@ * */ -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.LongCodec; -import org.apache.hadoop.hdds.utils.db.Proto2EnumCodec; +import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.ozone.OzoneConsts; import java.util.Map; @@ -36,11 +35,11 @@ public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap { private static final String CONTAINER_IDS_TABLE_NAME = "containerIds"; - public static final DBColumnFamilyDefinition + public static final DBColumnFamilyDefinition CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>( CONTAINER_IDS_TABLE_NAME, LongCodec.get(), - Proto2EnumCodec.get(State.OPEN)); + StringCodec.get()); private static final Map> COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( @@ -66,7 +65,7 @@ public String getLocationConfigKey() { return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; } - public DBColumnFamilyDefinition getContainerIdsTable() { + public DBColumnFamilyDefinition getContainerIdsTable() { return CONTAINER_IDS_TABLE; } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java index c46ca66c571..b16c7b981ce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java @@ -19,7 +19,6 @@ * */ -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.utils.db.Table; /** @@ -31,5 +30,5 @@ public interface WitnessedContainerMetadataStore extends DBStoreManager { * * @return Table */ - Table getContainerIdsTable(); + Table getContainerIdsTable(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java index 395a44593a3..270daf815b2 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java @@ -20,13 +20,10 @@ */ import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; -import org.apache.hadoop.ozone.container.common.utils.BaseReferenceCountedDB; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import java.io.IOException; import java.io.UncheckedIOException; @@ -39,33 +36,24 @@ public final class WitnessedContainerMetadataStoreImpl extends AbstractRDBStore implements WitnessedContainerMetadataStore { - private Table containerIdsTable; - private static final ConcurrentMap> INSTANCES = + private Table containerIdsTable; + private static final ConcurrentMap INSTANCES = new ConcurrentHashMap<>(); - public static ReferenceCountedHandle get(ConfigurationSource conf) + public static WitnessedContainerMetadataStore get(ConfigurationSource conf) throws IOException { String dbDirPath = DBStoreBuilder.getDBDirPath(WitnessedContainerDBDefinition.get(), conf).getAbsolutePath(); try { - return new ReferenceCountedHandle<>(INSTANCES.compute(dbDirPath, (k, v) -> { - if (v != null) { - v.incrementReference(); - } + return INSTANCES.compute(dbDirPath, (k, v) -> { if (v == null || v.isClosed()) { try { - WitnessedContainerMetadataStore - witnessedContainerMetadataStore = new WitnessedContainerMetadataStoreImpl(conf, false); - BaseReferenceCountedDB referenceCountedDB = - new BaseReferenceCountedDB<>(witnessedContainerMetadataStore, - witnessedContainerMetadataStore.getStore().getDbLocation().getAbsolutePath()); - referenceCountedDB.incrementReference(); - return referenceCountedDB; + return new WitnessedContainerMetadataStoreImpl(conf, false); } catch (IOException e) { throw new UncheckedIOException(e); } } return v; - })); + }); } catch (UncheckedIOException e) { throw e.getCause(); } @@ -84,7 +72,7 @@ protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions op } @Override - public Table getContainerIdsTable() { + public Table getContainerIdsTable() { return containerIdsTable; } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 06018b748e5..d7cf90854bf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ 
-56,7 +55,6 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; @@ -137,7 +135,7 @@ public class OzoneContainer { private ScheduledExecutorService dbCompactionExecutorService; private final ContainerMetrics metrics; - private ReferenceCountedHandle witnessedContainerMetadataStore; + private WitnessedContainerMetadataStore witnessedContainerMetadataStore; enum InitializingStatus { UNINITIALIZED, INITIALIZING, INITIALIZED @@ -192,8 +190,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, OZONE_RECOVERING_CONTAINER_TIMEOUT, OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); this.witnessedContainerMetadataStore = WitnessedContainerMetadataStoreImpl.get(conf); - containerSet = new ContainerSet(witnessedContainerMetadataStore.getStore().getContainerIdsTable(), - recoveringContainerTimeout); + containerSet = new ContainerSet(witnessedContainerMetadataStore.getContainerIdsTable(), recoveringContainerTimeout); metadataScanner = null; metrics = ContainerMetrics.create(conf); @@ -342,7 +339,7 @@ public void buildContainerSet() throws IOException { for (int i = 0; i < volumeThreads.size(); i++) { volumeThreads.get(i).join(); } - try (TableIterator> itr = + try (TableIterator> itr = containerSet.getContainerIdsTable().iterator()) { Map containerIds = new HashMap<>(); while (itr.hasNext()) { @@ -547,7 +544,7 @@ public void stop() { IOUtils.closeQuietly(metrics); ContainerMetrics.remove(); if (this.witnessedContainerMetadataStore != null) { - this.witnessedContainerMetadataStore.close(); + IOUtils.close(LOG, this.witnessedContainerMetadataStore); this.witnessedContainerMetadataStore = null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 7d64ca4a9a6..656251424b2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedHandle; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; @@ -85,7 +84,7 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements private ContainerReplicator replicator; private Timer timer; - private ReferenceCountedHandle witnessedContainerMetadataStore; + private WitnessedContainerMetadataStore witnessedContainerMetadataStore; private List replicationTasks; @@ -187,10 +186,10 @@ private void initializeReplicationSupervisor( if (fakeDatanodeUuid.isEmpty()) { fakeDatanodeUuid = 
UUID.randomUUID().toString();
     }
-    ReferenceCountedHandle referenceCountedDS =
+    WitnessedContainerMetadataStore referenceCountedDS =
         WitnessedContainerMetadataStoreImpl.get(conf);
     this.witnessedContainerMetadataStore = referenceCountedDS;
-    ContainerSet containerSet = new ContainerSet(referenceCountedDS.getStore().getContainerIdsTable(), 1000);
+    ContainerSet containerSet = new ContainerSet(referenceCountedDS.getContainerIdsTable(), 1000);
     ContainerMetrics metrics = ContainerMetrics.create(conf);

From 730d75eaaabea247b463940ce03b5c4446e805d3 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 19 Nov 2024 17:28:44 -0800
Subject: [PATCH 29/37] HDDS-11667. Stop the metadata store instead of closing
 it; this avoids closing the shared column family options.

Change-Id: I71ae4da25b0340a12eb6bbaab7338ceb0823ad8b
---
 .../hadoop/ozone/container/ozoneimpl/OzoneContainer.java | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index d7cf90854bf..21b19001532 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -544,7 +544,12 @@ public void stop() {
     IOUtils.closeQuietly(metrics);
     ContainerMetrics.remove();
     if (this.witnessedContainerMetadataStore != null) {
-      IOUtils.close(LOG, this.witnessedContainerMetadataStore);
+      try {
+        this.witnessedContainerMetadataStore.stop();
+      } catch (Exception e) {
+        LOG.error(String.format("Error while stopping witnessedContainerMetadataStore. Status of store: %s",
+            witnessedContainerMetadataStore.isClosed()), e);
+      }
       this.witnessedContainerMetadataStore = null;
     }
   }

From 5446f4a5168b714d89e1a0bdfdb5a648293692b5 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 19 Nov 2024 18:27:40 -0800
Subject: [PATCH 30/37] HDDS-11650. Address review comments

Change-Id: Ie9c55a9111e215b4ae0fc58d80079ee71a5cdffb
---
 .../hadoop/ozone/container/common/impl/ContainerSet.java | 9 +++++----
 .../hadoop/ozone/container/keyvalue/KeyValueHandler.java | 2 +-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index d3453141a55..8dd35064e6b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -124,7 +124,7 @@ public boolean addContainerByOverwriteMissingContainer(Container container) t
-  public void validateContainerIsMissing(long containerId, State state) throws StorageContainerException {
+  public void ensureContainerNotMissing(long containerId, State state) throws StorageContainerException {
     if (missingContainerSet.contains(containerId)) {
       throw new StorageContainerException(String.format("Container with container Id %d with state : %s is missing in"
           + " the DN.", containerId, state),
@@ -135,17 +135,18 @@ public void validateContainerIsMissing(long containerId, State state) throws Sto
   /**
    * Add Container to container map.
    * @param container container to be added
+   * @param overwrite if true, overwrite the container even if it was previously marked missing.
    * @return If container is added to containerMap returns true, otherwise
    * false
    */
-  private boolean addContainer(Container container, boolean overwriteMissingContainers) throws
+  private boolean addContainer(Container container, boolean overwrite) throws
       StorageContainerException {
     Preconditions.checkNotNull(container, "container cannot be null");
     long containerId = container.getContainerData().getContainerID();
     State containerState = container.getContainerData().getState();
-    if (!overwriteMissingContainers) {
-      validateContainerIsMissing(containerId, containerState);
+    if (!overwrite) {
+      ensureContainerNotMissing(containerId, containerState);
     }
     if (containerMap.putIfAbsent(containerId, container) == null) {
       if (LOG.isDebugEnabled()) {

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 39c33feecec..860615e0a4b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -358,7 +358,7 @@ ContainerCommandResponseProto handleCreateContainer(
     if (containerState != RECOVERING) {
       try {
-        containerSet.validateContainerIsMissing(containerID, containerState);
+        containerSet.ensureContainerNotMissing(containerID, containerState);
       } catch (StorageContainerException ex) {
         return ContainerUtils.logAndReturnError(LOG, ex, request);
       }

From 9cbc45f0e313723aab10107de7f9ff4c97c355d8 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 19 Nov 2024 19:17:46 -0800
Subject: [PATCH 31/37] HDDS-11650.
Address review comments Change-Id: I5ba57cc8de58aed79ed41c9f1198cf9ee7fd3c27 --- .../common/interfaces/BaseDBHandle.java | 56 ------------------- .../container/common/interfaces/DBHandle.java | 25 ++++++++- 2 files changed, 22 insertions(+), 59 deletions(-) delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java deleted file mode 100644 index b8b785b9a11..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BaseDBHandle.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.ozone.container.metadata.DBStoreManager; - -import java.io.Closeable; - -/** - * DB handle abstract class. - */ -public abstract class BaseDBHandle implements Closeable { - - private final STORE store; - private final String containerDBPath; - - public BaseDBHandle(STORE store, String containerDBPath) { - this.store = store; - this.containerDBPath = containerDBPath; - } - - public STORE getStore() { - return this.store; - } - - public String getContainerDBPath() { - return this.containerDBPath; - } - - public boolean cleanup() { - return true; - } - - @Override - public String toString() { - return "DBHandle{" + - "containerDBPath='" + containerDBPath + '\'' + - ", store=" + store + - '}'; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java index aea67917b2f..839a112ed9b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java @@ -19,11 +19,30 @@ import org.apache.hadoop.ozone.container.metadata.DatanodeStore; +import java.io.Closeable; + /** - * DB handle abstract class for datanode store. + * DB handle abstract class. 
*/ -public abstract class DBHandle extends BaseDBHandle { +public abstract class DBHandle implements Closeable { + + private final DatanodeStore store; + private final String containerDBPath; + public DBHandle(DatanodeStore store, String containerDBPath) { - super(store, containerDBPath); + this.store = store; + this.containerDBPath = containerDBPath; + } + + public DatanodeStore getStore() { + return this.store; + } + + public String getContainerDBPath() { + return this.containerDBPath; + } + + public boolean cleanup() { + return true; } } From 261f8fcfa413972b460b331fd07d0e920ce2c2a1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 19 Nov 2024 20:27:46 -0800 Subject: [PATCH 32/37] HDDS-11650. Address review comments Change-Id: I24bc0bce41a0a8cccec5b0e872359502835f1231 --- .../hadoop/ozone/container/ozoneimpl/OzoneContainer.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 21b19001532..5307f393e09 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -547,8 +547,8 @@ public void stop() { try { this.witnessedContainerMetadataStore.stop(); } catch (Exception e) { - LOG.error(String.format("Error while stopping witnessedContainerMetadataStore. Status of store: %s", - witnessedContainerMetadataStore.isClosed()), e); + LOG.error("Error while stopping witnessedContainerMetadataStore. Status of store: {}", + witnessedContainerMetadataStore.isClosed(), e); } this.witnessedContainerMetadataStore = null; } From 4320d50ed76eb026ffbe7a5880284773f9479a6f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 20 Nov 2024 05:19:28 -0800 Subject: [PATCH 33/37] HDDS-11650. 
Add a test case Change-Id: I03aac18fd7547ea39519721798015adc3c09033c --- .../ozoneimpl/TestOzoneContainer.java | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 60552e7cc9d..6bc2dee685e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -20,6 +20,7 @@ import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -51,7 +52,9 @@ import java.io.File; import java.nio.file.Files; import java.nio.file.Path; +import java.util.HashSet; import java.util.Random; +import java.util.Set; import java.util.UUID; import java.util.HashMap; import java.util.List; @@ -61,6 +64,7 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * This class is used to test OzoneContainer. @@ -122,7 +126,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) volume.format(clusterId); commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); } - + List containerDatas = new ArrayList<>(); // Add containers to disk int numTestContainers = 10; for (int i = 0; i < numTestContainers; i++) { @@ -136,6 +140,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) layout, maxCap, UUID.randomUUID().toString(), datanodeDetails.getUuidString()); + containerDatas.add(keyValueContainerData); keyValueContainer = new KeyValueContainer( keyValueContainerData, conf); keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId); @@ -156,8 +161,21 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) ozoneContainer.buildContainerSet(); ContainerSet containerset = ozoneContainer.getContainerSet(); assertEquals(numTestContainers, containerset.containerCount()); - verifyCommittedSpace(ozoneContainer); + Set missingContainers = new HashSet<>(); + for (int i = 0; i < numTestContainers; i++) { + if (i %2 == 0) { + missingContainers.add(containerDatas.get(i).getContainerID()); + FileUtils.deleteDirectory(new File(containerDatas.get(i).getContainerPath())); + } + } + ozoneContainer.stop(); + ozoneContainer = ContainerTestUtils.getOzoneContainer(datanodeDetails, conf); + ozoneContainer.buildContainerSet(); + containerset = ozoneContainer.getContainerSet(); + assertEquals(numTestContainers/2, containerset.containerCount()); + assertEquals(numTestContainers/2 + numTestContainers % 2, containerset.getMissingContainerSet().size()); + assertEquals(missingContainers, containerset.getMissingContainerSet()); } @ContainerTestVersionInfo.ContainerTest From ac3918c2c8d677c4563d33fc05a81e76848d5bb7 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 20 Nov 2024 05:27:23 -0800 Subject: [PATCH 34/37] HDDS-11650. 
Stop ozone container Change-Id: I1967faced513f6a6439b66034661b2342ab0475b --- .../hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 6bc2dee685e..a691737994d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -176,6 +176,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) assertEquals(numTestContainers/2, containerset.containerCount()); assertEquals(numTestContainers/2 + numTestContainers % 2, containerset.getMissingContainerSet().size()); assertEquals(missingContainers, containerset.getMissingContainerSet()); + ozoneContainer.stop(); } @ContainerTestVersionInfo.ContainerTest From 3d9431a7d16f5bb2a8d3f409c7ffadb6083bbc59 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 20 Nov 2024 05:40:45 -0800 Subject: [PATCH 35/37] HDDS-11650. Fix checkstyle Change-Id: Icb96283b7496a3c02b0a16d3b7410fd4e649c30c --- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index a691737994d..2f2cbc81e90 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -64,7 +64,6 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * This class is used to test OzoneContainer. 
@@ -164,7 +163,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) verifyCommittedSpace(ozoneContainer); Set missingContainers = new HashSet<>(); for (int i = 0; i < numTestContainers; i++) { - if (i %2 == 0) { + if (i % 2 == 0) { missingContainers.add(containerDatas.get(i).getContainerID()); FileUtils.deleteDirectory(new File(containerDatas.get(i).getContainerPath())); } @@ -173,8 +172,8 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) ozoneContainer = ContainerTestUtils.getOzoneContainer(datanodeDetails, conf); ozoneContainer.buildContainerSet(); containerset = ozoneContainer.getContainerSet(); - assertEquals(numTestContainers/2, containerset.containerCount()); - assertEquals(numTestContainers/2 + numTestContainers % 2, containerset.getMissingContainerSet().size()); + assertEquals(numTestContainers / 2, containerset.containerCount()); + assertEquals(numTestContainers / 2 + numTestContainers % 2, containerset.getMissingContainerSet().size()); assertEquals(missingContainers, containerset.getMissingContainerSet()); ozoneContainer.stop(); } From 50b27bfae200846a0c42b0145590cb9986107451 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 20 Nov 2024 05:57:27 -0800 Subject: [PATCH 36/37] HDDS-11650. Add test case Change-Id: Id25cc023de401c9033078fd8b37b7b28f89c04cc --- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 99c3786233c..ea8e8a1d819 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -235,11 +235,15 @@ private void runTestOzoneContainerWithMissingContainer( putBlockRequest = ContainerTestHelper.getPutBlockRequest( pipeline, writeChunkRequest.getWriteChunk()); - response = client.sendCommand(putBlockRequest); assertNotNull(response); assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + // Write chunk + response = client.sendCommand(writeChunkRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + // Get Block request = ContainerTestHelper. getBlockRequest(pipeline, putBlockRequest.getPutBlock()); From 827bc86915efbb398f024ccc467de5db7ecd347d Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 20 Nov 2024 06:21:55 -0800 Subject: [PATCH 37/37] HDDS-11650. 
 Add write chunk coverage to tests

Change-Id: I52a3676fa80fd87c218dc749acd4cf5e99a78308
---
 .../ozone/container/ContainerTestHelper.java | 22 +++++++++++++---
 .../ozoneimpl/TestOzoneContainer.java | 25 +++++++++++++++++--
 2 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 2b7592e1c35..20372dcc6ea 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -293,18 +293,31 @@ public static Builder newReadChunkRequestBuilder(Pipeline pipeline,
    */
   public static ContainerCommandRequestProto getCreateContainerRequest(
       long containerID, Pipeline pipeline) throws IOException {
+    return getCreateContainerRequest(containerID, pipeline, ContainerProtos.ContainerDataProto.State.OPEN);
+  }
+
+
+  /**
+   * Returns a create container command that creates the container in the
+   * given initial state, for test purposes.
+   *
+   * @return ContainerCommandRequestProto.
+   */
+  public static ContainerCommandRequestProto getCreateContainerRequest(
+      long containerID, Pipeline pipeline, ContainerProtos.ContainerDataProto.State state) throws IOException {
     LOG.trace("addContainer: {}", containerID);
-    return getContainerCommandRequestBuilder(containerID, pipeline).build();
+    return getContainerCommandRequestBuilder(containerID, pipeline, state)
+        .build();
   }

   private static Builder getContainerCommandRequestBuilder(long containerID,
-      Pipeline pipeline) throws IOException {
+      Pipeline pipeline, ContainerProtos.ContainerDataProto.State state) throws IOException {
     Builder request = ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CreateContainer);
     request.setContainerID(containerID);
     request.setCreateContainer(
-        ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
+        ContainerProtos.CreateContainerRequestProto.getDefaultInstance().toBuilder().setState(state).build());
     request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());

     return request;
@@ -320,7 +333,8 @@ public static ContainerCommandRequestProto getCreateContainerSecureRequest(
     long containerID, Pipeline pipeline,
     Token token) throws IOException {
     LOG.trace("addContainer: {}", containerID);
-    Builder request = getContainerCommandRequestBuilder(containerID, pipeline);
+    Builder request = getContainerCommandRequestBuilder(containerID, pipeline,
+        ContainerProtos.ContainerDataProto.State.OPEN);
     if (token != null) {
       request.setEncodedToken(token.encodeToUrlString());
     }

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index ea8e8a1d819..553ea03f1fa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -250,6 +250,11 @@ private void runTestOzoneContainerWithMissingContainer(
       response = client.sendCommand(request);
       assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult());

+      // Create Container
+      request = ContainerTestHelper.getCreateContainerRequest(testContainerID, pipeline);
+      response = client.sendCommand(request);
+      assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult());
+
       // Delete Block and Delete Chunk are handled by BlockDeletingService
       // ContainerCommandRequestProto DeleteBlock and DeleteChunk requests
       // are deprecated
@@ -261,8 +266,7 @@ private void runTestOzoneContainerWithMissingContainer(
           testContainerID, containerUpdate);
       updateResponse1 = client.sendCommand(updateRequest1);
       assertNotNull(updateResponse1);
-      assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
-          response.getResult());
+      assertEquals(ContainerProtos.Result.CONTAINER_MISSING, updateResponse1.getResult());

       //Update an non-existing container
       long nonExistingContinerID =
@@ -285,6 +289,23 @@ private void runTestOzoneContainerWithMissingContainer(
           return false;
         }
       }, 1000, 30000);
+      // Create Recovering Container
+      request = ContainerTestHelper.getCreateContainerRequest(testContainerID, pipeline,
+          ContainerProtos.ContainerDataProto.State.RECOVERING);
+      response = client.sendCommand(request);
+      assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+      //write chunk on recovering container
+      response = client.sendCommand(writeChunkRequest);
+      assertNotNull(response);
+      assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+      //put block on recovering container
+      response = client.sendCommand(putBlockRequest);
+      assertNotNull(response);
+      assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+      //Get block on the recovering container should succeed now.
+      request = ContainerTestHelper.getBlockRequest(pipeline, putBlockRequest.getPutBlock());
+      response = client.sendCommand(request);
+      assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
     } finally {
       if (client != null) {
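//      Usage sketch (illustrative, not part of the patch): the overload introduced in
//      this patch lets a test create a container directly in a chosen initial state,
//      which is what the RECOVERING assertions above rely on. For example:
//
//        ContainerCommandRequestProto create = ContainerTestHelper.getCreateContainerRequest(
//            containerID, pipeline, ContainerProtos.ContainerDataProto.State.RECOVERING);
//        assertEquals(ContainerProtos.Result.SUCCESS, client.sendCommand(create).getResult());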