HDDS-11480. Refactor OM volume response tests #7265

Merged
TestOMVolumeCreateResponse.java
@@ -18,10 +18,6 @@

package org.apache.hadoop.ozone.om.response.volume;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -31,43 +27,16 @@
import org.apache.hadoop.ozone.storage.proto.
OzoneManagerStorageProtos.PersistedUserVolumeInfo;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.nio.file.Path;
import java.util.UUID;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
* This class tests OMVolumeCreateResponse.
*/
public class TestOMVolumeCreateResponse {

@TempDir
private Path folder;

private OMMetadataManager omMetadataManager;
private BatchOperation batchOperation;

@BeforeEach
public void setup() throws Exception {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null);
batchOperation = omMetadataManager.getStore().initBatchOperation();
}

@AfterEach
public void tearDown() {
if (batchOperation != null) {
batchOperation.close();
}
}
public class TestOMVolumeCreateResponse extends TestOMVolumeResponse {

@Test
public void testAddToDBBatch() throws Exception {
@@ -79,10 +48,10 @@ public void testAddToDBBatch() throws Exception {
.addVolumeNames(volumeName).build();

OMResponse omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true)
.setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
.setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true)
.setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance())
.build();

OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
@@ -125,6 +94,4 @@ void testAddToDBBatchNoOp() throws Exception {
assertEquals(0, omMetadataManager.countRowsInTable(
omMetadataManager.getVolumeTable()));
}


}
TestOMVolumeDeleteResponse.java
@@ -18,10 +18,6 @@

package org.apache.hadoop.ozone.om.response.volume;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -30,48 +26,20 @@
.OMResponse;
import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.util.UUID;
import java.nio.file.Path;

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertNull;

/**
* This class tests OMVolumeDeleteResponse.
*/
public class TestOMVolumeDeleteResponse {

@TempDir
private Path folder;

private OMMetadataManager omMetadataManager;
private BatchOperation batchOperation;

@BeforeEach
public void setup() throws Exception {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null);
batchOperation = omMetadataManager.getStore().initBatchOperation();
}

@AfterEach
public void tearDown() {
if (batchOperation != null) {
batchOperation.close();
}
}
public class TestOMVolumeDeleteResponse extends TestOMVolumeResponse {

@Test
public void testAddToDBBatch() throws Exception {

String volumeName = UUID.randomUUID().toString();
String userName = "user1";
PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder()
@@ -95,7 +63,7 @@ public void testAddToDBBatch() throws Exception {
// As we are deleting, the updated volume list should be empty.
PersistedUserVolumeInfo updatedVolumeList =
PersistedUserVolumeInfo.newBuilder()
.setObjectID(1).setUpdateID(1).build();
.setObjectID(1).setUpdateID(1).build();
OMVolumeDeleteResponse omVolumeDeleteResponse =
new OMVolumeDeleteResponse(omResponse, volumeName, userName,
updatedVolumeList);
@@ -107,7 +75,7 @@ public void testAddToDBBatch() throws Exception {
omMetadataManager.getStore().commitBatchOperation(batchOperation);

assertNull(omMetadataManager.getVolumeTable().get(
omMetadataManager.getVolumeKey(volumeName)));
omMetadataManager.getVolumeKey(volumeName)));

assertNull(omMetadataManager.getUserTable().get(
omMetadataManager.getUserKey(userName)));
@@ -127,5 +95,4 @@ public void testAddToDBBatchNoOp() {
omResponse);
assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation));
}

}
TestOMVolumeResponse.java (new file)
@@ -0,0 +1,36 @@
package org.apache.hadoop.ozone.om.response.volume;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.io.TempDir;

import java.nio.file.Path;

public class TestOMVolumeResponse {
@TempDir
private Path folder;

protected OMMetadataManager omMetadataManager;
protected BatchOperation batchOperation;

@BeforeEach
public void setup() throws Exception {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null);
batchOperation = omMetadataManager.getStore().initBatchOperation();
}

@AfterEach
public void tearDown() {
if (batchOperation != null) {
batchOperation.close();
}
}
}
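
As a minimal, hypothetical sketch (not part of this diff; the class name TestOMVolumeExampleResponse is invented for illustration), a response test now simply extends TestOMVolumeResponse and uses the protected omMetadataManager and batchOperation fields that the inherited setup() initializes, instead of declaring its own @BeforeEach/@AfterEach:

package org.apache.hadoop.ozone.om.response.volume;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;

public class TestOMVolumeExampleResponse extends TestOMVolumeResponse {

  @Test
  public void testFixtureIsInitialized() throws Exception {
    // Both protected fields come from the inherited @BeforeEach setup().
    assertNotNull(omMetadataManager);
    assertNotNull(batchOperation);
    // The volume table starts empty before any response is added to the batch.
    assertEquals(0, omMetadataManager.countRowsInTable(
        omMetadataManager.getVolumeTable()));
  }
}
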
TestOMVolumeSetOwnerResponse.java
@@ -18,10 +18,6 @@

package org.apache.hadoop.ozone.om.response.volume;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -30,45 +26,17 @@
.OMResponse;
import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.nio.file.Path;
import java.util.UUID;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
* This class tests OMVolumeSetOwnerResponse.
*/
public class TestOMVolumeSetOwnerResponse {

@TempDir
private Path folder;

private OMMetadataManager omMetadataManager;
private BatchOperation batchOperation;

@BeforeEach
public void setup() throws Exception {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null);
batchOperation = omMetadataManager.getStore().initBatchOperation();
}

@AfterEach
public void tearDown() {
if (batchOperation != null) {
batchOperation.close();
}
}

public class TestOMVolumeSetOwnerResponse extends TestOMVolumeResponse {

@Test
public void testAddToDBBatch() throws Exception {
@@ -94,25 +62,24 @@ public void testAddToDBBatch() throws Exception {
new OMVolumeCreateResponse(omResponse, omVolumeArgs, volumeList);



String newOwner = "user2";
PersistedUserVolumeInfo newOwnerVolumeList =
PersistedUserVolumeInfo.newBuilder()
.setObjectID(1)
.setUpdateID(1)
.addVolumeNames(volumeName).build();
.setObjectID(1)
.setUpdateID(1)
.addVolumeNames(volumeName).build();
PersistedUserVolumeInfo oldOwnerVolumeList =
PersistedUserVolumeInfo.newBuilder()
.setObjectID(2)
.setUpdateID(2)
.build();
.setObjectID(2)
.setUpdateID(2)
.build();
OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder()
.setOwnerName(newOwner).setAdminName(newOwner)
.setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime())
.build();

OMVolumeSetOwnerResponse omVolumeSetOwnerResponse =
new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList,
new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList,
newOwnerVolumeList, newOwnerVolumeArgs);

omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -155,6 +122,4 @@ void testAddToDBBatchNoOp() throws Exception {
assertEquals(0, omMetadataManager.countRowsInTable(
omMetadataManager.getVolumeTable()));
}


}
TestOMVolumeSetQuotaResponse.java
@@ -18,56 +18,24 @@

package org.apache.hadoop.ozone.om.response.volume;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.CreateVolumeResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.nio.file.Path;
import java.util.UUID;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
* This class tests OMVolumeSetQuotaResponse.
*/
public class TestOMVolumeSetQuotaResponse {

@TempDir
private Path folder;

private OMMetadataManager omMetadataManager;
private BatchOperation batchOperation;

@BeforeEach
public void setup() throws Exception {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null);
batchOperation = omMetadataManager.getStore().initBatchOperation();
}

@AfterEach
public void tearDown() {
if (batchOperation != null) {
batchOperation.close();
}
}

public class TestOMVolumeSetQuotaResponse extends TestOMVolumeResponse {

@Test
public void testAddToDBBatch() throws Exception {
@@ -123,6 +91,4 @@ void testAddToDBBatchNoOp() throws Exception {
assertEquals(0, omMetadataManager.countRowsInTable(
omMetadataManager.getVolumeTable()));
}


}