MET-6035 Removed depublication reason from createRecordIdsToBeDepublished API call
JoanaCMS committed Aug 7, 2024
1 parent b5c7b9c commit cb958c5
Showing 6 changed files with 33 additions and 37 deletions.
@@ -83,12 +83,11 @@ public DepublishRecordIdController(DepublishRecordIdService depublishRecordIdSer
@ResponseStatus(HttpStatus.CREATED)
public void createRecordIdsToBeDepublished(@RequestHeader("Authorization") String authorization,
@PathVariable("datasetId") String datasetId,
@RequestParam(value = "depublicationReason") DepublicationReason depublicationReason,
@RequestBody String recordIdsInSeparateLines
) throws GenericMetisException {
final MetisUserView metisUserView = authenticationClient.getUserByAccessTokenInHeader(authorization);
final int added = depublishRecordIdService
.addRecordIdsToBeDepublished(metisUserView, datasetId, recordIdsInSeparateLines, depublicationReason);
.addRecordIdsToBeDepublished(metisUserView, datasetId, recordIdsInSeparateLines);
if (LOGGER.isInfoEnabled()) {
LOGGER.info("{} Depublish record ids added to dataset with datasetId: {}", added,
CRLF_PATTERN.matcher(datasetId).replaceAll(""));
@@ -114,10 +113,9 @@ public void createRecordIdsToBeDepublished(@RequestHeader("Authorization") Strin
@ResponseStatus(HttpStatus.CREATED)
public void createRecordIdsToBeDepublished(@RequestHeader("Authorization") String authorization,
@PathVariable("datasetId") String datasetId,
@RequestParam(value = "depublicationReason") DepublicationReason depublicationReason,
@RequestPart("depublicationFile") MultipartFile recordIdsFile
) throws GenericMetisException, IOException {
createRecordIdsToBeDepublished(authorization, datasetId, depublicationReason,
createRecordIdsToBeDepublished(authorization, datasetId,
new String(recordIdsFile.getBytes(), StandardCharsets.UTF_8));
}

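For context, a minimal client-side sketch of the updated call (not part of this commit). The host, the endpoint path /depublish/record_ids/{datasetId}, and the example record ids are assumptions; the controller above only shows that the body carries record ids in separate lines. The point it illustrates is that the depublicationReason query parameter is no longer sent.

import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.web.client.RestTemplate;

public class DepublishRecordIdClientSketch {

  public static void main(String[] args) {
    final RestTemplate restTemplate = new RestTemplate();

    final HttpHeaders headers = new HttpHeaders();
    headers.set(HttpHeaders.AUTHORIZATION, "Bearer <access-token>"); // placeholder token
    headers.setContentType(MediaType.TEXT_PLAIN);

    // One record id per line, as expected by createRecordIdsToBeDepublished.
    final String recordIdsInSeparateLines = "/2048001/record_1\n/2048001/record_2";

    // No depublicationReason request parameter any more: only the dataset id and the body.
    restTemplate.postForEntity(
        "https://metis-core.example.org/depublish/record_ids/{datasetId}", // host and path are assumptions
        new HttpEntity<>(recordIdsInSeparateLines, headers),
        Void.class,
        "2048001");
  }
}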
@@ -17,7 +17,6 @@
import eu.europeana.metis.core.rest.RequestLimits;
import eu.europeana.metis.core.util.DepublishRecordIdSortField;
import eu.europeana.metis.core.util.SortDirection;
import eu.europeana.metis.core.workflow.plugins.DepublicationReason;
import eu.europeana.metis.exception.BadContentException;
import java.time.Instant;
import java.util.ArrayList;
@@ -110,7 +109,7 @@ private Set<String> getNonExistingRecordIds(String datasetId, Set<String> record
* @throws BadContentException In case adding the records would violate the maximum number of depublished records that each
* dataset can have.
*/
public int createRecordIdsToBeDepublished(String datasetId, Set<String> candidateRecordIds, DepublicationReason depublicationReason)
public int createRecordIdsToBeDepublished(String datasetId, Set<String> candidateRecordIds)
throws BadContentException {

// Check list size: if this is too large we can throw exception regardless of what's in the database.
@@ -130,20 +129,19 @@ public int createRecordIdsToBeDepublished(String datasetId, Set<String> candidat
}

// Add the records and we're done.
addRecords(recordIdsToAdd, datasetId, DepublicationStatus.PENDING_DEPUBLICATION, null, depublicationReason);
addRecords(recordIdsToAdd, datasetId, DepublicationStatus.PENDING_DEPUBLICATION, null);
return recordIdsToAdd.size();
}

void addRecords(Set<String> recordIdsToAdd, String datasetId,
DepublicationStatus depublicationStatus, Instant depublicationDate, DepublicationReason depublicationReason) {
DepublicationStatus depublicationStatus, Instant depublicationDate) {
final List<DepublishRecordId> objectsToAdd = recordIdsToAdd.stream().map(recordId -> {
final DepublishRecordId depublishRecordId = new DepublishRecordId();
depublishRecordId.setId(new ObjectId());
depublishRecordId.setDatasetId(datasetId);
depublishRecordId.setRecordId(recordId);
depublishRecordId.setDepublicationStatus(depublicationStatus);
depublishRecordId.setDepublicationDate(depublicationDate);
depublishRecordId.setDepublicationReason(depublicationReason);
return depublishRecordId;
}).toList();
retryableExternalRequestForNetworkExceptions(() -> {
@@ -330,7 +328,7 @@ private Query<DepublishRecordId> prepareQueryForDepublishRecordIds(String datase
* {@link DepublicationStatus#PENDING_DEPUBLICATION}
*/
public void markRecordIdsWithDepublicationStatus(String datasetId, Set<String> recordIds,
DepublicationStatus depublicationStatus, @Nullable Date depublicationDate, DepublicationReason depublicationReason) {
DepublicationStatus depublicationStatus, @Nullable Date depublicationDate) {

// Check correctness of parameters
if (Objects.isNull(depublicationStatus) || StringUtils.isBlank(datasetId)) {
Expand All @@ -355,7 +353,7 @@ public void markRecordIdsWithDepublicationStatus(String datasetId, Set<String> r
.filter(
date -> depublicationStatus != DepublicationStatus.PENDING_DEPUBLICATION)
.map(Date::toInstant).orElse(null);
addRecords(recordIdsToAdd, datasetId, depublicationStatus, depublicationInstant, depublicationReason);
addRecords(recordIdsToAdd, datasetId, depublicationStatus, depublicationInstant);

// Compute the records to update - if there are none, we're done.
recordIdsToUpdate = new HashSet<>(recordIds);
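A short usage sketch of the updated DAO signatures (illustrative only; the DAO class name, package locations, and record ids are assumed from the surrounding diff rather than shown in it). It registers record ids as pending depublication and later marks them depublished, in both cases without the DepublicationReason argument that this commit removes.

import eu.europeana.metis.core.dao.DepublishRecordIdDao; // assumed package
import eu.europeana.metis.core.dataset.DepublishRecordId.DepublicationStatus; // assumed location of the enum
import eu.europeana.metis.exception.BadContentException;
import java.util.Date;
import java.util.Set;

class DepublishRecordIdDaoUsageSketch {

  void depublishTwoRecords(DepublishRecordIdDao depublishRecordIdDao) throws BadContentException {
    final String datasetId = "2048001";
    final Set<String> recordIds = Set.of("/2048001/record_1", "/2048001/record_2");

    // Register the record ids as pending depublication (reason no longer passed).
    depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIds);

    // Once the records have actually been depublished, mark them accordingly
    // (again without a DepublicationReason parameter; a non-null date is required for DEPUBLISHED).
    depublishRecordIdDao.markRecordIdsWithDepublicationStatus(
        datasetId, recordIds, DepublicationStatus.DEPUBLISHED, new Date());
  }
}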
@@ -123,13 +123,13 @@ private void reinstateDepublishRecordIdsStatus(IndexToPublishPlugin indexPlugin,
if (!CollectionUtils.isEmpty(publishedRecordIds)) {
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId,
publishedRecordIds.stream().map(depublishedRecordIdsByFullId::get)
.collect(Collectors.toSet()), DepublicationStatus.PENDING_DEPUBLICATION, null, null);
.collect(Collectors.toSet()), DepublicationStatus.PENDING_DEPUBLICATION, null);
}
}
} else {
// reset de-publish status, pass null, all records will be de-published
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId, null,
DepublicationStatus.PENDING_DEPUBLICATION, null, null);
DepublicationStatus.PENDING_DEPUBLICATION, null);
}
}

@@ -178,7 +178,7 @@ private void depublishRecordPostProcess(DepublishPlugin depublishPlugin, String
Collectors.mapping(Pair::getRight, Collectors.toSet())));
successfulRecords.forEach((dataset, records) ->
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(dataset, records,
DepublicationStatus.DEPUBLISHED, new Date(), null));
DepublicationStatus.DEPUBLISHED, new Date()));

// Set publication fitness to PARTIALLY FIT (if not set to the more severe UNFIT).
final Dataset dataset = datasetDao.getDatasetByDatasetId(datasetId);
@@ -195,7 +195,7 @@ private void depublishDatasetPostProcess(String datasetId) {

// Set all depublished records back to PENDING.
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId, null,
DepublicationStatus.PENDING_DEPUBLICATION, null, null);
DepublicationStatus.PENDING_DEPUBLICATION, null);
// Find latest PUBLISH Type Plugin and set dataStatus to DELETED.
final PluginWithExecutionId<MetisPlugin> latestSuccessfulPlugin = workflowExecutionDao
.getLatestSuccessfulPlugin(datasetId, OrchestratorService.PUBLISH_TYPES);
@@ -67,7 +67,7 @@ public DepublishRecordIdService(Authorizer authorizer, OrchestratorService orche
* </ul>
*/
public int addRecordIdsToBeDepublished(MetisUserView metisUserView, String datasetId,
String recordIdsInSeparateLines, DepublicationReason depublicationReason) throws GenericMetisException {
String recordIdsInSeparateLines) throws GenericMetisException {

// Authorize.
authorizer.authorizeWriteExistingDatasetById(metisUserView, datasetId);
@@ -77,7 +77,7 @@ public int addRecordIdsToBeDepublished(MetisUserView metisUserView, String datas
recordIdsInSeparateLines);

// Add the records.
return depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, normalizedRecordIds, depublicationReason);
return depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, normalizedRecordIds);
}

/**
@@ -71,7 +71,7 @@ void createRecordIdsToBeDepublishedHappyScenarioTest() throws BadContentExceptio
final String datasetId = Integer.toString(TestObjectFactory.DATASETID);
final Set<String> setTest = Set.of("1001");

depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, setTest, DepublicationReason.UNKNOWN);
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, setTest);

assertEquals(1, provider.getDatastore().find(DepublishRecordId.class).count());
assertEquals("1001",
@@ -84,7 +84,7 @@ void createRecordIdsToBeDepublishedBigNumberOfCandidateRecordIdsTest() {
final Set<String> setTest = Set.of("1008", "1009", "1010", "1011", "1012", "1013");

Throwable exception = assertThrows(BadContentException.class,
() -> depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, setTest, DepublicationReason.UNKNOWN));
() -> depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, setTest));

assertEquals(
"Can't add these records: this would violate the maximum number of records per dataset.",
@@ -99,7 +99,7 @@ void createRecordIdsToBeDepublishedBigNumberOfDepublishedRecord() {
doReturn(6L).when(depublishRecordIdDao).countDepublishRecordIdsForDataset(datasetId);

Throwable exception = assertThrows(BadContentException.class,
() -> depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, setTest, DepublicationReason.UNKNOWN));
() -> depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, setTest));

assertEquals(
"Can't add these records: this would violate the maximum number of records per dataset.",
@@ -119,7 +119,7 @@ void deletePendingRecordIdsTest() throws BadContentException {
() -> depublishRecordIdDao.deletePendingRecordIds(datasetId, biggerThanAllowedSet));

depublishRecordIdDao
.addRecords(setTest, datasetId, DepublicationStatus.PENDING_DEPUBLICATION, Instant.now(), DepublicationReason.UNKNOWN);
.addRecords(setTest, datasetId, DepublicationStatus.PENDING_DEPUBLICATION, Instant.now());
assertEquals(1, provider.getDatastore().find(DepublishRecordId.class).count());

depublishRecordIdDao.deletePendingRecordIds(datasetId, setTest);
@@ -133,7 +133,7 @@ void countSuccessfullyDepublishedRecordIdsForDatasetTest() {
final Set<String> setTest = Set.of("1003");

depublishRecordIdDao
.addRecords(setTest, datasetId, DepublicationStatus.DEPUBLISHED, Instant.now(), DepublicationReason.UNKNOWN);
.addRecords(setTest, datasetId, DepublicationStatus.DEPUBLISHED, Instant.now());
long result = depublishRecordIdDao.countSuccessfullyDepublishedRecordIdsForDataset(datasetId);
assertEquals(1L, result);
}
@@ -144,7 +144,7 @@ void getDepublishRecordIdsTest() {
final Set<String> setTest = Set.of("1004", "1005");

depublishRecordIdDao
.addRecords(setTest, datasetId, DepublicationStatus.PENDING_DEPUBLICATION, Instant.now(),DepublicationReason.UNKNOWN);
.addRecords(setTest, datasetId, DepublicationStatus.PENDING_DEPUBLICATION, Instant.now());
List<DepublishRecordIdView> find1004 = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
SortDirection.ASCENDING, "1004");
@@ -173,7 +173,7 @@ void getAllDepublishRecordIdsWithStatusTest() throws BadContentException {
DepublicationStatus.DEPUBLISHED, biggerThanAllowedSet));

depublishRecordIdDao
.addRecords(setTest, datasetId, DepublicationStatus.DEPUBLISHED, Instant.now(), DepublicationReason.UNKNOWN);
.addRecords(setTest, datasetId, DepublicationStatus.DEPUBLISHED, Instant.now());
Set<String> result = depublishRecordIdDao.getAllDepublishRecordIdsWithStatus(datasetId,
DepublishRecordIdSortField.DEPUBLICATION_STATE, SortDirection.ASCENDING,
DepublicationStatus.DEPUBLISHED, setTest);
@@ -196,17 +196,17 @@ void markRecordIdsWithDepublicationStatus_wrong_parametersTest() {

//Null depublication status
assertThrows(IllegalArgumentException.class, () -> depublishRecordIdDao
.markRecordIdsWithDepublicationStatus(datasetId, recordIdsSet, null, date, DepublicationReason.UNKNOWN));
.markRecordIdsWithDepublicationStatus(datasetId, recordIdsSet, null, date));

//Blank dataset id
assertThrows(IllegalArgumentException.class, () -> depublishRecordIdDao
.markRecordIdsWithDepublicationStatus(null, recordIdsSet,
DepublicationStatus.PENDING_DEPUBLICATION, date, DepublicationReason.UNKNOWN));
DepublicationStatus.PENDING_DEPUBLICATION, date));

//Depublished status but date null
assertThrows(IllegalArgumentException.class, () -> depublishRecordIdDao
.markRecordIdsWithDepublicationStatus(datasetId, recordIdsSet,
DepublicationStatus.DEPUBLISHED, null, DepublicationReason.UNKNOWN));
DepublicationStatus.DEPUBLISHED, null));
}

@Test
@@ -217,7 +217,7 @@ void markRecordIdsWithDepublicationStatus_all_recordIds_set_depublished_and_then
Date date = Date.from(Instant.now());

//Create recordIds
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIdsSet, DepublicationReason.UNKNOWN);
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIdsSet);
//Check stored recordIds
List<DepublishRecordIdView> findAll = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
@@ -228,7 +228,7 @@
//Set to DEPUBLISHED
depublishRecordIdDao
.markRecordIdsWithDepublicationStatus(datasetId, null, DepublicationStatus.DEPUBLISHED,
date, DepublicationReason.UNKNOWN);
date);
//Check stored recordIds
findAll = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
@@ -239,7 +239,7 @@
.equals(Date.from(depublishRecordIdView.getDepublicationDate()))));
//Set to PENDING_DEPUBLICATION
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId, null,
DepublicationStatus.PENDING_DEPUBLICATION, date, DepublicationReason.UNKNOWN);
DepublicationStatus.PENDING_DEPUBLICATION, date);
//Check stored recordIds
findAll = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
@@ -258,7 +258,7 @@ void markRecordIdsWithDepublicationStatus_specified_recordIds_set_depublished_an
Date date = Date.from(Instant.now());

//Create recordIds
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIdsToCreate, DepublicationReason.UNKNOWN);
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIdsToCreate);
//Check stored recordIds
List<DepublishRecordIdView> findAll = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
@@ -268,7 +268,7 @@ void markRecordIdsWithDepublicationStatus_specified_recordIds_set_depublished_an
.getDepublicationStatus() && null == depublishRecordIdView.getDepublicationDate()));
//Set to DEPUBLISHED
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId, recordIdsToUpdate,
DepublicationStatus.DEPUBLISHED, date, DepublicationReason.UNKNOWN);
DepublicationStatus.DEPUBLISHED, date);
//Check stored recordIds
findAll = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
@@ -279,7 +279,7 @@ void markRecordIdsWithDepublicationStatus_specified_recordIds_set_depublished_an
.equals(Date.from(depublishRecordIdView.getDepublicationDate()))).count());
//Set to PENDING_DEPUBLICATION
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId, recordIdsToUpdate,
DepublicationStatus.PENDING_DEPUBLICATION, date, DepublicationReason.UNKNOWN);
DepublicationStatus.PENDING_DEPUBLICATION, date);
//Check stored recordIds
findAll = depublishRecordIdDao
.getDepublishRecordIds(datasetId, 0, DepublishRecordIdSortField.DEPUBLICATION_STATE,
@@ -299,10 +299,10 @@ void markRecordIdsWithDepublicationStatus_depublish_non_already_existing_recordI
Date date = Date.from(Instant.now());

//Create recordIds
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIdsToCreate, DepublicationReason.UNKNOWN);
depublishRecordIdDao.createRecordIdsToBeDepublished(datasetId, recordIdsToCreate);
//Set to DEPUBLISHED
depublishRecordIdDao.markRecordIdsWithDepublicationStatus(datasetId, recordIdsToUpdate,
DepublicationStatus.DEPUBLISHED, date, DepublicationReason.UNKNOWN);
DepublicationStatus.DEPUBLISHED, date);

//Check stored recordIds
List<DepublishRecordIdView> findAll = depublishRecordIdDao
@@ -68,11 +68,11 @@ void cleanUp() {

@Test
void addRecordIdsToBeDepublishedTest() throws GenericMetisException {
depublishRecordIdService.addRecordIdsToBeDepublished(metisUserView, datasetId, "1002", DepublicationReason.UNKNOWN);
depublishRecordIdService.addRecordIdsToBeDepublished(metisUserView, datasetId, "1002");

verify(authorizer, times(1)).authorizeWriteExistingDatasetById(metisUserView, datasetId);
verify(depublishRecordIdService, times(1)).checkAndNormalizeRecordIds(any(), any());
verify(depublishRecordIdDao, times(1)).createRecordIdsToBeDepublished(any(), any(), any());
verify(depublishRecordIdDao, times(1)).createRecordIdsToBeDepublished(any(), any());
verifyNoMoreInteractions(orchestratorService);


