Add migration to special field #7368

Merged
merged 6 commits into from Jan 19, 2021
Changes from 4 commits
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -20,6 +20,7 @@ Note that this project **does not** adhere to [Semantic Versioning](http://semve
### Changed

- The content of the field `timestamp` is migrated to `creationdate`. If one configured "update timestamp", it is migrated to `modificationdate` instead. [koppor#130](https://github.com/koppor/jabref/issues/130)
- The JabRef-specific meta-data content in the `keywords` field, such as priorities (prio1, prio2, ...), is migrated to the respective special fields and removed from the keywords. [#6840](https://github.com/jabref/jabref/issues/6840)
- We fixed an issue where groups generated from authors' last names did not include all entries of the authors [#5833](https://github.com/JabRef/jabref/issues/5833)
- The export to MS Office XML now uses the month name for the field `MonthAcessed` instead of the two-digit number [#7354](https://github.com/JabRef/jabref/issues/7354)
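
To make the special-fields changelog entry above concrete, here is a minimal sketch (not part of the PR, mirroring the tests further down) of what the new migration does to a single entry; the entry content and the comma delimiter are illustrative assumptions:

```java
import java.util.List;

import org.jabref.logic.importer.ParserResult;
import org.jabref.migrations.SpecialFieldsToSeparateFields;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.SpecialField;
import org.jabref.model.entry.field.StandardField;

public class SpecialFieldMigrationSketch {
    public static void main(String[] args) {
        // An entry as written by older JabRef versions: special-field values live in the keywords.
        BibEntry entry = new BibEntry()
                .withField(StandardField.AUTHOR, "JabRef")
                .withField(StandardField.KEYWORDS, "tdd, prio1, printed");

        // ',' is the keyword delimiter; in the application it comes from the import format preferences.
        new SpecialFieldsToSeparateFields(',').performMigration(new ParserResult(List.of(entry)));

        System.out.println(entry.getField(SpecialField.PRIORITY));  // Optional[prio1]
        System.out.println(entry.getField(SpecialField.PRINTED));   // Optional[printed]
        System.out.println(entry.getField(StandardField.KEYWORDS)); // Optional[tdd]
    }
}
```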

8 changes: 5 additions & 3 deletions src/main/java/org/jabref/logic/importer/OpenDatabase.java
@@ -13,6 +13,7 @@
import org.jabref.migrations.ConvertLegacyExplicitGroups;
import org.jabref.migrations.ConvertMarkingToGroups;
import org.jabref.migrations.PostOpenMigration;
+import org.jabref.migrations.SpecialFieldsToSeparateFields;
import org.jabref.migrations.TimeStampToDateAddAndModify;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.util.FileUpdateMonitor;
@@ -72,16 +73,17 @@ public static ParserResult loadDatabase(Path fileToOpen, ImportFormatPreferences
LOGGER.debug("Synchronized special fields based on keywords");
}

-performLoadDatabaseMigrations(result, timestampPreferences);
+performLoadDatabaseMigrations(result, timestampPreferences, importFormatPreferences.getKeywordSeparator());

return result;
}

-private static void performLoadDatabaseMigrations(ParserResult parserResult, TimestampPreferences timestampPreferences) {
+private static void performLoadDatabaseMigrations(ParserResult parserResult, TimestampPreferences timestampPreferences, Character keywordDelimiter) {
List<PostOpenMigration> postOpenMigrations = Arrays.asList(
new ConvertLegacyExplicitGroups(),
new ConvertMarkingToGroups(),
-new TimeStampToDateAddAndModify(timestampPreferences)
+new TimeStampToDateAddAndModify(timestampPreferences),
+new SpecialFieldsToSeparateFields(keywordDelimiter)
);

for (PostOpenMigration migration : postOpenMigrations) {
73 changes: 73 additions & 0 deletions src/main/java/org/jabref/migrations/SpecialFieldsToSeparateFields.java
@@ -0,0 +1,73 @@
package org.jabref.migrations;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

import org.jabref.logic.importer.ParserResult;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Keyword;
import org.jabref.model.entry.KeywordList;
import org.jabref.model.entry.field.SpecialField;
import org.jabref.model.entry.field.SpecialFieldValue;

public class SpecialFieldsToSeparateFields implements PostOpenMigration {
private final KeywordList possibleKeywordsToMigrate;
private final Character keywordDelimiter;
private final Map<String, SpecialField> migrationTable = getMigrationTable();

public SpecialFieldsToSeparateFields(Character keywordDelimiter) {
List<SpecialFieldValue> specialFieldValues = Arrays.asList(SpecialFieldValue.values());
possibleKeywordsToMigrate = new KeywordList(specialFieldValues.stream()
.map(SpecialFieldValue::getKeyword)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toList()));
this.keywordDelimiter = keywordDelimiter;
}

@Override
public void performMigration(ParserResult parserResult) {
parserResult.getDatabase().getEntries().forEach(this::migrateEntry);
}

private void migrateEntry(BibEntry entry) {
for (Keyword keyword : possibleKeywordsToMigrate) {
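// The keyword text itself (e.g. "prio1") becomes the value of the target special field.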
if (entry.getKeywords(keywordDelimiter).contains(keyword) && migrationTable.containsKey(keyword.get())) {
entry.setField(migrationTable.get(keyword.get()), keyword.get());
}
}

entry.removeKeywords(possibleKeywordsToMigrate, keywordDelimiter);
}

/**
* Mapping of special field values (contained in the keywords) to their corresponding special field
*/
private Map<String, SpecialField> getMigrationTable() {
Map<String, SpecialField> map = new HashMap<>();
map.put("printed", SpecialField.PRINTED);

map.put("prio1", SpecialField.PRIORITY);
map.put("prio2", SpecialField.PRIORITY);
map.put("prio3", SpecialField.PRIORITY);

map.put("qualityAssured", SpecialField.QUALITY);

map.put("rank1", SpecialField.RANKING);
map.put("rank2", SpecialField.RANKING);
map.put("rank3", SpecialField.RANKING);
map.put("rank4", SpecialField.RANKING);
map.put("rank5", SpecialField.RANKING);

map.put("read", SpecialField.READ_STATUS);
map.put("skimmed", SpecialField.READ_STATUS);

map.put("relevant", SpecialField.RELEVANCE);

return map;
}
}
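
Note that migrateEntry removes every keyword in possibleKeywordsToMigrate, whether or not it was mapped, so the hard-coded table in getMigrationTable() has to stay in sync with the keywords produced by SpecialFieldValue. A small sketch (not part of the PR) that lists those keywords for comparison against the table:

```java
import java.util.Arrays;
import java.util.Optional;

import org.jabref.model.entry.field.SpecialFieldValue;

public class ListMigratableKeywords {
    public static void main(String[] args) {
        // Prints every keyword the migration will pick up, i.e. the contents of
        // possibleKeywordsToMigrate (e.g. prio1, prio2, printed, relevant, ...).
        Arrays.stream(SpecialFieldValue.values())
              .map(SpecialFieldValue::getKeyword)
              .filter(Optional::isPresent)
              .map(Optional::get)
              .forEach(System.out::println);
    }
}
```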
86 changes: 86 additions & 0 deletions src/test/java/org/jabref/migrations/SpecialFieldsToSeparateFieldsTest.java
@@ -0,0 +1,86 @@
package org.jabref.migrations;

import java.util.List;
import java.util.stream.Stream;

import org.jabref.logic.importer.ParserResult;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.SpecialField;
import org.jabref.model.entry.field.StandardField;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

class SpecialFieldsToSeparateFieldsTest {

@ParameterizedTest
@MethodSource("provideKeywordFieldPairs")
public void migrateToCorrectField(SpecialField expectedField, String fieldInKeyword) {
BibEntry entry = new BibEntry().withField(StandardField.AUTHOR, "JabRef")
.withField(StandardField.KEYWORDS, fieldInKeyword);

new SpecialFieldsToSeparateFields(',').performMigration(new ParserResult(List.of(entry)));

assertEquals(fieldInKeyword, entry.getField(expectedField).get());
}

@Test
public void noKeywordToMigrate() {
BibEntry entry = new BibEntry().withField(StandardField.AUTHOR, "JabRef")
.withField(StandardField.KEYWORDS, "tdd");
new SpecialFieldsToSeparateFields(',').performMigration(new ParserResult(List.of(entry)));

assertEquals("JabRef", entry.getField(StandardField.AUTHOR).get());
assertEquals("tdd", entry.getField(StandardField.KEYWORDS).get());
}

@Test
public void migrateMultipleSpecialFields() {
BibEntry entry = new BibEntry().withField(StandardField.AUTHOR, "JabRef")
.withField(StandardField.KEYWORDS, "printed, prio1");
new SpecialFieldsToSeparateFields(',').performMigration(new ParserResult(List.of(entry)));

assertEquals("prio1", entry.getField(SpecialField.PRIORITY).get());
assertEquals("printed", entry.getField(SpecialField.PRINTED).get());

}

@Test
public void migrateSpecialFieldsMixedWithKeyword() {
BibEntry entry = new BibEntry().withField(StandardField.AUTHOR, "JabRef")
.withField(StandardField.KEYWORDS, "tdd, prio1, SE");
new SpecialFieldsToSeparateFields(',').performMigration(new ParserResult(List.of(entry)));

assertEquals("prio1", entry.getField(SpecialField.PRIORITY).get());
assertEquals("tdd, SE", entry.getField(StandardField.KEYWORDS).get());

}

private static Stream<Arguments> provideKeywordFieldPairs() {
return Stream.of(
Arguments.of(
SpecialField.PRINTED, "printed"
),
Arguments.of(
SpecialField.PRIORITY, "prio1"
),
Arguments.of(
SpecialField.QUALITY, "qualityAssured"
),
Arguments.of(
SpecialField.RANKING, "rank2"
),
Arguments.of(
SpecialField.READ_STATUS, "skimmed"
),
Arguments.of(
SpecialField.RELEVANCE, "relevant"
)
);
}
}