diff --git a/RFS/build.gradle b/RFS/build.gradle
index e727e6270..c0609058d 100644
--- a/RFS/build.gradle
+++ b/RFS/build.gradle
@@ -86,6 +86,21 @@ task runRfsWorker (type: JavaExec) {
     mainClass = 'com.rfs.RunRfsWorker'
 }
 
+task createSnapshot (type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'com.rfs.RfsCreateSnapshot'
+}
+
+task migrateMetadata (type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'com.rfs.RfsMigrateMetadata'
+}
+
+task migrateDocuments (type: JavaExec) {
+    classpath = sourceSets.main.runtimeClasspath
+    mainClass = 'com.rfs.RfsMigrateDocuments'
+}
+
 // Cleanup additional docker build directory
 clean.doFirst {
     delete project.file("./docker/build")
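The three new tasks mirror the existing runRfsWorker task, one per migration phase. Since they are plain JavaExec tasks, their program arguments should be passable through Gradle's --args option; a sketch invocation of the snapshot phase, using placeholder bucket, region, and host values (the flag names come from the Args class in RfsCreateSnapshot below):

    ./gradlew createSnapshot --args='--snapshot-name my-snapshot \
        --s3-repo-uri s3://my-bucket/my-repo --s3-region us-east-2 \
        --source-host http://localhost:19200 --target-host http://localhost:29200'

The migrateMetadata and migrateDocuments tasks follow the same pattern, with the flags defined in their respective Args classes.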
Default: 'info'", required = false, converter = Logging.ArgsConverter.class) + public Level logLevel = Level.INFO; + } + + public static void main(String[] args) throws Exception { + // Grab out args + Args arguments = new Args(); + JCommander.newBuilder() + .addObject(arguments) + .build() + .parse(args); + + final String snapshotName = arguments.snapshotName; + final String s3RepoUri = arguments.s3RepoUri; + final String s3Region = arguments.s3Region; + final String sourceHost = arguments.sourceHost; + final String sourceUser = arguments.sourceUser; + final String sourcePass = arguments.sourcePass; + final String targetHost = arguments.targetHost; + final String targetUser = arguments.targetUser; + final String targetPass = arguments.targetPass; + final Level logLevel = arguments.logLevel; + + Logging.setLevel(logLevel); + + final ConnectionDetails sourceConnection = new ConnectionDetails(sourceHost, sourceUser, sourcePass); + final ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass); + + TryHandlePhaseFailure.executeWithTryCatch(() -> { + logger.info("Running RfsWorker"); + GlobalState globalState = GlobalState.getInstance(); + OpenSearchClient sourceClient = new OpenSearchClient(sourceConnection); + OpenSearchClient targetClient = new OpenSearchClient(targetConnection); + final CmsClient cmsClient = new OpenSearchCmsClient(targetClient); + + final SnapshotCreator snapshotCreator = new S3SnapshotCreator(snapshotName, sourceClient, s3RepoUri, s3Region); + final SnapshotRunner snapshotWorker = new SnapshotRunner(globalState, cmsClient, snapshotCreator); + snapshotWorker.run(); + }); + } +} diff --git a/RFS/src/main/java/com/rfs/RfsMigrateDocuments.java b/RFS/src/main/java/com/rfs/RfsMigrateDocuments.java new file mode 100644 index 000000000..b60052222 --- /dev/null +++ b/RFS/src/main/java/com/rfs/RfsMigrateDocuments.java @@ -0,0 +1,117 @@ +package com.rfs; + +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + + +import com.rfs.cms.CmsClient; +import com.rfs.cms.OpenSearchCmsClient; +import com.rfs.common.ConnectionDetails; +import com.rfs.common.DefaultSourceRepoAccessor; +import com.rfs.common.DocumentReindexer; +import com.rfs.common.IndexMetadata; +import com.rfs.common.Logging; +import com.rfs.common.LuceneDocumentsReader; +import com.rfs.common.OpenSearchClient; +import com.rfs.common.S3Uri; +import com.rfs.common.ShardMetadata; +import com.rfs.common.S3Repo; +import com.rfs.common.SourceRepo; +import com.rfs.common.TryHandlePhaseFailure; +import com.rfs.common.SnapshotRepo; +import com.rfs.common.SnapshotShardUnpacker; +import com.rfs.version_es_7_10.ElasticsearchConstants_ES_7_10; +import com.rfs.version_es_7_10.IndexMetadataFactory_ES_7_10; +import com.rfs.version_es_7_10.ShardMetadataFactory_ES_7_10; +import com.rfs.version_es_7_10.SnapshotRepoProvider_ES_7_10; +import com.rfs.worker.DocumentsRunner; +import com.rfs.worker.GlobalState; + +public class RfsMigrateDocuments { + private static final Logger logger = LogManager.getLogger(RfsMigrateDocuments.class); + + public static class Args { + @Parameter(names = {"--snapshot-name"}, description = "The name of the snapshot to migrate", required = true) + public String snapshotName; + + @Parameter(names = {"--s3-local-dir"}, description = "The absolute path to the directory on 
diff --git a/RFS/src/main/java/com/rfs/RfsMigrateDocuments.java b/RFS/src/main/java/com/rfs/RfsMigrateDocuments.java
new file mode 100644
index 000000000..b60052222
--- /dev/null
+++ b/RFS/src/main/java/com/rfs/RfsMigrateDocuments.java
@@ -0,0 +1,117 @@
+package com.rfs;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+
+
+import com.rfs.cms.CmsClient;
+import com.rfs.cms.OpenSearchCmsClient;
+import com.rfs.common.ConnectionDetails;
+import com.rfs.common.DefaultSourceRepoAccessor;
+import com.rfs.common.DocumentReindexer;
+import com.rfs.common.IndexMetadata;
+import com.rfs.common.Logging;
+import com.rfs.common.LuceneDocumentsReader;
+import com.rfs.common.OpenSearchClient;
+import com.rfs.common.S3Uri;
+import com.rfs.common.ShardMetadata;
+import com.rfs.common.S3Repo;
+import com.rfs.common.SourceRepo;
+import com.rfs.common.TryHandlePhaseFailure;
+import com.rfs.common.SnapshotRepo;
+import com.rfs.common.SnapshotShardUnpacker;
+import com.rfs.version_es_7_10.ElasticsearchConstants_ES_7_10;
+import com.rfs.version_es_7_10.IndexMetadataFactory_ES_7_10;
+import com.rfs.version_es_7_10.ShardMetadataFactory_ES_7_10;
+import com.rfs.version_es_7_10.SnapshotRepoProvider_ES_7_10;
+import com.rfs.worker.DocumentsRunner;
+import com.rfs.worker.GlobalState;
+
+public class RfsMigrateDocuments {
+    private static final Logger logger = LogManager.getLogger(RfsMigrateDocuments.class);
+
+    public static class Args {
+        @Parameter(names = {"--snapshot-name"}, description = "The name of the snapshot to migrate", required = true)
+        public String snapshotName;
+
+        @Parameter(names = {"--s3-local-dir"}, description = "The absolute path to the directory on local disk to download S3 files to", required = true)
+        public String s3LocalDirPath;
+
+        @Parameter(names = {"--s3-repo-uri"}, description = "The S3 URI of the snapshot repo, like: s3://my-bucket/dir1/dir2", required = true)
+        public String s3RepoUri;
+
+        @Parameter(names = {"--s3-region"}, description = "The AWS Region the S3 bucket is in, like: us-east-2", required = true)
+        public String s3Region;
+
+        @Parameter(names = {"--lucene-dir"}, description = "The absolute path to the directory where we'll put the Lucene docs", required = true)
+        public String luceneDirPath;
+
+        @Parameter(names = {"--target-host"}, description = "The target host and port (e.g. http://localhost:9200)", required = true)
+        public String targetHost;
+
+        @Parameter(names = {"--target-username"}, description = "Optional. The target username; if not provided, will assume no auth on target", required = false)
+        public String targetUser = null;
+
+        @Parameter(names = {"--target-password"}, description = "Optional. The target password; if not provided, will assume no auth on target", required = false)
+        public String targetPass = null;
+
+        @Parameter(names = {"--max-shard-size-bytes"}, description = ("Optional. The maximum shard size, in bytes, to allow when"
+            + " performing the document migration. Useful for preventing disk overflow. Default: 50 * 1024 * 1024 * 1024 (50 GB)"), required = false)
+        public long maxShardSizeBytes = 50 * 1024 * 1024 * 1024L;
+
+        @Parameter(names = {"--log-level"}, description = "What log level you want. Default: 'info'", required = false, converter = Logging.ArgsConverter.class)
+        public Level logLevel = Level.INFO;
+    }
+
+    public static void main(String[] args) throws Exception {
+        // Grab out args
+        Args arguments = new Args();
+        JCommander.newBuilder()
+            .addObject(arguments)
+            .build()
+            .parse(args);
+
+        final String snapshotName = arguments.snapshotName;
+        final Path s3LocalDirPath = Paths.get(arguments.s3LocalDirPath);
+        final String s3RepoUri = arguments.s3RepoUri;
+        final String s3Region = arguments.s3Region;
+        final Path luceneDirPath = Paths.get(arguments.luceneDirPath);
+        final String targetHost = arguments.targetHost;
+        final String targetUser = arguments.targetUser;
+        final String targetPass = arguments.targetPass;
+        final long maxShardSizeBytes = arguments.maxShardSizeBytes;
+        final Level logLevel = arguments.logLevel;
+
+        Logging.setLevel(logLevel);
+
+        final ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass);
+
+        TryHandlePhaseFailure.executeWithTryCatch(() -> {
+            logger.info("Running RfsWorker");
+
+            GlobalState globalState = GlobalState.getInstance();
+            OpenSearchClient targetClient = new OpenSearchClient(targetConnection);
+            final CmsClient cmsClient = new OpenSearchCmsClient(targetClient);
+
+            final SourceRepo sourceRepo = S3Repo.create(s3LocalDirPath, new S3Uri(s3RepoUri), s3Region);
+            final SnapshotRepo.Provider repoDataProvider = new SnapshotRepoProvider_ES_7_10(sourceRepo);
+
+            final IndexMetadata.Factory indexMetadataFactory = new IndexMetadataFactory_ES_7_10(repoDataProvider);
+            final ShardMetadata.Factory shardMetadataFactory = new ShardMetadataFactory_ES_7_10(repoDataProvider);
+            final DefaultSourceRepoAccessor repoAccessor = new DefaultSourceRepoAccessor(sourceRepo);
+            final SnapshotShardUnpacker.Factory unpackerFactory = new SnapshotShardUnpacker.Factory(repoAccessor, luceneDirPath, ElasticsearchConstants_ES_7_10.BUFFER_SIZE_IN_BYTES);
+            final LuceneDocumentsReader reader = new LuceneDocumentsReader(luceneDirPath);
+            final DocumentReindexer reindexer = new DocumentReindexer(targetClient);
+
+            DocumentsRunner documentsWorker = new DocumentsRunner(globalState, cmsClient, snapshotName, maxShardSizeBytes, indexMetadataFactory, shardMetadataFactory, unpackerFactory, reader, reindexer);
+            documentsWorker.run();
+        });
+    }
+}
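One detail in the --max-shard-size-bytes default above: the literal 50 * 1024 * 1024 * 1024L evaluates left to right, so the first multiplications happen in int arithmetic (reaching 52,428,800, well within int range) and the trailing L widens the final multiplication to long, yielding 53,687,091,200 without overflow. An all-int spelling would wrap before the assignment. A minimal demonstration, with a hypothetical class name:

    public class ShardSizeDefaultDemo {
        public static void main(String[] args) {
            // Trailing L widens the last multiplication: 53687091200 (50 GiB)
            long ok = 50 * 1024 * 1024 * 1024L;
            // All-int arithmetic wraps to Integer.MIN_VALUE before widening
            long bad = 50 * 1024 * 1024 * 1024;
            System.out.println(ok + " vs " + bad); // 53687091200 vs -2147483648
        }
    }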
diff --git a/RFS/src/main/java/com/rfs/RfsMigrateMetadata.java b/RFS/src/main/java/com/rfs/RfsMigrateMetadata.java
new file mode 100644
index 000000000..41ae31bd3
--- /dev/null
+++ b/RFS/src/main/java/com/rfs/RfsMigrateMetadata.java
@@ -0,0 +1,130 @@
+package com.rfs;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+
+
+import com.rfs.cms.CmsClient;
+import com.rfs.cms.OpenSearchCmsClient;
+import com.rfs.common.ClusterVersion;
+import com.rfs.common.ConnectionDetails;
+import com.rfs.common.GlobalMetadata;
+import com.rfs.common.IndexMetadata;
+import com.rfs.common.Logging;
+import com.rfs.common.OpenSearchClient;
+import com.rfs.common.S3Uri;
+import com.rfs.common.S3Repo;
+import com.rfs.common.SourceRepo;
+import com.rfs.common.TryHandlePhaseFailure;
+import com.rfs.common.SnapshotRepo;
+import com.rfs.transformers.TransformFunctions;
+import com.rfs.transformers.Transformer;
+import com.rfs.version_es_7_10.GlobalMetadataFactory_ES_7_10;
+import com.rfs.version_es_7_10.IndexMetadataFactory_ES_7_10;
+import com.rfs.version_es_7_10.SnapshotRepoProvider_ES_7_10;
+import com.rfs.version_os_2_11.GlobalMetadataCreator_OS_2_11;
+import com.rfs.version_os_2_11.IndexCreator_OS_2_11;
+import com.rfs.worker.GlobalState;
+import com.rfs.worker.IndexRunner;
+import com.rfs.worker.MetadataRunner;
+
+public class RfsMigrateMetadata {
+    private static final Logger logger = LogManager.getLogger(RfsMigrateMetadata.class);
+
+    public static class Args {
+        @Parameter(names = {"--snapshot-name"}, description = "The name of the snapshot to migrate", required = true)
+        public String snapshotName;
+
+        @Parameter(names = {"--s3-local-dir"}, description = "The absolute path to the directory on local disk to download S3 files to", required = true)
+        public String s3LocalDirPath;
+
+        @Parameter(names = {"--s3-repo-uri"}, description = "The S3 URI of the snapshot repo, like: s3://my-bucket/dir1/dir2", required = true)
+        public String s3RepoUri;
+
+        @Parameter(names = {"--s3-region"}, description = "The AWS Region the S3 bucket is in, like: us-east-2", required = true)
+        public String s3Region;
+
+        @Parameter(names = {"--target-host"}, description = "The target host and port (e.g. http://localhost:9200)", required = true)
+        public String targetHost;
+
+        @Parameter(names = {"--target-username"}, description = "Optional. The target username; if not provided, will assume no auth on target", required = false)
+        public String targetUser = null;
+
+        @Parameter(names = {"--target-password"}, description = "Optional. The target password; if not provided, will assume no auth on target", required = false)
+        public String targetPass = null;
+
+        @Parameter(names = {"--index-allowlist"}, description = ("Optional. List of index names to migrate"
+            + " (e.g. 'logs_2024_01, logs_2024_02'). Default: all indices"), required = false)
+        public List<String> indexAllowlist = List.of();
+
+        @Parameter(names = {"--index-template-allowlist"}, description = ("Optional. List of index template names to migrate"
+            + " (e.g. 'posts_index_template1, posts_index_template2'). Default: empty list"), required = false)
+        public List<String> indexTemplateAllowlist = List.of();
+
+        @Parameter(names = {"--component-template-allowlist"}, description = ("Optional. List of component template names to migrate"
+            + " (e.g. 'posts_template1, posts_template2'). Default: empty list"), required = false)
+        public List<String> componentTemplateAllowlist = List.of();
+
+        //https://opensearch.org/docs/2.11/api-reference/cluster-api/cluster-awareness/
+        @Parameter(names = {"--min-replicas"}, description = ("Optional. The minimum number of replicas configured for migrated indices on the target."
+            + " This can be useful for migrating to targets which use zonal deployments and require additional replicas to meet zone requirements. Default: 0")
+            , required = false)
+        public int minNumberOfReplicas = 0;
+
+        @Parameter(names = {"--log-level"}, description = "What log level you want. Default: 'info'", required = false, converter = Logging.ArgsConverter.class)
+        public Level logLevel = Level.INFO;
+    }
+
+    public static void main(String[] args) throws Exception {
+        // Grab out args
+        Args arguments = new Args();
+        JCommander.newBuilder()
+            .addObject(arguments)
+            .build()
+            .parse(args);
+
+        final String snapshotName = arguments.snapshotName;
+        final Path s3LocalDirPath = Paths.get(arguments.s3LocalDirPath);
+        final String s3RepoUri = arguments.s3RepoUri;
+        final String s3Region = arguments.s3Region;
+        final String targetHost = arguments.targetHost;
+        final String targetUser = arguments.targetUser;
+        final String targetPass = arguments.targetPass;
+        final List<String> indexTemplateAllowlist = arguments.indexTemplateAllowlist;
+        final List<String> componentTemplateAllowlist = arguments.componentTemplateAllowlist;
+        final int awarenessDimensionality = arguments.minNumberOfReplicas + 1;
+        final Level logLevel = arguments.logLevel;
+
+        Logging.setLevel(logLevel);
+
+        final ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass);
+
+        TryHandlePhaseFailure.executeWithTryCatch(() -> {
+            logger.info("Running RfsWorker");
+            GlobalState globalState = GlobalState.getInstance();
+            OpenSearchClient targetClient = new OpenSearchClient(targetConnection);
+            final CmsClient cmsClient = new OpenSearchCmsClient(targetClient);
+
+            final SourceRepo sourceRepo = S3Repo.create(s3LocalDirPath, new S3Uri(s3RepoUri), s3Region);
+            final SnapshotRepo.Provider repoDataProvider = new SnapshotRepoProvider_ES_7_10(sourceRepo);
+            final GlobalMetadata.Factory metadataFactory = new GlobalMetadataFactory_ES_7_10(repoDataProvider);
+            final GlobalMetadataCreator_OS_2_11 metadataCreator = new GlobalMetadataCreator_OS_2_11(targetClient, List.of(), componentTemplateAllowlist, indexTemplateAllowlist);
+            final Transformer transformer = TransformFunctions.getTransformer(ClusterVersion.ES_7_10, ClusterVersion.OS_2_11, awarenessDimensionality);
+            MetadataRunner metadataWorker = new MetadataRunner(globalState, cmsClient, snapshotName, metadataFactory, metadataCreator, transformer);
+            metadataWorker.run();
+
+            final IndexMetadata.Factory indexMetadataFactory = new IndexMetadataFactory_ES_7_10(repoDataProvider);
+            final IndexCreator_OS_2_11 indexCreator = new IndexCreator_OS_2_11(targetClient);
+            final IndexRunner indexWorker = new IndexRunner(globalState, cmsClient, snapshotName, indexMetadataFactory, indexCreator, transformer);
+            indexWorker.run();
+        });
+    }
+}
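A reading of the replica arithmetic in main above (inferred from the code, not from upstream documentation): minNumberOfReplicas + 1 converts a replica count into a total copy count per shard, primary included, and that total is the awareness dimensionality handed to the transformer. The linked cluster-awareness page suggests the intent: on a zone-aware target the copy count should line up with the number of zones, so --min-replicas 2 yields a dimensionality of 3, matching a three-zone deployment.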
diff --git a/RFS/src/main/java/com/rfs/RunRfsWorker.java b/RFS/src/main/java/com/rfs/RunRfsWorker.java
index 245c712a6..2bf8ea83c 100644
--- a/RFS/src/main/java/com/rfs/RunRfsWorker.java
+++ b/RFS/src/main/java/com/rfs/RunRfsWorker.java
@@ -2,14 +2,10 @@
 
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ObjectNode;
 
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.util.Arrays;
 import java.util.List;
-import java.util.Optional;
 
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.Logger;
@@ -17,7 +13,6 @@
 
 import com.rfs.cms.CmsClient;
-import com.rfs.cms.CmsEntry;
 import com.rfs.cms.OpenSearchCmsClient;
 import com.rfs.common.ClusterVersion;
 import com.rfs.common.ConnectionDetails;
 
@@ -33,6 +28,7 @@
 import com.rfs.common.S3Repo;
 import com.rfs.common.SnapshotCreator;
 import com.rfs.common.SourceRepo;
+import com.rfs.common.TryHandlePhaseFailure;
 import com.rfs.common.S3SnapshotCreator;
 import com.rfs.common.SnapshotRepo;
 import com.rfs.common.SnapshotShardUnpacker;
@@ -49,9 +45,7 @@
 import com.rfs.worker.GlobalState;
 import com.rfs.worker.IndexRunner;
 import com.rfs.worker.MetadataRunner;
-import com.rfs.worker.Runner;
 import com.rfs.worker.SnapshotRunner;
-import com.rfs.worker.WorkerStep;
 
 public class RunRfsWorker {
     private static final Logger logger = LogManager.getLogger(RunRfsWorker.class);
@@ -146,7 +140,7 @@ public static void main(String[] args) throws Exception {
         final ConnectionDetails sourceConnection = new ConnectionDetails(sourceHost, sourceUser, sourcePass);
         final ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass);
 
-        try {
+        TryHandlePhaseFailure.executeWithTryCatch(() -> {
             logger.info("Running RfsWorker");
             GlobalState globalState = GlobalState.getInstance();
             OpenSearchClient sourceClient = new OpenSearchClient(sourceConnection);
@@ -177,31 +171,6 @@ public static void main(String[] args) throws Exception {
             final DocumentReindexer reindexer = new DocumentReindexer(targetClient);
             DocumentsRunner documentsWorker = new DocumentsRunner(globalState, cmsClient, snapshotName, maxShardSizeBytes, indexMetadataFactory, shardMetadataFactory, unpackerFactory, reader, reindexer);
             documentsWorker.run();
-
-        } catch (Runner.PhaseFailed e) {
-            logPhaseFailureRecord(e.phase, e.nextStep, e.cmsEntry, e.getCause());
-            throw e;
-        } catch (Exception e) {
-            logger.error("Unexpected error running RfsWorker", e);
-            throw e;
-        }
-    }
-
-    public static void logPhaseFailureRecord(GlobalState.Phase phase, WorkerStep nextStep, Optional<CmsEntry.Base> cmsEntry, Throwable e) {
-        ObjectNode errorBlob = new ObjectMapper().createObjectNode();
-        errorBlob.put("exceptionMessage", e.getMessage());
-        errorBlob.put("exceptionClass", e.getClass().getSimpleName());
-        errorBlob.put("exceptionTrace", Arrays.toString(e.getStackTrace()));
-
-        errorBlob.put("phase", phase.toString());
-
-        String currentStep = (nextStep != null) ? nextStep.getClass().getSimpleName() : "null";
-        errorBlob.put("currentStep", currentStep);
-
-        String currentEntry = (cmsEntry.isPresent()) ? cmsEntry.get().toRepresentationString() : "null";
-        errorBlob.put("cmsEntry", currentEntry);
-
-
-        logger.error(errorBlob.toString());
+        });
     }
 }
diff --git a/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java b/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java
new file mode 100644
index 000000000..c8e231748
--- /dev/null
+++ b/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java
@@ -0,0 +1,53 @@
+package com.rfs.common;
+
+import java.util.Arrays;
+import java.util.Optional;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.rfs.cms.CmsEntry;
+import com.rfs.worker.GlobalState;
+import com.rfs.worker.Runner;
+import com.rfs.worker.WorkerStep;
+
+public class TryHandlePhaseFailure {
+    private static final Logger logger = LogManager.getLogger(TryHandlePhaseFailure.class);
+
+    @FunctionalInterface
+    public static interface TryBlock {
+        void run() throws Exception;
+    }
+
+    public static void executeWithTryCatch(TryBlock tryBlock) throws Exception {
+        try {
+            tryBlock.run();
+        } catch (Runner.PhaseFailed e) {
+            logPhaseFailureRecord(e.phase, e.nextStep, e.cmsEntry, e.getCause());
+            throw e;
+        } catch (Exception e) {
+            logger.error("Unexpected error running RfsWorker", e);
+            throw e;
+        }
+    }
+
+    public static void logPhaseFailureRecord(GlobalState.Phase phase, WorkerStep nextStep, Optional<CmsEntry.Base> cmsEntry, Throwable e) {
+        ObjectNode errorBlob = new ObjectMapper().createObjectNode();
+        errorBlob.put("exceptionMessage", e.getMessage());
+        errorBlob.put("exceptionClass", e.getClass().getSimpleName());
+        errorBlob.put("exceptionTrace", Arrays.toString(e.getStackTrace()));
+
+        errorBlob.put("phase", phase.toString());
+
+        String currentStep = (nextStep != null) ? nextStep.getClass().getSimpleName() : "null";
+        errorBlob.put("currentStep", currentStep);
+
+        String currentEntry = (cmsEntry.isPresent()) ? cmsEntry.get().toRepresentationString() : "null";
+        errorBlob.put("cmsEntry", currentEntry);
+
+
+        logger.error(errorBlob.toString());
+    }
+}
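The custom TryBlock interface, rather than java.lang.Runnable, is presumably needed because Runnable.run() declares no checked exceptions, so the throwing lambdas passed in by the entry points above would not compile against it. A stripped-down sketch of the pattern, with hypothetical names:

    @FunctionalInterface
    interface ThrowingBlock {
        void run() throws Exception; // unlike Runnable.run(), may throw checked exceptions
    }

    static void execute(ThrowingBlock block) throws Exception {
        try {
            block.run();
        } catch (Exception e) {
            // centralize logging here, then rethrow so the process still fails
            throw e;
        }
    }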
diff --git a/RFS/src/main/java/com/rfs/worker/DocumentsStep.java b/RFS/src/main/java/com/rfs/worker/DocumentsStep.java
index 4eddb407e..add57d363 100644
--- a/RFS/src/main/java/com/rfs/worker/DocumentsStep.java
+++ b/RFS/src/main/java/com/rfs/worker/DocumentsStep.java
@@ -370,7 +370,7 @@ public void run() {
             logger.info("Migrating docs: Index " + workItem.indexName + ", Shard " + workItem.shardId);
             shardMetadata = members.shardMetadataFactory.fromRepo(members.snapshotName, workItem.indexName, workItem.shardId);
 
-            logger.info("Shard size: " + shardMetadata.getTotalSizeBytes());
+            logger.info("Shard size: " + shardMetadata.getTotalSizeBytes() + " bytes");
             if (shardMetadata.getTotalSizeBytes() > members.maxShardSizeBytes) {
                 throw new ShardTooLarge(shardMetadata.getTotalSizeBytes(), members.maxShardSizeBytes);
             }
diff --git a/RFS/src/main/java/com/rfs/worker/IndexStep.java b/RFS/src/main/java/com/rfs/worker/IndexStep.java
index f534455bb..8f2ec68c8 100644
--- a/RFS/src/main/java/com/rfs/worker/IndexStep.java
+++ b/RFS/src/main/java/com/rfs/worker/IndexStep.java
@@ -3,6 +3,7 @@
 import java.time.Instant;
 import java.util.List;
 import java.util.Optional;
+import java.util.stream.Collectors;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -262,7 +263,10 @@ public void run() {
             logger.info("Pulling a list of indices to migrate from the CMS...");
             workItems = members.cmsClient.getAvailableIndexWorkItems(MAX_WORK_ITEMS);
             logger.info("Pulled " + workItems.size() + " indices to migrate:");
-            logger.info(workItems.toString());
+            List<String> representationStrings = workItems.stream()
+                .map(CmsEntry.IndexWorkItem::toRepresentationString)
+                .collect(Collectors.toList());
+            logger.info(representationStrings);
         }
 
     @Override