diff --git a/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java b/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java index f0cd1c5897..40982a4b34 100644 --- a/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java +++ b/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java @@ -165,7 +165,7 @@ public void run() { SnapshotRunner.runAndWaitForCompletion(snapshotCreator); } } catch (Exception e) { - log.atError().setMessage("Unexpected error running RfsWorker").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected error running RfsWorker").log(); throw e; } } diff --git a/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java b/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java index 14039232e4..598419549f 100644 --- a/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java +++ b/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java @@ -259,7 +259,7 @@ public static void main(String[] args) throws Exception { log.atWarn().setMessage("No work left to acquire. Exiting with error code to signal that.").log(); System.exit(NO_WORK_LEFT_EXIT_CODE); } catch (Exception e) { - log.atError().setMessage("Unexpected error running RfsWorker").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected error running RfsWorker").log(); throw e; } } @@ -343,16 +343,10 @@ private static void confirmShardPrepIsComplete( } catch (IWorkCoordinator.LeaseLockHeldElsewhereException e) { long finalLockRenegotiationMillis = lockRenegotiationMillis; int finalShardSetupAttemptNumber = shardSetupAttemptNumber; - log.atInfo() - .setMessage( - () -> "After " - + finalShardSetupAttemptNumber - + "another process holds the lock" - + " for setting up the shard work items. " - + "Waiting " - + finalLockRenegotiationMillis - + "ms before trying again." - ) + log.atInfo().setMessage("{}") + .addArgument(() -> "After " + finalShardSetupAttemptNumber + " attempts, another process holds the lock" + + " for setting up the shard work items. " + "Waiting " + finalLockRenegotiationMillis + + "ms before trying again.") .log(); Thread.sleep(lockRenegotiationMillis); lockRenegotiationMillis *= 2; diff --git a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java index 92e18aa6ed..958be21fcf 100644 --- a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java +++ b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java @@ -150,7 +150,7 @@ private void testProcess(int expectedExitCode, Function<RunData, Integer> proces esSourceContainer.copySnapshotData(tempDirSnapshot.toString()); int actualExitCode = processRunner.apply(new RunData(tempDirSnapshot, tempDirLucene, proxyContainer)); - log.atInfo().setMessage("Process exited with code: " + actualExitCode).log(); + log.atInfo().setMessage("Process exited with code: {}").addArgument(actualExitCode).log(); // Check if the exit code is as expected Assertions.assertEquals( @@ -228,7 +228,7 @@ private static ProcessBuilder setupProcess( failHow == FailHow.NEVER ?
"PT10M" : "PT1S" }; // Kick off the doc migration process - log.atInfo().setMessage("Running RfsMigrateDocuments with args: " + Arrays.toString(args)).log(); + log.atInfo().setMessage("Running RfsMigrateDocuments with args: {}").addArgument(Arrays.toString(args)).log(); ProcessBuilder processBuilder = new ProcessBuilder( javaExecutable, "-cp", @@ -245,7 +245,7 @@ private static ProcessBuilder setupProcess( private static Process runAndMonitorProcess(ProcessBuilder processBuilder) throws IOException { var process = processBuilder.start(); - log.atInfo().setMessage("Process started with ID: " + process.toHandle().pid()).log(); + log.atInfo().setMessage("Process started with ID: {}").addArgument(process.toHandle().pid()).log(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); var readerThread = new Thread(() -> { @@ -259,8 +259,8 @@ private static Process runAndMonitorProcess(ProcessBuilder processBuilder) throw } String finalLine = line; log.atInfo() - .setMessage(() -> "from sub-process [" + process.toHandle().pid() + "]: " + finalLine) - .log(); + .setMessage("from sub-process [{}]: {}") + .addArgument(process.toHandle().pid()).addArgument(finalLine).log(); } }); diff --git a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java index ceeac2d16c..e857f2c2e2 100644 --- a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java +++ b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java @@ -125,13 +125,8 @@ public static int migrateDocumentsSequentially( ); throw new ExpectedMigrationWorkTerminationException(e, runNumber); } catch (Exception e) { - log.atError() - .setCause(e) - .setMessage( - () -> "Caught an exception, " - + "but just going to run again with this worker to simulate task/container recycling" - ) - .log(); + log.atError().setCause(e).setMessage("Caught an exception, " + + "but just going to run again with this worker to simulate task/container recycling").log(); } } } @@ -166,7 +161,8 @@ public static DocumentsRunner.CompletionStatus migrateDocumentsWithOneWorker( var tempDir = Files.createTempDirectory("opensearchMigrationReindexFromSnapshot_test_lucene"); var shouldThrow = new AtomicBoolean(); try (var processManager = new LeaseExpireTrigger(workItemId -> { - log.atDebug().setMessage("Lease expired for " + workItemId + " making next document get throw").log(); + log.atDebug().setMessage("Lease expired for {} making next document get throw") + .addArgument(workItemId).log(); shouldThrow.set(true); })) { UnaryOperator terminatingDocumentFilter = d -> { diff --git a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java index c7661b349c..42876fba9a 100644 --- a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java +++ b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java @@ -29,13 +29,13 @@ public EvaluateResult execute(RootMetadataMigrationContext context) { var items = migrateAllItems(migrationMode, clusters, transformer, context); evaluateResult.items(items); } catch (ParameterException pe) { - log.atError().setMessage("Invalid parameter").setCause(pe).log(); + log.atError().setCause(pe).setMessage("Invalid parameter").log(); 
evaluateResult .exitCode(INVALID_PARAMETER_CODE) .errorMessage("Invalid parameter: " + pe.getMessage()) .build(); } catch (Throwable e) { - log.atError().setMessage("Unexpected failure").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected failure").log(); evaluateResult .exitCode(UNEXPECTED_FAILURE_CODE) .errorMessage("Unexpected failure: " + e.getMessage()) diff --git a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java index e09d0d76e1..00edd304f0 100644 --- a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java +++ b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java @@ -29,13 +29,13 @@ public MigrateResult execute(RootMetadataMigrationContext context) { var items = migrateAllItems(migrationMode, clusters, transformer, context); migrateResult.items(items); } catch (ParameterException pe) { - log.atError().setMessage("Invalid parameter").setCause(pe).log(); + log.atError().setCause(pe).setMessage("Invalid parameter").log(); migrateResult .exitCode(INVALID_PARAMETER_CODE) .errorMessage("Invalid parameter: " + pe.getMessage()) .build(); } catch (Throwable e) { - log.atError().setMessage("Unexpected failure").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected failure").log(); migrateResult .exitCode(UNEXPECTED_FAILURE_CODE) .errorMessage("Unexpected failure: " + e.getMessage()) diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java index b9a1caa35a..138c89afd6 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java @@ -55,9 +55,11 @@ Mono<Void> reindexDocsInParallelBatches(Flux<List<BulkDocSection>> docs, String indexN Mono<Void> sendBulkRequest(UUID batchId, List<BulkDocSection> docsBatch, String indexName, IDocumentReindexContext context, Scheduler scheduler) { return client.sendBulkRequest(indexName, docsBatch, context.createBulkRequest()) // Send the request - .doFirst(() -> log.atInfo().log("Batch Id:{}, {} documents in current bulk request.", batchId, docsBatch.size())) - .doOnSuccess(unused -> log.atDebug().log("Batch Id:{}, succeeded", batchId)) - .doOnError(error -> log.atError().log("Batch Id:{}, failed {}", batchId, error.getMessage())) + .doFirst(() -> log.atInfo().setMessage("Batch Id:{}, {} documents in current bulk request.") + .addArgument(batchId).addArgument(docsBatch::size).log()) + .doOnSuccess(unused -> log.atDebug().setMessage("Batch Id:{}, succeeded").addArgument(batchId).log()) + .doOnError(error -> log.atError().setMessage("Batch Id:{}, failed {}") + .addArgument(batchId).addArgument(error::getMessage).log()) // Prevent the error from stopping the entire stream, retries occurring within sendBulkRequest .onErrorResume(e -> Mono.empty()) .then() // Discard the response object diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java index c4583ce714..520db89350 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java @@ -157,7 +157,8 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int
docId, boolean i try { document = reader.document(docId); } catch (IOException e) { - log.atError().setMessage("Failed to read document at Lucene index location {}").addArgument(docId).setCause(e).log(); + log.atError().setCause(e).setMessage("Failed to read document at Lucene index location {}") + .addArgument(docId).log(); return null; } @@ -188,12 +189,14 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int docId, boolean i } } if (id == null) { - log.atError().setMessage("Document with index" + docId + " does not have an id. Skipping").log(); + log.atError().setMessage("Document with index {} does not have an id. Skipping") + .addArgument(docId).log(); return null; // Skip documents with missing id } if (sourceBytes == null || sourceBytes.bytes.length == 0) { - log.atWarn().setMessage("Document {} doesn't have the _source field enabled").addArgument(id).log(); + log.atWarn().setMessage("Document {} doesn't have the _source field enabled") + .addArgument(id).log(); return null; // Skip these } @@ -202,7 +205,7 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int docId, boolean i StringBuilder errorMessage = new StringBuilder(); errorMessage.append("Unable to parse Document id from Document. The Document's Fields: "); document.getFields().forEach(f -> errorMessage.append(f.name()).append(", ")); - log.atError().setMessage(errorMessage.toString()).setCause(e).log(); + log.atError().setCause(e).setMessage("{}").addArgument(errorMessage).log(); return null; // Skip documents with invalid id } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java index cb0cd40d86..4e763761db 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java @@ -355,10 +355,7 @@ public Mono<BulkResponse> sendBulkRequest(String indexName, List<BulkDocSection> docs, var docsMap = docs.stream().collect(Collectors.toMap(d -> d.getDocId(), d -> d)); return Mono.defer(() -> { final String targetPath = indexName + "/_bulk"; - log.atTrace() - .setMessage("Creating bulk body with document ids {}") - .addArgument(() -> docsMap.keySet()) - .log(); + log.atTrace().setMessage("Creating bulk body with document ids {}").addArgument(docsMap::keySet).log(); var body = DocumentReindexer.BulkDocSection.convertToBulkRequestBody(docsMap.values()); var additionalHeaders = new HashMap<String, List<String>>(); // Reduce network bandwidth by attempting request and response compression @@ -395,11 +392,8 @@ public Mono<BulkResponse> sendBulkRequest(String indexName, List<BulkDocSection> docs, diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/S3Repo.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/S3Repo.java - log.atInfo().setMessage(()->"Blob file download(s) complete").log(); + log.atInfo().setMessage("Blob file download(s) complete").log(); // Print out any failed downloads completedDirectoryDownload.failedTransfers().forEach(x->log.error("{}", x)); diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java index 5fd55d8a57..79b549827e 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java @@ -55,7 +55,7 @@ public void registerRepo() { client.registerSnapshotRepo(getRepoName(), settings, context); log.atInfo().setMessage("Snapshot repo registration successful").log(); } catch (Exception e) { - log.atError().setMessage("Snapshot repo registration failed").setCause(e).log(); + log.atError().setCause(e).setMessage("Snapshot
repo registration failed").log(); throw new RepoRegistrationFailed(getRepoName()); } } @@ -72,7 +72,7 @@ public void createSnapshot() { client.createSnapshot(getRepoName(), snapshotName, body, context); log.atInfo().setMessage("Snapshot {} creation initiated").addArgument(snapshotName).log(); } catch (Exception e) { - log.atError().setMessage("Snapshot {} creation failed").addArgument(snapshotName).setCause(e).log(); + log.atError().setCause(e).setMessage("Snapshot {} creation failed").addArgument(snapshotName).log(); throw new SnapshotCreationFailed(snapshotName); } } @@ -82,7 +82,7 @@ public boolean isSnapshotFinished() { try { response = client.getSnapshotStatus(getRepoName(), snapshotName, context); } catch (Exception e) { - log.atError().setMessage("Failed to get snapshot status").setCause(e).log(); + log.atError().setCause(e).setMessage("Failed to get snapshot status").log(); throw new SnapshotStatusCheckFailed(snapshotName); } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java index f3c6744a7c..d14ea198df 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java @@ -93,10 +93,8 @@ private ByteBuffer gzipByteBufferSimple(final ByteBuffer inputBuffer) { } } if (inputBuffer.remaining() > 0) { - log.atDebug() - .setMessage("Gzip compression ratio: {}") - .addArgument(() -> String.format("%.2f%%", (double) baos.size() / inputBuffer.remaining() * 100)) - .log(); + log.atDebug().setMessage("Gzip compression ratio: {}") + .addArgument(() -> String.format("%.2f%%", (double) baos.size() / inputBuffer.remaining() * 100)).log(); } return ByteBuffer.wrap(baos.toByteArray()); } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java index a4005752c3..b9ce03c6ac 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java @@ -67,7 +67,7 @@ public IndexMetadata transformIndexMetadata(IndexMetadata index) { } private void transformIndex(Index index, IndexType type) { - log.atDebug().setMessage(()->"Original Object: {}").addArgument(index.getRawJson().toString()).log(); + log.atDebug().setMessage("Original Object: {}").addArgument(index::getRawJson).log(); var newRoot = index.getRawJson(); switch (type) { @@ -85,7 +85,7 @@ private void transformIndex(Index index, IndexType type) { TransformFunctions.removeIntermediateIndexSettingsLevel(newRoot); // run before fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(newRoot, awarenessAttributeDimensionality); - log.atDebug().setMessage(()->"Transformed Object: {}").addArgument(newRoot.toString()).log(); + log.atDebug().setMessage("Transformed Object: {}").addArgument(newRoot).log(); } private enum IndexType { diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java index 48c9366a23..53ac10135c 100644 --- 
a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java @@ -27,11 +27,11 @@ public GlobalMetadata transformGlobalMetadata(GlobalMetadata metaData) { ObjectNode templatesRoot = (ObjectNode) root.get(TEMPLATES_KEY_STR).deepCopy(); templatesRoot.fieldNames().forEachRemaining(templateName -> { ObjectNode template = (ObjectNode) templatesRoot.get(templateName); - log.atInfo().setMessage("Transforming template: {}").addArgument(templateName).log(); - log.atDebug().setMessage("Original template: {}").addArgument(template).log(); + log.atInfo().setMessage("Transforming template: {}").addArgument(templateName).log(); + log.atDebug().setMessage("Original template: {}").addArgument(template).log(); TransformFunctions.removeIntermediateIndexSettingsLevel(template); // run before fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(templatesRoot, awarenessAttributeDimensionality); - log.atDebug().setMessage("Transformed template: {}").addArgument(template).log(); + log.atDebug().setMessage("Transformed template: {}").addArgument(template).log(); templatesRoot.set(templateName, template); }); root.set(TEMPLATES_KEY_STR, templatesRoot); @@ -49,12 +49,12 @@ public GlobalMetadata transformGlobalMetadata(GlobalMetadata metaData) { return; } - log.atInfo().setMessage("Transforming index template: {}").addArgument(templateName).log(); - log.atDebug().setMessage("Original index template: {}").addArgument(template).log(); + log.atInfo().setMessage("Transforming index template: {}").addArgument(templateName).log(); + log.atDebug().setMessage("Original index template: {}").addArgument(template).log(); TransformFunctions.removeIntermediateIndexSettingsLevel(templateSubRoot); // run before // fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(templateSubRoot, awarenessAttributeDimensionality); - log.atDebug().setMessage("Transformed index template: {}").addArgument(template).log(); + log.atDebug().setMessage("Transformed index template: {}").addArgument(template).log(); indexTemplateValuesRoot.set(templateName, template); }); root.set(INDEX_TEMPLATE_KEY_STR, indexTemplatesRoot); @@ -72,10 +72,10 @@ public GlobalMetadata transformGlobalMetadata(GlobalMetadata metaData) { return; } - log.atInfo().setMessage("Transforming component template: {}").addArgument(templateName).log(); - log.atDebug().setMessage("Original component template: {}").addArgument(template).log(); + log.atInfo().setMessage("Transforming component template: {}").addArgument(templateName).log(); + log.atDebug().setMessage("Original component template: {}").addArgument(template).log(); // No transformation needed for component templates - log.atDebug().setMessage("Transformed component template: {}").addArgument(template).log(); + log.atDebug().setMessage("Transformed component template: {}").addArgument(template).log(); componentTemplateValuesRoot.set(templateName, template); }); root.set(COMPONENT_TEMPLATE_KEY_STR, componentTemplatesRoot); @@ -86,7 +86,7 @@ public GlobalMetadata transformGlobalMetadata(GlobalMetadata metaData) { @Override public IndexMetadata transformIndexMetadata(IndexMetadata indexData) { - log.atDebug().setMessage("Original Object: {}").addArgument(indexData.getRawJson()).log(); + log.atDebug().setMessage("Original Object: {}").addArgument(indexData::getRawJson).log(); var copy = indexData.deepCopy(); var newRoot = copy.getRawJson(); @@ 
-96,7 +96,7 @@ public IndexMetadata transformIndexMetadata(IndexMetadata indexData) { TransformFunctions.removeIntermediateIndexSettingsLevel(newRoot); // run before fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(newRoot, awarenessAttributeDimensionality); - log.atDebug().setMessage("Transformed Object: {}").addArgument(newRoot).log(); + log.atDebug().setMessage("Transformed Object: {}").addArgument(newRoot).log(); return copy; } } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java index 2889a5868e..f200c6ba66 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java @@ -43,10 +43,7 @@ public ObjectNode getClusterData() { assert responses != null; var globalMetadata = globalMetadataFromParts(responses); - log.atDebug() - .setMessage("Combined global metadata:\n{}") - .addArgument(globalMetadata::toString) - .log(); + log.atDebug().setMessage("Combined global metadata:\n{}").addArgument(globalMetadata).log(); return globalMetadata; } @@ -81,10 +78,7 @@ public ObjectNode getIndexes() { .block(); var indexData = combineIndexDetails(indexDetailsList); - log.atDebug() - .setMessage("Index data combined:\n{}") - .addArgument(indexData::toString) - .log(); + log.atDebug().setMessage("Index data combined:\n{}").addArgument(indexData).log(); return indexData; } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java index f1d9e801fb..4d218cae33 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java @@ -156,7 +156,8 @@ public void setup(Supplier<IWorkCoordinationContexts.IInitializeCoordinatorStateContext> - log.atInfo().setMessage(() -> "Creating " + INDEX_NAME + " because HEAD returned " + indexCheckResponse.getStatusCode()).log(); + log.atInfo().setMessage("{}") + .addArgument(() -> "Creating " + INDEX_NAME + " because HEAD returned " + indexCheckResponse.getStatusCode()) .log(); return httpClient.makeJsonRequest(AbstractedHttpClient.PUT_METHOD, INDEX_NAME, null, body); @@ -277,18 +278,19 @@ DocumentModificationResult getResult(AbstractedHttpClient.AbstractHttpResponse r try { return DocumentModificationResult.parse(resultStr); } catch (Exception e) { - log.atWarn().setCause(e).setMessage(() -> "Caught exception while parsing the response").log(); - log.atWarn().setMessage(() -> "status: " + response.getStatusCode() + " " + response.getStatusText()).log(); - log.atWarn().setMessage(() -> "headers: " + - response.getHeaders() - .map(kvp->kvp.getKey() + ":" + kvp.getValue()) - .collect(Collectors.joining("\n"))) + log.atWarn().setCause(e).setMessage("Caught exception while parsing the response").log(); + log.atWarn().setMessage("{}") + .addArgument(() -> "status: " + response.getStatusCode() + " " + response.getStatusText()).log(); + log.atWarn().setMessage("headers: {}") + .addArgument(() -> response.getHeaders() .map(kvp->kvp.getKey() + ":" + kvp.getValue()).collect(Collectors.joining("\n"))) .log(); - log.atWarn().setMessage(() -> { + log.atWarn().setMessage("Payload: {}") + .addArgument(() -> { try { - return "Payload: " + new String(response.getPayloadBytes(), StandardCharsets.UTF_8); + return new String(response.getPayloadBytes(), 
StandardCharsets.UTF_8); } catch (Exception e2) { - return "while trying to display response bytes, caught exception: " + e2; + return "EXCEPTION: while trying to display response bytes: " + e2; } } ) @@ -588,12 +590,12 @@ private WorkItemAndDuration getAssignedWorkItemUnsafe() var resultHitInner = resultHitsUpper.path("hits").path(0); var expiration = resultHitInner.path(SOURCE_FIELD_NAME).path(EXPIRATION_FIELD_NAME).longValue(); if (expiration == 0) { - log.atWarn().setMessage(() -> - "Expiration wasn't found or wasn't set to > 0 for response:" + response.toDiagnosticString()).log(); + log.atWarn().setMessage("Expiration wasn't found or wasn't set to > 0 for response: {}") + .addArgument(response::toDiagnosticString).log(); throw new MalformedAssignedWorkDocumentException(response); } var rval = new WorkItemAndDuration(resultHitInner.get("_id").asText(), Instant.ofEpochMilli(1000 * expiration)); - log.atInfo().setMessage(() -> "Returning work item and lease: " + rval).log(); + log.atInfo().setMessage("Returning work item and lease: {}").addArgument(rval).log(); return rval; } @@ -613,7 +615,7 @@ private WorkItemAndDuration getAssignedWorkItem(LeaseChecker leaseChecker, // there's no reason to not try at least a few times if (malformedDocRetries > MAX_MALFORMED_ASSIGNED_WORK_DOC_RETRIES) { ctx.addTraceException(e, true); - log.atError().setCause(e).setMessage(() -> + log.atError().setCause(e).setMessage( "Throwing exception because max tries (" + MAX_MALFORMED_ASSIGNED_WORK_DOC_RETRIES + ")" + " have been exhausted").log(); throw new RetriesExceededException(e, malformedDocRetries); @@ -629,8 +631,9 @@ private WorkItemAndDuration getAssignedWorkItem(LeaseChecker leaseChecker, (long) (Math.pow(2.0, (retries-1)) * ACQUIRE_WORK_RETRY_BASE_MS))); leaseChecker.checkRetryWaitTimeOrThrow(e, retries-1, sleepBeforeNextRetryDuration); - log.atWarn().setMessage(() -> "Couldn't complete work assignment due to exception. " - + "Backing off " + sleepBeforeNextRetryDuration + "ms and trying again.").setCause(e).log(); + log.atWarn().setCause(e) + .setMessage("Couldn't complete work assignment due to exception. Backing off {} and trying again.") + .addArgument(sleepBeforeNextRetryDuration).log(); Thread.sleep(sleepBeforeNextRetryDuration.toMillis()); } } @@ -702,16 +705,9 @@ static U doUntil( if (test.test(suppliedVal, transformedVal)) { return transformedVal; } else { - log.atWarn() - .setMessage( - () -> "Retrying " - + labelThatShouldBeAContext - + " because the predicate failed for: (" - + suppliedVal - + "," - + transformedVal - + ")" - ) + log.atWarn().setMessage("{}").addArgument(() -> + "Retrying " + labelThatShouldBeAContext + " because the predicate failed for: (" + + suppliedVal + "," + transformedVal + ")") .log(); if (attempt >= maxTries) { context.recordFailure(); @@ -798,8 +794,9 @@ private void refresh(Supplier context leaseChecker.checkRetryWaitTimeOrThrow(e, driftRetries, sleepBeforeNextRetryDuration); } ++driftRetries; - log.atInfo().setCause(e).setMessage(() -> "Couldn't complete work assignment due to exception. " - + "Backing off " + sleepBeforeNextRetryDuration + "ms and trying again.").log(); + log.atInfo().setCause(e) + .setMessage("Couldn't complete work assignment due to exception. 
Backing off {} and retrying.") + .addArgument(sleepBeforeNextRetryDuration).log(); Thread.sleep(sleepBeforeNextRetryDuration.toMillis()); } } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java index 958b5d3e6e..d54b7644bc 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java @@ -101,13 +101,9 @@ private void doDocumentsMigration( reindexer.reindex(shardMetadata.getIndexName(), documents, context) .doOnError(error -> log.error("Error during reindexing: " + error)) .doOnSuccess( - done -> log.atInfo() - .setMessage( - () -> "Reindexing completed for Index " - + shardMetadata.getIndexName() - + ", Shard " - + shardMetadata.getShardId() - ) + done -> log.atInfo().setMessage("{}") + .addArgument(() -> "Reindexing completed for Index " + shardMetadata.getIndexName() + + ", Shard " + shardMetadata.getShardId()) .log() ) // Wait for the reindexing to complete before proceeding diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java index 7e79418ae8..f437ad65aa 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java @@ -70,7 +70,7 @@ public Void onAlreadyCompleted() throws IOException { @Override public Void onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) { - log.atInfo().setMessage(() -> "Acquired work to set the shard workitems").log(); + log.atInfo().setMessage("Acquired work to set the shard workitems").log(); prepareShardWorkItems( scopedWorkCoordinator.workCoordinator, metadataFactory, diff --git a/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java b/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java index a477dd5ed3..61fdef88c1 100644 --- a/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java +++ b/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java @@ -53,7 +53,7 @@ public class LuceneDocumentsReaderTest { @BeforeEach public void setUp() throws IOException { tempDirectory = Files.createTempDirectory("test-temp-dir"); - log.atDebug().log("Temporary directory created at: " + tempDirectory); + log.atDebug().setMessage("Temporary directory created at: {}").addArgument(tempDirectory).log(); } @AfterEach @@ -64,7 +64,7 @@ public void tearDown() throws IOException { try { Files.delete(path); } catch (IOException e) { - log.atError().setMessage("Failed to delete: " + path).setCause(e).log(); + log.atError().setCause(e).setMessage("Failed to delete: {}").addArgument(path).log(); } }); log.atDebug().log("Temporary directory deleted."); diff --git a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java index a851ffb144..51835c6115 100644 --- a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java +++ b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java @@ -101,7 +101,7 @@ public 
void testWhenGetResultAndErrorThenLogged() throws Exception { var closeableLogSetup = new CloseableLogSetup(workCoordinator.getClass().getName())) { Assertions.assertThrows(IllegalArgumentException.class, () -> workCoordinator.getResult(response)); - log.atDebug().setMessage(()->"Logged events: " + closeableLogSetup.getLogEvents()).log(); + log.atDebug().setMessage("Logged events: {}").addArgument(closeableLogSetup.getLogEvents()).log(); Assertions.assertTrue(closeableLogSetup.getLogEvents().stream().anyMatch(e -> e.contains(THROTTLE_RESULT_VALUE))); } } @@ -124,7 +124,7 @@ public void testWhenInvokedWithHttpErrorThenLogged(Function - log.atDebug().setMessage(() -> "Logged events: " + closeableLogSetup.getLogEvents()).log(); + log.atDebug().setMessage("Logged events: {}").addArgument(()->closeableLogSetup.getLogEvents()).log(); var logEvents = closeableLogSetup.getLogEvents(); Assertions.assertTrue(logEvents.stream().anyMatch(e -> e.contains(THROTTLE_RESULT_VALUE))); } diff --git a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java index 9680f39557..54a014ef74 100644 --- a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java +++ b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java @@ -68,7 +68,7 @@ private JsonNode searchForExpiredDocs(long expirationEpochSeconds) { "" + expirationEpochSeconds ) + "}"; - log.atInfo().setMessage(() -> "Searching with... " + body).log(); + log.atInfo().setMessage("Searching with... {}").addArgument(body).log(); var response = httpClientSupplier.get() .makeJsonRequest( AbstractedHttpClient.GET_METHOD, @@ -162,15 +162,11 @@ public void testAcquireLeaseForQuery() throws Exception { Duration.ofSeconds(2), testContext::createAcquireNextItemContext ); - log.atInfo().setMessage(() -> "Next work item picked=" + nextWorkItem).log(); + log.atInfo().setMessage("Next work item picked={}").addArgument(nextWorkItem).log(); Assertions.assertInstanceOf(IWorkCoordinator.NoAvailableWorkToBeDone.class, nextWorkItem); } catch (OpenSearchWorkCoordinator.PotentialClockDriftDetectedException e) { - log.atError() - .setCause(e) - .setMessage( - () -> "Unexpected clock drift error. Got response: " - + searchForExpiredDocs(e.getTimestampEpochSeconds()) - ) + log.atError().setCause(e).setMessage("Unexpected clock drift error. Got response: {}") + .addArgument(() -> searchForExpiredDocs(e.getTimestampEpochSeconds())) .log(); } @@ -224,7 +220,7 @@ public String onNoAvailableWorkToBeDone() throws IOException { @Override public String onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) throws IOException, InterruptedException { - log.atInfo().setMessage(() -> "Next work item picked=" + workItem).log(); + log.atInfo().setMessage("Next work item picked={}").addArgument(workItem).log(); Assertions.assertNotNull(workItem); Assertions.assertNotNull(workItem.workItemId); Assertions.assertTrue(workItem.leaseExpirationTime.isAfter(oldNow)); @@ -243,10 +239,8 @@ public String onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) thro } catch (OpenSearchWorkCoordinator.PotentialClockDriftDetectedException e) { log.atError() .setCause(e) - .setMessage( - () -> "Unexpected clock drift error. Got response: " - + searchForExpiredDocs(e.getTimestampEpochSeconds()) - ) + .setMessage("Unexpected clock drift error. 
Got response: {}") + .addArgument(() -> searchForExpiredDocs(e.getTimestampEpochSeconds())) .log(); throw e; } diff --git a/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java b/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java index 6f977f2595..e3a2c13c94 100644 --- a/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java +++ b/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java @@ -106,17 +106,12 @@ private int getHashCodeOfImagesAndArgs(DockerClient dockerClient, boolean pullIf var sourceImageId = getImageId(dockerClient, baseSourceVersion.imageName, pullIfUnavailable); var dataLoaderImageId = getImageId(dockerClient, dataLoaderImageName, pullIfUnavailable); var rval = Objects.hash(sourceImageId, dataLoaderImageId, Arrays.hashCode(generatorContainerArgs)); - log.atInfo() - .setMessage( - "sourceImageId=" - + sourceImageId - + " dataLoaderImageId=" - + dataLoaderImageId - + " args=" - + Arrays.stream(generatorContainerArgs).collect(Collectors.joining()) - + " hash: " - + rval - ) + log.atInfo().setMessage("{}") + .addArgument(() -> + "sourceImageId=" + sourceImageId + + " dataLoaderImageId=" + dataLoaderImageId + + " args=" + Arrays.stream(generatorContainerArgs).collect(Collectors.joining()) + + " hash: " + rval) .log(); return rval; } diff --git a/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java b/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java index 38122b18e4..afe7bdce57 100644 --- a/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java +++ b/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java @@ -748,10 +748,8 @@ protected CompletableFuture kickoffCloseStream(CodedOutputStreamHolder out bb.position(0); var bytesWritten = osh.getOutputStream().getTotalBytesWritten(); bb.limit(bytesWritten); - log.atTrace() - .setMessage("Adding {}") - .addArgument(() -> StandardCharsets.UTF_8.decode(bb.duplicate())) - .log(); + log.atTrace().setMessage("Adding {}") + .addArgument(() -> StandardCharsets.UTF_8.decode(bb.duplicate())).log(); outputBuffers.add(bb); } catch (IOException e) { throw Lombok.sneakyThrow(e); diff --git a/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java b/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java index d914be5bc7..efd6ae6683 100644 --- a/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java +++ b/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java @@ -41,17 +41,15 @@ protected void channelFinishedReadingAnHttpMessage( ((IWireCaptureContexts.IRequestContext) messageContext).onBlockingRequest(); messageContext = messageContext.createBlockingContext(); trafficOffloader.flushCommitAndResetStream(false).whenComplete((result, t) -> { - log.atInfo().setMessage(() -> 
"Done flushing").log(); + log.atInfo().setMessage("Done flushing").log(); if (t != null) { // This is a spot where we would benefit from having a behavioral policy that different users // could set as needed. Some users may be fine with just logging a failed offloading of a request // where other users may want to stop entirely. JIRA here: // https://opensearch.atlassian.net/browse/MIGRATIONS-1276 - log.atWarn() - .setCause(t) - .setMessage("Error offloading the request, but forwarding it to the service anyway") - .log(); + log.atWarn().setCause(t) + .setMessage("Error offloading the request, but forwarding it to the service anyway").log(); ReferenceCountUtil.release(msg); messageContext.addCaughtException(t); } diff --git a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java index 3c243a80a7..bbd39973b2 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java @@ -75,7 +75,7 @@ private void logProgressAtInterval( Duration frequency ) { eventLoop.scheduleAtFixedRate( - () -> log.atLevel(logLevel).log(channelPoolMap.getStats().toString()), + () -> log.atLevel(logLevel).setMessage("{}").addArgument(channelPoolMap::getStats).log(), frequency.toMillis(), frequency.toMillis(), TimeUnit.MILLISECONDS diff --git a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java index eec1436034..f586931ffe 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java @@ -38,7 +38,7 @@ public void channelInactive(ChannelHandlerContext ctx) { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - log.atError().setCause(cause).setMessage("Caught error for channel: " + ctx.channel().id().asLongText()).log(); + log.atError().setCause(cause).setMessage("Caught error for channel: " + ctx.channel().id().asLongText()).LOG_TO_CHECK(); FrontsideHandler.closeAndFlush(ctx.channel()); } } diff --git a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java index f35c632e81..d7e65859be 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java @@ -210,12 +210,12 @@ public Stats getStats() { // make a copy on the original thread making changes, which will be up to date at the time of capture and 
// immutable for future accessors, making it thread-safe var copiedStats = eventLoop.submit(() -> { - log.atTrace().setMessage(() -> "copying stats (" + System.identityHashCode(stats) + ")=" + stats).log(); + log.atTrace().setMessage(() -> "copying stats (" + System.identityHashCode(stats) + ")=" + stats).LOG_TO_CHECK(); return new Stats(stats); }).get(); log.atTrace() .setMessage(() -> "Got copied value of (" + System.identityHashCode(copiedStats) + ")=" + copiedStats) - .log(); + .LOG_TO_CHECK(); return copiedStats; } @@ -234,10 +234,10 @@ public F getAvailableOrNewItem() { throw new PoolClosedException(); } var startTime = Instant.now(); - log.atTrace().setMessage("getAvailableOrNewItem: readyItems.size()={}").addArgument(readyItems.size()).log(); + log.atTrace().setMessage("getAvailableOrNewItem: readyItems.size()={}").addArgument(readyItems.size()).LOG_TO_CHECK(); var item = readyItems.poll(); log.atTrace().setMessage("getAvailableOrNewItem: item={} remaining readyItems.size()={}") - .addArgument(item).addArgument(readyItems.size()).log(); + .addArgument(item).addArgument(readyItems.size()).LOG_TO_CHECK(); if (item != null) { stats.addHotGet(); beginLoadingNewItemIfNecessary(); diff --git a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java index 72ca5c837f..057ae79f0e 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java @@ -99,7 +99,7 @@ public void testBodyDoesntRewrite() throws Exception { capturedRequestList.add(request); log.atTrace().setMessage(() -> "headers: " + request.getHeaders().stream().map(kvp->kvp.getKey()+": "+kvp.getValue()) - .collect(Collectors.joining())).log(); + .collect(Collectors.joining())).LOG_TO_CHECK(); capturedBodies.add(fullRequest.content().toString(StandardCharsets.UTF_8)); return new SimpleHttpResponse(headers, payloadBytes, "OK", 200); }); diff --git a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java index 99ccf6ba2d..508376028c 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java @@ -70,7 +70,7 @@ public static void sliceMessageIntoChannelWrites(EmbeddedChannel channel, String var endIdx = startIdx + len; var substr = msg.substring(lastStart.get(), Math.min(endIdx, msg.length())); lastStart.set(endIdx); - log.atTrace().setMessage(() -> "s: " + substr).log(); + log.atTrace().setMessage(() -> "s: " + substr).LOG_TO_CHECK(); return substr; }) .takeWhile(Objects::nonNull) diff --git a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java 
b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java index 73480a0d3c..599d86e266 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java @@ -28,7 +28,7 @@ class HeaderRemoverHandlerTest { private void runTestsWithSize(BiFunction<Boolean, String, String> msgMaker, Supplier<IntStream> sizesSupplier) { - log.atDebug().setMessage(() -> "sizes: " + sizesSupplier.get().limit(16).mapToObj(i->""+i) - .collect(Collectors.joining(","))).log(); + log.atDebug().setMessage("sizes: {}").addArgument(() -> sizesSupplier.get().limit(16).mapToObj(i->""+i) + .collect(Collectors.joining(","))).log(); runTestWithSize(b -> msgMaker.apply(b,"\n"), sizesSupplier.get()); runTestWithSize(b -> msgMaker.apply(b, "\r\n"), sizesSupplier.get()); } @@ -92,7 +92,7 @@ public void randomFragmentedCheckInterlaced() { final var bound = getBound(HeaderRemoverHandlerTest::makeInterlacedMessage); for (int i=0; i<NUM_RANDOM_RUNS; ++i) { - log.atDebug().setMessage(() -> "random run={}").addArgument(i).log(); + log.atDebug().setMessage("random run={}").addArgument(i).log(); runTestsWithSize(HeaderRemoverHandlerTest::makeInterlacedMessage, () -> IntStream.generate(() -> r.nextInt(bound))); } @@ -113,7 +113,7 @@ public void randomFragmentedCheckConsecutive() { final var bound = getBound(HeaderRemoverHandlerTest::makeConsecutiveMessage); for (int i=0; i<NUM_RANDOM_RUNS; ++i) { - log.atDebug().setMessage(() -> "random run={}").addArgument(i).log(); + log.atDebug().setMessage("random run={}").addArgument(i).log(); runTestsWithSize(HeaderRemoverHandlerTest::makeConsecutiveMessage, () -> IntStream.generate(() -> r.nextInt(bound))); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java index 0d07d5a83c..07ee99d674 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java @@ -85,13 +85,9 @@ public String appendageToDescribeHowToSetMinimumGuaranteedLifetime() { @Override public void onExpireAccumulation(String partitionId, Accumulation accumulation) { connectionsExpiredCounter.incrementAndGet(); - log.atTrace() - .setMessage( - () -> "firing accumulation for accum=[" - + accumulation.getRrPair().getBeginningTrafficStreamKey() - + "]=" - + accumulation - ) + log.atTrace().setMessage("firing accumulation for accum=[{}]={}") + .addArgument(accumulation.getRrPair().getBeginningTrafficStreamKey()) + .addArgument(accumulation) .log(); fireAccumulationsCallbacksAndClose( accumulation, @@ -194,9 +190,8 @@ private static String summarizeTrafficStream(TrafficStream ts) { public void accept(ITrafficStreamWithKey trafficStreamAndKey) { var yetToBeSequencedTrafficStream = trafficStreamAndKey.getStream(); - log.atTrace() - .setMessage(() -> "Got trafficStream: " + summarizeTrafficStream(yetToBeSequencedTrafficStream)) - .log(); + log.atTrace().setMessage("Got trafficStream: {}") + .addArgument(() -> summarizeTrafficStream(yetToBeSequencedTrafficStream)).log(); var partitionId = yetToBeSequencedTrafficStream.getNodeId(); var connectionId = yetToBeSequencedTrafficStream.getConnectionId(); var tsk = 
trafficStreamAndKey.getKey(); @@ -206,14 +201,8 @@ public void accept(ITrafficStreamWithKey trafficStreamAndKey) { var o = trafficStream.getSubStreamList().get(i); var connectionStatus = addObservationToAccumulation(accum, tsk, o); if (CONNECTION_STATUS.CLOSED == connectionStatus) { - log.atInfo() - .setMessage( - () -> "Connection terminated: removing " - + partitionId - + ":" - + connectionId - + " from liveStreams map" - ) + log.atInfo().setMessage("{}").addArgument(() -> + "Connection terminated: removing " + partitionId + ":" + connectionId + " from liveStreams map") .log(); liveStreams.remove(partitionId, connectionId); break; @@ -236,19 +225,13 @@ private Accumulation createInitialAccumulation(ITrafficStreamWithKey streamWithK if (key.getTrafficStreamIndex() == 0 && (stream.getPriorRequestsReceived() > 0 || stream.getLastObservationWasUnterminatedRead())) { log.atWarn() - .setMessage( - () -> "Encountered a TrafficStream object with inconsistent values between " - + "the prior request count (" - + stream.getPriorRequestsReceived() - + ", " - + "lastObservationWasUnterminatedRead (" - + stream.getLastObservationWasUnterminatedRead() - + ") and the index (" - + key.getTrafficStreamIndex() + .setMessage("{}").addArgument(() -> + "Encountered a TrafficStream object with inconsistent values between " + + "the prior request count (" + stream.getPriorRequestsReceived() + ", " + + "lastObservationWasUnterminatedRead (" + stream.getLastObservationWasUnterminatedRead() + ")" + + " and the index (" + key.getTrafficStreamIndex() + "). Traffic Observations will be ignored until Reads after the next EndOfMessage" - + " are encountered. Full stream object=" - + stream - ) + + " are encountered. Full stream object=" + stream) .log(); } @@ -265,10 +248,8 @@ public CONNECTION_STATUS addObservationToAccumulation( @NonNull ITrafficStreamKey trafficStreamKey, TrafficObservation observation ) { - log.atTrace() - .setMessage("{}") - .addArgument(() -> "Adding observation: " + observation + " with state=" + accum.state) - .log(); + log.atTrace().setMessage("{}") + .addArgument(() -> "Adding observation: " + observation + " with state=" + accum.state).log(); var timestamp = TrafficStreamUtils.instantFromProtoTimestamp(observation.getTs()); liveStreams.expireOldEntries(trafficStreamKey, accum, timestamp); @@ -278,11 +259,8 @@ public CONNECTION_STATUS addObservationToAccumulation( .or(() -> handleObservationForReadState(accum, observation, trafficStreamKey, timestamp)) .or(() -> handleObservationForWriteState(accum, observation, trafficStreamKey, timestamp)) .orElseGet(() -> { - log.atWarn() - .setMessage( - () -> "unaccounted for observation type " + observation + " for " + accum.trafficChannelKey - ) - .log(); + log.atWarn().setMessage("{}").addArgument(() -> + "unaccounted for observation type " + observation + " for " + accum.trafficChannelKey).log(); return CONNECTION_STATUS.ALIVE; }); } @@ -342,13 +320,10 @@ private Optional handleCloseObservationThatAffectEveryState( exceptionConnectionCounter.incrementAndGet(); accum.resetForNextRequest(); log.atDebug() - .setMessage( - () -> "Removing accumulated traffic pair due to " - + "recorded connection exception event for " - + trafficStreamKey.getConnectionId() - ) + .setMessage("Removing accumulated traffic pair due to recorded connection exception event for {}") + .addArgument(trafficStreamKey::getConnectionId) .log(); - log.atTrace().setMessage(() -> "Accumulated object: " + accum).log(); + log.atTrace().setMessage("Accumulated object: 
{}").addArgument(accum).log(); return Optional.of(CONNECTION_STATUS.ALIVE); } return Optional.empty(); @@ -371,14 +346,21 @@ private Optional handleObservationForReadState( requestCounter.incrementAndGet(); } var rrPair = accum.getOrCreateTransactionPair(trafficStreamKey, originTimestamp); - log.atTrace().setMessage(() -> "Adding request data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("{}").addArgument(() -> + "Adding request data for accum[" + connectionId + "]=" + accum).log(); rrPair.addRequestData(timestamp, observation.getRead().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added request data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Added request data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasEndOfMessageIndicator()) { assert accum.hasRrPair(); handleEndOfRequest(accum); } else if (observation.hasReadSegment()) { - log.atTrace().setMessage(() -> "Adding request segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Adding request segment for accum[{}]={}") + .addArgument(connectionId). + addArgument(accum) + .log(); var rrPair = accum.getOrCreateTransactionPair(trafficStreamKey, originTimestamp); if (rrPair.requestData == null) { rrPair.requestData = new HttpMessageAndTimestamp.Request(timestamp); @@ -386,7 +368,10 @@ private Optional handleObservationForReadState( } rrPair.addRequestData(timestamp, observation.getRead().getData().toByteArray()); rrPair.requestData.addSegment(observation.getReadSegment().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added request segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Added request segment for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasSegmentEnd()) { var rrPair = accum.getRrPair(); assert rrPair.requestData.hasInProgressSegment(); @@ -413,17 +398,29 @@ private Optional handleObservationForWriteState( var connectionId = trafficStreamKey.getConnectionId(); if (observation.hasWrite()) { var rrPair = accum.getRrPair(); - log.atTrace().setMessage(() -> "Adding response data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Adding response data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); rrPair.addResponseData(timestamp, observation.getWrite().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added response data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage(() -> "Added response data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasWriteSegment()) { - log.atTrace().setMessage(() -> "Adding response segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage(() -> "Adding response segment for accum[" + accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); var rrPair = accum.getRrPair(); if (rrPair.responseData == null) { rrPair.responseData = new HttpMessageAndTimestamp.Response(timestamp); } rrPair.responseData.addSegment(observation.getWriteSegment().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added response segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage(() -> "Added response segment for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasSegmentEnd()) { 
var rrPair = accum.getRrPair(); assert rrPair.responseData.hasInProgressSegment(); @@ -443,9 +440,9 @@ private void handleDroppedRequestForAccumulation(Accumulation accum) { var rrPair = accum.getRrPair(); rrPair.getTrafficStreamsHeld().forEach(listener::onTrafficStreamIgnored); } - log.atTrace().setMessage(() -> "resetting to forget " + accum.trafficChannelKey).log(); + log.atTrace().setMessage("resetting to forget {}").addArgument(accum.trafficChannelKey).log(); accum.resetToIgnoreAndForgetCurrentRequest(); - log.atTrace().setMessage(() -> "done resetting to forget and accum=" + accum).log(); + log.atTrace().setMessage("done resetting to forget and accum={}").addArgument(accum).log(); } // This function manages the transition case when an observation comes in that would terminate @@ -455,7 +452,7 @@ private boolean rotateAccumulationIfNecessary(String connectionId, Accumulation // If this was brand new, we don't need to care about triggering the callback. // We only need to worry about this if we have yet to send the RESPONSE. if (accum.state == Accumulation.State.ACCUMULATING_WRITES) { - log.atDebug().setMessage(() -> "handling EOM for accum[" + connectionId + "]=" + accum).log(); + log.atDebug().setMessage("handling EOM for accum[{}]={}").addArgument(connectionId).addArgument(accum).log(); handleEndOfResponse(accum, RequestResponsePacketPair.ReconstructionStatus.COMPLETE); return true; } @@ -526,13 +523,10 @@ private void fireAccumulationsCallbacksAndClose( // it's a difficult decision and one to be managed with a policy. // TODO - add Jira/github issue here. log.atWarn() - .setMessage( - "Terminating a TrafficStream reconstruction before data was accumulated " - + "for " - + accumulation.trafficChannelKey - + " assuming an empty server interaction and NOT " - + "reproducing this to the target cluster." 
- ) + .setMessage("Terminating a TrafficStream reconstruction before data was accumulated " + + "for {} assuming an empty server interaction and NOT " + + "reproducing this to the target cluster.") + .addArgument(accumulation.trafficChannelKey) .log(); if (accumulation.hasRrPair()) { listener.onTrafficStreamsExpired( diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java index 433ff14a63..bce0ad64b7 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java @@ -87,30 +87,24 @@ public ConnectionReplaySession buildConnectionReplaySession(IReplayContexts.ICha () -> buildConnectionReplaySession(channelKeyCtx) ); log.atTrace() - .setMessage( - () -> "returning ReplaySession=" - + crs - + " for " - + channelKeyCtx.getConnectionId() - + " from " - + channelKeyCtx - ) + .setMessage("{}").addArgument(() -> + "returning ReplaySession=" + crs + " for " + channelKeyCtx.getConnectionId() + " from " + channelKeyCtx) .log(); return crs; } public void closeConnection(IReplayContexts.IChannelKeyContext ctx, int sessionNumber) { var connId = ctx.getConnectionId(); - log.atTrace().setMessage(() -> "closing connection for " + connId).log(); + log.atTrace().setMessage("closing connection for {}").addArgument(connId).log(); var connectionReplaySession = connectionId2ChannelCache.getIfPresent(getKey(connId, sessionNumber)); if (connectionReplaySession != null) { closeClientConnectionChannel(connectionReplaySession); connectionId2ChannelCache.invalidate(connId); } else { log.atTrace() - .setMessage( - () -> "No ChannelFuture for " + ctx + " in closeConnection. " + + .setMessage("No ChannelFuture for {} in closeConnection. " + "The connection may have already been closed") + .addArgument(ctx) .log(); } } @@ -127,24 +121,30 @@ private TrackedFuture closeClientConnectionChannel(ConnectionRe .getChannelFutureInAnyState() // this could throw, especially if the even loop has begun to shut down .thenCompose(channelFuture -> { if (channelFuture == null) { - log.atTrace().setMessage(() -> - "Couldn't find the channel for " + session.getChannelKeyContext() + " to close it. " + - "It may have already been reset.").log(); + log.atTrace().setMessage("Couldn't find the channel for {} to close it. 
" + + "It may have already been reset.") + .addArgument(session::getChannelKeyContext) + .log(); return TextTrackedFuture.completedFuture(null, () -> ""); } - log.atTrace().setMessage(() -> - "closing channel " + channelFuture.channel() + "(" + session.getChannelKeyContext() + ")...").log(); + log.atTrace().setMessage("closing channel {} ({})...") + .addArgument(channelFuture.channel()) + .addArgument(session.getChannelKeyContext()) + .log(); return NettyFutureBinders.bindNettyFutureToTrackableFuture( channelFuture.channel().close(), "calling channel.close()") .thenApply(v -> { - log.atTrace().setMessage(() -> - "channel.close() has finished for " + session.getChannelKeyContext() + " with value=" + v).log(); + log.atTrace().setMessage("channel.close() has finished for {} with value={}") + .addArgument(session.getChannelKeyContext()) + .addArgument(v) + .log(); if (session.hasWorkRemaining()) { - log.atWarn().setMessage(() -> - "Work items are still remaining for this connection session" + - "(last associated with connection=" + session.getChannelKeyContext() + "). " - + session.calculateSizeSlowly() + " requests that were enqueued won't be run").log(); + log.atWarn().setMessage("Work items are still remaining for this connection session " + + "(last associated with connection={}). {} requests that were enqueued won't be run") + .addArgument(session::getChannelKeyContext) + .addArgument(session::calculateSizeSlowly) + .log(); } session.schedule.clear(); return channelFuture.channel(); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java index cd88a1c866..4a0f876093 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java @@ -153,13 +153,10 @@ private static Map makeSafeMap( } catch (Exception e) { // TODO - this isn't a good design choice. 
// We should follow through with the spirit of this class and leave this as empty optional values - log.atWarn() - .setMessage( - () -> "Putting what may be a bogus value in the output because transforming it " - + "into json threw an exception for " - + context - ) - .setCause(e) + log.atWarn().setCause(e) + .setMessage("Putting what may be a bogus value in the output because transforming it " + + "into json threw an exception for {}") + .addArgument(context) .log(); return Map.of(EXCEPTION_KEY_STRING, (Object) e.toString()); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java index d95b298542..93705690aa 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java @@ -121,45 +121,26 @@ private TrackedFuture hookWorkFinishingUpdates( f -> f.whenComplete((v, t) -> Utils.setIfLater(lastCompletedSourceTimeEpochMs, timestamp.toEpochMilli())) .whenComplete((v, t) -> { var newCount = totalCountOfScheduledTasksOutstanding.decrementAndGet(); - log.atInfo() - .setMessage( - () -> "Scheduled task '" - + taskDescription - + "' finished (" - + stringableKey - + ") decremented tasksOutstanding to " - + newCount - ) + log.atInfo().setMessage("{}") + .addArgument(() -> "Scheduled task '" + taskDescription + "' finished (" + stringableKey + ")" + + " decremented tasksOutstanding to " + newCount) .log(); }) .whenComplete((v, t) -> contentTimeController.stopReadsPast(timestamp)) - .whenComplete( - (v, t) -> log.atDebug() - .setMessage( - () -> "work finished and used timestamp=" - + timestamp - + " to update contentTimeController (tasksOutstanding=" - + totalCountOfScheduledTasksOutstanding.get() - + ")" - ) - .log() + .whenComplete((v, t) -> log.atDebug() + .setMessage("{}") + .addArgument(() -> "work finished and used timestamp=" + timestamp + " to update " + + "contentTimeController (tasksOutstanding=" + totalCountOfScheduledTasksOutstanding.get() + ")") + .log() ), () -> "Updating fields for callers to poll progress and updating backpressure" ); } private static void logStartOfWork(Object stringableKey, long newCount, Instant start, String label) { - log.atInfo() - .setMessage( - () -> "Scheduling '" - + label - + "' (" - + stringableKey - + ") to run at " - + start - + " incremented tasksOutstanding to " - + newCount - ) + log.atInfo().setMessage("{}").addArgument(() -> + "Scheduling '" + label + "' (" + stringableKey + ") " + + "to run at " + start + " incremented tasksOutstanding to " + newCount) .log(); } @@ -196,8 +177,8 @@ public TrackedFuture scheduleRequest( var requestKey = ctx.getReplayerRequestKey(); logStartOfWork(requestKey, newCount, start, label); - log.atDebug().setMessage( - () -> "Scheduling request for " + ctx + " to run from [" + start + ", " + end + " with an interval of " + log.atDebug().setMessage("{}").addArgument(() -> + "Scheduling request for " + ctx + " to run from [" + start + ", " + end + " with an interval of " + interval + " for " + numPackets + " packets").log(); var result = networkSendOrchestrator.scheduleRequest(requestKey, ctx, start, interval, packets, retryVisitor); return hookWorkFinishingUpdates(result, originalStart, requestKey, label); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java 
b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java index 1b60c4ec6a..21d5e7ee92 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java @@ -102,7 +102,10 @@ public TrackedFuture scheduleWork( ctx.getChannelKeyContext(), ctx.getReplayerRequestKey().sourceRequestIndexSessionIdentifier ); - log.atDebug().setMessage(() -> "Scheduling work for " + ctx.getConnectionId() + " at time " + timestamp).log(); + log.atDebug().setMessage("Scheduling work for {} at time {}") + .addArgument(ctx.getConnectionId()) + .addArgument(timestamp) + .log(); var scheduledContext = ctx.createScheduledContext(timestamp); // This method doesn't use the scheduling that scheduleRequest and scheduleClose use because // doing work associated with a connection is considered to be preprocessing work independent @@ -187,7 +190,10 @@ public TrackedFuture scheduleClose( ) { var channelKey = ctx.getChannelKey(); var channelInteraction = new IndexedChannelInteraction(channelKey, channelInteractionNum); - log.atDebug().setMessage(() -> "Scheduling CLOSE for " + channelInteraction + " at time " + timestamp).log(); + log.atDebug().setMessage("Scheduling CLOSE for {} at time {}") + .addArgument(channelInteraction) + .addArgument(timestamp) + .log(); return submitUnorderedWorkToEventLoop( ctx, sessionNumber, @@ -248,13 +254,8 @@ private TrackedFuture submitUnorderedWorkToEventLoop( log.atTrace() .setMessage("{}") .addArgument( - () -> "adding work item at slot " - + channelInteractionNumber - + " for " - + replaySession.getChannelKeyContext() - + " with " - + replaySession.scheduleSequencer - ) + () -> "adding work item at slot " + channelInteractionNumber + " for " + + replaySession.getChannelKeyContext() + " with " + replaySession.scheduleSequencer) .log(); return replaySession.scheduleSequencer.addFutureForWork( channelInteractionNumber, @@ -323,7 +324,8 @@ private TrackedFuture scheduleOnConnectionReplaySession( Instant atTime, ChannelTask task ) { - log.atInfo().setMessage(() -> channelInteraction + " scheduling " + task.kind + " at " + atTime).log(); + log.atInfo().setMessage("{}") + .addArgument(() -> channelInteraction + " scheduling " + task.kind + " at " + atTime).log(); var schedule = channelFutureAndRequestSchedule.schedule; var eventLoop = channelFutureAndRequestSchedule.eventLoop; @@ -333,9 +335,8 @@ private TrackedFuture scheduleOnConnectionReplaySession( : "Per-connection TrafficStream ordering should force a time ordering on incoming requests"; var workPointTrigger = schedule.appendTaskTrigger(atTime, task.kind).scheduleFuture; var workFuture = task.getRunnable().apply(workPointTrigger); - log.atTrace() - .setMessage(() -> channelInteraction + " added a scheduled event at " + atTime + "... " + schedule) - .log(); + log.atTrace().setMessage("{}") + .addArgument(() -> channelInteraction + " added a scheduled event at " + atTime + "... 
" + schedule).log(); if (wasEmpty) { bindNettyScheduleToCompletableFuture(eventLoop, atTime, workPointTrigger.future); } @@ -344,14 +345,10 @@ private TrackedFuture scheduleOnConnectionReplaySession( var itemStartTimeOfPopped = schedule.removeFirstItem(); assert atTime.equals(itemStartTimeOfPopped) : "Expected to have popped the item to match the start time for the responseFuture that finished"; - log.atDebug() - .setMessage("{}") - .addArgument( - () -> channelInteraction.toString() - + " responseFuture completed - checking " - + schedule - + " for the next item to schedule" - ) + log.atDebug().setMessage("{}") + .addArgument(() -> + channelInteraction.toString() + " responseFuture completed - " + + "checking " + schedule + " for the next item to schedule") .log(); Optional.ofNullable(schedule.peekFirstItem()) .ifPresent(kvp -> bindNettyScheduleToCompletableFuture(eventLoop, kvp.startTime, kvp.scheduleFuture)); @@ -399,7 +396,7 @@ private Duration doubleRetryDelayCapped(Duration d) { } if (dtr.directive == RetryDirective.RETRY) { var newStartTime = referenceStartTime.plus(nextRetryDelay); - log.atInfo().setMessage(() -> "Making request scheduled at " + newStartTime).log(); + log.atInfo().setMessage("Making request scheduled at {}").addArgument(newStartTime).log(); var schedulingDelay = Duration.between(now(), newStartTime); return NettyFutureBinders.bindNettyScheduleToCompletableFuture( eventLoop, schedulingDelay) @@ -423,7 +420,7 @@ private TrackedFuture sendPackets( AtomicInteger requestPacketCounter ) { final var oldCounter = requestPacketCounter.getAndIncrement(); - log.atTrace().setMessage(() -> "sendNextPartAndContinue: packetCounter=" + oldCounter).log(); + log.atTrace().setMessage("sendNextPartAndContinue: packetCounter={}").addArgument(oldCounter).log(); assert iterator.hasNext() : "Should not have called this with no items to send"; var consumeFuture = packetReceiver.consumeBytes(iterator.next().retainedDuplicate()); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java index 04bf8cc602..c375d994c5 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java @@ -102,7 +102,10 @@ public TrackedFuture transformAndSendRequest( start, () -> transformAllData(inputRequestTransformerFactory.create(ctx), packetsSupplier) ); - log.atDebug().setMessage(() -> "request transform future for " + ctx + " = " + requestReadyFuture).log(); + log.atDebug().setMessage("request transform future for {} = {}") + .addArgument(ctx) + .addArgument(requestReadyFuture) + .log(); // It might be safer to chain this work directly inside the scheduleWork call above so that the // read buffer horizons aren't set after the transformation work finishes, but after the packets // are fully handled @@ -133,26 +136,27 @@ private static TrackedFuture transformAllData( var packets = packetSupplier.get().map(Unpooled::wrappedBuffer); packets.forEach(packetData -> { log.atDebug() - .setMessage( - () -> logLabel + " sending " + packetData.readableBytes() + " bytes to the packetHandler" - ) + .setMessage("{} sending {} bytes to the packetHandler") + .addArgument(logLabel) + .addArgument(packetData::readableBytes) .log(); var consumeFuture = 
packetHandler.consumeBytes(packetData); - log.atDebug().setMessage(() -> logLabel + " consumeFuture = " + consumeFuture).log(); + log.atDebug().setMessage("{} consumeFuture = {}") + .addArgument(logLabel) + .addArgument(consumeFuture) + .log(); }); - log.atDebug().setMessage(() -> logLabel + " done sending bytes, now finalizing the request").log(); + log.atDebug().setMessage("{} done sending bytes, now finalizing the request").addArgument(logLabel).log(); return packetHandler.finalizeRequest(); } catch (Exception e) { log.atInfo() .setCause(e) .setMessage( "Encountered an exception while transforming the http request. " - + "The base64 gzipped traffic stream, for later diagnostic purposes, is: " - + Utils.packetsToCompressedTrafficStream(packetSupplier.get()) - ) + + "The base64 gzipped traffic stream, for later diagnostic purposes, is: {}") + .addArgument(() -> Utils.packetsToCompressedTrafficStream(packetSupplier.get())) .log(); throw e; } } - } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java index 4b1cf22861..669488f0e9 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java @@ -47,7 +47,7 @@ public ResultsToLogsConsumer(Logger tupleLogger, Logger progressLogger, IJsonTra // if it's configured to output at all private static Logger makeTransactionSummaryLogger() { var logger = LoggerFactory.getLogger(TRANSACTION_SUMMARY_LOGGER); logger.atDebug().setMessage("{}").addArgument(ResultsToLogsConsumer::getTransactionSummaryStringPreamble).log(); return logger; } @@ -136,19 +136,18 @@ private Map toJSONObject(SourceTargetCaptureTuple tuple, ParsedH */ public void accept(SourceTargetCaptureTuple tuple, ParsedHttpMessagesAsDicts parsedMessages) { final var index = tupleCounter.getAndIncrement(); - progressLogger.atInfo() - .setMessage("{}") - .addArgument(() -> toTransactionSummaryString(index, tuple, parsedMessages)) - .log(); + progressLogger.atInfo().setMessage("{}") + .addArgument(() -> toTransactionSummaryString(index, tuple, parsedMessages)).log(); if (tupleLogger.isInfoEnabled()) { try { var originalTuple = toJSONObject(tuple, parsedMessages); var transformedTuple = tupleTransformer.transformJson(originalTuple); var tupleString = PLAIN_MAPPER.writeValueAsString(transformedTuple); - tupleLogger.atInfo().setMessage("{}").addArgument(() -> tupleString).log(); + tupleLogger.atInfo().setMessage("{}").addArgument(tupleString).log(); } catch (Exception e) { - log.atError().setMessage("Exception converting tuple to string").setCause(e).log(); - tupleLogger.atInfo().setMessage("{}").addArgument("{ \"error\":\"" + e.getMessage() + "\" }").log(); + log.atError().setCause(e).setMessage("Exception converting tuple to string").log(); + tupleLogger.atInfo().setMessage("{}") + .addArgument(() -> "{ \"error\":\"" + e.getMessage() + "\" }").log(); throw Lombok.sneakyThrow(e); } } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java index cf79ef66ac..c73fe6320d --- 
a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java @@ -382,7 +382,7 @@ public static void main(String[] args) throws Exception { final var msg = "Exception parsing " + params.targetUriString; System.err.println(msg); System.err.println(e.getMessage()); - log.atError().setMessage(msg).setCause(e).log(); + log.atError().setCause(e).setMessage("{}").addArgument(msg).log(); System.exit(3); return; } @@ -465,10 +465,9 @@ public static void main(String[] args) throws Exception { ); ActiveContextMonitor finalActiveContextMonitor = activeContextMonitor; scheduledExecutorService.scheduleAtFixedRate(() -> { - activeContextLogger.atInfo() - .setMessage( - () -> "Total requests outstanding at " + Instant.now() + ": " + tr.requestWorkTracker.size() - ) + activeContextLogger.atInfo().setMessage("Total requests outstanding at {}: {}") + .addArgument(Instant::now) + .addArgument(tr.requestWorkTracker::size) .log(); finalActiveContextMonitor.run(); }, ACTIVE_WORK_MONITOR_CADENCE_MS, ACTIVE_WORK_MONITOR_CADENCE_MS, TimeUnit.MILLISECONDS); @@ -490,9 +489,9 @@ public static void main(String[] args) throws Exception { var acmLevel = globalContextTracker.getActiveScopesByAge().findAny().isPresent() ? Level.ERROR : Level.INFO; - activeContextLogger.atLevel(acmLevel).setMessage(() -> "Outstanding work after shutdown...").log(); + activeContextLogger.atLevel(acmLevel).setMessage("Outstanding work after shutdown...").log(); activeContextMonitor.run(); - activeContextLogger.atLevel(acmLevel).setMessage(() -> "[end of run]]").log(); + activeContextLogger.atLevel(acmLevel).setMessage("[end of run]]").log(); } } } @@ -514,7 +513,7 @@ private static void setupShutdownHookForReplayer(TrafficReplayerTopLevel tr) { Optional.of("Done shutting down TrafficReplayer (due to Runtime shutdown). " + "Logs may be missing for events that have happened after the Shutdown event was received.") .ifPresent(afterMsg -> { log.atWarn().setMessage(afterMsg).log(); System.err.println(afterMsg); }); })); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java index 4acf7d6aed..450bcd26d9 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java @@ -160,9 +160,9 @@ private TrackedFuture sendReque ); httpSentRequestFuture.future.whenComplete( (v, t) -> log.atTrace() - .setMessage(() -> "Summary response value for " + requestKey + " returned=" + v) - .log() - ); + .setMessage("Summary response value for {} returned={}") + .addArgument(requestKey).addArgument(v) + .log()); return httpSentRequestFuture; } @@ -189,26 +189,19 @@ Void handleCompletedTransaction( commitTrafficStreams(rrPair.completionStatus, rrPair.trafficStreamKeysBeingHeld); return null; } else { - log.atError() - .setCause(t) - .setMessage(() -> "Throwable passed to handle() for " + context + ". Rethrowing.") - .log(); + log.atError().setCause(t) + .setMessage("Throwable passed to handle() for {}. 
Rethrowing.").addArgument(context).log(); throw Lombok.sneakyThrow(t); } } catch (Error error) { - log.atError() - .setCause(error) - .setMessage(() -> "Caught error and initiating TrafficReplayer shutdown") - .log(); + log.atError().setCause(error) + .setMessage("Caught error and initiating TrafficReplayer shutdown").log(); shutdown(error); throw error; } catch (Exception e) { - log.atError() - .setMessage( - "Unexpected exception while sending the " - + "aggregated response and context for {} to the callback. " - + "Proceeding, but the tuple receiver context may be compromised." - ) + log.atError().setMessage("Unexpected exception while sending the " + + "aggregated response and context for {} to the callback. " + + "Proceeding, but the tuple receiver context may be compromised.") .addArgument(context) .setCause(e) .log(); @@ -286,9 +279,7 @@ private void packageAndWriteResponse( } try (var requestResponseTuple = new SourceTargetCaptureTuple(tupleHandlingContext, rrPair, summary, t)) { log.atDebug() - .setMessage("{}") - .addArgument(() -> "Source/Target Request/Response tuple: " + requestResponseTuple) - .log(); + .setMessage("Source/Target Request/Response tuple: {}").addArgument(requestResponseTuple).log(); tupleWriter.accept(requestResponseTuple); } @@ -322,11 +313,8 @@ protected void perResponseConsumer(AggregatedRawResponse summary, HttpRequestTransformationStatus transformationStatus, IReplayContexts.IReplayerHttpTransactionContext context) { if (summary != null && summary.getError() != null) { - log.atInfo() - .setCause(summary.getError()) - .setMessage("Exception for {}: ") - .addArgument(context) - .log(); + log.atInfo().setCause(summary.getError()) + .setMessage("Exception for {}: ").addArgument(context).log(); exceptionRequestCount.incrementAndGet(); } else if (transformationStatus.isError()) { log.atInfo() @@ -336,8 +324,7 @@ protected void perResponseConsumer(AggregatedRawResponse summary, .log(); exceptionRequestCount.incrementAndGet(); } else if (summary == null) { - log.atInfo() - .setMessage("Not counting this response. No result at all for {}: ") + log.atInfo().setMessage("Not counting this response. No result at all for {}: ") .addArgument(context) .log(); } else { @@ -363,10 +350,8 @@ public void pullCaptureFromSourceToAccumulator( trafficStreams = this.nextChunkFutureRef.get().get(); } catch (ExecutionException ex) { if (ex.getCause() instanceof EOFException) { - log.atWarn() - .setCause(ex.getCause()) - .setMessage("Got an EOF on the stream. " + "Done reading traffic streams.") - .log(); + log.atWarn().setCause(ex.getCause()) + .setMessage("Got an EOF on the stream. 
" + "Done reading traffic streams.").log(); break; } else { log.atWarn().setCause(ex).setMessage("Done reading traffic streams due to exception.").log(); @@ -380,9 +365,7 @@ public void pullCaptureFromSourceToAccumulator( .collect(Collectors.joining(";")) ) .filter(s -> !s.isEmpty()) - .ifPresent( - s -> log.atInfo().setMessage("{}").addArgument("TrafficStream Summary: {" + s + "}").log() - ); + .ifPresent(s -> log.atInfo().setMessage("TrafficStream Summary: {{}}").addArgument(s).log()); } trafficStreams.forEach(trafficToHttpTransactionAccumulator::accept); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java index 7b0c32037b..c493865980 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java @@ -203,17 +203,15 @@ protected void wrapUpWorkAndEmitSummary( waitForRemainingWork(logLevel, timeout); break; } catch (TimeoutException e) { - log.atLevel(logLevel).log("Timed out while waiting for the remaining " + "requests to be finalized..."); + log.atLevel(logLevel).log("Timed out while waiting for the remaining requests to be finalized..."); logLevel = secondaryLogLevel; } } if (!requestWorkTracker.isEmpty() || exceptionRequestCount.get() > 0) { log.atWarn() - .setMessage( - "{} in-flight requests being dropped due to pending shutdown; " - + "{} requests to the target threw an exception; " - + "{} requests were successfully processed." - ) + .setMessage("{} in-flight requests being dropped due to pending shutdown; " + + "{} requests to the target threw an exception; " + + "{} requests were successfully processed.") .addArgument(requestWorkTracker.size()) .addArgument(exceptionRequestCount.get()) .addArgument(successfulRequestCount.get()) @@ -325,10 +323,8 @@ private void handleAlreadySetFinishedSignal() throws InterruptedException, Execu throw e; } } catch (Error t) { - log.atError() - .setCause(t) - .setMessage(() -> "Not waiting for all work to finish. " + "The TrafficReplayer is shutting down") - .log(); + log.atError().setCause(t) + .setMessage("Not waiting for all work to finish. The TrafficReplayer is shutting down").log(); throw t; } } @@ -339,21 +335,21 @@ protected static void writeStatusLogsForRemainingWork( UniqueReplayerRequestKey, TrackedFuture>[] allRemainingWorkArray ) { - log.atLevel(logLevel).log("All remaining work to wait on " + allRemainingWorkArray.length); + log.atLevel(logLevel).setMessage("All remaining work to wait on {}") + .addArgument(allRemainingWorkArray.length).log(); if (log.isInfoEnabled()) { LoggingEventBuilder loggingEventBuilderToUse = log.isTraceEnabled() ? log.atTrace() : log.atInfo(); long itemLimit = log.isTraceEnabled() ? 
Long.MAX_VALUE : MAX_ITEMS_TO_SHOW_FOR_LEFTOVER_WORK_AT_INFO_LEVEL; - loggingEventBuilderToUse.setMessage( - () -> " items: " - + Arrays.stream(allRemainingWorkArray) - .map( - kvp -> kvp.getKey() - + " --> " - + kvp.getValue().formatAsString(TrafficReplayerTopLevel::formatWorkItem) - ) - .limit(itemLimit) - .collect(Collectors.joining("\n")) - ).log(); + loggingEventBuilderToUse.setMessage(" items: {}") + .addArgument(() -> Arrays.stream(allRemainingWorkArray) + .map( + kvp -> kvp.getKey() + + " --> " + + kvp.getValue().formatAsString(TrafficReplayerTopLevel::formatWorkItem) + ) + .limit(itemLimit) + .collect(Collectors.joining("\n"))) + .log(); } } @@ -380,13 +376,10 @@ protected boolean shouldRetry() { @SneakyThrows @Override public @NonNull CompletableFuture shutdown(Error error) { - log.atWarn().setCause(error).setMessage(() -> "Shutting down " + this).log(); + log.atWarn().setCause(error).setMessage("Shutting down {}").addArgument(this).log(); shutdownReasonRef.compareAndSet(null, error); if (!shutdownFutureRef.compareAndSet(null, new CompletableFuture<>())) { - log.atError() - .setMessage( - () -> "Shutdown was already signaled by {}. " + "Ignoring this shutdown request due to {}." - ) + log.atError().setMessage("Shutdown was already signaled by {}. Ignoring this shutdown request due to {}.") .addArgument(shutdownReasonRef.get()) .addArgument(error) .log(); @@ -416,7 +409,7 @@ protected boolean shouldRetry() { } } var shutdownFuture = shutdownFutureRef.get(); - log.atWarn().setMessage(() -> "Shutdown setup has been initiated").log(); + log.atWarn().setMessage("Shutdown setup has been initiated").log(); return shutdownFuture; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TransformationLoader.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TransformationLoader.java index 277b8be96e..d3b0855390 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TransformationLoader.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TransformationLoader.java @@ -41,11 +41,8 @@ public TransformationLoader() { inProgressProviders.add(provider); } providers = Collections.unmodifiableList(inProgressProviders); - log.atInfo() - .setMessage( - () -> "IJsonTransformerProviders loaded: " - + providers.stream().map(p -> p.getClass().toString()).collect(Collectors.joining("; ")) - ) + log.atInfo().setMessage("IJsonTransformerProviders loaded: {}") + .addArgument(providers.stream().map(p -> p.getClass().toString()).collect(Collectors.joining("; "))) .log(); } @@ -82,10 +79,9 @@ private IJsonTransformer configureTransformerFromConfig(Map c) { var transformerName = p.getName(); if (transformerName.equals(key)) { var configuration = c.get(key); - log.atInfo() - .setMessage( - () -> "Creating a transformer through provider=" + p + " with configuration=" + configuration - ) + log.atInfo().setMessage("Creating a transformer through provider={} with configuration={}") + .addArgument(p) + .addArgument(configuration) .log(); return p.createTransformer(configuration); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java index e875fd78d4..3c57dbb6b1 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java +++ 
b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java @@ -61,7 +61,7 @@ public Object consumeByteBufferForSingleObject(ByteBuffer byteBuffer) throws IOE } public void consumeByteBuffer(ByteBuffer byteBuffer) throws IOException { - log.atTrace().setMessage(() -> "Consuming bytes: {}").addArgument(() -> byteBuffer.toString()).log(); + log.atTrace().setMessage("Consuming bytes: {}").addArgument(byteBuffer::toString).log(); feeder.feedInput(byteBuffer); } @@ -73,7 +73,7 @@ public Object getNextTopLevelObject() throws IOException { break; } - log.atTrace().setMessage(() -> "{} ... adding token={}").addArgument(this).addArgument(token).log(); + log.atTrace().setMessage("{} ... adding token={}").addArgument(this).addArgument(token).log(); switch (token) { case FIELD_NAME: jsonObjectStack.push(parser.getText()); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java index 57fa4a4a39..84007faf0d 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java @@ -91,9 +91,8 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { socketContext.addTraceException(cause, true); - log.atDebug() - .setMessage("Exception caught in ConnectionClosedListenerHandler." + "Closing channel due to exception") - .setCause(cause) + log.atDebug().setCause(cause) + .setMessage("Exception caught in ConnectionClosedListenerHandler. 
Closing channel due to exception") .log(); ctx.close(); super.exceptionCaught(ctx, cause); @@ -109,7 +108,7 @@ public NettyPacketToHttpConsumer( var parentContext = ctx.createTargetRequestContext(); this.setCurrentMessageContext(parentContext.createHttpSendingContext()); responseBuilder = AggregatedRawResponse.builder(Instant.now()); - log.atDebug().setMessage(() -> "C'tor: incoming session=" + replaySession).log(); + log.atDebug().setMessage("C'tor: incoming session={}").addArgument(replaySession).log(); this.activeChannelFuture = activateLiveChannel(); this.readTimeoutDuration = readTimeoutDuration; } @@ -125,7 +124,7 @@ private TrackedFuture activateLiveChannel() { if (t != null) { channelCtx.addFailedChannelCreation(); channelCtx.addTraceException(channelFuture.cause(), true); - log.atWarn().setMessage(() -> "error creating channel, not retrying").setCause(t).log(); + log.atWarn().setCause(t).setMessage("error creating channel, not retrying").log(); throw Lombok.sneakyThrow(t); } @@ -133,15 +132,15 @@ private TrackedFuture activateLiveChannel() { if (c.isActive()) { this.channel = c; initializeRequestHandlers(); - log.atDebug().setMessage(() -> "Channel initialized for " + channelCtx + " signaling future").log(); + log.atDebug().setMessage("Channel initialized for {} signaling future") + .addArgument(channelCtx).log(); return TextTrackedFuture.completedFuture(null, () -> "Done"); } else { // this may recurse forever - until the event loop is shutdown // (see the ClientConnectionPool::shutdownNow()) channelCtx.addFailedChannelCreation(); log.atWarn() - .setMessage(() -> "Channel wasn't active, trying to create another for this request") - .log(); + .setMessage("Channel wasn't active, trying to create another for this request").log(); return activateLiveChannel(); } }, () -> "acting on ready channelFuture to retry if inactive or to return"), @@ -192,7 +191,10 @@ public static TrackedFuture createClientConnection( } String host = serverUri.getHost(); int port = serverUri.getPort(); - log.atTrace().setMessage(() -> "Active - setting up backend connection to " + host + ":" + port).log(); + log.atTrace().setMessage("Active - setting up backend connection to {}:{}") + .addArgument(host) + .addArgument(port) + .log(); Bootstrap b = new Bootstrap(); var channelKeyCtx = requestCtx.getLogicalEnclosingScope().getChannelKeyContext(); @@ -211,10 +213,8 @@ protected void initChannel(@NonNull Channel ch) throws Exception { try { var t = TrackedFuture.unwindPossibleCompletionException(tWrapped); if (t != null) { - log.atWarn() - .setMessage(() -> "Caught exception while trying to get an active channel") - .setCause(t) - .log(); + log.atWarn().setCause(t) + .setMessage("Caught exception while trying to get an active channel").log(); } else if (!outboundChannelFuture.channel().isActive()) { t = new ChannelNotActiveException(); } @@ -243,13 +243,8 @@ protected void initChannel(@NonNull Channel ch) throws Exception { ChannelFuture outboundChannelFuture) { final var channel = outboundChannelFuture.channel(); - log.atTrace() - .setMessage( - () -> channelKeyContext.getChannelKey() - + " Done setting up client channel & it was successful for " - + channel - ) - .log(); + log.atTrace().setMessage("{}").addArgument(() -> + channelKeyContext.getChannelKey() + " Done setting up client channel & it was successful for " + channel).log(); var pipeline = channel.pipeline(); if (sslContext != null) { var sslEngine = sslContext.newEngine(channel.alloc()); @@ -324,7 +319,7 @@ private void initializeRequestHandlers() { 
addLoggingHandlerLast(pipeline, "D"); pipeline.addLast(BACKSIDE_HTTP_WATCHER_HANDLER_NAME, new BacksideHttpWatcherHandler(responseBuilder)); addLoggingHandlerLast(pipeline, "E"); - log.atTrace().setMessage(() -> "Added handlers to the pipeline: " + pipeline).log(); + log.atTrace().setMessage("Added handlers to the pipeline: {}").addArgument(pipeline).log(); channel.config().setAutoRead(true); } @@ -336,8 +331,7 @@ private static void addLoggingHandlerLast(ChannelPipeline pipeline, String name) private void deactivateChannel() { try { var pipeline = channel.pipeline(); - log.atDebug() - .setMessage(() -> "Resetting the pipeline for channel {} currently at: {}") + log.atDebug().setMessage("Resetting the pipeline for channel {} currently at: {}") .addArgument(channel) .addArgument(pipeline) .log(); @@ -345,9 +339,8 @@ private void deactivateChannel() { try { pipeline.remove(handlerName); } catch (NoSuchElementException e) { - log.atWarn() - .setMessage(() -> "Ignoring an exception that the " + handlerName + " wasn't present") - .log(); + log.atWarn().setMessage("Ignoring an exception that the {} wasn't present") + .addArgument(handlerName).log(); } } while (true) { @@ -358,7 +351,10 @@ private void deactivateChannel() { pipeline.removeLast(); } channel.config().setAutoRead(false); - log.atDebug().setMessage(() -> "Reset the pipeline for channel " + channel + " back to: " + pipeline).log(); + log.atDebug().setMessage("Reset the pipeline for channel {} back to: {}") + .addArgument(channel) + .addArgument(pipeline) + .log(); } finally { getCurrentRequestSpan().close(); getParentContext().close(); @@ -369,26 +365,21 @@ private void deactivateChannel() { public TrackedFuture consumeBytes(ByteBuf packetData) { activeChannelFuture = activeChannelFuture.getDeferredFutureThroughHandle((v, channelException) -> { if (channelException == null) { - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "outboundChannelFuture is ready. Writing packets (hash=" - + System.identityHashCode(packetData) - + "): " - + httpContext() - + ": " - + packetData.toString(StandardCharsets.UTF_8) - ) + log.atTrace().setMessage("outboundChannelFuture is ready. Writing packets (hash={}): {}: {}") + .addArgument(() -> System.identityHashCode(packetData)) + .addArgument(this::httpContext) + .addArgument(packetData.toString(StandardCharsets.UTF_8)) .log(); return writePacketAndUpdateFuture(packetData).whenComplete((v2, t2) -> - log.atTrace().setMessage(() -> "finished writing " + httpContext() + " t=" + t2).log(), () -> ""); + log.atTrace().setMessage("finished writing {} t={}") + .addArgument(this::httpContext) + .addArgument(t2) + .log(), () -> ""); } else { log.atWarn() - .setMessage( - () -> httpContext().getReplayerRequestKey() - + "outbound channel was not set up successfully, NOT writing bytes hash=" - + System.identityHashCode(packetData) - ) + .setMessage("{} outbound channel was not set up successfully, NOT writing bytes hash={}") + .addArgument(() -> httpContext().getReplayerRequestKey()) + .addArgument(System.identityHashCode(packetData)) + .log(); if (channel != null) { channel.close(); @@ -397,14 +388,10 @@ public TrackedFuture consumeBytes(ByteBuf packetData) { } }, () -> "consumeBytes - after channel is fully initialized (potentially waiting on TLS handshake)"); log.atTrace() - .setMessage( - () -> "Setting up write of packetData[" - + packetData - + "] hash=" - + System.identityHashCode(packetData) - + ". 
Created future consumeBytes=" - + activeChannelFuture - ) + .setMessage("Setting up write of packetData[{}] hash={}. Created future consumeBytes={}") + .addArgument(packetData) + .addArgument(() -> System.identityHashCode(packetData)) + .addArgument(activeChannelFuture) .log(); return activeChannelFuture; } @@ -423,9 +410,8 @@ private TrackedFuture writePacketAndUpdateFuture(ByteBuf packetDat @Override public TrackedFuture finalizeRequest() { var ff = activeChannelFuture.getDeferredFutureThroughHandle((v, t) -> { - log.atDebug() - .setMessage(() -> "finalization running since all prior work has completed for " + httpContext()) - .log(); + log.atDebug().setMessage("finalization running since all prior work has completed for {}") + .addArgument(() -> httpContext()).log(); if (!(this.currentRequestContextUnion instanceof IReplayContexts.IReceivingHttpResponseContext)) { this.getCurrentRequestSpan().close(); this.setCurrentMessageContext(getParentContext().createWaitingForResponseContext()); @@ -455,15 +441,10 @@ public TrackedFuture finalizeRequest() { getParentContext().close(); } }), () -> "clearing pipeline"); - log.atDebug() - .setMessage( - () -> "Chaining finalization work off of " - + activeChannelFuture - + " for " - + httpContext() - + ". Returning finalization future=" - + ff - ) + log.atDebug().setMessage("Chaining finalization work off of {} for {}. Returning finalization future={}") + .addArgument(activeChannelFuture) + .addArgument(() -> httpContext()) + .addArgument(ff) .log(); return ff; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java index 81424e89a6..be8055a210 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java @@ -98,14 +98,9 @@ private HttpRequestDecoder getHttpRequestDecoderHandler() { public TrackedFuture consumeBytes(ByteBuf nextRequestPacket) { chunks.add(nextRequestPacket.retainedDuplicate()); chunkSizes.get(chunkSizes.size() - 1).add(nextRequestPacket.readableBytes()); - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "HttpJsonTransformingConsumer[" - + this - + "]: writing into embedded channel: " - + nextRequestPacket.toString(StandardCharsets.UTF_8) - ) + log.atTrace().setMessage("HttpJsonTransformingConsumer[{}]: writing into embedded channel: {}") + .addArgument(this) + .addArgument(nextRequestPacket.toString(StandardCharsets.UTF_8)) .log(); return TextTrackedFuture.completedFuture(null, () -> "initialValue") .map( diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java index e1c4193dc9..891ac016d6 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java @@ -40,7 +40,7 @@ public void channelRead(@NonNull ChannelHandlerContext ctx, 
@NonNull Object msg) + " " + request.protocolVersion().text() ) .log(); var httpJsonMessage = parseHeadersIntoMessage(request); ctx.fireChannelRead(httpJsonMessage); } else { diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java index b1ab1aded7..e33035e943 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java @@ -40,7 +40,7 @@ public void channelRead(@NonNull ChannelHandlerContext ctx, @NonNull Object msg) + " " + response.protocolVersion().text() ) .log(); var httpJsonMessage = parseHeadersIntoMessage(response); ctx.fireChannelRead(httpJsonMessage); } else { diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java index f80112116d..754ac2a4ee 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java @@ -93,7 +93,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception } } } catch (JacksonException e) { - log.atInfo().setCause(e).setMessage(() -> "Error parsing json body. " + + log.atInfo().setCause(e).setMessage("Error parsing json body. 
" + "Will pass all payload bytes directly as a ByteBuf within the payload map").log(); jsonWasInvalid = true; parsedJsonObjects.clear(); @@ -139,8 +139,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception context.onTextPayloadParseSucceeded(); } catch (CharacterCodingException e) { context.onTextPayloadParseFailed(); - log.atDebug().setMessage("Payload not valid utf-8, fallback to binary") - .setCause(e).log(); + log.atDebug().setCause(e).setMessage("Payload not valid utf-8, fallback to binary").log(); context.onPayloadSetBinary(); capturedHttpJsonMessage.payload() .put(JsonKeysForHttpMessage.INLINED_BINARY_BODY_DOCUMENT_KEY, diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java index d6a9bc7940..109a591f35 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java @@ -55,12 +55,8 @@ private boolean flushDownstream(ChannelHandlerContext ctx) { public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { boolean messageFlushed = flushDownstream(ctx); if (messageFlushed) { - log.atWarn() - .setMessage( - () -> "Failed to sign message due to handler removed" - + " before the end of the http contents were received" - ) - .log(); + log.atWarn().setMessage("Failed to sign message due to handler removed " + + "before the end of the http contents were received").log(); } super.handlerRemoved(ctx); } @@ -70,11 +66,8 @@ public void channelUnregistered(ChannelHandlerContext ctx) throws Exception { boolean messageFlushed = flushDownstream(ctx); if (messageFlushed) { log.atWarn() - .setMessage( - () -> "Failed to sign message due to channel unregistered" - + " before the end of the http contents were received" - ) - .log(); + .setMessage("Failed to sign message due to channel unregistered" + + " before the end of the http contents were received").log(); } super.channelUnregistered(ctx); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java index 0e5a9e8374..a68be242bb 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java @@ -131,9 +131,9 @@ private void writeHeadersIntoByteBufs(ChannelHandlerContext ctx, HttpJsonRequest return; } } catch (Exception e) { - log.atWarn() - .setCause(e) - .setMessage(() -> "writing headers directly to chunks w/ sizes didn't work for " + httpJson) + log.atWarn().setCause(e) + .setMessage("writing headers directly to chunks w/ sizes didn't work for {}") + .addArgument(httpJson) .log(); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java 
index 62e503b355..e470ef02e9 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java @@ -156,33 +156,28 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) { + System.identityHashCode(ctx) ); var bb = (ByteBuf) msg; - log.atTrace().setMessage(() -> "Send bb.refCnt=" + bb.refCnt() + " " + System.identityHashCode(bb)).log(); + log.atTrace().setMessage("Send bb.refCnt={} {}") + .addArgument(bb::refCnt) + .addArgument(() -> System.identityHashCode(bb)) + .log(); // I don't want to capture the *this* object, the preexisting value of the currentFuture field only final var preexistingFutureForCapture = currentFuture; var numBytesToSend = bb.readableBytes(); currentFuture = currentFuture.getDeferredFutureThroughHandle((v, t) -> { try { if (t != null) { - log.atInfo() - .setCause(t) - .setMessage( - () -> "got exception from a previous future that " - + "will prohibit sending any more data to the packetReceiver" - ) - .log(); + log.atInfo().setCause(t).setMessage("got exception from a previous future that " + + "will prohibit sending any more data to the packetReceiver").log(); return TextTrackedFuture.failedFuture(t, () -> "failed previous future"); } else { - log.atTrace() - .setMessage( - () -> "chaining consumingBytes with " - + msg - + " lastFuture=" - + preexistingFutureForCapture - ) + log.atTrace().setMessage("chaining consumingBytes with {} lastFuture={}") + .addArgument(msg) + .addArgument(preexistingFutureForCapture) .log(); var rval = packetReceiver.consumeBytes(bb); - log.atTrace() - .setMessage(() -> "packetReceiver.consumeBytes()=" + rval + " bb.refCnt=" + bb.refCnt()) + log.atTrace().setMessage("packetReceiver.consumeBytes()={} bb.refCnt={}") + .addArgument(rval) + .addArgument(bb::refCnt) .log(); return rval.map( cf -> cf.thenApply(ignore -> false), diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java index 7cf32a8549..d90d757815 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java @@ -53,7 +53,8 @@ boolean hadNoErrors() { @Override public void channelRead(@NonNull ChannelHandlerContext ctx, @NonNull Object msg) throws Exception { if (msg instanceof HttpContent && errorField == null) { - log.atDebug().setMessage(() -> "body contents: " + ((HttpContent) msg).content().duplicate().toString()).log(); + log.atDebug().setMessage("body contents: {}") + .addArgument(((HttpContent) msg).content().duplicate()).log(); feeder.feedInput(((HttpContent) msg).content().nioBuffer()); consumeInput(); if (msg instanceof LastHttpContent) { @@ -74,7 +75,7 @@ private void consumeInput() throws IOException { token != JsonToken.NOT_AVAILABLE) { JsonToken finalToken = token; - log.atTrace().setMessage(() -> "Got token: " + finalToken).log(); + log.atTrace().setMessage("Got token: {}").addArgument(finalToken).log(); if (token == JsonToken.FIELD_NAME && "errors".equals(parser.getCurrentName())) { parser.nextToken(); errorField = parser.getValueAsBoolean(); 
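Editor's note: every hunk above applies the same mechanical conversion, so a compact illustration may be useful before the Kafka files. The sketch below is not taken from the PR; it assumes the SLF4J 2.x fluent API (which these call sites appear to use) and all names in it are invented.

```java
// A minimal, self-contained sketch of the conversion this PR applies at each call
// site. Assumes the SLF4J 2.x fluent API; class and variable names are
// illustrative, not taken from the migrations codebase.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FluentLogMigrationSketch {
    private static final Logger log = LoggerFactory.getLogger(FluentLogMigrationSketch.class);

    public static void main(String[] args) {
        var connectionId = "conn-42";
        var accum = new StringBuilder("accumulated-state"); // stands in for a costly-to-render object

        // Before: the whole message is assembled inside a Supplier. Nothing runs when
        // TRACE is disabled, but every call site carries concatenation noise.
        log.atTrace().setMessage(() -> "Added request data for accum[" + connectionId + "]=" + accum).log();

        // After: a constant format string with {} anchors. addArgument(Object) is
        // evaluated eagerly; addArgument(Supplier) defers the computation until the
        // builder has confirmed that TRACE is enabled.
        log.atTrace()
            .setMessage("Added request data for accum[{}]={}")
            .addArgument(connectionId)
            .addArgument(accum::toString)
            .log();
    }
}
```

One likely motivation for the parameterized form, beyond readability: the message template stays constant across invocations, which makes pattern-based log filtering and aggregation more reliable than per-call concatenated strings.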
diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java index 25a898c258..1555b7716f 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java @@ -236,11 +236,8 @@ public List readNextTrafficStreamSynchronously( try { TrafficStream ts = TrafficStream.parseFrom(kafkaRecord.value()); var trafficStreamsSoFar = trafficStreamsRead.incrementAndGet(); - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "Parsed traffic stream #" + trafficStreamsSoFar + ": " + offsetData + " " + ts - ) + log.atTrace().setMessage("{}").addArgument(() -> + "Parsed traffic stream #" + trafficStreamsSoFar + ": " + offsetData + " " + ts) .log(); var key = new TrafficStreamKeyWithKafkaRecordId(tsk -> { var channelKeyCtx = channelContextManager.retainOrCreateContext(tsk); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java index f96f02d4fb..4a884bf2ba 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java @@ -56,12 +56,16 @@ Optional removeAndReturnNewHead(long offsetToRemove) { if (offsetToRemove == topCursor) { topCursor = Optional.ofNullable(pQueue.peek()).orElse(cursorHighWatermark + 1); // most recent cursor // was previously popped - log.atDebug() - .setMessage("Commit called for " + offsetToRemove + ", and new topCursor=" + topCursor) + log.atDebug().setMessage("Commit called for {}, and new topCursor={}") + .addArgument(offsetToRemove) + .addArgument(topCursor) .log(); return Optional.of(topCursor); } else { - log.atDebug().setMessage("Commit called for " + offsetToRemove + ", but topCursor=" + topCursor).log(); + log.atDebug().setMessage("Commit called for {}, but topCursor={}") + .addArgument(offsetToRemove) + .addArgument(topCursor) + .log(); return Optional.empty(); } } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java index f2f8fc277c..bc9123b302 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java @@ -136,7 +136,7 @@ public TrackingKafkaConsumer( @Override public void onPartitionsRevoked(Collection partitions) { if (partitions.isEmpty()) { - log.atDebug().setMessage(() -> this + " revoked no partitions.").log(); + log.atDebug().setMessage("{} revoked no partitions.").addArgument(this).log(); return; } @@ -153,12 +153,9 @@ public void onPartitionsRevoked(Collection partitions) { partitionToOffsetLifecycleTrackerMap.values().stream().mapToInt(OffsetLifecycleTracker::size).sum() ); kafkaRecordsReadyToCommit.set(!nextSetOfCommitsMap.values().isEmpty()); - log.atWarn() - .setMessage( - () -> this 
- + " partitions revoked for " - + partitions.stream().map(String::valueOf).collect(Collectors.joining(",")) - ) + log.atWarn().setMessage("{} partitions revoked for {}") + .addArgument(this) + .addArgument(() -> partitions.stream().map(String::valueOf).collect(Collectors.joining(","))) .log(); } } @@ -166,7 +163,7 @@ public void onPartitionsRevoked(Collection partitions) { @Override public void onPartitionsAssigned(Collection newPartitions) { if (newPartitions.isEmpty()) { - log.atInfo().setMessage(() -> this + " assigned no new partitions.").log(); + log.atInfo().setMessage("{} assigned no new partitions.").addArgument(this).log(); return; } @@ -180,22 +177,17 @@ public void onPartitionsAssigned(Collection newPartitions) { ) ); log.atInfo() - .setMessage( - () -> this - + " partitions added for " - + newPartitions.stream().map(String::valueOf).collect(Collectors.joining(",")) - ) + .setMessage("{} partitions added for {}") + .addArgument(this) + .addArgument(newPartitions.stream().map(String::valueOf).collect(Collectors.joining(","))) .log(); } } public void close() { log.atInfo() - .setMessage( - () -> "Kafka consumer closing. " - + "Committing (implicitly by Kafka's consumer): " - + nextCommitsToString() - ) + .setMessage("Kafka consumer closing. Committing (implicitly by Kafka's consumer): {}") + .addArgument(this::nextCommitsToString) .log(); kafkaConsumer.close(); } @@ -205,13 +197,9 @@ public Optional getNextRequiredTouch() { var r = kafkaRecordsLeftToCommitEventually.get() == 0 ? Optional.empty() : Optional.of(kafkaRecordsReadyToCommit.get() ? Instant.now() : lastTouchTime.plus(keepAliveInterval)); - log.atTrace() - .setMessage( - () -> "returning next required touch at " - + r.map(t -> "" + t).orElse("N/A") - + " from a lastTouchTime of " - + lastTouchTime - ) + log.atTrace().setMessage("returning next required touch at {} from a lastTouchTime of {}") + .addArgument(r.map(Instant::toString).orElse("N/A")) + .addArgument(lastTouchTime) .log(); return r; } @@ -231,14 +219,10 @@ public void touch(ITrafficSourceContexts.IBackPressureBlockContext context) { } catch (IllegalStateException e) { throw e; } catch (RuntimeException e) { - log.atWarn() - .setCause(e) - .setMessage( - "Unable to poll the topic: " - + topic - + " with our Kafka consumer. " - + "Swallowing and awaiting next metadata refresh to try again." - ) + log.atWarn().setCause(e) + .setMessage("Unable to poll the topic: {} with our Kafka consumer. 
" + + "Swallowing and awaiting next metadata refresh to try again.") + .addArgument(topic) .log(); } finally { resume(); @@ -268,7 +252,7 @@ private void pause() { + "The active partitions according to the consumer: " + kafkaConsumer.assignment().stream().map(String::valueOf).collect(Collectors.joining(",")) ) - .log(); + .LOG_TO_CHECK(); } } @@ -294,7 +278,7 @@ private void resume() { + "The active partitions according to the consumer: " + kafkaConsumer.assignment().stream().map(String::valueOf).collect(Collectors.joining(",")) ) - .log(); + .LOG_TO_CHECK(); } } @@ -328,7 +312,7 @@ private Stream applyBuilder( ); offsetTracker.add(offsetDetails.getOffset()); kafkaRecordsLeftToCommitEventually.incrementAndGet(); - log.atTrace().setMessage(() -> "records in flight=" + kafkaRecordsLeftToCommitEventually.get()).log(); + log.atTrace().setMessage(() -> "records in flight=" + kafkaRecordsLeftToCommitEventually.get()).LOG_TO_CHECK(); return builder.apply(offsetDetails, kafkaRecord); }); } @@ -350,7 +334,7 @@ private ConsumerRecords safePollWithSwallowedRuntimeExceptions( + "Records in flight=" + kafkaRecordsLeftToCommitEventually.get() ) - .log(); + .LOG_TO_CHECK(); log.atTrace() .setMessage("{}") .addArgument( @@ -361,7 +345,7 @@ private ConsumerRecords safePollWithSwallowedRuntimeExceptions( .collect(Collectors.joining(",")) + "}" ) - .log(); + .LOG_TO_CHECK(); log.atTrace() .setMessage("{}") .addArgument( @@ -372,7 +356,7 @@ private ConsumerRecords safePollWithSwallowedRuntimeExceptions( .collect(Collectors.joining(",")) + "}" ) - .log(); + .LOG_TO_CHECK(); return records; } catch (RuntimeException e) { log.atWarn() @@ -382,7 +366,7 @@ private ConsumerRecords safePollWithSwallowedRuntimeExceptions( + "Swallowing and awaiting next metadata refresh to try again." ) .addArgument(topic) - .log(); + .LOG_TO_CHECK(); return new ConsumerRecords<>(Collections.emptyMap()); } } @@ -405,7 +389,7 @@ ITrafficCaptureSource.CommitResult commitKafkaKey(ITrafficStreamKey streamKey, K + "have been handled again by a current consumer within this process or another. 
Full key=" + kafkaTsk ) - .log(); + .LOG_TO_CHECK(); return ITrafficCaptureSource.CommitResult.IGNORED; } @@ -417,7 +401,7 @@ ITrafficCaptureSource.CommitResult commitKafkaKey(ITrafficStreamKey streamKey, K newHeadValue = tracker.removeAndReturnNewHead(kafkaTsk.getOffset()); return newHeadValue.map(o -> { var v = new OffsetAndMetadata(o); - log.atDebug().setMessage(() -> "Adding new commit " + k + "->" + v + " to map").log(); + log.atDebug().setMessage(() -> "Adding new commit " + k + "->" + v + " to map").LOG_TO_CHECK(); synchronized (commitDataLock) { addKeyContextForEventualCommit(streamKey, kafkaTsk, k); nextSetOfCommitsMap.put(k, v); @@ -473,7 +457,7 @@ private void safeCommit(Supplier com ); log.atDebug() .setMessage(() -> "Done committing now records in flight=" + kafkaRecordsLeftToCommitEventually.get()) - .log(); + .LOG_TO_CHECK(); } catch (RuntimeException e) { log.atWarn() .setCause(e) @@ -490,7 +474,7 @@ private void safeCommit(Supplier com .map(kvp -> kvp.getKey() + "->" + kvp.getValue()) .collect(Collectors.joining(",")) ) - .log(); + .LOG_TO_CHECK(); } finally { if (context != null) { context.close(); @@ -504,7 +488,7 @@ private static void safeCommitStatic( HashMap nextCommitsMap ) { assert !nextCommitsMap.isEmpty(); - log.atDebug().setMessage(() -> "Committing " + nextCommitsMap).log(); + log.atDebug().setMessage(() -> "Committing " + nextCommitsMap).LOG_TO_CHECK(); try (var kafkaContext = context.createNewKafkaCommitContext()) { kafkaConsumer.commitSync(nextCommitsMap); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java index b578bd8213..8f8b8a6830 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java @@ -61,9 +61,8 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } private void triggerResponseCallbackAndRemoveCallback() { - log.atTrace() - .setMessage(() -> "triggerResponseCallbackAndRemoveCallback, callback=" + this.responseCallback) - .log(); + log.atTrace().setMessage("triggerResponseCallbackAndRemoveCallback, callback={}") + .addArgument(this.responseCallback).log(); doneReadingRequest = true; if (this.responseCallback != null) { // this method may be re-entrant upon calling the callback, so make sure that we don't loop diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java index c50658b0c1..711b0f1b62 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java @@ -72,7 +72,7 @@ public void onNewDataArrivingAfterItsAccumulationHasBeenExpired( + ". 
To remedy this, set the minimumGuaranteedLifetime to at least " + extraGap ) - .log(); + .log(); } public boolean shouldRetryAfterAccumulationTimestampRaceDetected( diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java index b25da73f23..f7d23916a0 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java @@ -82,7 +82,7 @@ public void stopReadsPast(Instant pointInTime) { + " the new stopReadingAtRef=" + newValue ) - .log(); + .log(); // No reason to signal more than one reader. We don't support concurrent reads with the current contract readGate.drainPermits(); readGate.release(); @@ -96,7 +96,7 @@ public void stopReadsPast(Instant pointInTime) { + "] didn't move the cursor because the value was already at " + newValue ) - .log(); + .log(); } } @@ -132,7 +132,7 @@ public CompletableFuture> readNextTrafficStreamChunk () -> "end of readNextTrafficStreamChunk trigger...lastTimestampSecondsRef=" + lastTimestampSecondsRef.get() ) - .log(); + .log(); }); } @@ -151,7 +151,7 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) .setMessage( () -> "stopReadingAtRef=" + stopReadingAtRef + " lastTimestampSecondsRef=" + lastTimestampSecondsRef ) - .log(); + .log(); ITrafficSourceContexts.IBackPressureBlockContext blockContext = null; while (stopReadingAtRef.get().isBefore(lastTimestampSecondsRef.get())) { if (blockContext == null) { @@ -162,7 +162,7 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) .setMessage("blocking until signaled to read the next chunk last={} stop={}") .addArgument(lastTimestampSecondsRef.get()) .addArgument(stopReadingAtRef.get()) - .log(); + .log(); var nextTouchOp = underlyingSource.getNextRequiredTouch(); if (nextTouchOp.isEmpty()) { log.trace("acquiring readGate semaphore (w/out timeout)"); @@ -183,7 +183,7 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) + nowTime + ")" ) - .log(); + .log(); if (waitIntervalMs <= 0) { underlyingSource.touch(blockContext); } else { @@ -191,16 +191,16 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) // if it DOES succeed, we'll loop around and make sure that there's not another reason to stop log.atTrace() .setMessage(() -> "acquiring readGate semaphore with timeout=" + waitIntervalMs) - .log(); + .log(); try (var waitContext = blockContext.createWaitForSignalContext()) { var didAcquire = readGate.tryAcquire(waitIntervalMs, TimeUnit.MILLISECONDS); log.atTrace().setMessage("semaphore ") - .addArgument(() -> (didAcquire ? "" : "not ") + "acquired").log(); + .addArgument(() -> (didAcquire ? 
"" : "not ") + "acquired").LOG_TO_CHECK(); } } } } catch (InterruptedException e) { - log.atWarn().setCause(e).log("Interrupted while waiting to read more data"); + log.atWarn().setCause(e).LOG_TO_CHECK("Interrupted while waiting to read more data"); Thread.currentThread().interrupt(); break; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java index de95adeab7..dcd0f7de36 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java @@ -39,9 +39,9 @@ public InputStreamOfTraffic(RootReplayerContext context, InputStream inputStream try { isr.close(); } catch (Exception e2) { - log.atError().setMessage("Caught exception while closing InputStreamReader that " + + log.atError().setCause(e2).setMessage("Caught exception while closing InputStreamReader that " + "was in response to an earlier thrown exception. Swallowing the inner exception and " + - "throwing the original one.").setCause(e2).log(); + "throwing the original one.").log(); } throw e; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java index 31c272e8db..8093dea7e9 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java @@ -49,7 +49,7 @@ private void consumeFromQueue() { .setMessage(() -> "liveTrafficStreamCostGate.permits: {} acquiring: {}") .addArgument(liveTrafficStreamCostGate.availablePermits()) .addArgument(workItem.cost) - .log(); + .LOG_TO_CHECK(); liveTrafficStreamCostGate.acquire(workItem.cost); WorkItem finalWorkItem = workItem; log.atDebug() @@ -59,7 +59,7 @@ private void consumeFromQueue() { + ") to process " + finalWorkItem.context ) - .log(); + .LOG_TO_CHECK(); workItem.task.accept(workItem); workItem = null; } @@ -73,7 +73,7 @@ private void consumeFromQueue() { + workQueue.size() + " enqueued items" ) - .log(); + .LOG_TO_CHECK(); } throw e; } @@ -97,7 +97,7 @@ public void doneProcessing(@NonNull WorkItem workItem) { + " for " + workItem.context ) - .log(); + .LOG_TO_CHECK(); } @Override diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java index d99be87b65..0267112a00 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java @@ -88,7 +88,7 @@ private static class TestTrafficCaptureSource implements ISimpleTrafficCaptureSo public CompletableFuture> readNextTrafficStreamChunk( Supplier contextSupplier ) { - log.atTrace().setMessage(() -> "Test.readNextTrafficStreamChunk.counter=" + counter).log(); + log.atTrace().setMessage(() -> "Test.readNextTrafficStreamChunk.counter=" + 
counter).log(); var i = counter.getAndIncrement(); if (i >= nStreamsToCreate) { return CompletableFuture.failedFuture(new EOFException()); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java index 84f9669d25..3d1a69dd14 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java @@ -68,7 +68,7 @@ public static Arguments[] generateAllTestsAndConfirmComplete(IntStream seedStrea log.atInfo() .setMessage(() -> "Sufficient random seeds to generate a full cover of tests:{}") .addArgument(seedsThatOfferUniqueTestCases.toString()) - .log(); + .log(); return argsArray; } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java index 86f387fcf4..15d0be1d07 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java @@ -63,9 +63,9 @@ public TrackedFuture consumeBytes(ByteBuf nextRequestPacket) { lastCheckIsReady.release(); log.atDebug() .setMessage(() -> "trying to acquire semaphore for packet #" + index + " and id=" + id) - .log(); + .log(); consumeIsReady.acquire(); - log.atDebug().setMessage(() -> "Acquired semaphore for packet #" + index + " and id=" + id).log(); + log.atDebug().setMessage(() -> "Acquired semaphore for packet #" + index + " and id=" + id).log(); } catch (InterruptedException e) { throw Lombok.sneakyThrow(e); } @@ -79,7 +79,7 @@ public TrackedFuture finalizeRequest() { return new TextTrackedFuture<>(CompletableFuture.supplyAsync(() -> { try { lastCheckIsReady.release(); - log.atDebug().setMessage(() -> "trying to acquire semaphore for finalize and id=" + id).log(); + log.atDebug().setMessage(() -> "trying to acquire semaphore for finalize and id=" + id).log(); consumeIsReady.acquire(); } catch (InterruptedException e) { throw Lombok.sneakyThrow(e); @@ -169,7 +169,7 @@ public void testFutureGraphBuildout() throws Exception { .map(sr -> getParentsDiagnosticString(sr, "")) .collect(Collectors.joining("\n---\n")) ) - .log(); + .log(); pktConsumer.consumeIsReady.release(); } } @@ -281,14 +281,14 @@ public void testThatSchedulingWorks() throws Exception { body.toString(StandardCharsets.UTF_8) ); } catch (Throwable e) { - log.atError().setMessage(()->"caught exception(1)").setCause(e).log(); + log.atError().setMessage(()->"caught exception(1)").setCause(e).log(); throw e; } } closeFuture.get(); log.error("Done running loop"); } catch (Throwable e) { - log.atError().setMessage(()->"caught exception(2)").setCause(e).log(); + log.atError().setMessage(()->"caught exception(2)").setCause(e).log(); throw e; } } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java 
b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java index 6876ffb93c..790ffe2220 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java @@ -141,7 +141,7 @@ public void testThatTestSetupIsCorrect() throws Exception { new String(response.payloadBytes, StandardCharsets.UTF_8) ); } catch (Throwable t) { - log.atError().setMessage(() -> "Error=").setCause(t).log(); + log.atError().setMessage(() -> "Error=").setCause(t).log(); } } } @@ -227,7 +227,7 @@ private void testPeerResets( NettyPacketToHttpConsumerTest::makeResponseContext ) ) { - log.atError().setMessage("Got port " + testServer.port).log(); + log.atError().setMessage("Got port " + testServer.port).log(); var sslContext = !useTls ? null : SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build(); @@ -422,7 +422,7 @@ public void testResponseTakesLongerThanTimeout(boolean useTls) throws Exception var timeShifter = new TimeShifter(); var firstRequestTime = Instant.now(); timeShifter.setFirstTimestamp(firstRequestTime); - log.atInfo().setMessage("Initial Timestamp: " + firstRequestTime).log(); + log.atInfo().setMessage("Initial Timestamp: " + firstRequestTime).log(); var replayEngineFactory = new ReplayEngineFactory(responseTimeout, new TestFlowController(), @@ -441,7 +441,7 @@ public void testResponseTakesLongerThanTimeout(boolean useTls) throws Exception () -> Stream.of(EXPECTED_REQUEST_STRING.getBytes(StandardCharsets.UTF_8))); var maxTimeToWaitForTimeoutOrResponse = Duration.ofSeconds(30); var aggregatedResponse = requestFinishFuture.get(maxTimeToWaitForTimeoutOrResponse); - log.atInfo().setMessage("RequestFinishFuture finished").log(); + log.atInfo().setMessage("RequestFinishFuture finished").log(); Assertions.assertInstanceOf(ReadTimeoutException.class, aggregatedResponse.getError()); } } @@ -459,7 +459,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr + " and timeBetweenRequests" + timeBetweenRequests ) - .log(); + .log(); try ( var testServer = SimpleNettyHttpServer.makeServer( useTls, @@ -483,7 +483,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr var timeShifter = new TimeShifter(); var firstRequestTime = Instant.now(); timeShifter.setFirstTimestamp(firstRequestTime); - log.atInfo().setMessage("Initial Timestamp: " + firstRequestTime).log(); + log.atInfo().setMessage("Initial Timestamp: " + firstRequestTime).log(); var replayEngineFactory = new ReplayEngineFactory(responseTimeout, new TestFlowController(), timeShifter @@ -491,7 +491,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr int i = 0; while (true) { var ctx = rootContext.getTestConnectionRequestContext("TEST", i); - log.atInfo().setMessage("Starting transformAndSendRequest for request " + i).log(); + log.atInfo().setMessage("Starting transformAndSendRequest for request " + i).log(); var tr = new RequestTransformerAndSender<>(new NoRetryEvaluatorFactory()); var requestFinishFuture = tr.transformAndSendRequest( @@ -504,7 +504,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr () -> Stream.of(EXPECTED_REQUEST_STRING.getBytes(StandardCharsets.UTF_8))); var 
maxTimeToWaitForTimeoutOrResponse = Duration.ofSeconds(10); var aggregatedResponse = requestFinishFuture.get(maxTimeToWaitForTimeoutOrResponse); - log.atInfo().setMessage("RequestFinishFuture finished for request " + i).log(); + log.atInfo().setMessage("RequestFinishFuture finished for request " + i).log(); Assertions.assertNull(aggregatedResponse.getError()); var responseAsString = getResponsePacketsAsString(aggregatedResponse); Assertions.assertEquals(EXPECTED_RESPONSE_STRING, responseAsString); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java index c2c7dbf5e7..aa475c0a77 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java @@ -258,7 +258,7 @@ public void fullTestWithThrottledStart() throws Throwable { .map(ts -> TrafficStreamUtils.summarizeTrafficStream(ts)) .collect(Collectors.joining("\n")) ) - .log(); + .log(); Function trafficSourceSupplier = rc -> new ISimpleTrafficCaptureSource() { boolean isDone = false; @@ -382,7 +382,7 @@ public void fullTestWithRestarts(int testSize, boolean randomize) throws Throwab .map(ts -> TrafficStreamUtils.summarizeTrafficStream(ts)) .collect(Collectors.joining("\n")) ) - .log(); + .log(); var trafficSourceSupplier = new ArrayCursorTrafficSourceContext(trafficStreams); TrafficReplayerRunner.runReplayer( numExpectedRequests, diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java index 21fc6a5855..5688bc8cc0 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java @@ -110,7 +110,7 @@ public void fullTest(int testSize, boolean randomize) throws Throwable { .map(TrafficStreamUtils::summarizeTrafficStream) .collect(Collectors.joining("\n")) ) - .log(); + .log(); loadStreamsToKafka( buildKafkaConsumer(), @@ -145,7 +145,7 @@ private KafkaConsumer buildKafkaConsumer() { ); kafkaConsumerProps.setProperty("max.poll.interval.ms", DEFAULT_POLL_INTERVAL_MS + ""); var kafkaConsumer = new KafkaConsumer(kafkaConsumerProps); - log.atInfo().setMessage(() -> "Just built KafkaConsumer=" + kafkaConsumer).log(); + log.atInfo().setMessage(() -> "Just built KafkaConsumer=" + kafkaConsumer).log(); return kafkaConsumer; } @@ -207,7 +207,7 @@ Producer buildKafkaProducer() { try { return new KafkaProducer(kafkaProps); } catch (Exception e) { - log.atError().setCause(e).log(); + log.atError().setCause(e).log(); System.exit(1); throw e; } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java index 6576017495..3a06728f24 100644 --- 
a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java @@ -143,7 +143,7 @@ public static void runReplayer( log.atLevel(e.originalCause instanceof FabricatedErrorToKillTheReplayer ? Level.INFO : Level.ERROR) .setCause(e.originalCause) .setMessage(() -> "broke out of the replayer, with this shutdown reason") - .log(); + .log(); log.atLevel(e.immediateCause == null ? Level.INFO : Level.ERROR) .setCause(e.immediateCause) .setMessage( @@ -151,7 +151,7 @@ public static void runReplayer( + e.originalCause + " and this immediate reason" ) - .log(); + .log(); FabricatedErrorToKillTheReplayer killSignalError = e.originalCause instanceof FabricatedErrorToKillTheReplayer ? (FabricatedErrorToKillTheReplayer) e.originalCause @@ -189,7 +189,7 @@ public static void runReplayer( log.atInfo() .setMessage(() -> "completely received request keys=\n{}") .addArgument(completelyHandledItems.keySet().stream().sorted().collect(Collectors.joining("\n"))) - .log(); + .log(); var skippedPerRun = IntStream.range(0, receivedPerRun.size()) .map(i -> totalUniqueEverReceivedSizeAfterEachRun.get(i) - receivedPerRun.get(i)) .toArray(); @@ -206,7 +206,7 @@ public static void runReplayer( ) .collect(Collectors.joining("\n")) ) - .log(); + .log(); var skippedPerRunDiffs = IntStream.range(0, receivedPerRun.size() - 1) .map(i -> (skippedPerRun[i] <= skippedPerRun[i + 1]) ? 1 : 0) .toArray(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java index 4166867408..6d80a4e893 100--- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java @@ -171,7 +171,7 @@ public void testConnectionFailuresNeverGiveUp() throws Exception { var e = Assertions.assertThrows(Exception.class, f::get); var shutdownResult = ccpShutdownFuture.get(); - log.atInfo().setCause(e).setMessage(() -> "exception: ").log(); + log.atInfo().setCause(e).setMessage(() -> "exception: ").log(); // doubly-nested ExecutionException. 
Once for the get() call here and once for the work done in submit, // which wraps the scheduled request's future Assertions.assertInstanceOf(IllegalStateException.class, e.getCause().getCause()); @@ -227,7 +227,7 @@ public void testMalformedResponseFailuresNeverGiveUp() throws Exception { var lastResponse = responseList.get(responseList.size()-1).getRawResponse(); Assertions.assertNotNull(lastResponse); Assertions.assertEquals(200, lastResponse.status().code()); - log.atInfo().setMessage(()->"responses: " + responses).log(); + log.atInfo().setMessage(()->"responses: " + responses).log(); var retries = checkHttpRetryConsistency(rootContext); Assertions.assertTrue(retries > 0); var metrics = rootContext.inMemoryInstrumentationBundle.getFinishedMetrics(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java index f5f5ffe13d..20bf139b30 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java @@ -44,7 +44,7 @@ private KafkaConsumer buildKafkaConsumer() { ); kafkaConsumerProps.setProperty("max.poll.interval.ms", DEFAULT_POLL_INTERVAL_MS + ""); var kafkaConsumer = new KafkaConsumer(kafkaConsumerProps); - log.atInfo().setMessage(() -> "Just built KafkaConsumer=" + kafkaConsumer).log(); + log.atInfo().setMessage(() -> "Just built KafkaConsumer=" + kafkaConsumer).log(); return kafkaConsumer; } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java index 4bfad92b2e..c7bbb75e20 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java @@ -141,27 +141,27 @@ public void testBlockedReadsAndBrokenCommitsDontCauseReordering() throws Excepti keysReceived = new ArrayList<>(); log.info("re-establish a client connection so that the following commit will work"); - log.atInfo().setMessage(() -> "1 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage(() -> "1 ..." + renderNextCommitsAsString()).log(); readNextNStreams(rootContext, trafficSource, keysReceived, 0, 1); - log.atInfo().setMessage(() -> "2 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage(() -> "2 ..." + renderNextCommitsAsString()).log(); log.info("wait long enough to fall out of the group again"); Thread.sleep(2 * MAX_POLL_INTERVAL_MS); var keysReceivedUntilDrop2 = keysReceived; keysReceived = new ArrayList<>(); - log.atInfo().setMessage(() -> "re-establish... 3 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage(() -> "re-establish... 3 ..." + renderNextCommitsAsString()).log(); readNextNStreams(rootContext, trafficSource, keysReceived, 0, 1); trafficSource.commitTrafficStream(keysReceivedUntilDrop1.get(1)); - log.atInfo().setMessage(() -> "re-establish... 4 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage(() -> "re-establish... 4 ..." 
+ renderNextCommitsAsString()).log(); readNextNStreams(rootContext, trafficSource, keysReceived, 1, 1); - log.atInfo().setMessage(() -> "5 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage(() -> "5 ..." + renderNextCommitsAsString()).log(); Thread.sleep(2 * MAX_POLL_INTERVAL_MS); var keysReceivedUntilDrop3 = keysReceived; keysReceived = new ArrayList<>(); readNextNStreams(rootContext, trafficSource, keysReceived, 0, 3); - log.atInfo().setMessage(() -> "6 ..." + kafkaSource.trackingKafkaConsumer.nextCommitsToString()).log(); + log.atInfo().setMessage(() -> "6 ..." + kafkaSource.trackingKafkaConsumer.nextCommitsToString()).log(); trafficSource.close(); } @@ -182,7 +182,7 @@ private static void readNextNStreams( var trafficStreams = kafkaSource.readNextTrafficStreamChunk(rootContext::createReadChunkContext).get(); trafficStreams.forEach(ts -> { var tsk = ts.getKey(); - log.atInfo().setMessage(() -> "checking for " + tsk).log(); + log.atInfo().setMessage(() -> "checking for " + tsk).log(); Assertions.assertFalse(keysReceived.contains(tsk)); keysReceived.add(tsk); }); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java index 8d2c2b1b43..5c74d3edb3 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java @@ -50,7 +50,7 @@ static Producer buildKafkaProducer(String bootstrapServers) { try { return new KafkaProducer(kafkaProps); } catch (Exception e) { - log.atError().setCause(e).log(); + log.atError().setCause(e).log(); System.exit(1); throw e; } @@ -102,7 +102,7 @@ var record = new ProducerRecord(TEST_TOPIC_NAME, recordId, trafficStream.toByteA .setMessage( () -> "completed send of TrafficStream with key=" + tsKeyStr + " metadata=" + metadata ) - .log(); + .log(); }); var recordMetadata = sendFuture.get(); log.info("finished publishing record... metadata=" + recordMetadata); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java index a75accaa60..3f02fbe004 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java @@ -27,7 +27,7 @@ private static String add( if (m != null) { m.put(v, future); } - log.atInfo().setMessage(() -> "after adding work... " + future).log(); + log.atInfo().setMessage(() -> "after adding work... " + future).log(); return stringify(receivedItems.stream()); } @@ -47,8 +47,8 @@ void testOnlineRadixSorter_outOfOrder() { Assertions.assertEquals("", add(radixSorter, futureMap, receiverList, 3)); Assertions.assertEquals("", add(radixSorter, futureMap, receiverList, 4)); Assertions.assertEquals("1", add(radixSorter, futureMap, receiverList, 1)); - log.atInfo().setMessage(() -> "after adding work for '1'... future[3]=" + futureMap.get(3)).log(); - log.atInfo().setMessage(() -> "after adding work for '1'... 
future[3]=" + futureMap.get(3)).LOG_TO_CHECK(); + log.atInfo().setMessage(() -> "after adding work for '1'... future[4]=" + futureMap.get(4)).LOG_TO_CHECK(); receiverList.clear(); Assertions.assertEquals("2,3,4", add(radixSorter, futureMap, receiverList, 2)); receiverList.clear(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java index f55c9442cd..a7d7926492 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java @@ -23,7 +23,7 @@ public void test() throws Exception { try { observerSemaphore.release(); tfSemaphore.acquire(); - log.atInfo().setMessage(() -> "tf[" + finalI + "]" + lastTf).log(); + log.atInfo().setMessage(() -> "tf[" + finalI + "]" + lastTf).LOG_TO_CHECK(); } catch (InterruptedException e) { throw Lombok.sneakyThrow(e); } @@ -37,8 +37,8 @@ public void test() throws Exception { tfSemaphore.release(); Thread.sleep(10); int finalI = i; - log.atInfo().setMessage(() -> "top tf after " + finalI + " releases=" + finalTf).log(); + log.atInfo().setMessage(() -> "top tf after " + finalI + " releases=" + finalTf).LOG_TO_CHECK(); } - log.atInfo().setMessage(() -> "final tf after any ancestor culls=" + finalTf).log(); + log.atInfo().setMessage(() -> "final tf after any ancestor culls=" + finalTf).LOG_TO_CHECK(); } } diff --git a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java index 8ae4953ecd..5c8268553c 100644 --- a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java +++ b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java @@ -44,20 +44,20 @@ public TestCapturePacketToHttpHandler( @Override public TrackedFuture consumeBytes(ByteBuf nextRequestPacket) { numConsumes.incrementAndGet(); - log.atDebug().setMessage(()->"incoming buffer refcnt=" + nextRequestPacket.refCnt()).log(); + log.atDebug().setMessage(()->"incoming buffer refcnt=" + nextRequestPacket.refCnt()).LOG_TO_CHECK(); var duplicatedPacket = nextRequestPacket.retainedDuplicate(); return new TrackedFuture<>(CompletableFuture.runAsync(() -> { try { - log.atDebug().setMessage(()->"Running async future for " + nextRequestPacket).log(); + log.atDebug().setMessage(()->"Running async future for " + nextRequestPacket).LOG_TO_CHECK(); Thread.sleep(consumeDuration.toMillis()); - log.atDebug().setMessage(()->"woke up from sleeping for " + nextRequestPacket).log(); + log.atDebug().setMessage(()->"woke up from sleeping for " + nextRequestPacket).LOG_TO_CHECK(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw Lombok.sneakyThrow(e); } try { log.atDebug() - .setMessage(()->"At the time of committing the buffer, refcnt=" + duplicatedPacket.refCnt()).log(); + .setMessage(()->"At the time of committing the buffer, refcnt=" + duplicatedPacket.refCnt()).LOG_TO_CHECK(); duplicatedPacket.readBytes(byteArrayOutputStream, nextRequestPacket.readableBytes()); duplicatedPacket.release(); } catch (IOException e) { diff --git 
a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java index 0cb4cba908..d4654456b1 100644 --- a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java +++ b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java @@ -181,7 +181,7 @@ public static int classifyTrafficStream(HashSet possibilitiesLeftToTest getTypeFromObservation(ssl.get(ssl.size() - 1), outgoingLastType), ssl.size() ); - log.atTrace().setMessage(() -> "classification=" + classificationToString(type)).log(); + log.atTrace().setMessage(() -> "classification=" + classificationToString(type)).log(); previousObservationType = outgoingLastType; counter += possibilitiesLeftToTest.remove(type) ? 1 : 0; } @@ -284,7 +284,7 @@ private static TrafficStream[] fillCommandsAndSizesForSeed( MAX_WRITES_IN_RESPONSE ) ) - .log(); + .log(); var flushLikelihood = Math.pow(r2.nextDouble(), 2.0); fillCommandsAndSizes(r2, flushLikelihood / 4, flushLikelihood, bufferBound, commands, sizes); return TrafficStreamGenerator.makeTrafficStream( @@ -363,7 +363,7 @@ public static StreamAndExpectedSizes generateStreamAndSumOfItsTransactions( .flatMap(tc -> Arrays.stream(tc.trafficStreams).map(TrafficStreamUtils::summarizeTrafficStream)) .collect(Collectors.joining("\n")) ) - .log(); + .log(); var aggregatedStreams = randomize ? StreamInterleaver.randomlyInterleaveStreams( new Random(count), diff --git a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java index 4f0dab4dc0..347229b071 100644 --- a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java +++ b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java @@ -71,10 +71,9 @@ public CommonScopedMetricInstruments(Meter meter, ScopeLabels stockMetricLabels, private static List getBuckets(double firstBucketSize, double lastBucketCeiling) { var buckets = getExponentialBucketsBetween(firstBucketSize, lastBucketCeiling); log.atTrace() - .setMessage( - () -> "Setting buckets to " - + buckets.stream().map(x -> "" + x).collect(Collectors.joining(",", "[", "]")) - ) + .setMessage("Setting buckets to {}") + .addArgument(() -> buckets.stream().map(x -> "" + x) + .collect(Collectors.joining(",", "[", "]"))) .log(); return buckets; } diff --git a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java index f877f73af5..4fc51c1fc7 100644 --- a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java +++ b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java @@ -95,13 +95,9 @@ public static OpenTelemetry initializeOpenTelemetryWithCollectorOrAsNoop( .map(endpoint -> initializeOpenTelemetryForCollector(endpoint, serviceName, instanceName)) .orElseGet(() -> { if (serviceName != null) { - log.atWarn() - .setMessage( - "Collector endpoint=null, so serviceName parameter '" - + serviceName - + "' is being ignored 
since a no-op OpenTelemetry object is being created" - ) - .log(); + log.atWarn().setMessage("Collector endpoint=null, so serviceName parameter '{}'" + + " is being ignored since a no-op OpenTelemetry object is being created") + .addArgument(serviceName).log(); } return initializeNoopOpenTelemetry(); }); diff --git a/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java b/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java index 6e4957755a..f244fdfe0f 100644 --- a/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java +++ b/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java @@ -15,7 +15,7 @@ public static String getNodeInstanceName() { var nodeId = Optional.of("ECS_TASK_ID").map(System::getenv) .or(() -> Optional.of("HOSTNAME").map(System::getenv)) .orElse(DEFAULT_NODE_ID); - log.atInfo().setMessage(() -> "getNodeInstanceName()=" + nodeId).log(); + log.atInfo().setMessage("getNodeInstanceName()={}").addArgument(nodeId).log(); return nodeId; } } diff --git a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java index 99d2cf0e58..6c5cb5bdcd 100644 --- a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java +++ b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java @@ -34,11 +34,8 @@ public void onCreated(IScopedInstrumentationAttributes ctx) { if (priorValue != null) { var priorKey = scopedContextToCallDetails.entrySet().stream().findFirst().get().getKey(); if (priorKey.equals(ctx)) { - log.atError() - .setMessage( - () -> "Trying to re-add the same context (" + ctx + ") back into this context tracker" - ) - .log(); + log.atError().setMessage("Trying to re-add the same context ({}) back into this context tracker") + .addArgument(ctx).log(); } } assert priorValue == null; @@ -55,8 +52,8 @@ public void onClosed(IScopedInstrumentationAttributes ctx) { assert oldCallDetails != null; final var oldE = oldCallDetails.closeStackException; if (oldE != null) { - log.atError().setCause(newExceptionStack).setMessage(() -> "Close is being called here").log(); - log.atError().setCause(oldE).setMessage(() -> "... but close was already called here").log(); + log.atError().setCause(newExceptionStack).setMessage("Close is being called here").log(); + log.atError().setCause(oldE).setMessage("... 
but close was already called here").log(); assert oldE == null; } oldCallDetails.closeStackException = new ExceptionForStackTracingOnly(); diff --git a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java index 6d4b7c0968..f35c79fb30 100644 --- a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java +++ b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java @@ -123,9 +123,9 @@ public List getMetricsUntil( return true; } else { try { - log.atInfo().setMessage("{}").addArgument(() -> - "Waiting " + sleepAmount + "ms because the last test for metrics from " + metricName + - " on " + matchingMetrics.get() + " did not satisfy the predicate").log(); + log.atInfo().setMessage("{}") + .addArgument(() -> "Waiting " + sleepAmount + "ms because the last test for metrics from " + + metricName + " on " + matchingMetrics.get() + " did not satisfy the predicate").log(); Thread.sleep(sleepAmount); } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java index 5d158fab5d..d94fc2eeb6 100644 --- a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java +++ b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java @@ -10,14 +10,14 @@ public class LoggingContextTracer implements IContextTracker { @Override public void onContextCreated(final IScopedInstrumentationAttributes context) { - log.atDebug().setMessage(CREATED_MESSAGE).addArgument(context.getActivityName()).log(); + log.atDebug().setMessage(CREATED_MESSAGE).addArgument(context::getActivityName).log(); } @Override public void onContextClosed(IScopedInstrumentationAttributes context) { log.atDebug() .setMessage(CLOSED_MESSAGE) - .addArgument(context.getActivityName()) + .addArgument(context::getActivityName) .addArgument(context::getPopulatedSpanAttributes) .log(); } } diff --git a/deployment/cdk/opensearch-service-migration/lib/cdk-logger.ts b/deployment/cdk/opensearch-service-migration/lib/cdk-logger.ts index efd9e6c6a0..326e277586 100644 --- a/deployment/cdk/opensearch-service-migration/lib/cdk-logger.ts +++ b/deployment/cdk/opensearch-service-migration/lib/cdk-logger.ts @@ -6,15 +6,15 @@ export class CdkLogger { static info(message: string) { if (process.env.CDK_CLI_COMMAND === 'deploy') { - console.log(message); + console.log(message); } } static warn(message: string) { - console.log(message); + console.log(message); } static error(message: string) { - console.log(message); + console.log(message); } } \ No newline at end of file diff --git a/deployment/cdk/opensearch-service-migration/lib/lambda/acm-cert-importer-handler.ts b/deployment/cdk/opensearch-service-migration/lib/lambda/acm-cert-importer-handler.ts index 1cf7157e39..3be8ec227a 100644 --- a/deployment/cdk/opensearch-service-migration/lib/lambda/acm-cert-importer-handler.ts +++ b/deployment/cdk/opensearch-service-migration/lib/lambda/acm-cert-importer-handler.ts @@ -8,8 +8,8 @@ import * as https from 'https'; import * as forge from 'node-forge'; export const handler = async (event: 
CloudFormationCustomResourceEvent, context: Context): Promise => { - console.log('Received event:', JSON.stringify(event, null, 2)); - console.log('Received context:', JSON.stringify(context, null, 2)); + console.log('Received event:', JSON.stringify(event, null, 2)); + console.log('Received context:', JSON.stringify(context, null, 2)); let responseData: { [key: string]: any } = {}; let physicalResourceId: string = ''; @@ -18,7 +18,7 @@ export const handler = async (event: CloudFormationCustomResourceEvent, context: case 'Create': { const { certificate, privateKey, certificateChain } = await generateSelfSignedCertificate(); const certificateArn = await importCertificate(certificate, privateKey, certificateChain); - console.log(`Certificate imported with ARN: ${certificateArn}`); + console.log(`Certificate imported with ARN: ${certificateArn}`); responseData = { CertificateArn: certificateArn }; physicalResourceId = certificateArn; break; @@ -126,10 +126,10 @@ async function sendResponse(event: CloudFormationCustomResourceEvent, context: C Data: responseData }); - console.log('Response body:', responseBody); + console.log('Response body:', responseBody); if (event.ResponseURL === "Local") { - console.log('Running locally, simulating response success.'); + console.log('Running locally, simulating response success.'); return { Status: 'SUCCESS', Reason: 'Running locally, response simulated.', @@ -155,8 +155,8 @@ async function sendResponse(event: CloudFormationCustomResourceEvent, context: C return new Promise((resolve, reject) => { const request = https.request(options, (response) => { - console.log(`Status code: ${response.statusCode}`); - console.log(`Status message: ${response.statusMessage}`); + console.log(`Status code: ${response.statusCode}`); + console.log(`Status message: ${response.statusMessage}`); resolve({ Status: responseStatus === 'SUCCESS' || responseStatus === 'FAILED' ? responseStatus : 'FAILED', Reason: `See the details in CloudWatch Log Stream: ${context.logStreamName}`, diff --git a/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java b/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java index 28004107af..b92062015f 100644 --- a/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java +++ b/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java @@ -110,14 +110,14 @@ private boolean applyTransformation(final ObjectNode indexJson) { var index = mock(Index.class); Mockito.when(index.getRawJson()).thenReturn(indexJson); - log.atInfo().setMessage("Original\n{}").addArgument(indexJson.toPrettyString()).log(); + log.atInfo().setMessage("Original\n{}").addArgument(indexJson.toPrettyString()).log(); var wasChanged = transformer.applyTransformation(index); log.atInfo() .setMessage("After{}\n{}") .addArgument(wasChanged ? " *Changed* " : "") .addArgument(indexJson.toPrettyString()) - .log(); + .log(); return wasChanged; }
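
The hunks above all apply one pattern: an eagerly built log message (string concatenation, or a setMessage(() -> ...) lambda) becomes a constant parameterized template whose arguments are supplied through addArgument, deferring any expensive rendering until the event is actually emitted. A minimal, self-contained sketch of that before/after shape using the slf4j 2.x fluent API; the class and counter below are illustrative placeholders, not taken from this codebase:

import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingStyleSketch {
    private static final Logger log = LoggerFactory.getLogger(LoggingStyleSketch.class);
    private final AtomicInteger recordsInFlight = new AtomicInteger(); // hypothetical counter

    void eagerStyle() {
        // Before: the argument is rendered and concatenated even when TRACE is disabled.
        log.atTrace().setMessage("records in flight=" + recordsInFlight.get()).log();
    }

    void lazyStyle() {
        // After: the Supplier overload of addArgument defers evaluation; the "{}"
        // placeholder is filled only if the event passes the level check.
        log.atTrace()
            .setMessage("records in flight={}")
            .addArgument(recordsInFlight::get)
            .log();
    }
}

The method-reference changes in the diff (for example, context::getActivityName in LoggingContextTracer) follow the same reasoning: the argument is resolved only when the event is actually logged, while the message template stays constant and cheap to compare or index.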