From 3bb716182754478d8120162947b1bd56a562428e Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Mon, 12 Sep 2022 10:58:35 +0530 Subject: [PATCH 01/14] Add Delete Signed-off-by: Rishab Nahata --- .../TransportDeleteDecommissionAction.java | 97 +++++++++++++++++++ .../decommission/DecommissionService.java | 30 ++++++ 2 files changed, 127 insertions(+) create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java new file mode 100644 index 0000000000000..8db770afc720c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionService; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +public class TransportDeleteDecommissionAction extends TransportClusterManagerNodeAction< + DeleteDecommissionRequest, + DeleteDecommissionResponse> { + + private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionAction.class); + + DecommissionService decommissionService; + + @Inject + public TransportDeleteDecommissionAction( + TransportService transportService, + ClusterService clusterService, + DecommissionService decommissionService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + DeleteDecommissionAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteDecommissionRequest::new, + indexNameExpressionResolver + ); + this.decommissionService = decommissionService; + + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected DeleteDecommissionResponse read(StreamInput in) throws IOException { + return new DeleteDecommissionResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteDecommissionRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void masterOperation( + DeleteDecommissionRequest request, + ClusterState state, + 
ActionListener<DeleteDecommissionResponse> listener
+    ) {
+        // TODO: Enable when service class change is merged
+        logger.info("Received delete decommission Request");
+        decommissionService.clearDecommissionStatus(new ActionListener<ClusterStateUpdateResponse>() {
+            @Override
+            public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) {
+                listener.onResponse(new DeleteDecommissionResponse(true));
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                logger.error("Recommission failed with exception " + e.getMessage());
+                listener.onFailure(e);
+            }
+        });
+
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java
index c31ea53a5fd16..9881ec89248bf 100644
--- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java
@@ -481,4 +481,34 @@ public void onFailure(Exception e) {
             }
         };
     }
+
+    public void clearDecommissionStatus(final ActionListener<ClusterStateUpdateResponse> listener) {
+        clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) {
+            @Override
+            public ClusterState execute(ClusterState currentState) {
+                return deleteDecommissionAttribute(currentState);
+            }
+
+            @Override
+            public void onFailure(String source, Exception e) {
+                logger.error(() -> new ParameterizedMessage("Failed to clear decommission attribute."), e);
+                listener.onFailure(e);
+            }
+
+            @Override
+            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                // Once the cluster state is processed we can try to recommission nodes by setting the weights for the zone.
+                // TODO Set the weights for the recommissioning zone.
+                listener.onResponse(new ClusterStateUpdateResponse(true));
+            }
+        });
+    }
+
+    ClusterState deleteDecommissionAttribute(final ClusterState currentState) {
+        logger.info("Delete decommission request received");
+        Metadata metadata = currentState.metadata();
+        Metadata.Builder mdBuilder = Metadata.builder(metadata);
+        mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE);
+        return ClusterState.builder(currentState).metadata(mdBuilder).build();
+    }
 }

From 5d9c7b3054633a63f3ef513ba7aa8173b6cb0aeb Mon Sep 17 00:00:00 2001
From: Rishab Nahata
Date: Mon, 3 Oct 2022 12:55:04 +0530
Subject: [PATCH 02/14] Fixes to IT

Signed-off-by: Rishab Nahata
---
 .../AwarenessAttributeDecommissionIT.java     | 423 ++++++++++++++++++
 .../TransportDeleteDecommissionAction.java    |  97 ----
 2 files changed, 423 insertions(+), 97 deletions(-)
 create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java
 delete mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java

diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java
new file mode 100644
index 0000000000000..d6753337912a6
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java
@@ -0,0 +1,423 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.cluster; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.junit.After; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.cluster.coordination.JoinHelper; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.discovery.PeerFinder; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.RemoteTransportException; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import static org.opensearch.test.NodeRoles.onlyRole; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AwarenessAttributeDecommissionIT extends OpenSearchIntegTestCase { + + private final Logger logger = LogManager.getLogger(AwarenessAttributeDecommissionIT.class); + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockTransportService.TestPlugin.class); + } + + @After + public void cleanup() throws Exception { + assertNoTimeout(client().admin().cluster().prepareHealth().get()); + } + /* + Test Plan - + 1. Basic decommissioning + 2. Decommissioning when master in decommissioned zone + 3. Basic assert testing (zone, force zone not present; second request after successful etc) + 4. Failure scenarios + 5. 
Concurrency handling + */ + + public void testNodesRemovedAfterZoneDecommission_ClusterManagerNotInToBeDecommissionedZone() throws Exception { + int dataNodeCountPerAZ = 4; + List zones = new ArrayList<>(Arrays.asList("a", "b", "c")); + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List clusterManagerNodes = internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build() + ); + Map clusterManagerNameToZone = new HashMap<>(); + clusterManagerNameToZone.put(clusterManagerNodes.get(0), "a"); + clusterManagerNameToZone.put(clusterManagerNodes.get(1), "b"); + clusterManagerNameToZone.put(clusterManagerNodes.get(2), "c"); + + logger.info("--> starting 4 nodes each on zones 'a' & 'b' & 'c'"); + List nodes_in_zone_a = internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List nodes_in_zone_b = internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List nodes_in_zone_c = internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + ensureStableCluster(15); + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(15)) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + String originalClusterManager = internalCluster().getClusterManagerName(); + String originalClusterManagerZone = clusterManagerNameToZone.get(originalClusterManager); + logger.info("--> original cluster manager - name {}, zone {}", originalClusterManager, originalClusterManagerZone); + + List tempZones = new ArrayList<>(zones); + tempZones.remove(originalClusterManagerZone); + String zoneToDecommission = randomFrom(tempZones); + + logger.info("--> starting decommissioning nodes in zone {}", zoneToDecommission); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", zoneToDecommission); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); + assertTrue(decommissionResponse.isAcknowledged()); + + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // assert that number of nodes should be 10 ( 2 cluster manager nodes + 8 data nodes ) + assertEquals(clusterState.nodes().getNodes().size(), 10); + assertEquals(clusterState.nodes().getDataNodes().size(), 8); + assertEquals(clusterState.nodes().getClusterManagerNodes().size(), 2); + + // assert that no node has decommissioned attribute + Iterator 
discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); + while(discoveryNodeIterator.hasNext()) { + DiscoveryNode node = discoveryNodeIterator.next(); + assertNotEquals(node.getAttributes().get("zone"), zoneToDecommission); + } + + // assert that decommission status is successful + GetDecommissionStateResponse response = client().execute(GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest()).get(); + assertEquals(response.getDecommissionedAttribute(), decommissionAttribute); + assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); + + // assert that no node present in Voting Config Exclusion + assertEquals(clusterState.metadata().coordinationMetadata().getVotingConfigExclusions().size(),0); + + // assert that cluster manager didn't switch during test + assertEquals(originalClusterManager,internalCluster().getClusterManagerName()); + + // ------------------------------------------------------------------------------ + // tests for removed nodes + // ------------------------------------------------------------------------------ + List removedNodes; + switch (zoneToDecommission) { + case "a": + removedNodes = new ArrayList<>(nodes_in_zone_a); + break; + case "b": + removedNodes = new ArrayList<>(nodes_in_zone_b); + break; + case "c": + removedNodes = new ArrayList<>(nodes_in_zone_c); + break; + default: + throw new IllegalStateException("unrecognized zone to be decommissioned"); + } + String randomRemovedNode = randomFrom(removedNodes); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, randomRemovedNode); + DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); + + // The decommissioned node would be having status as IN_PROGRESS as it was kicked out later + // and not receiving any further state updates + assertEquals(metadata.decommissionAttribute(), decommissionAttribute); + assertEquals(metadata.status(), DecommissionStatus.IN_PROGRESS); + + // assert the node has decommissioned attribute + assertEquals(clusterService.localNode().getAttributes().get("zone"), zoneToDecommission); + + // assert exception on decommissioned node + Logger clusterLogger = LogManager.getLogger(JoinHelper.class); + MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(clusterLogger); + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "local node is decommissioned. 
Will not be able to join the cluster" + ) + ); + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "failed to join" + ) { + @Override + public boolean innerMatch(LogEvent event) { + return event.getThrown() != null + && event.getThrown().getClass() == RemoteTransportException.class + && event.getThrown().getCause() != null + && event.getThrown().getCause().getClass() == NodeDecommissionedException.class; + } + } + ); + TransportService clusterManagerTransportService = internalCluster().getInstance( + TransportService.class, + originalClusterManager + ); + MockTransportService decommissionedNodeTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + randomRemovedNode + ); + final CountDownLatch countDownLatch = new CountDownLatch(3); + decommissionedNodeTransportService.addSendBehavior( + clusterManagerTransportService, + (connection, requestId, action, request, options) -> { + if (action.equals(JoinHelper.JOIN_ACTION_NAME)) { + countDownLatch.countDown(); + } + connection.sendRequest(requestId, action, request, options); + } + ); + decommissionedNodeTransportService.addConnectBehavior(clusterManagerTransportService, Transport::openConnection); + countDownLatch.await(); + mockLogAppender.assertAllExpectationsMatched(); + + // test for PeerFinder + PeerFinder peerFinder = (PeerFinder) internalCluster().getInstance( + PeerFinder.class, + randomRemovedNode + ); + +// // recommissioning the zone, to safely succeed the test. Specific tests for recommissioning will be written separately +// DeleteDecommissionResponse deleteDecommissionResponse = client().execute(DeleteDecommissionAction.INSTANCE, new DeleteDecommissionRequest()).get(); +// assertTrue(deleteDecommissionResponse.isAcknowledged()); +// +// ensureStableCluster(15, TimeValue.timeValueSeconds(30L)); // time should be set to findPeerInterval setting + } + + public void testNodesRemovedAfterZoneDecommission_ClusterManagerInToBeDecommissionedZone() throws Exception { + int dataNodeCountPerAZ = 4; + List zones = new ArrayList(Arrays.asList("a", "b", "c")); + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List clusterManagerNodes = internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build() + ); + Map clusterManagerNameToZone = new HashMap<>(); + clusterManagerNameToZone.put(clusterManagerNodes.get(0), "a"); + clusterManagerNameToZone.put(clusterManagerNodes.get(1), "b"); + clusterManagerNameToZone.put(clusterManagerNodes.get(2), "c"); + + logger.info("--> starting 4 nodes each on zones 'a' & 'b' & 'c'"); + List nodes_in_zone_a = internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List nodes_in_zone_b = internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + 
Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List nodes_in_zone_c = internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + ensureStableCluster(15); + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(15)) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + String originalClusterManager = internalCluster().getClusterManagerName(); + String originalClusterManagerZone = clusterManagerNameToZone.get(originalClusterManager); + logger.info("--> original cluster manager - name {}, zone {}", originalClusterManager, originalClusterManagerZone); + + logger.info("--> starting decommissioning nodes in zone {}", originalClusterManagerZone); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", originalClusterManagerZone); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); + assertTrue(decommissionResponse.isAcknowledged()); + + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // assert that number of nodes should be 10 ( 2 cluster manager nodes + 8 data nodes ) + assertEquals(clusterState.nodes().getNodes().size(), 10); + assertEquals(clusterState.nodes().getDataNodes().size(), 8); + assertEquals(clusterState.nodes().getClusterManagerNodes().size(), 2); + + // assert that no node has decommissioned attribute + Iterator discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); + while(discoveryNodeIterator.hasNext()) { + DiscoveryNode node = discoveryNodeIterator.next(); + assertNotEquals(node.getAttributes().get("zone"), originalClusterManagerZone); + } + + // assert that cluster manager is changed + assertNotEquals(originalClusterManager, internalCluster().getClusterManagerName()); + + // assert that decommission status is successful + GetDecommissionStateResponse response = client().execute(GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest()).get(); + assertEquals(response.getDecommissionedAttribute(), decommissionAttribute); + assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); + + // assert that no node present in Voting Config Exclusion + assertEquals(clusterState.metadata().coordinationMetadata().getVotingConfigExclusions().size(),0); + + // ------------------------------------------------------------------------------ + // tests for removed nodes + // ------------------------------------------------------------------------------ + List removedNodes; + switch (originalClusterManagerZone) { + case "a": + removedNodes = new ArrayList<>(nodes_in_zone_a); + break; + case "b": + removedNodes = new ArrayList<>(nodes_in_zone_b); + break; + case "c": + removedNodes = new ArrayList<>(nodes_in_zone_c); + break; + default: + throw new IllegalStateException("unrecognized zone to be decommissioned"); + } + String randomRemovedNode = randomFrom(removedNodes); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, randomRemovedNode); + DecommissionAttributeMetadata metadata = 
clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); + + // The decommissioned node would be having status as IN_PROGRESS + assertEquals(metadata.decommissionAttribute(), decommissionAttribute); + assertEquals(metadata.status(), DecommissionStatus.IN_PROGRESS); + + // assert the node has decommissioned attribute + assertEquals(clusterService.localNode().getAttributes().get("zone"), originalClusterManagerZone); + + // assert exception on decommissioned node + Logger clusterLogger = LogManager.getLogger(JoinHelper.class); + MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(clusterLogger); + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "local node is decommissioned. Will not be able to join the cluster" + ) + ); + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "failed to join" + ) { + @Override + public boolean innerMatch(LogEvent event) { + return event.getThrown() != null + && event.getThrown().getClass() == RemoteTransportException.class + && event.getThrown().getCause() != null + && event.getThrown().getCause().getClass() == NodeDecommissionedException.class; + } + } + ); + TransportService clusterManagerTransportService = internalCluster().getInstance( + TransportService.class, + internalCluster().getClusterManagerName() + ); + MockTransportService decommissionedNodeTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + randomRemovedNode + ); + // test for PeerFinder + PeerFinder peerFinder = internalCluster().getInstance( + PeerFinder.class, + randomRemovedNode + ); + final CountDownLatch countDownLatch = new CountDownLatch(3); + // TODO can check join request timings using counters + decommissionedNodeTransportService.addSendBehavior( + clusterManagerTransportService, + (connection, requestId, action, request, options) -> { + if (action.equals(JoinHelper.JOIN_ACTION_NAME)) { + countDownLatch.countDown(); + } + connection.sendRequest(requestId, action, request, options); + } + ); + decommissionedNodeTransportService.addConnectBehavior(clusterManagerTransportService, Transport::openConnection); + countDownLatch.await(); + mockLogAppender.assertAllExpectationsMatched(); + +// // recommissioning the zone, to safely succeed the test. Specific tests for recommissioning will be written separately +// DeleteDecommissionResponse deleteDecommissionResponse = client().execute(DeleteDecommissionAction.INSTANCE, new DeleteDecommissionRequest()).get(); +// assertTrue(deleteDecommissionResponse.isAcknowledged()); +// +// ensureStableCluster(15, TimeValue.timeValueSeconds(30L)); // time should be set to findPeerInterval setting + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java deleted file mode 100644 index 8db770afc720c..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.action.admin.cluster.decommission.awareness.delete; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.ack.ClusterStateUpdateResponse; -import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.decommission.DecommissionService; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportService; - -import java.io.IOException; - -public class TransportDeleteDecommissionAction extends TransportClusterManagerNodeAction< - DeleteDecommissionRequest, - DeleteDecommissionResponse> { - - private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionAction.class); - - DecommissionService decommissionService; - - @Inject - public TransportDeleteDecommissionAction( - TransportService transportService, - ClusterService clusterService, - DecommissionService decommissionService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver - ) { - super( - DeleteDecommissionAction.NAME, - transportService, - clusterService, - threadPool, - actionFilters, - DeleteDecommissionRequest::new, - indexNameExpressionResolver - ); - this.decommissionService = decommissionService; - - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected DeleteDecommissionResponse read(StreamInput in) throws IOException { - return new DeleteDecommissionResponse(in); - } - - @Override - protected ClusterBlockException checkBlock(DeleteDecommissionRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - - @Override - protected void masterOperation( - DeleteDecommissionRequest request, - ClusterState state, - ActionListener listener - ) { - // TODO: Enable when service class change is merged - logger.info("Received delete decommission Request"); - decommissionService.clearDecommissionStatus(new ActionListener() { - @Override - public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { - listener.onResponse(new DeleteDecommissionResponse(true)); - } - - @Override - public void onFailure(Exception e) { - logger.error("Recommission failed with exception " + e.getMessage()); - listener.onFailure(e); - } - }); - - } -} From 27c2e4c93e5516188585c188f2f65cdc2501f342 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Mon, 3 Oct 2022 14:55:32 +0530 Subject: [PATCH 03/14] Random Signed-off-by: Rishab Nahata --- .../AwarenessAttributeDecommissionIT.java | 14 ++++----- .../decommission/DecommissionService.java | 30 ------------------- .../opensearch/discovery/PeerFinderTests.java | 2 ++ 3 files changed, 7 insertions(+), 39 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java index 
d6753337912a6..45d2e9e2a8153 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/AwarenessAttributeDecommissionIT.java @@ -49,6 +49,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; import static org.opensearch.test.NodeRoles.onlyRole; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; @@ -181,10 +182,10 @@ public void testNodesRemovedAfterZoneDecommission_ClusterManagerNotInToBeDecommi ClusterService clusterService = internalCluster().getInstance(ClusterService.class, randomRemovedNode); DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); - // The decommissioned node would be having status as IN_PROGRESS as it was kicked out later + // The decommissioned node would not be having status as SUCCESS as it was kicked out later // and not receiving any further state updates assertEquals(metadata.decommissionAttribute(), decommissionAttribute); - assertEquals(metadata.status(), DecommissionStatus.IN_PROGRESS); + assertNotEquals(metadata.status(), DecommissionStatus.SUCCESSFUL); // assert the node has decommissioned attribute assertEquals(clusterService.localNode().getAttributes().get("zone"), zoneToDecommission); @@ -193,11 +194,11 @@ public void testNodesRemovedAfterZoneDecommission_ClusterManagerNotInToBeDecommi Logger clusterLogger = LogManager.getLogger(JoinHelper.class); MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(clusterLogger); mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + new MockLogAppender.PatternSeenEventExpectation( "test", JoinHelper.class.getCanonicalName(), Level.INFO, - "local node is decommissioned. Will not be able to join the cluster" + "local node is decommissioned \\[.*]\\. Will not be able to join the cluster" ) ); mockLogAppender.addExpectation( @@ -238,11 +239,6 @@ public boolean innerMatch(LogEvent event) { countDownLatch.await(); mockLogAppender.assertAllExpectationsMatched(); - // test for PeerFinder - PeerFinder peerFinder = (PeerFinder) internalCluster().getInstance( - PeerFinder.class, - randomRemovedNode - ); // // recommissioning the zone, to safely succeed the test. 
Specific tests for recommissioning will be written separately // DeleteDecommissionResponse deleteDecommissionResponse = client().execute(DeleteDecommissionAction.INSTANCE, new DeleteDecommissionRequest()).get(); diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index 9881ec89248bf..c31ea53a5fd16 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -481,34 +481,4 @@ public void onFailure(Exception e) { } }; } - - public void clearDecommissionStatus(final ActionListener listener) { - clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { - @Override - public ClusterState execute(ClusterState currentState) { - return deleteDecommissionAttribute(currentState); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("Failed to clear decommission attribute."), e); - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - // Once the cluster state is processed we can try to recommission nodes by setting the weights for the zone. - // TODO Set the weights for the recommissioning zone. - listener.onResponse(new ClusterStateUpdateResponse(true)); - } - }); - } - - ClusterState deleteDecommissionAttribute(final ClusterState currentState) { - logger.info("Delete decommission request received"); - Metadata metadata = currentState.metadata(); - Metadata.Builder mdBuilder = Metadata.builder(metadata); - mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE); - return ClusterState.builder(currentState).metadata(mdBuilder).build(); - } } diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 5e7dede0309c6..c89d512cb83d2 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -853,4 +853,6 @@ public String toString() { deterministicTaskQueue.runAllRunnableTasks(); assertNotifiedOfAllUpdates(); } + + } From ed359f0d380659c98d9b313fba9ba4d2d6025e35 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 6 Oct 2022 11:08:17 -0700 Subject: [PATCH 04/14] Fix new race condition in DecommissionControllerTests (#4688) My previous fix introduced a new race condition by making the assertions before waiting on the latch. 
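
Note: the essence of the fix in the diff below is an ordering rule for asynchronous test
assertions — every listener path must count the latch down, and the test must await the
latch before asserting. A minimal, self-contained sketch of that pattern follows
(illustrative only; the class and method names here are hypothetical, not the project's
test code):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class LatchOrderingSketch {

    // Stand-in for an async API that reports completion on another thread.
    interface Listener {
        void onResponse();
        void onFailure(Exception e);
    }

    static void runAsync(Listener listener) {
        new Thread(() -> listener.onFailure(new IllegalStateException("simulated timeout"))).start();
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<Exception> failure = new AtomicReference<>();

        runAsync(new Listener() {
            @Override
            public void onResponse() {
                latch.countDown(); // count down on the success path too, or an unexpected result hangs the test
            }

            @Override
            public void onFailure(Exception e) {
                failure.set(e);
                latch.countDown();
            }
        });

        // Await first, assert second; asserting before the await is the race this patch removes.
        if (!latch.await(30, TimeUnit.SECONDS)) {
            throw new AssertionError("listener never completed");
        }
        if (failure.get() == null) {
            throw new AssertionError("expected onFailure to be called");
        }
    }
}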
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../cluster/decommission/DecommissionControllerTests.java | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 868ce6ed7bf9e..a9efbaed8a671 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -118,6 +118,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) +- Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) ### Security diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 5b2dce277189c..06f2de04907d6 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -223,7 +223,9 @@ public void testTimesOut() throws InterruptedException { TimeValue.timeValueMillis(0), new ActionListener<>() { @Override - public void onResponse(Void unused) {} + public void onResponse(Void unused) { + countDownLatch.countDown(); + } @Override public void onFailure(Exception e) { @@ -232,10 +234,10 @@ public void onFailure(Exception e) { } } ); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); MatcherAssert.assertThat(exceptionReference.get(), instanceOf(OpenSearchTimeoutException.class)); MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("waiting for removal of decommissioned nodes")); - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } public void testSuccessfulDecommissionStatusMetadataUpdate() throws InterruptedException { From a2f95ce324d7869701bb8229a28f917af08214f8 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 6 Oct 2022 16:24:31 -0400 Subject: [PATCH 05/14] Fixing SearchStats (de)serialization (#4697) Fixes bwc failure caused by commit 6993ac9834d0f7e8e77e6cade09de2245d2a20f7. 
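
Context for the (de)serialization fix below: the pit stats fields were gated on version
3.0.0 on the wire, but 2.4.0 nodes also carry them, so reader and writer could disagree
on the stream layout during a mixed-version upgrade. The general rule — gate an optional
field on the same released version on both the write and read side — can be sketched with
plain Java streams. This is a simplified analog, not the actual StreamInput/StreamOutput
API, and the version constants are toy values:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedFieldSketch {

    static final int V_2_3_99 = 2_03_99; // toy version ids, not real OpenSearch constants
    static final int V_2_4_0 = 2_04_00;

    // Writer and reader must gate the optional field on the SAME version id,
    // otherwise an older peer reads bytes it never expected (the bwc failure).
    static void write(DataOutputStream out, int peerVersion, long pitCount) throws IOException {
        out.writeLong(123L); // a field that exists in every version
        if (peerVersion >= V_2_4_0) {
            out.writeLong(pitCount); // a field introduced in 2.4.0
        }
    }

    static long read(DataInputStream in, int peerVersion) throws IOException {
        in.readLong(); // the always-present field
        return peerVersion >= V_2_4_0 ? in.readLong() : 0L;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), V_2_3_99, 7L); // to a 2.3.x peer the field is skipped...
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(read(in, V_2_3_99)); // ...and the reader skips it symmetrically: prints 0
    }
}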
Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../rest-api-spec/test/cat.shards/10_basic.yml | 14 +++++++------- .../resources/rest-api-spec/test/pit/10_basic.yml | 4 ++-- .../opensearch/index/search/stats/SearchStats.java | 4 ++-- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9efbaed8a671..b08c21d54be73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) +- Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 6ebe273d552cc..189215b6562a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,14 +1,14 @@ --- "Help": - skip: - version: " - 2.9.99" - reason: point in time stats were added in 3.0.0 + version: " - 2.3.99" + reason: point in time stats were added in 2.4.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "3.0.0 - " + version: "2.4.0 - " - match: $body: | @@ -88,16 +88,16 @@ path.state .+ \n $/ --- -"Help before - 3.0.0": +"Help before - 2.4.0": - skip: - version: "3.0.0 - " - reason: point in time stats were added in 3.0.0 + version: "2.4.0 - " + reason: point in time stats were added in 2.4.0 features: node_selector - do: cat.shards: help: true node_selector: - version: " - 2.9.99" + version: " - 2.3.99" - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml index cd0c5b9126a9d..847131ead35b6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -1,7 +1,7 @@ "Create PIT, Search with PIT ID and Delete": - skip: - version: " - 2.9.99" - reason: "mode to be introduced later than 3.0" + version: " - 2.3.99" + reason: "mode to be introduced later than 2.4.0" - do: indices.create: index: test_pit diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 38b18355fd98d..012c94639d526 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -141,7 +141,7 @@ private Stats(StreamInput in) throws IOException { suggestTimeInMillis = in.readVLong(); suggestCurrent = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_4_0)) { pitCount = in.readVLong(); pitTimeInMillis = in.readVLong(); pitCurrent = in.readVLong(); @@ -292,7 +292,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(suggestTimeInMillis); 
out.writeVLong(suggestCurrent); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_4_0)) { out.writeVLong(pitCount); out.writeVLong(pitTimeInMillis); out.writeVLong(pitCurrent); From f5ea1a4d8067896080f61ab5f88cdcd307ee3c84 Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Thu, 6 Oct 2022 19:46:07 -0700 Subject: [PATCH 06/14] Fixed misunderstanding message 'No OpenSearchException found' when detailed_error disabled (#4669) * Fixed misunderstanding message 'No OpenSearchException found' when detailed_error disabled Signed-off-by: Xue Zhou --- CHANGELOG.md | 1 + .../java/org/opensearch/ExceptionsHelper.java | 15 +++++++++++++++ .../java/org/opensearch/OpenSearchException.java | 4 +--- .../org/opensearch/ExceptionsHelperTests.java | 6 ++++++ .../org/opensearch/OpenSearchExceptionTests.java | 7 +------ .../opensearch/rest/BytesRestResponseTests.java | 2 +- 6 files changed, 25 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b08c21d54be73..f8bfaf5abfed3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,6 +116,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) +- Fixed misunderstanding message "No OpenSearchException found" when detailed_error disabled ([#4669](https://github.com/opensearch-project/OpenSearch/pull/4669)) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) diff --git a/server/src/main/java/org/opensearch/ExceptionsHelper.java b/server/src/main/java/org/opensearch/ExceptionsHelper.java index f252d0b05af79..fbfc9beaea468 100644 --- a/server/src/main/java/org/opensearch/ExceptionsHelper.java +++ b/server/src/main/java/org/opensearch/ExceptionsHelper.java @@ -99,6 +99,21 @@ public static RestStatus status(Throwable t) { return RestStatus.INTERNAL_SERVER_ERROR; } + public static String summaryMessage(Throwable t) { + if (t != null) { + if (t instanceof OpenSearchException) { + return t.getClass().getSimpleName() + "[" + t.getMessage() + "]"; + } else if (t instanceof IllegalArgumentException) { + return "Invalid argument"; + } else if (t instanceof JsonParseException) { + return "Failed to parse JSON"; + } else if (t instanceof OpenSearchRejectedExecutionException) { + return "Too many requests"; + } + } + return "Internal failure"; + } + public static Throwable unwrapCause(Throwable t) { int counter = 0; Throwable result = t; diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 34d7509c7afb2..4b6ca173ec692 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -594,16 +594,14 @@ public static void 
generateFailureXContent(XContentBuilder builder, Params param // Render the exception with a simple message if (detailed == false) { - String message = "No OpenSearchException found"; Throwable t = e; for (int counter = 0; counter < 10 && t != null; counter++) { if (t instanceof OpenSearchException) { - message = t.getClass().getSimpleName() + "[" + t.getMessage() + "]"; break; } t = t.getCause(); } - builder.field(ERROR, message); + builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); return; } diff --git a/server/src/test/java/org/opensearch/ExceptionsHelperTests.java b/server/src/test/java/org/opensearch/ExceptionsHelperTests.java index d16b2e9d291b0..41051d7ff2dd0 100644 --- a/server/src/test/java/org/opensearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/opensearch/ExceptionsHelperTests.java @@ -113,6 +113,12 @@ public void testStatus() { assertThat(ExceptionsHelper.status(new OpenSearchRejectedExecutionException("rejected")), equalTo(RestStatus.TOO_MANY_REQUESTS)); } + public void testSummaryMessage() { + assertThat(ExceptionsHelper.summaryMessage(new IllegalArgumentException("illegal")), equalTo("Invalid argument")); + assertThat(ExceptionsHelper.summaryMessage(new JsonParseException(null, "illegal")), equalTo("Failed to parse JSON")); + assertThat(ExceptionsHelper.summaryMessage(new OpenSearchRejectedExecutionException("rejected")), equalTo("Too many requests")); + } + public void testGroupBy() { ShardOperationFailedException[] failures = new ShardOperationFailedException[] { createShardFailureParsingException("error", "node0", "index", 0, null), diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index bd2695508dfcb..6ceb1d6f12e3b 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -814,12 +814,7 @@ public void testFailureToAndFromXContentWithNoDetails() throws IOException { } assertNotNull(parsedFailure); - String reason; - if (failure instanceof OpenSearchException) { - reason = failure.getClass().getSimpleName() + "[" + failure.getMessage() + "]"; - } else { - reason = "No OpenSearchException found"; - } + String reason = ExceptionsHelper.summaryMessage(failure); assertEquals(OpenSearchException.buildMessage("exception", reason, null), parsedFailure.getMessage()); assertEquals(0, parsedFailure.getHeaders().size()); assertEquals(0, parsedFailure.getMetadata().size()); diff --git a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java index 1ea7f006cf482..20a41b1d8d120 100644 --- a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java @@ -117,7 +117,7 @@ public void testNonOpenSearchExceptionIsNotShownAsSimpleMessage() throws Excepti assertThat(text, not(containsString("UnknownException[an error occurred reading data]"))); assertThat(text, not(containsString("FileNotFoundException[/foo/bar]"))); assertThat(text, not(containsString("error_trace"))); - assertThat(text, containsString("\"error\":\"No OpenSearchException found\"")); + assertThat(text, containsString("\"error\":\"Internal failure\"")); } public void testErrorTrace() throws Exception { From b1bc4d96d69e5f1278dfe4aea7e532e1e049c794 Mon Sep 17 00:00:00 2001 From: Vijayan Balasubramanian Date: Fri, 7 Oct 2022 05:27:54 -0700 Subject: 
[PATCH 07/14] Update GeoGrid base class access modifier to support extensibility (#4572) * Update access modifier to support extensibility Change access modifier from default to protected. This will help to build new geo based aggregation outside OpenSearch, by keeping GeoGrid Classes as base class. Signed-off-by: Vijayan Balasubramanian * Updated CHANGELOG Added PR details to CHANGELOG.md Signed-off-by: Vijayan Balasubramanian * Rename InternalGeoGridBucket to BaseGeoGridBucket Update class names, references and comments. Signed-off-by: Vijayan Balasubramanian * Rename InternalGeoGrid to BaseGeoGrid Signed-off-by: Vijayan Balasubramanian * Make GridBucket classes package-private Signed-off-by: Vijayan Balasubramanian * Remove Internal prefix from Geo Grid classes Signed-off-by: Vijayan Balasubramanian * Update constructor and class access modifier Signed-off-by: Vijayan Balasubramanian * Update access modifier based on usage Made classes package private if it is not used outside the package. Signed-off-by: Vijayan Balasubramanian Signed-off-by: Vijayan Balasubramanian --- CHANGELOG.md | 2 + .../org/opensearch/geo/GeoModulePlugin.java | 11 ++-- ...{InternalGeoGrid.java => BaseGeoGrid.java} | 50 +++++++++---------- ...GridBucket.java => BaseGeoGridBucket.java} | 16 +++--- .../bucket/geogrid/BucketPriorityQueue.java | 4 +- .../bucket/geogrid/CellIdSource.java | 2 +- .../aggregations/bucket/geogrid/GeoGrid.java | 4 +- .../geogrid/GeoGridAggregationBuilder.java | 4 +- .../bucket/geogrid/GeoGridAggregator.java | 22 ++++---- ...ernalGeoHashGrid.java => GeoHashGrid.java} | 22 ++++---- .../GeoHashGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoHashGridAggregator.java | 15 +++--- .../geogrid/GeoHashGridAggregatorFactory.java | 4 +- ...ernalGeoTileGrid.java => GeoTileGrid.java} | 22 ++++---- .../GeoTileGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoTileGridAggregator.java | 15 +++--- .../geogrid/GeoTileGridAggregatorFactory.java | 4 +- .../geogrid/InternalGeoHashGridBucket.java | 4 +- .../geogrid/InternalGeoTileGridBucket.java | 4 +- .../bucket/geogrid/ParsedGeoGrid.java | 4 +- .../bucket/geogrid/ParsedGeoGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoHashGrid.java | 2 +- .../geogrid/ParsedGeoHashGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoTileGrid.java | 2 +- ...idAggregationCompositeAggregatorTests.java | 3 +- .../geogrid/GeoGridAggregatorTestCase.java | 12 ++--- .../bucket/geogrid/GeoGridTestCase.java | 16 +++--- .../bucket/geogrid/GeoHashGridTests.java | 11 ++-- .../bucket/geogrid/GeoTileGridTests.java | 11 ++-- .../geo/tests/common/AggregationBuilders.java | 8 +-- .../common/AggregationInspectionHelper.java | 4 +- 31 files changed, 139 insertions(+), 147 deletions(-) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoGrid.java => BaseGeoGrid.java} (72%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoGridBucket.java => BaseGeoGridBucket.java} (87%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoHashGrid.java => GeoHashGrid.java} (70%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoTileGrid.java => GeoTileGrid.java} (70%) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8bfaf5abfed3..018475c7e9ae6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 1.3.6 
([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) +- Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 77abba7f54677..8ca1d2a0c214f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -35,9 +35,8 @@ import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsGeoShapeAggregator; @@ -78,18 +77,18 @@ public List getAggregations() { GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); final AggregationSpec geoTileGrid = new AggregationSpec( GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); return List.of(geoBounds, geoHashGrid, geoTileGrid); } /** - * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * Registering the geotile grid in the {@link CompositeAggregation}. 
* * @return a {@link List} of {@link CompositeAggregationSpec} */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java similarity index 72% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java index 9dbed7b27307a..b58c19a7186e6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java @@ -54,30 +54,30 @@ * All geo-grid hash-encoding in a grid are of the same precision and held internally as a single long * for efficiency's sake. * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGrid extends InternalMultiBucketAggregation< - InternalGeoGrid, - InternalGeoGridBucket> implements GeoGrid { +public abstract class BaseGeoGrid extends InternalMultiBucketAggregation + implements + GeoGrid { protected final int requiredSize; - protected final List buckets; + protected final List buckets; - InternalGeoGrid(String name, int requiredSize, List buckets, Map metadata) { + protected BaseGeoGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, metadata); this.requiredSize = requiredSize; this.buckets = buckets; } - abstract Writeable.Reader getBucketReader(); + protected abstract Writeable.Reader getBucketReader(); /** * Read from a stream. */ - public InternalGeoGrid(StreamInput in) throws IOException { + public BaseGeoGrid(StreamInput in) throws IOException { super(in); requiredSize = readSize(in); - buckets = (List) in.readList(getBucketReader()); + buckets = (List) in.readList(getBucketReader()); } @Override @@ -86,24 +86,24 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeList(buckets); } - abstract InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata); + protected abstract BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata); @Override - public List getBuckets() { + public List getBuckets() { return unmodifiableList(buckets); } @Override - public InternalGeoGrid reduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; + public BaseGeoGrid reduce(List aggregations, ReduceContext reduceContext) { + LongObjectPagedHashMap> buckets = null; for (InternalAggregation aggregation : aggregations) { - InternalGeoGrid grid = (InternalGeoGrid) aggregation; + BaseGeoGrid grid = (BaseGeoGrid) aggregation; if (buckets == null) { buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); } for (Object obj : grid.buckets) { - InternalGeoGridBucket bucket = (InternalGeoGridBucket) obj; - List existingBuckets = buckets.get(bucket.hashAsLong()); + BaseGeoGridBucket bucket = (BaseGeoGridBucket) obj; + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new ArrayList<>(aggregations.size()); buckets.put(bucket.hashAsLong(), existingBuckets); @@ -113,13 +113,13 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; ordered.insertWithOverflow(reduceBucket(sameCellBuckets, reduceContext)); } buckets.close(); - InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + BaseGeoGridBucket[] list = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); } @@ -128,11 +128,11 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } @Override - protected InternalGeoGridBucket reduceBucket(List buckets, ReduceContext context) { + protected BaseGeoGridBucket reduceBucket(List buckets, ReduceContext context) { assert buckets.size() > 0; List aggregationsList = new ArrayList<>(buckets.size()); long docCount = 0; - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { docCount += bucket.docCount; aggregationsList.add(bucket.aggregations); } @@ -140,12 +140,12 @@ protected InternalGeoGridBucket reduceBucket(List buckets return createBucket(buckets.get(0).hashAsLong, docCount, aggs); } - abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); + protected abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { bucket.toXContent(builder, params); } builder.endArray(); @@ -168,7 +168,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; - InternalGeoGrid other = (InternalGeoGrid) obj; + BaseGeoGrid other = (BaseGeoGrid) obj; return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(buckets, other.buckets); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java similarity index 87% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java index 93fcdbd098400..f362d2b3d33d6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java @@ -45,12 +45,12 @@ /** * Base implementation of geogrid aggs * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket +public abstract class BaseGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, - Comparable { + Comparable { protected long hashAsLong; protected long docCount; @@ -58,7 +58,7 @@ public abstract class InternalGeoGridBucket ext long bucketOrd; - public InternalGeoGridBucket(long hashAsLong, long docCount, 
InternalAggregations aggregations) { + public BaseGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; this.hashAsLong = hashAsLong; @@ -67,7 +67,7 @@ public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregation /** * Read from a stream. */ - public InternalGeoGridBucket(StreamInput in) throws IOException { + public BaseGeoGridBucket(StreamInput in) throws IOException { hashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); @@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - long hashAsLong() { + public long hashAsLong() { return hashAsLong; } @@ -95,7 +95,7 @@ public Aggregations getAggregations() { } @Override - public int compareTo(InternalGeoGridBucket other) { + public int compareTo(BaseGeoGridBucket other) { if (this.hashAsLong > other.hashAsLong) { return 1; } @@ -119,7 +119,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; + BaseGeoGridBucket bucket = (BaseGeoGridBucket) o; return hashAsLong == bucket.hashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index 70d0552b3e80b..83fcdf4f66424 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -38,14 +38,14 @@ * * @opensearch.internal */ -class BucketPriorityQueue extends PriorityQueue { +class BucketPriorityQueue extends PriorityQueue { BucketPriorityQueue(int size) { super(size); } @Override - protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { + protected boolean lessThan(BaseGeoGridBucket o1, BaseGeoGridBucket o2) { int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); if (cmp == 0) { cmp = o2.compareTo(o1); diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index d40029e9a762d..89ce288770185 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -43,7 +43,7 @@ * Wrapper class to help convert {@link MultiGeoPointValues} * to numeric long values for bucketing. 
* - * @opensearch.internal + * @opensearch.api */ public class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index 4ae888640efc8..b2fe6e33ef95c 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -39,13 +39,13 @@ * A geo-grid aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific * precision. * - * @opensearch.internal + * @opensearch.api */ public interface GeoGrid extends MultiBucketsAggregation { /** * A bucket that is associated with a geo-grid cell. The key of the bucket is - * the {@link InternalGeoGridBucket#getKeyAsString()} of the cell + * the {@link BaseGeoGridBucket#getKeyAsString()} of the cell */ interface Bucket extends MultiBucketsAggregation.Bucket {} diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 4a904b3aa2b16..0ca2a28844f99 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -58,9 +58,9 @@ import java.util.function.Function; /** - * Base Aggregation Builder for geohash_grid and geotile_grid aggs + * Base Aggregation Builder for geogrid aggs * - * @opensearch.internal + * @opensearch.api */ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { /* recognized field names in JSON */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 909772c61a960..db07ac8f947e5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -55,16 +55,16 @@ /** * Aggregates data expressed as longs (for efficiency's sake) but formats results as aggregation-specific strings. * - * @opensearch.internal + * @opensearch.api */ -public abstract class GeoGridAggregator extends BucketsAggregator { +public abstract class GeoGridAggregator extends BucketsAggregator { protected final int requiredSize; protected final int shardSize; protected final ValuesSource.Numeric valuesSource; protected final LongKeyedBucketOrds bucketOrds; - GeoGridAggregator( + protected GeoGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -118,23 +118,23 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } - abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); + protected abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); /** * This method is used to return a re-usable instance of the bucket when building * the aggregation. 
- * @return a new {@link InternalGeoGridBucket} implementation with empty parameters + * @return a new {@link BaseGeoGridBucket} implementation with empty parameters */ - abstract InternalGeoGridBucket newEmptyBucket(); + protected abstract BaseGeoGridBucket newEmptyBucket(); @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalGeoGridBucket[][] topBucketsPerOrd = new InternalGeoGridBucket[owningBucketOrds.length][]; + BaseGeoGridBucket[][] topBucketsPerOrd = new BaseGeoGridBucket[owningBucketOrds.length][]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - InternalGeoGridBucket spare = null; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + BaseGeoGridBucket spare = null; LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); while (ordsEnum.next()) { if (spare == null) { @@ -149,7 +149,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I spare = ordered.insertWithOverflow(spare); } - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[ordered.size()]; + topBucketsPerOrd[ordIdx] = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; --i) { topBucketsPerOrd[ordIdx][i] = ordered.pop(); } @@ -163,7 +163,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } @Override - public InternalGeoGrid buildEmptyAggregation() { + public BaseGeoGrid buildEmptyAggregation() { return buildAggregation(name, requiredSize, Collections.emptyList(), metadata()); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java index ff1247300939a..aa1d5504ad24f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoHashGrid extends InternalGeoGrid { +public class GeoHashGrid extends BaseGeoGrid { - InternalGeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoHashGrid(StreamInput in) throws IOException { + public GeoHashGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoHashGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoHashGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoHashGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index bbaf9613fb216..760d7d643c0a5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geohash_grid * - * @opensearch.internal + * @opensearch.api */ public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geohash_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 6ca7a4d8a9cb8..9ff9fe7d8f9ba 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -47,9 +47,9 @@ * * @opensearch.internal */ -public class GeoHashGridAggregator extends GeoGridAggregator { +class GeoHashGridAggregator extends GeoGridAggregator { - public GeoHashGridAggregator( + GeoHashGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -64,16 +64,17 @@ public GeoHashGridAggregator( } @Override - InternalGeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected GeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoHashGrid buildEmptyAggregation() { - return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoHashGrid buildEmptyAggregation() { + return new GeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoHashGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 1914c07e831f7..898a7d82a4dec 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -58,7 +58,7 @@ * * @opensearch.internal */ -public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -86,7 +86,7 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, emptyList(), metadata); + final InternalAggregation aggregation = new GeoHashGrid(name, requiredSize, emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java index fa544b5893f0c..91c523c80855e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoTileGrid extends InternalGeoGrid { +public class GeoTileGrid extends BaseGeoGrid { - InternalGeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoTileGrid(StreamInput in) throws IOException { + public GeoTileGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoTileGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 76ad515f34fe5..0f1f87bdc57fa 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geotile_grid agg * - * @opensearch.internal + * @opensearch.api */ public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geotile_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index a205a9afde41e..8faed4e9cd2d4 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -48,9 +48,9 @@ * * @opensearch.internal */ -public class GeoTileGridAggregator extends GeoGridAggregator { +class GeoTileGridAggregator extends GeoGridAggregator { - public GeoTileGridAggregator( + GeoTileGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -65,16 +65,17 @@ public GeoTileGridAggregator( } @Override - InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected GeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoTileGrid buildEmptyAggregation() { - return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoTileGrid buildEmptyAggregation() { + return new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoTileGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index b830988a3d410..6eb73727ad6c8 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -85,7 +85,7 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); + final InternalAggregation aggregation = new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index 659909e868651..6e7ed8a679681 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoHashGridBucket extends BaseGeoGridBucket { InternalGeoHashGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -51,7 +51,7 @@ public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoTileGridBucket extends BaseGeoGridBucket { InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -52,7 +52,7 @@ public class InternalGeoTileGridBucket extends InternalGeoGridBucket implements GeoGrid { @@ -63,7 +63,7 @@ public static ObjectParser createParser( return parser; } - protected void setName(String name) { + public void setName(String name) { super.setName(name); } } diff --git 
a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 80124cda50b19..cbe3a2ee89dd7 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -40,7 +40,7 @@ /** * A single geo grid bucket result parsed between nodes * - * @opensearch.internal + * @opensearch.api */ public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index 109524e755c4d..343149f8e19ab 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGrid extends ParsedGeoGrid { +class ParsedGeoHashGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoHashGrid::new, diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 4e6e454b08324..6704273f45580 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { +class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { @Override public GeoPoint getKey() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 8734c96a15578..cb64a0e153e87 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoTileGrid extends ParsedGeoGrid { +class ParsedGeoTileGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoTileGrid::new, diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java index 3c7c292f9d193..bc7fde8d66d0a 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -17,7 +17,6 @@ import org.opensearch.common.geo.GeoPoint; import 
org.opensearch.geo.GeoModulePlugin; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -31,7 +30,7 @@ import java.util.Map; /** - * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation. + * Testing the geo tile grid as part of CompositeAggregation. */ public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index d6153637f656d..5ec10a7f4f7cf 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -73,7 +73,7 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { +public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { private static final String FIELD_NAME = "location"; protected static final double GEOHASH_TOLERANCE = 1E-5D; @@ -201,9 +201,9 @@ public void testAsSubAgg() throws IOException { Consumer verify = (terms) -> { Map> actual = new TreeMap<>(); for (StringTerms.Bucket tb : terms.getBuckets()) { - InternalGeoGrid gg = tb.getAggregations().get("gg"); + BaseGeoGrid gg = tb.getAggregations().get("gg"); Map sub = new TreeMap<>(); - for (InternalGeoGridBucket ggb : gg.getBuckets()) { + for (BaseGeoGridBucket ggb : gg.getBuckets()) { sub.put(ggb.getKeyAsString(), ggb.getDocCount()); } actual.put(tb.getKeyAsString(), sub); @@ -299,7 +299,7 @@ private void testCase( String field, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex ) throws IOException { testCase(query, precision, geoBoundingBox, verify, buildIndex, createBuilder("_name").field(field)); @@ -309,7 +309,7 @@ private void testCase( Query query, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex, GeoGridAggregationBuilder aggregationBuilder ) throws IOException { @@ -333,7 +333,7 @@ private void testCase( aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((InternalGeoGrid) aggregator.buildTopLevel()); + verify.accept((BaseGeoGrid) aggregator.buildTopLevel()); indexReader.close(); directory.close(); diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index 432736a2b43fe..2a655239997b6 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -50,16 +50,16 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridTestCase> extends - InternalMultiBucketAggregationTestCase { +public abstract class GeoGridTestCase> extends 
InternalMultiBucketAggregationTestCase< + T> { /** - * Instantiate a {@link InternalGeoGrid}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGrid}-derived class using the same parameters as constructor. */ - protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); + protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); /** - * Instantiate a {@link InternalGeoGridBucket}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGridBucket}-derived class using the same parameters as constructor. */ protected abstract B createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations); @@ -117,7 +117,7 @@ protected List getNamedXContents() { protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); @@ -176,7 +176,7 @@ protected Class implementationClass() { protected T mutateInstance(T instance) { String name = instance.getName(); int size = instance.getRequiredSize(); - List buckets = instance.getBuckets(); + List buckets = instance.getBuckets(); Map metadata = instance.getMetadata(); switch (between(0, 3)) { case 0: @@ -206,7 +206,7 @@ protected T mutateInstance(T instance) { } public void testCreateFromBuckets() { - InternalGeoGrid original = createTestInstance(); + BaseGeoGrid original = createTestInstance(); assertThat(original, equalTo(original.create(original.buckets))); } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index c84c6ef5ec076..ada943b6dd369 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoHashGridTests extends GeoGridTestCase { +public class GeoHashGridTests extends GeoGridTestCase { @Override - protected InternalGeoHashGrid createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoHashGrid(name, size, buckets, metadata); + protected GeoHashGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoHashGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index ead67e0455d94..b59e9ec2cff53 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoTileGridTests extends GeoGridTestCase { +public class GeoTileGridTests extends GeoGridTestCase { @Override - protected InternalGeoTileGrid 
createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoTileGrid(name, size, buckets, metadata); + protected GeoTileGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoTileGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c0d7e51047c6b..706c73e7416f5 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -10,8 +10,8 @@ import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -24,14 +24,14 @@ public static GeoBoundsAggregationBuilder geoBounds(String name) { } /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + * Create a new {@link GeoHashGrid} aggregation with the given name. */ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + * Create a new {@link GeoTileGrid} aggregation with the given name. */ public static GeoTileGridAggregationBuilder geotileGrid(String name) { return new GeoTileGridAggregationBuilder(name); diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 3473cf2d94b76..89debdf5abd95 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,7 +8,7 @@ package org.opensearch.geo.tests.common; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.BaseGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -17,7 +17,7 @@ public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } - public static boolean hasValue(InternalGeoGrid agg) { + public static boolean hasValue(BaseGeoGrid agg) { return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); } } From ef45809468ff381498c69a8b0af07e7443967836 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 08:44:54 -0500 Subject: [PATCH 08/14] [Remove] LegacyESVersion.V_7_0_* and V_7_1_* constants (#2768) * [Remove] LegacyESVersion.V_7_0_* and V_7_1_* constants Removes all usages of LegacyESVersion.V_7_0_ and LegacyESVersion.V_7_1 version constants. 
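For context, the recurring pattern this change deletes is a wire-format gate on the legacy constant. A minimal before/after sketch (it mirrors the MultiSearchTemplateResponse hunk in this diff; field names elsewhere differ):

    // Before: reads were guarded so responses from pre-7.0 nodes could omit the field.
    if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
        tookInMillis = in.readVLong();
    } else {
        tookInMillis = -1L;
    }

    // After: every wire-compatible peer already speaks the post-7.0 format,
    // so the read is unconditional and the version constant can be removed.
    tookInMillis = in.readVLong();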
Signed-off-by: Nicholas Walter Knize * Rebase from main Signed-off-by: Nicholas Walter Knize * fix serialization issue with build flavor removal Signed-off-by: Nicholas Walter Knize * remove stale bwc test Signed-off-by: Nicholas Walter Knize * rebase and update Signed-off-by: Nicholas Walter Knize * cleanup Signed-off-by: Nicholas Walter Knize * fix failing mapper test Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../common/CommonAnalysisModulePlugin.java | 34 +-- .../common/NGramTokenFilterFactory.java | 29 +- .../mustache/MultiSearchTemplateResponse.java | 11 +- ...GoogleCloudStorageRetryingInputStream.java | 3 +- .../s3/S3RetryingInputStream.java | 3 +- .../upgrades/FullClusterRestartIT.java | 9 +- .../upgrades/QueryBuilderBWCIT.java | 3 +- .../upgrades/JodaCompatibilityIT.java | 282 ------------------ .../org/opensearch/upgrades/MappingIT.java | 70 ----- .../org/opensearch/upgrades/RecoveryIT.java | 9 +- .../upgrades/SystemIndicesUpgradeIT.java | 13 +- .../src/main/java/org/opensearch/Build.java | 41 +-- .../java/org/opensearch/LegacyESVersion.java | 4 - .../org/opensearch/OpenSearchException.java | 2 +- .../src/main/java/org/opensearch/Version.java | 2 +- .../cluster/state/ClusterStateResponse.java | 8 - .../indices/alias/IndicesAliasesRequest.java | 8 +- .../indices/create/CreateIndexRequest.java | 7 - .../TransportGetFieldMappingsIndexAction.java | 4 +- .../mapping/put/PutMappingRequest.java | 6 - .../action/delete/DeleteRequest.java | 7 - ...TransportFieldCapabilitiesIndexAction.java | 3 +- .../org/opensearch/action/get/GetRequest.java | 7 - .../action/get/MultiGetRequest.java | 7 - .../opensearch/action/index/IndexRequest.java | 6 - .../opensearch/action/main/MainResponse.java | 7 - .../action/search/MultiSearchResponse.java | 11 +- .../action/search/SearchRequest.java | 8 +- .../termvectors/TermVectorsRequest.java | 8 - .../action/update/UpdateRequest.java | 29 -- .../cluster/coordination/Coordinator.java | 9 +- .../cluster/metadata/MappingMetadata.java | 7 - .../opensearch/cluster/metadata/Metadata.java | 34 +-- .../metadata/MetadataCreateIndexService.java | 27 +- .../cluster/routing/UnassignedInfo.java | 6 +- .../java/org/opensearch/common/Rounding.java | 19 +- .../breaker/CircuitBreakingException.java | 11 +- .../java/org/opensearch/common/joda/Joda.java | 13 - .../org/opensearch/common/lucene/Lucene.java | 56 ++-- .../java/org/opensearch/env/NodeMetadata.java | 6 +- .../opensearch/gateway/GatewayMetaState.java | 5 +- .../index/analysis/AnalysisRegistry.java | 11 +- .../analysis/ShingleTokenFilterFactory.java | 47 +-- .../index/mapper/DateFieldMapper.java | 7 +- .../index/mapper/DocumentMapper.java | 7 +- .../index/mapper/DocumentMapperParser.java | 2 +- .../mapper/LegacyGeoShapeFieldMapper.java | 7 +- .../index/mapper/MapperService.java | 6 +- .../index/mapper/RangeFieldMapper.java | 8 +- .../index/query/MultiMatchQueryBuilder.java | 7 - .../index/similarity/SimilarityProviders.java | 48 +-- .../index/similarity/SimilarityService.java | 60 +--- .../opensearch/indices/IndicesService.java | 9 +- .../indices/analysis/AnalysisModule.java | 17 +- .../indices/mapper/MapperRegistry.java | 14 +- .../org/opensearch/monitor/jvm/JvmInfo.java | 16 +- .../org/opensearch/script/ScriptStats.java | 6 +- .../org/opensearch/search/DocValueFormat.java | 44 +-- .../pipeline/InternalPercentilesBucket.java | 12 +- ...tilesBucketPipelineAggregationBuilder.java | 11 +- .../PercentilesBucketPipelineAggregator.java | 11 +- 
.../BaseMultiValuesSourceFieldConfig.java | 13 +- .../ValuesSourceAggregationBuilder.java | 14 +- .../search/builder/SearchSourceBuilder.java | 12 +- .../search/dfs/DfsSearchResult.java | 28 +- .../search/internal/ShardSearchRequest.java | 10 +- .../opensearch/search/slice/SliceBuilder.java | 12 +- .../opensearch/search/suggest/Suggest.java | 65 +--- .../completion/context/GeoContextMapping.java | 38 +-- .../search/suggest/term/TermSuggestion.java | 10 +- .../opensearch/snapshots/RestoreService.java | 10 - .../transport/RemoteConnectionInfo.java | 71 +---- .../test/java/org/opensearch/BuildTests.java | 13 +- .../ExceptionSerializationTests.java | 2 +- .../java/org/opensearch/VersionTests.java | 2 +- .../state/ClusterStateRequestTests.java | 15 +- .../action/search/SearchRequestTests.java | 7 +- .../coordination/JoinTaskExecutorTests.java | 20 +- .../MetadataIndexStateServiceTests.java | 90 ------ .../common/settings/SettingsTests.java | 3 +- .../index/query/RangeQueryBuilderTests.java | 2 +- .../similarity/SimilarityServiceTests.java | 8 +- .../indices/IndicesModuleTests.java | 28 +- .../indices/IndicesServiceTests.java | 13 +- .../search/query/QuerySearchResultTests.java | 33 -- .../test/rest/yaml/section/DoSection.java | 13 - .../AbstractFullClusterRestartTestCase.java | 5 +- 88 files changed, 241 insertions(+), 1481 deletions(-) delete mode 100644 qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java delete mode 100644 qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 018475c7e9ae6..e1b23eaa5ac20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) ### Fixed diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 57865e15d523a..7cad01c5cc00a 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -485,19 +485,10 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [edge_ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "edgeNGram_deprecation", - "The [edgeNGram] token filter name is deprecated and will be removed in a future version.
" - + "Please change the filter name to [edge_ngram] instead." - ); - } - return new EdgeNGramTokenFilter(reader, 1); + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); })); filters.add( PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)) @@ -524,19 +515,10 @@ public List getPreConfiguredTokenFilters() { ); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.openSearchVersion("nGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "nGram_deprecation", - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead." - ); - } - return new NGramTokenFilter(reader, 1, 2, false); + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." + ); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java index 218bb74b84667..a6adf680a454c 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.NGramTokenFilter; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -54,25 +53,15 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" - + maxAllowedNgramDiff - + "] but was [" - + ngramDiff - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "ngram_big_difference", - "Deprecated big difference between max_gram and min_gram in NGram Tokenizer," - + "expected difference must be less than or equal to: [" - + maxAllowedNgramDiff - + "]" - ); - } + throw new IllegalArgumentException( + "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + + maxAllowedNgramDiff + + "] but was [" + + ngramDiff + + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + + "] index level setting." + ); } preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index 1802d03e20942..7c2c403fdd487 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -32,7 +32,6 @@ package org.opensearch.script.mustache; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.search.MultiSearchResponse; @@ -125,11 +124,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = -1L; - } + tookInMillis = in.readVLong(); } MultiSearchTemplateResponse(Item[] items, long tookInMillis) { @@ -159,9 +154,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index 72d3e37466d09..5448799e7f81b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -42,7 +42,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.SpecialPermission; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.internal.io.IOUtils; @@ -61,7 +60,7 @@ /** * Wrapper around reads from GCS that will retry blob downloads that fail part-way through, resuming from where the failure occurred. * This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. 
*/ class GoogleCloudStorageRetryingInputStream extends InputStream { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 388f5b8d74a2b..f751d63232f79 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -40,7 +40,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.core.internal.io.IOUtils; import java.io.IOException; @@ -52,7 +51,7 @@ /** * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. * * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 714d8a252579f..0ed51b9d8a011 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -329,9 +329,6 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - shrinkIndexRequest.addParameter("copy_settings", "true"); - } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -1253,7 +1250,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { settings.startObject("settings"); settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); - if (randomBoolean() || getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { + if (randomBoolean()) { // this is the default after v7.0.0, but is required before that settings.field("soft_deletes.enabled", true); } @@ -1436,10 +1433,6 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index de042cb2b7634..856dc45d42203 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ 
b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -46,6 +46,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.DisMaxQueryBuilder; @@ -157,7 +158,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { - final String type = getOldClusterVersion().before(LegacyESVersion.V_7_0_0) ? "doc" : "_doc"; + final String type = MapperService.SINGLE_MAPPING_NAME; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java deleted file mode 100644 index 0ef1e3a5050af..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.apache.http.HttpStatus; -import org.apache.http.util.EntityUtils; -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Node; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.Booleans; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.DocValueFormat; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; - -/** - * This is test is meant to verify that when upgrading from 6.x version to 7.7 or newer it is able to parse date fields with joda pattern. - * - * The test is indexing documents and searches with use of joda or java pattern. - * In order to make sure that serialization logic is used a search call is executed 3 times (using all nodes). 
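A note on the MapperService.SINGLE_MAPPING_NAME substitution above: with mapping types removed, every index has exactly one implicit type and the constant resolves to the literal "_doc", so the pre-7.0 "doc" branch has no remaining callers. Illustrative usage, reusing the queries index from this test:

    // SINGLE_MAPPING_NAME is "_doc"; there is no 6.x "doc" case left to pick.
    final String type = MapperService.SINGLE_MAPPING_NAME;
    Request putDoc = new Request("PUT", "/queries/" + type + "/1");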
- * It cannot be guaranteed that serialization logic will always be used as it might happen that - * all shards are allocated on the same node and client is connecting to it. - * Because of this warnings assertions have to be ignored. - * - * A special flag used when serializing {@link DocValueFormat.DateTime#writeTo DocValueFormat.DateTime::writeTo} - * is used to indicate that an index was created in 6.x and has a joda pattern. The same flag is read when - * {@link DocValueFormat.DateTime#DateTime(StreamInput)} deserializing. - * When upgrading from 7.0-7.6 to 7.7 there is no way to tell if a pattern was created in 6.x as this flag cannot be added. - * Hence a skip assume section in init() - * - * @see org.opensearch.search.DocValueFormat.DateTime - */ -public class JodaCompatibilityIT extends AbstractRollingTestCase { - - @BeforeClass - public static void init(){ - assumeTrue("upgrading from 7.0-7.6 will fail parsing joda formats", - UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - } - - public void testJodaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("joda_time", "YYYY-MM-dd'T'HH:mm:ssZZ"); - createTestIndex.setOptions(ignoreWarnings()); - - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("joda_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 2 : 3; - postNewDoc("joda_time", minute); - - Request search = dateRangeSearch("joda_time"); - search.setOptions(ignoreWarnings()); - - performOnAllNodes(search, r -> assertEquals(HttpStatus.SC_OK, r.getStatusLine().getStatusCode())); - break; - case UPGRADED: - postNewDoc("joda_time", 4); - - search = searchWithAgg("joda_time"); - search.setOptions(ignoreWarnings()); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - break; - } - } - - public void testJavaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("java_time", "8yyyy-MM-dd'T'HH:mm:ssXXX"); - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("java_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 
2 : 3; - postNewDoc("java_time", minute); - - Request search = dateRangeSearch("java_time"); - Response searchResp = client().performRequest(search); - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - break; - case UPGRADED: - postNewDoc("java_time", 4); - - search = searchWithAgg("java_time"); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - - break; - } - } - - private RequestOptions ignoreWarnings() { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - return options.build(); - } - - private void performOnAllNodes(Request search, Consumer consumer) throws IOException { - List nodes = client().getNodes(); - for (Node node : nodes) { - client().setNodes(Collections.singletonList(node)); - Response response = client().performRequest(search); - consumer.accept(response); - assertEquals(HttpStatus.SC_OK, response.getStatusLine().getStatusCode()); - } - client().setNodes(nodes); - } - - private void assertResponseHasAllDocuments(Response searchResp) { - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - try { - assertEquals(removeWhiteSpace("{" + - " \"_shards\": {" + - " \"total\": 3," + - " \"successful\": 3" + - " },"+ - " \"hits\": {" + - " \"total\": 4," + - " \"hits\": [" + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:01+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:02+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:03+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:04+01:00\"" + - " }" + - " }" + - " ]" + - " }" + - "}"), - EntityUtils.toString(searchResp.getEntity(), StandardCharsets.UTF_8)); - } catch (IOException e) { - throw new AssertionError("Exception during response parising", e); - } - } - - private String removeWhiteSpace(String input) { - return input.replaceAll("[\\n\\r\\t\\ ]", ""); - } - - private Request dateRangeSearch(String endpoint) { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - search.setJsonEntity("" + - "{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return search; - } - - private Request searchWithAgg(String endpoint) throws IOException { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - - search.setJsonEntity("{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " },\n" + - " \"aggs\" : {\n" + - " \"docs_per_year\" : {\n" + - " \"date_histogram\" : {\n" + - " \"field\" : \"date\",\n" + - " \"calendar_interval\" : \"year\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return 
search; - } - private Request indexWithDateField(String indexName, String format) { - Request createTestIndex = new Request("PUT", indexName); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity("{\n" + - " \"settings\": {\n" + - " \"index.number_of_shards\": 3\n" + - " },\n" + - " \"mappings\": {\n" + - " \"properties\": {\n" + - " \"datetime\": {\n" + - " \"type\": \"date\",\n" + - " \"format\": \"" + format + "\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - return createTestIndex; - } - - private void postNewDoc(String endpoint, int minute) throws IOException { - Request putDoc = new Request("POST", endpoint+"/_doc"); - putDoc.addParameter("refresh", "true"); - putDoc.addParameter("wait_for_active_shards", "all"); - putDoc.setJsonEntity("{\n" + - " \"datetime\": \"2020-01-01T00:00:0" + minute + "+01:00\"\n" + - "}" - ); - Response resp = client().performRequest(putDoc); - assertEquals(HttpStatus.SC_CREATED, resp.getStatusLine().getStatusCode()); - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java deleted file mode 100644 index 07b1d67fde7ff..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.support.XContentMapValues; - -public class MappingIT extends AbstractRollingTestCase { - /** - * Create a mapping that explicitly disables the _all field (possible in 6x, see #37429) - * and check that it can be upgraded to 7x. - */ - public void testAllFieldDisable6x() throws Exception { - assumeTrue("_all", UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = new Request("PUT", "all-index"); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity( - "{ \"settings\": { \"index.number_of_shards\": 1 }, " + - "\"mappings\": {\"_all\": { \"enabled\": false }, \"properties\": { \"field\": { \"type\": \"text\" }}}}" - ); - createTestIndex.setOptions(expectWarnings("[_all] is deprecated in 6.0+ and will be removed in 7.0. 
As a replacement," + - " " + "you can use [copy_to] on mapping fields to create your own catch all field.")); - Response resp = client().performRequest(createTestIndex); - assertEquals(200, resp.getStatusLine().getStatusCode()); - break; - - default: - final Request request = new Request("GET", "all-index"); - Response response = client().performRequest(request); - assertEquals(200, response.getStatusLine().getStatusCode()); - Object enabled = XContentMapValues.extractValue("all-index.mappings._all.enabled", entityAsMap(response)); - assertNotNull(enabled); - assertEquals(false, enabled); - break; - } - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index cbf91fa9d71e7..3d71f4a198aac 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -47,6 +47,7 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ObjectPath; import org.hamcrest.Matcher; @@ -244,6 +245,7 @@ private String getNodeId(Predicate versionPredicate) throws IOException if (versionPredicate.test(version)) { return id; } + return id; } return null; } @@ -270,6 +272,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); break; case MIXED: + // todo: verify this test can be removed in 3.0.0 final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is placed on the old node @@ -348,11 +351,7 @@ public void testRecovery() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - if (getNodeId(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)) == null) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); - } else { - client().performRequest(new Request("DELETE", index + "/_doc/" + i)); - } + client().performRequest(new Request("DELETE", index + "/" + MapperService.SINGLE_MAPPING_NAME + "/" + i)); } } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index c50af0084b000..634dc0628f27a 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -59,13 +59,8 @@ public void testSystemIndicesUpgrades() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\", \"_type\" : \"_doc\"}}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } else { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } + bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); // start a async reindex job 
@@ -91,10 +86,6 @@ public void testSystemIndicesUpgrades() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/server/src/main/java/org/opensearch/Build.java b/server/src/main/java/org/opensearch/Build.java index 364b17ad4aa33..13c951b10cfe3 100644 --- a/server/src/main/java/org/opensearch/Build.java +++ b/server/src/main/java/org/opensearch/Build.java @@ -207,58 +207,27 @@ public String date() { } public static Build readBuild(StreamInput in) throws IOException { - final String distribution; - final Type type; // the following is new for opensearch: we write the distribution to support any "forks" - if (in.getVersion().onOrAfter(Version.V_1_0_0)) { - distribution = in.readString(); - } else { - distribution = "other"; - } - - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. - // TODO - clean this up when OSS flavor is removed in all of the code base - // (Integ test zip still write OSS as distribution) - // See issue: https://github.com/opendistro-for-elasticsearch/search/issues/159 - if (in.getVersion().before(Version.V_1_3_0)) { - String flavor = in.readString(); - } + final String distribution = in.readString(); // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); + final Type type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - - final String version; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + final String version = in.readString(); return new Build(type, hash, date, snapshot, version, distribution); } public static void writeBuild(Build build, StreamOutput out) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code - if (out.getVersion().onOrAfter(Version.V_1_0_0)) { - out.writeString(build.distribution); - } + out.writeString(build.distribution); - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. 
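Build.readBuild and writeBuild form a symmetric pair, so the version guards have to be dropped from both sides within the same change; the matching write side follows in the next hunk. Relatedly, the numeric ids carried by the LegacyESVersion constants removed below encode the version as major * 1_000_000 + minor * 10_000 + revision * 100 + build, with build 99 marking a release. A simplified decoding sketch (describeVersionId is a hypothetical helper, not part of the codebase):

    // 7000099 decodes to 7.0.0 and 7020099 to 7.2.0.
    static String describeVersionId(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        return major + "." + minor + "." + revision;
    }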
- // TODO - clean this up when OSS flavor is removed in all of the code base - if (out.getVersion().before(Version.V_1_3_0)) { - out.writeString("oss"); - } final Type buildType = build.type(); out.writeString(buildType.displayName()); out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index d4ac3c7d2f8b1..1eb22a6bef3b5 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,10 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_0_0 = new LegacyESVersion(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_0_1 = new LegacyESVersion(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_0 = new LegacyESVersion(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_1 = new LegacyESVersion(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4b6ca173ec692..17ece23f819a2 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -1533,7 +1533,7 @@ private enum OpenSearchExceptionHandle { org.opensearch.cluster.coordination.CoordinationStateRejectedException.class, org.opensearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - LegacyESVersion.V_7_0_0 + UNKNOWN_VERSION_ADDED ), SNAPSHOT_IN_PROGRESS_EXCEPTION( org.opensearch.snapshots.SnapshotInProgressException.class, diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 1bffe9ec98ec5..3387eee2dffc8 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -412,7 +412,7 @@ private Version computeMinIndexCompatVersion() { } else if (major == 7 || major == 1) { return LegacyESVersion.fromId(6000026); } else if (major == 2) { - return LegacyESVersion.V_7_0_0; + return LegacyESVersion.fromId(7000099); } else { bwcMajor = major - 1; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index 89cd112d30c79..d2d7d843e19db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -32,14 +32,12 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import 
org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -59,9 +57,6 @@ public ClusterStateResponse(StreamInput in) throws IOException { super(in); clusterName = new ClusterName(in); clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - new ByteSizeValue(in); - } waitForTimedOut = in.readBoolean(); } @@ -98,9 +93,6 @@ public boolean isWaitForTimedOut() { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeOptionalWriteable(clusterState); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - ByteSizeValue.ZERO.writeTo(out); - } out.writeBoolean(waitForTimedOut); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 62f51aa3f3bff..eb2d2706a6531 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -284,9 +284,7 @@ public AliasActions(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { isHidden = in.readOptionalBoolean(); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - originalAliases = in.readStringArray(); - } + originalAliases = in.readStringArray(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { mustExist = in.readOptionalBoolean(); } else { @@ -308,9 +306,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { out.writeOptionalBoolean(isHidden); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(originalAliases); - } + out.writeStringArray(originalAliases); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeOptionalBoolean(mustExist); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 95837d82be7ac..302c2aad64bb4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; @@ -126,9 +125,6 @@ public CreateIndexRequest(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(new Alias(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -505,9 +501,6 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } waitForActiveShards.writeTo(out); } } diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 64f76db5e1549..6d238a385231f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -120,8 +119,7 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - Version indexCreatedVersion = indexService.mapperService().getIndexSettings().getIndexVersionCreated(); - Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(indexCreatedVersion, f); + Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(f); Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); DocumentMapper documentMapper = indexService.mapperService().documentMapper(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 85fd74f0762a5..a8eeedd4a3e4c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -117,9 +117,6 @@ public PutMappingRequest(StreamInput in) throws IOException { } } source = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { @@ -349,9 +346,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index ce723df0c383a..86880c0211c1d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; @@ -96,9 +95,6 @@ public 
DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); ifSeqNo = in.readZLong(); @@ -280,9 +276,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeLong(version); out.writeByte(versionType.getValue()); out.writeZLong(ifSeqNo); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 99962741299ca..7d9ab4ff93f59 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -153,8 +153,7 @@ private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesInd for (String field : fieldNames) { MappedFieldType ft = mapperService.fieldType(field); if (ft != null) { - if (indicesService.isMetadataField(mapperService.getIndexSettings().getIndexVersionCreated(), field) - || fieldPredicate.test(ft.name())) { + if (indicesService.isMetadataField(field) || fieldPredicate.test(ft.name())) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, ft.familyTypeName(), diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 5f740ba789bb2..64148f070cc16 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; @@ -89,9 +88,6 @@ public GetRequest() {} } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); - } preference = in.readOptionalString(); refresh = in.readBoolean(); storedFields = in.readOptionalStringArray(); @@ -260,9 +256,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); - } out.writeOptionalString(preference); out.writeBoolean(refresh); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 00df8657736ae..91f506dafafe1 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; @@ -114,9 +113,6 @@ public Item(StreamInput in) throws IOException { } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - 
in.readOptionalString(); // _parent - } storedFields = in.readOptionalStringArray(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); @@ -211,9 +207,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalStringArray(storedFields); out.writeLong(version); out.writeByte(versionType.getValue()); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index f863c4a11340e..381eca2dc716f 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -148,9 +148,6 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } id = in.readOptionalString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); version = in.readLong(); @@ -669,9 +666,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeOptionalString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeBytesReference(source); out.writeByte(opType.getId()); out.writeLong(version); diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index 691bbda512275..0fbfdab9ba294 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; @@ -71,9 +70,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { clusterName = new ClusterName(in); clusterUuid = in.readString(); build = Build.readBuild(in); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); - } } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -111,9 +107,6 @@ public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); - } } @Override diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java index 6c25a16a65c75..c4ba3becbc151 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionResponse; @@ -147,11 +146,7 @@ public MultiSearchResponse(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = new Item(in); } - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = 0L; - } + tookInMillis = in.readVLong(); } public MultiSearchResponse(Item[] items, long tookInMillis) { @@ -184,9 +179,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index da34dab6383d9..e4dd0d0b1a116 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -251,9 +251,7 @@ public SearchRequest(StreamInput in) throws IOException { absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; finalReduce = true; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - ccsMinimizeRoundtrips = in.readBoolean(); - } + ccsMinimizeRoundtrips = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_1_0)) { cancelAfterTimeInterval = in.readOptionalTimeValue(); @@ -288,9 +286,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(absoluteStartMillis); out.writeBoolean(finalReduce); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(ccsMinimizeRoundtrips); - } + out.writeBoolean(ccsMinimizeRoundtrips); if (out.getVersion().onOrAfter(Version.V_1_1_0)) { out.writeOptionalTimeValue(cancelAfterTimeInterval); diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index cf2d10d2f1db3..dcd5feda0004a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.termvectors; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; @@ -189,10 +188,6 @@ public TermVectorsRequest() {} xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } preference = in.readOptionalString(); long flags = in.readVLong(); @@ -541,9 +536,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(xContentType); } out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalString(preference); long longFlags = 0; for (Flag flag : flagsEnum) { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index d434f134f4321..abd3c31597c18 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -170,9 +170,6 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } if (in.readBoolean()) { script = 
new Script(in); } @@ -181,26 +178,11 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti if (in.readBoolean()) { doc = new IndexRequest(shardId, in); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - String[] fields = in.readOptionalStringArray(); - if (fields != null) { - throw new IllegalArgumentException("[fields] is no longer supported"); - } - } fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { upsertRequest = new IndexRequest(shardId, in); } docAsUpsert = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - long version = in.readLong(); - VersionType versionType = VersionType.readFromStream(in); - if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { - throw new UnsupportedOperationException( - "versioned update requests have been removed in 7.0. Use if_seq_no and if_primary_term" - ); - } - } ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); detectNoop = in.readBoolean(); @@ -893,10 +875,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } - boolean hasScript = script != null; out.writeBoolean(hasScript); if (hasScript) { @@ -917,9 +895,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { doc.writeTo(out); } } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalStringArray(null); - } out.writeOptionalWriteable(fetchSourceContext); if (upsertRequest == null) { out.writeBoolean(false); @@ -935,10 +910,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } } out.writeBoolean(docAsUpsert); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeLong(Versions.MATCH_ANY); - out.writeByte(VersionType.INTERNAL.getValue()); - } out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); out.writeBoolean(detectNoop); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1c7e7cd0419e2..7ac716084793d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; @@ -1771,14 +1770,8 @@ protected void sendApplyCommit( } } - // TODO: only here temporarily for BWC development, remove once complete - public static Settings.Builder addZen1Attribute(boolean isZen1Node, Settings.Builder builder) { - return builder.put("node.attr.zen1", isZen1Node); - } - // TODO: only here temporarily for BWC development, remove once complete public static boolean isZen1Node(DiscoveryNode discoveryNode) { - return discoveryNode.getVersion().before(LegacyESVersion.V_7_0_0) - || (Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false"))); + return Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false")); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java 
b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 35ee222541771..223127783621e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -161,9 +160,6 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); // routing out.writeBoolean(routingRequired); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(false); // hasParentField - } } @Override @@ -190,9 +186,6 @@ public MappingMetadata(StreamInput in) throws IOException { source = CompressedXContent.readCompressedString(in); // routing routingRequired = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // hasParentField - } } public static Diff readDiffFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index eb5e8bbc2d49b..fb8123f20e904 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -981,15 +981,9 @@ private static class MetadataDiff implements Diff { MetadataDiff(StreamInput in) throws IOException { clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - clusterUUIDCommitted = in.readBoolean(); - } + clusterUUIDCommitted = in.readBoolean(); version = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata = new CoordinationMetadata(in); - } else { - coordinationMetadata = CoordinationMetadata.EMPTY_METADATA; - } + coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1005,13 +999,9 @@ private static class MetadataDiff implements Diff { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } + out.writeBoolean(clusterUUIDCommitted); out.writeLong(version); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + coordinationMetadata.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1043,12 +1033,8 @@ public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); builder.clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.clusterUUIDCommitted = in.readBoolean(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.coordinationMetadata(new CoordinationMetadata(in)); - } + builder.clusterUUIDCommitted = in.readBoolean(); + builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1074,12 +1060,8 @@ public static Metadata readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + out.writeBoolean(clusterUUIDCommitted); + coordinationMetadata.writeTo(out); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d78e5e872fd2b..36e25b5458b76 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; @@ -1385,21 +1384,17 @@ static void prepareResizeIndexSettings( * the less default split operations are supported */ public static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) { - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_0_0)) { - // only select this automatically for indices that are created on or after 7.0 this will prevent this new behaviour - // until we have a fully upgraded cluster. Additionally it will make integratin testing easier since mixed clusters - // will always have the behavior of the min node in the cluster. - // - // We use as a default number of routing shards the higher number that can be expressed - // as {@code numShards * 2^x`} that is less than or equal to the maximum number of shards: 1024. - int log2MaxNumShards = 10; // logBase2(1024) - int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards)) - int numSplits = log2MaxNumShards - log2NumShards; - numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once - return numShards * 1 << numSplits; - } else { - return numShards; - } + // only select this automatically for indices that are created on or after 7.0; this prevents the new behaviour + // until we have a fully upgraded cluster. Additionally it makes integration testing easier since mixed clusters + // will always have the behavior of the min node in the cluster. + // + // We use as a default number of routing shards the highest number that can be expressed + // as {@code numShards * 2^x} that is less than or equal to the maximum number of shards: 1024.
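A worked example of the routing-shards computation that follows: for numShards = 5, ceil(log2(5)) is 3, so numSplits = 10 - 3 = 7 and the default becomes 5 * 2^7 = 640, the largest value of the form numShards * 2^x that does not exceed 1024.

    // Worked example of the computation below, with numShards = 5.
    int numShards = 5;
    int log2MaxNumShards = 10;                                            // log2(1024)
    int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(log2(5)) = 3
    int numSplits = Math.max(1, log2MaxNumShards - log2NumShards);        // 7
    int numRoutingShards = numShards * 1 << numSplits;                    // (5 * 1) << 7 = 640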
+ int log2MaxNumShards = 10; // logBase2(1024) + int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards)) + int numSplits = log2MaxNumShards - log2NumShards; + numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once + return numShards * 1 << numSplits; } public static void validateTranslogRetentionSettings(Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 49c18fe44cc04..489c6125f7d13 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -322,11 +322,7 @@ public UnassignedInfo(StreamInput in) throws IOException { } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && reason == Reason.INDEX_CLOSED) { - out.writeByte((byte) Reason.REINITIALIZED.ordinal()); - } else { - out.writeByte((byte) reason.ordinal()); - } + out.writeByte((byte) reason.ordinal()); out.writeLong(unassignedTimeMillis); // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs out.writeBoolean(delayed); diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index c396f6c88fd57..7160cb1e6d233 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -459,20 +459,13 @@ static class TimeUnitRounding extends Rounding { } TimeUnitRounding(StreamInput in) throws IOException { - this( - DateTimeUnit.resolve(in.readByte()), - in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readZoneId() : DateUtils.of(in.readString()) - ); + this(DateTimeUnit.resolve(in.readByte()), in.readZoneId()); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeByte(unit.getId()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeZoneId(timeZone); - } else { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } + out.writeZoneId(timeZone); } @Override @@ -924,17 +917,13 @@ static class TimeIntervalRounding extends Rounding { } TimeIntervalRounding(StreamInput in) throws IOException { - this(in.readVLong(), in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? 
in.readZoneId() : DateUtils.of(in.readString())); + this(in.readVLong(), in.readZoneId()); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeVLong(interval); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeZoneId(timeZone); - } else { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } + out.writeZoneId(timeZone); } @Override diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java index ee9977bfa2eb0..fda089cf26942 100644 --- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java +++ b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java @@ -31,7 +31,6 @@ package org.opensearch.common.breaker; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -55,11 +54,7 @@ public CircuitBreakingException(StreamInput in) throws IOException { super(in); byteLimit = in.readLong(); bytesWanted = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - durability = in.readEnum(CircuitBreaker.Durability.class); - } else { - durability = CircuitBreaker.Durability.PERMANENT; - } + durability = in.readEnum(CircuitBreaker.Durability.class); } public CircuitBreakingException(String message, CircuitBreaker.Durability durability) { @@ -78,9 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(byteLimit); out.writeLong(bytesWanted); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeEnum(durability); - } + out.writeEnum(durability); } public long getBytesWanted() { diff --git a/server/src/main/java/org/opensearch/common/joda/Joda.java b/server/src/main/java/org/opensearch/common/joda/Joda.java index 9ecb3f2236e7c..7a82b8ce49d21 100644 --- a/server/src/main/java/org/opensearch/common/joda/Joda.java +++ b/server/src/main/java/org/opensearch/common/joda/Joda.java @@ -32,8 +32,6 @@ package org.opensearch.common.joda; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.time.DateFormatter; @@ -388,17 +386,6 @@ public DateTimeField getField(Chronology chronology) { } }; - /** - * Checks if a pattern is Joda-style. - * Joda style patterns are not always compatible with java.time patterns. 
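Context for the helper removed below: in 6.x, date format strings defaulted to Joda syntax, and prefixing a pattern with "8" opted it into java.time, which is what the startsWith("8") test encodes. With pre-7.0 indices no longer readable there is no Joda case left to detect. The removed logic, for reference:

    // A pattern counted as Joda-style only when the index predated 7.0
    // and did not carry the java.time opt-in "8" prefix, for example
    // "YYYY-MM-dd'T'HH:mm:ssZZ" (Joda) vs "8yyyy-MM-dd'T'HH:mm:ssXXX" (java.time).
    public static boolean isJodaPattern(Version version, String pattern) {
        return version.before(LegacyESVersion.V_7_0_0) && pattern.startsWith("8") == false;
    }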
- * @param version - creation version of the index where pattern was used - * @param pattern - the pattern to check - * @return - true if pattern is joda style, otherwise false - */ - public static boolean isJodaPattern(Version version, String pattern) { - return version.before(LegacyESVersion.V_7_0_0) && pattern.startsWith("8") == false; - } - /** * parses epcoch timers * diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 2692a8fa2b914..7b69dff020bc4 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -97,7 +97,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -322,10 +321,7 @@ public static boolean exists(IndexSearcher searcher, Query query) throws IOExcep public static TotalHits readTotalHits(StreamInput in) throws IOException { long totalHits = in.readVLong(); - TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - totalHitsRelation = in.readEnum(TotalHits.Relation.class); - } + TotalHits.Relation totalHitsRelation = in.readEnum(TotalHits.Relation.class); return new TotalHits(totalHits, totalHitsRelation); } @@ -444,11 +440,7 @@ public static ScoreDoc readScoreDoc(StreamInput in) throws IOException { public static void writeTotalHits(StreamOutput out, TotalHits totalHits) throws IOException { out.writeVLong(totalHits.value); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeEnum(totalHits.relation); - } else if (totalHits.value > 0 && totalHits.relation != TotalHits.Relation.EQUAL_TO) { - throw new IllegalArgumentException("Cannot serialize approximate total hit counts to nodes that are on a version < 7.0.0"); - } + out.writeEnum(totalHits.relation); } public static void writeTopDocs(StreamOutput out, TopDocsAndMaxScore topDocs) throws IOException { @@ -648,20 +640,16 @@ public static void writeSortField(StreamOutput out, SortField sortField) throws } private static Number readExplanationValue(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - final int numberType = in.readByte(); - switch (numberType) { - case 0: - return in.readFloat(); - case 1: - return in.readDouble(); - case 2: - return in.readZLong(); - default: - throw new IOException("Unexpected number type: " + numberType); - } - } else { - return in.readFloat(); + final int numberType = in.readByte(); + switch (numberType) { + case 0: + return in.readFloat(); + case 1: + return in.readDouble(); + case 2: + return in.readZLong(); + default: + throw new IOException("Unexpected number type: " + numberType); } } @@ -680,19 +668,15 @@ public static Explanation readExplanation(StreamInput in) throws IOException { } private static void writeExplanationValue(StreamOutput out, Number value) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - if (value instanceof Float) { - out.writeByte((byte) 0); - out.writeFloat(value.floatValue()); - } else if (value instanceof Double) { - out.writeByte((byte) 1); - out.writeDouble(value.doubleValue()); - } else { - out.writeByte((byte) 2); - out.writeZLong(value.longValue()); - } - } else { + if 
(value instanceof Float) { + out.writeByte((byte) 0); out.writeFloat(value.floatValue()); + } else if (value instanceof Double) { + out.writeByte((byte) 1); + out.writeDouble(value.doubleValue()); + } else { + out.writeByte((byte) 2); + out.writeZLong(value.longValue()); } } diff --git a/server/src/main/java/org/opensearch/env/NodeMetadata.java b/server/src/main/java/org/opensearch/env/NodeMetadata.java index 3944ecfd72d4c..03e92424c4517 100644 --- a/server/src/main/java/org/opensearch/env/NodeMetadata.java +++ b/server/src/main/java/org/opensearch/env/NodeMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.env; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ObjectParser; @@ -93,7 +92,7 @@ public Version nodeVersion() { public NodeMetadata upgradeToCurrentVersion() { if (nodeVersion.equals(Version.V_EMPTY)) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards"; return new NodeMetadata(nodeId, Version.CURRENT); } @@ -127,8 +126,7 @@ public void setNodeVersionId(int nodeVersionId) { public NodeMetadata build() { final Version nodeVersion; if (this.nodeVersion == null) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 - : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards"; nodeVersion = Version.V_EMPTY; } else { nodeVersion = this.nodeVersion; diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index f70fdea153893..48dd0ddf90413 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; @@ -136,8 +135,8 @@ public void start( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 - : "legacy metadata loader is not needed anymore from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 + : "legacy metadata loader is not needed anymore from v4 onwards"; final Tuple legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index 8ec2b70001fc9..7a78d97edf360 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -35,7 +35,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import 
org.opensearch.cluster.metadata.IndexMetadata; @@ -213,12 +212,10 @@ public Analyzer getAnalyzer(String analyzer) throws IOException { } }); } else if ("standard_html_strip".equals(analyzer)) { - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "[standard_html_strip] analyzer is not supported for new indices, " - + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter" - ); - } + throw new IllegalArgumentException( + "[standard_html_strip] analyzer is not supported for new indices, " + + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter" + ); } return analyzerProvider.get(environment, analyzer).get(); diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index 701a9302fc164..e66ae20508dfe 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -35,8 +35,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; import org.apache.lucene.analysis.shingle.ShingleFilter; -import org.opensearch.LegacyESVersion; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -48,8 +46,6 @@ */ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ShingleTokenFilterFactory.class); - private final Factory factory; public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { @@ -61,27 +57,17 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro int shingleDiff = maxShingleSize - minShingleSize + (outputUnigrams ? 1 : 0); if (shingleDiff > maxAllowedShingleDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" - + " must be less than or equal to: [" - + maxAllowedShingleDiff - + "] but was [" - + shingleDiff - + "]. This limit" - + " can be set by changing the [" - + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "excessive_shingle_diff", - "Deprecated big difference between maxShingleSize and minShingleSize" - + " in Shingle TokenFilter, expected difference must be less than or equal to: [" - + maxAllowedShingleDiff - + "]" - ); - } + throw new IllegalArgumentException( + "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" + + " must be less than or equal to: [" + + maxAllowedShingleDiff + + "] but was [" + + shingleDiff + + "]. This limit" + + " can be set by changing the [" + + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() + + "] index level setting." 
+ ); } Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false); @@ -105,16 +91,7 @@ public TokenStream create(TokenStream tokenStream) { @Override public TokenFilterFactory getSynonymFilter() { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); - } else { - DEPRECATION_LOGGER.deprecate( - name() + "_synonym_tokenfilters", - "Token filter " + name() + "] will not be usable to parse synonyms after v7.0" - ); - } - return this; - + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); } public Factory getInnerFactory() { diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 8e01c1f41f078..4b19fe4c5de79 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -45,7 +45,6 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.joda.Joda; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.time.DateFormatter; @@ -260,11 +259,7 @@ public Builder( private DateFormatter buildFormatter() { try { - if (Joda.isJodaPattern(indexCreatedVersion, format.getValue())) { - return Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); - } + return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java index 1e5b3b4a9c93e..23d58fa18b7e3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java @@ -40,7 +40,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchGenerationException; -import org.opensearch.Version; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; @@ -90,12 +89,8 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) { this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1)); this.rootObjectMapper = builder.build(builderContext); - final String type = rootObjectMapper.name(); final DocumentMapper existingMapper = mapperService.documentMapper(); - final Version indexCreatedVersion = mapperService.getIndexSettings().getIndexVersionCreated(); - final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers( - indexCreatedVersion - ); + final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers(); for (Map.Entry entry : metadataMapperParsers.entrySet()) { final String name = entry.getKey(); final MetadataFieldMapper existingMetadataMapper = existingMapper == null diff --git 
a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index 9fa088396a38b..237d69e3ad244 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -91,7 +91,7 @@ public DocumentMapperParser( this.scriptService = scriptService; this.typeParsers = mapperRegistry.getMapperParsers(); this.indexVersionCreated = indexSettings.getIndexVersionCreated(); - this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(indexVersionCreated); + this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(); } public Mapper.TypeParser.ParserContext parserContext() { diff --git a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java index 71d76c6a835c2..a2224e7214f4b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -41,7 +41,6 @@ import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Explicit; @@ -577,11 +576,7 @@ public void doXContentBody(XContentBuilder builder, boolean includeDefaults, Par } else if (includeDefaults && fieldType().treeLevels() == 0) { // defaults only make sense if tree levels are not specified builder.field(DeprecatedParameters.Names.PRECISION.getPreferredName(), DistanceUnit.METERS.toString(50)); } - - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName()); - } - + builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName()); if (includeDefaults || fieldType().distanceErrorPct() != fieldType().defaultDistanceErrorPct) { builder.field(DeprecatedParameters.Names.DISTANCE_ERROR_PCT.getPreferredName(), fieldType().distanceErrorPct()); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index f0d0b77396b0e..af37ddc41b567 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -36,7 +36,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.opensearch.Assertions; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -228,8 +227,7 @@ public MapperService( this.mapperRegistry = mapperRegistry; this.idFieldDataEnabled = idFieldDataEnabled; - if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings()) - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { + if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); } } @@ -674,7 
+672,7 @@ public void close() throws IOException { * this method considers all mapper plugins */ public boolean isMetadataField(String field) { - return mapperRegistry.isMetadataField(indexVersionCreated, field); + return mapperRegistry.isMetadataField(field); } /** diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 5257609e0cba9..faf9fd5182690 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -40,7 +40,6 @@ import org.opensearch.common.Explicit; import org.opensearch.common.collect.Tuple; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.joda.Joda; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Setting; @@ -197,12 +196,7 @@ protected RangeFieldType setupFieldType(BuilderContext context) { // The builder context may not have index created version, falling back to indexCreatedVersion // property of this mapper builder. - DateFormatter dateTimeFormatter; - if (Joda.isJodaPattern(context.indexCreatedVersionOrDefault(indexCreatedVersion), format.getValue())) { - dateTimeFormatter = Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); - } + DateFormatter dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); return new RangeFieldType( buildFullName(context), index.getValue(), diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 4e7b6fb51291b..fe3bcd81e72be 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -254,9 +253,6 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { maxExpansions = in.readVInt(); minimumShouldMatch = in.readOptionalString(); fuzzyRewrite = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalBoolean(); // unused use_dis_max flag - } tieBreaker = in.readOptionalFloat(); lenient = in.readOptionalBoolean(); cutoffFrequency = in.readOptionalFloat(); @@ -282,9 +278,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(maxExpansions); out.writeOptionalString(minimumShouldMatch); out.writeOptionalString(fuzzyRewrite); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalBoolean(null); - } out.writeOptionalFloat(tieBreaker); out.writeOptionalBoolean(lenient); out.writeOptionalFloat(cutoffFrequency); diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java index 1e3ec368df411..139b8fffbac3a 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java +++ 
b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java @@ -63,7 +63,6 @@ import org.apache.lucene.search.similarities.NormalizationH2; import org.apache.lucene.search.similarities.NormalizationH3; import org.apache.lucene.search.similarities.NormalizationZ; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; @@ -157,22 +156,9 @@ private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings if (model == null) { String replacement = LEGACY_BASIC_MODELS.get(basicModel); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model." - ); - } else { - deprecationLogger.deprecate( - basicModel + "_similarity_model_replaced", - "Basic model [" - + basicModel - + "] isn't supported anymore and has arbitrarily been replaced with [" - + replacement - + "]." - ); - model = BASIC_MODELS.get(replacement); - assert model != null; - } + throw new IllegalArgumentException( + "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model." + ); } } @@ -195,22 +181,9 @@ private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Setting if (effect == null) { String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "After effect [" + afterEffect + "] isn't supported anymore, please use another effect." - ); - } else { - deprecationLogger.deprecate( - afterEffect + "_after_effect_replaced", - "After effect [" - + afterEffect - + "] isn't supported anymore and has arbitrarily been replaced with [" - + replacement - + "]." - ); - effect = AFTER_EFFECTS.get(replacement); - assert effect != null; - } + throw new IllegalArgumentException( + "After effect [" + afterEffect + "] isn't supported anymore, please use another effect." 
+ ); } } @@ -294,14 +267,7 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett unknownSettings.removeAll(Arrays.asList(supportedSettings)); unknownSettings.remove("type"); // used to figure out which sim this is if (unknownSettings.isEmpty() == false) { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); - } else { - deprecationLogger.deprecate( - "unknown_similarity_setting", - "Unknown settings for similarity of type [" + type + "]: " + unknownSettings - ); - } + throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); } } diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java index d575ec508acb6..c3fc7ffbb0fe5 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java @@ -39,12 +39,10 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity.SimScorer; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.TriFunction; import org.opensearch.common.logging.DeprecationLogger; @@ -76,25 +74,12 @@ public final class SimilarityService extends AbstractIndexComponent { static { Map>> defaults = new HashMap<>(); defaults.put(CLASSIC_SIMILARITY, version -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - return () -> { - throw new IllegalArgumentException( - "The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead." - ); - }; - } else { - final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); - return () -> { - deprecationLogger.deprecate( - "classic_similarity", - "The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead." - ); - return similarity; - }; - } + return () -> { + throw new IllegalArgumentException( + "The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead." + ); + }; }); defaults.put("BM25", version -> { final LegacyBM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); @@ -107,20 +92,10 @@ public final class SimilarityService extends AbstractIndexComponent { Map> builtIn = new HashMap<>(); builtIn.put(CLASSIC_SIMILARITY, (settings, version, script) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead." 
- ); - } else { - deprecationLogger.deprecate( - "classic_similarity", - "The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead." - ); - return SimilarityProviders.createClassicSimilarity(settings, version); - } + throw new IllegalArgumentException( + "The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead." + ); }); builtIn.put("BM25", (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); builtIn.put("boolean", (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); @@ -258,10 +233,7 @@ private static void validateScoresArePositive(Version indexCreatedVersion, Simil for (int freq = 1; freq <= 10; ++freq) { float score = scorer.score(freq, norm); if (score < 0) { - fail( - indexCreatedVersion, - "Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm) - ); + fail("Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm)); break; } } @@ -288,7 +260,6 @@ private static void validateScoresDoNotDecreaseWithFreq(Version indexCreatedVers float score = scorer.score(freq, norm); if (score < previousScore) { fail( - indexCreatedVersion, "Similarity scores should not decrease when term frequency increases:\n" + scorer.explain(Explanation.match(freq - 1, "term freq"), norm) + "\n" @@ -327,7 +298,6 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers float score = scorer.score(1, norm); if (score > previousScore) { fail( - indexCreatedVersion, "Similarity scores should not increase when norm increases:\n" + scorer.explain(Explanation.match(1, "term freq"), norm - 1) + "\n" @@ -340,12 +310,8 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers } } - private static void fail(Version indexCreatedVersion, String message) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException(message); - } else { - deprecationLogger.deprecate("similarity_failure", message); - } + private static void fail(String message) { + throw new IllegalArgumentException(message); } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 6808803ee0988..f2961c0f3b13d 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -40,10 +40,8 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -699,8 +697,7 @@ private synchronized IndexService createIndexService( IndexingOperationListener... 
indexingOperationListeners ) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetadata, settings, indexScopedSettings); - if (idxSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0) - && EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { + if (EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { throw new IllegalArgumentException( "Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0" ); @@ -1710,8 +1707,8 @@ public Function> getFieldFilter() { /** * Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise. */ - public boolean isMetadataField(Version indexCreatedVersion, String field) { - return mapperRegistry.isMetadataField(indexCreatedVersion, field); + public boolean isMetadataField(String field) { + return mapperRegistry.isMetadataField(field); } /** diff --git a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java index cc87c982a684d..22be07dd90f94 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java @@ -33,7 +33,6 @@ package org.opensearch.indices.analysis; import org.apache.lucene.analysis.LowerCaseFilter; -import org.apache.lucene.analysis.TokenStream; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -42,7 +41,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; -import org.opensearch.index.analysis.AbstractTokenFilterFactory; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.AnalyzerProvider; import org.opensearch.index.analysis.CharFilterFactory; @@ -152,20 +150,7 @@ private NamedRegistry> setupTokenFilters( tokenFilters.register("standard", new AnalysisProvider() { @Override public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - if (indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_0_0)) { - deprecationLogger.deprecate( - "standard_deprecation", - "The [standard] token filter name is deprecated and will be removed in a future version." 
- ); - } else { - throw new IllegalArgumentException("The [standard] token filter has been removed."); - } - return new AbstractTokenFilterFactory(indexSettings, name, settings) { - @Override - public TokenStream create(TokenStream tokenStream) { - return tokenStream; - } - }; + throw new IllegalArgumentException("The [standard] token filter has been removed."); } @Override diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index 3a1d7b1ebb1e3..c26428309aec5 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -32,10 +32,8 @@ package org.opensearch.indices.mapper; -import org.opensearch.Version; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.plugins.MapperPlugin; import java.util.Collections; @@ -53,7 +51,6 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; - private final Map metadataMapperParsersPre20; private final Function> fieldFilter; public MapperRegistry( @@ -63,9 +60,6 @@ public MapperRegistry( ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); - Map tempPre20 = new LinkedHashMap<>(metadataMapperParsers); - tempPre20.remove(NestedPathFieldMapper.NAME); - this.metadataMapperParsersPre20 = Collections.unmodifiableMap(tempPre20); this.fieldFilter = fieldFilter; } @@ -81,15 +75,15 @@ public Map getMapperParsers() { * Return a map of the meta mappers that have been registered. The * returned map uses the name of the field as a key. */ - public Map getMetadataMapperParsers(Version indexCreatedVersion) { - return indexCreatedVersion.onOrAfter(Version.V_2_0_0) ? 
metadataMapperParsers : metadataMapperParsersPre20; + public Map getMetadataMapperParsers() { + return metadataMapperParsers; } /** * Returns true if the provided field is a registered metadata field, false otherwise */ - public boolean isMetadataField(Version indexCreatedVersion, String field) { - return getMetadataMapperParsers(indexCreatedVersion).containsKey(field); + public boolean isMetadataField(String field) { + return getMetadataMapperParsers().containsKey(field); } /** diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java index 819d67cb8621a..426551ab50f18 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java @@ -33,7 +33,6 @@ package org.opensearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; @@ -305,13 +304,8 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - bundledJdk = in.readBoolean(); - usingBundledJdk = in.readOptionalBoolean(); - } else { - bundledJdk = false; - usingBundledJdk = null; - } + bundledJdk = in.readBoolean(); + usingBundledJdk = in.readOptionalBoolean(); startTime = in.readLong(); inputArguments = new String[in.readInt()]; for (int i = 0; i < inputArguments.length; i++) { @@ -341,10 +335,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(bundledJdk); - out.writeOptionalBoolean(usingBundledJdk); - } + out.writeBoolean(bundledJdk); + out.writeOptionalBoolean(usingBundledJdk); out.writeLong(startTime); out.writeInt(inputArguments.length); for (String inputArgument : inputArguments) { diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java index 34d868f1d6046..9c8f1157cb718 100644 --- a/server/src/main/java/org/opensearch/script/ScriptStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptStats.java @@ -89,7 +89,7 @@ public ScriptStats(ScriptContextStats context) { public ScriptStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); - compilationLimitTriggered = in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readVLong() : 0; + compilationLimitTriggered = in.readVLong(); contextStats = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readList(ScriptContextStats::new) : Collections.emptyList(); } @@ -97,9 +97,7 @@ public ScriptStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(compilationLimitTriggered); - } + out.writeVLong(compilationLimitTriggered); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeList(contextStats); } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 84c46e400543a..4b592303ee253 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -34,7 +34,6 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -44,7 +43,6 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; -import org.opensearch.common.time.DateUtils; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.aggregations.bucket.GeoTileUtils; @@ -224,34 +222,12 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu public DateTime(StreamInput in) throws IOException { String datePattern = in.readString(); - String zoneId = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.of(zoneId); - this.resolution = DateFieldMapper.Resolution.MILLISECONDS; - } else { - this.timeZone = ZoneId.of(zoneId); - this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); - } - final boolean isJoda; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - // if stream is from 7.7 Node it will have a flag indicating if format is joda - isJoda = in.readBoolean(); - } else { - /* - When received a stream from 6.0-6.latest Node it can be java if starts with 8 otherwise joda. - - If a stream is from [7.0 - 7.7) the boolean indicating that this is joda is not present. - This means that if an index was created in 6.x using joda pattern and then cluster was upgraded to - 7.x but earlier then 7.0, there is no information that can tell that the index is using joda style pattern. - It will be assumed that clusters upgrading from [7.0 - 7.7) are using java style patterns. - */ - isJoda = Joda.isJodaPattern(in.getVersion(), datePattern); - } + this.timeZone = ZoneId.of(zoneId); + this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); + final boolean isJoda = in.readBoolean(); this.formatter = isJoda ? 
Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern); - this.parser = formatter.toDateMathParser(); - } @Override @@ -262,16 +238,10 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } else { - out.writeString(timeZone.getId()); - out.writeVInt(resolution.ordinal()); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - // in order not to loose information if the formatter is a joda we send a flag - out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. - } + out.writeString(timeZone.getId()); + out.writeVInt(resolution.ordinal()); + // in order not to lose information if the formatter is a joda we send a flag + out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. } public DateMathParser getDateMathParser() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java index 40c7a5791454c..7c30656505663 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentBuilder; @@ -99,11 +98,7 @@ public InternalPercentilesBucket(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); percentiles = in.readDoubleArray(); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } - + keyed = in.readBoolean(); computeLookup(); } @@ -112,10 +107,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeDoubleArray(percentiles); out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index bef97bbbaa83a..8e68e62b04766 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.DoubleArrayList; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -70,19 +69,13 @@ public PercentilesBucketPipelineAggregationBuilder(String name, String bucketsPa public PercentilesBucketPipelineAggregationBuilder(StreamInput in) throws IOException { super(in, NAME); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { -
keyed = in.readBoolean(); - } + keyed = in.readBoolean(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java index bd838fe23da8b..7fad7e233c424 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; @@ -76,19 +75,13 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg public PercentilesBucketPipelineAggregator(StreamInput in) throws IOException { super(in); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } + keyed = in.readBoolean(); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java index b7e157b6050fc..ae76fd0a3aa3f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java @@ -15,7 +15,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; @@ -86,11 +85,7 @@ public BaseMultiValuesSourceFieldConfig(StreamInput in) throws IOException { } this.missing = in.readGenericValue(); this.script = in.readOptionalWriteable(Script::new); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - this.timeZone = in.readOptionalZoneId(); - } + this.timeZone = in.readOptionalZoneId(); } @Override @@ -102,11 +97,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeGenericValue(missing); out.writeOptionalWriteable(script); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + out.writeOptionalZoneId(timeZone); doWriteTo(out); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 
52afc6435d562..b492d9cadb975 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -31,12 +31,10 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.AbstractObjectParser; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentBuilder; @@ -233,11 +231,7 @@ private void read(StreamInput in) throws IOException { } format = in.readOptionalString(); missing = in.readGenericValue(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - timeZone = in.readOptionalZoneId(); - } + timeZone = in.readOptionalZoneId(); } @Override @@ -259,11 +253,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { } out.writeOptionalString(format); out.writeGenericValue(missing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + out.writeOptionalZoneId(timeZone); innerWriteTo(out); } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index d3b131cf9f792..565932f1bca13 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -260,11 +260,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); collapse = in.readOptionalWriteable(CollapseBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - trackTotalHitsUpTo = in.readOptionalInt(); - } else { - trackTotalHitsUpTo = in.readBoolean() ? TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED; - } + trackTotalHitsUpTo = in.readOptionalInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { if (in.readBoolean()) { fetchFields = in.readList(FieldAndFormat::new); @@ -326,11 +322,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); out.writeOptionalWriteable(collapse); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeOptionalInt(trackTotalHitsUpTo); - } else { - out.writeBoolean(trackTotalHitsUpTo == null ? 
true : trackTotalHitsUpTo > SearchContext.TRACK_TOTAL_HITS_DISABLED); - } + out.writeOptionalInt(trackTotalHitsUpTo); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java index 90cc547f62a95..4a82b8eba653f 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java @@ -147,16 +147,10 @@ public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap= 0; out.writeVLong(statistics.maxDoc()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - // stats are always positive numbers - out.writeVLong(statistics.docCount()); - out.writeVLong(statistics.sumTotalTermFreq()); - out.writeVLong(statistics.sumDocFreq()); - } else { - out.writeVLong(addOne(statistics.docCount())); - out.writeVLong(addOne(statistics.sumTotalTermFreq())); - out.writeVLong(addOne(statistics.sumDocFreq())); - } + // stats are always positive numbers + out.writeVLong(statistics.docCount()); + out.writeVLong(statistics.sumTotalTermFreq()); + out.writeVLong(statistics.sumDocFreq()); } } @@ -188,16 +182,10 @@ static ObjectObjectHashMap readFieldStats(StreamIn final long docCount; final long sumTotalTermFreq; final long sumDocFreq; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - // stats are always positive numbers - docCount = in.readVLong(); - sumTotalTermFreq = in.readVLong(); - sumDocFreq = in.readVLong(); - } else { - docCount = subOne(in.readVLong()); - sumTotalTermFreq = subOne(in.readVLong()); - sumDocFreq = subOne(in.readVLong()); - } + // stats are always positive numbers + docCount = in.readVLong(); + sumTotalTermFreq = in.readVLong(); + sumDocFreq = in.readVLong(); CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq); fieldStatistics.put(field, stats); } diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index 828c2f8c78d69..006a0627c337d 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -258,11 +258,7 @@ public ShardSearchRequest(StreamInput in) throws IOException { outboundNetworkTime = in.readVLong(); } clusterAlias = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - allowPartialSearchResults = in.readBoolean(); - } else { - allowPartialSearchResults = false; - } + allowPartialSearchResults = in.readBoolean(); indexRoutings = in.readStringArray(); preference = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { @@ -336,9 +332,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce out.writeVLong(outboundNetworkTime); } out.writeOptionalString(clusterAlias); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(allowPartialSearchResults); - } + out.writeBoolean(allowPartialSearchResults); if (asKey == false) { out.writeStringArray(indexRoutings); out.writeOptionalString(preference); diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index 
21e8a5646b9a5..1fd94eaddb2dd 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -35,7 +35,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -260,16 +259,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, String field = this.field; boolean useTermQuery = false; if ("_uid".equals(field)) { - // on new indices, the _id acts as a _uid - field = IdFieldMapper.NAME; - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); - } - DEPRECATION_LOG.deprecate( - "slice_on_uid", - "Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead" - ); - useTermQuery = true; + throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); } else if (IdFieldMapper.NAME.equals(field)) { useTermQuery = true; } else if (type.hasDocValues() == false) { diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 90cc382ee4126..0aa881e2a3c9e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -33,7 +33,6 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.common.CheckedFunction; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -53,8 +52,6 @@ import org.opensearch.search.suggest.Suggest.Suggestion.Entry; import org.opensearch.search.suggest.Suggest.Suggestion.Entry.Option; import org.opensearch.search.suggest.completion.CompletionSuggestion; -import org.opensearch.search.suggest.phrase.PhraseSuggestion; -import org.opensearch.search.suggest.term.TermSuggestion; import java.io.IOException; import java.util.ArrayList; @@ -101,36 +98,11 @@ public Suggest(List>> suggestions) } public Suggest(StreamInput in) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - final int size = in.readVInt(); - suggestions = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - Suggestion> suggestion; - final int type = in.readVInt(); - switch (type) { - case TermSuggestion.TYPE: - suggestion = new TermSuggestion(in); - break; - case CompletionSuggestion.TYPE: - suggestion = new CompletionSuggestion(in); - break; - case PhraseSuggestion.TYPE: - suggestion = new PhraseSuggestion(in); - break; - default: - throw new IllegalArgumentException("Unknown suggestion type with ordinal " + type); - } - suggestions.add(suggestion); - } - } else { - int suggestionCount = in.readVInt(); - suggestions = new ArrayList<>(suggestionCount); - for (int i = 0; i < suggestionCount; i++) { - suggestions.add(in.readNamedWriteable(Suggestion.class)); - } + int suggestionCount = in.readVInt(); + suggestions = new ArrayList<>(suggestionCount); + for (int i = 0; i < suggestionCount; i++) { + 
suggestions.add(in.readNamedWriteable(Suggestion.class)); } - hasScoreDocs = filter(CompletionSuggestion.class).stream().anyMatch(CompletionSuggestion::hasScoreDocs); } @@ -169,18 +141,9 @@ public boolean hasScoreDocs() { @Override public void writeTo(StreamOutput out) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeVInt(suggestions.size()); - for (Suggestion command : suggestions) { - out.writeVInt(command.getWriteableType()); - command.writeTo(out); - } - } else { - out.writeVInt(suggestions.size()); - for (Suggestion> suggestion : suggestions) { - out.writeNamedWriteable(suggestion); - } + out.writeVInt(suggestions.size()); + for (Suggestion> suggestion : suggestions) { + out.writeNamedWriteable(suggestion); } } @@ -284,13 +247,6 @@ public Suggestion(String name, int size) { public Suggestion(StreamInput in) throws IOException { name = in.readString(); size = in.readVInt(); - - // this is a hack to work around slightly different serialization order of earlier versions of TermSuggestion - if (in.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion t = (TermSuggestion) this; - t.setSort(SortBy.readFromStream(in)); - } - int entriesCount = in.readVInt(); entries.clear(); for (int i = 0; i < entriesCount; i++) { @@ -398,13 +354,6 @@ public void trim() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeVInt(size); - - // this is a hack to work around slightly different serialization order in older versions of TermSuggestion - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion termSuggestion = (TermSuggestion) this; - termSuggestion.getSort().writeTo(out); - } - out.writeVInt(entries.size()); for (Entry entry : entries) { entry.writeTo(out); diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index e3b809dc57b83..bf9598a3110ad 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -37,7 +37,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.geo.GeoPoint; @@ -312,37 +311,14 @@ public void validateReferences(Version indexVersionCreated, Function sortComparator() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - sort.writeTo(out); - } + sort.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 417498467622a..ca5078c4d1c56 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -391,16 +391,6 @@ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionLi @Override public ClusterState execute(ClusterState currentState) { RestoreInProgress restoreInProgress = 
currentState.custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY); - if (currentState.getNodes().getMinNodeVersion().before(LegacyESVersion.V_7_0_0)) { - // Check if another restore process is already running - cannot run two restore processes at the - // same time in versions prior to 7.0 - if (restoreInProgress.isEmpty() == false) { - throw new ConcurrentSnapshotExecutionException( - snapshot, - "Restore process is already running in this cluster" - ); - } - } // Check if the snapshot to restore is currently being deleted SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index ac88eae624813..92c0d482a848f 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -36,20 +36,14 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.Arrays; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyList; /** * This class encapsulates all remote cluster information to be rendered on @@ -79,26 +73,7 @@ public RemoteConnectionInfo(StreamInput input) throws IOException { clusterAlias = input.readString(); skipUnavailable = input.readBoolean(); } else { - List seedNodes; - if (input.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - seedNodes = Arrays.asList(input.readStringArray()); - } else { - // versions prior to 7.0.0 sent the resolved transport address of the seed nodes - final List transportAddresses = input.readList(TransportAddress::new); - seedNodes = transportAddresses.stream() - .map(a -> a.address().getHostString() + ":" + a.address().getPort()) - .collect(Collectors.toList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * We just throw any HTTP addresses received here on the floor - * because we don't need to do anything with them. 
- */ - input.readList(TransportAddress::new); - } - + List seedNodes = Arrays.asList(input.readStringArray()); int connectionsPerCluster = input.readVInt(); initialConnectionTimeout = input.readTimeValue(); int numNodesConnected = input.readVInt(); @@ -137,52 +112,12 @@ public void writeTo(StreamOutput out) throws IOException { } else { if (modeInfo.modeType() == RemoteConnectionStrategy.ConnectionStrategy.SNIFF) { SniffConnectionStrategy.SniffModeInfo sniffInfo = (SniffConnectionStrategy.SniffModeInfo) this.modeInfo; - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(sniffInfo.seedNodes.stream().map(s -> { - final String host = RemoteConnectionStrategy.parseHost(s); - final int port = RemoteConnectionStrategy.parsePort(s); - try { - return new TransportAddress(InetAddress.getByAddress(host, TransportAddress.META_ADDRESS.getAddress()), port); - } catch (final UnknownHostException e) { - throw new AssertionError(e); - } - }).collect(Collectors.toList())); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. - */ - out.writeList(emptyList()); - } + out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); out.writeVInt(sniffInfo.maxConnectionsPerCluster); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(sniffInfo.numNodesConnected); } else { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(new String[0]); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(emptyList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. 
- */ - out.writeList(emptyList()); - } + out.writeStringArray(new String[0]); out.writeVInt(0); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(0); diff --git a/server/src/test/java/org/opensearch/BuildTests.java b/server/src/test/java/org/opensearch/BuildTests.java index eeb6890699fdc..6e6a91419b762 100644 --- a/server/src/test/java/org/opensearch/BuildTests.java +++ b/server/src/test/java/org/opensearch/BuildTests.java @@ -299,26 +299,17 @@ public void testSerializationBWC() throws IOException { new Build(Build.Type.DOCKER, randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6), "other") ); - final List versions = Version.getDeclaredVersions(LegacyESVersion.class); - final Version post70Version = randomFrom( - versions.stream().filter(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)).collect(Collectors.toList()) - ); + final List versions = Version.getDeclaredVersions(Version.class); + final Version post10OpenSearchVersion = randomFrom( versions.stream().filter(v -> v.onOrAfter(Version.V_1_0_0)).collect(Collectors.toList()) ); - - final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); final WriteableBuild post10OpenSearch = copyWriteable( dockerBuild, writableRegistry(), WriteableBuild::new, post10OpenSearchVersion ); - - assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - - assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); - assertThat(post70.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); assertThat(post10OpenSearch.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); assertThat(post10OpenSearch.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index ff2bb77531486..0ca4cdd780f94 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -393,7 +393,7 @@ public void testSearchContextMissingException() throws IOException { public void testCircuitBreakingException() throws IOException { CircuitBreakingException ex = serialize( new CircuitBreakingException("Too large", 0, 100, CircuitBreaker.Durability.TRANSIENT), - LegacyESVersion.V_7_0_0 + Version.V_2_0_0 ); assertEquals("Too large", ex.getMessage()); assertEquals(100, ex.getByteLimit()); diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index 5b3213ded1c02..70bcf343e4c1e 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -415,7 +415,7 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.V_7_0_0)); + assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(7000099))); assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(6050099))); int currentMajorID = Version.computeID(Version.CURRENT.major, 0, 0, 99); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java 
b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java index 7127e0001592f..672e5ace8b5ae 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -64,14 +63,12 @@ public void testSerialization() throws Exception { Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT ); - // TODO: change version to V_6_6_0 after backporting: - if (testVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - if (randomBoolean()) { - clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); - } - if (randomBoolean()) { - clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); - } + + if (randomBoolean()) { + clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); + } + if (randomBoolean()) { + clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); } BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 19544af63944c..3ffa6d6910548 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; @@ -107,11 +106,7 @@ public void testRandomVersionSerialization() throws IOException { SearchRequest searchRequest = createSearchRequest(); Version version = VersionUtils.randomVersion(random()); SearchRequest deserializedRequest = copyWriteable(searchRequest, namedWriteableRegistry, SearchRequest::new, version); - if (version.before(LegacyESVersion.V_7_0_0)) { - assertTrue(deserializedRequest.isCcsMinimizeRoundtrips()); - } else { - assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); - } + assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis()); assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5aa582a5e73f6..52922bc6a0e83 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -111,18 +111,16 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - final Version tooLow = 
LegacyESVersion.fromString("6.7.0"); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); - } - }); - } + final Version tooLow = LegacyESVersion.fromString("6.7.0"); + expectThrows(IllegalStateException.class, () -> { + if (randomBoolean()) { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + } else { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + } + }); - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0) && minNodeVersion.before(Version.V_3_0_0)) { + if (minNodeVersion.before(Version.V_3_0_0)) { Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java index fb01a493ff7c3..72b22e0efc09b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.close.CloseIndexResponse; @@ -44,9 +43,6 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; @@ -97,7 +93,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -181,91 +176,6 @@ public void testCloseRoutingTableWithSnapshottedIndex() { assertThat(updatedState.blocks().hasIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID), is(true)); } - public void testCloseRoutingTableRemovesRoutingTable() { - final Set nonBlockedIndices = new HashSet<>(); - final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); - final ClusterBlock closingBlock = MetadataIndexStateService.createIndexClosingBlock(); - - ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build(); - for (int i = 0; i < randomIntBetween(1, 25); i++) { - final String indexName = "index-" + i; - - if (randomBoolean()) { - state = addOpenedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state); - nonBlockedIndices.add(state.metadata().index(indexName).getIndex()); - } else { - state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - final Index index = state.metadata().index(indexName).getIndex(); - blockedIndices.put(index, closingBlock); - if (randomBoolean()) { - 
results.put(index, new CloseIndexResponse.IndexResult(index)); - } else { - results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); - } - } - } - - state = ClusterState.builder(state) - .nodes( - DiscoveryNodes.builder(state.nodes()) - .add( - new DiscoveryNode( - "old_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_0_0 - ) - ) - .add( - new DiscoveryNode( - "new_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_2_0 - ) - ) - ) - .build(); - - state = MetadataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); - assertThat(state.metadata().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); - - for (Index nonBlockedIndex : nonBlockedIndices) { - assertIsOpened(nonBlockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); - } - for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).hasFailures() == false) { - IndexMetadata indexMetadata = state.metadata().index(blockedIndex); - assertThat(indexMetadata.getState(), is(IndexMetadata.State.CLOSE)); - Settings indexSettings = indexMetadata.getSettings(); - assertThat(indexSettings.hasValue(MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false)); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), MetadataIndexStateService.INDEX_CLOSED_BLOCK), is(true)); - assertThat( - "Index must have only 1 block with [id=" + MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]", - state.blocks() - .indices() - .getOrDefault(blockedIndex.getName(), emptySet()) - .stream() - .filter(clusterBlock -> clusterBlock.id() == MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID) - .count(), - equalTo(1L) - ); - assertThat( - "Index routing table should have been removed when closing the index on mixed cluster version", - state.routingTable().index(blockedIndex), - nullValue() - ); - } else { - assertIsOpened(blockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), closingBlock), is(true)); - } - } - } - public void testAddIndexClosedBlocks() { final ClusterState initialState = ClusterState.builder(new ClusterName("testAddIndexClosedBlocks")).build(); { diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 6848cd2bbc773..a1b6cd763476a 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -32,7 +32,6 @@ package org.opensearch.common.settings; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -634,7 +633,7 @@ public void testMissingValue() throws Exception { public void testReadWriteArray() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.CURRENT, LegacyESVersion.V_7_0_0)); + output.setVersion(randomFrom(Version.CURRENT, Version.V_2_0_0)); Settings settings = Settings.builder().putList("foo.bar", "0", "1", "2", "3").put("foo.bar.baz", "baz").build(); Settings.writeSettingsToStream(settings, output); StreamInput in = 
StreamInput.wrap(BytesReference.toBytes(output.bytes())); diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 0f451fda7b9fb..22c10844028a9 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -94,7 +94,7 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) { if (randomBoolean()) { // drawing a truly random zoneId here can rarely fail under the following conditons: - // - index versionCreated before V_7_0_0 + // - index versionCreated before legacy V_7_0_0 // - no "forced" date parser through a format parameter // - one of the SystemV* time zones that Jodas DateTimeZone parser doesn't know about // thats why we exlude it here (see #58431) diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java index eb666f1206c26..1d7b749433c65 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.Similarity; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.test.OpenSearchTestCase; @@ -97,7 +97,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, negativeScoresSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -122,7 +122,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -147,7 +147,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index ec7ab06ac86a6..9c8ad3917c23f 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -32,7 +32,6 @@ package org.opensearch.indices; -import org.opensearch.Version; import org.opensearch.index.mapper.DocCountFieldMapper; import 
org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -51,7 +50,6 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.MapperPlugin; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -105,11 +103,9 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(version); + assertFalse(module.getMapperRegistry().getMetadataMapperParsers().isEmpty()); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); assertEquals(EXPECTED_METADATA_FIELDS.length, metadataMapperParsers.size()); int i = 0; for (String field : metadataMapperParsers.keySet()) { @@ -117,12 +113,7 @@ public void testBuiltinMappers() { } } { - Version version = VersionUtils.randomVersionBetween( - random(), - Version.V_1_0_0, - VersionUtils.getPreviousVersion(Version.V_2_0_0) - ); - assertEquals(EXPECTED_METADATA_FIELDS.length - 1, module.getMapperRegistry().getMetadataMapperParsers(version).size()); + assertEquals(EXPECTED_METADATA_FIELDS.length, module.getMapperRegistry().getMetadataMapperParsers().size()); } } @@ -132,11 +123,10 @@ public void testBuiltinWithPlugins() { MapperRegistry registry = module.getMapperRegistry(); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); assertThat( - registry.getMetadataMapperParsers(Version.CURRENT).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT).size()) + registry.getMetadataMapperParsers().size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size()) ); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(Version.CURRENT); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); Iterator iterator = metadataMapperParsers.keySet().iterator(); assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; @@ -213,15 +203,13 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git 
a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index e481384c3d6f3..c39af60650657 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -66,7 +66,6 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -79,7 +78,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.VersionUtils; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -565,16 +563,9 @@ public void testStatsByShardDoesNotDieFromExpectedExceptions() { public void testIsMetadataField() { IndicesService indicesService = getIndicesService(); - final Version randVersion = VersionUtils.randomIndexCompatibleVersion(random()); - assertFalse(indicesService.isMetadataField(randVersion, randomAlphaOfLengthBetween(10, 15))); + assertFalse(indicesService.isMetadataField(randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { - if (NestedPathFieldMapper.NAME.equals(builtIn) && randVersion.before(Version.V_2_0_0)) { - continue; // nested field mapper does not exist prior to 2.0 - } - assertTrue( - "Expected " + builtIn + " to be a metadata field for version " + randVersion, - indicesService.isMetadataField(randVersion, builtIn) - ); + assertTrue(indicesService.isMetadataField(builtIn)); } } diff --git a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java index 6b89eb92065b1..1cdc2f166224f 100644 --- a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java @@ -35,16 +35,13 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.OriginalIndicesTests; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; @@ -52,7 +49,6 @@ import org.opensearch.search.SearchModule; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.Aggregations; -import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.InternalAggregationsTests; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchContextId; @@ -60,9 +56,6 @@ import org.opensearch.search.suggest.SuggestTests; import 
org.opensearch.test.OpenSearchTestCase; -import java.io.IOException; -import java.util.Base64; - import static java.util.Collections.emptyList; public class QuerySearchResultTests extends OpenSearchTestCase { @@ -127,32 +120,6 @@ public void testSerialization() throws Exception { assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); } - public void testReadFromPre_7_1_0() throws IOException { - String message = "AAAAAAAAAGQAAAEAAAB/wAAAAAEBBnN0ZXJtcwVJblhNRgoDBVNhdWpvAAVrS3l3cwVHSVVZaAAFZXRUbEUFZGN0WVoABXhzYnVrAAEDAfoN" - + "A3JhdwUBAAJRAAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVkFhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hyd" - + "y0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2RMZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAA" - + "AAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZd3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXh" - + "DSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAAAEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NL" - + "U1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAApydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQ" - + "lFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksAClRJZHJlSkpVc1Y4AAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVk" - + "FhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hydy0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2R" - + "MZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAAAAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZ" - + "d3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXhDSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAA" - + "AEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NLU1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAA" - + "pydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQlFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksACm5rdExLUHp3cGgBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2t" - + "ldDH/A3JhdwEBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2tldDH/A3JhdwEAAAIAAf////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; - byte[] bytes = Base64.getDecoder().decode(message); - try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { - in.setVersion(LegacyESVersion.V_7_0_0); - QuerySearchResult querySearchResult = new QuerySearchResult(in); - assertEquals(100, querySearchResult.getContextId().getId()); - assertTrue(querySearchResult.hasAggs()); - InternalAggregations aggs = querySearchResult.consumeAggs().expand(); - assertEquals(1, aggs.asList().size()); - // We deserialize and throw away top level pipeline aggs - } - } - public void testNullResponse() throws Exception { QuerySearchResult querySearchResult = QuerySearchResult.nullInstance(); QuerySearchResult deserialized = copyWriteable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, Version.CURRENT); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index f71c67ce456bc..22f4fcc1fde3f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.HasAttributeNodeSelector; import 
org.opensearch.client.Node; @@ -367,18 +366,6 @@ void checkWarningHeaders(final List<String> warningHeaders, final Version cluste final boolean matches = matcher.matches(); if (matches) { final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true); - if (clusterManagerVersion.before(LegacyESVersion.V_7_0_0) - && message.equals( - "the default number of shards will change from [5] to [1] in 7.0.0; " - + "if you wish to continue using the default of [5] shards, " - + "you must manage this on the create index request or with an index template" - )) { - /* - * This warning header will come back in the vast majority of our tests that create an index when running against an - * older cluster-manager. Rather than rewrite our tests to assert this warning header, we assume that it is expected. - */ - continue; - } if (message.startsWith("[types removal]")) { // We skip warnings related to types deprecation because they are *everywhere*. continue; diff --git a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java index d79e1730e16f6..9fb693efa9f8b 100644 --- a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.xcontent.support.XContentMapValues; @@ -52,10 +51,10 @@ public static boolean isRunningAgainstOldCluster() { /** * @return true if test is running against an old cluster before the last major, in this case - * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link LegacyESVersion#V_7_0_0} + * when System.getProperty("tests.is_old_cluster") == true and oldClusterVersion is before {@link Version#V_2_0_0} */ protected final boolean isRunningAgainstAncientCluster() { - return isRunningAgainstOldCluster() && oldClusterVersion.before(LegacyESVersion.V_7_0_0); + return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_2_0_0); } public static Version getOldClusterVersion() { From 588db38c38b3166d6d4ad31bc4fa516e4304ac42 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 7 Oct 2022 09:47:09 -0400 Subject: [PATCH 09/14] Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks (#4696) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../org/opensearch/gradle/PublishPlugin.java | 5 +++-- .../opensearch/gradle/pluginzip/Publish.java | 17 ++++++++++++----- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1b23eaa5ac20..2f7893881fdf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -124,6 +124,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) +- Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks 
([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) ### Security diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 2bdef8e4cd244..be12fdd99c1df 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -92,7 +92,7 @@ public String call() throws Exception { return String.format( "%s/distributions/%s-%s.pom", project.getBuildDir(), - getArchivesBaseName(project), + pomTask.getName().toLowerCase().contains("zip") ? project.getName() : getArchivesBaseName(project), project.getVersion() ); } @@ -130,7 +130,6 @@ public String call() throws Exception { publication.getPom().withXml(PublishPlugin::addScmInfo); if (!publication.getName().toLowerCase().contains("zip")) { - // have to defer this until archivesBaseName is set project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); @@ -139,6 +138,8 @@ public String call() throws Exception { publication.artifact(project.getTasks().getByName("sourcesJar")); publication.artifact(project.getTasks().getByName("javadocJar")); } + } else { + project.afterEvaluate(p -> publication.setArtifactId(project.getName())); } generatePomTask.configure( diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 6dc7d660922b2..6b581fcaa7774 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -13,12 +13,14 @@ import org.gradle.api.publish.maven.MavenPublication; import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; + import org.gradle.api.Task; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; public class Publish implements Plugin<Project> { - // public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; public final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo"; @@ -67,10 +69,15 @@ public void apply(Project project) { if (validatePluginZipPom != null) { validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); } - Task publishPluginZipPublicationToZipStagingRepository = project.getTasks() .findByName("publishPluginZipPublicationToZipStagingRepository"); - if (publishPluginZipPublicationToZipStagingRepository != null) { - publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication"); + + // There are a number of tasks prefixed by 'publishPluginZipPublication', e.g.: // publishPluginZipPublicationToZipStagingRepository, publishPluginZipPublicationToMavenLocal + final Set<Task> publishPluginZipPublicationToTasks = project.getTasks() .stream() .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo")) .collect(Collectors.toSet()); + if (!publishPluginZipPublicationToTasks.isEmpty()) { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication")); + } } else { project.getLogger() From ef50f78d27e78c9b367d3c44f2f46450ca2749ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Oct 2022 08:24:05 -0700 Subject: [PATCH 10/14] Bump gson from 2.9.0 to 2.9.1 in 
/test/fixtures/hdfs-fixture (#4066) * Bump gson from 2.9.0 to 2.9.1 in /test/fixtures/hdfs-fixture Bumps [gson](https://github.com/google/gson) from 2.9.0 to 2.9.1. - [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/master/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.9.0...gson-parent-2.9.1) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 +++- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f7893881fdf1..621e64717338a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] +### Dependencies +- Bumps `gson` from 2.9.0 to 2.9.1 ### Added @@ -156,4 +158,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 40e3a1dc0587d..2e541572b7385 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -40,7 +40,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.9.0' + api 'com.google.code.gson:gson:2.9.1' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" From 109319e0724d5dfa46f5554a7ef819560eaa1fac Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 11:25:39 -0500 Subject: [PATCH 11/14] Always auto release the flood stage block (#4703) * Always auto release the flood stage block Removes support for using a system property to disable the automatic release of the write block applied when a node exceeds the flood-stage watermark. 
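For context, here is a minimal, self-contained sketch of the new fail-fast behavior. The property key and exception message are taken from the DiskThresholdSettings change below; the wrapper class, main method, and success message are illustrative only and not part of this change:

    public class FloodStageBlockPropertyCheck {
        public static void main(String[] args) {
            // Mirrors the validation now performed in DiskThresholdSettings'
            // static initializer: the former opt-out property may no longer be
            // set to any value, not even "false".
            final String key = "opensearch.disk.auto_release_flood_stage_block";
            final String property = System.getProperty(key);
            if (property != null) {
                throw new IllegalArgumentException(
                    "system property [" + key + "] has been removed in 3.0.0 and is not supported anymore"
                );
            }
            // With the property unset, the read-only-allow-delete block is always
            // auto-released once the node drops back under the flood-stage watermark.
            System.out.println("flood stage block will be auto-released");
        }
    }

Running a node (or this sketch) with -Dopensearch.disk.auto_release_flood_stage_block=false, which previously kept the block in place, now fails fast with the exception above instead of being honored.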
Signed-off-by: Nicholas Walter Knize * update IAE message Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../allocation/DiskThresholdMonitor.java | 37 ++----------------- .../allocation/DiskThresholdSettings.java | 18 +++------ .../DiskThresholdSettingsTests.java | 1 - 4 files changed, 11 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 621e64717338a..e35d8c3eda0b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -84,6 +84,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) ### Fixed diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java index a89271261ed14..0a6cfd8c04977 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.client.Client; @@ -54,7 +53,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.Strings; import org.opensearch.common.collect.ImmutableOpenMap; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; @@ -88,7 +86,6 @@ public class DiskThresholdMonitor { private final RerouteService rerouteService; private final AtomicLong lastRunTimeMillis = new AtomicLong(Long.MIN_VALUE); private final AtomicBoolean checkInProgress = new AtomicBoolean(); - private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(logger.getName()); /** * The IDs of the nodes that were over the low threshold in the last check (and maybe over another threshold too). 
Tracked so that we @@ -121,14 +118,6 @@ public DiskThresholdMonitor( this.rerouteService = rerouteService; this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); this.client = client; - if (diskThresholdSettings.isAutoReleaseIndexEnabled() == false) { - deprecationLogger.deprecate( - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"), - "[{}] will be removed in version {}", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - LegacyESVersion.V_7_4_0.major + 1 - ); - } } private void checkFinished() { @@ -371,23 +360,7 @@ public void onNewInfo(ClusterInfo info) { .collect(Collectors.toSet()); if (indicesToAutoRelease.isEmpty() == false) { - if (diskThresholdSettings.isAutoReleaseIndexEnabled()) { - logger.info("releasing read-only-allow-delete block on indices: [{}]", indicesToAutoRelease); - updateIndicesReadOnly(indicesToAutoRelease, listener, false); - } else { - deprecationLogger.deprecate( - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"), - "[{}] will be removed in version {}", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - LegacyESVersion.V_7_4_0.major + 1 - ); - logger.debug( - "[{}] disabled, not releasing read-only-allow-delete block on indices: [{}]", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - indicesToAutoRelease - ); - listener.onResponse(null); - } + updateIndicesReadOnly(indicesToAutoRelease, listener, false); } else { logger.trace("no auto-release required"); listener.onResponse(null); @@ -421,11 +394,9 @@ private void markNodesMissingUsageIneligibleForRelease( ) { for (RoutingNode routingNode : routingNodes) { if (usages.containsKey(routingNode.nodeId()) == false) { - if (routingNode != null) { - for (ShardRouting routing : routingNode) { - String indexName = routing.index().getName(); - indicesToMarkIneligibleForAutoRelease.add(indexName); - } + for (ShardRouting routing : routingNode) { + String indexName = routing.index().getName(); + indicesToMarkIneligibleForAutoRelease.add(indexName); } } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java index 0ce0b1bd7b688..56a1ccad112c5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -108,18 +109,15 @@ public class DiskThresholdSettings { private volatile TimeValue rerouteInterval; private volatile Double freeDiskThresholdFloodStage; private volatile ByteSizeValue freeBytesThresholdFloodStage; - private static final boolean autoReleaseIndexEnabled; - public static final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block"; static { + assert Version.CURRENT.major == Version.V_2_0_0.major + 1; // this check is unnecessary in v4 + final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block"; + final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); - if (property == null) { - autoReleaseIndexEnabled = true; - } else if (Boolean.FALSE.toString().equals(property)) { - 
autoReleaseIndexEnabled = false; - } else { + if (property != null) { throw new IllegalArgumentException( - AUTO_RELEASE_INDEX_ENABLED_KEY + " may only be unset or set to [false] but was [" + property + "]" + "system property [" + AUTO_RELEASE_INDEX_ENABLED_KEY + "] has been removed in 3.0.0 and is not supported anymore" ); } } @@ -371,10 +369,6 @@ public ByteSizeValue getFreeBytesThresholdFloodStage() { return freeBytesThresholdFloodStage; } - public boolean isAutoReleaseIndexEnabled() { - return autoReleaseIndexEnabled; - } - public boolean includeRelocations() { return includeRelocations; } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java index 363484777fe66..5184ca7fe887d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java @@ -60,7 +60,6 @@ public void testDefaults() { assertTrue(diskThresholdSettings.includeRelocations()); assertEquals(zeroBytes, diskThresholdSettings.getFreeBytesThresholdFloodStage()); assertEquals(5.0D, diskThresholdSettings.getFreeDiskThresholdFloodStage(), 0.0D); - assertTrue(diskThresholdSettings.isAutoReleaseIndexEnabled()); } public void testUpdate() { From 2e4b27b243d8bd2c515f66cf86c6d1d6a601307f Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Fri, 7 Oct 2022 22:26:38 +0530 Subject: [PATCH 12/14] Controlling discovery for decommissioned nodes (#4590) * Controlling discovery for decommissioned nodes Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../cluster/coordination/Coordinator.java | 28 +++++++- .../cluster/coordination/JoinHelper.java | 16 ++++- .../coordination/JoinTaskExecutor.java | 32 ++++----- .../decommission/DecommissionService.java | 38 ++++++++-- .../common/settings/ClusterSettings.java | 1 + .../org/opensearch/discovery/PeerFinder.java | 29 +++++++- .../coordination/CoordinatorTests.java | 43 +++++++++++ .../cluster/coordination/JoinHelperTests.java | 9 ++- .../coordination/JoinTaskExecutorTests.java | 45 ++++++++++++ .../cluster/coordination/NodeJoinTests.java | 71 +++++++++++++++++++ .../opensearch/discovery/PeerFinderTests.java | 46 ++++++++++++ 12 files changed, 329 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e35d8c3eda0b3..04b62c11a6865 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) +- Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 7ac716084793d..fbb345ea3a441 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ 
b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -105,6 +105,7 @@ import java.util.stream.StreamSupport; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -138,6 +139,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final Settings settings; private final boolean singleNodeDiscovery; + private volatile boolean localNodeCommissioned; private final ElectionStrategy electionStrategy; private final TransportService transportService; private final ClusterManagerService clusterManagerService; @@ -218,7 +220,8 @@ public Coordinator( this::joinLeaderInTerm, this.onJoinValidators, rerouteService, - nodeHealthService + nodeHealthService, + this::onNodeCommissionStatusChange ); this.persistedStateSupplier = persistedStateSupplier; this.noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); @@ -281,6 +284,7 @@ public Coordinator( joinHelper::logLastFailedJoinAttempt ); this.nodeHealthService = nodeHealthService; + this.localNodeCommissioned = true; } private ClusterFormationState getClusterFormationState() { @@ -596,6 +600,9 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinRequest.getSourceNode().getVersion(), stateForJoinValidation.getNodes().getMinNodeVersion() ); + // we are checking source node commission status here to reject any join request coming from a decommissioned node + // even before executing the join task to fail fast + JoinTaskExecutor.ensureNodeCommissioned(joinRequest.getSourceNode(), stateForJoinValidation.metadata()); } sendValidateJoinRequest(stateForJoinValidation, joinRequest, joinCallback); } else { @@ -1424,6 +1431,17 @@ protected void onFoundPeersUpdated() { } } + // package-visible for testing + synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + this.localNodeCommissioned = localNodeCommissioned; + peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + } + + // package-visible for testing + boolean localNodeCommissioned() { + return localNodeCommissioned; + } + private void startElectionScheduler() { assert electionScheduler == null : electionScheduler; @@ -1450,6 +1468,14 @@ public void run() { return; } + // if either the localNodeCommissioned flag or the last accepted state thinks it should skip pre voting, we will + // acknowledge it + if (nodeCommissioned(lastAcceptedState.nodes().getLocalNode(), lastAcceptedState.metadata()) == false + || localNodeCommissioned == false) { + logger.debug("skip prevoting as local node is decommissioned"); + return; + } + if (prevotingRound != null) { prevotingRound.close(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 656e6d220720f..a66152b8016ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.NotClusterManagerException; import 
org.opensearch.cluster.coordination.Coordinator.Mode; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; @@ -57,6 +58,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; @@ -78,6 +80,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -118,6 +121,7 @@ public class JoinHelper { private final AtomicReference<FailedJoinAttempt> lastFailedJoinAttempt = new AtomicReference<>(); private final Supplier<JoinTaskExecutor> joinTaskExecutorGenerator; + private final Consumer<Boolean> nodeCommissioned; JoinHelper( Settings settings, @@ -130,12 +134,14 @@ public class JoinHelper { Function<StartJoinRequest, Join> joinLeaderInTerm, Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + Consumer<Boolean> nodeCommissioned ) { this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); + this.nodeCommissioned = nodeCommissioned; this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { private final long term = currentTermSupplier.getAsLong(); @@ -342,6 +348,7 @@ public void handleResponse(Empty response) { pendingOutgoingJoins.remove(dedupKey); logger.debug("successfully joined {} with {}", destination, joinRequest); lastFailedJoinAttempt.set(null); + nodeCommissioned.accept(true); onCompletion.run(); } @@ -352,6 +359,13 @@ public void handleException(TransportException exp) { FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); attempt.logNow(); lastFailedJoinAttempt.set(attempt); + if (exp instanceof RemoteTransportException && (exp.getCause() instanceof NodeDecommissionedException)) { + logger.info( + "local node is decommissioned [{}]. 
Will not be able to join the cluster", + exp.getCause().getMessage() + ); + nodeCommissioned.accept(false); + } onCompletion.run(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 814aa17255931..ac237db85ee5b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -39,9 +39,6 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.decommission.DecommissionAttribute; -import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; -import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -64,6 +61,7 @@ import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; /** @@ -196,6 +194,9 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); + // the same check is already performed in handleJoinRequest; we repeat it here because this method + // must guarantee that a decommissioned node can never join the cluster, which ensures correctness + ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); @@ -203,7 +204,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (node.isClusterManagerNode()) { joiniedNodeNameIds.put(node.getName(), node.getId()); } - } catch (IllegalArgumentException | IllegalStateException e) { + } catch (IllegalArgumentException | IllegalStateException | NodeDecommissionedException e) { results.failure(joinTask, e); continue; } @@ -477,22 +478,13 @@ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version } public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) { - DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { - DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); - DecommissionStatus status = decommissionAttributeMetadata.status(); - if (decommissionAttribute != null && status != null) { - // We will let the node join the cluster if the current status is in FAILED state - if (node.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()) - && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { - throw new NodeDecommissionedException( - "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", - node.toString(), - decommissionAttribute.toString(), - 
status.status() - ); - } - } + if (nodeCommissioned(node, metadata) == false) { + throw new NodeDecommissionedException( + "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", + node.toString(), + metadata.decommissionAttributeMetadata().decommissionAttribute().toString(), + metadata.decommissionAttributeMetadata().status().status() + ); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index fcab411f073ba..5def2733b5ded 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -389,10 +389,6 @@ private Set filterNodesWithDecommissionAttribute( return nodesWithDecommissionAttribute; } - private static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { - return discoveryNode.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()); - } - private static void validateAwarenessAttribute( final DecommissionAttribute decommissionAttribute, List awarenessAttributes, @@ -531,4 +527,38 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } + + /** + * Utility method to check if the node has the decommissioned attribute + * + * @param discoveryNode node to check on + * @param decommissionAttribute attribute to be checked with + * @return true if the node has the decommissioned attribute, false otherwise + */ + public static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { + String nodeAttributeValue = discoveryNode.getAttributes().get(decommissionAttribute.attributeName()); + return nodeAttributeValue != null && nodeAttributeValue.equals(decommissionAttribute.attributeValue()); + } + + /** + * Utility method to check if the node is commissioned or not + * + * @param discoveryNode node to check on + * @param metadata current cluster metadata, used to check the commissioning status of the node + * @return true if the node is commissioned, false otherwise + */ + public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata metadata) { + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null) { + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + DecommissionStatus status = decommissionAttributeMetadata.status(); + if (decommissionAttribute != null && status != null) { + if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) + && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { + return false; + } + } + } + return true; + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 1665614c18496..54579031aac08 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -534,6 +534,7 @@ public void apply(Settings value, Settings current, Settings previous) { PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING,
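Aside: the commission check introduced above reduces to a small pure function over a node's attributes and the decommission metadata. Below is a minimal, self-contained sketch of that logic for illustration only; Node, DecommissionMetadata and Status are hypothetical stand-ins, not the OpenSearch types.

import java.util.Map;

// Illustrative stand-ins for DiscoveryNode and DecommissionAttributeMetadata.
enum Status { IN_PROGRESS, SUCCESSFUL, FAILED }
record DecommissionMetadata(String attributeName, String attributeValue, Status status) {}
record Node(String id, Map<String, String> attributes) {}

final class CommissionCheckSketch {
    // Mirrors DecommissionService.nodeCommissioned: a node counts as decommissioned only when its
    // attribute matches the decommissioned value AND the decommission is IN_PROGRESS or SUCCESSFUL.
    static boolean nodeCommissioned(Node node, DecommissionMetadata metadata) {
        if (metadata == null) {
            return true; // no decommission in effect
        }
        String value = node.attributes().get(metadata.attributeName());
        boolean attributeMatches = value != null && value.equals(metadata.attributeValue());
        boolean decommissionActive = metadata.status() == Status.IN_PROGRESS || metadata.status() == Status.SUCCESSFUL;
        return !(attributeMatches && decommissionActive);
    }

    public static void main(String[] args) {
        DecommissionMetadata md = new DecommissionMetadata("zone", "zone-1", Status.SUCCESSFUL);
        System.out.println(nodeCommissioned(new Node("n1", Map.of("zone", "zone-1")), md)); // false: join rejected
        System.out.println(nodeCommissioned(new Node("n2", Map.of("zone", "zone-2")), md)); // true: join allowed
        System.out.println(nodeCommissioned(new Node("n3", Map.of("zone", "zone-1")),
            new DecommissionMetadata("zone", "zone-1", Status.FAILED))); // true: a FAILED decommission lets the node rejoin
    }
}

The null check on the attribute value is the behavioural difference from the removed private helper, which dereferenced the map lookup directly and could throw a NullPointerException for a joining node that does not carry the awareness attribute at all.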
EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING, + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING, PeerFinder.DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING, ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING, ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index a601a6fbe4d82..e8b6c72c512a2 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -84,6 +84,14 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); + // the time between attempts to find all peers when the node is in a decommissioned state; defaults to 2 minutes + public static final Setting DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING = Setting.timeSetting( + "discovery.find_peers_interval_during_decommission", + TimeValue.timeValueSeconds(120L), + TimeValue.timeValueMillis(1000), + Setting.Property.NodeScope + ); + public static final Setting DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING = Setting.timeSetting( "discovery.request_peers_timeout", TimeValue.timeValueMillis(3000), @@ -91,7 +99,8 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); - private final TimeValue findPeersInterval; + private final Settings settings; + private TimeValue findPeersInterval; private final TimeValue requestPeersTimeout; private final Object mutex = new Object(); @@ -112,6 +121,7 @@ public PeerFinder( TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver ) { + this.settings = settings; findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings); this.transportService = transportService; @@ -128,6 +138,23 @@ public PeerFinder( ); } + public synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + findPeersInterval = localNodeCommissioned + ? 
DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings) + : DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING.get(settings); + logger.info( + "setting findPeersInterval to [{}] as node commission status = [{}] for local node [{}]", + findPeersInterval, + localNodeCommissioned, + transportService.getLocalNode() + ); + } + + // package private for tests + TimeValue getFindPeersInterval() { + return findPeersInterval; + } + public void activate(final DiscoveryNodes lastAcceptedNodes) { logger.trace("activating with {}", lastAcceptedNodes); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index d96c972bc6021..74c5d0fcccbed 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -87,6 +87,7 @@ import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; +import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -1780,6 +1781,48 @@ public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { } } + public void testLocalNodeAlwaysCommissionedWithoutDecommissionedException() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + } + } + + public void testClusterStabilisesForPreviouslyDecommissionedNode() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + final ClusterNode leader = cluster.getAnyLeader(); + + ClusterNode decommissionedNode = cluster.new ClusterNode( + nextNodeIndex.getAndIncrement(), true, leader.nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info") + ); + decommissionedNode.coordinator.onNodeCommissionStatusChange(false); + cluster.clusterNodes.add(decommissionedNode); + + assertFalse(decommissionedNode.coordinator.localNodeCommissioned()); + + cluster.stabilise( + // the find peers interval is updated to the during-decommission interval + defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING) + // One message delay to send a join + + DEFAULT_DELAY_VARIABILITY + // Commit a new cluster state with the new node(s). 
Might be split into multiple commits, and each might need a + // follow-up reconfiguration + + 3 * 2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY + ); + + // once the cluster stabilises, the node joins and is commissioned again + assertTrue(decommissionedNode.coordinator.localNodeCommissioned()); + } + } + private ClusterState buildNewClusterStateWithVotingConfigExclusion( ClusterState currentState, Set newVotingConfigExclusion diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index a3c945cdbac3a..7b21042b2ed4a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -90,7 +90,8 @@ public void testJoinDeduplication() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> new StatusInfo(HEALTHY, "info") + () -> new StatusInfo(HEALTHY, "info"), + nodeCommissioned -> {} ); transportService.start(); @@ -230,7 +231,8 @@ private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - null + null, + nodeCommissioned -> {} ); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); @@ -284,7 +286,8 @@ public void testJoinFailureOnUnhealthyNodes() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> nodeHealthServiceStatus.get() + () -> nodeHealthServiceStatus.get(), + nodeCommissioned -> {} ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 52922bc6a0e83..66a3b00f2979d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -261,6 +261,51 @@ public void testJoinClusterWithDifferentDecommission() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinFailedForDecommissionedNode() throws Exception { + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone1"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + final ClusterState clusterManagerClusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + + final DiscoveryNode 
decommissionedNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + String decommissionedNodeID = decommissionedNode.getId(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterManagerClusterState, + List.of(new JoinTaskExecutor.Task(decommissionedNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertFalse(taskResult.isSuccess()); + assertTrue(taskResult.getFailure() instanceof NodeDecommissionedException); + assertFalse(result.resultingState.getNodes().nodeExists(decommissionedNodeID)); + } + public void testJoinClusterWithDecommissionFailed() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index c77baba5fe167..18a7b892a424c 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -39,6 +39,10 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -775,6 +779,60 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { assertTrue(clusterStateHasNode(node1)); } + public void testJoinFailsWhenDecommissioned() { + DiscoveryNode node0 = newNode(0, true); + DiscoveryNode node1 = newNode(1, true); + long initialTerm = randomLongBetween(1, 10); + long initialVersion = randomLongBetween(1, 10); + setupFakeClusterManagerServiceAndCoordinator( + initialTerm, + initialStateWithDecommissionedAttribute( + initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), + new DecommissionAttribute("zone", "zone1") + ), + () -> new StatusInfo(HEALTHY, "healthy-info") + ); + assertFalse(isLocalNodeElectedMaster()); + long newTerm = initialTerm + randomLongBetween(1, 10); + joinNodeAndRun(new JoinRequest(node0, newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertFalse(clusterStateHasNode(node1)); + joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertTrue(clusterStateHasNode(node1)); + DiscoveryNode decommissionedNode = new DiscoveryNode( + "data_2", + 2 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long anotherTerm = newTerm + randomLongBetween(1, 10); + + assertThat( + expectThrows( + NodeDecommissionedException.class, + () -> 
joinNodeAndRun(new JoinRequest(decommissionedNode, anotherTerm, Optional.empty())) + ).getMessage(), + containsString("with current status of decommissioning") + ); + assertFalse(clusterStateHasNode(decommissionedNode)); + + DiscoveryNode node3 = new DiscoveryNode( + "data_3", + 3 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone2"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long termForNode3 = anotherTerm + randomLongBetween(1, 10); + + joinNodeAndRun(new JoinRequest(node3, termForNode3, Optional.empty())); + assertTrue(clusterStateHasNode(node3)); + } + private boolean isLocalNodeElectedMaster() { return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } @@ -782,4 +840,17 @@ private boolean isLocalNodeElectedMaster() { private boolean clusterStateHasNode(DiscoveryNode node) { return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } + + private static ClusterState initialStateWithDecommissionedAttribute( + ClusterState clusterState, + DecommissionAttribute decommissionAttribute + ) { + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + return ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + } } diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 5e7dede0309c6..7e7bb2f0a2911 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -807,6 +807,42 @@ public void testReconnectsToDisconnectedNodes() { assertFoundPeers(rebootedOtherNode); } + public void testConnectionAttemptDuringDecommissioning() { + boolean localNodeCommissioned = randomBoolean(); + peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + + long findPeersInterval = peerFinder.getFindPeersInterval().millis(); + + final DiscoveryNode otherNode = newDiscoveryNode("node-1"); + providedAddresses.add(otherNode.getAddress()); + transportAddressConnector.addReachableNode(otherNode); + + peerFinder.activate(lastAcceptedNodes); + runAllRunnableTasks(); + assertFoundPeers(otherNode); + + transportAddressConnector.reachableNodes.clear(); + final DiscoveryNode newNode = new DiscoveryNode("new-node", otherNode.getAddress(), Version.CURRENT); + transportAddressConnector.addReachableNode(newNode); + + connectedNodes.remove(otherNode); + disconnectedNodes.add(otherNode); + + // peer discovery will be delayed now + if (localNodeCommissioned == false) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + assertPeersNotDiscovered(newNode); + } + + final long expectedTime = CONNECTION_TIMEOUT_MILLIS + findPeersInterval; + while (deterministicTaskQueue.getCurrentTimeMillis() < expectedTime) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + } + assertFoundPeers(newNode); + } + private void respondToRequests(Function responseFactory) { final CapturedRequest[] capturedRequests = capturingTransport.getCapturedRequestsAndClear(); for (final CapturedRequest capturedRequest : capturedRequests) { @@ -828,6 +864,16 @@ private void assertFoundPeers(DiscoveryNode... 
expectedNodesArray) { assertNotifiedOfAllUpdates(); } + private void assertPeersNotDiscovered(DiscoveryNode... undiscoveredNodesArray) { + final Set undiscoveredNodes = Arrays.stream(undiscoveredNodesArray).collect(Collectors.toSet()); + final List actualNodesList = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false) + .collect(Collectors.toList()); + final HashSet actualNodesSet = new HashSet<>(actualNodesList); + Set intersection = new HashSet<>(actualNodesSet); + intersection.retainAll(undiscoveredNodes); + assertEquals(0, intersection.size()); + } + private void assertNotifiedOfAllUpdates() { final Stream actualNodes = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false); final Stream notifiedNodes = StreamSupport.stream(foundPeersFromNotification.spliterator(), false); From fe3994ccdf62735907bc4834f93c71d97636d150 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 15:41:16 -0500 Subject: [PATCH 13/14] [Remove] LegacyESVersion.V_7_2_* and V_7_3_* constants (#4702) Removes all usages of the LegacyESVersion.V_7_2_* and LegacyESVersion.V_7_3_* version constants along with related ancient APIs. Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 3 +- .../common/CommonAnalysisModulePlugin.java | 45 ++++----- .../upgrades/FullClusterRestartIT.java | 8 +- .../org/opensearch/upgrades/RecoveryIT.java | 59 ++++-------- .../gateway/ReplicaShardAllocatorIT.java | 3 +- .../indices/IndicesLifecycleListenerIT.java | 11 +-- .../java/org/opensearch/LegacyESVersion.java | 5 - .../org/opensearch/OpenSearchException.java | 2 +- .../cluster/health/ClusterHealthRequest.java | 11 +-- .../TransportNodesHotThreadsAction.java | 4 +- .../node/info/TransportNodesInfoAction.java | 4 +- ...nsportNodesReloadSecureSettingsAction.java | 4 +- .../node/stats/TransportNodesStatsAction.java | 4 +- .../node/usage/TransportNodesUsageAction.java | 4 +- .../create/CreateSnapshotRequest.java | 9 +- .../status/TransportNodesSnapshotsStatus.java | 4 +- .../stats/TransportClusterStatsAction.java | 4 +- .../indices/alias/IndicesAliasesRequest.java | 12 +-- .../admin/indices/analyze/AnalyzeAction.java | 63 ++---------- .../indices/close/CloseIndexRequest.java | 11 +-- .../indices/close/CloseIndexResponse.java | 18 +--- ...TransportVerifyShardBeforeCloseAction.java | 11 +-- .../find/NodeFindDanglingIndexRequest.java | 4 +- .../list/NodeListDanglingIndicesRequest.java | 4 +- .../admin/indices/stats/CommonStatsFlags.java | 9 +- .../fieldcaps/FieldCapabilitiesRequest.java | 10 +- .../fieldcaps/FieldCapabilitiesResponse.java | 11 +-- .../action/search/GetAllPitNodeRequest.java | 4 +- .../action/support/nodes/BaseNodeRequest.java | 69 ------------- .../support/nodes/TransportNodesAction.java | 2 +- .../cluster/SnapshotsInProgress.java | 11 +-- .../cluster/metadata/IndexMetadata.java | 24 ++--- .../opensearch/cluster/metadata/Metadata.java | 19 +--- .../metadata/MetadataIndexStateService.java | 20 +--- .../cluster/node/DiscoveryNode.java | 96 ++++++------------- .../TransportNodesListGatewayMetaState.java | 4 +- ...ransportNodesListGatewayStartedShards.java | 4 +- .../index/engine/ReadOnlyEngine.java | 26 ++--- .../org/opensearch/index/get/GetResult.java | 25 +---- .../index/mapper/TextFieldMapper.java | 3 +- .../index/query/IntervalsSourceProvider.java | 11 +-- .../index/refresh/RefreshStats.java | 13 +-- .../index/seqno/ReplicationTracker.java | 2 +- .../recovery/RecoveryCleanFilesRequest.java | 12 +-- .../RecoveryTranslogOperationsRequest.java | 5 +-
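Aside: nearly every file in this commit follows the same mechanical transformation: a read or write that was gated on the peer's wire version becomes unconditional once the compatibility floor has moved past 7.3. A schematic sketch of the pattern follows; the java.io streams and the integer version id are simplified stand-ins for the OpenSearch StreamInput/StreamOutput and Version types, not the real API.

import java.io.*;

final class VersionGateSketch {
    static final int V_7_2_0 = 7020099; // legacy internal version id format

    // Before: read the field only when the sender is on or after 7.2.0, else fall back to a default.
    static boolean readFlagBefore(DataInput in, int remoteVersion) throws IOException {
        return remoteVersion >= V_7_2_0 ? in.readBoolean() : false;
    }

    // After: with no wire-compatible peer older than 7.2.0 left, the gate is dead code
    // and the field is read unconditionally.
    static boolean readFlagAfter(DataInput in) throws IOException {
        return in.readBoolean();
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new DataOutputStream(bytes).writeBoolean(true);
        System.out.println(readFlagAfter(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())))); // true
    }
}

The individual hunks below (ClusterHealthRequest, CreateSnapshotRequest, CloseIndexRequest and others) are instances of exactly this rewrite, with the read and write sides changed in lockstep.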
.../TransportNodesListShardStoreMetadata.java | 4 +- .../AutoDateHistogramAggregationBuilder.java | 25 +---- .../bucket/histogram/DateIntervalWrapper.java | 34 +------ .../metrics/InternalGeoCentroid.java | 17 +--- .../search/query/QuerySearchResult.java | 30 ------ .../search/sort/FieldSortBuilder.java | 9 +- .../opensearch/snapshots/SnapshotInfo.java | 15 +-- .../node/tasks/CancellableTasksTests.java | 4 +- .../node/tasks/ResourceAwareTasksTests.java | 4 +- .../node/tasks/TaskManagerTestCase.java | 6 +- .../cluster/node/tasks/TestTaskPlugin.java | 3 +- .../node/tasks/TransportTasksActionTests.java | 4 +- .../indices/close/CloseIndexRequestTests.java | 16 +--- .../nodes/TransportNodesActionTests.java | 3 +- .../termvectors/TermVectorsUnitTests.java | 4 +- 60 files changed, 212 insertions(+), 653 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 04b62c11a6865..7ee30dc84e9fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_* and V_7_3_* Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) ### Fixed @@ -160,4 +161,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 7cad01c5cc00a..b0850d0b9144d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -563,18 +563,22 @@ public List getPreConfiguredTokenFilters() { ) ) ); - filters.add(PreConfiguredTokenFilter.openSearchVersion("word_delimiter_graph", false, false, (input, version) -> { - boolean adjustOffsets = version.onOrAfter(LegacyESVersion.V_7_3_0); - return new WordDelimiterGraphFilter( - input, - adjustOffsets, - WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, - WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS - | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, - null - ); - })); + filters.add( + PreConfiguredTokenFilter.openSearchVersion( + "word_delimiter_graph", + false, + false, + (input, version) -> new WordDelimiterGraphFilter( + input, + true, + WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, +
WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS + | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, + null + ) + ) + ); return filters; } @@ -588,12 +592,12 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edge_ngram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); - })); + tokenizers.add( + PreConfiguredTokenizer.openSearchVersion( + "edge_ngram", + (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) + ) + ); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -619,10 +623,7 @@ public List getPreConfiguredTokenizers() { + "Please change the tokenizer name to [edge_ngram] instead." ); } - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 0ed51b9d8a011..a3db7532c3a10 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -1003,12 +1003,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - if (getOldClusterVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 3d71f4a198aac..dbb18aec19fca 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -245,7 +245,6 @@ private String getNodeId(Predicate versionPredicate) throws IOException if (versionPredicate.test(version)) { return id; } - return id; } return null; } @@ -457,15 +456,10 @@ public void testRecoveryClosedIndex() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = 
indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -491,14 +485,10 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index is created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index is created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -525,27 +515,20 @@ public void testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - if (minimumNodeVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - switch (CLUSTER_TYPE) { - case OLD: break; - case MIXED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); - break; - case UPGRADED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); - break; - } - } - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); + switch (CLUSTER_TYPE) { + case OLD: break; + case MIXED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); + break; + case UPGRADED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); + break; + } } /** * Returns the version in which the given index has been created diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java index db46fb4424848..6d05ecd0b56b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import org.opensearch.LegacyESVersion; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -512,7 +511,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { } /** - * If the recovery source is on an old node (before
{@link LegacyESVersion#V_7_2_0}
) then the recovery target + * If the recovery source is on an old node (before
{@code LegacyESVersion#V_7_2_0}
) then the recovery target * won't have the safe commit after phase1 because the recovery source does not send the global checkpoint in the clean_files * step. And if the recovery fails and retries, then the recovery stage might not transition properly. This test simulates * this behavior by changing the global checkpoint in phase1 to unassigned. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java index 72f28e94528ba..17a7d4c84b6fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java @@ -32,9 +32,7 @@ package org.opensearch.indices; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -246,13 +244,8 @@ public void testIndexStateShardChanged() throws Throwable { assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6)); assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1)); - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_2_0)) { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - } else { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED); - } + assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); + assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); } private static void assertShardStatesMatch( diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 1eb22a6bef3b5..283a6581a64bb 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,11 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_1 = new LegacyESVersion(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_2 = new LegacyESVersion(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java 
b/server/src/main/java/org/opensearch/OpenSearchException.java index 17ece23f819a2..e9f0d831d2977 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -601,7 +601,7 @@ public static void generateFailureXContent(XContentBuilder builder, Params param } t = t.getCause(); } - builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); + builder.field(ERROR, ExceptionsHelper.summaryMessage(e)); return; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index 1dedf481dec56..84a7616fe6b06 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.health; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -90,11 +89,7 @@ public ClusterHealthRequest(StreamInput in) throws IOException { waitForEvents = Priority.readFrom(in); } waitForNoInitializingShards = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - indicesOptions = IndicesOptions.lenientExpandOpen(); - } + indicesOptions = IndicesOptions.readIndicesOptions(in); } @Override @@ -122,9 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { Priority.writeTo(waitForEvents, out); } out.writeBoolean(waitForNoInitializingShards); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions.writeIndicesOptions(out); - } + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index e8429580ec887..4c71993251f4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -35,7 +35,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -43,6 +42,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.monitor.jvm.HotThreads; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -117,7 +117,7 @@ protected NodeHotThreads nodeOperation(NodeRequest request) { * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesHotThreadsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 7bcf83ba28111..ee7b287b878e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -126,7 +126,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { * * @opensearch.internal */ - public static class NodeInfoRequest extends BaseNodeRequest { + public static class NodeInfoRequest extends TransportRequest { NodesInfoRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index d7ad4357fa046..920c66bc5c543 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; @@ -54,6 +53,7 @@ import org.opensearch.plugins.ReloadablePlugin; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -188,7 +188,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesReloadSecureSettingsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 644c7f02d45f0..5d5d54c8fe7ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 
+41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -127,7 +127,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { * * @opensearch.internal */ - public static class NodeStatsRequest extends BaseNodeRequest { + public static class NodeStatsRequest extends TransportRequest { NodesStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index c7612f7e15838..dbd3673149efe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; @@ -117,7 +117,7 @@ protected NodeUsage nodeOperation(NodeUsageRequest nodeUsageRequest) { * * @opensearch.internal */ - public static class NodeUsageRequest extends BaseNodeRequest { + public static class NodeUsageRequest extends TransportRequest { NodesUsageRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index d78a4c95246b4..cb64718ed5843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -60,7 +60,6 @@ import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; import static org.opensearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Create snapshot request @@ -124,9 +123,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { includeGlobalState = in.readBoolean(); waitForCompletion = in.readBoolean(); partial = in.readBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } + userMetadata = in.readMap(); } @Override @@ -140,9 +137,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeGlobalState); out.writeBoolean(waitForCompletion); out.writeBoolean(partial); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); } @Override diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 86d0499a23f9e..e9bf564afaf32 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -36,7 +36,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -51,6 +50,7 @@ import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -207,7 +207,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) th * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final List snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a13932e137ab0..401813a6174fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -57,6 +56,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.Transports; @@ -216,7 +216,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq * * @opensearch.internal */ - public static class ClusterStatsNodeRequest extends BaseNodeRequest { + public static class ClusterStatsNodeRequest extends TransportRequest { ClusterStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index eb2d2706a6531..bb28623430f2d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -88,11 +88,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest tokens, DetailAnalyzeResponse detail) { } public Response(StreamInput in) throws IOException { - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - tokens = tokenArray != null ? Arrays.asList(tokenArray) : null; - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(new AnalyzeToken(in)); - } - } else { - tokens = null; - } - } + AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); + tokens = tokenArray != null ? Arrays.asList(tokenArray) : null; detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); } @@ -371,22 +358,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = null; - if (tokens != null) { - tokenArray = tokens.toArray(new AnalyzeToken[0]); - } - out.writeOptionalArray(tokenArray); - } else { - if (tokens != null) { - out.writeVInt(tokens.size()); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } + AnalyzeToken[] tokenArray = null; + if (tokens != null) { + tokenArray = tokens.toArray(new AnalyzeToken[0]); } + out.writeOptionalArray(tokenArray); out.writeOptionalWriteable(detail); } @@ -766,19 +742,7 @@ public AnalyzeTokenList(String name, AnalyzeToken[] tokens) { AnalyzeTokenList(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = new AnalyzeToken(in); - } - } else { - tokens = null; - } - } + tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); } public String getName() { @@ -811,18 +775,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalArray(tokens); - } else { - if (tokens != null) { - out.writeVInt(tokens.length); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } - } + out.writeOptionalArray(tokens); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index b16cabfda4d67..1095cec447442 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -61,11 +60,7 @@ public CloseIndexRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards = ActiveShardCount.readFrom(in); - } else { - waitForActiveShards = ActiveShardCount.NONE; - } + waitForActiveShards = 
ActiveShardCount.readFrom(in); } public CloseIndexRequest() {} @@ -143,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards.writeTo(out); - } + waitForActiveShards.writeTo(out); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 1fc9017359a8c..0388ea47bfc69 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; @@ -49,7 +48,6 @@ import java.util.List; import java.util.Objects; -import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; /** @@ -62,12 +60,8 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List indices; CloseIndexResponse(StreamInput in) throws IOException { - super(in, in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - indices = unmodifiableList(in.readList(IndexResult::new)); - } else { - indices = unmodifiableList(emptyList()); - } + super(in, true); + indices = unmodifiableList(in.readList(IndexResult::new)); } public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { @@ -82,12 +76,8 @@ public List getIndices() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - writeShardsAcknowledged(out); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeList(indices); - } + writeShardsAcknowledged(out); + out.writeList(indices); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index fe39e2a254301..691b2c7c95730 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; @@ -205,11 +204,7 @@ public static class ShardRequest extends ReplicationRequest { ShardRequest(StreamInput in) throws IOException { super(in); clusterBlock = new ClusterBlock(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - phase1 = in.readBoolean(); - } else { - phase1 = false; - } + phase1 = in.readBoolean(); } public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) { @@ -228,9 +223,7 @@ public String 
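// [Illustrative sketch, not part of this patch] The read-side idiom being
// deleted throughout this series, shown for the CloseIndexRequest field above:
// while peers older than the cutoff could exist, the reader substituted a
// default (here ActiveShardCount.NONE); once the wire-compatibility floor
// passes the cutoff, only the unconditional read survives. Hypothetical helper:
static ActiveShardCount readWaitForActiveShards(StreamInput in) throws IOException {
    return ActiveShardCount.readFrom(in);  // the V_7_2_0 gate and NONE fallback are dead code now
}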
toString() { public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); clusterBlock.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeBoolean(phase1); - } + out.writeBoolean(phase1); } public ClusterBlock clusterBlock() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java index 6026dd10c607b..6885de74e4479 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java @@ -34,16 +34,16 @@ import java.io.IOException; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; /** * Used when querying every node in the cluster for a specific dangling index. * * @opensearch.internal */ -public class NodeFindDanglingIndexRequest extends BaseNodeRequest { +public class NodeFindDanglingIndexRequest extends TransportRequest { private final String indexUUID; public NodeFindDanglingIndexRequest(String indexUUID) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java index 9b737fff8316e..696daf75942fb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.dangling.list; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class NodeListDanglingIndicesRequest extends BaseNodeRequest { +public class NodeListDanglingIndicesRequest extends TransportRequest { /** * Filter the response by index UUID. Leave as null to find all indices. 
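// [Illustrative sketch, not part of this patch] Reader and writer gates have
// to be dropped as a pair, as in the ShardRequest.phase1 hunks above: if only
// one side kept its onOrAfter() check, one node would read bytes its peer
// never wrote (or skip bytes it did write) and the stream would desynchronize.
// Hypothetical helpers around the real phase1 field:
static void writePhase1(StreamOutput out, boolean phase1) throws IOException {
    out.writeBoolean(phase1);              // unconditional, matching the unconditional read
}

static boolean readPhase1(StreamInput in) throws IOException {
    return in.readBoolean();               // same wire position on every supported version
}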
*/ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 9a24d8a42dc9d..fd3d6daa9c393 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -87,9 +86,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { fieldDataFields = in.readStringArray(); completionDataFields = in.readStringArray(); includeSegmentFileSizes = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnloadedSegments = in.readBoolean(); - } + includeUnloadedSegments = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_2_0)) { includeAllShardIndexingPressureTrackers = in.readBoolean(); includeOnlyTopIndexingPressureMetrics = in.readBoolean(); @@ -111,9 +108,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArrayNullable(fieldDataFields); out.writeStringArrayNullable(completionDataFields); out.writeBoolean(includeSegmentFileSizes); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnloadedSegments); - } + out.writeBoolean(includeUnloadedSegments); if (out.getVersion().onOrAfter(Version.V_1_2_0)) { out.writeBoolean(includeAllShardIndexingPressureTrackers); out.writeBoolean(includeOnlyTopIndexingPressureMetrics); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index 688568ba9a6d6..50d60560bdfe4 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -72,11 +72,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); mergeResults = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnmapped = in.readBoolean(); - } else { - includeUnmapped = false; - } + includeUnmapped = in.readBoolean(); indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readOptionalLong() : null; } @@ -109,9 +105,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); out.writeBoolean(mergeResults); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnmapped); - } + out.writeBoolean(includeUnmapped); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeOptionalNamedWriteable(indexFilter); out.writeOptionalLong(nowInMillis); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index e5f644987182c..847cca25ceb35 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; @@ -87,11 +86,7 @@ private FieldCapabilitiesResponse( public FieldCapabilitiesResponse(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indices = in.readStringArray(); - } else { - indices = Strings.EMPTY_ARRAY; - } + indices = in.readStringArray(); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } @@ -138,9 +133,7 @@ private static Map readField(StreamInput in) throws I @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeStringArray(indices); - } + out.writeStringArray(indices); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); out.writeList(indexResponses); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java index c90f75e3c0aed..de0c0dd9bbfc3 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java @@ -9,16 +9,16 @@ package org.opensearch.action.search; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; /** * Inner node get all pits request */ -public class GetAllPitNodeRequest extends BaseNodeRequest { +public class GetAllPitNodeRequest extends TransportRequest { public GetAllPitNodeRequest() { super(); diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java deleted file mode 100644 index b5ff1d60ff75b..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
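// [Illustrative sketch, not part of this patch] Only gates below the new
// compatibility floor are removed; checks against versions that can still
// appear on the wire stay put. In FieldCapabilitiesRequest above, the V_7_2_0
// gate is gone while the V_7_9_0 gate around indexFilter/nowInMillis remains.
// Condensed write path, with a hypothetical method name:
static void writeTail(StreamOutput out, boolean includeUnmapped, QueryBuilder indexFilter, Long nowInMillis)
    throws IOException {
    out.writeBoolean(includeUnmapped);                    // always present now
    if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) {
        out.writeOptionalNamedWriteable(indexFilter);     // still version-dependent
        out.writeOptionalLong(nowInMillis);
    }
}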
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.support.nodes; - -import org.opensearch.LegacyESVersion; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportRequest; - -import java.io.IOException; - -/** - * Base class for node transport requests - * - * @opensearch.internal - * - * @deprecated this class is deprecated and classes will extend TransportRequest directly - */ -// TODO: this class can be removed in main once 7.x is bumped to 7.4.0 -@Deprecated -public abstract class BaseNodeRequest extends TransportRequest { - - public BaseNodeRequest() {} - - public BaseNodeRequest(StreamInput in) throws IOException { - super(in); - if (in.getVersion().before(LegacyESVersion.V_7_3_0)) { - in.readString(); // previously nodeId - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_3_0)) { - out.writeString(""); // previously nodeId - } - } -} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java index 18fcdfad0bcc4..a12e9b753599d 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java @@ -70,7 +70,7 @@ public abstract class TransportNodesAction< NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, - NodeRequest extends BaseNodeRequest, + NodeRequest extends TransportRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction { protected final ThreadPool threadPool; diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 0ff373b6116de..4cbf0cfe70adb 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -67,7 +67,6 @@ import java.util.stream.Collectors; import static org.opensearch.snapshots.SnapshotInfo.DATA_STREAMS_IN_SNAPSHOT; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Meta data about snapshots that are currently executing @@ -296,11 +295,7 @@ private Entry(StreamInput in) throws IOException { shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom); repositoryStateId = in.readLong(); failure = in.readOptionalString(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + 
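// [Illustrative note, not part of this patch] Why deleting BaseNodeRequest is
// safe: the class carried no state of its own -- its only job was to skip (on
// read) or pad (on write) the nodeId string that pre-7.3.0 peers expected,
// via the before(V_7_3_0) branches shown above. Once no supported peer is
// older than that, both branches are unreachable and the minimal base is
// TransportRequest itself:
public class MinimalNodeRequest extends TransportRequest {
    public MinimalNodeRequest() {}

    public MinimalNodeRequest(StreamInput in) throws IOException {
        super(in);
    }
}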
userMetadata = in.readMap(); if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { version = Version.readVersion(in); } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { @@ -736,9 +731,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(shards); out.writeLong(repositoryStateId); out.writeOptionalString(failure); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); if (out.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { Version.writeVersion(version, out); } else if (out.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index cd1c92a8b109f..b6ca8c52cd818 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1016,11 +1016,7 @@ private static class IndexMetadataDiff implements Diff { version = in.readLong(); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - aliasesVersion = in.readVLong(); - } else { - aliasesVersion = 1; - } + aliasesVersion = in.readVLong(); state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); primaryTerms = in.readVLongArray(); @@ -1051,9 +1047,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeByte(state.id); Settings.writeSettingsToStream(settings, out); out.writeVLongArray(primaryTerms); @@ -1093,11 +1087,7 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException { builder.version(in.readLong()); builder.mappingVersion(in.readVLong()); builder.settingsVersion(in.readVLong()); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - builder.aliasesVersion(in.readVLong()); - } else { - builder.aliasesVersion(1); - } + builder.aliasesVersion(in.readVLong()); builder.setRoutingNumShards(in.readInt()); builder.state(State.fromId(in.readByte())); builder.settings(readSettingsFromStream(in)); @@ -1140,9 +1130,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeInt(routingNumShards); out.writeByte(state.id()); writeSettingsToStream(settings, out); @@ -1821,8 +1809,8 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti if (Assertions.ENABLED) { assert settingsVersion : "settings version should be present for indices"; } - if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(LegacyESVersion.V_7_2_0)) { - assert aliasesVersion : "aliases version should be present for indices created on or after 7.2.0"; + if (Assertions.ENABLED) { + assert aliasesVersion : "aliases version should be present for indices"; } return builder.build(); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java 
index fb8123f20e904..8e73a72d43219 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -39,7 +39,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.CollectionUtil; -import org.opensearch.LegacyESVersion; import org.opensearch.action.AliasesRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.FeatureAware; @@ -986,11 +985,7 @@ private static class MetadataDiff implements Diff { coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); - } else { - hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; - } + hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); @@ -1004,9 +999,7 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); @@ -1037,9 +1030,7 @@ public static Metadata readFrom(StreamInput in) throws IOException { builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); - } + builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); int size = in.readVInt(); for (int i = 0; i < size; i++) { builder.put(IndexMetadata.readFrom(in), false); @@ -1064,9 +1055,7 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); out.writeVInt(indices.size()); for (IndexMetadata indexMetadata : this) { indexMetadata.writeTo(out); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index 4f1000e3407fd..6de69c5a6f8f4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import 
org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -840,10 +839,6 @@ static Tuple> closeRoutingTable( final Map verifyResult ) { - // Remove the index routing table of closed indices if the cluster is in a mixed version - // that does not support the replication of closed indices - final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(LegacyESVersion.V_7_2_0); - final Metadata.Builder metadata = Metadata.builder(currentState.metadata()); final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); @@ -916,16 +911,11 @@ static Tuple> closeRoutingTable( blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID); blocks.addIndexBlock(index.getName(), INDEX_CLOSED_BLOCK); final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE); - if (removeRoutingTable) { - metadata.put(updatedMetadata); - routingTable.remove(index.getName()); - } else { - metadata.put( - updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1) - .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) - ); - routingTable.addAsFromOpenToClose(metadata.getSafe(index)); - } + metadata.put( + updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1) + .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) + ); + routingTable.addAsFromOpenToClose(metadata.getSafe(index)); logger.debug("closing index {} succeeded", index); closedIndices.add(index.getName()); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 199d79070c050..016c0aac44be6 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -49,7 +49,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -323,50 +322,30 @@ public DiscoveryNode(StreamInput in) throws IOException { } int rolesSize = in.readVInt(); final Set roles = new HashSet<>(rolesSize); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - for (int i = 0; i < rolesSize; i++) { - final String roleName = in.readString(); - final String roleNameAbbreviation = in.readString(); - final boolean canContainData; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - canContainData = in.readBoolean(); - } else { - canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName()); - } - final DiscoveryNodeRole role = roleMap.get(roleName); - if (role == null) { - if (in.getVersion().onOrAfter(Version.V_2_1_0)) { - roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); - } else { - roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); - } - } else { - assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; - assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" - + roleName - + "] does not match role [" - + role.roleNameAbbreviation() - + "]"; - roles.add(role); - } 
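// [Condensed from the closeRoutingTable hunk above, for orientation -- not new
// code] Closed indices now always keep their routing table, on the assumption
// that every node in a supported cluster can replicate closed indices; the
// close path unconditionally bumps the settings version, records the
// verified-before-close marker, and reuses the open-to-close routing transition:
final IndexMetadata.Builder updated = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE);
metadata.put(
    updated.settingsVersion(indexMetadata.getSettingsVersion() + 1)
        .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true))
);
routingTable.addAsFromOpenToClose(metadata.getSafe(index));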
+ for (int i = 0; i < rolesSize; i++) { + final String roleName = in.readString(); + final String roleNameAbbreviation = in.readString(); + final boolean canContainData; + if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { + canContainData = in.readBoolean(); + } else { + canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName()); } - } else { - // an old node will only send us legacy roles since pluggable roles is a new concept - for (int i = 0; i < rolesSize; i++) { - final LegacyRole legacyRole = in.readEnum(LegacyRole.class); - switch (legacyRole) { - case MASTER: - roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); - break; - case DATA: - roles.add(DiscoveryNodeRole.DATA_ROLE); - break; - case INGEST: - roles.add(DiscoveryNodeRole.INGEST_ROLE); - break; - default: - throw new AssertionError(legacyRole.roleName()); + final DiscoveryNodeRole role = roleMap.get(roleName); + if (role == null) { + if (in.getVersion().onOrAfter(Version.V_2_1_0)) { + roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); + } else { + roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); } + } else { + assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; + assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" + + roleName + + "] does not match role [" + + role.roleNameAbbreviation() + + "]"; + roles.add(role); } } this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); @@ -386,30 +365,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeVInt(roles.size()); - for (final DiscoveryNodeRole role : roles) { - final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion()); - out.writeString(compatibleRole.roleName()); - out.writeString(compatibleRole.roleNameAbbreviation()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeBoolean(compatibleRole.canContainData()); - } - } - } else { - // an old node will only understand legacy roles since pluggable roles is a new concept - final List rolesToWrite = roles.stream() - .filter(DiscoveryNodeRole.LEGACY_ROLES::contains) - .collect(Collectors.toList()); - out.writeVInt(rolesToWrite.size()); - for (final DiscoveryNodeRole role : rolesToWrite) { - if (role.isClusterManager()) { - out.writeEnum(LegacyRole.MASTER); - } else if (role.equals(DiscoveryNodeRole.DATA_ROLE)) { - out.writeEnum(LegacyRole.DATA); - } else if (role.equals(DiscoveryNodeRole.INGEST_ROLE)) { - out.writeEnum(LegacyRole.INGEST); - } + out.writeVInt(roles.size()); + for (final DiscoveryNodeRole role : roles) { + final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion()); + out.writeString(compatibleRole.roleName()); + out.writeString(compatibleRole.roleNameAbbreviation()); + if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { + out.writeBoolean(compatibleRole.canContainData()); } } if (out.getVersion().before(Version.V_1_0_0) && version.onOrAfter(Version.V_1_0_0)) { diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java index be914c6a40a83..ba1490a7929bd 100644 --- 
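// [Condensed from the DiscoveryNode hunk above] The role wire format after
// this change: every role is written as (name, abbreviation[, canContainData]),
// with only the 7.10 gate on the boolean retained -- the pre-7.3 enum-based
// legacy-role encoding is gone entirely.
void writeRoles(StreamOutput out, Set<DiscoveryNodeRole> roles) throws IOException {
    out.writeVInt(roles.size());
    for (final DiscoveryNodeRole role : roles) {
        final DiscoveryNodeRole compatible = role.getCompatibilityRole(out.getVersion());
        out.writeString(compatible.roleName());
        out.writeString(compatible.roleNameAbbreviation());
        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
            out.writeBoolean(compatible.canContainData());
        }
    }
}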
a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java @@ -37,7 +37,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -52,6 +51,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -168,7 +168,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodeRequest() {} NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index c43f539243d7a..5b79ca5970e63 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -40,7 +40,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -65,6 +64,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -307,7 +307,7 @@ protected void writeNodesTo(StreamOutput out, List nod * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final ShardId shardId; @Nullable diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index f426768119c1d..dceb26bc33aa7 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -39,8 +39,6 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -174,25 +172,21 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat if (requireCompleteHistory == false) { return; } - // Before 8.0 the global checkpoint is not known and up to date when the engine 
is created after + // Before 3.0 the global checkpoint is not known and up to date when the engine is created after // peer recovery, so we only check the max seq no / global checkpoint coherency when the global // checkpoint is different from the unassigned sequence number value. // In addition to that we only execute the check if the index the engine belongs to has been // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction // that guarantee that all operations have been flushed to Lucene. - final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated(); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0) - || (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) { - assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); - if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { - throw new IllegalStateException( - "Maximum sequence number [" - + seqNoStats.getMaxSeqNo() - + "] from last commit does not match global checkpoint [" - + seqNoStats.getGlobalCheckpoint() - + "]" - ); - } + assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); + if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { + throw new IllegalStateException( + "Maximum sequence number [" + + seqNoStats.getMaxSeqNo() + + "] from last commit does not match global checkpoint [" + + seqNoStats.getGlobalCheckpoint() + + "]" + ); } } diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java index e2f23353f250e..5da4f8d5c7833 100644 --- a/server/src/main/java/org/opensearch/index/get/GetResult.java +++ b/server/src/main/java/org/opensearch/index/get/GetResult.java @@ -32,7 +32,6 @@ package org.opensearch.index.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -105,20 +104,8 @@ public GetResult(StreamInput in) throws IOException { if (source.length() == 0) { source = null; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - documentFields = readFields(in); - metaFields = readFields(in); - } else { - Map fields = readFields(in); - documentFields = new HashMap<>(); - metaFields = new HashMap<>(); - fields.forEach( - (fieldName, docField) -> (MapperService.META_FIELDS_BEFORE_7DOT8.contains(fieldName) ? 
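// [Illustrative sketch, not part of this patch] The coherency check above now
// runs for every index a ReadOnlyEngine opens, not just post-7.2 ones.
// Condensed form of the invariant it enforces, as a hypothetical helper:
static void ensureCommitCoherent(long maxSeqNo, long globalCheckpoint) {
    if (maxSeqNo != globalCheckpoint) {
        throw new IllegalStateException(
            "Maximum sequence number [" + maxSeqNo + "] from last commit does not match global checkpoint [" + globalCheckpoint + "]"
        );
    }
}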
metaFields : documentFields).put( - fieldName, - docField - ) - ); - } + documentFields = readFields(in); + metaFields = readFields(in); } else { metaFields = Collections.emptyMap(); documentFields = Collections.emptyMap(); @@ -446,12 +433,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(exists); if (exists) { out.writeBytesReference(source); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - writeFields(out, documentFields); - writeFields(out, metaFields); - } else { - writeFields(out, this.getFields()); - } + writeFields(out, documentFields); + writeFields(out, metaFields); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 82bd8ebb4d362..15aae1774213a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -69,7 +69,6 @@ import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.collect.Iterators; import org.opensearch.common.lucene.Lucene; @@ -422,7 +421,7 @@ private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fi * or a multi-field). This way search will continue to work on old indices and new indices * will use the expected full name. */ - String fullName = indexCreatedVersion.before(LegacyESVersion.V_7_2_1) ? name() : buildFullName(context); + String fullName = buildFullName(context); // Copy the index options of the main field to allow phrase queries on // the prefix field. 
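// [Illustrative sketch, not part of this patch] GetResult now always
// serializes document fields and metadata fields as two separate maps; the
// pre-7.3 single-map form, split on read via META_FIELDS_BEFORE_7DOT8, is
// gone. Hypothetical helper over the real writeFields/readFields pair:
void writeFieldMaps(StreamOutput out) throws IOException {
    writeFields(out, documentFields);  // user-visible document fields first
    writeFields(out, metaFields);      // then the metadata fields, in lockstep with the reader
}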
FieldType pft = new FieldType(fieldType); diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index cc69a44c9c80c..e1cb0cdff4ebd 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -41,7 +41,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -166,11 +165,7 @@ public Match(StreamInput in) throws IOException { } this.analyzer = in.readOptionalString(); this.filter = in.readOptionalWriteable(IntervalFilter::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - this.useField = in.readOptionalString(); - } else { - this.useField = null; - } + this.useField = in.readOptionalString(); } @Override @@ -234,9 +229,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(analyzer); out.writeOptionalWriteable(filter); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(useField); - } + out.writeOptionalString(useField); } @Override diff --git a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java index 8a54e5105c61e..fc874608fb3b1 100644 --- a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java @@ -32,7 +32,6 @@ package org.opensearch.index.refresh; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -68,10 +67,8 @@ public RefreshStats() {} public RefreshStats(StreamInput in) throws IOException { total = in.readVLong(); totalTimeInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - externalTotal = in.readVLong(); - externalTotalTimeInMillis = in.readVLong(); - } + externalTotal = in.readVLong(); + externalTotalTimeInMillis = in.readVLong(); listeners = in.readVInt(); } @@ -79,10 +76,8 @@ public RefreshStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); out.writeVLong(totalTimeInMillis); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(externalTotal); - out.writeVLong(externalTotalTimeInMillis); - } + out.writeVLong(externalTotal); + out.writeVLong(externalTotalTimeInMillis); out.writeVInt(listeners); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 55d95381923b3..63995ae121abb 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1495,7 +1495,7 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex assert primaryMode == false; if (primaryContext.checkpoints.containsKey(shardAllocationId) == false) { // can happen if the old primary was on an old version - assert 
indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_3_0); + assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.fromId(7000099)); throw new IllegalStateException("primary context [" + primaryContext + "] does not contain " + shardAllocationId); } final Runnable runAfter = getClusterManagerUpdateOperationFromCurrentState(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java index 6c597fcd086c4..d346ec5c975f4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; @@ -76,11 +74,7 @@ public RecoveryCleanFilesRequest( shardId = new ShardId(in); snapshotFiles = new Store.MetadataSnapshot(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - globalCheckpoint = in.readZLong(); - } else { - globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + globalCheckpoint = in.readZLong(); } @Override @@ -90,9 +84,7 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); snapshotFiles.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeZLong(globalCheckpoint); - } + out.writeZLong(globalCheckpoint); } public Store.MetadataSnapshot sourceMetaSnapshot() { diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java index eacfb87ecc732..32560bc211669 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.seqno.RetentionLeases; @@ -139,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(maxSeenAutoIdTimestampOnPrimary); out.writeZLong(maxSeqNoOfUpdatesOrDeletesOnPrimary); retentionLeases.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(mappingVersionOnPrimary); - } + out.writeVLong(mappingVersionOnPrimary); } } diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index b49cdcd127962..0e452fcbbc7a9 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; 
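// [Illustrative sketch, not part of this patch] The recovery globalCheckpoint
// above now always travels with the request. writeZLong/readZLong use zig-zag
// encoding, which keeps small negative values such as the
// SequenceNumbers.UNASSIGNED_SEQ_NO sentinel compact on the wire. Hypothetical
// helpers:
static void writeCheckpoint(StreamOutput out, long globalCheckpoint) throws IOException {
    out.writeZLong(globalCheckpoint);
}

static long readCheckpoint(StreamInput in) throws IOException {
    return in.readZLong();
}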
import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -68,6 +67,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -410,7 +410,7 @@ protected void writeNodesTo(StreamOutput out, List nodes * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final ShardId shardId; @Nullable diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 6b6d33717bab4..f7e3de4eb988f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.io.stream.StreamInput; @@ -150,17 +149,13 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - minimumIntervalExpression = in.readOptionalString(); - } + minimumIntervalExpression = in.readOptionalString(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalString(minimumIntervalExpression); - } + out.writeOptionalString(minimumIntervalExpression); } protected AutoDateHistogramAggregationBuilder( @@ -321,17 +316,7 @@ public RoundingInfo(StreamInput in) throws IOException { roughEstimateDurationMillis = in.readVLong(); innerIntervals = in.readIntArray(); unitAbbreviation = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - dateTimeUnit = in.readString(); - } else { - /* - * This *should* be safe because we only deserialize RoundingInfo - * when reading result and results don't actually use this at all. - * We just set it to something non-null to line up with the normal - * ctor. "seconds" is the smallest unit anyway. 
- */ - dateTimeUnit = "second"; - } + dateTimeUnit = in.readString(); } @Override @@ -340,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(roughEstimateDurationMillis); out.writeIntArray(innerIntervals); out.writeString(unitAbbreviation); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeString(dateTimeUnit); - } + out.writeString(dateTimeUnit); } public int getMaximumInnerInterval() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index d378bb2ec1bd2..f8a4c4ffd9cba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.DateTimeUnit; @@ -143,21 +142,8 @@ public static void declareIntervalFields(Object public DateIntervalWrapper() {} public DateIntervalWrapper(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_2_0)) { - long interval = in.readLong(); - DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new); - - if (histoInterval != null) { - dateHistogramInterval = histoInterval; - intervalType = IntervalTypeEnum.LEGACY_DATE_HISTO; - } else { - dateHistogramInterval = new DateHistogramInterval(interval + "ms"); - intervalType = IntervalTypeEnum.LEGACY_INTERVAL; - } - } else { - dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); - intervalType = IntervalTypeEnum.fromStream(in); - } + dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + intervalType = IntervalTypeEnum.fromStream(in); } public IntervalTypeEnum getIntervalType() { @@ -402,20 +388,8 @@ public boolean isEmpty() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { - out.writeLong( - TimeValue.parseTimeValue(dateHistogramInterval.toString(), DateHistogramAggregationBuilder.NAME + ".innerWriteTo") - .getMillis() - ); - } else { - out.writeLong(0L); - } - out.writeOptionalWriteable(dateHistogramInterval); - } else { - out.writeOptionalWriteable(dateHistogramInterval); - intervalType.writeTo(out); - } + out.writeOptionalWriteable(dateHistogramInterval); + intervalType.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java index 96e7541de25d9..35449a28c9087 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java @@ -33,7 +33,6 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; @@ -84,13 +83,7 @@ public InternalGeoCentroid(StreamInput in) throws 
IOException { super(in); count = in.readVLong(); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - centroid = new GeoPoint(in.readDouble(), in.readDouble()); - } else { - final long hash = in.readLong(); - centroid = new GeoPoint(decodeLatitude(hash), decodeLongitude(hash)); - } - + centroid = new GeoPoint(in.readDouble(), in.readDouble()); } else { centroid = null; } @@ -101,12 +94,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVLong(count); if (centroid != null) { out.writeBoolean(true); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeDouble(centroid.lat()); - out.writeDouble(centroid.lon()); - } else { - out.writeLong(encodeLatLon(centroid.lat(), centroid.lon())); - } + out.writeDouble(centroid.lat()); + out.writeDouble(centroid.lon()); } else { out.writeBoolean(false); } diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index dceacd57d623e..31fdc5c9d9e9d 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -45,7 +45,6 @@ import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.profile.NetworkTime; @@ -54,7 +53,6 @@ import java.io.IOException; -import static java.util.Collections.emptyList; import static org.opensearch.common.lucene.Lucene.readTopDocs; import static org.opensearch.common.lucene.Lucene.writeTopDocs; @@ -361,10 +359,6 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc if (hasAggs = in.readBoolean()) { aggregations = DelayableWriteable.referencing(InternalAggregations.readFrom(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_2_0)) { - // The list of PipelineAggregators is sent by old versions. We don't need it anyway. - in.readNamedWriteableList(PipelineAggregator.class); - } } else { if (hasAggs = in.readBoolean()) { aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); @@ -410,35 +404,11 @@ public void writeToNoId(StreamOutput out) throws IOException { writeTopDocs(out, topDocsAndMaxScore); if (aggregations == null) { out.writeBoolean(false); - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - /* - * Earlier versions expect sibling pipeline aggs separately - * as they used to be set to QuerySearchResult directly, while - * later versions expect them in InternalAggregations. Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. - */ - out.writeNamedWriteableList(emptyList()); - } } else { out.writeBoolean(true); if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { InternalAggregations aggs = aggregations.expand(); aggs.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - /* - * Earlier versions expect sibling pipeline aggs separately - * as they used to be set to QuerySearchResult directly, while - * later versions expect them in InternalAggregations. 
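// [Condensed from the InternalGeoCentroid hunk above] The centroid is now
// always written as two raw doubles; the removed pre-7.2 fallback packed both
// coordinates into a single encoded long, trading precision for size.
void writeCentroid(StreamOutput out, GeoPoint centroid) throws IOException {
    out.writeDouble(centroid.lat());   // latitude first...
    out.writeDouble(centroid.lon());   // ...then longitude, as the reader expects
}

GeoPoint readCentroid(StreamInput in) throws IOException {
    return new GeoPoint(in.readDouble(), in.readDouble());
}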
Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. - */ - out.writeNamedWriteableList(aggs.getTopLevelPipelineAggregators()); - } } else { aggregations.writeTo(out); } diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index b4a93ec9869e6..395ebaf2523e7 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.PointValues; import org.apache.lucene.index.Terms; import org.apache.lucene.search.SortField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -161,9 +160,7 @@ public FieldSortBuilder(StreamInput in) throws IOException { sortMode = in.readOptionalWriteable(SortMode::readFromStream); unmappedType = in.readOptionalString(); nestedSort = in.readOptionalWriteable(NestedSortBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - numericType = in.readOptionalString(); - } + numericType = in.readOptionalString(); } @Override @@ -176,9 +173,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(sortMode); out.writeOptionalString(unmappedType); out.writeOptionalWriteable(nestedSort); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(numericType); - } + out.writeOptionalString(numericType); } /** Returns the document field this sort should be based on. */ diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index d7ebba721a52c..38d9df0e960e0 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -73,7 +73,6 @@ public final class SnapshotInfo implements Comparable, ToXContent, public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; - public static final Version METADATA_FIELD_INTRODUCED = LegacyESVersion.V_7_3_0; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); private static final String SNAPSHOT = "snapshot"; private static final String UUID = "uuid"; @@ -401,11 +400,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { shardFailures = Collections.unmodifiableList(in.readList(SnapshotShardFailure::new)); version = in.readBoolean() ? 
Version.readVersion(in) : null; includeGlobalState = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + userMetadata = in.readMap(); if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { dataStreams = in.readStringList(); } else { @@ -840,11 +835,9 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } + out.writeMap(userMetadata); + if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { + out.writeStringCollection(dataStreams); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 5b2b4f361083b..ffd3a66ad1d48 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.node.DiscoveryNode; @@ -55,6 +54,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -78,7 +78,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { - public static class CancellableNodeRequest extends BaseNodeRequest { + public static class CancellableNodeRequest extends TransportRequest { protected String requestName; public CancellableNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index 5d947a743385f..d49dd14492327 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -16,7 +16,6 @@ import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -32,6 +31,7 @@ import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.test.tasks.MockTaskManagerListener; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import 
java.io.IOException; @@ -56,7 +56,7 @@ public class ResourceAwareTasksTests extends TaskManagerTestCase { private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - public static class ResourceAwareNodeRequest extends BaseNodeRequest { + public static class ResourceAwareNodeRequest extends TransportRequest { protected String requestName; public ResourceAwareNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 68cf69e30f8a6..8b0c2187d05af 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -37,7 +37,6 @@ import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -66,6 +65,7 @@ import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.junit.After; @@ -156,8 +156,8 @@ public int failureCount() { /** * Simulates node-based task that can be used to block node tasks so they are guaranteed to be registered by task manager */ - abstract class AbstractTestNodesAction, NodeRequest extends BaseNodeRequest> extends - TransportNodesAction { + abstract class AbstractTestNodesAction, NodeRequest extends TransportRequest> + extends TransportNodesAction { AbstractTestNodesAction( String actionName, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 3bb1957c69fb4..aa0e9511f86ce 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -41,7 +41,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -182,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected final String requestName; protected final boolean shouldBlock; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 7590bf88eeca0..97a045872477d 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -44,7 +44,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.tasks.BaseTasksRequest; import org.opensearch.action.support.tasks.BaseTasksResponse; @@ -67,6 +66,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -91,7 +91,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected String requestName; public NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java index 4a90a23cbd2f0..07246f144d95b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -82,11 +82,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) { assertEquals(request.indicesOptions(), indicesOptions); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); - } else { - assertEquals(0, in.available()); - } + assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); } } } @@ -100,9 +96,7 @@ public void testBwcSerialization() throws Exception { out.writeTimeValue(sample.timeout()); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - sample.waitForActiveShards().writeTo(out); - } + sample.waitForActiveShards().writeTo(out); final CloseIndexRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { @@ -119,11 +113,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || sample.indicesOptions().expandWildcardsHidden()) { assertEquals(sample.indicesOptions(), deserializedRequest.indicesOptions()); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); - } else { - assertEquals(ActiveShardCount.NONE, deserializedRequest.waitForActiveShards()); - } + assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); } } } diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 86657a98d9a1d..76142efc60b7d 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -50,6 +50,7 @@ import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -378,7 +379,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) thro } } - private static class TestNodeRequest extends BaseNodeRequest { + private static class TestNodeRequest extends TransportRequest { TestNodeRequest() {} TestNodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java index 089dfcaf65517..bf33d7a45f4fb 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java @@ -286,7 +286,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // write using older version which contains types ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setVersion(LegacyESVersion.V_7_2_0); + out.setVersion(LegacyESVersion.fromId(7000099)); request.writeTo(out); // First check the type on the stream was written as "_doc" by manually parsing the stream until the type @@ -302,7 +302,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // now read the stream as normal to check it is parsed correct if received from an older node opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); - opensearchBuffer.setVersion(LegacyESVersion.V_7_2_0); + opensearchBuffer.setVersion(LegacyESVersion.fromId(7000099)); TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer); assertThat(request.offsets(), equalTo(req2.offsets())); From 77cff55bd431c0e2f8fdea5a40c83f8e8bddeca1 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 16:29:39 -0500 Subject: [PATCH 14/14] [Remove] LegacyESVersion.V_7_4_* and V_7_5_* constants (#4704) Removes all usages of LegacyESVersion.V_7_4_ and LegacyESVersion.V_7_5 version constants along with ancient API logic. 
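Nearly every removal in this patch collapses one recurring wire-compatibility shape. As a minimal sketch of that shape (the ExampleRequest class is hypothetical; StreamInput, StreamOutput, LegacyESVersion, and SequenceNumbers are the real OpenSearch types used in the hunks below, and the field mirrors the RecoveryFinalizeRecoveryRequest hunk), the pre-patch code reads a field only from peers that are new enough and substitutes a default otherwise:

    // Minimal sketch of the version-gated wire pattern this patch deletes.
    // ExampleRequest is hypothetical; the types and stream calls are real.
    import java.io.IOException;

    import org.opensearch.LegacyESVersion;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;
    import org.opensearch.index.seqno.SequenceNumbers;

    public class ExampleRequest {
        private final long trimAboveSeqNo;

        public ExampleRequest(StreamInput in) throws IOException {
            // Peers older than the gate never sent this field, so fall back to a default.
            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
                trimAboveSeqNo = in.readZLong();
            } else {
                trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            }
        }

        public void writeTo(StreamOutput out) throws IOException {
            // The writer mirrors the reader, gated on the destination node's version.
            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
                out.writeZLong(trimAboveSeqNo);
            }
        }
    }

Once the minimum supported wire version moves past the gate, both branches become dead weight and the serialization collapses to the unconditional read/write kept in the '+' lines throughout this diff.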
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../org/opensearch/upgrades/IndexingIT.java | 13 +- .../DedicatedClusterSnapshotRestoreIT.java | 40 -- .../java/org/opensearch/LegacyESVersion.java | 6 - .../org/opensearch/OpenSearchException.java | 9 +- .../NodesReloadSecureSettingsRequest.java | 18 +- .../TransportCleanupRepositoryAction.java | 16 +- .../create/TransportCreateSnapshotAction.java | 14 +- .../snapshots/status/SnapshotStatus.java | 18 +- .../shards/IndicesShardStoresResponse.java | 15 +- .../admin/indices/shrink/ResizeRequest.java | 4 - .../opensearch/action/index/IndexRequest.java | 18 +- .../ingest/SimulateDocumentBaseResult.java | 29 +- .../cluster/RepositoryCleanupInProgress.java | 2 +- .../cluster/routing/UnassignedInfo.java | 11 +- .../org/opensearch/index/IndexSettings.java | 4 +- .../query/VectorGeoShapeQueryProcessor.java | 2 +- .../ScriptScoreQueryBuilder.java | 13 +- .../index/seqno/ReplicationTracker.java | 10 +- .../opensearch/index/shard/IndexShard.java | 3 +- .../RecoveryFinalizeRecoveryRequest.java | 12 +- ...ryPrepareForTranslogOperationsRequest.java | 7 - .../recovery/RecoverySourceHandler.java | 4 +- .../indices/recovery/RecoveryState.java | 9 +- .../TransportNodesListShardStoreMetadata.java | 10 +- .../repositories/FilterRepository.java | 6 - .../opensearch/repositories/Repository.java | 14 - .../blobstore/BlobStoreRepository.java | 16 - .../repositories/blobstore/package-info.java | 16 +- .../rest/action/document/RestIndexAction.java | 3 +- .../CompositeValuesSourceParserHelper.java | 10 - .../MovFnPipelineAggregationBuilder.java | 11 +- .../pipeline/MovFnPipelineAggregator.java | 11 +- .../snapshots/SnapshotsService.java | 368 +----------------- .../GeoShapeQueryBuilderGeoShapeTests.java | 20 +- .../RepositoriesServiceTests.java | 5 - .../index/shard/RestoreOnlyRepository.java | 4 - .../test/rest/OpenSearchRestTestCase.java | 4 +- 38 files changed, 64 insertions(+), 712 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ee30dc84e9fd..37e16aecff69a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) ### Fixed diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f34e5f7bc121a..888fa886c3c5e 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -181,18 +181,7 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } } - if (minNodeVersion.before(LegacyESVersion.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if request goes to < 7.5 node 
- .or(containsString("an id must be provided if version type or value are set") - )); - } else { - client().performRequest(bulk); - } + client().performRequest(bulk); break; case UPGRADED: client().performRequest(bulk); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b4287f201489b..c0fabb8becf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -37,7 +37,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -47,7 +46,6 @@ import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; @@ -1386,44 +1384,6 @@ public void testPartialSnapshotAllShardsMissing() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); } - /** - * Tests for the legacy snapshot path that is normally executed if the cluster contains any nodes older than - * {@link SnapshotsService#NO_REPO_INITIALIZE_VERSION}. - * Makes sure that blocking as well as non-blocking snapshot create paths execute cleanly as well as that error handling works out - * correctly by testing a snapshot name collision. 
- */ - public void testCreateSnapshotLegacyPath() throws Exception { - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - createRepository(repoName, "fs"); - createIndex("some-index"); - - final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); - final Snapshot snapshot1 = PlainActionFuture.get( - f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) - ); - awaitNoMoreRunningOperations(clusterManagerNode); - - final InvalidSnapshotNameException sne = expectThrows( - InvalidSnapshotNameException.class, - () -> PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, snapshot1.getSnapshotId().getName()), f) - ) - ); - - assertThat(sne.getMessage(), containsString("snapshot with the same name already exists")); - final SnapshotInfo snapshot2 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-2"), f) - ); - assertThat(snapshot2.state(), is(SnapshotState.SUCCESS)); - - final SnapshotInfo snapshot3 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-3").indices("does-not-exist-*"), f) - ); - assertThat(snapshot3.state(), is(SnapshotState.SUCCESS)); - } - public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { internalCluster().startClusterManagerOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 283a6581a64bb..6e2aeaa5f7b4f 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,12 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_5_0 = new LegacyESVersion(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_1 = new LegacyESVersion(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_2 = new LegacyESVersion(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index e9f0d831d2977..78bda1cf088cd 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -49,7 +49,6 @@ import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; 
-import org.opensearch.search.SearchException; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.transport.TcpTransport; @@ -317,10 +316,6 @@ public void writeTo(StreamOutput out) throws IOException { public static OpenSearchException readException(StreamInput input, int id) throws IOException { CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); if (opensearchException == null) { - if (id == 127 && input.getVersion().before(LegacyESVersion.V_7_5_0)) { - // was SearchContextException - return new SearchException(input); - } throw new IllegalStateException("unknown exception for id: " + id); } return opensearchException.apply(input); @@ -1569,13 +1564,13 @@ private enum OpenSearchExceptionHandle { org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), INGEST_PROCESSOR_EXCEPTION( org.opensearch.ingest.IngestProcessorException.class, org.opensearch.ingest.IngestProcessorException::new, 157, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.opensearch.indices.recovery.PeerRecoveryNotFound.class, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e31f5f304c836..b721c8f005974 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -114,16 +114,14 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index a3804db687a2d..07b918e427784 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -91,8 +89,6 @@ public final class 
TransportCleanupRepositoryAction extends TransportClusterMana private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); - private static final Version MIN_VERSION = LegacyESVersion.V_7_4_0; - private final RepositoriesService repositoriesService; private final SnapshotsService snapshotsService; @@ -179,17 +175,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) { - if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { - cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); - } else { - throw new IllegalArgumentException( - "Repository cleanup is only supported from version [" - + MIN_VERSION - + "] but the oldest node version in the cluster is [" - + state.nodes().getMinNodeVersion() - + ']' - ); - } + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index ed4af6d915792..f604a30121797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -103,18 +103,10 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshotLegacy(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshotLegacy(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 8fd1ed22a0d14..5fa908a039887 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; @@ -92,15 +91,8 @@ public class SnapshotStatus implements ToXContentObject, Writeable { state = State.fromValue(in.readByte()); shards = Collections.unmodifiableList(in.readList(SnapshotIndexShardStatus::new)); includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } + final long startTime = in.readLong(); + final long time = in.readLong(); updateShardStats(startTime, time); } @@ -207,10 +199,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value()); out.writeList(shards); out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeLong(stats.getStartTime()); - out.writeLong(stats.getTime()); - } + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index bd5d9c651af7a..484bc93496fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; @@ -247,13 +246,8 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { } private Failure(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } readFrom(in, this); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } + nodeId = in.readString(); } public String nodeId() { @@ -266,13 +260,8 @@ static Failure readFailure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } + out.writeString(nodeId); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..f5d9528422b58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -122,9 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); - if (type == ResizeType.CLONE && out.getVersion().before(LegacyESVersion.V_7_4_0)) { - throw new IllegalArgumentException("can't send clone request to a node that's older than " + LegacyESVersion.V_7_4_0); - } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index 381eca2dc716f..ceff8dcbc4b55 
100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -153,12 +153,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - isPipelineResolved = in.readBoolean(); - } + finalPipeline = in.readOptionalString(); + isPipelineResolved = in.readBoolean(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -639,7 +635,7 @@ public void resolveRouting(Metadata metadata) { } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.V_7_5_0)) { + if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.fromId(7050099))) { throw new IllegalArgumentException( "optype create not supported for indexing requests without explicit id until all nodes " + "are on version 7.5.0 or higher" ); @@ -671,12 +667,8 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeOptionalString(finalPipeline); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeBoolean(isPipelineResolved); - } + out.writeOptionalString(finalPipeline); + out.writeBoolean(isPipelineResolved); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java index 2440a1802912b..f36ca0e7d7379 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java @@ -31,7 +31,6 @@ package org.opensearch.action.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -94,34 +93,14 @@ public SimulateDocumentBaseResult(Exception failure) { * Read from a stream. 
*/ public SimulateDocumentBaseResult(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - failure = in.readException(); - ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); - } else { - if (in.readBoolean()) { - ingestDocument = null; - failure = in.readException(); - } else { - ingestDocument = new WriteableIngestDocument(in); - failure = null; - } - } + failure = in.readException(); + ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeException(failure); - out.writeOptionalWriteable(ingestDocument); - } else { - if (failure == null) { - out.writeBoolean(false); - ingestDocument.writeTo(out); - } else { - out.writeBoolean(true); - out.writeException(failure); - } - } + out.writeException(failure); + out.writeOptionalWriteable(ingestDocument); } public IngestDocument getIngestDocument() { diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 2cf9d66fee2bd..291aa88a3fb3e 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -114,7 +114,7 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return LegacyESVersion.V_7_4_0; + return LegacyESVersion.fromId(7040099); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 489c6125f7d13..ed5a23cd09e40 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -314,11 +313,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failure = in.readException(); this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } else { - this.failedNodeIds = Collections.emptySet(); - } + this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public void writeTo(StreamOutput out) throws IOException { @@ -330,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(failure); out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeCollection(failedNodeIds, StreamOutput::writeString); - } + out.writeCollection(failedNodeIds, StreamOutput::writeString); } /** diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9c7f4804755d4..00daea147f16f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -34,7 +34,6 @@ import 
org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; @@ -1122,8 +1121,7 @@ public int getTranslogRetentionTotalFiles() { } private static boolean shouldDisableTranslogRetention(Settings settings) { - return INDEX_SOFT_DELETES_SETTING.get(settings) - && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(LegacyESVersion.V_7_4_0); + return INDEX_SOFT_DELETES_SETTING.get(settings); } /** diff --git a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java index e11b22e9296cf..d50585ae0aebf 100644 --- a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java @@ -67,7 +67,7 @@ public class VectorGeoShapeQueryProcessor { public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0) - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.fromId(7050099))) { throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); } // wrap geoQuery as a ConstantScoreQuery diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 9605ba424bfb0..ef606ce35b84f 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.index.query.functionscore; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -123,22 +122,14 @@ public ScriptScoreQueryBuilder(QueryBuilder query, Script script) { public ScriptScoreQueryBuilder(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script = new Script(in); - } else { - script = in.readNamedWriteable(ScriptScoreFunctionBuilder.class).getScript(); - } + script = new Script(in); minScore = in.readOptionalFloat(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(query); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script.writeTo(out); - } else { - out.writeNamedWriteable(new ScriptScoreFunctionBuilder(script)); - } + script.writeTo(out); out.writeOptionalFloat(minScore); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 63995ae121abb..ea1604c16190b 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -223,7 +223,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L /** * Whether there should be a peer recovery retention lease (PRRL) for every tracked shard copy. Always true on indices created from - * {@link LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an + * {@code LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an * earlier version if we recently did a rolling upgrade and * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} has not yet completed. Is only permitted * to change from false to true; can be removed once support for pre-PRRL indices is no longer needed. @@ -996,9 +996,7 @@ public ReplicationTracker( this.routingTable = null; this.replicationGroup = null; this.hasAllPeerRecoveryRetentionLeases = indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0) - || (indexSettings.isSoftDeleteEnabled() - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_4_0) - && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); + || (indexSettings.isSoftDeleteEnabled() && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; @@ -1126,7 +1124,7 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { /** * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@link LegacyESVersion#V_7_4_0}, in which case the missing leases should be created + * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. */ private void addPeerRecoveryRetentionLeaseForSolePrimary() { @@ -1528,7 +1526,7 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { /** * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@link LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. 
*/ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d05f7c34f80ce..52ecc5bc66607 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -52,7 +52,6 @@ import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -3187,7 +3186,7 @@ public RetentionLease addPeerRecoveryRetentionLease( ) { assert assertPrimaryMode(); // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs: - assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_4_0) || indexSettings.isSoftDeleteEnabled() == false; + assert indexSettings.isSoftDeleteEnabled() == false; return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index a7334fba15664..446fb78958db4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import java.io.IOException; @@ -57,11 +55,7 @@ final class RecoveryFinalizeRecoveryRequest extends RecoveryTransportRequest { recoveryId = in.readLong(); shardId = new ShardId(in); globalCheckpoint = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - trimAboveSeqNo = in.readZLong(); - } else { - trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + trimAboveSeqNo = in.readZLong(); } RecoveryFinalizeRecoveryRequest( @@ -100,9 +94,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeZLong(globalCheckpoint); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeZLong(trimAboveSeqNo); - } + out.writeZLong(trimAboveSeqNo); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index bdacb0b724884..68979fa4b69bc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.shard.ShardId; @@ -62,9 +61,6 @@ class RecoveryPrepareForTranslogOperationsRequest 
extends RecoveryTransportReque recoveryId = in.readLong(); shardId = new ShardId(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - in.readBoolean(); // was fileBasedRecovery - } } public long recoveryId() { @@ -85,8 +81,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeBoolean(true); // was fileBasedRecovery - } } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 665e79722770e..505d3c7adfb3f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,6 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -720,8 +719,7 @@ void createRetentionLease(final long startingSeqNo, ActionListener addRetentionLeaseStep = new StepListener<>(); final long estimatedGlobalCheckpoint = startingSeqNo - 1; final RetentionLease newLease = shard.addPeerRecoveryRetentionLease( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 57208ab029bf4..de2ee1b8512b4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -430,9 +429,7 @@ public Translog(StreamInput in) throws IOException { recovered = in.readVInt(); total = in.readVInt(); totalOnStart = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - totalLocal = in.readVInt(); - } + totalLocal = in.readVInt(); } @Override @@ -441,9 +438,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(recovered); out.writeVInt(total); out.writeVInt(totalOnStart); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeVInt(totalLocal); - } + out.writeVInt(totalLocal); } public synchronized void reset() { diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index 0e452fcbbc7a9..6189c983e3c8a 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -253,20 +253,14 @@ public StoreFilesMetadata( public StoreFilesMetadata(StreamInput in) throws IOException { this.shardId = new ShardId(in); this.metadataSnapshot = new Store.MetadataSnapshot(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } else { - this.peerRecoveryRetentionLeases = Collections.emptyList(); - 
} + this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); } @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); metadataSnapshot.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeList(peerRecoveryRetentionLeases); - } + out.writeList(peerRecoveryRetentionLeases); } public ShardId shardId() { diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index aaa021a0e8b93..a6a649fa2cd44 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -52,7 +52,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -95,11 +94,6 @@ public void getRepositoryData(ActionListener listener) { in.getRepositoryData(listener); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - in.initializeSnapshot(snapshotId, indices, metadata); - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index a16e0e8d441bc..1826fe1aa51da 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -129,19 +128,6 @@ default Repository create(RepositoryMetadata metadata, Function listener); - /** - * Starts snapshotting process - * - * @param snapshotId snapshot id - * @param indices list of indices to be snapshotted - * @param metadata cluster metadata - * - * @deprecated this method is only used when taking snapshots in a mixed version cluster where a cluster-manager node older than - * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION} is present. - */ - @Deprecated - void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata); - /** * Finalizes snapshotting process *
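Before the BlobStoreRepository hunk below deletes the implementation of initializeSnapshot, it helps to see how the legacy path was chosen. This is a condensed sketch of the dispatch already removed from TransportCreateSnapshotAction earlier in this patch; the waitForCompletion branching is elided here, see that hunk for the verbatim code:

    // Condensed: any node older than NO_REPO_INITIALIZE_VERSION forces the
    // repository-initializing legacy flow; otherwise snapshot creation is
    // driven purely through the cluster state.
    if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) {
        snapshotsService.executeSnapshotLegacy(request, ActionListener.map(listener, CreateSnapshotResponse::new));
    } else {
        snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new));
    }

With no supported node below that version, only the else branch can ever run, which is why the interface method, its blob-store implementation, and the package-info section describing it can all go.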

diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index c36d92abcf498..bf06191bdc8d3 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -123,7 +123,6 @@
 import org.opensearch.repositories.RepositoryVerificationException;
 import org.opensearch.repositories.ShardGenerations;
 import org.opensearch.snapshots.AbortedSnapshotException;
-import org.opensearch.snapshots.SnapshotCreationException;
 import org.opensearch.snapshots.SnapshotException;
 import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.snapshots.SnapshotInfo;
@@ -713,21 +712,6 @@ public RepositoryStats stats() {
         return new RepositoryStats(store.stats());
     }
 
-    @Override
-    public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata clusterMetadata) {
-        try {
-            // Write Global Metadata
-            GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress);
-
-            // write the index metadata for each index in the snapshot
-            for (IndexId index : indices) {
-                INDEX_METADATA_FORMAT.write(clusterMetadata.index(index.getName()), indexContainer(index), snapshotId.getUUID(), compress);
-            }
-        } catch (IOException ex) {
-            throw new SnapshotCreationException(metadata.name(), snapshotId, ex);
-        }
-    }
-
     @Override
     public void deleteSnapshots(
         Collection<SnapshotId> snapshotIds,
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java
index aacd386cd4bd7..b13a63ef77f6a 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java
@@ -163,23 +163,9 @@
  *
  * <p>Creating a snapshot in the repository happens in the three steps described in detail below.</p>
  *
- * <h3>Initializing a Snapshot in the Repository (Mixed Version Clusters only)</h3>
- *
- * <p>In mixed version clusters that contain a node older than
- * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a
- * call to {@link org.opensearch.repositories.Repository#initializeSnapshot} which the blob store repository implements via the
- * following actions:</p>
- * <ol>
- * <li>Verify that no snapshot by the requested name exists.</li>
- * <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
- * <li>Write the metadata for each index to a blob in that index's directory at
- * {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
- * </ol>
- * TODO: Remove this section once BwC logic it references is removed
- *
  * <h3>Writing Shard Data (Segments)</h3>
  *
- * <p>Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data
+ * <p>The snapshot process writes the actual shard data
  * to the repository by invoking {@link org.opensearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries
  * for the shards in the current snapshot. It is implemented as follows:</p>
  *
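For orientation, the steps this documentation describes are what run when a caller creates a snapshot through the cluster admin API. A rough sketch against the Java client, under the assumption that a repository is already registered (repository and snapshot names are placeholders):

import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.opensearch.client.Client;

// Hypothetical caller: starts a snapshot and blocks until the data-node shard
// writes and the final RepositoryData update have completed.
final class SnapshotTrigger {

    static void snapshotAll(Client client) {
        CreateSnapshotResponse response = client.admin()
            .cluster()
            .prepareCreateSnapshot("my-fs-repo", "snap-1") // placeholder names
            .setIndices("*")
            .setWaitForCompletion(true) // wait for shard data and metadata writes
            .get();
        assert response.getSnapshotInfo() != null;
    }
}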
diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java
index e7b1da91aba8f..bd2c11cf71ff1 100644
--- a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.rest.action.document;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.index.IndexRequest;
 import org.opensearch.action.support.ActiveShardCount;
@@ -128,7 +127,7 @@ public List<Route> routes() {
     @Override
     public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException {
         assert request.params().get("id") == null : "non-null id: " + request.params().get("id");
-        if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_5_0)) {
+        if (request.params().get("op_type") == null) {
             // default to op_type create
             request.params().put("op_type", "create");
         }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java
index d8526e684f391..6ca64c2186cb8 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.aggregations.bucket.composite;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.ParsingException;
 import org.opensearch.common.io.stream.StreamInput;
@@ -103,15 +102,6 @@ public static void writeTo(CompositeValuesSourceBuilder<?> builder, StreamOutput out) throws IOException {
         aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass());
         if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) {
             code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass());
-            if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) {
-                throw new IOException(
-                    "Attempting to serialize ["
-                        + builder.getClass().getSimpleName()
-                        + "] to a node with unsupported version ["
-                        + out.getVersion()
-                        + "]"
-                );
-            }
         }
     }
 
diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java
index 33c09e04bd4b0..501f1af63b3d9 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.aggregations.pipeline;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.Strings;
 import org.opensearch.common.io.stream.StreamInput;
@@ -114,11 +113,7 @@ public MovFnPipelineAggregationBuilder(StreamInput in) throws IOException {
         format = in.readOptionalString();
         gapPolicy = GapPolicy.readFrom(in);
         window = in.readInt();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
-            shift = in.readInt();
-        } else {
-            shift = 0;
-        }
+        shift = in.readInt();
     }
 
     @Override
@@ -128,9 +123,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeOptionalString(format);
         gapPolicy.writeTo(out);
         out.writeInt(window);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
-            out.writeInt(shift);
-        }
+        out.writeInt(shift);
     }
 
     /**
diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java
index 70652e7ddce44..7b20a796b8134 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.aggregations.pipeline;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.script.Script;
@@ -106,11 +105,7 @@ public MovFnPipelineAggregator(StreamInput in) throws IOException {
         gapPolicy = BucketHelpers.GapPolicy.readFrom(in);
         bucketsPath = in.readString();
         window = in.readInt();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
-            shift = in.readInt();
-        } else {
-            shift = 0;
-        }
+        shift = in.readInt();
     }
 
     @Override
@@ -120,9 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         gapPolicy.writeTo(out);
         out.writeString(bucketsPath);
         out.writeInt(window);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
-            out.writeInt(shift);
-        }
+        out.writeInt(shift);
     }
 
     @Override
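The MovFn builder and aggregator changes are symmetric on purpose: a StreamInput reader must consume fields in exactly the order the StreamOutput writer produced them, so the gate around shift has to disappear from both sides in the same commit. A self-contained round-trip illustration in plain java.io (field values are arbitrary):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Round-trip demo of the invariant behind the MovFn change: read order must
// mirror write order once the version branches are gone.
public final class ShiftRoundTrip {

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(10); // window
        out.writeInt(3);  // shift, now written unconditionally

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        int window = in.readInt();
        int shift = in.readInt(); // must mirror the write side, no version branch
        System.out.println("window=" + window + " shift=" + shift);
    }
}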
diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
index 4f672c9813d64..e53c2889f88e6 100644
--- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
@@ -90,7 +90,6 @@
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.util.concurrent.AbstractRunnable;
 import org.opensearch.index.Index;
 import org.opensearch.index.shard.ShardId;
 import org.opensearch.repositories.IndexId;
@@ -142,12 +141,6 @@
  */
 public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier {
 
-    /**
-     * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, Metadata)} to write snapshot metadata
-     * when starting a snapshot.
-     */
-    public static final Version NO_REPO_INITIALIZE_VERSION = LegacyESVersion.V_7_5_0;
-
     public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0;
 
     public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0;
@@ -156,7 +149,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 
     public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0;
 
-    public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.V_7_5_0;
+    public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099);
 
     public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0;
 
@@ -244,144 +237,6 @@ public SnapshotsService(
         }
     }
 
-    /**
-     * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of
-     * the snapshot.
-     * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards
-     * compatible path for initializing the snapshot in the repository is executed.
-     *
-     * @param request snapshot request
-     * @param listener snapshot completion listener
-     */
-    public void executeSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener<SnapshotInfo> listener) {
-        createSnapshotLegacy(
-            request,
-            ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure)
-        );
-    }
-
-    /**
-     * Initializes the snapshotting process.
-     * <p>
-     * This method is used by clients to start snapshot. It makes sure that there is no snapshots are currently running and
-     * creates a snapshot record in cluster state metadata.
-     * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards
-     * compatible path for initializing the snapshot in the repository is executed.
-     *
-     * @param request snapshot request
-     * @param listener snapshot creation listener
-     */
-    public void createSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener<Snapshot> listener) {
-        final String repositoryName = request.repository();
-        final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot());
-        validate(repositoryName, snapshotName);
-        final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot
-        Repository repository = repositoriesService.repository(request.repository());
-        final Map<String, Object> userMeta = repository.adaptUserMetadata(request.userMetadata());
-        clusterService.submitStateUpdateTask("create_snapshot [" + snapshotName + ']', new ClusterStateUpdateTask() {
-
-            private List<String> indices;
-
-            private SnapshotsInProgress.Entry newEntry;
-
-            @Override
-            public ClusterState execute(ClusterState currentState) {
-                validate(repositoryName, snapshotName, currentState);
-                SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
-                if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
-                    throw new ConcurrentSnapshotExecutionException(
-                        repositoryName,
-                        snapshotName,
-                        "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]"
-                    );
-                }
-                final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE);
-                if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.hasCleanupInProgress()) {
-                    throw new ConcurrentSnapshotExecutionException(
-                        repositoryName,
-                        snapshotName,
-                        "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]"
-                    );
-                }
-                SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
-                // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a
-                // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from
-                // the cluster state anyway in #applyClusterState.
-                if (snapshots != null
-                    && snapshots.entries()
-                        .stream()
-                        .anyMatch(
-                            entry -> (entry.state() == State.INIT && initializingSnapshots.contains(entry.snapshot()) == false) == false
-                        )) {
-                    throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running");
-                }
-                // Store newSnapshot here to be processed in clusterStateProcessed
-                indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request));
-
-                final List<String> dataStreams = indexNameExpressionResolver.dataStreamNames(
-                    currentState,
-                    request.indicesOptions(),
-                    request.indices()
-                );
-
-                logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices);
-                newEntry = new SnapshotsInProgress.Entry(
-                    new Snapshot(repositoryName, snapshotId),
-                    request.includeGlobalState(),
-                    request.partial(),
-                    State.INIT,
-                    Collections.emptyList(), // We'll resolve the list of indices when moving to the STARTED state in #beginSnapshot
-                    dataStreams,
-                    threadPool.absoluteTimeInMillis(),
-                    RepositoryData.UNKNOWN_REPO_GEN,
-                    ImmutableOpenMap.of(),
-                    userMeta,
-                    Version.CURRENT
-                );
-                initializingSnapshots.add(newEntry.snapshot());
-                snapshots = SnapshotsInProgress.of(Collections.singletonList(newEntry));
-                return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build();
-            }
-
-            @Override
-            public void onFailure(String source, Exception e) {
-                logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
-                if (newEntry != null) {
-                    initializingSnapshots.remove(newEntry.snapshot());
-                }
-                newEntry = null;
-                listener.onFailure(e);
-            }
-
-            @Override
-            public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) {
-                if (newEntry != null) {
-                    final Snapshot current = newEntry.snapshot();
-                    assert initializingSnapshots.contains(current);
-                    assert indices != null;
-                    beginSnapshot(newState, newEntry, request.partial(), indices, repository, new ActionListener<Snapshot>() {
-                        @Override
-                        public void onResponse(final Snapshot snapshot) {
-                            initializingSnapshots.remove(snapshot);
-                            listener.onResponse(snapshot);
-                        }
-
-                        @Override
-                        public void onFailure(final Exception e) {
-                            initializingSnapshots.remove(current);
-                            listener.onFailure(e);
-                        }
-                    });
-                }
-            }
-
-            @Override
-            public TimeValue timeout() {
-                return request.clusterManagerNodeTimeout();
-            }
-        });
-    }
-
     /**
      * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of
      * the snapshot.
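The deleted legacy path is one large instance of the cluster-state update pattern that the surviving createSnapshot path still relies on: compute a new ClusterState on the cluster-manager, publish it, and react only once it is committed. A bare-bones sketch of that pattern (the task source string and the no-op mutation are placeholders):

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateUpdateTask;
import org.opensearch.cluster.service.ClusterService;

// Minimal shape of a cluster-state update as used by the snapshot paths.
final class StateUpdateSketch {

    static void submit(ClusterService clusterService) {
        clusterService.submitStateUpdateTask("demo_update", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                // Placeholder: return a modified state; returning currentState is a no-op.
                return currentState;
            }

            @Override
            public void onFailure(String source, Exception e) {
                // Publication failed or this node lost the cluster-manager role.
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                // The new state is committed; safe to notify listeners here.
            }
        });
    }
}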
@@ -946,227 +801,6 @@ private static void validate(final String repositoryName, final String snapshotN
         }
     }
 
-    /**
-     * Starts snapshot.
-     * <p>
-     * Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed.
-     * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards
-     * compatible path for initializing the snapshot in the repository is executed.
-     *
-     * @param clusterState               cluster state
-     * @param snapshot                   snapshot meta data
-     * @param partial                    allow partial snapshots
-     * @param userCreateSnapshotListener listener
-     */
-    private void beginSnapshot(
-        final ClusterState clusterState,
-        final SnapshotsInProgress.Entry snapshot,
-        final boolean partial,
-        final List<String> indices,
-        final Repository repository,
-        final ActionListener<Snapshot> userCreateSnapshotListener
-    ) {
-        threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() {
-
-            boolean hadAbortedInitializations;
-
-            @Override
-            protected void doRun() {
-                assert initializingSnapshots.contains(snapshot.snapshot());
-                if (repository.isReadOnly()) {
-                    throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository");
-                }
-                final String snapshotName = snapshot.snapshot().getSnapshotId().getName();
-                final StepListener<RepositoryData> repositoryDataListener = new StepListener<>();
-                repository.getRepositoryData(repositoryDataListener);
-                repositoryDataListener.whenComplete(repositoryData -> {
-                    // check if the snapshot name already exists in the repository
-                    if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
-                        throw new InvalidSnapshotNameException(
-                            repository.getMetadata().name(),
-                            snapshotName,
-                            "snapshot with the same name already exists"
-                        );
-                    }
-                    if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) {
-                        // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager
-                        // failover to an
-                        // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a
-                        // valid
-                        // snapshot.
-                        repository.initializeSnapshot(
-                            snapshot.snapshot().getSnapshotId(),
-                            snapshot.indices(),
-                            metadataForSnapshot(snapshot, clusterState.metadata())
-                        );
-                    }
-
-                    logger.info("snapshot [{}] started", snapshot.snapshot());
-                    final Version version = minCompatibleVersion(clusterState.nodes().getMinNodeVersion(), repositoryData, null);
-                    if (indices.isEmpty()) {
-                        // No indices in this snapshot - we are done
-                        userCreateSnapshotListener.onResponse(snapshot.snapshot());
-                        endSnapshot(
-                            SnapshotsInProgress.startedEntry(
-                                snapshot.snapshot(),
-                                snapshot.includeGlobalState(),
-                                snapshot.partial(),
-                                Collections.emptyList(),
-                                Collections.emptyList(),
-                                threadPool.absoluteTimeInMillis(),
-                                repositoryData.getGenId(),
-                                ImmutableOpenMap.of(),
-                                snapshot.userMetadata(),
-                                version
-                            ),
-                            clusterState.metadata(),
-                            repositoryData
-                        );
-                        return;
-                    }
-                    clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() {
-
-                        @Override
-                        public ClusterState execute(ClusterState currentState) {
-                            SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
-                            List<SnapshotsInProgress.Entry> entries = new ArrayList<>();
-                            for (SnapshotsInProgress.Entry entry : snapshots.entries()) {
-                                if (entry.snapshot().equals(snapshot.snapshot()) == false) {
-                                    entries.add(entry);
-                                    continue;
-                                }
-
-                                if (entry.state() == State.ABORTED) {
-                                    entries.add(entry);
-                                    assert entry.shards().isEmpty();
-                                    hadAbortedInitializations = true;
-                                } else {
-                                    final List<IndexId> indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap());
-                                    // Replace the snapshot that was just initialized
-                                    ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards = shards(
-                                        snapshots,
-                                        currentState.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY),
-                                        currentState.metadata(),
-                                        currentState.routingTable(),
-                                        indexIds,
-                                        useShardGenerations(version),
-                                        repositoryData,
-                                        entry.repository()
-                                    );
-                                    if (!partial) {
-                                        Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(
-                                            shards,
-                                            currentState.metadata()
-                                        );
-                                        Set<String> missing = indicesWithMissingShards.v1();
-                                        Set<String> closed = indicesWithMissingShards.v2();
-                                        if (missing.isEmpty() == false || closed.isEmpty() == false) {
-                                            final StringBuilder failureMessage = new StringBuilder();
-                                            if (missing.isEmpty() == false) {
-                                                failureMessage.append("Indices don't have primary shards ");
-                                                failureMessage.append(missing);
-                                            }
-                                            if (closed.isEmpty() == false) {
-                                                if (failureMessage.length() > 0) {
-                                                    failureMessage.append("; ");
-                                                }
-                                                failureMessage.append("Indices are closed ");
-                                                failureMessage.append(closed);
-                                            }
-                                            entries.add(
-                                                new SnapshotsInProgress.Entry(
-                                                    entry,
-                                                    State.FAILED,
-                                                    indexIds,
-                                                    repositoryData.getGenId(),
-                                                    shards,
-                                                    version,
-                                                    failureMessage.toString()
-                                                )
-                                            );
-                                            continue;
-                                        }
-                                    }
-                                    entries.add(
-                                        new SnapshotsInProgress.Entry(
-                                            entry,
-                                            State.STARTED,
-                                            indexIds,
-                                            repositoryData.getGenId(),
-                                            shards,
-                                            version,
-                                            null
-                                        )
-                                    );
-                                }
-                            }
-                            return ClusterState.builder(currentState)
-                                .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries)))
-                                .build();
-                        }
-
-                        @Override
-                        public void onFailure(String source, Exception e) {
-                            logger.warn(
-                                () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()),
-                                e
-                            );
-                            removeFailedSnapshotFromClusterState(
-                                snapshot.snapshot(),
-                                e,
-                                null,
-                                new CleanupAfterErrorListener(userCreateSnapshotListener, e)
-                            );
-                        }
-
-                        @Override
-                        public void onNoLongerClusterManager(String source) {
-                            // We are not longer a cluster-manager - we shouldn't try to do any cleanup
-                            // The new cluster-manager will take care of it
-                            logger.warn(
-                                "[{}] failed to create snapshot - no longer a cluster-manager",
-                                snapshot.snapshot().getSnapshotId()
-                            );
-                            userCreateSnapshotListener.onFailure(
-                                new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization")
-                            );
-                        }
-
-                        @Override
-                        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                            // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
-                            // for processing. If client wants to wait for the snapshot completion, it can register snapshot
-                            // completion listener in this method. For the snapshot completion to work properly, the snapshot
-                            // should still exist when listener is registered.
-                            userCreateSnapshotListener.onResponse(snapshot.snapshot());
-
-                            if (hadAbortedInitializations) {
-                                final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE);
-                                assert snapshotsInProgress != null;
-                                final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot());
-                                assert entry != null;
-                                endSnapshot(entry, newState.metadata(), repositoryData);
-                            } else {
-                                endCompletedSnapshots(newState);
-                            }
-                        }
-                    });
-                }, this::onFailure);
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
-                removeFailedSnapshotFromClusterState(
-                    snapshot.snapshot(),
-                    e,
-                    null,
-                    new CleanupAfterErrorListener(userCreateSnapshotListener, e)
-                );
-            }
-        });
-    }
-
     private static class CleanupAfterErrorListener {
 
         private final ActionListener<Snapshot> userCreateSnapshotListener;
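The branch removed above keyed off the oldest node in the cluster: probing getMinNodeVersion() is the standard gate for mixed-version behavior, and once no supported node could fall below the cutover the whole method became dead code. The shape of that gate, condensed (the helper and parameter names are illustrative):

import org.opensearch.Version;
import org.opensearch.cluster.ClusterState;

// Shape of the mixed-version gate beginSnapshot used before this cleanup:
// run the legacy side effect only if some node predates the cutover version.
final class BwcGate {

    static boolean needsLegacyInit(ClusterState state, Version cutover) {
        return state.nodes().getMinNodeVersion().onOrAfter(cutover) == false;
    }
}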
diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java
index f588767d5336d..f8f512e5aefc6 100644
--- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java
+++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.index.query;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.geo.ShapeRelation;
 import org.opensearch.common.geo.builders.ShapeBuilder;
 import org.opensearch.test.geo.RandomShapeGenerator;
@@ -73,21 +72,12 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) {
             }
         }
         if (randomBoolean()) {
-            QueryShardContext context = createShardContext();
-            if (context.indexVersionCreated().onOrAfter(LegacyESVersion.V_7_5_0)) { // CONTAINS is only supported from version 7.5
-                if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) {
-                    builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS));
-                } else {
-                    builder.relation(
-                        randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS)
-                    );
-                }
+            if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) {
+                builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS));
             } else {
-                if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) {
-                    builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS));
-                } else {
-                    builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN));
-                }
+                builder.relation(
+                    randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS)
+                );
             }
         }
 
diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 6a8999a205be2..da44643de98a5 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -246,11 +246,6 @@ public void getRepositoryData(ActionListener<RepositoryData> listener) {
             listener.onResponse(null);
         }
 
-        @Override
-        public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata metadata) {
-
-        }
-
         @Override
         public void finalizeSnapshot(
             ShardGenerations shardGenerations,
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
index 5ed85fedc8cea..2a85fffa8699a 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
@@ -55,7 +55,6 @@
 
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -116,9 +115,6 @@ public void getRepositoryData(ActionListener<RepositoryData> listener) {
         );
     }
 
-    @Override
-    public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata metadata) {}
-
     @Override
     public void finalizeSnapshot(
         ShardGenerations shardGenerations,
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index f2b68b6fdaca0..bbf7763551bcd 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -531,8 +531,8 @@ protected boolean waitForAllSnapshotsWiped() {
 
     private void wipeCluster() throws Exception {
         // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping
-        if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced
-            // in version 7.4
+        if (nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced
+            // in version 7.4
             if (preserveSLMPoliciesUponCompletion() == false) {
                 // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping
                 deleteAllSLMPolicies();
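After this change the SLM cleanup keeps only an upper bound: every wire-compatible origin cluster is new enough to have SLM (introduced in 7.4), so only clusters already on OpenSearch 1.0.0 or later, where SLM no longer exists, skip the wipe. The guard reduces to a one-sided version check (the helper name is illustrative):

import org.opensearch.Version;

// One-sided guard left after the cleanup: wipe SLM policies unless the oldest
// node is already on OpenSearch 1.0.0+, where SLM does not exist.
final class SlmWipeGuard {

    static boolean shouldWipeSlmPolicies(Version oldestNodeVersion) {
        return oldestNodeVersion.before(Version.V_1_0_0);
    }
}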