diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 347b88ecc3e4d..1ba3ee562317a 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -48,3 +48,5 @@ BWC_VERSION:
   - "2.1.0"
   - "2.1.1"
   - "2.2.0"
+  - "2.2.1"
+  - "2.3.0"
diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml
index 73f3b6c2487d3..b8d3912c5864a 100644
--- a/.github/workflows/auto-release.yml
+++ b/.github/workflows/auto-release.yml
@@ -3,7 +3,7 @@ name: Releases
 on:
   push:
     tags:
-      - '*.*.*'
+      - '*'
 
 jobs:
 
@@ -12,11 +12,18 @@ jobs:
     permissions:
       contents: write
     steps:
+      - name: GitHub App token
+        id: github_app_token
+        uses: tibdex/github-app-token@v1.5.0
+        with:
+          app_id: ${{ secrets.APP_ID }}
+          private_key: ${{ secrets.APP_PRIVATE_KEY }}
+          installation_id: 22958780
       - name: Get tag
        id: tag
        uses: dawidd6/action-get-tag@v1
       - uses: actions/checkout@v2
       - uses: ncipollo/release-action@v1
        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ steps.github_app_token.outputs.token }}
          bodyFile: release-notes/opensearch.release-notes-${{steps.tag.outputs.tag}}.md
diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index dec5ee15d0bea..cbaa7fa10fbb6 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -2,9 +2,9 @@ name: Gradle Check (Jenkins)
 on:
   push:
     branches-ignore:
-      - 'backport/*'
-      - 'create-pull-request/*'
-      - 'dependabot/*'
+      - 'backport/**'
+      - 'create-pull-request/**'
+      - 'dependabot/**'
   pull_request_target:
     types: [opened, synchronize, reopened]
 
diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md
index ce84d9658a808..8c2a6b4889122 100644
--- a/DEVELOPER_GUIDE.md
+++ b/DEVELOPER_GUIDE.md
@@ -4,7 +4,8 @@
   - [Install Prerequisites](#install-prerequisites)
     - [JDK 11](#jdk-11)
     - [JDK 14](#jdk-14)
-    - [Runtime JDK](#runtime-jdk)
+    - [JDK 17](#jdk-17)
+    - [Custom Runtime JDK](#custom-runtime-jdk)
     - [Windows](#windows)
     - [Docker](#docker)
   - [Build](#build)
@@ -12,6 +13,7 @@
   - [Run OpenSearch](#run-opensearch)
   - [Use an Editor](#use-an-editor)
     - [IntelliJ IDEA](#intellij-idea)
+      - [Remote development using JetBrains Gateway](#remote-development-using-jetbrains-gateway)
     - [Visual Studio Code](#visual-studio-code)
     - [Eclipse](#eclipse)
   - [Project Layout](#project-layout)
@@ -35,6 +37,7 @@
     - [testImplementation](#testimplementation)
   - [Gradle Plugins](#gradle-plugins)
     - [Distribution Download Plugin](#distribution-download-plugin)
+    - [Creating fat-JAR of a Module](#creating-fat-jar-of-a-module)
   - [Misc](#misc)
     - [git-secrets](#git-secrets)
       - [Installation](#installation)
@@ -49,7 +52,7 @@
 - [Submitting Changes](#submitting-changes)
 - [Backports](#backports)
 - [LineLint](#linelint)
-  - [Lucene Snapshots](#lucene-snapshots)
+- [Lucene Snapshots](#lucene-snapshots)
 
 # Developer Guide
 
@@ -374,6 +377,42 @@ The Distribution Download plugin downloads the latest version of OpenSearch by d
 ./gradlew integTest -PcustomDistributionUrl="https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/1127/linux/x64/dist/opensearch-1.2.0-linux-x64.tar.gz"
 ```
 
+### Creating fat-JAR of a Module
+
+A fat-JAR (or uber-JAR) is a JAR that contains the classes from all the libraries on which your project depends, as well as the classes of the project itself.
+
+There might be cases where a developer would like to add some custom logic to the code of a module (or multiple modules) and generate a fat-JAR that can be used directly by the dependency management tool.
+For example, in [#3665](https://github.com/opensearch-project/OpenSearch/pull/3665) a developer wanted to provide a tentative patch as a fat-JAR to a consumer for changes made in the high-level REST client.
+
+Use the [Gradle Shadow plugin](https://imperceptiblethoughts.com/shadow/).
+Add the following to the `build.gradle` file of the module for which you want to create the fat-JAR, e.g. `client/rest-high-level/build.gradle`:
+
+```
+apply plugin: 'com.github.johnrengelman.shadow'
+```
+
+Run the `shadowJar` task:
+```
+./gradlew :client:rest-high-level:shadowJar
+```
+
+This will generate a fat-JAR in the `build/distributions` folder of the module, e.g. `./client/rest-high-level/build/distributions/opensearch-rest-high-level-client-1.4.0-SNAPSHOT.jar`.
+
+You can further customize the fat-JAR by configuring the plugin; more information about the Shadow plugin can be found [here](https://imperceptiblethoughts.com/shadow/).
+
+To use the generated JAR, install the JAR locally, e.g.
+```
+mvn install:install-file -Dfile=src/main/resources/opensearch-rest-high-level-client-1.4.0-SNAPSHOT.jar -DgroupId=org.opensearch.client -DartifactId=opensearch-rest-high-level-client -Dversion=1.4.0-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
+```
+
+Reference the installed JAR like any other Maven artifact, e.g.
+
+```
+<dependency>
+    <groupId>org.opensearch.client</groupId>
+    <artifactId>opensearch-rest-high-level-client</artifactId>
+    <version>1.4.0-SNAPSHOT</version>
+</dependency>
+```
 
 ## Misc
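As an editorial aside on the section above (not part of the patch itself): the Shadow plugin can also be configured so the fat-JAR neither overwrites the regular artifact nor collides with a consumer's own dependencies. A minimal sketch for `client/rest-high-level/build.gradle`; the `all` classifier and the relocation target prefix are arbitrary example names, not OpenSearch conventions:

```
shadowJar {
    // Publish the fat-JAR under its own classifier so it does not replace the regular JAR.
    archiveClassifier = 'all'
    // Relocate a bundled package so the consumer's own copy cannot clash with the shaded one.
    relocate 'org.apache.http', 'org.opensearch.shaded.apache.http'
}
```

With this in place, `./gradlew :client:rest-high-level:shadowJar` would produce `opensearch-rest-high-level-client-1.4.0-SNAPSHOT-all.jar` alongside the plain artifact.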
diff --git a/build.gradle b/build.gradle
index e0bb961ce14c2..ce5ea6cdd7e11 100644
--- a/build.gradle
+++ b/build.gradle
@@ -55,7 +55,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
   id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "6.9.0" apply false
+  id "com.diffplug.spotless" version "6.9.1" apply false
   id "org.gradle.test-retry" version "1.4.0" apply false
   id "test-report-aggregation"
   id 'jacoco-report-aggregation'
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
index c947a457d33ec..b14e93ecfd22d 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
@@ -75,9 +75,9 @@ import java.util.stream.Stream;
 
 public class DistroTestPlugin implements Plugin<Project> {
-    private static final String SYSTEM_JDK_VERSION = "11.0.15+10";
+    private static final String SYSTEM_JDK_VERSION = "11.0.16+8";
     private static final String SYSTEM_JDK_VENDOR = "adoptium";
-    private static final String GRADLE_JDK_VERSION = "17.0.3+7";
+    private static final String GRADLE_JDK_VERSION = "17.0.4+8";
     private static final String GRADLE_JDK_VENDOR = "adoptium";
 
     // all distributions used by distro tests. this is temporary until tests are per distribution
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java
index ef52adab6377a..0f5348d5a8dcf 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java
@@ -84,6 +84,8 @@ public class OpenSearchCluster implements TestClusterConfiguration, Named {
     private final ArchiveOperations archiveOperations;
     private int nodeIndex = 0;
 
+    private int zoneCount = 1;
+
     public OpenSearchCluster(
         String clusterName,
         Project project,
@@ -104,13 +106,21 @@ public OpenSearchCluster(
         this.bwcJdk = bwcJdk;
 
         // Always add the first node
-        addNode(clusterName + "-0");
+        String zone = hasZoneProperty() ? "zone-1" : "";
+        addNode(clusterName + "-0", zone);
         // configure the cluster name eagerly so all nodes know about it
         this.nodes.all((node) -> node.defaultConfig.put("cluster.name", safeName(clusterName)));
 
         addWaitForClusterHealth();
     }
 
+    public void setNumberOfZones(int zoneCount) {
+        if (zoneCount < 1) {
+            throw new IllegalArgumentException("Number of zones should be >= 1 but was " + zoneCount + " for " + this);
+        }
+        this.zoneCount = zoneCount;
+    }
+
     public void setNumberOfNodes(int numberOfNodes) {
         checkFrozen();
 
@@ -124,12 +134,31 @@ public void setNumberOfNodes(int numberOfNodes) {
             );
         }
 
-        for (int i = nodes.size(); i < numberOfNodes; i++) {
-            addNode(clusterName + "-" + i);
+        if (numberOfNodes < zoneCount) {
+            throw new IllegalArgumentException(
+                "Number of nodes should be >= zoneCount but was " + numberOfNodes + " when zoneCount is " + this.zoneCount
+            );
         }
+
+        if (hasZoneProperty()) {
+            int currentZone;
+            for (int i = nodes.size(); i < numberOfNodes; i++) {
+                currentZone = i % zoneCount + 1;
+                String zoneName = "zone-" + currentZone;
+                addNode(clusterName + "-" + i, zoneName);
+            }
+        } else {
+            for (int i = nodes.size(); i < numberOfNodes; i++) {
+                addNode(clusterName + "-" + i, "");
+            }
+        }
+    }
+
+    private boolean hasZoneProperty() {
+        return this.project.findProperty("numZones") != null;
     }
 
-    private void addNode(String nodeName) {
+    private void addNode(String nodeName, String zoneName) {
         OpenSearchNode newNode = new OpenSearchNode(
             path,
             nodeName,
@@ -138,7 +167,8 @@ private void addNode(String nodeName) {
             fileSystemOperations,
             archiveOperations,
             workingDirBase,
-            bwcJdk
+            bwcJdk,
+            zoneName
         );
         // configure the cluster name eagerly
         newNode.defaultConfig.put("cluster.name", safeName(clusterName));
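Editorial note on the hunk above: `i % zoneCount + 1` hands nodes out round-robin, so zones stay balanced regardless of how many nodes are requested. A self-contained sketch (hypothetical names) tracing the assignment for six nodes across three zones:

```
public class ZoneAssignmentDemo {
    public static void main(String[] args) {
        int zoneCount = 3;
        // Mirrors the loop in setNumberOfNodes(): node i lands in zone (i % zoneCount) + 1.
        for (int i = 0; i < 6; i++) {
            System.out.println("mycluster-" + i + " -> zone-" + (i % zoneCount + 1));
        }
        // Output: mycluster-0 -> zone-1, mycluster-1 -> zone-2, mycluster-2 -> zone-3,
        //         mycluster-3 -> zone-1, mycluster-4 -> zone-2, mycluster-5 -> zone-3
    }
}
```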
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java
index b051c15e81d6d..ab765efde7885 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java
@@ -32,6 +32,7 @@
 package org.opensearch.gradle.testclusters;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
 import org.opensearch.gradle.Architecture;
 import org.opensearch.gradle.DistributionDownloadPlugin;
 import org.opensearch.gradle.OpenSearchDistribution;
@@ -175,6 +176,8 @@ public class OpenSearchNode implements TestClusterConfiguration {
     private final Config legacyESConfig;
     private Config currentConfig;
 
+    private String zone;
+
     OpenSearchNode(
         String path,
         String name,
@@ -183,7 +186,8 @@ public class OpenSearchNode implements TestClusterConfiguration {
         FileSystemOperations fileSystemOperations,
         ArchiveOperations archiveOperations,
         File workingDirBase,
-        Jdk bwcJdk
+        Jdk bwcJdk,
+        String zone
     ) {
         this.path = path;
         this.name = name;
@@ -205,6 +209,7 @@ public class OpenSearchNode implements TestClusterConfiguration {
         opensearchConfig = Config.getOpenSearchConfig(workingDir);
         legacyESConfig = Config.getLegacyESConfig(workingDir);
         currentConfig = opensearchConfig;
+        this.zone = zone;
     }
 
     /*
@@ -1239,6 +1244,10 @@ private void createConfiguration() {
         baseConfig.put("path.logs", confPathLogs.toAbsolutePath().toString());
         baseConfig.put("path.shared_data", workingDir.resolve("sharedData").toString());
         baseConfig.put("node.attr.testattr", "test");
+        if (StringUtils.isNotBlank(zone)) {
+            baseConfig.put("cluster.routing.allocation.awareness.attributes", "zone");
+            baseConfig.put("node.attr.zone", zone);
+        }
         baseConfig.put("node.portsfile", "true");
         baseConfig.put("http.port", httpPort);
         if (getVersion().onOrAfter(Version.fromString("6.7.0"))) {
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index fc751d8461e92..4af1acfed0ab2 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,8 +1,8 @@
 opensearch = 3.0.0
-lucene = 9.3.0
+lucene = 9.4.0-snapshot-ddf0d0a
 
 bundled_jdk_vendor = adoptium
-bundled_jdk = 17.0.3+7
+bundled_jdk = 17.0.4+8
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java
index d293b979debb5..7ae8f8826c5a4 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java
@@ -157,7 +157,6 @@
 import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder;
-import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks;
 import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles;
@@ -169,7 +168,6 @@
 import org.opensearch.search.aggregations.metrics.ParsedAvg;
 import org.opensearch.search.aggregations.metrics.ParsedCardinality;
 import org.opensearch.search.aggregations.metrics.ParsedExtendedStats;
-import org.opensearch.search.aggregations.metrics.ParsedGeoBounds;
 import org.opensearch.search.aggregations.metrics.ParsedGeoCentroid;
 import org.opensearch.search.aggregations.metrics.ParsedHDRPercentileRanks;
 import org.opensearch.search.aggregations.metrics.ParsedHDRPercentiles;
@@ -2116,7 +2114,6 @@ static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
         map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c));
         map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c));
         map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c));
-        map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c));
         map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
         map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
         map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java
index efcc13921c398..3da0f81023f72 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java
@@ -885,7 +885,8 @@ public void testApiNamingConventions() throws Exception {
             "nodes.hot_threads",
             "nodes.usage",
             "nodes.reload_secure_settings",
-            "search_shards", };
+            "search_shards",
+            "remote_store.restore", };
         List<String> booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password");
         Set<String> deprecatedMethods = new HashSet<>();
         deprecatedMethods.add("indices.force_merge");
diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties
index a8c54137c7fd2..761478a9fdc6e 100644
--- a/distribution/docker/src/docker/config/log4j2.properties
+++ b/distribution/docker/src/docker/config/log4j2.properties
@@ -53,3 +53,13 @@ logger.index_indexing_slowlog.name = index.indexing.slowlog.index
 logger.index_indexing_slowlog.level = trace
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
 logger.index_indexing_slowlog.additivity = false
+
+appender.task_detailslog_rolling.type = Console
+appender.task_detailslog_rolling.name = task_detailslog_rolling
+appender.task_detailslog_rolling.layout.type = OpenSearchJsonLayout
+appender.task_detailslog_rolling.layout.type_name = task_detailslog
+
+logger.task_detailslog_rolling.name = task.detailslog
+logger.task_detailslog_rolling.level = trace
+logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling
+logger.task_detailslog_rolling.additivity = false
diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties
index 4820396c79eb7..bb27aaf2e22e6 100644
--- a/distribution/src/config/log4j2.properties
+++ b/distribution/src/config/log4j2.properties
@@ -195,3 +195,40 @@ logger.index_indexing_slowlog.level = trace
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
 logger.index_indexing_slowlog.additivity = false
+
+######## Task details log JSON ####################
+appender.task_detailslog_rolling.type = RollingFile
+appender.task_detailslog_rolling.name = task_detailslog_rolling
+appender.task_detailslog_rolling.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog.json
+appender.task_detailslog_rolling.filePermissions = rw-r-----
+appender.task_detailslog_rolling.layout.type = OpenSearchJsonLayout
+appender.task_detailslog_rolling.layout.type_name = task_detailslog
+appender.task_detailslog_rolling.layout.opensearchmessagefields=taskId,type,action,description,start_time_millis,resource_stats,metadata
+
+appender.task_detailslog_rolling.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog-%i.json.gz
+appender.task_detailslog_rolling.policies.type = Policies
+appender.task_detailslog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.task_detailslog_rolling.policies.size.size = 1GB
+appender.task_detailslog_rolling.strategy.type = DefaultRolloverStrategy
+appender.task_detailslog_rolling.strategy.max = 4
+#################################################
+######## Task details log - old style pattern ####
+appender.task_detailslog_rolling_old.type = RollingFile
+appender.task_detailslog_rolling_old.name = task_detailslog_rolling_old
+appender.task_detailslog_rolling_old.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog.log
+appender.task_detailslog_rolling_old.filePermissions = rw-r-----
+appender.task_detailslog_rolling_old.layout.type = PatternLayout
+appender.task_detailslog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.task_detailslog_rolling_old.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog-%i.log.gz
+appender.task_detailslog_rolling_old.policies.type = Policies
+appender.task_detailslog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.task_detailslog_rolling_old.policies.size.size = 1GB
+appender.task_detailslog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.task_detailslog_rolling_old.strategy.max = 4
+#################################################
+logger.task_detailslog_rolling.name = task.detailslog
+logger.task_detailslog_rolling.level = trace
+logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling
+logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling_old.ref = task_detailslog_rolling_old
+logger.task_detailslog_rolling.additivity = false
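Editorial note: both new appenders hang off a single `task.detailslog` logger registered at `trace`. Because these are plain Log4j 2 properties, an operator who does not want the extra files could plausibly silence them with a one-line override in the same file; a sketch, not part of this patch:

```
# Turn the task details logs off without removing the appender definitions.
logger.task_detailslog_rolling.level = off
```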
diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java
index 7f67e08c66b9e..96544d3297ad4 100644
--- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java
+++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java
@@ -67,7 +67,7 @@ public void testImportLog4jPropertiesTask() throws IOException {
         Properties properties = new Properties();
         properties.load(Files.newInputStream(taskInput.getOpenSearchConfig().resolve(ImportLog4jPropertiesTask.LOG4J_PROPERTIES)));
         assertThat(properties, is(notNullValue()));
-        assertThat(properties.entrySet(), hasSize(137));
+        assertThat(properties.entrySet(), hasSize(165));
         assertThat(properties.get("appender.rolling.layout.type"), equalTo("OpenSearchJsonLayout"));
         assertThat(
             properties.get("appender.deprecation_rolling.fileName"),
diff --git a/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties b/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties
index b9ad71121165a..4b92d3fc62376 100644
--- a/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties
+++ b/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties
@@ -176,3 +176,38 @@ logger.index_indexing_slowlog.level = trace
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
 logger.index_indexing_slowlog.additivity = false
+
+######## Task details log JSON ####################
+appender.task_detailslog_rolling.type = RollingFile
+appender.task_detailslog_rolling.name = task_detailslog_rolling
+appender.task_detailslog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog.json
+appender.task_detailslog_rolling.layout.type = ESJsonLayout
+appender.task_detailslog_rolling.layout.type_name = task_detailslog
+appender.task_detailslog_rolling.layout.esmessagefields=taskId,type,action,description,start_time_millis,resource_stats,metadata
+
+appender.task_detailslog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog-%i.json.gz
+appender.task_detailslog_rolling.policies.type = Policies
+appender.task_detailslog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.task_detailslog_rolling.policies.size.size = 1GB
+appender.task_detailslog_rolling.strategy.type = DefaultRolloverStrategy
+appender.task_detailslog_rolling.strategy.max = 4
+#################################################
+######## Task details log - old style pattern ####
+appender.task_detailslog_rolling_old.type = RollingFile
+appender.task_detailslog_rolling_old.name = task_detailslog_rolling_old
+appender.task_detailslog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog.log
+appender.task_detailslog_rolling_old.layout.type = PatternLayout
+appender.task_detailslog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.task_detailslog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog-%i.log.gz
+appender.task_detailslog_rolling_old.policies.type = Policies
+appender.task_detailslog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.task_detailslog_rolling_old.policies.size.size = 1GB
+appender.task_detailslog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.task_detailslog_rolling_old.strategy.max = 4
+#################################################
+logger.task_detailslog_rolling.name = task.detailslog
+logger.task_detailslog_rolling.level = trace
+logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling
+logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling_old.ref = task_detailslog_rolling_old
+logger.task_detailslog_rolling.additivity = false
diff --git a/gradle.properties b/gradle.properties
index 86af9ad62b1a4..73df0940ce181 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -19,7 +19,7 @@ org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m \
   --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED \
   --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED \
   --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED
-options.forkOptions.memoryMaximumSize=2g
+options.forkOptions.memoryMaximumSize=3g
 
 # Disable duplicate project id detection
 # See https://docs.gradle.org/current/userguide/upgrading_version_6.html#duplicate_project_names_may_cause_publication_to_fail
diff --git a/gradle/run.gradle b/gradle/run.gradle
index 5a1fed06c0ef7..639479e97d28f 100644
--- a/gradle/run.gradle
+++ b/gradle/run.gradle
@@ -31,9 +31,14 @@ import org.opensearch.gradle.testclusters.RunTask
 
 apply plugin: 'opensearch.testclusters'
 
+def numNodes = findProperty('numNodes') as Integer ?: 1
+def numZones = findProperty('numZones') as Integer ?: 1
+
 testClusters {
   runTask {
     testDistribution = 'archive'
+    if (numZones > 1) numberOfZones = numZones
+    if (numNodes > 1) numberOfNodes = numNodes
   }
 }
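Editorial note tying the zone changes together: `gradle/run.gradle` reads the two properties, `OpenSearchCluster` spreads the nodes round-robin across `zone-1..zone-N`, and `OpenSearchNode` then sets `cluster.routing.allocation.awareness.attributes` and `node.attr.zone` on each node. A plausible invocation (the values are examples only):

```
# Start a local 6-node cluster spread across 3 simulated zones.
./gradlew run -PnumNodes=6 -PnumZones=3
```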
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 24c164f0f1e12..58e9a16f424db 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -11,7 +11,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1
+distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219
diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle
index dd3aee61f7664..705fa17456a79 100644
--- a/modules/aggs-matrix-stats/build.gradle
+++ b/modules/aggs-matrix-stats/build.gradle
@@ -31,7 +31,7 @@ apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
   description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.'
-  classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationPlugin'
+  classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationPlugin.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationModulePlugin.java
similarity index 95%
rename from modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationPlugin.java
rename to modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationModulePlugin.java
index debeacffe321e..df1926282d500 100644
--- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationPlugin.java
+++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationModulePlugin.java
@@ -42,7 +42,7 @@
 
 import static java.util.Collections.singletonList;
 
-public class MatrixAggregationPlugin extends Plugin implements SearchPlugin {
+public class MatrixAggregationModulePlugin extends Plugin implements SearchPlugin {
     @Override
     public List<AggregationSpec> getAggregations() {
         return singletonList(
diff --git a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java
index e523c77e0786f..cc16b9b23b5d5 100644
--- a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java
+++ b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java
@@ -43,7 +43,7 @@
 import org.opensearch.search.aggregations.Aggregation;
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.ParsedAggregation;
-import org.opensearch.search.aggregations.matrix.MatrixAggregationPlugin;
+import org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin;
 import org.opensearch.search.aggregations.matrix.stats.InternalMatrixStats.Fields;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree;
 import org.opensearch.test.InternalAggregationTestCase;
@@ -64,7 +64,7 @@ public class InternalMatrixStatsTests extends InternalAggregationTestCase<InternalMatrixStats> {
 
     @Override
     protected List<SearchPlugin> getSearchPlugins() {
-        return Collections.singletonList(new MatrixAggregationPlugin());
+        return Collections.singletonList(new MatrixAggregationModulePlugin());
     }
 }
diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle
index be0acf7218c1e..58ecf79cda0d7 100644
--- a/modules/analysis-common/build.gradle
+++ b/modules/analysis-common/build.gradle
@@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
   description 'Adds "built in" analyzers to OpenSearch.'
-  classname 'org.opensearch.analysis.common.CommonAnalysisPlugin'
+  classname 'org.opensearch.analysis.common.CommonAnalysisModulePlugin'
   extendedPlugins = ['lang-painless']
 }
 
diff --git a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java
index 8c2f83bf83d85..785e597857825 100644
--- a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java
+++ b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java
@@ -48,7 +48,7 @@ public class QueryStringWithAnalyzersIT extends OpenSearchIntegTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(CommonAnalysisPlugin.class);
+        return Arrays.asList(CommonAnalysisModulePlugin.class);
     }
 
     /**
diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java
similarity index 99%
rename from modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java
rename to modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java
index c69917ed52be8..57865e15d523a 100644
--- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java
+++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java
@@ -167,9 +167,9 @@
 
 import static org.opensearch.plugins.AnalysisPlugin.requiresAnalysisSettings;
 
-public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin {
+public class CommonAnalysisModulePlugin extends Plugin implements AnalysisPlugin, ScriptPlugin {
 
-    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisPlugin.class);
+    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisModulePlugin.class);
 
     private final SetOnce<ScriptService> scriptService = new SetOnce<>();
 
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java
index 1a4651dc23fff..d107977237b9e 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java
@@ -51,7 +51,7 @@ public void testDefault() throws IOException {
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");
         String source = "Ansprüche";
@@ -68,7 +68,7 @@ public void testPreserveOriginal() throws IOException {
             .put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
             .put("index.analysis.filter.my_ascii_folding.preserve_original", true)
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
        );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");
         String source = "Ansprüche";
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java
index 9d54776755766..829ace512b5c8 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java
@@ -60,7 +60,7 @@ public void testDefault() throws IOException {
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .put("index.analysis.filter.my_word_delimiter.type", type)
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
         String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
@@ -78,7 +78,7 @@ public void testCatenateWords() throws IOException {
             .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
             .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
         String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
@@ -96,7 +96,7 @@ public void testCatenateNumbers() throws IOException {
             .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
             .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
         String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
@@ -115,7 +115,7 @@ public void testCatenateAll() throws IOException {
             .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
             .put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
         String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
@@ -132,7 +132,7 @@ public void testSplitOnCaseChange() throws IOException {
             .put("index.analysis.filter.my_word_delimiter.type", type)
             .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; @@ -149,7 +149,7 @@ public void testPreserveOriginal() throws IOException { .put("index.analysis.filter.my_word_delimiter.type", type) .put("index.analysis.filter.my_word_delimiter.preserve_original", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; @@ -186,7 +186,7 @@ public void testStemEnglishPossessive() throws IOException { .put("index.analysis.filter.my_word_delimiter.type", type) .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java index f2c0d9859cbe4..2f33194125652 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java @@ -52,7 +52,7 @@ public class CJKFilterFactoryTests extends OpenSearchTokenStreamTestCase { @Before public void setup() throws IOException { - analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE, new CommonAnalysisPlugin()); + analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE, new CommonAnalysisModulePlugin()); } public void testDefault() throws IOException { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java index 4cf0d1de28717..1c4db089565ff 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java @@ -50,7 +50,7 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase { public CommonAnalysisFactoryTests() { - super(new CommonAnalysisPlugin()); + super(new CommonAnalysisModulePlugin()); } @Override diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java index 04570be7a6f9e..713d61f294630 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java @@ -58,7 +58,7 @@ public void testDefault() throws IOException { .build(); try { - AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); Assert.fail("[common_words] or [common_words_path] is set"); } catch (IllegalArgumentException e) {} catch (IOException e) { fail("expected IAE"); @@ -333,7 +333,7 @@ private Path createHome() throws IOException { } private static 
     private static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(Settings settings) throws IOException {
-        return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
     }
 }
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java
index e5ce7c818f72b..32556db3939b8 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java
@@ -102,8 +102,8 @@ private List<String> analyze(Settings settings, String analyzerName, String text
     }
 
     private AnalysisModule createAnalysisModule(Settings settings) throws IOException {
-        CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin();
-        return new AnalysisModule(TestEnvironment.newEnvironment(settings), Arrays.asList(commonAnalysisPlugin, new AnalysisPlugin() {
+        CommonAnalysisModulePlugin commonAnalysisModulePlugin = new CommonAnalysisModulePlugin();
+        return new AnalysisModule(TestEnvironment.newEnvironment(settings), Arrays.asList(commonAnalysisModulePlugin, new AnalysisPlugin() {
             @Override
             public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
                 return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java
index eaf571e7469d6..1a78690dffcf7 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java
@@ -29,7 +29,7 @@ public class ConcatenateGraphTokenFilterFactoryTests extends OpenSearchTokenStreamTestCase {
     public void testSimpleTokenizerAndConcatenate() throws IOException {
         OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
             Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("concatenate_graph");
@@ -47,7 +47,7 @@ public void testTokenizerCustomizedSeparator() throws IOException {
             .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph")
             .put("index.analysis.filter.my_concatenate_graph.token_separator", "+")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph");
@@ -65,7 +65,7 @@ public void testTokenizerEmptySeparator() throws IOException {
             .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph")
             .put("index.analysis.filter.my_concatenate_graph.token_separator", "")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph");
@@ -83,7 +83,7 @@ public void testPreservePositionIncrementsDefault() throws IOException {
             .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph")
             .put("index.analysis.filter.my_concatenate_graph.token_separator", "+")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph");
@@ -106,7 +106,7 @@ public void testPreservePositionIncrementsTrue() throws IOException {
             .put("index.analysis.filter.my_concatenate_graph.token_separator", "+")
             .put("index.analysis.filter.my_concatenate_graph.preserve_position_increments", "true")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph");
@@ -132,7 +132,7 @@ public void testGraph() throws IOException {
             .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace")
             .put("index.analysis.analyzer.my_analyzer.filter", "my_word_delimiter, my_concatenate_graph")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         String source = "PowerShot Is AweSome";
@@ -166,7 +166,7 @@ public void testInvalidSeparator() {
                 .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph")
                 .put("index.analysis.filter.my_concatenate_graph.token_separator", "11")
                 .build(),
-                new CommonAnalysisPlugin()
+                new CommonAnalysisModulePlugin()
             )
         );
     }
@@ -187,7 +187,7 @@ public void testMaxGraphExpansion() throws IOException {
             .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace")
             .put("index.analysis.analyzer.my_analyzer.filter", "my_word_delimiter, my_concatenate_graph")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
 
         String source = "PowerShot Is AweSome";
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java
index 35915af8f263d..9bfc3a77e8c44 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java
@@ -74,7 +74,7 @@ public class DisableGraphQueryTests extends OpenSearchSingleNodeTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return Collections.singleton(CommonAnalysisPlugin.class);
+        return Collections.singleton(CommonAnalysisModulePlugin.class);
     }
 
     @Before
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java
index b3724d99f10ed..e62a9c52edc5c 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java
@@ -52,7 +52,7 @@ public void testDefault() throws IOException {
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram")
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_edge_ngram");
         String source = "foo";
@@ -69,7 +69,7 @@ public void testPreserveOriginal() throws IOException {
             .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram")
             .put("index.analysis.filter.my_edge_ngram.preserve_original", true)
             .build(),
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
        );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_edge_ngram");
         String source = "foo";
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java
index e77f895d05661..34fdec4135bfe 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java
@@ -60,7 +60,7 @@ private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws
             .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer)
             .build();
         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
-        return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new CommonAnalysisPlugin()))
+        return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new CommonAnalysisModulePlugin()))
             .getAnalysisRegistry()
             .build(idxSettings);
     }
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java
index 164068eab5e1f..fc5c9ce49bbc9 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java
@@ -49,7 +49,7 @@ public void testElisionFilterWithNoArticles() throws IOException {
 
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin())
+            () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin())
         );
 
         assertEquals("elision filter requires [articles] or [articles_path] setting", e.getMessage());
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java
index 57c959a4f0b65..74ed3cd79e753 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java
@@ -65,7 +65,7 @@ public class HighlighterWithAnalyzersTests extends OpenSearchIntegTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(CommonAnalysisPlugin.class);
+        return Arrays.asList(CommonAnalysisModulePlugin.class);
     }
 
     public void testNgramHighlightingWithBrokenPositions() throws IOException {
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java
index 0b094e52df8a1..41f27cd8b9136 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java
@@ -54,7 +54,7 @@ public void testLoadWithoutSettings() throws IOException {
         OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(
             createTempDir(),
             RESOURCE,
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep");
         Assert.assertNull(tokenFilter);
@@ -68,7 +68,7 @@ public void testLoadOverConfiguredSettings() {
             .put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]")
             .build();
         try {
-            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
             Assert.fail("path and array are configured");
         } catch (IllegalArgumentException e) {} catch (IOException e) {
             fail("expected IAE");
@@ -83,7 +83,7 @@ public void testKeepWordsPathSettings() {
             .build();
         try {
             // test our none existing setup is picked up
-            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
             fail("expected an exception due to non existent keep_words_path");
         } catch (IllegalArgumentException e) {} catch (IOException e) {
             fail("expected IAE");
@@ -92,7 +92,7 @@ public void testKeepWordsPathSettings() {
         settings = Settings.builder().put(settings).putList("index.analysis.filter.non_broken_keep_filter.keep_words", "test").build();
         try {
             // test our none existing setup is picked up
-            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
             fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] ");
         } catch (IllegalArgumentException e) {} catch (IOException e) {
             fail("expected IAE");
@@ -104,7 +104,7 @@ public void testCaseInsensitiveMapping() throws IOException {
         OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(
             createTempDir(),
             RESOURCE,
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keep_filter");
         assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
@@ -119,7 +119,7 @@ public void testCaseSensitiveMapping() throws IOException {
         OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(
             createTempDir(),
             RESOURCE,
-            new CommonAnalysisPlugin()
+            new CommonAnalysisModulePlugin()
         );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_case_sensitive_keep_filter");
         assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java
index 1f1021b4bfe66..eaab746be26dc 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java
@@ -63,7 +63,10 @@ public void testKeepTypesInclude() throws IOException {
             );
         }
         Settings settings = settingsBuilder.build();
-        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
+            settings,
+            new CommonAnalysisModulePlugin()
+        );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep_numbers");
         assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class));
         String source = "Hello 123 world";
@@ -80,7 +83,10 @@ public void testKeepTypesExclude() throws IOException {
             .putList(BASE_SETTING + "." + KeepTypesFilterFactory.KEEP_TYPES_KEY, new String[] { "", "" })
             .put(BASE_SETTING + "." + KeepTypesFilterFactory.KEEP_TYPES_MODE_KEY, KeepTypesFilterFactory.KeepTypesMode.EXCLUDE)
             .build();
-        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
+            settings,
+            new CommonAnalysisModulePlugin()
+        );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep_numbers");
         assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class));
         String source = "Hello 123 world";
@@ -99,7 +105,7 @@ public void testKeepTypesException() throws IOException {
             .build();
         IllegalArgumentException ex = expectThrows(
             IllegalArgumentException.class,
-            () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin())
+            () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin())
         );
         assertEquals("`keep_types` tokenfilter mode can only be [include] or [exclude] but was [bad_parameter].", ex.getMessage());
     }
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java
index 40e354785ddbe..f9c5a25444ed0 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java
@@ -65,7 +65,7 @@ public void testKeywordSet() throws IOException {
             .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem")
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .build();
-        TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keyword");
         assertThat(tokenFilter, instanceOf(KeywordMarkerTokenFilterFactory.class));
         TokenStream filter = tokenFilter.create(new WhitespaceTokenizer());
@@ -87,7 +87,7 @@ public void testKeywordPattern() throws IOException {
             .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem")
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .build();
-        TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keyword");
         assertThat(tokenFilter, instanceOf(KeywordMarkerTokenFilterFactory.class));
         TokenStream filter = tokenFilter.create(new WhitespaceTokenizer());
@@ -112,7 +112,7 @@ public void testCannotSpecifyBothKeywordsAndPattern() throws IOException {
             .build();
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin())
+            () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin())
         );
         assertEquals("cannot specify both `keywords_pattern` and `keywords` or `keywords_path`", e.getMessage());
     }
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java
index 76471fd98e5fe..99708045b0be2 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java
@@ -119,7 +119,7 @@ public void testSettings() throws IOException {
     }
 
     private static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(Settings settings) throws IOException {
-        return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin());
     }
 }
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java
index 390e36c4ca0a0..41f60e1264b5c 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java
@@ -43,7 +43,7 @@ public class MassiveWordListTests extends OpenSearchSingleNodeTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return Collections.singleton(CommonAnalysisPlugin.class);
+        return Collections.singleton(CommonAnalysisModulePlugin.class);
     }
 
     public void testCreateIndexWithMassiveWordList() {
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java
index 514c53f17456c..e86a939dc857b 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java
@@ -50,7 +50,10 @@ public void testDefault() throws IOException {
         int default_bucket_size = 512;
         int default_hash_set_size = 1;
         Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
-        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
+            settings,
+            new CommonAnalysisModulePlugin()
+        );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("min_hash");
         String source = "the quick brown fox";
         Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -70,7 +73,10 @@ public void testSettings() throws IOException {
             .put("index.analysis.filter.test_min_hash.with_rotation", false)
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .build();
-        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
+        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
+            settings,
+            new CommonAnalysisModulePlugin()
+        );
         TokenFilterFactory tokenFilter = analysis.tokenFilter.get("test_min_hash");
         String source = "sushi";
         Tokenizer tokenizer = new WhitespaceTokenizer();
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java index 167f61464da1b..e9dfa299871e5 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java @@ -65,7 +65,7 @@ public void testMultiplexingFilter() throws IOException { IndexAnalyzers indexAnalyzers = new AnalysisModule( TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) + Collections.singletonList(new CommonAnalysisModulePlugin()) ).getAnalysisRegistry().build(idxSettings); try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) { @@ -99,7 +99,7 @@ public void testMultiplexingNoOriginal() throws IOException { IndexAnalyzers indexAnalyzers = new AnalysisModule( TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) + Collections.singletonList(new CommonAnalysisModulePlugin()) ).getAnalysisRegistry().build(idxSettings); try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java index 85090648096d1..e5f558b1c2fdd 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java @@ -52,7 +52,7 @@ public void testDefault() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("index.analysis.filter.my_ngram.type", "ngram") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ngram"); String source = "foo"; @@ -69,7 +69,7 @@ public void testPreserveOriginal() throws IOException { .put("index.analysis.filter.my_ngram.type", "ngram") .put("index.analysis.filter.my_ngram.preserve_original", true) .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ngram"); String source = "foo"; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java index 5cd18a5b01f18..a3dc75fd37671 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java @@ -55,7 +55,7 @@ public void testPatternCaptureTokenFilter() throws Exception { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; + IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisModulePlugin()).indexAnalyzers; NamedAnalyzer analyzer1 = indexAnalyzers.get("single"); assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[] { "foobarbaz", "foobar", "foo" }); 
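The hunks above all make the same mechanical substitution: test helpers that previously instantiated `CommonAnalysisPlugin` now pass `CommonAnalysisModulePlugin`. A minimal sketch of the call pattern these tests share, assuming the OpenSearch test scaffolding (`AnalysisTestsHelper`, `OpenSearchTestCase`) that these files already import; the settings and filter name are illustrative, taken from the ngram test above:

```
// Build index settings, then hand the renamed module plugin to the analysis helper.
Settings settings = Settings.builder()
    .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
    .put("index.analysis.filter.my_ngram.type", "ngram")
    .build();
// The module plugin supplies the token filter registry used to build the test analysis.
OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
    settings,
    new CommonAnalysisModulePlugin() // formerly CommonAnalysisPlugin
);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ngram");
```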
diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java index c16f4f37846ec..b31f4020ef627 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -81,7 +81,7 @@ public FactoryType compile(Script script, ScriptContext FactoryType compile(Script script, ScriptContext { indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; } + () -> { indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisModulePlugin()).indexAnalyzers; } ); } @@ -259,7 +259,7 @@ public void testTokenFiltersBypassSynonymAnalysis() throws IOException { String[] bypassingFactories = new String[] { "dictionary_decompounder" }; - CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); for (String factory : bypassingFactories) { TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings); TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings); @@ -294,7 +294,7 @@ public void testPreconfiguredTokenFilters() throws IOException { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); for (PreConfiguredTokenFilter tf : plugin.getPreConfiguredTokenFilters()) { if (disallowedFilters.contains(tf.getName())) { @@ -319,7 +319,7 @@ public void testDisallowedTokenFilters() throws IOException { .put("output_unigrams", "true") .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); String[] disallowedFactories = new String[] { "multiplexer", diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java index 3ea9c526052f2..a5419df92db07 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java @@ -49,7 +49,10 @@ public void testNormalizer() throws IOException { .putList("index.analysis.normalizer.my_normalizer.filter", "trim") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); assertNull(analysis.indexAnalyzers.get("my_normalizer")); NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer"); assertNotNull(normalizer); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java 
b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index 102182f381128..7a717fe7fe22e 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -64,7 +64,7 @@ public void testMultiTerms() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_all", "true") .put("index.analysis.filter.my_word_delimiter.preserve_original", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); @@ -114,7 +114,7 @@ public void testPartsAndCatenate() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; @@ -146,7 +146,7 @@ public void testAdjustingOffsets() throws IOException { .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") .put("index.analysis.filter.my_word_delimiter.adjust_offsets", "false") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; @@ -181,7 +181,10 @@ public void testIgnoreKeywords() throws IOException { .put("index.analysis.analyzer.my_analyzer.filter", "my_keyword, my_word_delimiter") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); String source = "PowerShot PowerHungry"; int[] expectedStartOffsets = new int[] { 0, 5, 10, 15 }; int[] expectedEndOffsets = new int[] { 5, 9, 15, 21 }; @@ -191,7 +194,7 @@ public void testIgnoreKeywords() throws IOException { // test with keywords but ignore_keywords is set as true settings = Settings.builder().put(settings).put("index.analysis.filter.my_word_delimiter.ignore_keywords", "true").build(); - analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); analyzer = analysis.indexAnalyzers.get("my_analyzer"); expectedStartOffsets = new int[] { 0, 5, 10 }; expectedEndOffsets = new int[] { 5, 9, 21 }; @@ -213,7 +216,7 @@ public void testPreconfiguredFilter() throws IOException { try ( IndexAnalyzers indexAnalyzers = new AnalysisModule( TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) + Collections.singletonList(new CommonAnalysisModulePlugin()) ).getAnalysisRegistry().build(idxSettings) ) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java index ea37fd5ce9546..b6e064f72630a 100644 --- 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java @@ -58,7 +58,7 @@ public void testPartsAndCatenate() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index d78e83ec7c4c6..0b8e623c24ac6 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -28,10 +28,11 @@ * under the License. */ apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Placeholder plugin for geospatial features in OpenSearch. only registers geo_shape field mapper for now' - classname 'org.opensearch.geo.GeoPlugin' + description 'Plugin for geospatial features in OpenSearch. Registers the geo_shape field mapper and the GeoBounds aggregation on geo_shape and geo_point fields' + classname 'org.opensearch.geo.GeoModulePlugin' } restResources { @@ -42,4 +43,3 @@ restResources { artifacts { restTests(project.file('src/yamlRestTest/resources/rest-api-spec/test')) } -test.enabled = false diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java new file mode 100644 index 0000000000000..7dc6f2c1b89b7 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo; + +import org.opensearch.index.mapper.GeoShapeFieldMapper; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.TestGeoShapeFieldMapperPlugin; + +import java.util.Collection; +import java.util.Collections; + +/** + * This is the base class for all the Geo-related integration tests. Use this class to add the features and settings + * for the test cluster on which the integration tests run. + */ +public abstract class GeoModulePluginIntegTestCase extends OpenSearchIntegTestCase { + /** + * Returns a collection of plugins that should be loaded on each node for the integration tests. As this + * geo plugin is not packaged in a zip, we need to load it before the tests run. + * + * @return List of {@link Plugin} + */ + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } + + /** + * This was added as a backdoor to mock the implementation of {@link GeoShapeFieldMapper}, which was coming from + * {@link GeoModulePlugin}. The mock implementation is {@link TestGeoShapeFieldMapperPlugin}. Now that we are using the + * {@link GeoModulePlugin} in our integration tests, we need to override this functionality to avoid a duplicate mapper + * error.
+ * + * @return boolean + */ + @Override + protected boolean addMockGeoShapeFieldMapper() { + return false; + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java new file mode 100644 index 0000000000000..2ac73728b2dab --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.search.aggregations.metrics.GeoBounds; +import org.opensearch.geo.tests.common.AggregationBuilders; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class MissingValueIT extends GeoModulePluginIntegTestCase { + + @Override + protected void setupSuiteScopeCluster() throws Exception { + assertAcked(prepareCreate("idx").setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get()); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource(), + client().prepareIndex("idx") + .setId("2") + .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2") + ); + } + + public void testUnmappedGeoBounds() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(AggregationBuilders.geoBounds("bounds").field("non_existing_field").missing("2,1")) + .get(); + assertSearchResponse(response); + GeoBounds bounds = response.getAggregations().get("bounds"); + assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5)); + assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5)); + assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); + assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); + } + + public void testGeoBounds() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(AggregationBuilders.geoBounds("bounds").field("location").missing("2,1")) + .get(); + assertSearchResponse(response); + GeoBounds bounds = response.getAggregations().get("bounds"); + assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5)); + assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5)); + assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); + assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java new file mode 100644 index 0000000000000..0065cca7d6101 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java @@ -0,0 +1,295 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license 
or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.metrics; + +import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.ObjectIntMap; +import com.carrotsearch.hppc.ObjectObjectHashMap; +import com.carrotsearch.hppc.ObjectObjectMap; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.Strings; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.geometry.utils.Geohash; +import org.opensearch.search.SearchHit; +import org.opensearch.search.sort.SortBuilders; +import org.opensearch.search.sort.SortOrder; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +/** + * This is the base class for all Geo aggregation integration tests. It is similar to the class we have in the server + * folder of the OpenSearch repo. As part of moving the Geo-based aggregations into a separate module and plugin, we need + * to copy the code because we cannot depend on that class. + * GitHub issue + */ +public abstract class AbstractGeoAggregatorTestCaseModulePlugin extends GeoModulePluginIntegTestCase { + + protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; + protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; + protected static final String NUMBER_FIELD_NAME = "l_values"; + protected static final String UNMAPPED_IDX_NAME = "idx_unmapped"; + protected static final String IDX_NAME = "idx"; + protected static final String EMPTY_IDX_NAME = "empty_idx"; + protected static final String DATELINE_IDX_NAME = "dateline_idx"; + protected static final String HIGH_CARD_IDX_NAME = "high_card_idx"; + protected static final String IDX_ZERO_NAME = "idx_zero"; + + protected static int numDocs; + protected static int numUniqueGeoPoints; + protected static GeoPoint[] singleValues, multiValues; + protected static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight, singleCentroid, multiCentroid, + unmappedCentroid; + protected static ObjectIntMap<String> expectedDocCountsForGeoHash = null; + protected static ObjectObjectMap<String, GeoPoint> expectedCentroidsForGeoHash = null; + protected static final double GEOHASH_TOLERANCE = 1E-5D; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex(UNMAPPED_IDX_NAME); + assertAcked( + prepareCreate(IDX_NAME).setMapping( + SINGLE_VALUED_FIELD_NAME, + "type=geo_point", + MULTI_VALUED_FIELD_NAME, + "type=geo_point", + NUMBER_FIELD_NAME, + "type=long", + "tag", + "type=keyword" + ) + ); + + singleTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY); + singleBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY); + multiTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY); + multiBottomRight = new GeoPoint(Double.POSITIVE_INFINITY,
Double.NEGATIVE_INFINITY); + singleCentroid = new GeoPoint(0, 0); + multiCentroid = new GeoPoint(0, 0); + unmappedCentroid = new GeoPoint(0, 0); + + numDocs = randomIntBetween(6, 20); + numUniqueGeoPoints = randomIntBetween(1, numDocs); + expectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2); + expectedCentroidsForGeoHash = new ObjectObjectHashMap<>(numDocs * 2); + + singleValues = new GeoPoint[numUniqueGeoPoints]; + for (int i = 0; i < singleValues.length; i++) { + singleValues[i] = RandomGeoGenerator.randomPoint(random()); + updateBoundsTopLeft(singleValues[i], singleTopLeft); + updateBoundsBottomRight(singleValues[i], singleBottomRight); + } + + multiValues = new GeoPoint[numUniqueGeoPoints]; + for (int i = 0; i < multiValues.length; i++) { + multiValues[i] = RandomGeoGenerator.randomPoint(random()); + updateBoundsTopLeft(multiValues[i], multiTopLeft); + updateBoundsBottomRight(multiValues[i], multiBottomRight); + } + + List builders = new ArrayList<>(); + + GeoPoint singleVal; + final GeoPoint[] multiVal = new GeoPoint[2]; + double newMVLat, newMVLon; + for (int i = 0; i < numDocs; i++) { + singleVal = singleValues[i % numUniqueGeoPoints]; + multiVal[0] = multiValues[i % numUniqueGeoPoints]; + multiVal[1] = multiValues[(i + 1) % numUniqueGeoPoints]; + builders.add( + client().prepareIndex(IDX_NAME) + .setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, singleVal.lon(), singleVal.lat()) + .startArray(MULTI_VALUED_FIELD_NAME) + .startArray() + .value(multiVal[0].lon()) + .value(multiVal[0].lat()) + .endArray() + .startArray() + .value(multiVal[1].lon()) + .value(multiVal[1].lat()) + .endArray() + .endArray() + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) + ); + singleCentroid = singleCentroid.reset( + singleCentroid.lat() + (singleVal.lat() - singleCentroid.lat()) / (i + 1), + singleCentroid.lon() + (singleVal.lon() - singleCentroid.lon()) / (i + 1) + ); + newMVLat = (multiVal[0].lat() + multiVal[1].lat()) / 2d; + newMVLon = (multiVal[0].lon() + multiVal[1].lon()) / 2d; + multiCentroid = multiCentroid.reset( + multiCentroid.lat() + (newMVLat - multiCentroid.lat()) / (i + 1), + multiCentroid.lon() + (newMVLon - multiCentroid.lon()) / (i + 1) + ); + } + + assertAcked(prepareCreate(EMPTY_IDX_NAME).setMapping(SINGLE_VALUED_FIELD_NAME, "type=geo_point")); + + assertAcked( + prepareCreate(DATELINE_IDX_NAME).setMapping( + SINGLE_VALUED_FIELD_NAME, + "type=geo_point", + MULTI_VALUED_FIELD_NAME, + "type=geo_point", + NUMBER_FIELD_NAME, + "type=long", + "tag", + "type=keyword" + ) + ); + + GeoPoint[] geoValues = new GeoPoint[5]; + geoValues[0] = new GeoPoint(38, 178); + geoValues[1] = new GeoPoint(12, -179); + geoValues[2] = new GeoPoint(-24, 170); + geoValues[3] = new GeoPoint(32, -175); + geoValues[4] = new GeoPoint(-11, 178); + + for (int i = 0; i < 5; i++) { + builders.add( + client().prepareIndex(DATELINE_IDX_NAME) + .setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, geoValues[i].lon(), geoValues[i].lat()) + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) + ); + } + assertAcked( + prepareCreate(HIGH_CARD_IDX_NAME).setSettings(Settings.builder().put("number_of_shards", 2)) + .setMapping( + SINGLE_VALUED_FIELD_NAME, + "type=geo_point", + MULTI_VALUED_FIELD_NAME, + "type=geo_point", + NUMBER_FIELD_NAME, + "type=long,store=true", + "tag", + "type=keyword" + ) + ); + + for (int i = 0; i < 2000; i++) { + singleVal = singleValues[i % numUniqueGeoPoints]; + builders.add( + 
client().prepareIndex(HIGH_CARD_IDX_NAME) + .setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, singleVal.lon(), singleVal.lat()) + .startArray(MULTI_VALUED_FIELD_NAME) + .startArray() + .value(multiValues[i % numUniqueGeoPoints].lon()) + .value(multiValues[i % numUniqueGeoPoints].lat()) + .endArray() + .startArray() + .value(multiValues[(i + 1) % numUniqueGeoPoints].lon()) + .value(multiValues[(i + 1) % numUniqueGeoPoints].lat()) + .endArray() + .endArray() + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) + ); + updateGeohashBucketsCentroid(singleVal); + } + + builders.add( + client().prepareIndex(IDX_ZERO_NAME) + .setSource(jsonBuilder().startObject().array(SINGLE_VALUED_FIELD_NAME, 0.0, 1.0).endObject()) + ); + assertAcked(prepareCreate(IDX_ZERO_NAME).setMapping(SINGLE_VALUED_FIELD_NAME, "type=geo_point")); + + indexRandom(true, builders); + ensureSearchable(); + + // Added to debug a test failure where the terms aggregation seems to be reporting two documents with the same + // value for NUMBER_FIELD_NAME. This will check that after random indexing each document only has 1 value for + // NUMBER_FIELD_NAME and it is the correct value. Following this initial change, it seems that this call was getting + // more than 2000 hits (actual value was 2059) so now it will also check to ensure all hits have the correct index and type. + SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) + .addStoredField(NUMBER_FIELD_NAME) + .addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME).order(SortOrder.ASC)) + .setSize(5000) + .get(); + assertSearchResponse(response); + long totalHits = response.getHits().getTotalHits().value; + XContentBuilder builder = XContentFactory.jsonBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + logger.info("Full high_card_idx Response Content:\n{ {} }", Strings.toString(builder)); + for (int i = 0; i < totalHits; i++) { + SearchHit searchHit = response.getHits().getAt(i); + assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx")); + DocumentField hitField = searchHit.field(NUMBER_FIELD_NAME); + + assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1)); + Long value = hitField.getValue(); + assertThat("Hit " + i + " has wrong value", value.intValue(), equalTo(i)); + } + assertThat(totalHits, equalTo(2000L)); + } + + private void updateGeohashBucketsCentroid(final GeoPoint location) { + String hash = Geohash.stringEncode(location.lon(), location.lat(), Geohash.PRECISION); + for (int precision = Geohash.PRECISION; precision > 0; --precision) { + final String h = hash.substring(0, precision); + expectedDocCountsForGeoHash.put(h, expectedDocCountsForGeoHash.getOrDefault(h, 0) + 1); + expectedCentroidsForGeoHash.put(h, updateHashCentroid(h, location)); + } + } + + private GeoPoint updateHashCentroid(String hash, final GeoPoint location) { + GeoPoint centroid = expectedCentroidsForGeoHash.getOrDefault(hash, null); + if (centroid == null) { + return new GeoPoint(location.lat(), location.lon()); + } + final int docCount = expectedDocCountsForGeoHash.get(hash); + final double newLon = centroid.lon() + (location.lon() - centroid.lon()) / docCount; + final double newLat = centroid.lat() + (location.lat() - centroid.lat()) / docCount; + return centroid.reset(newLat, newLon); + } + + private void updateBoundsBottomRight(GeoPoint geoPoint, GeoPoint currentBound) { + if (geoPoint.lat() < currentBound.lat()) {
currentBound.resetLat(geoPoint.lat()); + } + if (geoPoint.lon() > currentBound.lon()) { + currentBound.resetLon(geoPoint.lon()); + } + } + + private void updateBoundsTopLeft(GeoPoint geoPoint, GeoPoint currentBound) { + if (geoPoint.lat() > currentBound.lat()) { + currentBound.resetLat(geoPoint.lat()); + } + if (geoPoint.lon() < currentBound.lon()) { + currentBound.resetLon(geoPoint.lon()); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoBoundsIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java similarity index 97% rename from server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoBoundsIT.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java index 3af3b9e5212f8..5cbd98a4936e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoBoundsIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; @@ -43,21 +43,21 @@ import java.util.List; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.search.aggregations.AggregationBuilders.geoBounds; -import static org.opensearch.search.aggregations.AggregationBuilders.global; -import static org.opensearch.search.aggregations.AggregationBuilders.terms; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.closeTo; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.aggregations.AggregationBuilders.global; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.opensearch.geo.tests.common.AggregationBuilders.geoBounds; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoBoundsIT extends AbstractGeoTestCase { +public class GeoBoundsIT extends AbstractGeoAggregatorTestCaseModulePlugin { private static final String aggName = "geoBounds"; public void testSingleValuedField() throws Exception { @@ -226,7 +226,8 @@ public void testSingleValuedFieldNearDateLineWrapLongitude() throws Exception { } /** - * This test forces the {@link GeoBoundsAggregator} to resize the {@link BigArray}s it uses to ensure they are resized correctly + * This test forces the {@link GeoBoundsAggregator} to resize the {@link BigArray}s it uses to ensure they are + * resized correctly */ public void testSingleValuedFieldAsSubAggToHighCardTermsAgg() { SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) diff --git 
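Before the next hunk, it is worth spelling out the centroid bookkeeping used in `setupSuiteScopeCluster` above: the expected centroids are maintained with a running-mean update, so no separate sum ever needs to be kept. A compact restatement as a hypothetical helper (assuming only `GeoPoint` from `org.opensearch.common.geo`):

```
// After processing point i (0-based), centroid equals the exact arithmetic mean of
// points[0..i]; this mirrors the singleCentroid/multiCentroid updates above.
static GeoPoint runningMeanCentroid(GeoPoint[] points) {
    GeoPoint centroid = new GeoPoint(0, 0);
    for (int i = 0; i < points.length; i++) {
        centroid = centroid.reset(
            centroid.lat() + (points[i].lat() - centroid.lat()) / (i + 1),
            centroid.lon() + (points[i].lon() - centroid.lon()) / (i + 1)
        );
    }
    return centroid;
}
```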
a/modules/geo/src/main/java/org/opensearch/geo/GeoPlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java similarity index 63% rename from modules/geo/src/main/java/org/opensearch/geo/GeoPlugin.java rename to modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 9b898da33bb12..64aac66b7eef3 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoPlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -32,18 +32,36 @@ package org.opensearch.geo; +import org.opensearch.geo.search.aggregations.metrics.GeoBounds; +import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; +import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SearchPlugin; import java.util.Collections; +import java.util.List; import java.util.Map; -public class GeoPlugin extends Plugin implements MapperPlugin { +public class GeoModulePlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map<String, Mapper.TypeParser> getMappers() { return Collections.singletonMap(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser()); } + + /** + * Registers the {@link GeoBounds} aggregation on the GeoPoint field. + */ + @Override + public List<AggregationSpec> getAggregations() { + final AggregationSpec spec = new AggregationSpec( + GeoBoundsAggregationBuilder.NAME, + GeoBoundsAggregationBuilder::new, + GeoBoundsAggregationBuilder.PARSER + ).addResultReader(InternalGeoBounds::new).setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators); + return Collections.singletonList(spec); + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java new file mode 100644 index 0000000000000..4a39fa1da04eb --- /dev/null +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.metrics; + +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.DoubleArray; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.metrics.MetricsAggregator; +import org.opensearch.search.aggregations.support.ValuesSource; +import org.opensearch.search.aggregations.support.ValuesSourceConfig; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Map; + +/** + * Abstract class for computing the {@link GeoBounds} aggregation over fields of type geo_shape and geo_point. + * + * @param <T> Class extending the {@link ValuesSource} which will provide the data on which the aggregation will happen.
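With the `AggregationSpec` registered above, the aggregation is addressed by name exactly as it was before the move out of the server module. A hedged usage sketch, mirroring the calls in `MissingValueIT`; index and field names are illustrative:

```
// Build and run a geo_bounds aggregation against a geo_point field; the registered
// spec wires together the builder, the result reader, and the aggregator.
SearchResponse response = client().prepareSearch("idx")
    .addAggregation(new GeoBoundsAggregationBuilder("bounds").field("location").wrapLongitude(true))
    .get();
GeoBounds bounds = response.getAggregations().get("bounds");
GeoPoint topLeft = bounds.topLeft();         // north-west corner of the box
GeoPoint bottomRight = bounds.bottomRight(); // south-east corner of the box
```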
+ * @opensearch.internal + */ +public abstract class AbstractGeoBoundsAggregator extends MetricsAggregator { + + protected final T valuesSource; + protected final boolean wrapLongitude; + protected DoubleArray tops; + protected DoubleArray bottoms; + protected DoubleArray posLefts; + protected DoubleArray posRights; + protected DoubleArray negLefts; + protected DoubleArray negRights; + + @SuppressWarnings("unchecked") + protected AbstractGeoBoundsAggregator( + String name, + SearchContext searchContext, + Aggregator aggregator, + ValuesSourceConfig valuesSourceConfig, + boolean wrapLongitude, + Map metaData + ) throws IOException { + super(name, searchContext, aggregator, metaData); + this.wrapLongitude = wrapLongitude; + valuesSource = valuesSourceConfig.hasValues() ? (T) valuesSourceConfig.getValuesSource() : null; + + if (valuesSource != null) { + final BigArrays bigArrays = context.bigArrays(); + tops = bigArrays.newDoubleArray(1, false); + tops.fill(0, tops.size(), Double.NEGATIVE_INFINITY); + bottoms = bigArrays.newDoubleArray(1, false); + bottoms.fill(0, bottoms.size(), Double.POSITIVE_INFINITY); + posLefts = bigArrays.newDoubleArray(1, false); + posLefts.fill(0, posLefts.size(), Double.POSITIVE_INFINITY); + posRights = bigArrays.newDoubleArray(1, false); + posRights.fill(0, posRights.size(), Double.NEGATIVE_INFINITY); + negLefts = bigArrays.newDoubleArray(1, false); + negLefts.fill(0, negLefts.size(), Double.POSITIVE_INFINITY); + negRights = bigArrays.newDoubleArray(1, false); + negRights.fill(0, negRights.size(), Double.NEGATIVE_INFINITY); + } + } + + /** + * Build an empty aggregation. + */ + @Override + public InternalAggregation buildEmptyAggregation() { + return new InternalGeoBounds( + name, + Double.NEGATIVE_INFINITY, + Double.POSITIVE_INFINITY, + Double.POSITIVE_INFINITY, + Double.NEGATIVE_INFINITY, + Double.POSITIVE_INFINITY, + Double.NEGATIVE_INFINITY, + wrapLongitude, + metadata() + ); + } + + /** + * Build an aggregation for data that has been collected into owningBucketOrd. 
+ */ + @Override + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + if (valuesSource == null) { + return buildEmptyAggregation(); + } + double top = tops.get(owningBucketOrdinal); + double bottom = bottoms.get(owningBucketOrdinal); + double posLeft = posLefts.get(owningBucketOrdinal); + double posRight = posRights.get(owningBucketOrdinal); + double negLeft = negLefts.get(owningBucketOrdinal); + double negRight = negRights.get(owningBucketOrdinal); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, metadata()); + } + + @Override + public void doClose() { + Releasables.close(tops, bottoms, posLefts, posRights, negLefts, negRights); + } + + protected void setBucketSize(final long bucket, final BigArrays bigArrays) { + if (bucket >= tops.size()) { + long from = tops.size(); + tops = bigArrays.grow(tops, bucket + 1); + tops.fill(from, tops.size(), Double.NEGATIVE_INFINITY); + bottoms = bigArrays.resize(bottoms, tops.size()); + bottoms.fill(from, bottoms.size(), Double.POSITIVE_INFINITY); + posLefts = bigArrays.resize(posLefts, tops.size()); + posLefts.fill(from, posLefts.size(), Double.POSITIVE_INFINITY); + posRights = bigArrays.resize(posRights, tops.size()); + posRights.fill(from, posRights.size(), Double.NEGATIVE_INFINITY); + negLefts = bigArrays.resize(negLefts, tops.size()); + negLefts.fill(from, negLefts.size(), Double.POSITIVE_INFINITY); + negRights = bigArrays.resize(negRights, tops.size()); + negRights.fill(from, negRights.size(), Double.NEGATIVE_INFINITY); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBounds.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBounds.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBounds.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBounds.java index 380fbce85ada7..81ef502dda130 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBounds.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBounds.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.geo.GeoPoint; import org.opensearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilder.java similarity index 93% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilder.java index 64e27fa7e13d1..b2c441f9a951c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilder.java @@ -30,8 +30,9 @@ * GitHub history for details. 
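The `setBucketSize` helper above is the part worth pausing on: per-bucket state lives in `BigArrays`-backed `DoubleArray`s that are grown lazily, and every fresh slot is filled with the identity element of the comparison it participates in, so the first collected point always replaces it. An annotated excerpt (two of the six arrays shown; the comments are editorial):

```
if (bucket >= tops.size()) {
    long from = tops.size();
    tops = bigArrays.grow(tops, bucket + 1);                      // grow one array once...
    tops.fill(from, tops.size(), Double.NEGATIVE_INFINITY);       // identity for "max latitude"
    bottoms = bigArrays.resize(bottoms, tops.size());             // ...then size the rest to match
    bottoms.fill(from, bottoms.size(), Double.POSITIVE_INFINITY); // identity for "min latitude"
    // posLefts/posRights and negLefts/negRights follow the same pattern for longitudes.
}
```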
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; +import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.ObjectParser; @@ -40,6 +41,7 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.metrics.GeoBoundsAggregatorSupplier; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -57,6 +59,7 @@ */ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "geo_bounds"; + private static final ParseField WRAP_LONGITUDE_FIELD = new ParseField("wrap_longitude"); public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>( NAME, GeoBoundsAggregatorSupplier.class @@ -68,7 +71,7 @@ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder< ); static { ValuesSourceAggregationBuilder.declareFields(PARSER, false, false, false); - PARSER.declareBoolean(GeoBoundsAggregationBuilder::wrapLongitude, GeoBoundsAggregator.WRAP_LONGITUDE_FIELD); + PARSER.declareBoolean(GeoBoundsAggregationBuilder::wrapLongitude, WRAP_LONGITUDE_FIELD); } public static void registerAggregators(ValuesSourceRegistry.Builder builder) { @@ -121,13 +124,6 @@ public GeoBoundsAggregationBuilder wrapLongitude(boolean wrapLongitude) { return this; } - /** - * Get whether to wrap longitudes. - */ - public boolean wrapLongitude() { - return wrapLongitude; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.NONE; @@ -145,7 +141,7 @@ protected GeoBoundsAggregatorFactory innerBuild( @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD.getPreferredName(), wrapLongitude); + builder.field(WRAP_LONGITUDE_FIELD.getPreferredName(), wrapLongitude); return builder; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregator.java similarity index 51% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregator.java index 054e8d4cb1c6c..a6518ea702be6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregator.java @@ -30,17 +30,13 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; -import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.DoubleArray; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.search.aggregations.Aggregator; -import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.support.ValuesSource; @@ -51,22 +47,11 @@ import java.util.Map; /** - * Aggregate all docs into a geographic bounds + * Aggregate all docs into a geographic bounds for field GeoPoint. * * @opensearch.internal */ -final class GeoBoundsAggregator extends MetricsAggregator { - - static final ParseField WRAP_LONGITUDE_FIELD = new ParseField("wrap_longitude"); - - private final ValuesSource.GeoPoint valuesSource; - private final boolean wrapLongitude; - DoubleArray tops; - DoubleArray bottoms; - DoubleArray posLefts; - DoubleArray posRights; - DoubleArray negLefts; - DoubleArray negRights; +final class GeoBoundsAggregator extends AbstractGeoBoundsAggregator { GeoBoundsAggregator( String name, @@ -76,25 +61,7 @@ final class GeoBoundsAggregator extends MetricsAggregator { boolean wrapLongitude, Map metadata ) throws IOException { - super(name, aggregationContext, parent, metadata); - // TODO: stop expecting nulls here - this.valuesSource = valuesSourceConfig.hasValues() ? (ValuesSource.GeoPoint) valuesSourceConfig.getValuesSource() : null; - this.wrapLongitude = wrapLongitude; - if (valuesSource != null) { - final BigArrays bigArrays = context.bigArrays(); - tops = bigArrays.newDoubleArray(1, false); - tops.fill(0, tops.size(), Double.NEGATIVE_INFINITY); - bottoms = bigArrays.newDoubleArray(1, false); - bottoms.fill(0, bottoms.size(), Double.POSITIVE_INFINITY); - posLefts = bigArrays.newDoubleArray(1, false); - posLefts.fill(0, posLefts.size(), Double.POSITIVE_INFINITY); - posRights = bigArrays.newDoubleArray(1, false); - posRights.fill(0, posRights.size(), Double.NEGATIVE_INFINITY); - negLefts = bigArrays.newDoubleArray(1, false); - negLefts.fill(0, negLefts.size(), Double.POSITIVE_INFINITY); - negRights = bigArrays.newDoubleArray(1, false); - negRights.fill(0, negRights.size(), Double.NEGATIVE_INFINITY); - } + super(name, aggregationContext, parent, valuesSourceConfig, wrapLongitude, metadata); } @Override @@ -107,25 +74,10 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - if (bucket >= tops.size()) { - long from = tops.size(); - tops = bigArrays.grow(tops, bucket + 1); - tops.fill(from, tops.size(), Double.NEGATIVE_INFINITY); - bottoms = bigArrays.resize(bottoms, tops.size()); - bottoms.fill(from, bottoms.size(), Double.POSITIVE_INFINITY); - posLefts = bigArrays.resize(posLefts, tops.size()); - posLefts.fill(from, posLefts.size(), Double.POSITIVE_INFINITY); - posRights = bigArrays.resize(posRights, tops.size()); - posRights.fill(from, posRights.size(), Double.NEGATIVE_INFINITY); - negLefts = bigArrays.resize(negLefts, tops.size()); - negLefts.fill(from, negLefts.size(), Double.POSITIVE_INFINITY); - negRights = 
bigArrays.resize(negRights, tops.size()); - negRights.fill(from, negRights.size(), Double.NEGATIVE_INFINITY); - } + setBucketSize(bucket, bigArrays); if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); - for (int i = 0; i < valuesCount; ++i) { GeoPoint value = values.nextValue(); double top = tops.get(bucket); @@ -163,38 +115,4 @@ public void collect(int doc, long bucket) throws IOException { } }; } - - @Override - public InternalAggregation buildAggregation(long owningBucketOrdinal) { - if (valuesSource == null) { - return buildEmptyAggregation(); - } - double top = tops.get(owningBucketOrdinal); - double bottom = bottoms.get(owningBucketOrdinal); - double posLeft = posLefts.get(owningBucketOrdinal); - double posRight = posRights.get(owningBucketOrdinal); - double negLeft = negLefts.get(owningBucketOrdinal); - double negRight = negRights.get(owningBucketOrdinal); - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, metadata()); - } - - @Override - public InternalAggregation buildEmptyAggregation() { - return new InternalGeoBounds( - name, - Double.NEGATIVE_INFINITY, - Double.POSITIVE_INFINITY, - Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, - Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, - wrapLongitude, - metadata() - ); - } - - @Override - public void doClose() { - Releasables.close(tops, bottoms, posLefts, posRights, negLefts, negRights); - } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index 2c6b75842b6f5..149e052b4db7d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoBounds.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBounds.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoBounds.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBounds.java index 87018242ee8df..7c708de88a49c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoBounds.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBounds.java @@ -30,7 +30,7 @@ * GitHub history for details. 
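For readers following the slimmed-down `collect` above: the collector's longitude bookkeeping (unchanged context that the diff elides) tracks the eastern and western hemispheres separately, which is what later lets `wrap_longitude` pick the narrower box across the dateline. A self-contained sketch of that bookkeeping, with assumed field names mirroring the aggregator's arrays:

```
import org.opensearch.common.geo.GeoPoint;

final class BoundsSketch {
    double top = Double.NEGATIVE_INFINITY, bottom = Double.POSITIVE_INFINITY;
    double posLeft = Double.POSITIVE_INFINITY, posRight = Double.NEGATIVE_INFINITY;
    double negLeft = Double.POSITIVE_INFINITY, negRight = Double.NEGATIVE_INFINITY;

    void collect(GeoPoint value) {
        top = Math.max(top, value.lat());
        bottom = Math.min(bottom, value.lat());
        if (value.lon() >= 0) {          // eastern-hemisphere extent
            posLeft = Math.min(posLeft, value.lon());
            posRight = Math.max(posRight, value.lon());
        } else {                         // western-hemisphere extent
            negLeft = Math.min(negLeft, value.lon());
            negRight = Math.max(negRight, value.lon());
        }
    }
}
```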
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ParsedGeoBounds.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/ParsedGeoBounds.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/ParsedGeoBounds.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/ParsedGeoBounds.java index a482fcfdf08dd..7643ac9d9a010 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ParsedGeoBounds.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/ParsedGeoBounds.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java new file mode 100644 index 0000000000000..49b455bbf389e --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.metrics; + +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.aggregations.BaseAggregationTestCase; + +import java.util.Collection; +import java.util.Collections; + +public class GeoBoundsAggregationBuilderTests extends BaseAggregationTestCase { + + /** + * This registers the GeoShape mapper with the Tests so that it can be used for testing the aggregation builders + * + * @return A Collection containing {@link GeoModulePlugin} + */ + protected Collection> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } + + @Override + protected GeoBoundsAggregationBuilder createTestAggregatorBuilder() { + GeoBoundsAggregationBuilder factory = new GeoBoundsAggregationBuilder(randomAlphaOfLengthBetween(1, 20)); + String field = randomAlphaOfLengthBetween(3, 20); + factory.field(field); + if (randomBoolean()) { + factory.wrapLongitude(randomBoolean()); + } + if (randomBoolean()) { + factory.missing("0,0"); + } + return factory; + } + +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorTests.java similarity index 88% rename from server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorTests.java index 6440c62e58e18..ee7a3c7e3faa2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorTests.java @@ -30,26 +30,42 @@ * 
GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.apache.lucene.document.Document; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.tests.common.AggregationInspectionHelper; +import org.opensearch.geo.tests.common.RandomGeoGenerator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.AggregatorTestCase; -import org.opensearch.search.aggregations.support.AggregationInspectionHelper; -import org.opensearch.test.geo.RandomGeoGenerator; -import static org.opensearch.search.aggregations.metrics.InternalGeoBoundsTests.GEOHASH_TOLERANCE; +import java.util.Collections; +import java.util.List; + import static org.hamcrest.Matchers.closeTo; public class GeoBoundsAggregatorTests extends AggregatorTestCase { + public static final double GEOHASH_TOLERANCE = 1E-5D; + + /** + * Overriding the Search Plugins list with {@link GeoModulePlugin} so that the testcase will know that this plugin is + * to be loaded during the tests. + * @return List of {@link SearchPlugin} + */ + @Override + protected List getSearchPlugins() { + return Collections.singletonList(new GeoModulePlugin()); + } + public void testEmpty() throws Exception { try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { GeoBoundsAggregationBuilder aggBuilder = new GeoBoundsAggregationBuilder("my_agg").field("field").wrapLongitude(false); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalGeoBoundsTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBoundsTests.java similarity index 81% rename from server/src/test/java/org/opensearch/search/aggregations/metrics/InternalGeoBoundsTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBoundsTests.java index e3857efff5d4d..22915212ff415 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalGeoBoundsTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBoundsTests.java @@ -30,11 +30,18 @@ * GitHub history for details. 
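The unit test above exercises the aggregator directly against a Lucene index rather than a test cluster. A condensed sketch of that `AggregatorTestCase` pattern, assuming the framework's `searchAndReduce` helper and the `GeoPointFieldType` constructor used in `testEmpty`; the field name and coordinates are illustrative:

```
try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
    Document doc = new Document();
    doc.add(new LatLonDocValuesField("field", 12.0, 56.0)); // latitude, longitude
    w.addDocument(doc);
    GeoBoundsAggregationBuilder aggBuilder = new GeoBoundsAggregationBuilder("my_agg")
        .field("field")
        .wrapLongitude(false);
    MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType("field");
    try (IndexReader reader = w.getReader()) {
        IndexSearcher searcher = new IndexSearcher(reader);
        // Run the aggregation over all docs and reduce to the final result.
        InternalGeoBounds bounds = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
        assertTrue(AggregationInspectionHelper.hasValue(bounds));
    }
}
```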
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; +import org.opensearch.common.ParseField; +import org.opensearch.common.xcontent.ContextParser; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.ParsedAggregation; import org.opensearch.test.InternalAggregationTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,6 +51,30 @@ public class InternalGeoBoundsTests extends InternalAggregationTestCase { static final double GEOHASH_TOLERANCE = 1E-5D; + /** + * Overriding the method so that tests can get the aggregation specs for namedWriteable. + * + * @return GeoPlugin + */ + @Override + protected SearchPlugin registerPlugin() { + return new GeoModulePlugin(); + } + + /** + * Overriding with the {@link ParsedGeoBounds} so that it can be parsed. We need to do this as {@link GeoModulePlugin} + * is registering this Aggregation. + * + * @return a List of {@link NamedXContentRegistry.Entry} + */ + @Override + protected List getNamedXContents() { + final List namedXContents = new ArrayList<>(getDefaultNamedXContents()); + final ContextParser parser = (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c); + namedXContents.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoBoundsAggregationBuilder.NAME), parser)); + return namedXContents; + } + @Override protected InternalGeoBounds createTestInstance(String name, Map metadata) { // we occasionally want to test top = Double.NEGATIVE_INFINITY since this triggers empty xContent object diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java new file mode 100644 index 0000000000000..c1f27b71c326d --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.tests.common; + +import org.opensearch.geo.search.aggregations.metrics.GeoBounds; +import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; + +public class AggregationBuilders { + /** + * Create a new {@link GeoBounds} aggregation with the given name. + */ + public static GeoBoundsAggregationBuilder geoBounds(String name) { + return new GeoBoundsAggregationBuilder(name); + } +} diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java new file mode 100644 index 0000000000000..208187bf34a5c --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java
new file mode 100644
index 0000000000000..208187bf34a5c
--- /dev/null
+++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.geo.tests.common;
+
+import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds;
+
+public class AggregationInspectionHelper {
+
+    public static boolean hasValue(InternalGeoBounds agg) {
+        return (agg.topLeft() == null && agg.bottomRight() == null) == false;
+    }
+}
diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java
new file mode 100644
index 0000000000000..2cf32c36b97ec
--- /dev/null
+++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java
@@ -0,0 +1,86 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.geo.tests.common;
+
+import org.opensearch.common.geo.GeoPoint;
+
+import java.util.Random;
+
+/**
+ * Random geo generation utilities for randomized {@code geo_point} type testing;
+ * does not depend on jts or spatial4j. Use RandomShapeGenerator to create random OGC-compliant shapes.
+ * This is a copy of the file present in the server folder; we need to keep both, as there are tests
+ * that depend on that file.
+ */
+public class RandomGeoGenerator {
+
+    public static void randomPoint(Random r, double[] pt) {
+        final double[] min = { -180, -90 };
+        final double[] max = { 180, 90 };
+        randomPointIn(r, min[0], min[1], max[0], max[1], pt);
+    }
+
+    public static void randomPointIn(
+        Random r,
+        final double minLon,
+        final double minLat,
+        final double maxLon,
+        final double maxLat,
+        double[] pt
+    ) {
+        assert pt != null && pt.length == 2;
+
+        // normalize min and max
+        double[] min = { normalizeLongitude(minLon), normalizeLatitude(minLat) };
+        double[] max = { normalizeLongitude(maxLon), normalizeLatitude(maxLat) };
+        final double[] tMin = new double[2];
+        final double[] tMax = new double[2];
+        tMin[0] = Math.min(min[0], max[0]);
+        tMax[0] = Math.max(min[0], max[0]);
+        tMin[1] = Math.min(min[1], max[1]);
+        tMax[1] = Math.max(min[1], max[1]);
+
+        pt[0] = tMin[0] + r.nextDouble() * (tMax[0] - tMin[0]);
+        pt[1] = tMin[1] + r.nextDouble() * (tMax[1] - tMin[1]);
+    }
+
+    public static GeoPoint randomPoint(Random r) {
+        return randomPointIn(r, -180, -90, 180, 90);
+    }
+
+    public static GeoPoint randomPointIn(Random r, final double minLon, final double minLat, final double maxLon, final double maxLat) {
+        double[] pt = new double[2];
+        randomPointIn(r, minLon, minLat, maxLon, maxLat, pt);
+        return new GeoPoint(pt[1], pt[0]);
+    }
+
+    /** Puts latitude in range of -90 to 90. */
+    private static double normalizeLatitude(double latitude) {
+        if (latitude >= -90 && latitude <= 90) {
+            return latitude; // common case, and avoids slight double precision shifting
+        }
+        double off = Math.abs((latitude + 90) % 360);
+        return (off <= 180 ? off : 360 - off) - 90;
+    }
+
+    /** Puts longitude in range of -180 to +180. */
+    private static double normalizeLongitude(double longitude) {
+        if (longitude >= -180 && longitude <= 180) {
+            return longitude; // common case, and avoids slight double precision shifting
+        }
+        double off = (longitude + 180) % 360;
+        if (off < 0) {
+            return 180 + off;
+        } else if (off == 0 && longitude > 0) {
+            return 180;
+        } else {
+            return -180 + off;
+        }
+    }
+}
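The two normalization helpers above are easy to sanity-check by hand. A small standalone sketch (a hypothetical class that restates the same arithmetic) with a couple of worked values:

```java
// Hypothetical spot-check of the wrap-around arithmetic used in
// RandomGeoGenerator above; not part of the module's own test suite.
public class NormalizeSpotCheck {
    static double normalizeLongitude(double longitude) {
        if (longitude >= -180 && longitude <= 180) {
            return longitude;
        }
        double off = (longitude + 180) % 360;
        if (off < 0) return 180 + off;
        if (off == 0 && longitude > 0) return 180;
        return -180 + off;
    }

    static double normalizeLatitude(double latitude) {
        if (latitude >= -90 && latitude <= 90) {
            return latitude;
        }
        double off = Math.abs((latitude + 90) % 360);
        return (off <= 180 ? off : 360 - off) - 90;
    }

    public static void main(String[] args) {
        // 190 degrees east wraps past the dateline: off = (190 + 180) % 360 = 10, so -180 + 10 = -170
        System.out.println(normalizeLongitude(190)); // -170.0
        // 100 degrees north wraps over the pole: off = |(100 + 90) % 360| = 190, so (360 - 190) - 90 = 80
        System.out.println(normalizeLatitude(100));  // 80.0
    }
}
```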
diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle
index 34201069d7b7b..7b567eb9110c5 100644
--- a/modules/ingest-common/build.gradle
+++ b/modules/ingest-common/build.gradle
@@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test'

 opensearchplugin {
   description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources'
-  classname 'org.opensearch.ingest.common.IngestCommonPlugin'
+  classname 'org.opensearch.ingest.common.IngestCommonModulePlugin'
   extendedPlugins = ['lang-painless']
 }
diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java
index f27c903d8795f..784dad8cea49f 100644
--- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java
+++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java
@@ -63,7 +63,7 @@ public class IngestRestartIT extends OpenSearchIntegTestCase {

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(IngestCommonPlugin.class, CustomScriptPlugin.class);
+        return Arrays.asList(IngestCommonModulePlugin.class, CustomScriptPlugin.class);
     }

     @Override
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
similarity index 98%
rename from modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonPlugin.java
rename to modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
index 969f77aa85152..c786785a008d7 100644
--- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonPlugin.java
+++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
@@ -60,7 +60,7 @@ import java.util.Map;
 import java.util.function.Supplier;

-public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPlugin {
+public class IngestCommonModulePlugin extends Plugin implements ActionPlugin, IngestPlugin {

     static final Setting<TimeValue> WATCHDOG_INTERVAL = Setting.timeSetting(
         "ingest.grok.watchdog.interval",
@@ -73,7 +73,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPl
         Setting.Property.NodeScope
     );

-    public IngestCommonPlugin() {}
+    public IngestCommonModulePlugin() {}

     @Override
     public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle
index f3be0fe61d4be..7dce788f3a4a4 100644
--- a/modules/ingest-geoip/build.gradle
+++ b/modules/ingest-geoip/build.gradle
@@ -35,7 +35,7 @@ apply plugin: 'opensearch.internal-cluster-test'

 opensearchplugin {
   description 'Ingest processor that looks up geo data based on IP addresses using the Maxmind geo database'
-  classname 'org.opensearch.ingest.geoip.IngestGeoIpPlugin'
+  classname 'org.opensearch.ingest.geoip.IngestGeoIpModulePlugin'
 }

 dependencies {
diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java
index e88c77b8e33f4..65378ca79041c 100644
--- a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java
+++ b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java
@@ -75,7 +75,7 @@ public List<Setting<?>> getSettings() {

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(IngestGeoIpPlugin.class, IngestGeoIpSettingsPlugin.class);
+        return Arrays.asList(IngestGeoIpModulePlugin.class, IngestGeoIpSettingsPlugin.class);
     }

     @Override
diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java
index 030f75bf48e18..ebffe61f6e756 100644
--- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java
+++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java
@@ -49,7 +49,7 @@ import org.opensearch.ingest.AbstractProcessor;
 import org.opensearch.ingest.IngestDocument;
 import org.opensearch.ingest.Processor;
-import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;
+import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache;

 import java.io.IOException;
 import java.net.InetAddress;
diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java
similarity index 99%
rename from modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java
rename to modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java
index 790a9bb4bf978..7869e4a7de7fc 100644
--- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java
+++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java
@@ -64,7 +64,7 @@ import java.util.function.Function;
 import java.util.stream.Stream;

-public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable {
+public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable {
     public static final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope);

     static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" };
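The GeoIP factory tests that follow lean on one behavior worth spelling out: each `.mmdb` database is wrapped in a lazy loader, and the tests assert (via `assertNull(lazyLoader.databaseReader.get())`) that nothing is parsed from disk until a processor first needs it. A generic, self-contained sketch of that holder pattern, with names of my own choosing rather than the module's:

```java
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Sketch of a lazy, race-safe holder: the expensive resource is materialized
// on first get(), so a test can assert isLoaded() == false right after
// construction, which is the shape of the assertions in the tests below.
final class LazyHolder<T> {
    private final AtomicReference<T> ref = new AtomicReference<>();
    private final Supplier<T> loader;

    LazyHolder(Supplier<T> loader) {
        this.loader = loader;
    }

    T get() {
        T value = ref.get();
        if (value == null) {
            value = loader.get();                 // load on first access only
            if (!ref.compareAndSet(null, value)) {
                value = ref.get();                // another thread won the race
            }
        }
        return value;
    }

    boolean isLoaded() {
        return ref.get() != null;
    }
}
```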
diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java
index cda2f5692b0db..d459686162cd0 100644
--- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java
+++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java
@@ -37,7 +37,7 @@ import org.opensearch.common.Randomness;
 import org.opensearch.index.VersionType;
 import org.opensearch.ingest.IngestDocument;
-import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;
+import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.StreamsUtils;
 import org.junit.AfterClass;
@@ -73,7 +73,7 @@ public static void loadDatabaseReaders() throws IOException {
         Files.createDirectories(geoIpConfigDir);
         copyDatabaseFiles(geoIpDir);

-        databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir);
+        databaseReaders = IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir);
     }

     @AfterClass
@@ -279,7 +279,7 @@ public void testLazyLoading() throws Exception {
         // Loading another set of database reader instances, because otherwise we can't test lazy loading, as the
         // database readers used at class level are reused between tests. (We want to keep that; otherwise running this
         // test would take roughly 4 times as long.)
-        Map<String, DatabaseReaderLazyLoader> databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir);
+        Map<String, DatabaseReaderLazyLoader> databaseReaders = IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir);
         GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         for (DatabaseReaderLazyLoader lazyLoader : databaseReaders.values()) {
             assertNull(lazyLoader.databaseReader.get());
@@ -336,7 +336,7 @@ public void testLoadingCustomDatabase() throws IOException {
          * Loading another set of database reader instances, because otherwise we can't test lazy loading, as the database readers used at class
          * level are reused between tests. (We want to keep that; otherwise running this test would take roughly 4 times as long.)
          */
-        final Map<String, DatabaseReaderLazyLoader> databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir);
+        final Map<String, DatabaseReaderLazyLoader> databaseReaders = IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir);
         final GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         for (DatabaseReaderLazyLoader lazyLoader : databaseReaders.values()) {
             assertNull(lazyLoader.databaseReader.get());
@@ -365,9 +365,9 @@ public void testDatabaseNotExistsInDir() throws IOException {
             Files.createDirectories(geoIpConfigDir);
         }
         copyDatabaseFiles(geoIpDir);
-        final String databaseFilename = randomFrom(IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES);
+        final String databaseFilename = randomFrom(IngestGeoIpModulePlugin.DEFAULT_DATABASE_FILENAMES);
         Files.delete(geoIpDir.resolve(databaseFilename));
-        final IOException e = expectThrows(IOException.class, () -> IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir));
+        final IOException e = expectThrows(IOException.class, () -> IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir));
         assertThat(e, hasToString(containsString("expected database [" + databaseFilename + "] to exist in [" + geoIpDir + "]")));
     }

@@ -377,9 +377,9 @@ public void testDatabaseExistsInConfigDir() throws IOException {
         final Path geoIpConfigDir = configDir.resolve("ingest-geoip");
         Files.createDirectories(geoIpConfigDir);
         copyDatabaseFiles(geoIpDir);
-        final String databaseFilename = randomFrom(IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES);
+        final String databaseFilename = randomFrom(IngestGeoIpModulePlugin.DEFAULT_DATABASE_FILENAMES);
         copyDatabaseFile(geoIpConfigDir, databaseFilename);
-        final IOException e = expectThrows(IOException.class, () -> IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir));
+        final IOException e = expectThrows(IOException.class, () -> IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir));
         assertThat(e, hasToString(containsString("expected database [" + databaseFilename + "] to not exist in [" + geoIpConfigDir +
"]"))); } @@ -388,7 +388,7 @@ private static void copyDatabaseFile(final Path path, final String databaseFilen } private static void copyDatabaseFiles(final Path path) throws IOException { - for (final String databaseFilename : IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES) { + for (final String databaseFilename : IngestGeoIpModulePlugin.DEFAULT_DATABASE_FILENAMES) { copyDatabaseFile(path, databaseFilename); } } diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java index 34c80fec520aa..8b94e8cc114ed 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java @@ -37,7 +37,7 @@ import org.opensearch.common.io.PathUtils; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.RandomDocumentPicks; -import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java similarity index 95% rename from modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java rename to modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index 540d68b0982eb..e79354b3d7cd0 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -34,12 +34,12 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; -import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; import static org.mockito.Mockito.mock; -public class IngestGeoIpPluginTests extends OpenSearchTestCase { +public class IngestGeoIpModulePluginTests extends OpenSearchTestCase { public void testCachesAndEvictsResults() { GeoIpCache cache = new GeoIpCache(1); diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index a3752ad1c7f7e..187e72d192a3d 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { description 'Ingest processor that extracts information from a user agent' - classname 'org.opensearch.ingest.useragent.IngestUserAgentPlugin' + classname 'org.opensearch.ingest.useragent.IngestUserAgentModulePlugin' } restResources { diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java similarity index 96% rename from modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java rename to modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index dc005ae36dff8..cd96fcf2347ef 100644 --- 
a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -49,7 +49,7 @@ import java.util.Map; import java.util.stream.Stream; -public class IngestUserAgentPlugin extends Plugin implements IngestPlugin { +public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { private final Setting CACHE_SIZE_SETTING = Setting.longSetting( "ingest.user_agent.cache_size", @@ -85,7 +85,7 @@ static Map createUserAgentParsers(Path userAgentConfigD UserAgentParser defaultParser = new UserAgentParser( DEFAULT_PARSER_NAME, - IngestUserAgentPlugin.class.getResourceAsStream("/regexes.yml"), + IngestUserAgentModulePlugin.class.getResourceAsStream("/regexes.yml"), cache ); userAgentParsers.put(DEFAULT_PARSER_NAME, defaultParser); diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java index 0625f1f8fd1af..df88e98e7fc4f 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java @@ -313,7 +313,13 @@ public UserAgentProcessor create( ) throws Exception { String field = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "user_agent"); - String regexFilename = readStringProperty(TYPE, processorTag, config, "regex_file", IngestUserAgentPlugin.DEFAULT_PARSER_NAME); + String regexFilename = readStringProperty( + TYPE, + processorTag, + config, + "regex_file", + IngestUserAgentModulePlugin.DEFAULT_PARSER_NAME + ); List propertyNames = readOptionalList(TYPE, processorTag, config, "properties"); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); boolean useECS = readBooleanProperty(TYPE, processorTag, config, "ecs", true); diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java index 72815a37f46de..576136436ef33 100644 --- a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java @@ -91,7 +91,7 @@ public static void createUserAgentParsers() throws IOException { } } - userAgentParsers = IngestUserAgentPlugin.createUserAgentParsers(userAgentConfigDir, new UserAgentCache(1000)); + userAgentParsers = IngestUserAgentModulePlugin.createUserAgentParsers(userAgentConfigDir, new UserAgentCache(1000)); } public void testBuildDefaults() throws Exception { diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 203c332069c5f..114c750e34b17 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Lucene expressions integration for OpenSearch' - classname 'org.opensearch.script.expression.ExpressionPlugin' + classname 'org.opensearch.script.expression.ExpressionModulePlugin' } dependencies { diff --git 
a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 deleted file mode 100644 index 2d216277b3a8e..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5583bcd3a24d3aae40b0a3152458021844ac09aa \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..ec6906d730ac1 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +9f23e695b0c864fa9722e4f67d950266ca64d37b \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index 952b00dda608c..69d5e639e4aec 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -78,7 +78,7 @@ public class MoreExpressionIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(ExpressionPlugin.class); + return Collections.singleton(ExpressionModulePlugin.class); } private SearchRequestBuilder buildRequest(String script, Object... params) { diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index 5aade265439d2..cf14b47fa9f3e 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -59,7 +59,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Collections.singleton(ExpressionPlugin.class); + return Collections.singleton(ExpressionModulePlugin.class); } public void testAllOpsDisabledIndexedScripts() throws IOException { diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionPlugin.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java similarity index 95% rename from modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionPlugin.java rename to modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java index 440a03c63bd98..7640fcf7a3bc5 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionPlugin.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java @@ -40,7 +40,7 @@ import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptEngine; -public class ExpressionPlugin extends Plugin implements ScriptPlugin { +public class ExpressionModulePlugin extends Plugin implements ScriptPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle 
index 511a6b144c21a..14eafd8d43e13 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Mustache scripting integration for OpenSearch' - classname 'org.opensearch.script.mustache.MustachePlugin' + classname 'org.opensearch.script.mustache.MustacheModulePlugin' hasClientJar = true // For the template apis and query } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java index 617f1f4f738a0..fbb8ebdec384c 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java @@ -58,7 +58,7 @@ public class MultiSearchTemplateIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(MustachePlugin.class); + return Collections.singleton(MustacheModulePlugin.class); } public void testBasic() throws Exception { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java index 61f047a32f1c1..87ef8b810f5e0 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java @@ -62,7 +62,7 @@ public class SearchTemplateIT extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(MustachePlugin.class); + return Collections.singleton(MustacheModulePlugin.class); } @Before diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustachePlugin.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java similarity index 96% rename from modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustachePlugin.java rename to modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java index 498dc2ac57cc7..03be9d7efb2db 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustachePlugin.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java @@ -54,7 +54,7 @@ import java.util.List; import java.util.function.Supplier; -public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin, SearchPlugin { +public class MustacheModulePlugin extends Plugin implements ScriptPlugin, ActionPlugin, SearchPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index c997068ccb6c9..f29c48aecf5df 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -37,7 +37,7 @@ apply plugin: 'com.github.johnrengelman.shadow' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' - classname 'org.opensearch.painless.PainlessPlugin' + classname 'org.opensearch.painless.PainlessModulePlugin' } testClusters.all { diff --git 
a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java similarity index 98% rename from modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java rename to modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java index 9a235df0fb184..4a432b8f17b25 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java @@ -81,7 +81,7 @@ /** * Registers Painless as a plugin. */ -public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { +public final class PainlessModulePlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { private static final Map, List> allowlists; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java index 424d88cbc9e00..6e318eda91985 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java @@ -36,7 +36,7 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; import org.opensearch.index.query.MatchQueryBuilder; -import org.opensearch.painless.PainlessPlugin; +import org.opensearch.painless.PainlessModulePlugin; import org.opensearch.painless.action.PainlessExecuteAction.Request; import org.opensearch.painless.action.PainlessExecuteAction.Response; import org.opensearch.plugins.Plugin; @@ -60,7 +60,7 @@ public class PainlessExecuteApiTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(PainlessPlugin.class); + return Collections.singleton(PainlessModulePlugin.class); } public void testDefaults() throws IOException { diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index 08758c7ab2bda..b16176ca5aa72 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.java-rest-test' opensearchplugin { description 'Adds advanced field mappers' - classname 'org.opensearch.index.mapper.MapperExtrasPlugin' + classname 'org.opensearch.index.mapper.MapperExtrasModulePlugin' hasClientJar = true } diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasModulePlugin.java similarity index 96% rename from modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasPlugin.java rename to modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasModulePlugin.java index ebb86bc055825..081a3414c3675 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasModulePlugin.java @@ -43,7 +43,7 @@ import java.util.List; import java.util.Map; -public class MapperExtrasPlugin extends Plugin implements MapperPlugin, SearchPlugin { +public class MapperExtrasModulePlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map getMappers() { diff 
--git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java index d9e40fac1ad0f..d498116efc108 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java @@ -47,7 +47,7 @@ public class BWCTemplateTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); + return pluginList(MapperExtrasModulePlugin.class); } public void testBeatsTemplatesBWC() throws Exception { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java index 908e5db6196c3..1e001eb2f55a5 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java @@ -72,7 +72,7 @@ protected void assertExistsQuery(MappedFieldType fieldType, Query query, ParseCo @Override protected Collection getPlugins() { - return List.of(new MapperExtrasPlugin()); + return List.of(new MapperExtrasModulePlugin()); } static int getFrequency(TokenStream tk) throws IOException { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java index 3161e7462d2a0..63487fd7baa89 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java @@ -57,7 +57,7 @@ public void setup() { @Override protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); + return pluginList(MapperExtrasModulePlugin.class); } public void testBasics() throws Exception { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java index 55d825d1b53bb..33e780c622a82 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -58,7 +58,7 @@ protected void assertExistsQuery(MapperService mapperService) { @Override protected Collection getPlugins() { - return org.opensearch.common.collect.List.of(new MapperExtrasPlugin()); + return org.opensearch.common.collect.List.of(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java index e19f9dd7988e1..74c79b0db469b 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -53,7 +53,7 @@ public class ScaledFloatFieldMapperTests extends MapperTestCase { @Override protected Collection getPlugins() { - return singletonList(new 
MapperExtrasPlugin()); + return singletonList(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index 7c4b8956d9e3c..6ab2293620893 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -133,7 +133,7 @@ protected void writeFieldValue(XContentBuilder builder) throws IOException { @Override protected Collection getPlugins() { - return org.opensearch.common.collect.List.of(new MapperExtrasPlugin()); + return org.opensearch.common.collect.List.of(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java index e9d3767373b95..1ebec6b963b53 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java @@ -62,7 +62,7 @@ public class TokenCountFieldMapperTests extends MapperTestCase { @Override protected Collection getPlugins() { - return Collections.singletonList(new MapperExtrasPlugin()); + return Collections.singletonList(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java index e183ba6f6735c..f57aac8a244b7 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.index.mapper.MapperExtrasPlugin; +import org.opensearch.index.mapper.MapperExtrasModulePlugin; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.RankFeatureQueryBuilder.ScoreFunction; import org.opensearch.plugins.Plugin; @@ -78,7 +78,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws @Override protected Collection> getPlugins() { - return Arrays.asList(MapperExtrasPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(MapperExtrasModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/opensearch-dashboards/build.gradle b/modules/opensearch-dashboards/build.gradle index f76ca739faf81..07453e1f70f1c 100644 --- a/modules/opensearch-dashboards/build.gradle +++ b/modules/opensearch-dashboards/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.java-rest-test' opensearchplugin { description 'Plugin exposing APIs for OpenSearch Dashboards system indices' - classname 'org.opensearch.dashboards.OpenSearchDashboardsPlugin' + classname 'org.opensearch.dashboards.OpenSearchDashboardsModulePlugin' } dependencies { diff --git a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsPlugin.java 
b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java similarity index 98% rename from modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsPlugin.java rename to modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java index 534218ef438e7..09fd52ff65c66 100644 --- a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsPlugin.java +++ b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java @@ -75,7 +75,7 @@ import static java.util.Collections.unmodifiableList; -public class OpenSearchDashboardsPlugin extends Plugin implements SystemIndexPlugin { +public class OpenSearchDashboardsModulePlugin extends Plugin implements SystemIndexPlugin { public static final Setting> OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING = Setting.listSetting( "opensearch_dashboards.system_indices", diff --git a/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsPluginTests.java b/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsModulePluginTests.java similarity index 79% rename from modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsPluginTests.java rename to modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsModulePluginTests.java index a74af8cd257b5..1573113d58ecd 100644 --- a/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsPluginTests.java +++ b/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsModulePluginTests.java @@ -44,28 +44,28 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.is; -public class OpenSearchDashboardsPluginTests extends OpenSearchTestCase { +public class OpenSearchDashboardsModulePluginTests extends OpenSearchTestCase { public void testOpenSearchDashboardsIndexNames() { assertThat( - new OpenSearchDashboardsPlugin().getSettings(), - contains(OpenSearchDashboardsPlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING) + new OpenSearchDashboardsModulePlugin().getSettings(), + contains(OpenSearchDashboardsModulePlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING) ); assertThat( - new OpenSearchDashboardsPlugin().getSystemIndexDescriptors(Settings.EMPTY) + new OpenSearchDashboardsModulePlugin().getSystemIndexDescriptors(Settings.EMPTY) .stream() .map(SystemIndexDescriptor::getIndexPattern) .collect(Collectors.toList()), contains(".opensearch_dashboards", ".opensearch_dashboards_*", ".reporting-*", ".apm-agent-configuration", ".apm-custom-link") ); final List names = Collections.unmodifiableList(Arrays.asList("." + randomAlphaOfLength(4), "." 
+ randomAlphaOfLength(5))); - final List namesFromDescriptors = new OpenSearchDashboardsPlugin().getSystemIndexDescriptors( - Settings.builder().putList(OpenSearchDashboardsPlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING.getKey(), names).build() + final List namesFromDescriptors = new OpenSearchDashboardsModulePlugin().getSystemIndexDescriptors( + Settings.builder().putList(OpenSearchDashboardsModulePlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING.getKey(), names).build() ).stream().map(SystemIndexDescriptor::getIndexPattern).collect(Collectors.toList()); assertThat(namesFromDescriptors, is(names)); assertThat( - new OpenSearchDashboardsPlugin().getSystemIndexDescriptors(Settings.EMPTY) + new OpenSearchDashboardsModulePlugin().getSystemIndexDescriptors(Settings.EMPTY) .stream() .anyMatch(systemIndexDescriptor -> systemIndexDescriptor.matchesIndexPattern(".opensearch_dashboards-event-log-7-1")), is(false) diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 3c71a731e6a6a..d509e65106e7b 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'This module adds the support parent-child queries and aggregations' - classname 'org.opensearch.join.ParentJoinPlugin' + classname 'org.opensearch.join.ParentJoinModulePlugin' hasClientJar = true } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java index 5d6d4fb333d49..71d7c0928e643 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java @@ -38,7 +38,7 @@ import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.IndexModule; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; @@ -60,7 +60,7 @@ protected boolean ignoreExternalCluster() { @Override protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, ParentJoinModulePlugin.class); } @Override diff --git a/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinPlugin.java b/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinModulePlugin.java similarity index 95% rename from modules/parent-join/src/main/java/org/opensearch/join/ParentJoinPlugin.java rename to modules/parent-join/src/main/java/org/opensearch/join/ParentJoinModulePlugin.java index 6889e38c04ff3..31e5be7f9979c 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinPlugin.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinModulePlugin.java @@ -50,9 +50,9 @@ import java.util.List; import java.util.Map; -public class ParentJoinPlugin extends Plugin implements SearchPlugin, MapperPlugin { +public class ParentJoinModulePlugin extends Plugin implements SearchPlugin, MapperPlugin { - public ParentJoinPlugin() {} + public ParentJoinModulePlugin() {} @Override public List> getQueries() { diff --git 
a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java index 71088e1d5391d..086176bfbde6c 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java @@ -32,7 +32,7 @@ package org.opensearch.join.aggregations; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; @@ -44,7 +44,7 @@ public class ChildrenTests extends BaseAggregationTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java index 0d134592fa678..d1b4f3c3ebc27 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -61,7 +61,7 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.shard.ShardId; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.mapper.MetaJoinFieldMapper; import org.opensearch.join.mapper.ParentJoinFieldMapper; import org.opensearch.plugins.SearchPlugin; @@ -350,6 +350,6 @@ private void testCaseTermsParentTerms(Query query, IndexSearcher indexSearcher, @Override protected List getSearchPlugins() { - return Collections.singletonList(new ParentJoinPlugin()); + return Collections.singletonList(new ParentJoinModulePlugin()); } } diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java index 8eaedc4aa15b0..3884c856d9a0e 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java @@ -35,7 +35,7 @@ import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.NamedXContentRegistry.Entry; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.InternalAggregations; @@ -50,7 +50,7 @@ public class InternalChildrenTests extends InternalSingleBucketAggregationTestCa @Override protected SearchPlugin registerPlugin() { - return new ParentJoinPlugin(); + return new ParentJoinModulePlugin(); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java index cd5236ab49a39..72bfc55888147 100644 --- 
a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java @@ -34,7 +34,7 @@ import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry.Entry; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.InternalAggregations; @@ -49,7 +49,7 @@ public class InternalParentTests extends InternalSingleBucketAggregationTestCase @Override protected SearchPlugin registerPlugin() { - return new ParentJoinPlugin(); + return new ParentJoinModulePlugin(); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java index abb258f37198b..4f08e004ea208 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java @@ -35,7 +35,7 @@ import java.util.Arrays; import java.util.Collection; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; @@ -44,7 +44,7 @@ public class ParentTests extends BaseAggregationTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java index 2ed06ee0c0ea9..61a1b761f17ab 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java @@ -62,7 +62,7 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.shard.ShardId; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.mapper.MetaJoinFieldMapper; import org.opensearch.join.mapper.ParentJoinFieldMapper; import org.opensearch.plugins.SearchPlugin; @@ -287,6 +287,6 @@ private void testCase(Query query, IndexSearcher indexSearcher, Consumer getSearchPlugins() { - return Collections.singletonList(new ParentJoinPlugin()); + return Collections.singletonList(new ParentJoinModulePlugin()); } } diff --git a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java index a9ac151dd3806..53e9495b707fe 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java @@ -44,7 +44,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParsedDocument; import 
org.opensearch.index.mapper.SourceToParse; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -56,7 +56,7 @@ public class ParentJoinFieldMapperTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singletonList(ParentJoinPlugin.class); + return Collections.singletonList(ParentJoinModulePlugin.class); } public void testSingleLevel() throws Exception { diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java index 5595c98a439bf..5a7b51b64210c 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java @@ -63,7 +63,7 @@ import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; import org.opensearch.index.similarity.SimilarityService; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; @@ -99,7 +99,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java index 0f983799a6d25..32c9e490be165 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java @@ -50,7 +50,7 @@ import org.opensearch.index.query.QueryShardException; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; @@ -83,7 +83,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java index 41bc717db1fc8..995206120d075 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java @@ -45,7 +45,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import 
org.opensearch.plugins.Plugin; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; @@ -72,7 +72,7 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 1738de5a55748..2312f7bda80b2 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Percolator module adds capability to index queries and query these queries by specifying documents' - classname 'org.opensearch.percolator.PercolatorPlugin' + classname 'org.opensearch.percolator.PercolatorModulePlugin' hasClientJar = true } diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index 8d3c37bc9b039..8c0bfb378bccd 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -44,7 +44,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.geo.GeoPlugin; +import org.opensearch.geo.GeoModulePlugin; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; @@ -93,7 +93,7 @@ protected boolean addMockGeoShapeFieldMapper() { @Override protected Collection> nodePlugins() { - return Arrays.asList(PercolatorPlugin.class, GeoPlugin.class); + return Arrays.asList(PercolatorModulePlugin.class, GeoModulePlugin.class); } public void testPercolatorQuery() throws Exception { diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorModulePlugin.java similarity index 96% rename from modules/percolator/src/main/java/org/opensearch/percolator/PercolatorPlugin.java rename to modules/percolator/src/main/java/org/opensearch/percolator/PercolatorModulePlugin.java index f841860d3930a..c0416f0336a10 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorModulePlugin.java @@ -46,7 +46,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -public class PercolatorPlugin extends Plugin implements MapperPlugin, SearchPlugin { +public class PercolatorModulePlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public List> getQueries() { return singletonList(new QuerySpec<>(PercolateQueryBuilder.NAME, PercolateQueryBuilder::new, PercolateQueryBuilder::fromXContent)); diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java index e59aa227e3dc7..1e884f591cbf8 100644 --- 
a/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java @@ -149,7 +149,7 @@ public class CandidateQueryTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(PercolatorPlugin.class); + return Collections.singleton(PercolatorModulePlugin.class); } @Before diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java index 87aa28a3346bc..8a744a038442f 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java @@ -97,7 +97,7 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(PercolatorPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(PercolatorModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java index fe9c486b68166..fc18a7af937f9 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java @@ -95,7 +95,7 @@ import org.opensearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.opensearch.index.query.functionscore.ScriptScoreFunctionBuilder; import org.opensearch.indices.TermsLookup; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.query.HasChildQueryBuilder; import org.opensearch.join.query.HasParentQueryBuilder; import org.opensearch.plugins.Plugin; @@ -145,7 +145,12 @@ public class PercolatorFieldMapperTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class, PercolatorPlugin.class, FoolMeScriptPlugin.class, ParentJoinPlugin.class); + return pluginList( + InternalSettingsPlugin.class, + PercolatorModulePlugin.class, + FoolMeScriptPlugin.class, + ParentJoinModulePlugin.class + ); } @Override diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java index 1d77c9d472864..3d44a94297790 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java @@ -78,7 +78,7 @@ public class PercolatorQuerySearchTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Arrays.asList(PercolatorPlugin.class, CustomScriptPlugin.class); + return Arrays.asList(PercolatorModulePlugin.class, CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 2b1d1e9abc4b4..4232d583dc984 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' 
opensearchplugin { description 'The Rank Eval module adds APIs to evaluate ranking quality.' - classname 'org.opensearch.index.rankeval.RankEvalPlugin' + classname 'org.opensearch.index.rankeval.RankEvalModulePlugin' hasClientJar = true } diff --git a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index ea80b59711b8a..6eb974c77a5f3 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -65,7 +65,7 @@ public class RankEvalRequestIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(RankEvalPlugin.class); + return Arrays.asList(RankEvalModulePlugin.class); } @Before diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalPlugin.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java similarity index 98% rename from modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalPlugin.java rename to modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java index a1eaa0f62f0f1..af960207e5175 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalPlugin.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java @@ -53,7 +53,7 @@ import java.util.List; import java.util.function.Supplier; -public class RankEvalPlugin extends Plugin implements ActionPlugin { +public class RankEvalModulePlugin extends Plugin implements ActionPlugin { @Override public List> getActions() { diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java index 56d23d8ca3184..ab34435c5f346 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java @@ -56,12 +56,14 @@ public class EvalQueryQualityTests extends OpenSearchTestCase { - private static NamedWriteableRegistry namedWritableRegistry = new NamedWriteableRegistry(new RankEvalPlugin().getNamedWriteables()); + private static NamedWriteableRegistry namedWritableRegistry = new NamedWriteableRegistry( + new RankEvalModulePlugin().getNamedWriteables() + ); @SuppressWarnings("resource") @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(new RankEvalPlugin().getNamedXContent()); + return new NamedXContentRegistry(new RankEvalModulePlugin().getNamedXContent()); } public static EvalQueryQuality randomEvalQueryQuality() { diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java index 401bb1b2a7bb7..c2a041ea59aee 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java @@ -47,21 +47,21 @@ public class RankEvalRequestTests extends AbstractWireSerializingTestCase { - private static RankEvalPlugin rankEvalPlugin = new RankEvalPlugin(); + private static 
RankEvalModulePlugin rankEvalModulePlugin = new RankEvalModulePlugin(); @AfterClass public static void releasePluginResources() throws IOException { - rankEvalPlugin.close(); + rankEvalModulePlugin.close(); } @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(rankEvalPlugin.getNamedXContent()); + return new NamedXContentRegistry(rankEvalModulePlugin.getNamedXContent()); } @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(rankEvalPlugin.getNamedWriteables()); + return new NamedWriteableRegistry(rankEvalModulePlugin.getNamedWriteables()); } @Override diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java index b79b26eb0af2e..31db13889be4b 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java @@ -71,7 +71,7 @@ public class RankEvalSpecTests extends OpenSearchTestCase { @SuppressWarnings("resource") @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(new RankEvalPlugin().getNamedXContent()); + return new NamedXContentRegistry(new RankEvalModulePlugin().getNamedXContent()); } private static List randomList(Supplier randomSupplier) { diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 37526a924da73..cad7d67f3ef84 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -41,7 +41,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' 
- classname 'org.opensearch.index.reindex.ReindexPlugin' + classname 'org.opensearch.index.reindex.ReindexModulePlugin' hasClientJar = true } diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java index 6d313e06263b3..05cb87b3f9165 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java @@ -46,7 +46,7 @@ import org.opensearch.index.reindex.DeleteByQueryAction; import org.opensearch.index.reindex.DeleteByQueryRequestBuilder; import org.opensearch.index.reindex.ReindexAction; -import org.opensearch.index.reindex.ReindexPlugin; +import org.opensearch.index.reindex.ReindexModulePlugin; import org.opensearch.index.reindex.ReindexRequestBuilder; import org.opensearch.index.reindex.RethrottleAction; import org.opensearch.index.reindex.RethrottleRequestBuilder; @@ -88,7 +88,7 @@ protected boolean ignoreExternalCluster() { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); + return Arrays.asList(ReindexModulePlugin.class, ReindexCancellationPlugin.class); } @Before diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java similarity index 96% rename from modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexPlugin.java rename to modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java index 865ae26f6f54d..28c6b4038ecfd 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.index.reindex.spi.RemoteReindexExtension; import org.opensearch.plugins.ExtensiblePlugin; -import org.opensearch.plugins.ExtensiblePlugin.ExtensionLoader; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionResponse; @@ -62,7 +61,6 @@ import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.watcher.ResourceWatcherService; import java.util.ArrayList; import java.util.Arrays; @@ -73,9 +71,9 @@ import static java.util.Collections.singletonList; -public class ReindexPlugin extends Plugin implements ActionPlugin, ExtensiblePlugin { +public class ReindexModulePlugin extends Plugin implements ActionPlugin, ExtensiblePlugin { public static final String NAME = "reindex"; - private static final Logger logger = LogManager.getLogger(ReindexPlugin.class); + private static final Logger logger = LogManager.getLogger(ReindexModulePlugin.class); @Override public List> getActions() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java index ce252de292d63..22d560b19c699 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java +++ 
b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java @@ -7,10 +7,11 @@ import org.opensearch.action.ActionListener; import org.opensearch.index.reindex.BulkByScrollResponse; +import org.opensearch.index.reindex.ReindexModulePlugin; import org.opensearch.index.reindex.ReindexRequest; /** - * This interface provides an extension point for {@link org.opensearch.index.reindex.ReindexPlugin}. + * This interface provides an extension point for {@link ReindexModulePlugin}. * This interface can be implemented to provide a custom Rest interceptor and {@link ActionListener} * The Rest interceptor can be used to pre-process any reindex request and perform any action * on the response. The ActionListener listens to the success and failure events on every reindex request diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java index 8ce850a936557..dd7eb977bbe48 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -65,7 +65,7 @@ import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.watcher.ResourceWatcherService; import org.junit.Before; @@ -87,7 +87,7 @@ public class ReindexFromRemoteWithAuthTests extends OpenSearchSingleNodeTestCase @Override protected Collection> getPlugins() { - return Arrays.asList(Netty4Plugin.class, ReindexFromRemoteWithAuthTests.TestPlugin.class, ReindexPlugin.class); + return Arrays.asList(Netty4ModulePlugin.class, ReindexFromRemoteWithAuthTests.TestPlugin.class, ReindexModulePlugin.class); } @Override @@ -100,7 +100,7 @@ protected Settings nodeSettings() { Settings.Builder settings = Settings.builder().put(super.nodeSettings()); // Allowlist reindexing from the http host we're going to use settings.put(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST.getKey(), "127.0.0.1:*"); - settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); + settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME); return settings.build(); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java index 00747d85221c6..be4bacce9b57c 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java @@ -24,7 +24,7 @@ * The test can be removed along with removing support of the deprecated setting. */ public class ReindexRenamedSettingTests extends OpenSearchTestCase { - private final ReindexPlugin plugin = new ReindexPlugin(); + private final ReindexModulePlugin plugin = new ReindexModulePlugin(); /** * Validate that both settings are known and supported.
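Note: the hunks above and below all apply the same mechanical rename, from `ReindexPlugin` (and the other module `*Plugin` classes) to `ReindexModulePlugin` and friends, so the only change consumers need is the updated class reference. A minimal sketch of the resulting test wiring, assuming a hypothetical test class name (the hunks elide generics, which are restored here):

```
import java.util.Arrays;
import java.util.Collection;

import org.opensearch.index.reindex.ReindexModulePlugin;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchSingleNodeTestCase;

// Hypothetical test class illustrating the migration; real examples are in the hunks above.
public class MyReindexTests extends OpenSearchSingleNodeTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> getPlugins() {
        // Register the renamed module plugin; only the class reference changes,
        // the plugin's behavior is untouched by this rename.
        return Arrays.asList(ReindexModulePlugin.class);
    }
}
```

Because each module's `build.gradle` updates its `classname` in the same commit, the rename should be transparent to the built distribution.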
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java index 8ce9cf74bb8be..8c113973f4415 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java @@ -46,7 +46,7 @@ public class ReindexSingleNodeTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Arrays.asList(ReindexPlugin.class); + return Arrays.asList(ReindexModulePlugin.class); } public void testDeprecatedSort() { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java index 0ce3a9f3461c3..0941516194f6e 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java @@ -51,7 +51,7 @@ public abstract class ReindexTestCase extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class); + return Arrays.asList(ReindexModulePlugin.class); } protected ReindexRequestBuilder reindex() { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java index 0b052f8fd57a3..0a01843405d9f 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java @@ -49,7 +49,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.junit.After; import java.util.ArrayList; @@ -84,7 +84,7 @@ public void forceUnblockAllExecutors() { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, Netty4Plugin.class); + return Arrays.asList(ReindexModulePlugin.class, Netty4ModulePlugin.class); } /** diff --git a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 b/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 deleted file mode 100644 index f40f0242448e8..0000000000000 --- a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 702f0e9bb0f8b..7a697623eb8d9 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -38,7 +38,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Module for URL repository' - classname 'org.opensearch.plugin.repository.url.URLRepositoryPlugin' + classname 'org.opensearch.plugin.repository.url.URLRepositoryModulePlugin' } restResources { diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java index aa274549f3a9b..1bf461d67862b 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java +++ 
b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java @@ -39,7 +39,7 @@ import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.plugin.repository.url.URLRepositoryPlugin; +import org.opensearch.plugin.repository.url.URLRepositoryModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotState; @@ -59,7 +59,7 @@ public class URLSnapshotRestoreIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(URLRepositoryPlugin.class); + return Collections.singletonList(URLRepositoryModulePlugin.class); } public void testUrlRepository() throws Exception { diff --git a/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryPlugin.java b/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryModulePlugin.java similarity index 96% rename from modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryPlugin.java rename to modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryModulePlugin.java index d1b08d443805f..00d697e9fb0f6 100644 --- a/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryPlugin.java +++ b/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryModulePlugin.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Map; -public class URLRepositoryPlugin extends Plugin implements RepositoryPlugin { +public class URLRepositoryModulePlugin extends Plugin implements RepositoryPlugin { @Override public List> getSettings() { diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 26e094a9eeae1..726092ffe4273 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -30,5 +30,5 @@ opensearchplugin { description 'Integrates OpenSearch with systemd' - classname 'org.opensearch.systemd.SystemdPlugin' + classname 'org.opensearch.systemd.SystemdModulePlugin' } diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java similarity index 96% rename from modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java rename to modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java index 8abdbbb74ae7f..a22b3f862e017 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java @@ -55,9 +55,9 @@ import java.util.Collections; import java.util.function.Supplier; -public class SystemdPlugin extends Plugin implements ClusterPlugin { +public class SystemdModulePlugin extends Plugin implements ClusterPlugin { - private static final Logger logger = LogManager.getLogger(SystemdPlugin.class); + private static final Logger logger = LogManager.getLogger(SystemdModulePlugin.class); private final boolean enabled; @@ -66,11 +66,11 @@ final boolean isEnabled() { } @SuppressWarnings("unused") - public SystemdPlugin() { + public SystemdModulePlugin() { this(System.getenv("OPENSEARCH_SD_NOTIFY")); } - SystemdPlugin(final String esSDNotify) { + SystemdModulePlugin(final String esSDNotify) { logger.trace("OPENSEARCH_SD_NOTIFY is set to [{}]", 
esSDNotify); if (esSDNotify == null) { enabled = false; diff --git a/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/opensearch/systemd/SystemdModulePluginTests.java similarity index 87% rename from modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java rename to modules/systemd/src/test/java/org/opensearch/systemd/SystemdModulePluginTests.java index 63d97a7486f58..532d47cd009e0 100644 --- a/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/opensearch/systemd/SystemdModulePluginTests.java @@ -56,7 +56,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class SystemdPluginTests extends OpenSearchTestCase { +public class SystemdModulePluginTests extends OpenSearchTestCase { final Scheduler.Cancellable extender = mock(Scheduler.Cancellable.class); final ThreadPool threadPool = mock(ThreadPool.class); @@ -67,14 +67,14 @@ public class SystemdPluginTests extends OpenSearchTestCase { } public void testIsImplicitlyNotEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(null); + final SystemdModulePlugin plugin = new SystemdModulePlugin(null); plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); } public void testIsExplicitlyNotEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(Boolean.FALSE.toString()); + final SystemdModulePlugin plugin = new SystemdModulePlugin(Boolean.FALSE.toString()); plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); @@ -85,7 +85,7 @@ public void testInvalid() { s -> Boolean.TRUE.toString().equals(s) || Boolean.FALSE.toString().equals(s), () -> randomAlphaOfLength(4) ); - final RuntimeException e = expectThrows(RuntimeException.class, () -> new SystemdPlugin(esSDNotify)); + final RuntimeException e = expectThrows(RuntimeException.class, () -> new SystemdModulePlugin(esSDNotify)); assertThat(e, hasToString(containsString("OPENSEARCH_SD_NOTIFY set to unexpected value [" + esSDNotify + "]"))); } @@ -113,9 +113,9 @@ public void testOnNodeStartedNotEnabled() { private void runTestOnNodeStarted( final String esSDNotify, final int rc, - final BiConsumer, SystemdPlugin> assertions + final BiConsumer, SystemdModulePlugin> assertions ) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::onNodeStarted, "READY=1"); + runTest(esSDNotify, rc, assertions, SystemdModulePlugin::onNodeStarted, "READY=1"); } public void testCloseSuccess() { @@ -138,21 +138,25 @@ public void testCloseNotEnabled() { runTestClose(Boolean.FALSE.toString(), randomInt(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); } - private void runTestClose(final String esSDNotify, final int rc, final BiConsumer, SystemdPlugin> assertions) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::close, "STOPPING=1"); + private void runTestClose( + final String esSDNotify, + final int rc, + final BiConsumer, SystemdModulePlugin> assertions + ) { + runTest(esSDNotify, rc, assertions, SystemdModulePlugin::close, "STOPPING=1"); } private void runTest( final String esSDNotify, final int rc, - final BiConsumer, SystemdPlugin> assertions, - final CheckedConsumer invocation, + final BiConsumer, SystemdModulePlugin> assertions, + final CheckedConsumer invocation, final String expectedState 
) { final AtomicBoolean invoked = new AtomicBoolean(); final AtomicInteger invokedUnsetEnvironment = new AtomicInteger(); final AtomicReference invokedState = new AtomicReference<>(); - final SystemdPlugin plugin = new SystemdPlugin(esSDNotify) { + final SystemdModulePlugin plugin = new SystemdModulePlugin(esSDNotify) { @Override int sd_notify(final int unset_environment, final String state) { diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 450eaed14fa46..b72cb6d868d79 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -49,7 +49,7 @@ apply plugin: 'opensearch.publish' */ opensearchplugin { description 'Netty 4 based transport implementation' - classname 'org.opensearch.transport.Netty4Plugin' + classname 'org.opensearch.transport.Netty4ModulePlugin' hasClientJar = true } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java index 9915b9fe51be2..9b086e0b8baa9 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java @@ -35,7 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.netty4.Netty4Transport; import java.util.Collection; @@ -60,13 +60,13 @@ protected Settings nodeSettings(int nodeOrdinal) { if (randomBoolean()) { builder.put(Netty4Transport.WORKER_COUNT.getKey(), random().nextInt(3) + 1); } - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME); - builder.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME); return builder.build(); } @Override protected Collection> nodePlugins() { - return Collections.singletonList(Netty4Plugin.class); + return Collections.singletonList(Netty4ModulePlugin.class); } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java index a572a181a46fd..e6604abf126da 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java @@ -41,7 +41,7 @@ import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.TransportInfo; import java.net.Inet4Address; @@ -60,7 +60,7 @@ public class Netty4TransportPublishAddressIT extends OpenSearchNetty4IntegTestCa protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() 
.put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME) .build(); } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java similarity index 98% rename from modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java rename to modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index 73cfe4e46fbda..8a18210fd963e 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -57,7 +57,7 @@ import java.util.Map; import java.util.function.Supplier; -public class Netty4Plugin extends Plugin implements NetworkPlugin { +public class Netty4ModulePlugin extends Plugin implements NetworkPlugin { public static final String NETTY_TRANSPORT_NAME = "netty4"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 deleted file mode 100644 index df4ae8d72dd2b..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..83c10845cd35a --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2f6cb0fd7387c6e0db3b86eef7d8677cea3e88a0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 deleted file mode 100644 index 675bf726d2a65..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87c1357612f2f483174d1a63ea8c6680a1696bac \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..29387f38bc10c --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +6aff23715a2fba88d844ac83c61decce8ed480bd \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 deleted file mode 100644 index 8987f89c913df..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d032dbeb3f4015741336a877dd4b0e62099246c \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..54b451abf5049 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ 
-0,0 +1 @@ +f82d3eba195134f663865e9de3f511e16fbc7351 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 deleted file mode 100644 index 00d66c733c548..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe6ac8772b545e0abd0c755cd4bd07caad58edb9 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..87474064fbe0f --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2af6e1996e696b1721a2ec7382bac9aa5096eecb \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 deleted file mode 100644 index 0c521b5f5ef6a..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -288726e13b598c341e81aef8b5c9ce53f51889d0 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..6d35832a1a643 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +ec01d7f91f711abd75b539bb66a437db7cf1ca67 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 deleted file mode 100644 index ba98dd7e06f71..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -166d02f7f98f18c6607335030a404fcad8f57cd6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..f93d1a153cd26 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +7041b3fa92b8687a84c4ce666b5718bbbc315db1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 deleted file mode 100644 index 88ac9a13e8ce3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c0e4177aa87a4be2826a360f656f3559ea3f997 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..77589a361badf --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +0a5ec9a237c2539e3cbabfadff707252e33b3599 \ No newline at end of file diff --git 
a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java index 951c2df53ba59..08974b902c418 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java @@ -35,7 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.MockNioTransportPlugin; import org.opensearch.transport.nio.NioTransportPlugin; import org.junit.BeforeClass; @@ -52,9 +52,9 @@ public abstract class HttpSmokeTestCase extends OpenSearchIntegTestCase { @SuppressWarnings("unchecked") @BeforeClass public static void setUpTransport() { - nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); - nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4Plugin.class, NioTransportPlugin.class)); - clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); + nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); + nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class)); + clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); } private static String getTypeKey(Class clazz) { @@ -63,8 +63,8 @@ private static String getTypeKey(Class clazz) { } else if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_TRANSPORT_NAME; } else { - assert clazz.equals(Netty4Plugin.class); - return Netty4Plugin.NETTY_TRANSPORT_NAME; + assert clazz.equals(Netty4ModulePlugin.class); + return Netty4ModulePlugin.NETTY_TRANSPORT_NAME; } } @@ -72,8 +72,8 @@ private static String getHttpTypeKey(Class clazz) { if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME; } else { - assert clazz.equals(Netty4Plugin.class); - return Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME; + assert clazz.equals(Netty4ModulePlugin.class); + return Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME; } } @@ -92,7 +92,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); + return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class); } @Override diff --git a/release-notes/opensearch.release-notes-2.2.0.md b/release-notes/opensearch.release-notes-2.2.0.md new file mode 100644 index 0000000000000..74e76cfe46b5a --- /dev/null +++ b/release-notes/opensearch.release-notes-2.2.0.md @@ -0,0 +1,79 @@ +## 2022-08-05 Version 2.2.0 Release Notes + +### Features/Enhancements + +* Task consumer Integration ([#2293](https://github.com/opensearch-project/opensearch/pull/2293)) ([#4141](https://github.com/opensearch-project/opensearch/pull/4141)) +* [Backport 2.x] [Segment Replication] Add SegmentReplicationTargetService to orchestrate replication events. 
([#4074](https://github.com/opensearch-project/opensearch/pull/4074)) +* Support task resource tracking in OpenSearch ([#3982](https://github.com/opensearch-project/opensearch/pull/3982)) ([#4087](https://github.com/opensearch-project/opensearch/pull/4087)) +* Making shard copy count a multiple of attribute count ([#3462](https://github.com/opensearch-project/opensearch/pull/3462)) ([#4086](https://github.com/opensearch-project/opensearch/pull/4086)) +* [Backport 2.x] [Segment Replication] Adding CheckpointRefreshListener to trigger when Segment replication is turned on and Primary shard refreshes ([#4044](https://github.com/opensearch-project/opensearch/pull/4044)) +* Add doc_count field mapper ([#3985](https://github.com/opensearch-project/opensearch/pull/3985)) ([#4037](https://github.com/opensearch-project/opensearch/pull/4037)) +* Parallelize stale blobs deletion during snapshot delete ([#3796](https://github.com/opensearch-project/opensearch/pull/3796)) ([#3990](https://github.com/opensearch-project/opensearch/pull/3990)) +* [Backport 2.x] [Segment Replication] Add a new Engine implementation for replicas with segment replication enabled. ([#4003](https://github.com/opensearch-project/opensearch/pull/4003)) +* [Backport 2.x] Adds a new parameter, max_analyzer_offset, for the highlighter ([#4031](https://github.com/opensearch-project/opensearch/pull/4031)) +* Update merge on refresh and merge on commit defaults in OpenSearch (Lucene 9.3) ([#3561](https://github.com/opensearch-project/opensearch/pull/3561)) ([#4013](https://github.com/opensearch-project/opensearch/pull/4013)) +* Make HybridDirectory MMAP Extensions Configurable ([#3837](https://github.com/opensearch-project/opensearch/pull/3837)) ([#3970](https://github.com/opensearch-project/opensearch/pull/3970)) +* Add option to disable chunked transfer-encoding ([#3864](https://github.com/opensearch-project/opensearch/pull/3864)) ([#3885](https://github.com/opensearch-project/opensearch/pull/3885)) +* Introducing TranslogManager implementations decoupled from the Engine [2.x] ([#3820](https://github.com/opensearch-project/opensearch/pull/3820)) +* Changing default no_master_block from write to metadata_write ([#3621](https://github.com/opensearch-project/opensearch/pull/3621)) ([#3756](https://github.com/opensearch-project/opensearch/pull/3756)) + +### Bug Fixes + +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/opensearch/pull/4143)) ([#4145](https://github.com/opensearch-project/opensearch/pull/4145)) +* Binding empty instance of SegmentReplicationCheckpointPublisher when Feature Flag is off in IndicesModule.java file.
([#4119](https://github.com/opensearch-project/opensearch/pull/4119)) +* Fix the bug that masterOperation(with task param) is bypassed ([#4103](https://github.com/opensearch-project/opensearch/pull/4103)) ([#4115](https://github.com/opensearch-project/opensearch/pull/4115)) +* Fixing flaky org.opensearch.cluster.routing.allocation.decider.DiskThresholdDeciderIT.testHighWatermarkNotExceeded test case ([#4012](https://github.com/opensearch-project/opensearch/pull/4012)) ([#4014](https://github.com/opensearch-project/opensearch/pull/4014)) +* Correct typo: Rutime -> Runtime ([#3896](https://github.com/opensearch-project/opensearch/pull/3896)) ([#3898](https://github.com/opensearch-project/opensearch/pull/3898)) +* Fixing implausibly old time stamp 1970-01-01 00:00:00 by using the timestamp from the Git revision instead of default 0 value ([#3883](https://github.com/opensearch-project/opensearch/pull/3883)) ([#3891](https://github.com/opensearch-project/opensearch/pull/3891)) + +### Infrastructure + +* Correctly ignore dependabot branches during push ([#4077](https://github.com/opensearch-project/opensearch/pull/4077)) ([#4113](https://github.com/opensearch-project/opensearch/pull/4113)) +* Build performance improvements ([#3926](https://github.com/opensearch-project/opensearch/pull/3926)) ([#3937](https://github.com/opensearch-project/opensearch/pull/3937)) +* PR coverage requirement and default settings ([#3931](https://github.com/opensearch-project/opensearch/pull/3931)) ([#3938](https://github.com/opensearch-project/opensearch/pull/3938)) +* [Backport 2.x] Fail build on wildcard imports ([#3940](https://github.com/opensearch-project/opensearch/pull/3940)) +* Don't run EmptyDirTaskTests in a Docker container ([#3792](https://github.com/opensearch-project/opensearch/pull/3792)) ([#3912](https://github.com/opensearch-project/opensearch/pull/3912)) +* Add coverage, gha, jenkins server, documentation and forum badges ([#3886](https://github.com/opensearch-project/opensearch/pull/3886)) +* Unable to use Systemd module with tar distribution ([#3755](https://github.com/opensearch-project/opensearch/pull/3755)) ([#3903](https://github.com/opensearch-project/opensearch/pull/3903)) +* Ignore backport / autocut / dependabot branches for gradle checks ([#3816](https://github.com/opensearch-project/opensearch/pull/3816)) ([#3825](https://github.com/opensearch-project/opensearch/pull/3825)) +* Setup branch push coverage and fix coverage uploads ([#3793](https://github.com/opensearch-project/opensearch/pull/3793)) ([#3811](https://github.com/opensearch-project/opensearch/pull/3811)) +* Enable XML test reports for Jenkins integration ([#3799](https://github.com/opensearch-project/opensearch/pull/3799)) ([#3803](https://github.com/opensearch-project/opensearch/pull/3803)) + +### Maintenance + +* OpenJDK Update (July 2022 Patch releases) ([#4023](https://github.com/opensearch-project/opensearch/pull/4023)) ([#4092](https://github.com/opensearch-project/opensearch/pull/4092)) +* Update to Lucene 9.3.0 ([#4043](https://github.com/opensearch-project/opensearch/pull/4043)) ([#4088](https://github.com/opensearch-project/opensearch/pull/4088)) +* Bump commons-configuration2 from 2.7 to 2.8.0 in /plugins/repository-hdfs ([#3764](https://github.com/opensearch-project/opensearch/pull/3764)) ([#3783](https://github.com/opensearch-project/opensearch/pull/3783)) +* Use bash in systemd-entrypoint shebang ([#4008](https://github.com/opensearch-project/opensearch/pull/4008))
([#4009](https://github.com/opensearch-project/opensearch/pull/4009)) +* Bump com.gradle.enterprise from 3.10.1 to 3.10.2 ([#3568](https://github.com/opensearch-project/opensearch/pull/3568)) ([#3934](https://github.com/opensearch-project/opensearch/pull/3934)) +* Bump log4j-core in /buildSrc/src/testKit/thirdPartyAudit/sample_jars ([#3763](https://github.com/opensearch-project/opensearch/pull/3763)) ([#3784](https://github.com/opensearch-project/opensearch/pull/3784)) +* Added bwc version 1.3.5 ([#3911](https://github.com/opensearch-project/opensearch/pull/3911)) ([#3913](https://github.com/opensearch-project/opensearch/pull/3913)) +* Update to Gradle 7.5 ([#3594](https://github.com/opensearch-project/opensearch/pull/3594)) ([#3904](https://github.com/opensearch-project/opensearch/pull/3904)) +* Update Netty to 4.1.79.Final ([#3868](https://github.com/opensearch-project/opensearch/pull/3868)) ([#3874](https://github.com/opensearch-project/opensearch/pull/3874)) +* Upgrade MinIO image version ([#3541](https://github.com/opensearch-project/opensearch/pull/3541)) ([#3867](https://github.com/opensearch-project/opensearch/pull/3867)) +* Add netty-transport-native-unix-common to modules/transport-netty4/bu… ([#3848](https://github.com/opensearch-project/opensearch/pull/3848)) ([#3853](https://github.com/opensearch-project/opensearch/pull/3853)) +* Update outdated dependencies ([#3821](https://github.com/opensearch-project/opensearch/pull/3821)) ([#3854](https://github.com/opensearch-project/opensearch/pull/3854)) +* Added bwc version 2.1.1 ([#3806](https://github.com/opensearch-project/opensearch/pull/3806)) +* Upgrade netty from 4.1.73.Final to 4.1.78.Final ([#3772](https://github.com/opensearch-project/opensearch/pull/3772)) ([#3778](https://github.com/opensearch-project/opensearch/pull/3778)) +* Bump protobuf-java from 3.21.1 to 3.21.2 in /plugins/repository-hdfs ([#3711](https://github.com/opensearch-project/opensearch/pull/3711)) ([#3726](https://github.com/opensearch-project/opensearch/pull/3726)) +* Upgrading AWS SDK dependency for native plugins ([#3694](https://github.com/opensearch-project/opensearch/pull/3694)) ([#3701](https://github.com/opensearch-project/opensearch/pull/3701)) + +### Refactoring + +* [Backport 2.x] Changes to encapsulate Translog into TranslogManager ([#4095](https://github.com/opensearch-project/opensearch/pull/4095)) ([#4142](https://github.com/opensearch-project/opensearch/pull/4142)) +* Deprecate and rename abstract methods in interfaces that contain 'master' in name ([#4121](https://github.com/opensearch-project/opensearch/pull/4121)) ([#4123](https://github.com/opensearch-project/opensearch/pull/4123)) +* [Backport 2.x] Integrate Engine with decoupled Translog interfaces ([#3822](https://github.com/opensearch-project/opensearch/pull/3822)) +* Deprecate class FakeThreadPoolMasterService, BlockMasterServiceOnMaster and BusyMasterServiceDisruption ([#4058](https://github.com/opensearch-project/opensearch/pull/4058)) ([#4068](https://github.com/opensearch-project/opensearch/pull/4068)) +* Rename classes with name 'MasterService' to 'ClusterManagerService' in directory 'test/framework' ([#4051](https://github.com/opensearch-project/opensearch/pull/4051)) ([#4057](https://github.com/opensearch-project/opensearch/pull/4057)) +* Deprecate class 'MasterService' and create alternative class 'ClusterManagerService' ([#4022](https://github.com/opensearch-project/opensearch/pull/4022)) ([#4050](https://github.com/opensearch-project/opensearch/pull/4050)) +* Deprecate and 
Rename abstract methods from 'Master' terminology to 'ClusterManager'. ([#4032](https://github.com/opensearch-project/opensearch/pull/4032)) ([#4048](https://github.com/opensearch-project/opensearch/pull/4048)) +* Deprecate public methods and variables that contain 'master' terminology in class 'NoMasterBlockService' and 'MasterService' ([#4006](https://github.com/opensearch-project/opensearch/pull/4006)) ([#4038](https://github.com/opensearch-project/opensearch/pull/4038)) +* Deprecate public methods and variables that contain 'master' terminology in 'client' directory ([#3966](https://github.com/opensearch-project/opensearch/pull/3966)) ([#3981](https://github.com/opensearch-project/opensearch/pull/3981)) +* [segment replication] Introducing common Replication interfaces for segment replication and recovery code paths ([#3234](https://github.com/opensearch-project/opensearch/pull/3234)) ([#3984](https://github.com/opensearch-project/opensearch/pull/3984)) +* Deprecate public methods and variables that contain 'master' terminology in 'test/framework' directory ([#3978](https://github.com/opensearch-project/opensearch/pull/3978)) ([#3987](https://github.com/opensearch-project/opensearch/pull/3987)) +* [Backport 2.x] [Segment Replication] Moving RecoveryState.Index to a top-level class and renaming ([#3971](https://github.com/opensearch-project/opensearch/pull/3971)) +* Rename and deprecate public methods that contain 'master' in the name in 'server' directory ([#3647](https://github.com/opensearch-project/opensearch/pull/3647)) ([#3964](https://github.com/opensearch-project/opensearch/pull/3964)) +* [2.x] Deprecate public class names with master terminology ([#3871](https://github.com/opensearch-project/opensearch/pull/3871)) ([#3914](https://github.com/opensearch-project/opensearch/pull/3914)) +* [Backport 2.x] Rename public classes with 'Master' to 'ClusterManager' ([#3870](https://github.com/opensearch-project/opensearch/pull/3870)) +* Revert renaming masterOperation() to clusterManagerOperation() ([#3681](https://github.com/opensearch-project/opensearch/pull/3681)) ([#3714](https://github.com/opensearch-project/opensearch/pull/3714)) +* Revert renaming method onMaster() and offMaster() in interface LocalNodeMasterListener ([#3686](https://github.com/opensearch-project/opensearch/pull/3686)) ([#3693](https://github.com/opensearch-project/opensearch/pull/3693)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json new file mode 100644 index 0000000000000..6af49f75b9f6e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json @@ -0,0 +1,34 @@ +{ + "remote_store.restore":{ + "documentation":{ + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/remote-store#restore", + "description":"Restores from remote store."
+ }, + "stability":"experimental", + "url":{ + "paths":[ + { + "path":"/_remotestore/_restore", + "methods":[ + "POST" + ] + } + ] + }, + "params":{ + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" + }, + "wait_for_completion":{ + "type":"boolean", + "description":"Should this request wait until the operation has completed before returning", + "default":false + } + }, + "body":{ + "description":"A comma separated list of index IDs", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 6f276f669f815..25d3dd160e031 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -17,6 +17,12 @@ setup: id: 1 body: { text: some short words with a stupendously long one } + - do: + index: + index: test + id: 2 + body: { text: sentence with UPPERCASE WORDS } + - do: indices.refresh: index: [test] @@ -76,6 +82,25 @@ setup: - match: {hits.max_score: 1} - match: {hits.hits.0._score: 1} +--- +"search with uppercase regex": + - skip: + version: " - 2.99.99" + reason: uppercase regex not supported before 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + query_string: + default_field: text + query: /UPPERCASE/ + + - match: {hits.total: 1} + - match: {hits.max_score: 1} + - match: {hits.hits.0._score: 1} + --- "search index prefixes with span_multi": - do: diff --git a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 deleted file mode 100644 index 2e260eb028f4c..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03496708a19a8a55a0dc4f61f8aa2febc6e8977c \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..0805590fd6efd --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +3920c527fd5eee69e09f614391cef4e05c581c7f \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 deleted file mode 100644 index 1dda17ee92fdb..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95ea01ee0d1e543e18e3cf58d8a6a27a587a7239 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..a8c648e4c192a --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +d1dfcd42ea257355d5cbc64ac2f47142a550ae52 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0.jar.sha1 b/server/licenses/lucene-core-9.3.0.jar.sha1 deleted file mode 100644 index fd870008c5bd4..0000000000000 --- a/server/licenses/lucene-core-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a030180999bc3f1a65f23f53b38098ca9daeee79 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 
b/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..779c9796ceae7 --- /dev/null +++ b/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +ae4757f88e97036b30eb1eac1d21da6dabc85a5e \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0.jar.sha1 b/server/licenses/lucene-grouping-9.3.0.jar.sha1 deleted file mode 100644 index 6f63ca177d3c3..0000000000000 --- a/server/licenses/lucene-grouping-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -883071196e53ec93d2a53dcc8211ee30be6c00dc \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..9167482284f9d --- /dev/null +++ b/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +75485e3222b096027463378fe3bb2c8d1f529d25 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 deleted file mode 100644 index 78264d8ee3713..0000000000000 --- a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e895c49b9991ea2ec08855c425b9eae44a08764 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..2090b009a57fe --- /dev/null +++ b/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +d2a3d1f326f6d3bd6033b5620dc84f3c20a0412d \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0.jar.sha1 b/server/licenses/lucene-join-9.3.0.jar.sha1 deleted file mode 100644 index 5e641f5f01075..0000000000000 --- a/server/licenses/lucene-join-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04baaae4ce4a35ae919150dd17cd1e63b0da9d24 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..df74fa911f7d2 --- /dev/null +++ b/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2c6f6058c765a955e0544c6050aeee3a5e376e47 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0.jar.sha1 b/server/licenses/lucene-memory-9.3.0.jar.sha1 deleted file mode 100644 index c8e86c7674ede..0000000000000 --- a/server/licenses/lucene-memory-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a2203b332edc1366b9789f5286296e109dbc8c4 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..3e9d65d73d36d --- /dev/null +++ b/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2a155679022106c7db356da32563580d8de043d7 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0.jar.sha1 b/server/licenses/lucene-misc-9.3.0.jar.sha1 deleted file mode 100644 index 11a459a9f52ba..0000000000000 --- a/server/licenses/lucene-misc-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -61b502c9557247b6803a346c0bab20c9dc89d125 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..f056cfe5b86ef --- /dev/null +++ 
b/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +7c7ac2027a12bf02657ec3a421c8039736b98344 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0.jar.sha1 b/server/licenses/lucene-queries-9.3.0.jar.sha1 deleted file mode 100644 index 2b577bd33b46a..0000000000000 --- a/server/licenses/lucene-queries-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8fe3bce3c05015c5fdb78279f36b9f1a75b98d8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..41ea4a342f949 --- /dev/null +++ b/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +25978bb82b9f78537f0511f0be64253be19de6fd \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 deleted file mode 100644 index b106860bf9f3e..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78f259a66d48f77a2d2b96a0a858efa08eba72dc \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..e0687571df957 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +e3b6ce41d5bd73fdcc80b5b2a40283c03525aa96 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 deleted file mode 100644 index 82c2c6d85ca4c..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ee318cf8e9a70c2c99e03e157465316a3d4a17a \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..e03c731914757 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +ae8649d2d01a416acdbe7c29f14b47a5594acb85 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 deleted file mode 100644 index 8bbc5359487ff..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9b226b49ae987a4226791f023562187583eb9ad \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..ea8b5cd1ddb1d --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +58049352bb5fc8683c389eb2eb879fb4f16ff9b3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 deleted file mode 100644 index 31132ef0ad6df..0000000000000 --- a/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -201aa61856ae44fa494504591aed54fd9b75af16 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..a0c0dbe07af8f --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 
+1 @@ +2d3a8f802e1bb439d945de81ba6b16d01b24d58a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0.jar.sha1 b/server/licenses/lucene-suggest-9.3.0.jar.sha1 deleted file mode 100644 index 71a263aa163f8..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb5d7243ba67616edbda1ecf421c615dd595752d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..1f332eac16c72 --- /dev/null +++ b/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +11cdb21cf08feb19e074b4a101e1550dfd544379 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index d37285211f774..61059f83f0e77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -470,6 +470,9 @@ public void onTaskUnregistered(Task task) {} @Override public void waitForTaskCompletion(Task task) {} + + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} }); } // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener @@ -651,6 +654,9 @@ public void waitForTaskCompletion(Task task) { waitForWaitingToStart.countDown(); } + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} + @Override public void onTaskRegistered(Task task) {} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java b/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java index e38b128c04fde..a75057356fe8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java @@ -42,7 +42,10 @@ import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.rest.RestStatus; import org.opensearch.script.MockScriptPlugin; @@ -74,7 +77,11 @@ public class WaitUntilRefreshIT extends OpenSearchIntegTestCase { @Override public Settings indexSettings() { // Use a shorter refresh interval to speed up the tests. We'll be waiting on this interval several times. 
- return Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "40ms").build(); + final Settings.Builder builder = Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "40ms"); + if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { + builder.put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } + return builder.build(); } @Before diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 2bf73b34247b3..3d8da7eac7690 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -78,6 +78,7 @@ import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; @@ -675,6 +676,7 @@ public static final IndexShard newIndexShard( () -> {}, RetentionLeaseSyncer.EMPTY, cbs, + new InternalTranslogFactory(), SegmentReplicationCheckpointPublisher.EMPTY, null ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index a1cc0148dcdac..8566cc5556861 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -15,10 +15,14 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.ShardSegments; +import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; @@ -29,6 +33,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -36,6 +41,8 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; @@ -70,6 +77,109 @@ protected boolean addMockInternalEngine() { return false; } + public void testPrimaryStopped_ReplicaPromoted() throws Exception { + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + 
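+ // Index the first doc with RefreshPolicy.IMMEDIATE so it is searchable on the primary right away; the refresh() and waitForReplicaUpdate() calls below then block until the replica reports the primary's latest segment generation.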
client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForReplicaUpdate(); + assertHitCount(client(primary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 1); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 1); + + // index another doc, but do not wait for it to replicate; we will ensure it is searchable once the replica is promoted. + client().prepareIndex(INDEX_NAME).setId("2").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // stop the primary node - we only have one shard here. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + + final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica); + assertNotNull(replicaShardRouting); + assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary()); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + + // assert we can index into the new primary. + client().prepareIndex(INDEX_NAME).setId("3").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + + // start another node, index another doc and replicate. + String nodeC = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + client().prepareIndex(INDEX_NAME).setId("4").setSource("baz", "baz").get(); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + assertHitCount(client(nodeC).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 4); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 4); + assertSegmentStats(REPLICA_COUNT); + } + + public void testRestartPrimary() throws Exception { + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), primary); + + final int initialDocCount = 1; + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForReplicaUpdate(); + assertDocCounts(initialDocCount, replica, primary); + + internalCluster().restartNode(primary); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), replica); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertDocCounts(initialDocCount, replica, primary); + assertSegmentStats(REPLICA_COUNT); + } + + public void testCancelPrimaryAllocation() throws Exception { + // this test cancels allocation on the primary - promoting the new replica and recreating the former primary as a replica.
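+ // CancelAllocationCommand takes (index, shardId, node, allowPrimary); allowPrimary=true is required to cancel a started primary copy, after which the cluster manager promotes the in-sync replica and re-initializes the cancelled copy as a replica.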
+ final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + final int initialDocCount = 1; + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForReplicaUpdate(); + assertDocCounts(initialDocCount, replica, primary); + + final IndexShard indexShard = getIndexShard(primary); + client().admin() + .cluster() + .prepareReroute() + .add(new CancelAllocationCommand(INDEX_NAME, indexShard.shardId().id(), primary, true)) + .execute() + .actionGet(); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), replica); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertDocCounts(initialDocCount, replica, primary); + assertSegmentStats(REPLICA_COUNT); + } + public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { final String nodeA = internalCluster().startNode(); final String nodeB = internalCluster().startNode(); @@ -111,6 +221,54 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { } } + public void testMultipleShards() throws Exception { + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String nodeA = internalCluster().startNode(); + final String nodeB = internalCluster().startNode(); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(1, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + ensureGreen(INDEX_NAME); + assertSegmentStats(REPLICA_COUNT); + } + } + public void testReplicationAfterForceMerge() throws Exception { final String nodeA = internalCluster().startNode(); final String nodeB = internalCluster().startNode(); @@ -189,9 +347,8 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); - final Index index = 
resolveIndex(INDEX_NAME); - IndexShard primaryShard = getIndexShard(index, primaryNode); - IndexShard replicaShard = getIndexShard(index, replicaNode); + IndexShard primaryShard = getIndexShard(primaryNode); + IndexShard replicaShard = getIndexShard(replicaNode); assertEquals( primaryShard.translogStats().estimatedNumberOfOperations(), replicaShard.translogStats().estimatedNumberOfOperations() @@ -199,6 +356,69 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { assertSegmentStats(REPLICA_COUNT); } + public void testDeleteOperations() throws Exception { + final String nodeA = internalCluster().startNode(); + final String nodeB = internalCluster().startNode(); + + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + final int initialDocCount = scaledRandomIntBetween(0, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + // wait a short amount of time to give replication a chance to complete. + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + waitForReplicaUpdate(); + + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + ensureGreen(INDEX_NAME); + + Set ids = indexer.getIds(); + String id = ids.toArray()[0].toString(); + client(nodeA).prepareDelete(INDEX_NAME, id).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + refresh(INDEX_NAME); + waitForReplicaUpdate(); + assertBusy(() -> { + final long nodeA_Count = client(nodeA).prepareSearch(INDEX_NAME) + .setSize(0) + .setPreference("_only_local") + .get() + .getHits() + .getTotalHits().value; + assertEquals(expectedHitCount - 1, nodeA_Count); + final long nodeB_Count = client(nodeB).prepareSearch(INDEX_NAME) + .setSize(0) + .setPreference("_only_local") + .get() + .getHits() + .getTotalHits().value; + assertEquals(expectedHitCount - 1, nodeB_Count); + }, 5, TimeUnit.SECONDS); + } + } + private void assertSegmentStats(int numberOfReplicas) throws IOException { final IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().segments(new IndicesSegmentsRequest()).actionGet(); @@ -237,8 +457,7 @@ private void assertSegmentStats(int numberOfReplicas) throws IOException { final ShardRouting replicaShardRouting = shardSegment.getShardRouting(); ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); final DiscoveryNode replicaNode = state.nodes().resolveNode(replicaShardRouting.currentNodeId()); - final Index index = resolveIndex(INDEX_NAME); - IndexShard indexShard = getIndexShard(index, replicaNode.getName()); + IndexShard indexShard = getIndexShard(replicaNode.getName()); final String lastCommitSegmentsFileName = 
SegmentInfos.getLastCommitSegmentsFileName(indexShard.store().directory()); // calls to readCommit will fail if a valid commit point and all its segments are not in the store. SegmentInfos.readCommit(indexShard.store().directory(), lastCommitSegmentsFileName); @@ -262,21 +481,24 @@ private void waitForReplicaUpdate() throws Exception { final Map> segmentListMap = segmentsByShardType(replicationGroupSegments); final List primaryShardSegmentsList = segmentListMap.get(true); final List replicaShardSegments = segmentListMap.get(false); - + // if we don't have any segments yet, proceed. final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get(); - final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); - final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); - for (ShardSegments shardSegments : replicaShardSegments) { - final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() - .stream() - .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); - assertTrue(isReplicaCaughtUpToPrimary); + if (primaryShardSegments.getSegments().isEmpty() == false) { + final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); + final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); + for (ShardSegments shardSegments : replicaShardSegments) { + final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() + .stream() + .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); + assertTrue(isReplicaCaughtUpToPrimary); + } } } }); } - private IndexShard getIndexShard(Index index, String node) { + private IndexShard getIndexShard(String node) { + final Index index = resolveIndex(INDEX_NAME); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); IndexService indexService = indicesService.indexServiceSafe(index); final Optional shardId = indexService.shardIds().stream().findFirst(); @@ -293,7 +515,8 @@ private List getShardSegments(IndicesSegmentResponse indicesSeg } private Map getLatestSegments(ShardSegments segments) { - final Long latestPrimaryGen = segments.getSegments().stream().map(Segment::getGeneration).max(Long::compare).get(); + final Optional generation = segments.getSegments().stream().map(Segment::getGeneration).max(Long::compare); + final Long latestPrimaryGen = generation.get(); return segments.getSegments() .stream() .filter(s -> s.getGeneration() == latestPrimaryGen) @@ -303,4 +526,31 @@ private Map getLatestSegments(ShardSegments segments) { private Map> segmentsByShardType(ShardSegments[] replicationGroupSegments) { return Arrays.stream(replicationGroupSegments).collect(Collectors.groupingBy(s -> s.getShardRouting().primary())); } + + @Nullable + private ShardRouting getShardRoutingForNodeName(String nodeName) { + final ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); + for (IndexShardRoutingTable shardRoutingTable : state.routingTable().index(INDEX_NAME)) { + for (ShardRouting shardRouting : shardRoutingTable.activeShards()) { + final String nodeId = shardRouting.currentNodeId(); + final DiscoveryNode discoveryNode = state.nodes().resolveNode(nodeId); + if (discoveryNode.getName().equals(nodeName)) { + return shardRouting; + } + } + } + return null; + } + + private void assertDocCounts(int expectedDocCount, String... 
nodeNames) { + for (String node : nodeNames) { + assertHitCount(client(node).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedDocCount); + } + } + + private DiscoveryNode getNodeContainingPrimaryShard() { + final ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); + final ShardRouting primaryShard = state.routingTable().index(INDEX_NAME).shard(0).primaryShard(); + return state.nodes().resolveNode(primaryShard.currentNodeId()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java index 7d3f06760882d..26bfe59618275 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java @@ -39,7 +39,6 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.opensearch.search.aggregations.metrics.Cardinality; -import org.opensearch.search.aggregations.metrics.GeoBounds; import org.opensearch.search.aggregations.metrics.GeoCentroid; import org.opensearch.search.aggregations.metrics.Percentiles; import org.opensearch.search.aggregations.metrics.Stats; @@ -47,7 +46,6 @@ import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; -import static org.opensearch.search.aggregations.AggregationBuilders.geoBounds; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.percentiles; @@ -213,28 +211,6 @@ public void testStats() { assertEquals(4, stats.getAvg(), 0); } - public void testUnmappedGeoBounds() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")) - .get(); - assertSearchResponse(response); - GeoBounds bounds = response.getAggregations().get("bounds"); - assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5)); - assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5)); - assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); - assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); - } - - public void testGeoBounds() { - SearchResponse response = client().prepareSearch("idx").addAggregation(geoBounds("bounds").field("location").missing("2,1")).get(); - assertSearchResponse(response); - GeoBounds bounds = response.getAggregations().get("bounds"); - assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5)); - assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5)); - assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); - assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); - } - public void testGeoCentroid() { SearchResponse response = client().prepareSearch("idx") .addAggregation(geoCentroid("centroid").field("location").missing("2,1")) diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index d4c2920c8a452..ba512d3fbcdd9 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -95,7 +95,9 @@ public class Version 
implements Comparable, ToXContentFragment { public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 71c900beb5319..052d2ec2b5764 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -61,6 +61,8 @@ import org.opensearch.action.admin.cluster.node.usage.TransportNodesUsageAction; import org.opensearch.action.admin.cluster.remote.RemoteInfoAction; import org.opensearch.action.admin.cluster.remote.TransportRemoteInfoAction; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; +import org.opensearch.action.admin.cluster.remotestore.restore.TransportRestoreRemoteStoreAction; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -234,12 +236,14 @@ import org.opensearch.action.search.ClearScrollAction; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportDeletePitAction; +import org.opensearch.action.search.TransportGetAllPitsAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -265,6 +269,7 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.seqno.RetentionLeaseActions; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.breaker.CircuitBreakerService; @@ -312,6 +317,7 @@ import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; import org.opensearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; +import org.opensearch.rest.action.admin.cluster.RestRestoreRemoteStoreAction; import 
org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; @@ -663,8 +669,12 @@ public void reg // point in time actions actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); + actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); + // Remote Store + actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -850,6 +860,11 @@ public void initRestHandlers(Supplier nodesInCluster) { } } registerHandler.accept(new RestCatAction(catActions)); + + // Remote Store APIs + if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { + registerHandler.accept(new RestRestoreRemoteStoreAction()); + } } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index 796ea023edd40..aede3fe5b1cc0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -42,6 +42,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -65,8 +66,15 @@ public static long waitForCompletionTimeout(TimeValue timeout) { private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); + private final TaskResourceTrackingService taskResourceTrackingService; + @Inject - public TransportListTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { + public TransportListTasksAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + TaskResourceTrackingService taskResourceTrackingService + ) { super( ListTasksAction.NAME, clusterService, @@ -77,6 +85,7 @@ public TransportListTasksAction(ClusterService clusterService, TransportService TaskInfo::new, ThreadPool.Names.MANAGEMENT ); + this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -106,6 +115,8 @@ protected void processTasks(ListTasksRequest request, Consumer operation) } taskManager.waitForTaskCompletion(task, timeoutNanos); }); + } else { + operation = operation.andThen(taskResourceTrackingService::refreshResourceStats); } super.processTasks(request, operation); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java new file mode 100644 index 0000000000000..46b1bc14e8537 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.action.ActionType; + +/** + * Restore remote store action + * + * @opensearch.internal + */ +public final class RestoreRemoteStoreAction extends ActionType { + + public static final RestoreRemoteStoreAction INSTANCE = new RestoreRemoteStoreAction(); + public static final String NAME = "cluster:admin/remotestore/restore"; + + private RestoreRemoteStoreAction() { + super(NAME, RestoreRemoteStoreResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java new file mode 100644 index 0000000000000..80bf96b6b2562 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.Nullable; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; +import org.opensearch.snapshots.RestoreInfo; + +import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Contains information about remote store restores + * + * @opensearch.internal + */ +public final class RestoreRemoteStoreResponse extends ActionResponse implements ToXContentObject { + + @Nullable + private final RestoreInfo restoreInfo; + + public RestoreRemoteStoreResponse(@Nullable RestoreInfo restoreInfo) { + this.restoreInfo = restoreInfo; + } + + public RestoreRemoteStoreResponse(StreamInput in) throws IOException { + super(in); + restoreInfo = RestoreInfo.readOptionalRestoreInfo(in); + } + + /** + * Returns restore information if remote store restore was completed before this method returned, null otherwise + * + * @return restore information or null + */ + public RestoreInfo getRestoreInfo() { + return restoreInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(restoreInfo); + } + + public RestStatus status() { + if (restoreInfo == null) { + return RestStatus.ACCEPTED; + } + return restoreInfo.status(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (restoreInfo != null) { + builder.field("remote_store"); + restoreInfo.toXContent(builder, params); + } else { + builder.field("accepted", true); + } + builder.endObject(); + return builder; + } + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "restore_remote_store", + true, + v -> { + RestoreInfo restoreInfo = (RestoreInfo) v[0]; + Boolean accepted = (Boolean) v[1]; + assert (accepted == null && restoreInfo != null) || 
(accepted != null && accepted && restoreInfo == null) : "accepted: [" + + accepted + + "], restoreInfo: [" + + restoreInfo + + "]"; + return new RestoreRemoteStoreResponse(restoreInfo); + } + ); + + static { + PARSER.declareObject( + optionalConstructorArg(), + (parser, context) -> RestoreInfo.fromXContent(parser), + new ParseField("remote_store") + ); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("accepted")); + } + + public static RestoreRemoteStoreResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RestoreRemoteStoreResponse that = (RestoreRemoteStoreResponse) o; + return Objects.equals(restoreInfo, that.restoreInfo); + } + + @Override + public int hashCode() { + return Objects.hash(restoreInfo); + } + + @Override + public String toString() { + return "RestoreRemoteStoreResponse{" + "restoreInfo=" + restoreInfo + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java new file mode 100644 index 0000000000000..7304ba25717ac --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.snapshots.RestoreService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for restore remote store operation + * + * @opensearch.internal + */ +public final class TransportRestoreRemoteStoreAction extends TransportClusterManagerNodeAction< + RestoreRemoteStoreRequest, + RestoreRemoteStoreResponse> { + private final RestoreService restoreService; + + @Inject + public TransportRestoreRemoteStoreAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + RestoreService restoreService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + RestoreRemoteStoreAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + RestoreRemoteStoreRequest::new, + indexNameExpressionResolver + ); + this.restoreService = restoreService; + } + + @Override + protected String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + protected 
RestoreRemoteStoreResponse read(StreamInput in) throws IOException { + return new RestoreRemoteStoreResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(RestoreRemoteStoreRequest request, ClusterState state) { + // Restoring a remote store might change the global state and create/change an index, + // so we need to check for METADATA_WRITE and WRITE blocks + ClusterBlockException blockException = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + if (blockException != null) { + return blockException; + } + return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); + + } + + @Override + protected void clusterManagerOperation( + final RestoreRemoteStoreRequest request, + final ClusterState state, + final ActionListener listener + ) { + restoreService.restoreFromRemoteStore( + request, + ActionListener.delegateFailure(listener, (delegatedListener, restoreCompletionResponse) -> { + if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { + RestoreClusterStateListener.createAndRegisterListener( + clusterService, + restoreCompletionResponse, + delegatedListener, + RestoreRemoteStoreResponse::new + ); + } else { + delegatedListener.onResponse(new RestoreRemoteStoreResponse(restoreCompletionResponse.getRestoreInfo())); + } + }) + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java index 363b7179f3c6c..10348f5ccfe6e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Restore Snapshot transport handler. */ +/** Restore remote store transport handler. 
*/ package org.opensearch.action.admin.cluster.remotestore.restore; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index 7d2ca99e3dbf5..d0f78e85e26a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.RestoreInProgress; @@ -44,6 +45,8 @@ import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.RestoreService; +import java.util.function.Function; + import static org.opensearch.snapshots.RestoreService.restoreInProgress; /** @@ -51,22 +54,27 @@ * * @opensearch.internal */ -public class RestoreClusterStateListener implements ClusterStateListener { +public class RestoreClusterStateListener implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(RestoreClusterStateListener.class); private final ClusterService clusterService; private final String uuid; - private final ActionListener listener; + private final String restoreIdentifier; + private final ActionListener listener; + private final Function actionResponseFactory; private RestoreClusterStateListener( ClusterService clusterService, RestoreService.RestoreCompletionResponse response, - ActionListener listener + ActionListener listener, + Function actionResponseFactory ) { this.clusterService = clusterService; this.uuid = response.getUuid(); + this.restoreIdentifier = response.getSnapshot() != null ? response.getSnapshot().getSnapshotId().getName() : "remote_store"; this.listener = listener; + this.actionResponseFactory = actionResponseFactory; } @Override @@ -78,23 +86,23 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { // on the current cluster-manager and as such it might miss some intermediary cluster states due to batching. // Clean up listener in that case and acknowledge completion of restore operation to client. 
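+ // The generified listener no longer hard-codes RestoreSnapshotResponse: the injected actionResponseFactory (RestoreSnapshotResponse::new for snapshots, RestoreRemoteStoreResponse::new for remote store) builds the concrete ActionResponse from the final RestoreInfo, or from null when the restore entry is already gone.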
clusterService.removeListener(this); - listener.onResponse(new RestoreSnapshotResponse((RestoreInfo) null)); + listener.onResponse(actionResponseFactory.apply(null)); } else if (newEntry == null) { clusterService.removeListener(this); ImmutableOpenMap shards = prevEntry.shards(); - assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state(); + assert prevEntry.state().completed() : "expected completed snapshot/remote store restore state but was " + prevEntry.state(); assert RestoreService.completed(shards) : "expected all restore entries to be completed"; RestoreInfo ri = new RestoreInfo( - prevEntry.snapshot().getSnapshotId().getName(), + restoreIdentifier, prevEntry.indices(), shards.size(), shards.size() - RestoreService.failedShards(shards) ); - RestoreSnapshotResponse response = new RestoreSnapshotResponse(ri); - logger.debug("restore of [{}] completed", prevEntry.snapshot().getSnapshotId()); + T response = actionResponseFactory.apply(ri); + logger.debug("restore of [{}] completed", restoreIdentifier); listener.onResponse(response); } else { - // restore not completed yet, wait for next cluster state update + logger.debug("restore not completed yet, wait for next cluster state update"); } } @@ -102,11 +110,12 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { * Creates a cluster state listener and registers it with the cluster service. The listener passed as a * parameter will be called when the restore is complete. */ - public static void createAndRegisterListener( + public static void createAndRegisterListener( ClusterService clusterService, RestoreService.RestoreCompletionResponse response, - ActionListener listener + ActionListener listener, + Function actionResponseFactory ) { - clusterService.addListener(new RestoreClusterStateListener(clusterService, response, listener)); + clusterService.addListener(new RestoreClusterStateListener(clusterService, response, listener, actionResponseFactory)); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index e7d95b9e40880..c2f79b2a27157 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -109,7 +109,12 @@ protected void clusterManagerOperation( ) { restoreService.restoreSnapshot(request, ActionListener.delegateFailure(listener, (delegatedListener, restoreCompletionResponse) -> { if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { - RestoreClusterStateListener.createAndRegisterListener(clusterService, restoreCompletionResponse, delegatedListener); + RestoreClusterStateListener.createAndRegisterListener( + clusterService, + restoreCompletionResponse, + delegatedListener, + RestoreSnapshotResponse::new + ); } else { delegatedListener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo())); } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 1597b31e89871..0876bf93a557b 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ 
b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -302,7 +302,16 @@ public void onFailure(Exception t) { * It is possible to run into connection exceptions here because we are getting the connection early and might * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. */ - fork(() -> onShardFailure(shardIndex, shard, shardIt, e)); + fork(() -> { + // It only happens when onPhaseDone() is called and executePhaseOnShard() fails hard with an exception. + // In this case calling onShardFailure() would overflow the operations counter, so the best we could do + // here is to fail the phase and move on to the next one. + if (totalOps.get() == expectedTotalOps) { + onPhaseFailure(this, "The phase has failed", e); + } else { + onShardFailure(shardIndex, shard, shardIt, e); + } + }); } finally { executeNext(pendingExecutions, thread); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java new file mode 100644 index 0000000000000..c90f75e3c0aed --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java @@ -0,0 +1,35 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Inner node get all pits request + */ +public class GetAllPitNodeRequest extends BaseNodeRequest { + + public GetAllPitNodeRequest() { + super(); + } + + public GetAllPitNodeRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java new file mode 100644 index 0000000000000..ba308a1a6ea1e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java @@ -0,0 +1,69 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * Inner node get all pits response + */ +public class GetAllPitNodeResponse extends BaseNodeResponse implements ToXContentFragment { + + /** + * List of active PITs in the associated node + */ + private final List pitInfos; + + public GetAllPitNodeResponse(DiscoveryNode node, List pitInfos) { + super(node); + if (pitInfos == null) { + throw new IllegalArgumentException("Pits info cannot be null"); + } + this.pitInfos = Collections.unmodifiableList(pitInfos); + } + + public GetAllPitNodeResponse(StreamInput in) throws IOException { + super(in); + this.pitInfos = Collections.unmodifiableList(in.readList(ListPitInfo::new)); + } + + public List getPitInfos() { + return pitInfos; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(pitInfos); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("node", this.getNode().getName()); + builder.startArray("pitInfos"); + for (ListPitInfo pit : pitInfos) { + pit.toXContent(builder, params); + } + + builder.endArray(); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java new file mode 100644 index 0000000000000..b4ad2f6641087 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request to get all active PIT IDs from all nodes of cluster + */ +public class GetAllPitNodesRequest extends BaseNodesRequest { + + @Inject + public GetAllPitNodesRequest(DiscoveryNode... concreteNodes) { + super(concreteNodes); + } + + public GetAllPitNodesRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java new file mode 100644 index 0000000000000..4a454e7145eff --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This class transforms active PIT objects from all nodes to unique PIT objects + */ +public class GetAllPitNodesResponse extends BaseNodesResponse implements ToXContentObject { + + /** + * List of unique PITs across all nodes + */ + private final Set pitInfos = new HashSet<>(); + + public GetAllPitNodesResponse(StreamInput in) throws IOException { + super(in); + } + + public GetAllPitNodesResponse( + ClusterName clusterName, + List getAllPitNodeResponse, + List failures + ) { + super(clusterName, getAllPitNodeResponse, failures); + Set uniquePitIds = new HashSet<>(); + pitInfos.addAll( + getAllPitNodeResponse.stream() + .flatMap(p -> p.getPitInfos().stream().filter(t -> uniquePitIds.add(t.getPitId()))) + .collect(Collectors.toList()) + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("pitInfos"); + for (ListPitInfo pit : pitInfos) { + pit.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + @Override + public List readNodesFrom(StreamInput in) throws IOException { + return in.readList(GetAllPitNodeResponse::new); + } + + @Override + public void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } + + public List getPitInfos() { + return Collections.unmodifiableList(new ArrayList<>(pitInfos)); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java new file mode 100644 index 0000000000000..16e65cb785a7d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for listing all PIT reader contexts + */ +public class GetAllPitsAction extends ActionType { + public static final GetAllPitsAction INSTANCE = new GetAllPitsAction(); + public static final String NAME = "indices:data/read/point_in_time/readall"; + + private GetAllPitsAction() { + super(NAME, GetAllPitNodesResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java new file mode 100644 index 0000000000000..4499e7d6e8ef5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java new file mode 100644 index 0000000000000..4499e7d6e8ef5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * This holds information about a PIT reader context, such as the PIT ID and its creation time + */ +public class ListPitInfo implements ToXContentFragment, Writeable { + private final String pitId; + private final long creationTime; + private final long keepAlive; + + public ListPitInfo(String pitId, long creationTime, long keepAlive) { + this.pitId = pitId; + this.creationTime = creationTime; + this.keepAlive = keepAlive; + } + + public ListPitInfo(StreamInput in) throws IOException { + this.pitId = in.readString(); + this.creationTime = in.readLong(); + this.keepAlive = in.readLong(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("pitId", pitId); + builder.field("creationTime", creationTime); + builder.field("keepAlive", keepAlive); + builder.endObject(); + return builder; + } + + public String getPitId() { + return pitId; + } + + public long getCreationTime() { + return creationTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + out.writeLong(creationTime); + out.writeLong(keepAlive); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index 4c5f1a1c0fc4f..0b79b77fd6014 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -18,10 +19,17 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -38,11 +46,13 @@ public class PitService { private final ClusterService clusterService; private final SearchTransportService searchTransportService; + private final TransportService transportService; @Inject - public PitService(ClusterService clusterService, SearchTransportService searchTransportService) { + public PitService(ClusterService clusterService, SearchTransportService searchTransportService, TransportService transportService) { this.clusterService = clusterService; this.searchTransportService = searchTransportService; + this.transportService = transportService; } /** @@ -52,6 +62,9 @@ public void deletePitContexts( Map<String, List<PitSearchContextIdForNode>> nodeToContextsMap, ActionListener<DeletePitResponse> listener ) { + if (nodeToContextsMap.size() == 0) { + listener.onResponse(new
DeletePitResponse(Collections.emptyList())); + return; + } final Set<String> clusters = nodeToContextsMap.values() .stream() .flatMap(Collection::stream) @@ -130,4 +143,43 @@ public void onFailure(final Exception e) { } }, size); } + + /** + * Get all active point in time contexts + */ + public void getAllPits(ActionListener<GetAllPitNodesResponse> getAllPitsListener) { + final List<DiscoveryNode> nodes = new ArrayList<>(); + for (ObjectCursor<DiscoveryNode> cursor : clusterService.state().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); + transportService.sendRequest( + transportService.getLocalNode(), + GetAllPitsAction.NAME, + new GetAllPitNodesRequest(disNodesArr), + new TransportResponseHandler<GetAllPitNodesResponse>() { + + @Override + public void handleResponse(GetAllPitNodesResponse response) { + getAllPitsListener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + getAllPitsListener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetAllPitNodesResponse read(StreamInput in) throws IOException { + return new GetAllPitNodesResponse(in); + } + } + ); + } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index e5f25595b9ec8..c9d0d6e2d3d47 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -32,12 +32,14 @@ package org.opensearch.action.search; +import org.opensearch.common.MemoizedSupplier; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.TaskId; import java.util.Map; +import java.util.function.Supplier; /** * Task storing information about a currently running search shard request. @@ -46,9 +48,33 @@ * @opensearch.internal */ public class SearchShardTask extends CancellableTask { + // generating metadata in a lazy way since source can be quite big + private final MemoizedSupplier<String> metadataSupplier; public SearchShardTask(long id, String type, String action, String description, TaskId parentTaskId, Map<String, String> headers) { + this(id, type, action, description, parentTaskId, headers, () -> ""); + } + + public SearchShardTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map<String, String> headers, + Supplier<String> metadataSupplier + ) { super(id, type, action, description, parentTaskId, headers); + this.metadataSupplier = new MemoizedSupplier<>(metadataSupplier); + } + + public String getTaskMetadata() { + return metadataSupplier.get(); + } + + @Override + public boolean supportsResourceTracking() { + return true; } @Override diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index 89f23bb9bdaeb..987485fe44c65 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -80,6 +80,11 @@ public final String getDescription() { return descriptionSupplier.get(); } + @Override + public boolean supportsResourceTracking() { + return true; + } + /** * Attach a {@link SearchProgressListener} to this task.
*/ diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index 23515dd28b329..241b3de72a258 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -218,16 +218,6 @@ public void sendFreePITContexts( ); } - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener<DeletePitResponse> listener) { - transportService.sendRequest( - connection, - FREE_ALL_PIT_CONTEXTS_ACTION_NAME, - TransportRequest.Empty.INSTANCE, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, DeletePitResponse::new) - ); - } - public void sendExecuteDfs( Transport.Connection connection, final ShardSearchRequest request, @@ -528,14 +518,6 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, DeletePitResponse::new); - transportService.registerRequestHandler( - FREE_ALL_PIT_CONTEXTS_ACTION_NAME, - ThreadPool.Names.SAME, - TransportRequest.Empty::new, - (request, channel, task) -> { channel.sendResponse(searchService.freeAllPitContexts()); } - ); - TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, DeletePitResponse::new); - transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index d67979d1c87c5..f9e36c479dd54 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -11,18 +11,17 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; -import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * Transport action for deleting point in time searches - supports deleting a list of PITs and deleting all PITs @@ -83,19 +82,22 @@ private void deletePits(ActionListener<DeletePitResponse> listener, DeletePitReq } /** - * Delete all active PIT reader contexts + * Delete all active PIT reader contexts, leveraging 'list all PITs' + * + * For cross-cluster PITs: + * - mixed-cluster PITs (a PIT spanning local and remote clusters) will be fully deleted. Since there will be at least + * one reader context with the PIT ID present in the local cluster, 'Get all PITs' will retrieve the PIT ID with which + * we can completely delete the PIT contexts in both the local and remote clusters. + * - fully remote PITs will not be deleted, as 'Get all PITs' operates on the local cluster only and no PIT info can + * be retrieved when it's fully remote.
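+ * + * Illustrative caller (hypothetical, not part of this change): this path is taken when a delete + * request carries the reserved "_all" PIT ID, e.g. + * client.execute(DeletePitAction.INSTANCE, new DeletePitRequest("_all"), listener);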
*/ private void deleteAllPits(ActionListener<DeletePitResponse> listener) { - // TODO: Use list all PITs to delete all PITs in case of remote cluster use case - int size = clusterService.state().getNodes().getSize(); - ActionListener<DeletePitResponse> groupedActionListener = pitService.getDeletePitGroupedListener(listener, size); - for (final DiscoveryNode node : clusterService.state().getNodes()) { - try { - Transport.Connection connection = searchTransportService.getConnection(null, node); - searchTransportService.sendFreeAllPitContexts(connection, groupedActionListener); - } catch (Exception e) { - groupedActionListener.onFailure(e); - } - } + // Get all PITs and execute the delete operation for those PITs. + pitService.getAllPits(ActionListener.wrap(getAllPitNodesResponse -> { + DeletePitRequest deletePitRequest = new DeletePitRequest( + getAllPitNodesResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()) + ); + deletePits(listener, deletePitRequest); + }, listener::onFailure)); } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java new file mode 100644 index 0000000000000..21a64e388fa7b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + GetAllPitsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List<GetAllPitNodeResponse> getAllPitNodeResponses, + List<FailedNodeException> failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeResponses, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs on the node + */ + @Override + protected GetAllPitNodeResponse
nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index a60648e50ff31..71ae187b48c4e 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -40,6 +40,7 @@ import org.opensearch.action.ActionResponse; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskId; @@ -93,31 +94,39 @@ public final Task execute(Request request, ActionListener<Response> listener) { */ final Releasable unregisterChildNode = registerChildNode(request.getParentTask()); final Task task; + try { task = taskManager.register("transport", actionName, request); } catch (TaskCancelledException e) { unregisterChildNode.close(); throw e; } - execute(task, request, new ActionListener<Response>() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(response); + + ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); + try { + execute(task, request, new ActionListener<Response>() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(response); + } } - } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(e); + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(e); + } } - } - }); + }); + } finally { + storedContext.close(); + } + return task; } @@ -134,25 +143,30 @@ public final Task execute(Request request, TaskListener<Response> listener) { unregisterChildNode.close(); throw e; } - execute(task, request, new ActionListener<Response>() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(task, response); + ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); + try { + execute(task, request, new ActionListener<Response>() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(task, response); + } } - } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(task, e); + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(task, e); + } } - } - }); + }); + } finally { + storedContext.close(); + } return task; }
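Both execute(...) overloads above now bracket the action execution with taskManager.taskExecutionStarted(task) and restore the previous thread context in a finally block. A minimal, self-contained sketch of that restore-on-close pattern (simplified types; taskExecutionStarted here is a stand-in for the TaskManager method, not the real API):

```
import java.util.concurrent.atomic.AtomicReference;

public class StoredContextPattern {
    static final AtomicReference<Long> CURRENT_TASK_ID = new AtomicReference<>();

    // Mirrors the idea of TaskManager#taskExecutionStarted: set the new value and
    // hand back a restorer that puts the previous value back when closed.
    static AutoCloseable taskExecutionStarted(long taskId) {
        Long previous = CURRENT_TASK_ID.getAndSet(taskId);
        return () -> CURRENT_TASK_ID.set(previous); // restore on close()
    }

    public static void main(String[] args) throws Exception {
        try (AutoCloseable restore = taskExecutionStarted(42L)) {
            System.out.println("during execute: " + CURRENT_TASK_ID.get()); // 42
        }
        System.out.println("after execute: " + CURRENT_TASK_ID.get()); // null again
    }
}
```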
diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index d65ba3eddf776..a97f4ffe555b6 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -125,22 +125,27 @@ protected void masterOperation(Request request, ClusterState state, ActionListen throw new UnsupportedOperationException("Must be overridden"); } + // TODO: Add abstract keyword after removing the deprecated masterOperation() protected void clusterManagerOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception { masterOperation(request, state, listener); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(Task, ClusterManagerNodeRequest, ClusterState, ActionListener)} */ + /** + * Override this operation if access to the task parameter is needed + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(Task, ClusterManagerNodeRequest, ClusterState, ActionListener)} + */ @Deprecated protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception { - clusterManagerOperation(task, request, state, listener); + clusterManagerOperation(request, state, listener); } /** * Override this operation if access to the task parameter is needed */ + // TODO: Change the implementation to call 'clusterManagerOperation(request...)' after removing the deprecated masterOperation() protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception { - clusterManagerOperation(request, state, listener); + masterOperation(task, request, state, listener); } protected boolean localExecute(Request request) { diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index 1411ff7b30695..c43256a61e8b4 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -88,6 +88,7 @@ protected final void clusterManagerOperation(final Request request, final Cluste doClusterManagerOperation(request, concreteIndices, state, listener); } + // TODO: Add abstract keyword after removing the deprecated doMasterOperation() protected void doClusterManagerOperation( Request request, String[] concreteIndices,
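The first hunk above rewires the deprecation bridge so that the task-aware clusterManagerOperation(...) now delegates to the deprecated masterOperation(...) (instead of the other way around), keeping legacy overrides working until the deprecated method is removed. A minimal, self-contained sketch of that pattern (names are illustrative, not the OpenSearch API):

```
public class DeprecationBridge {
    static class Base {
        @Deprecated
        void masterOperation() { throw new UnsupportedOperationException("Must be overridden"); }

        // New code paths call this; legacy subclasses only override masterOperation().
        void clusterManagerOperation() { masterOperation(); }
    }

    static class LegacySubclass extends Base {
        @Override
        void masterOperation() { System.out.println("legacy override still runs"); }
    }

    public static void main(String[] args) {
        new LegacySubclass().clusterManagerOperation(); // prints "legacy override still runs"
    }
}
```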
diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index f4eaa979ff18c..7a7b98bf724f6 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -62,6 +62,8 @@ import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; @@ -577,6 +579,11 @@ */ void restoreSnapshot(RestoreSnapshotRequest request, ActionListener<RestoreSnapshotResponse> listener); + /** + * Restores from remote store. + */ + void restoreRemoteStore(RestoreRemoteStoreRequest request, ActionListener<RestoreRemoteStoreResponse> listener); + /** * Restores a snapshot. */ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index f99454a8a8913..7084a856ab3d1 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -77,6 +77,9 @@ import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; @@ -1109,6 +1112,11 @@ public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener<RestoreSnapshotResponse> listener) { execute(RestoreSnapshotAction.INSTANCE, request, listener); } + @Override + public void restoreRemoteStore(RestoreRemoteStoreRequest request, ActionListener<RestoreRemoteStoreResponse> listener) { + execute(RestoreRemoteStoreAction.INSTANCE, request, listener); + } + @Override public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot) { return new RestoreSnapshotRequestBuilder(this, RestoreSnapshotAction.INSTANCE, repository, snapshot); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 900dceb8564c9..f8ba520e465e2 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -94,6 +94,7 @@ import org.opensearch.script.ScriptMetadata; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -396,6 +397,7 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); + bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); }
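The next hunk renames the local-node listener callbacks from onMaster/offMaster to onClusterManager/offClusterManager. A self-contained sketch of the listener contract (the interface body mirrors LocalNodeClusterManagerListener, with ClusterChangedEvent reduced to two booleans for illustration):

```
public class ClusterManagerListenerSketch {
    interface LocalNodeClusterManagerListener {
        void onClusterManager();   // local node was just elected cluster-manager
        void offClusterManager();  // local node just lost the cluster-manager role

        // Mirrors the default clusterChanged(...) implementation in the hunk below.
        default void clusterChanged(boolean wasClusterManager, boolean isClusterManager) {
            if (wasClusterManager == false && isClusterManager) {
                onClusterManager();
            } else if (wasClusterManager && isClusterManager == false) {
                offClusterManager();
            }
        }
    }

    public static void main(String[] args) {
        LocalNodeClusterManagerListener listener = new LocalNodeClusterManagerListener() {
            @Override
            public void onClusterManager() { System.out.println("elected"); }

            @Override
            public void offClusterManager() { System.out.println("stepped down"); }
        };
        listener.clusterChanged(false, true);  // prints "elected"
        listener.clusterChanged(true, false);  // prints "stepped down"
    }
}
```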
diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index c86aa00a6f2a2..c07dcc5daaee6 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -42,21 +42,21 @@ public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** * Called when local node is elected to be the cluster-manager */ - void onMaster(); + void onClusterManager(); /** * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. */ - void offMaster(); + void offClusterManager(); @Override default void clusterChanged(ClusterChangedEvent event) { final boolean wasClusterManager = event.previousState().nodes().isLocalNodeElectedClusterManager(); final boolean isClusterManager = event.localNodeClusterManager(); if (wasClusterManager == false && isClusterManager) { - onMaster(); + onClusterManager(); } else if (wasClusterManager && isClusterManager == false) { - offMaster(); + offClusterManager(); } } } diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index eebfb60d8472d..31c0b294b8004 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -41,4 +41,33 @@ @Deprecated public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { + /** + * Called when local node is elected to be the cluster-manager. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #onClusterManager()} + */ + @Deprecated + void onMaster(); + + /** + * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #offClusterManager()} + */ + @Deprecated + void offMaster(); + + /** + * Called when local node is elected to be the cluster-manager. + */ + @Override + default void onClusterManager() { + onMaster(); + } + + /** + * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node.
+ */ + @Override + default void offClusterManager() { + offMaster(); + } } diff --git a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java index a3f74cb45a880..750f4b177cb86 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java @@ -48,6 +48,18 @@ public interface AckedRequest { /** * Returns the timeout for the request to be completed on the cluster-manager node + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout()} */ - TimeValue masterNodeTimeout(); + @Deprecated + default TimeValue masterNodeTimeout() { + throw new UnsupportedOperationException("Must be overridden"); + } + + /** + * Returns the timeout for the request to be completed on the cluster-manager node + */ + // TODO: Remove default implementation after removing the deprecated masterNodeTimeout() + default TimeValue clusterManagerNodeTimeout() { + return masterNodeTimeout(); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 442137fb70e1f..759891e88039b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -283,13 +283,77 @@ public Iterator<Setting<?>> settings() { Property.Final ); - public static final String SETTING_REMOTE_STORE = "index.remote_store"; + public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled"; + + public static final String SETTING_REMOTE_TRANSLOG_STORE_ENABLED = "index.remote_store.translog.enabled"; /** * Used to specify if the index data should be persisted in the remote store. */ - public static final Setting<Boolean> INDEX_REMOTE_STORE_SETTING = Setting.boolSetting( - SETTING_REMOTE_STORE, + public static final Setting<Boolean> INDEX_REMOTE_STORE_ENABLED_SETTING = Setting.boolSetting( + SETTING_REMOTE_STORE_ENABLED, + false, + new Setting.Validator<>() { + + @Override + public void validate(final Boolean value) {} + + @Override + public void validate(final Boolean value, final Map<Setting<?>, Object> settings) { + final Object replicationType = settings.get(INDEX_REPLICATION_TYPE_SETTING); + if (replicationType != ReplicationType.SEGMENT && value == true) { + throw new IllegalArgumentException( + "To enable " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + ", " + + INDEX_REPLICATION_TYPE_SETTING.getKey() + + " should be set to " + + ReplicationType.SEGMENT + ); + } + } + + @Override + public Iterator<Setting<?>> settings() { + final List<Setting<?>> settings = Collections.singletonList(INDEX_REPLICATION_TYPE_SETTING); + return settings.iterator(); + } + }, + Property.IndexScope, + Property.Final + ); + + /** + * Used to specify if the index translog operations should be persisted in the remote store.
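+ * + * For example (illustrative, assuming the remote store feature flag is enabled), a valid + * combination is index.replication.type=SEGMENT, index.remote_store.enabled=true, + * index.remote_store.translog.enabled=true; enabling the translog setting while + * index.remote_store.enabled is false fails validation below.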
+ */ + public static final Setting<Boolean> INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING = Setting.boolSetting( + SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false, + new Setting.Validator<>() { + + @Override + public void validate(final Boolean value) {} + + @Override + public void validate(final Boolean value, final Map<Setting<?>, Object> settings) { + final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (isRemoteSegmentStoreEnabled == false && value == true) { + throw new IllegalArgumentException( + "Settings " + + INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey() + + " cannot be enabled when " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + " is set to " + + settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING) + ); + } + } + + @Override + public Iterator<Setting<?>> settings() { + final List<Setting<?>> settings = Collections.singletonList(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, Property.IndexScope, Property.Final ); diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 74be762bfbcf9..2692a8fa2b914 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -125,7 +125,7 @@ * @opensearch.internal */ public class Lucene { - public static final String LATEST_CODEC = "Lucene92"; + public static final String LATEST_CODEC = "Lucene94"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index bee3428188026..971fb518ff1da 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -41,6 +41,8 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -575,7 +577,9 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.THROUGHPUT_DEGRADATION_LIMITS, ShardIndexingPressureMemoryManager.SUCCESSFUL_REQUEST_ELAPSED_TIMEOUT, ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, - IndexingPressure.MAX_INDEXING_BYTES + IndexingPressure.MAX_INDEXING_BYTES, + TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED, + TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a2aac1f2c54c5..a3fa2c7ee3112 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -61,6 +61,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; @@ -218,11 +219,11 @@ public final class
IndexScopedSettings extends AbstractScopedSettings { * is ready for production release, the feature flag can be removed, and the * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. */ - public static final Map<String, Setting> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( + public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - IndexMetadata.INDEX_REPLICATION_TYPE_SETTING, + Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - IndexMetadata.INDEX_REMOTE_STORE_SETTING + Arrays.asList(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING) ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index 16b39bb2e33f9..7b4dfb7d64bb6 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -88,9 +88,9 @@ public SettingsModule( registerSetting(setting); } - for (Map.Entry<String, Setting> featureFlaggedSetting : IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) { + for (Map.Entry<String, List<Setting>> featureFlaggedSetting : IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) { if (FeatureFlags.isEnabled(featureFlaggedSetting.getKey())) { - registerSetting(featureFlaggedSetting.getValue()); + featureFlaggedSetting.getValue().forEach(feature -> registerSetting(feature)); } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index 14f9486b4baf0..ec1024bbe5f30 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -40,6 +40,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.node.Node; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.TaskAwareRunnable; import java.util.List; import java.util.Optional; @@ -55,6 +57,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; /** @@ -182,13 +185,24 @@ public static OpenSearchThreadPoolExecutor newResizable( int size, int queueCapacity, ThreadFactory threadFactory, - ThreadContext contextHolder + ThreadContext contextHolder, + AtomicReference<RunnableTaskExecutionListener> runnableTaskListener ) { if (queueCapacity <= 0) { throw new IllegalArgumentException("queue capacity for [" + name + "] executor must be positive, got: " + queueCapacity); } + Function<Runnable, Runnable> runnableWrapper; + if (runnableTaskListener != null) { + runnableWrapper = (runnable) -> { + TaskAwareRunnable taskAwareRunnable = new TaskAwareRunnable(contextHolder, runnable, runnableTaskListener); + return new TimedRunnable(taskAwareRunnable); + }; + } else { + runnableWrapper = TimedRunnable::new; + } + return new QueueResizableOpenSearchThreadPoolExecutor( name, size, @@ -196,7 +210,7 @@ public static OpenSearchThreadPoolExecutor newResizable( 0, TimeUnit.MILLISECONDS, new
ResizableBlockingQueue<>(ConcurrentCollections.newBlockingQueue(), queueCapacity), - TimedRunnable::new, + runnableWrapper, threadFactory, new OpenSearchAbortPolicy(), contextHolder diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 5e2381c949c00..5b9a77c75dddb 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -66,6 +66,7 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -135,16 +136,23 @@ public StoredContext stashContext() { * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. * Otherwise when the context is stashed, it should be empty. */ + + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT; + if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { - ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders( + threadContextStruct = threadContextStruct.putHeaders( MapBuilder.<String, String>newMapBuilder() .put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)) .immutableMap() ); - threadLocal.set(threadContextStruct); - } else { - threadLocal.set(DEFAULT_CONTEXT); } + + if (context.transientHeaders.containsKey(TASK_ID)) { + threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); + } + + threadLocal.set(threadContextStruct); + return () -> { // If the node and thus the threadLocal get closed while this task // is still executing, we don't want this runnable to fail with an diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index b9cfebaa98521..4dc9396751fc9 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -313,6 +313,11 @@ private static ShardStoreInfo shardStoreInfo(NodeGatewayStartedShards nodeShardS NodeGatewayStartedShards::primary ).reversed(); + private static final Comparator<NodeGatewayStartedShards> HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR = Comparator.comparing( + NodeGatewayStartedShards::replicationCheckpoint, + Comparator.nullsLast(Comparator.naturalOrder()) + ); + /** * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching * inSyncAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but @@ -381,6 +386,12 @@ protected static NodeShardsResult buildNodeShardsResult( } } + /** + * Orders the active shard copies based on the comparators below: + * 1. No store exception, i.e. the shard copy is readable + * 2. Prefer the previous primary shard + * 3. Prefer the shard copy with the highest replication checkpoint. This is a no-op for doc-rep enabled indices.
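+ * + * For example (illustrative): if two readable copies both match the in-sync allocation ids and + * neither was the previous primary, the copy with the more up-to-date replication checkpoint + * sorts first, so a segment-replication index promotes its freshest copy.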
+ */ final Comparator<NodeGatewayStartedShards> comparator; // allocation preference if (matchAnyShard) { // prefer shards with matching allocation ids Comparator<NodeGatewayStartedShards> matchingAllocationsFirst = Comparator.comparing( (NodeGatewayStartedShards state) -> inSyncAllocationIds.contains(state.allocationId()) ).reversed(); comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR) - .thenComparing(PRIMARY_FIRST_COMPARATOR); + .thenComparing(PRIMARY_FIRST_COMPARATOR) + .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); } else { - comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR); + comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR) + .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); } nodeShardStates.sort(comparator); diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 78b4fa287ef59..c43f539243d7a 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; +import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; @@ -56,11 +57,13 @@ import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.shard.ShardStateMetadata; import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -195,6 +198,7 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { clusterService.localNode(), allocationId, shardStateMetadata.primary, + null, exception ); } @@ -202,10 +206,16 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - return new NodeGatewayStartedShards(clusterService.localNode(), allocationId, shardStateMetadata.primary); + final IndexShard shard = indicesService.getShardOrNull(shardId); + return new NodeGatewayStartedShards( + clusterService.localNode(), + allocationId, + shardStateMetadata.primary, + shard != null ?
shard.getLatestReplicationCheckpoint() : null + ); } logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), null, false); + return new NodeGatewayStartedShards(clusterService.localNode(), null, false, null); } catch (Exception e) { throw new OpenSearchException("failed to load started shards", e); } @@ -349,10 +359,10 @@ public String getCustomDataPath() { * @opensearch.internal */ public static class NodeGatewayStartedShards extends BaseNodeResponse { - private final String allocationId; private final boolean primary; private final Exception storeException; + private final ReplicationCheckpoint replicationCheckpoint; public NodeGatewayStartedShards(StreamInput in) throws IOException { super(in); @@ -363,16 +373,33 @@ public NodeGatewayStartedShards(StreamInput in) throws IOException { } else { storeException = null; } + if (in.getVersion().onOrAfter(Version.V_2_3_0) && in.readBoolean()) { + replicationCheckpoint = new ReplicationCheckpoint(in); + } else { + replicationCheckpoint = null; + } } - public NodeGatewayStartedShards(DiscoveryNode node, String allocationId, boolean primary) { - this(node, allocationId, primary, null); + public NodeGatewayStartedShards( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint + ) { + this(node, allocationId, primary, replicationCheckpoint, null); } - public NodeGatewayStartedShards(DiscoveryNode node, String allocationId, boolean primary, Exception storeException) { + public NodeGatewayStartedShards( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + Exception storeException + ) { super(node); this.allocationId = allocationId; this.primary = primary; + this.replicationCheckpoint = replicationCheckpoint; this.storeException = storeException; } @@ -384,6 +411,10 @@ public boolean primary() { return this.primary; } + public ReplicationCheckpoint replicationCheckpoint() { + return this.replicationCheckpoint; + } + public Exception storeException() { return this.storeException; } @@ -399,6 +430,14 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } + if (out.getVersion().onOrAfter(Version.V_2_3_0)) { + if (replicationCheckpoint != null) { + out.writeBoolean(true); + replicationCheckpoint.writeTo(out); + } else { + out.writeBoolean(false); + } + } } @Override @@ -414,7 +453,8 @@ public boolean equals(Object o) { return primary == that.primary && Objects.equals(allocationId, that.allocationId) - && Objects.equals(storeException, that.storeException); + && Objects.equals(storeException, that.storeException) + && Objects.equals(replicationCheckpoint, that.replicationCheckpoint); } @Override @@ -422,6 +462,7 @@ public int hashCode() { int result = (allocationId != null ? allocationId.hashCode() : 0); result = 31 * result + (primary ? 1 : 0); result = 31 * result + (storeException != null ? storeException.hashCode() : 0); + result = 31 * result + (replicationCheckpoint != null ? 
replicationCheckpoint.hashCode() : 0); return result; } @@ -432,6 +473,9 @@ public String toString() { if (storeException != null) { buf.append(",storeException=").append(storeException); } + if (replicationCheckpoint != null) { + buf.append(",ReplicationCheckpoint=").append(replicationCheckpoint.toString()); + } buf.append("]"); return buf.toString(); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 210df9d342cb7..e1427df1c34ab 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -88,6 +88,7 @@ import org.opensearch.index.shard.ShardPath; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.Translog; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; @@ -547,7 +548,9 @@ public synchronized IndexShard createShard( () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, circuitBreakerService, - this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null, + // TODO Replace with remote translog factory in the follow up PR + this.indexSettings.isRemoteTranslogStoreEnabled() ? null : new InternalTranslogFactory(), + this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null, remoteStore ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7c9b9755a2434..657cb1ee55cb9 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -559,6 +559,7 @@ public final class IndexSettings { private final int numberOfShards; private final ReplicationType replicationType; private final boolean isRemoteStoreEnabled; + private final boolean isRemoteTranslogStoreEnabled; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -718,8 +719,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE, false); - + isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); + isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); @@ -971,6 +972,13 @@ public boolean isRemoteStoreEnabled() { return isRemoteStoreEnabled; } + /** + * Returns if remote translog store is enabled for this index. + */ + public boolean isRemoteTranslogStoreEnabled() { + return isRemoteTranslogStoreEnabled; + } + /** * Returns the node settings. 
The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index ff254a63fadb6..b1e73b3855759 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; -import org.apache.lucene.codecs.lucene92.Lucene92Codec.Mode; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec.Mode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.mapper.MapperService; @@ -62,8 +62,8 @@ public class CodecService { public CodecService(@Nullable MapperService mapperService, Logger logger) { final MapBuilder<String, Codec> codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene92Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene92Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene94Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene94Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index fd0c66983208a..c101321e47350 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -36,7 +36,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene92Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene94Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 4ae6646ed14f0..ba30103f70269 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -51,8 +51,10 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; +import
org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.threadpool.ThreadPool; @@ -150,6 +152,8 @@ public Supplier<RetentionLeases> retentionLeasesSupplier() { private final TranslogConfig translogConfig; + private final TranslogFactory translogFactory; + public EngineConfig( ShardId shardId, ThreadPool threadPool, @@ -253,7 +257,8 @@ public EngineConfig( retentionLeasesSupplier, primaryTermSupplier, tombstoneDocSupplier, - false + false, + new InternalTranslogFactory() ); } @@ -284,7 +289,8 @@ public EngineConfig( Supplier<RetentionLeases> retentionLeasesSupplier, LongSupplier primaryTermSupplier, TombstoneDocSupplier tombstoneDocSupplier, - boolean isReadOnlyReplica + boolean isReadOnlyReplica, + TranslogFactory translogFactory ) { if (isReadOnlyReplica && indexSettings.isSegRepEnabled() == false) { throw new IllegalArgumentException("Shard can only be wired as a read only replica with Segment Replication enabled"); @@ -328,6 +334,7 @@ public EngineConfig( this.primaryTermSupplier = primaryTermSupplier; this.tombstoneDocSupplier = tombstoneDocSupplier; this.isReadOnlyReplica = isReadOnlyReplica; + this.translogFactory = translogFactory; } /** @@ -532,6 +539,14 @@ public boolean isReadOnlyReplica() { return indexSettings.isSegRepEnabled() && isReadOnlyReplica; } + /** + * Returns the underlying translog factory + * @return the translog factory + */ + public TranslogFactory getTranslogFactory() { + return translogFactory; + } + /** * A supplier that supplies tombstone documents which will be used in soft-update methods. * The returned document consists only of _uid, _seqno, _term and _version fields; other metadata fields are excluded. diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index c8aec3570f8b5..f0db086e47816 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -28,6 +28,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; +import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.PluginsService; @@ -147,7 +148,8 @@ public EngineConfig newEngineConfig( Supplier<RetentionLeases> retentionLeasesSupplier, LongSupplier primaryTermSupplier, EngineConfig.TombstoneDocSupplier tombstoneDocSupplier, - boolean isReadOnlyReplica + boolean isReadOnlyReplica, + TranslogFactory translogFactory ) { CodecService codecServiceToUse = codecService; if (codecService == null && this.codecServiceFactory != null) { @@ -178,7 +180,8 @@ public EngineConfig newEngineConfig( retentionLeasesSupplier, primaryTermSupplier, tombstoneDocSupplier, - isReadOnlyReplica + isReadOnlyReplica, + translogFactory ); }
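EngineConfig now carries a TranslogFactory so engines no longer construct Translog directly. A hypothetical custom factory sketch; the interface shape is assumed from the call sites in this patch (NoOpEngine and ReadOnlyEngine pass exactly these six arguments), and a remote-store-backed implementation is expected in a follow-up per the TODO in IndexService:

```
import java.io.IOException;
import java.util.function.LongConsumer;
import java.util.function.LongSupplier;

import org.opensearch.index.translog.Translog;
import org.opensearch.index.translog.TranslogConfig;
import org.opensearch.index.translog.TranslogDeletionPolicy;
import org.opensearch.index.translog.TranslogFactory;

public class CustomTranslogFactory implements TranslogFactory {
    @Override
    public Translog newTranslog(
        TranslogConfig config,
        String translogUUID,
        TranslogDeletionPolicy deletionPolicy,
        LongSupplier globalCheckpointSupplier,
        LongSupplier primaryTermSupplier,
        LongConsumer persistedSequenceNumberConsumer
    ) throws IOException {
        // Delegates to the local translog; a remote-store implementation would wrap
        // or replace this construction.
        return new Translog(
            config,
            translogUUID,
            deletionPolicy,
            globalCheckpointSupplier,
            primaryTermSupplier,
            persistedSequenceNumberConsumer
        );
    }
}
```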
diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 7d90c2ad653be..16599141b1345 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -289,7 +289,8 @@ public void onFailure(String reason, Exception ex) { () -> getLocalCheckpointTracker(), translogUUID, new CompositeTranslogEventListener(Arrays.asList(internalTranslogEventListener, translogEventListener), shardId), - this::ensureOpen + this::ensureOpen, + engineConfig.getTranslogFactory() ); this.translogManager = translogManagerRef; this.softDeletesPolicy = newSoftDeletesPolicy(); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index af175be286b13..6f5b7030ed65f 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -69,6 +69,12 @@ public NRTReplicationEngine(EngineConfig engineConfig) { this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); this.readerManager = readerManager; this.readerManager.addListener(completionStatsCache); + for (ReferenceManager.RefreshListener listener : engineConfig.getExternalRefreshListener()) { + this.readerManager.addListener(listener); + } + for (ReferenceManager.RefreshListener listener : engineConfig.getInternalRefreshListener()) { + this.readerManager.addListener(listener); + } final Map userData = store.readLastCommittedSegmentsInfo().getUserData(); final String translogUUID = Objects.requireNonNull(userData.get(Translog.TRANSLOG_UUID_KEY)); translogManagerRef = new WriteOnlyTranslogManager( @@ -95,7 +101,8 @@ public void onAfterTranslogSync() { } } }, - this + this, + engineConfig.getTranslogFactory() ); this.translogManager = translogManagerRef; } catch (IOException e) { @@ -122,6 +129,23 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); } + /** + * Persist the latest live SegmentInfos. + * + * This method creates a commit point from the latest SegmentInfos. It is intended to be used when this shard is about to be promoted as the new primary. + * + * TODO: If this method is invoked while the engine is currently updating segments on its reader, wait for that update to complete so the updated segments are used. + * + * + * @throws IOException - When there is an IO error committing the SegmentInfos. + */ + public void commitSegmentInfos() throws IOException { + // TODO: This method should wait for replication events to finalize. + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + store.commitSegmentInfos(latestSegmentInfos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); + translogManager.syncTranslog(); + } + @Override public String getHistoryUUID() { return loadHistoryUUID(lastCommittedSegmentInfos.userData); @@ -193,6 +217,18 @@ protected ReferenceManager getReferenceManager(Search return readerManager; } + /** + * Refreshing of this engine only happens internally, when a new set of segments is received; external + * refresh attempts are ignored, so this method returns false. Note that the Engine's existing implementation + * reads DirectoryReader.isCurrent after acquiring a searcher. With this engine's NRTReplicationReaderManager, + * that falls back to StandardDirectoryReader, which determines currency by comparing the on-disk SegmentInfos + * version against the one in the reader, and therefore reports isCurrent == false (refreshNeeded == true) at + * refresh points. Even when a refresh is reported as needed, it is ignored: the engine only ever refreshes + * with incoming SegmentInfos.
+ */ + @Override + public boolean refreshNeeded() { + return false; + } + @Override public Closeable acquireHistoryRetentionLock() { throw new UnsupportedOperationException("Not implemented"); diff --git a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java index 4bc37084675ea..f6c5bf7640a73 100644 --- a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java @@ -195,14 +195,15 @@ public void trimUnreferencedTranslogFiles() throws TranslogException { final TranslogDeletionPolicy translogDeletionPolicy = new DefaultTranslogDeletionPolicy(-1, -1, 0); translogDeletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); try ( - Translog translog = new Translog( - translogConfig, - translogUuid, - translogDeletionPolicy, - engineConfig.getGlobalCheckpointSupplier(), - engineConfig.getPrimaryTermSupplier(), - seqNo -> {} - ) + Translog translog = engineConfig.getTranslogFactory() + .newTranslog( + translogConfig, + translogUuid, + translogDeletionPolicy, + engineConfig.getGlobalCheckpointSupplier(), + engineConfig.getPrimaryTermSupplier(), + seqNo -> {} + ) ) { translog.trimUnreferencedReaders(); // refresh the translog stats diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index cebe262fee5d1..f426768119c1d 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -258,14 +258,15 @@ private static TranslogStats translogStats(final EngineConfig config, final Segm final long localCheckpoint = Long.parseLong(infos.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); translogDeletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); try ( - Translog translog = new Translog( - translogConfig, - translogUuid, - translogDeletionPolicy, - config.getGlobalCheckpointSupplier(), - config.getPrimaryTermSupplier(), - seqNo -> {} - ) + Translog translog = config.getTranslogFactory() + .newTranslog( + translogConfig, + translogUuid, + translogDeletionPolicy, + config.getGlobalCheckpointSupplier(), + config.getPrimaryTermSupplier(), + seqNo -> {} + ) ) { return translog.stats(); } diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index e816b366c3153..d7ce6ae8aba3e 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.comparators.TermOrdValComparator; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.opensearch.common.util.BigArrays; @@ -99,7 +100,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e final boolean sortMissingLast = sortMissingLast(missingValue) ^ reversed; final BytesRef missingBytes = (BytesRef) missingObject(missingValue, reversed); if (indexFieldData instanceof IndexOrdinalsFieldData) { - return new 
FieldComparator.TermOrdValComparator(numHits, null, sortMissingLast) { + FieldComparator cmp = new TermOrdValComparator(numHits, indexFieldData.getFieldName(), sortMissingLast, reversed) { @Override protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException { @@ -121,13 +122,9 @@ protected SortedDocValues getSortedDocValues(LeafReaderContext context, String f return new ReplaceMissing(selectedValues, missingBytes); } } - - @Override - public void setScorer(Scorable scorer) { - BytesRefFieldComparatorSource.this.setScorer(scorer); - } - }; + cmp.disableSkipping(); + return cmp; } return new FieldComparator.TermValComparator(numHits, null, sortMissingLast) { diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index fcbca6049ec0b..1702c7700cf60 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -380,6 +380,10 @@ protected static void parseProperties(ObjectMapper.Builder objBuilder, Map fieldBuilder = typeParser.parse(realFieldName, propNode, parserContext); for (int i = fieldNameParts.length - 2; i >= 0; --i) { diff --git a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java index cdb7464ff250a..6d59e861eb32f 100644 --- a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java @@ -56,7 +56,6 @@ import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.Fuzziness; @@ -565,7 +564,7 @@ private Query getPrefixQuerySingle(String field, String termStr) throws ParseExc if (currentFieldType == null || currentFieldType.getTextSearchInfo() == TextSearchInfo.NONE) { return newUnmappedFieldQuery(field); } - setAnalyzer(forceAnalyzer == null ? 
queryBuilder.context.getSearchAnalyzer(currentFieldType) : forceAnalyzer); + setAnalyzer(getSearchAnalyzer(currentFieldType)); Query query = null; if (currentFieldType.getTextSearchInfo().isTokenized() == false) { query = currentFieldType.prefixQuery(termStr, getMultiTermRewriteMethod(), context); @@ -741,6 +740,13 @@ private Query getWildcardQuerySingle(String field, String termStr) throws ParseE } } + private Analyzer getSearchAnalyzer(MappedFieldType currentFieldType) { + if (forceAnalyzer == null) { + return queryBuilder.context.getSearchAnalyzer(currentFieldType); + } + return forceAnalyzer; + } + @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { final int maxAllowedRegexLength = context.getIndexSettings().getMaxRegexLength(); @@ -781,11 +787,8 @@ private Query getRegexpQuerySingle(String field, String termStr) throws ParseExc if (currentFieldType == null) { return newUnmappedFieldQuery(field); } - if (forceAnalyzer != null) { - setAnalyzer(forceAnalyzer); - return super.getRegexpQuery(field, termStr); - } - return currentFieldType.regexpQuery(termStr, RegExp.ALL, 0, getDeterminizeWorkLimit(), getMultiTermRewriteMethod(), context); + setAnalyzer(getSearchAnalyzer(currentFieldType)); + return super.getRegexpQuery(field, termStr); } catch (RuntimeException e) { if (lenient) { return newLenientFieldQuery(field, e); diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index fe23000902608..38b18355fd98d 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.search.stats; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -77,6 +78,10 @@ public static class Stats implements Writeable, ToXContentFragment { private long suggestTimeInMillis; private long suggestCurrent; + private long pitCount; + private long pitTimeInMillis; + private long pitCurrent; + private Stats() { // for internal use, initializes all counts to 0 } @@ -91,6 +96,9 @@ public Stats( long scrollCount, long scrollTimeInMillis, long scrollCurrent, + long pitCount, + long pitTimeInMillis, + long pitCurrent, long suggestCount, long suggestTimeInMillis, long suggestCurrent @@ -110,6 +118,10 @@ public Stats( this.suggestCount = suggestCount; this.suggestTimeInMillis = suggestTimeInMillis; this.suggestCurrent = suggestCurrent; + + this.pitCount = pitCount; + this.pitTimeInMillis = pitTimeInMillis; + this.pitCurrent = pitCurrent; } private Stats(StreamInput in) throws IOException { @@ -128,6 +140,12 @@ private Stats(StreamInput in) throws IOException { suggestCount = in.readVLong(); suggestTimeInMillis = in.readVLong(); suggestCurrent = in.readVLong(); + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + pitCount = in.readVLong(); + pitTimeInMillis = in.readVLong(); + pitCurrent = in.readVLong(); + } } public void add(Stats stats) { @@ -146,6 +164,10 @@ public void add(Stats stats) { suggestCount += stats.suggestCount; suggestTimeInMillis += stats.suggestTimeInMillis; suggestCurrent += stats.suggestCurrent; + + pitCount += stats.pitCount; + pitTimeInMillis += stats.pitTimeInMillis; + pitCurrent += stats.pitCurrent; } public void addForClosingShard(Stats stats) { @@ -162,6 +184,10 @@ public void 
addForClosingShard(Stats stats) { suggestCount += stats.suggestCount; suggestTimeInMillis += stats.suggestTimeInMillis; + + pitCount += stats.pitCount; + pitTimeInMillis += stats.pitTimeInMillis; + pitCurrent += stats.pitCurrent; } public long getQueryCount() { @@ -212,6 +238,22 @@ public long getScrollCurrent() { return scrollCurrent; } + public long getPitCount() { + return pitCount; + } + + public TimeValue getPitTime() { + return new TimeValue(pitTimeInMillis); + } + + public long getPitTimeInMillis() { + return pitTimeInMillis; + } + + public long getPitCurrent() { + return pitCurrent; + } + public long getSuggestCount() { return suggestCount; } @@ -249,6 +291,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(suggestCount); out.writeVLong(suggestTimeInMillis); out.writeVLong(suggestCurrent); + + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(pitCount); + out.writeVLong(pitTimeInMillis); + out.writeVLong(pitCurrent); + } } @Override @@ -265,6 +313,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SCROLL_TIME_IN_MILLIS, Fields.SCROLL_TIME, getScrollTime()); builder.field(Fields.SCROLL_CURRENT, scrollCurrent); + builder.field(Fields.PIT_TOTAL, pitCount); + builder.humanReadableField(Fields.PIT_TIME_IN_MILLIS, Fields.PIT_TIME, getPitTime()); + builder.field(Fields.PIT_CURRENT, pitCurrent); + builder.field(Fields.SUGGEST_TOTAL, suggestCount); builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); @@ -385,6 +437,10 @@ static final class Fields { static final String SCROLL_TIME = "scroll_time"; static final String SCROLL_TIME_IN_MILLIS = "scroll_time_in_millis"; static final String SCROLL_CURRENT = "scroll_current"; + static final String PIT_TOTAL = "point_in_time_total"; + static final String PIT_TIME = "point_in_time_time"; + static final String PIT_TIME_IN_MILLIS = "point_in_time_time_in_millis"; + static final String PIT_CURRENT = "point_in_time_current"; static final String SUGGEST_TOTAL = "suggest_total"; static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 3ef3571c75e59..6d0eb3a5949ca 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -187,6 +187,18 @@ public void onFreeScrollContext(ReaderContext readerContext) { totalStats.scrollMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); } + @Override + public void onNewPitContext(ReaderContext readerContext) { + totalStats.pitCurrent.inc(); + } + + @Override + public void onFreePitContext(ReaderContext readerContext) { + totalStats.pitCurrent.dec(); + assert totalStats.pitCurrent.count() >= 0; + totalStats.pitMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); + } + /** * Holder of statistics values * @@ -203,10 +215,12 @@ static final class StatsHolder { * for one-thousand times as long (i.e., scrolls that execute for almost twelve days on average). 
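+ * The pit metric added below is meant to follow the same convention: onFreePitContext accumulates elapsed time in
+ * microseconds (via TimeUnit.NANOSECONDS.toMicros), and stats() converts the accumulated sum to milliseconds.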
*/ final MeanMetric scrollMetric = new MeanMetric(); + final MeanMetric pitMetric = new MeanMetric(); final MeanMetric suggestMetric = new MeanMetric(); final CounterMetric queryCurrent = new CounterMetric(); final CounterMetric fetchCurrent = new CounterMetric(); final CounterMetric scrollCurrent = new CounterMetric(); + final CounterMetric pitCurrent = new CounterMetric(); final CounterMetric suggestCurrent = new CounterMetric(); SearchStats.Stats stats() { @@ -220,6 +234,9 @@ SearchStats.Stats stats() { scrollMetric.count(), TimeUnit.MICROSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count(), + pitMetric.count(), + TimeUnit.MICROSECONDS.toMillis(pitMetric.sum()), + pitCurrent.count(), suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), suggestCurrent.count() diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index ac6754bf6a74a..fb046e2310d93 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -40,7 +40,7 @@ public void beforeRefresh() throws IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh) { + if (didRefresh && shard.state() != IndexShardState.CLOSED && shard.getReplicationTracker().isPrimaryMode()) { publisher.publish(shard); } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9694f3dd37f80..67a8e691fda0d 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -151,6 +151,7 @@ import org.opensearch.index.store.StoreStats; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; +import org.opensearch.index.translog.TranslogFactory; import org.opensearch.index.translog.TranslogRecoveryRunner; import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.warmer.ShardIndexWarmerService; @@ -241,6 +242,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final GlobalCheckpointListeners globalCheckpointListeners; private final PendingReplicationActions pendingReplicationActions; private final ReplicationTracker replicationTracker; + private final SegmentReplicationCheckpointPublisher checkpointPublisher; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; @@ -305,9 +307,8 @@ Runnable getGlobalCheckpointSyncer() { private final AtomicReference pendingRefreshLocation = new AtomicReference<>(); private final RefreshPendingLocationListener refreshPendingLocationListener; private volatile boolean useRetentionLeasesInPeerRecovery; - private final ReferenceManager.RefreshListener checkpointRefreshListener; - private final Store remoteStore; + private final TranslogFactory translogFactory; public IndexShard( final ShardRouting shardRouting, @@ -330,6 +331,7 @@ public IndexShard( final Runnable globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService, + final TranslogFactory translogFactory, @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore ) throws IOException { @@ -414,12 +416,9 @@ public boolean shouldCache(Query 
query) { persistMetadata(path, indexSettings, shardRouting, null, logger); this.useRetentionLeasesInPeerRecovery = replicationTracker.hasAllPeerRecoveryRetentionLeases(); this.refreshPendingLocationListener = new RefreshPendingLocationListener(); - if (checkpointPublisher != null) { - this.checkpointRefreshListener = new CheckpointRefreshListener(this, checkpointPublisher); - } else { - this.checkpointRefreshListener = null; - } + this.checkpointPublisher = checkpointPublisher; this.remoteStore = remoteStore; + this.translogFactory = translogFactory; } public ThreadPool getThreadPool() { @@ -623,6 +622,11 @@ public void updateShardState( + newRouting; assert getOperationPrimaryTerm() == newPrimaryTerm; try { + if (indexSettings.isSegRepEnabled()) { + // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. + assert newRouting.primary() && currentRouting.primary() == false; + promoteNRTReplicaToPrimary(); + } replicationTracker.activatePrimaryMode(getLocalCheckpoint()); ensurePeerRecoveryRetentionLeasesExist(); /* @@ -1396,10 +1400,13 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti } /** - * Returns the lastest Replication Checkpoint that shard received. Shards will return an EMPTY checkpoint before - * the engine is opened. + * Returns the latest ReplicationCheckpoint that shard received. + * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices */ public ReplicationCheckpoint getLatestReplicationCheckpoint() { + if (indexSettings.isSegRepEnabled() == false) { + return null; + } if (getEngineOrNull() == null) { return ReplicationCheckpoint.empty(shardId); } @@ -1431,6 +1438,10 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp logger.trace(() -> new ParameterizedMessage("Ignoring new replication checkpoint - shard is not started {}", state())); return false; } + if (getReplicationTracker().isPrimaryMode()) { + logger.warn("Ignoring new replication checkpoint - shard is in primaryMode and cannot receive any checkpoints."); + return false; + } ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); if (localCheckpoint.isAheadOf(requestCheckpoint)) { logger.trace( @@ -3220,8 +3231,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); internalRefreshListener.add(new RemoteStoreRefreshListener(store.directory(), remoteDirectory)); } - if (this.checkpointRefreshListener != null) { - internalRefreshListener.add(checkpointRefreshListener); + if (this.checkpointPublisher != null && indexSettings.isSegRepEnabled() && shardRouting.primary()) { + internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); } return this.engineConfigFactory.newEngineConfig( @@ -3247,7 +3258,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro replicationTracker::getRetentionLeases, () -> getOperationPrimaryTerm(), tombstoneDocSupplier(), - indexSettings.isSegRepEnabled() && shardRouting.primary() == false + indexSettings.isSegRepEnabled() && shardRouting.primary() == false, + translogFactory ); } @@ -3778,6 +3790,10 @@ public boolean scheduledRefresh() { if (listenerNeedsRefresh == false // if we have a listener that is waiting for a refresh we need to force it && isSearchIdle() && indexSettings.isExplicitRefresh() == false + && 
indexSettings.isSegRepEnabled() == false + // Indices with segrep enabled will never wait on a refresh and ignore shard idle. Primary shards push out new segments only + // after a refresh, so we don't want to wait for a search to trigger that cycle. Replicas will only refresh after receiving + // a new set of segments. && active.get()) { // it must be active otherwise we might not free up segment memory once the shard became inactive // lets skip this refresh since we are search idle and // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will @@ -4107,4 +4123,26 @@ RetentionLeaseSyncer getRetentionLeaseSyncer() { public GatedCloseable getSegmentInfosSnapshot() { return getEngine().getSegmentInfosSnapshot(); } + + /** + * With segment replication enabled - prepare the shard's engine to be promoted as the new primary. + * + * If this shard is currently using a replication engine, this method: + * 1. Invokes {@link NRTReplicationEngine#commitSegmentInfos()} to ensure the engine can be reopened as writeable from the latest refresh point. + * InternalEngine opens its IndexWriter from an on-disk commit point, but this replica may have recently synced from a primary's refresh point, meaning it has documents searchable in its in-memory SegmentInfos + * that are not part of a commit point. This ensures that those documents are made part of a commit and do not need to be reindexed after promotion. + * 2. Invokes resetEngineToGlobalCheckpoint - This call performs the engine swap, opening up as a writeable engine and replays any operations in the xlog. The operations indexed from xlog here will be + * any ack'd writes that were not copied to this replica before promotion. + */ + private void promoteNRTReplicaToPrimary() { + assert shardRouting.primary() && indexSettings.isSegRepEnabled(); + getReplicationEngine().ifPresentOrElse(engine -> { + try { + engine.commitSegmentInfos(); + resetEngineToGlobalCheckpoint(); + } catch (IOException e) { + throw new EngineException(shardId, "Unable to update replica to writeable engine, failing shard", e); + } + }, () -> { throw new EngineException(shardId, "Expected replica engine to be of type NRTReplicationEngine"); }); + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 855457f275122..62e2b12896411 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -33,7 +33,7 @@ * * @opensearch.internal */ -public final class RemoteDirectory extends Directory { +public class RemoteDirectory extends Directory { private final BlobContainer blobContainer; @@ -50,6 +50,16 @@ public String[] listAll() throws IOException { return blobContainer.listBlobs().keySet().stream().sorted().toArray(String[]::new); } + /** + * Returns names of files with given prefix in this directory. + * @param filenamePrefix The prefix to match against file names in the directory + * @return A list of the matching filenames in the directory + * @throws IOException if there were any failures in reading from the blob container + */ + public Collection listFilesByPrefix(String filenamePrefix) throws IOException { + return blobContainer.listBlobsByPrefix(filenamePrefix).keySet(); + } + /** * Removes an existing file in the directory. 
* diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java new file mode 100644 index 0000000000000..d7d6b29d08bfc --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -0,0 +1,372 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.opensearch.common.UUIDs; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * A RemoteDirectory extension for remote segment store. We need to make sure we don't overwrite a segment file once uploaded. + * In order to prevent segment overwrite which can occur due to two primary nodes for the same shard at the same time, + * a unique suffix is added to the uploaded segment file. This class keeps track of filename of segments stored + * in remote segment store vs filename in local filesystem and provides the consistent Directory interface so that + * caller will be accessing segment files in the same way as {@code FSDirectory}. Apart from storing actual segment files, + * remote segment store also keeps track of refresh checkpoints as metadata in a separate path which is handled by + * another instance of {@code RemoteDirectory}. + * @opensearch.internal + */ +public final class RemoteSegmentStoreDirectory extends FilterDirectory { + /** + * Each segment file is uploaded with unique suffix. + * For example, _0.cfe in local filesystem will be uploaded to remote segment store as _0.cfe__gX7bNIIBrs0AUNsR2yEG + */ + public static final String SEGMENT_NAME_UUID_SEPARATOR = "__"; + + public static final MetadataFilenameUtils.MetadataFilenameComparator METADATA_FILENAME_COMPARATOR = + new MetadataFilenameUtils.MetadataFilenameComparator(); + + /** + * remoteDataDirectory is used to store segment files at path: cluster_UUID/index_UUID/shardId/segments/data + */ + private final RemoteDirectory remoteDataDirectory; + /** + * remoteMetadataDirectory is used to store metadata files at path: cluster_UUID/index_UUID/shardId/segments/metadata + */ + private final RemoteDirectory remoteMetadataDirectory; + + /** + * To prevent explosion of refresh metadata files, we replace refresh files for the given primary term and generation + * This is achieved by uploading refresh metadata file with the same UUID suffix. + */ + private String metadataFileUniqueSuffix; + + /** + * Keeps track of local segment filename to uploaded filename along with other attributes like checksum. + * This map acts as a cache layer for uploaded segment filenames which helps avoid calling listAll() each time. 
+ * It is important to initialize this map on creation of RemoteSegmentStoreDirectory and update it on each upload and delete. + */ + private Map segmentsUploadedToRemoteStore; + + private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + + public RemoteSegmentStoreDirectory(RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory) throws IOException { + super(remoteDataDirectory); + this.remoteDataDirectory = remoteDataDirectory; + this.remoteMetadataDirectory = remoteMetadataDirectory; + init(); + } + + /** + * Initializes the cache which keeps track of all the segment files uploaded to the remote segment store. + * As this cache is specific to an instance of RemoteSegmentStoreDirectory, it is possible that cache becomes stale + * if another instance of RemoteSegmentStoreDirectory is used to upload/delete segment files. + * It is caller's responsibility to call init() again to ensure that cache is properly updated. + * @throws IOException if there were any failures in reading the metadata file + */ + public void init() throws IOException { + this.metadataFileUniqueSuffix = UUIDs.base64UUID(); + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(readLatestMetadataFile()); + } + + /** + * Read the latest metadata file to get the list of segments uploaded to the remote segment store. + * We upload a metadata file per refresh, but it is not unique per refresh. Refresh metadata file is unique for a given commit. + * The format of refresh metadata filename is: refresh_metadata__PrimaryTerm__Generation__UUID + * Refresh metadata files keep track of active segments for the shard at the time of refresh. + * In order to get the list of segment files uploaded to the remote segment store, we need to read the latest metadata file. + * Each metadata file contains a map where + * Key is - Segment local filename and + * Value is - local filename::uploaded filename::checksum + * @return Map of segment filename to uploaded filename with checksum + * @throws IOException if there were any failures in reading the metadata file + */ + private Map readLatestMetadataFile() throws IOException { + Map segmentMetadataMap = new HashMap<>(); + + Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); + Optional latestMetadataFile = metadataFiles.stream().max(METADATA_FILENAME_COMPARATOR); + + if (latestMetadataFile.isPresent()) { + logger.info("Reading latest Metadata file {}", latestMetadataFile.get()); + segmentMetadataMap = readMetadataFile(latestMetadataFile.get()); + } else { + logger.info("No metadata file found, this can happen for new index with no data uploaded to remote segment store"); + } + + return segmentMetadataMap; + } + + private Map readMetadataFile(String metadataFilename) throws IOException { + try (IndexInput indexInput = remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)) { + Map segmentMetadata = indexInput.readMapOfStrings(); + return segmentMetadata.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> UploadedSegmentMetadata.fromString(entry.getValue()))); + } + } + + /** + * Metadata of a segment that is uploaded to remote segment store. 
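+ * The string form produced by toString() joins the three attributes with "::", for example
+ * (hypothetical values): _0.si::_0.si__gX7bNIIBrs0AUNsR2yEG::472836921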
+ */ + static class UploadedSegmentMetadata { + private static final String SEPARATOR = "::"; + private final String originalFilename; + private final String uploadedFilename; + private final String checksum; + + UploadedSegmentMetadata(String originalFilename, String uploadedFilename, String checksum) { + this.originalFilename = originalFilename; + this.uploadedFilename = uploadedFilename; + this.checksum = checksum; + } + + @Override + public String toString() { + return String.join(SEPARATOR, originalFilename, uploadedFilename, checksum); + } + + public static UploadedSegmentMetadata fromString(String uploadedFilename) { + String[] values = uploadedFilename.split(SEPARATOR); + return new UploadedSegmentMetadata(values[0], values[1], values[2]); + } + } + + /** + * Contains utility methods that provide various parts of metadata filename along with comparator + * Each metadata filename is of format: PREFIX__PrimaryTerm__Generation__UUID + */ + static class MetadataFilenameUtils { + public static final String SEPARATOR = "__"; + public static final String METADATA_PREFIX = "metadata"; + + /** + * Comparator to sort the metadata filenames. The order of sorting is: Primary Term, Generation, UUID + * Even though UUID sort does not provide any info on recency, it provides a consistent way to sort the filenames. + */ + static class MetadataFilenameComparator implements Comparator { + @Override + public int compare(String first, String second) { + String[] firstTokens = first.split(SEPARATOR); + String[] secondTokens = second.split(SEPARATOR); + if (!firstTokens[0].equals(secondTokens[0])) { + return firstTokens[0].compareTo(secondTokens[0]); + } + long firstPrimaryTerm = getPrimaryTerm(firstTokens); + long secondPrimaryTerm = getPrimaryTerm(secondTokens); + if (firstPrimaryTerm != secondPrimaryTerm) { + return firstPrimaryTerm > secondPrimaryTerm ? 1 : -1; + } else { + long firstGeneration = getGeneration(firstTokens); + long secondGeneration = getGeneration(secondTokens); + if (firstGeneration != secondGeneration) { + return firstGeneration > secondGeneration ? 1 : -1; + } else { + return getUuid(firstTokens).compareTo(getUuid(secondTokens)); + } + } + } + } + + // Visible for testing + static String getMetadataFilename(long primaryTerm, long generation, String uuid) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + Long.toString(primaryTerm), + Long.toString(generation, Character.MAX_RADIX), + uuid + ); + } + + // Visible for testing + static long getPrimaryTerm(String[] filenameTokens) { + return Long.parseLong(filenameTokens[1]); + } + + // Visible for testing + static long getGeneration(String[] filenameTokens) { + return Long.parseLong(filenameTokens[2], Character.MAX_RADIX); + } + + // Visible for testing + static String getUuid(String[] filenameTokens) { + return filenameTokens[3]; + } + } + + /** + * Returns list of all the segment files uploaded to remote segment store till the last refresh checkpoint. + * Any segment file that is uploaded without corresponding metadata file will not be visible as part of listAll(). + * We chose not to return cache entries for listAll as cache can have entries for stale segments as well. + * Even if we plan to delete stale segments from remote segment store, it will be a periodic operation. 
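+ * For example (hypothetical filenames): if metadata__2__f__uuid1 is the latest metadata file (primary term 2,
+ * generation 15 rendered in radix 36 as "f") and it tracks _0.si and _0.cfe, listAll() returns exactly those two names.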
+ * @return segment filenames stored in remote segment store + * @throws IOException if there were any failures in reading the metadata file + */ + @Override + public String[] listAll() throws IOException { + return readLatestMetadataFile().keySet().toArray(new String[0]); + } + + /** + * Delete segment file from remote segment store. + * @param name the name of an existing segment file in local filesystem. + * @throws IOException if the file exists but could not be deleted. + */ + @Override + public void deleteFile(String name) throws IOException { + String remoteFilename = getExistingRemoteFilename(name); + if (remoteFilename != null) { + remoteDataDirectory.deleteFile(remoteFilename); + segmentsUploadedToRemoteStore.remove(name); + } + } + + /** + * Returns the byte length of a segment file in the remote segment store. + * @param name the name of an existing segment file in local filesystem. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist in the cache or remote segment store + */ + @Override + public long fileLength(String name) throws IOException { + String remoteFilename = getExistingRemoteFilename(name); + if (remoteFilename != null) { + return remoteDataDirectory.fileLength(remoteFilename); + } else { + throw new NoSuchFileException(name); + } + } + + /** + * Creates and returns a new instance of {@link RemoteIndexOutput} which will be used to copy files to the remote + * segment store. + * @param name the name of the file to create. + * @throws IOException in case of I/O error + */ + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + return remoteDataDirectory.createOutput(getNewRemoteSegmentFilename(name), context); + } + + /** + * Opens a stream for reading an existing file and returns {@link RemoteIndexInput} enclosing the stream. + * @param name the name of an existing file. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist either in cache or remote segment store + */ + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + String remoteFilename = getExistingRemoteFilename(name); + if (remoteFilename != null) { + return remoteDataDirectory.openInput(remoteFilename, context); + } else { + throw new NoSuchFileException(name); + } + } + + /** + * Copies an existing src file from directory from to a non-existent file dest in this directory. + * Once the segment is uploaded to remote segment store, update the cache accordingly. + */ + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + String remoteFilename = getNewRemoteSegmentFilename(dest); + remoteDataDirectory.copyFrom(from, src, remoteFilename, context); + String checksum = getChecksumOfLocalFile(from, src); + UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum); + segmentsUploadedToRemoteStore.put(src, segmentMetadata); + } + + /** + * Checks if the file exists in the uploadedSegments cache and the checksum matches. + * It is important to match the checksum as the same segment filename can be used for different + * segments due to a concurrency issue. + * @param localFilename filename of segment stored in local filesystem + * @param checksum checksum of the segment file + * @return true if file exists in cache and checksum matches. 
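+ * A hypothetical usage sketch, mirroring the checksum scheme of getChecksumOfLocalFile below:
+ * <pre>{@code
+ * String checksum = Long.toString(CodecUtil.retrieveChecksum(indexInput)); // same scheme as this class
+ * boolean skipUpload = remoteSegmentStoreDirectory.containsFile("_0.si", checksum);
+ * }</pre>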
+ */ + public boolean containsFile(String localFilename, String checksum) { + return segmentsUploadedToRemoteStore.containsKey(localFilename) + && segmentsUploadedToRemoteStore.get(localFilename).checksum.equals(checksum); + } + + /** + * Upload metadata file + * @param segmentFiles segment files that are part of the shard at the time of the latest refresh + * @param storeDirectory instance of local directory to temporarily create metadata file before upload + * @param primaryTerm primary term to be used in the name of metadata file + * @param generation commit generation + * @throws IOException in case of I/O error while uploading the metadata file + */ + public void uploadMetadata(Collection segmentFiles, Directory storeDirectory, long primaryTerm, long generation) + throws IOException { + synchronized (this) { + String metadataFilename = MetadataFilenameUtils.getMetadataFilename(primaryTerm, generation, this.metadataFileUniqueSuffix); + IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT); + Map uploadedSegments = new HashMap<>(); + for (String file : segmentFiles) { + if (segmentsUploadedToRemoteStore.containsKey(file)) { + uploadedSegments.put(file, segmentsUploadedToRemoteStore.get(file).toString()); + } else { + throw new NoSuchFileException(file); + } + } + indexOutput.writeMapOfStrings(uploadedSegments); + indexOutput.close(); + storeDirectory.sync(Collections.singleton(metadataFilename)); + remoteMetadataDirectory.copyFrom(storeDirectory, metadataFilename, metadataFilename, IOContext.DEFAULT); + storeDirectory.deleteFile(metadataFilename); + } + } + + private String getChecksumOfLocalFile(Directory directory, String file) throws IOException { + try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { + return Long.toString(CodecUtil.retrieveChecksum(indexInput)); + } + } + + private String getExistingRemoteFilename(String localFilename) { + if (segmentsUploadedToRemoteStore.containsKey(localFilename)) { + return segmentsUploadedToRemoteStore.get(localFilename).uploadedFilename; + } else { + return null; + } + } + + private String getNewRemoteSegmentFilename(String localFilename) { + return localFilename + SEGMENT_NAME_UUID_SEPARATOR + UUIDs.base64UUID(); + } + + private String getLocalSegmentFilename(String remoteFilename) { + return remoteFilename.split(SEGMENT_NAME_UUID_SEPARATOR)[0]; + } + + // Visible for testing + Map getSegmentsUploadedToRemoteStore() { + return this.segmentsUploadedToRemoteStore; + } +} diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 6828ab7d91b2c..58598ab2d08f4 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -110,8 +110,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Optional; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -121,6 +121,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; /** * A Store provides plain access to files written by an opensearch index shard. 
Each shard @@ -799,6 +800,47 @@ public void beforeClose() { shardLock.setDetails("closing shard"); } + /** + * This method should only be used with Segment Replication. + * Perform a commit from a live {@link SegmentInfos}. Replica engines with segrep do not have an IndexWriter and Lucene does not currently + * have the ability to create a writer directly from a SegmentInfos object. To promote the replica as a primary and avoid reindexing, we must first commit + * on the replica so that it can be opened with a writeable engine. Further, InternalEngine currently invokes `trimUnsafeCommits` which reverts the engine to a previous safeCommit where the max seqNo is less than or equal + * to the current global checkpoint. It is likely that the replica has a maxSeqNo that is higher than the global cp and a new commit will be wiped. + * + * To get around these limitations, this method first creates an IndexCommit directly from SegmentInfos, it then + * uses an appending IW to create an IndexCommit from the commit created on SegmentInfos. + * This ensures that 1. All files in the new commit are fsynced and 2. Deletes older commit points so the only commit to start from is our new commit. + * + * @param latestSegmentInfos {@link SegmentInfos} The latest active infos + * @param maxSeqNo The engine's current maxSeqNo + * @param processedCheckpoint The engine's current processed checkpoint. + * @throws IOException when there is an IO error committing. + */ + public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, long processedCheckpoint) throws IOException { + assert indexSettings.isSegRepEnabled(); + metadataLock.writeLock().lock(); + try { + final Map userData = new HashMap<>(latestSegmentInfos.getUserData()); + userData.put(LOCAL_CHECKPOINT_KEY, String.valueOf(processedCheckpoint)); + userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); + latestSegmentInfos.setUserData(userData, true); + latestSegmentInfos.commit(directory()); + + // similar to TrimUnsafeCommits, create a commit with an appending IW, this will delete old commits and ensure all files + // associated with the SegmentInfos.commit are fsynced. + final List existingCommits = DirectoryReader.listCommits(directory); + assert existingCommits.isEmpty() == false : "Expected at least one commit but none found"; + final IndexCommit lastIndexCommit = existingCommits.get(existingCommits.size() - 1); + assert latestSegmentInfos.getSegmentsFileName().equals(lastIndexCommit.getSegmentsFileName()); + try (IndexWriter writer = newAppendingIndexWriter(directory, lastIndexCommit)) { + writer.setLiveCommitData(lastIndexCommit.getUserData().entrySet()); + writer.commit(); + } + } finally { + metadataLock.writeLock().unlock(); + } + } + /** * A store directory * @@ -1102,6 +1144,30 @@ public Map asMap() { private static final String LIV_FILE_EXTENSION = "liv"; // lucene 5 delete file private static final String SEGMENT_INFO_EXTENSION = "si"; + /** + * Helper method used to group store files according to segment and commit. 
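+ * For example (hypothetical file set): {_0.si, _0.cfe, _1.si, _1_1.liv, segments_5} is grouped into the per-segment
+ * lists [_0.si, _0.cfe] and [_1.si], plus a single per-commit list [_1_1.liv, segments_5].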
+ * + * @see MetadataSnapshot#recoveryDiff(MetadataSnapshot) + * @see MetadataSnapshot#segmentReplicationDiff(MetadataSnapshot) + */ + private Iterable> getGroupedFilesIterable() { + final Map> perSegment = new HashMap<>(); + final List perCommitStoreFiles = new ArrayList<>(); + for (StoreFileMetadata meta : this) { + final String segmentId = IndexFileNames.parseSegmentName(meta.name()); + final String extension = IndexFileNames.getExtension(meta.name()); + if (IndexFileNames.SEGMENTS.equals(segmentId) + || DEL_FILE_EXTENSION.equals(extension) + || LIV_FILE_EXTENSION.equals(extension)) { + // only treat del files as per-commit files fnm files are generational but only for upgradable DV + perCommitStoreFiles.add(meta); + } else { + perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(meta); + } + } + return Iterables.concat(perSegment.values(), Collections.singleton(perCommitStoreFiles)); + } + /** * Returns a diff between the two snapshots that can be used for recovery. The given snapshot is treated as the * recovery target and this snapshot as the source. The returned diff will hold a list of files that are: @@ -1139,23 +1205,8 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { final List identical = new ArrayList<>(); final List different = new ArrayList<>(); final List missing = new ArrayList<>(); - final Map> perSegment = new HashMap<>(); - final List perCommitStoreFiles = new ArrayList<>(); - - for (StoreFileMetadata meta : this) { - final String segmentId = IndexFileNames.parseSegmentName(meta.name()); - final String extension = IndexFileNames.getExtension(meta.name()); - if (IndexFileNames.SEGMENTS.equals(segmentId) - || DEL_FILE_EXTENSION.equals(extension) - || LIV_FILE_EXTENSION.equals(extension)) { - // only treat del files as per-commit files fnm files are generational but only for upgradable DV - perCommitStoreFiles.add(meta); - } else { - perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(meta); - } - } final ArrayList identicalFiles = new ArrayList<>(); - for (List segmentFiles : Iterables.concat(perSegment.values(), Collections.singleton(perCommitStoreFiles))) { + for (List segmentFiles : getGroupedFilesIterable()) { identicalFiles.clear(); boolean consistent = true; for (StoreFileMetadata meta : segmentFiles) { @@ -1190,6 +1241,51 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { return recoveryDiff; } + /** + * Segment Replication method + * Returns a diff between the two snapshots that can be used for getting list of files to copy over to a replica for segment replication. The given snapshot is treated as the + * target and this snapshot as the source. The returned diff will hold a list of files that are: + *
+ * <ul>
+ * <li>identical: they exist in both snapshots and can be considered the same, i.e. they don't need to be recovered</li>
+ * <li>different: they exist in both snapshots but they are not identical</li>
+ * <li>missing: files that exist in the source but not in the target</li>
+ * </ul>
+ */ + public RecoveryDiff segmentReplicationDiff(MetadataSnapshot recoveryTargetSnapshot) { + final List identical = new ArrayList<>(); + final List different = new ArrayList<>(); + final List missing = new ArrayList<>(); + final ArrayList identicalFiles = new ArrayList<>(); + for (List segmentFiles : getGroupedFilesIterable()) { + identicalFiles.clear(); + boolean consistent = true; + for (StoreFileMetadata meta : segmentFiles) { + StoreFileMetadata storeFileMetadata = recoveryTargetSnapshot.get(meta.name()); + if (storeFileMetadata == null) { + // Do not consider missing files as inconsistent in SegRep as replicas may lag while primary updates + // documents and generate new files specific to a segment + missing.add(meta); + } else if (storeFileMetadata.isSame(meta) == false) { + consistent = false; + different.add(meta); + } else { + identicalFiles.add(meta); + } + } + if (consistent) { + identical.addAll(identicalFiles); + } else { + different.addAll(identicalFiles); + } + } + RecoveryDiff recoveryDiff = new RecoveryDiff( + Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), + Collections.unmodifiableList(missing) + ); + return recoveryDiff; + } + /** * Returns the number of files in this snapshot */ diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java new file mode 100644 index 0000000000000..566eda4fe4a6e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import java.io.IOException; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * Translog Factory for the local on-disk {@link Translog} + * + * @opensearch.internal + */ +public class InternalTranslogFactory implements TranslogFactory { + + @Override + public Translog newTranslog( + TranslogConfig translogConfig, + String translogUUID, + TranslogDeletionPolicy translogDeletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer + ) throws IOException { + + return new Translog( + translogConfig, + translogUUID, + translogDeletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer + ); + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index ac82cf246cc55..fd52e02132006 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -54,7 +54,8 @@ public InternalTranslogManager( Supplier localCheckpointTrackerSupplier, String translogUUID, TranslogEventListener translogEventListener, - LifecycleAware engineLifeCycleAware + LifecycleAware engineLifeCycleAware, + TranslogFactory translogFactory ) throws IOException { this.shardId = shardId; this.readLock = readLock; @@ -67,7 +68,7 @@ public InternalTranslogManager( if (tracker != null) { tracker.markSeqNoAsPersisted(seqNo); } - }, translogUUID); + }, translogUUID, translogFactory); assert translog.getGeneration() != null; this.translog = translog; assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; @@ -333,10 +334,11 @@ protected Translog openTranslog( TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier, LongConsumer persistedSequenceNumberConsumer, - String translogUUID + String translogUUID, + TranslogFactory translogFactory ) throws IOException { - return new Translog( + return translogFactory.newTranslog( translogConfig, translogUUID, translogDeletionPolicy, diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java new file mode 100644 index 0000000000000..5500bda99808d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import java.io.IOException; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * Translog Factory to enable creation of various local on-disk + * and remote store flavors of {@link Translog} + * + * @opensearch.internal + */ +@FunctionalInterface +public interface TranslogFactory { + + Translog newTranslog( + final TranslogConfig config, + final String translogUUID, + final TranslogDeletionPolicy deletionPolicy, + final LongSupplier globalCheckpointSupplier, + final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java index 09f5f38a9f6a9..96a2dd05851c0 100644 --- a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java @@ -35,7 +35,8 @@ public WriteOnlyTranslogManager( Supplier localCheckpointTrackerSupplier, String translogUUID, TranslogEventListener translogEventListener, - LifecycleAware engineLifecycleAware + LifecycleAware engineLifecycleAware, + TranslogFactory translogFactory ) throws IOException { super( translogConfig, @@ -47,7 +48,8 @@ public WriteOnlyTranslogManager( localCheckpointTrackerSupplier, translogUUID, translogEventListener, - engineLifecycleAware + engineLifecycleAware, + translogFactory ); } diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index a9b032c98b70f..dfebe5f7cabf2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -24,7 +24,10 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.function.Predicate; +import java.util.stream.Collectors; /** * Manages references to ongoing segrep events on a node. @@ -38,7 +41,7 @@ class OngoingSegmentReplications { private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; - private final Map nodesToHandlers; + private final Map allocationIdToHandlers; /** * Constructor. @@ -50,7 +53,7 @@ class OngoingSegmentReplications { this.indicesService = indicesService; this.recoverySettings = recoverySettings; this.copyStateMap = Collections.synchronizedMap(new HashMap<>()); - this.nodesToHandlers = ConcurrentCollections.newConcurrentMap(); + this.allocationIdToHandlers = ConcurrentCollections.newConcurrentMap(); } /** @@ -96,8 +99,7 @@ synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) thro * @param listener {@link ActionListener} that resolves when sending files is complete. 
*/ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener listener) { - final DiscoveryNode node = request.getTargetNode(); - final SegmentReplicationSourceHandler handler = nodesToHandlers.get(node); + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.get(request.getTargetAllocationId()); if (handler != null) { if (handler.isReplicating()) { throw new OpenSearchException( @@ -108,7 +110,7 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { - final SegmentReplicationSourceHandler sourceHandler = nodesToHandlers.remove(node); + final SegmentReplicationSourceHandler sourceHandler = allocationIdToHandlers.remove(request.getTargetAllocationId()); if (sourceHandler != null) { removeCopyState(sourceHandler.getCopyState()); } @@ -123,19 +125,6 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener handler.getCopyState().getShard().shardId().equals(shard.shardId()), reason); + } + + /** + * Cancel any ongoing replications for a given {@link DiscoveryNode} + * + * @param node {@link DiscoveryNode} node for which to cancel replication events. + */ + void cancelReplication(DiscoveryNode node) { + cancelHandlers(handler -> handler.getTargetNode().equals(node), "Node left"); + } /** @@ -186,19 +180,25 @@ boolean isInCopyStateMap(ReplicationCheckpoint replicationCheckpoint) { } int size() { - return nodesToHandlers.size(); + return allocationIdToHandlers.size(); } int cachedCopyStateSize() { return copyStateMap.size(); } - private SegmentReplicationSourceHandler createTargetHandler(DiscoveryNode node, CopyState copyState, FileChunkWriter fileChunkWriter) { + private SegmentReplicationSourceHandler createTargetHandler( + DiscoveryNode node, + CopyState copyState, + String allocationId, + FileChunkWriter fileChunkWriter + ) { return new SegmentReplicationSourceHandler( node, fileChunkWriter, copyState.getShard().getThreadPool(), copyState, + allocationId, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), recoverySettings.getMaxConcurrentFileChunks() ); @@ -231,4 +231,23 @@ private synchronized void removeCopyState(CopyState copyState) { copyStateMap.remove(copyState.getRequestedReplicationCheckpoint()); } } + + /** + * Remove handlers from allocationIdToHandlers map based on a filter predicate. + * This will also decref the handler's CopyState reference. 
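+ * @param predicate matches the {@link SegmentReplicationSourceHandler}s whose replication events should be cancelled
+ * @param reason the reason passed to each cancelled handler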
+ */ + private void cancelHandlers(Predicate predicate, String reason) { + final List allocationIds = allocationIdToHandlers.values() + .stream() + .filter(predicate) + .map(SegmentReplicationSourceHandler::getAllocationId) + .collect(Collectors.toList()); + for (String allocationId : allocationIds) { + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); + if (handler != null) { + handler.cancel(reason); + removeCopyState(handler.getCopyState()); + } + } + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 8911302a722f5..2d21653c1924c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -27,6 +27,7 @@ import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.MultiChunkTransfer; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transports; @@ -54,6 +55,8 @@ class SegmentReplicationSourceHandler { private final List resources = new CopyOnWriteArrayList<>(); private final Logger logger; private final AtomicBoolean isReplicating = new AtomicBoolean(); + private final DiscoveryNode targetNode; + private final String allocationId; /** * Constructor. @@ -70,9 +73,11 @@ class SegmentReplicationSourceHandler { FileChunkWriter writer, ThreadPool threadPool, CopyState copyState, + String allocationId, int fileChunkSizeInBytes, int maxConcurrentFileChunks ) { + this.targetNode = targetNode; this.shard = copyState.getShard(); this.logger = Loggers.getLogger( SegmentReplicationSourceHandler.class, @@ -89,6 +94,7 @@ class SegmentReplicationSourceHandler { fileChunkSizeInBytes, maxConcurrentFileChunks ); + this.allocationId = allocationId; this.copyState = copyState; } @@ -99,16 +105,24 @@ class SegmentReplicationSourceHandler { * @param listener {@link ActionListener} that completes with the list of files sent. 
*/ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListener listener) { + final ReplicationTimer timer = new ReplicationTimer(); if (isReplicating.compareAndSet(false, true) == false) { throw new OpenSearchException("Replication to {} is already running.", shard.shardId()); } future.addListener(listener, OpenSearchExecutors.newDirectExecutorService()); final Closeable releaseResources = () -> IOUtils.close(resources); try { - + timer.start(); final Consumer onFailure = e -> { assert Transports.assertNotTransportThread(SegmentReplicationSourceHandler.this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + timer.stop(); + logger.trace( + "[replication id {}] Source node failed to send files to target node [{}], timing: {}", + request.getReplicationId(), + request.getTargetNode().getId(), + timer.time() + ); }; RunUnderPrimaryPermit.run(() -> { @@ -118,7 +132,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene logger.debug( "delaying replication of {} as it is not listed as assigned to target node {}", shard.shardId(), - request.getTargetNode() + targetNode ); throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); } @@ -142,18 +156,17 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene transfer.start(); sendFileStep.whenComplete(r -> { - final String targetAllocationId = request.getTargetAllocationId(); - RunUnderPrimaryPermit.run( - () -> shard.markAllocationIdAsInSync(targetAllocationId, request.getCheckpoint().getSeqNo()), - shard.shardId() + " marking " + targetAllocationId + " as in sync", - shard, - cancellableThreads, - logger - ); try { future.onResponse(new GetSegmentFilesResponse(List.of(storeFileMetadata))); } finally { IOUtils.close(resources); + timer.stop(); + logger.trace( + "[replication id {}] Source node completed sending files to target node [{}], timing: {}", + request.getReplicationId(), + request.getTargetNode().getId(), + timer.time() + ); } }, onFailure); } catch (Exception e) { @@ -175,4 +188,12 @@ CopyState getCopyState() { public boolean isReplicating() { return isReplicating.get(); } + + public DiscoveryNode getTargetNode() { + return targetNode; + } + + public String getAllocationId() { + return allocationId; + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index d428459884f97..0cee731fde2cb 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; @@ -25,6 +26,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; @@ 
-86,6 +88,8 @@ public SegmentReplicationSourceService( private class CheckpointInfoRequestHandler implements TransportRequestHandler { @Override public void messageReceived(CheckpointInfoRequest request, TransportChannel channel, Task task) throws Exception { + final ReplicationTimer timer = new ReplicationTimer(); + timer.start(); final RemoteSegmentFileChunkWriter segmentSegmentFileChunkWriter = new RemoteSegmentFileChunkWriter( request.getReplicationId(), recoverySettings, @@ -109,6 +113,16 @@ public void messageReceived(CheckpointInfoRequest request, TransportChannel chan copyState.getPendingDeleteFiles() ) ); + timer.stop(); + logger.trace( + new ParameterizedMessage( + "[replication id {}] Source node sent checkpoint info [{}] to target node [{}], timing: {}", + request.getReplicationId(), + copyState.getCheckpoint(), + request.getTargetNode().getId(), + timer.time() + ) + ); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index 838c06a4785ef..f865ba1332186 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -8,10 +8,14 @@ package org.opensearch.indices.replication; +import org.opensearch.common.collect.Tuple; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationTimer; +import java.util.ArrayList; +import java.util.List; + /** * ReplicationState implementation to track Segment Replication events. * @@ -26,10 +30,12 @@ public class SegmentReplicationState implements ReplicationState { */ public enum Stage { DONE((byte) 0), - INIT((byte) 1), - - REPLICATING((byte) 2); + REPLICATING((byte) 2), + GET_CHECKPOINT_INFO((byte) 3), + FILE_DIFF((byte) 4), + GET_FILES((byte) 5), + FINALIZE_REPLICATION((byte) 6); private static final Stage[] STAGES = new Stage[Stage.values().length]; @@ -60,13 +66,27 @@ public static Stage fromId(byte id) { private Stage stage; private final ReplicationLuceneIndex index; - private final ReplicationTimer timer; + private final ReplicationTimer overallTimer; + private final ReplicationTimer stageTimer; + private final List> timingData; + private long replicationId; public SegmentReplicationState(ReplicationLuceneIndex index) { stage = Stage.INIT; this.index = index; - timer = new ReplicationTimer(); - timer.start(); + // Timing data will have as many entries as stages, plus one + // additional entry for the overall timer + timingData = new ArrayList<>(Stage.values().length + 1); + overallTimer = new ReplicationTimer(); + stageTimer = new ReplicationTimer(); + stageTimer.start(); + // set an invalid value by default + this.replicationId = -1L; + } + + public SegmentReplicationState(ReplicationLuceneIndex index, long replicationId) { + this(index); + this.replicationId = replicationId; } @Override @@ -74,9 +94,17 @@ public ReplicationLuceneIndex getIndex() { return index; } + public long getReplicationId() { + return replicationId; + } + @Override public ReplicationTimer getTimer() { - return timer; + return overallTimer; + } + + public List> getTimingData() { + return timingData; } public Stage getStage() { @@ -90,6 +118,12 @@ protected void validateAndSetStage(Stage expected, Stage next) { "can't move replication to stage [" + next + "]. 
current stage: [" + stage + "] (expected [" + expected + "])" ); } + // save the timing data for the current step + stageTimer.stop(); + timingData.add(new Tuple<>(stage.name(), stageTimer.time())); + // restart the step timer + stageTimer.reset(); + stageTimer.start(); stage = next; } @@ -97,16 +131,29 @@ public void setStage(Stage stage) { switch (stage) { case INIT: this.stage = Stage.INIT; - getIndex().reset(); break; case REPLICATING: validateAndSetStage(Stage.INIT, stage); - getIndex().start(); + // only start the overall timer once we've started replication + overallTimer.start(); break; - case DONE: + case GET_CHECKPOINT_INFO: validateAndSetStage(Stage.REPLICATING, stage); - getIndex().stop(); - getTimer().stop(); + break; + case FILE_DIFF: + validateAndSetStage(Stage.GET_CHECKPOINT_INFO, stage); + break; + case GET_FILES: + validateAndSetStage(Stage.FILE_DIFF, stage); + break; + case FINALIZE_REPLICATION: + validateAndSetStage(Stage.GET_FILES, stage); + break; + case DONE: + validateAndSetStage(Stage.FINALIZE_REPLICATION, stage); + // add the overall timing data + overallTimer.stop(); + timingData.add(new Tuple<>("OVERALL", overallTimer.time())); break; default: throw new IllegalArgumentException("unknown SegmentReplicationState.Stage [" + stage + "]"); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 516cfa91a787b..a658ffc09d590 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -64,7 +64,7 @@ public SegmentReplicationTarget( super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); this.checkpoint = checkpoint; this.source = source; - this.state = new SegmentReplicationState(stateIndex); + this.state = new SegmentReplicationState(stateIndex, getId()); this.multiFileWriter = new MultiFileWriter(indexShard.store(), stateIndex, getPrefix(), logger, this::ensureRefCount); } @@ -139,7 +139,9 @@ public void startReplication(ActionListener listener) { final StepListener getFilesListener = new StepListener<>(); final StepListener finalizeListener = new StepListener<>(); + logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint. + state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); checkpointInfoListener.whenComplete(checkpointInfo -> getFiles(checkpointInfo, getFilesListener), listener::onFailure); @@ -152,14 +154,16 @@ public void startReplication(ActionListener listener) { private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener getFilesListener) throws IOException { + state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.MetadataSnapshot snapshot = checkpointInfo.getSnapshot(); Store.MetadataSnapshot localMetadata = getMetadataSnapshot(); - final Store.RecoveryDiff diff = snapshot.recoveryDiff(localMetadata); - logger.debug("Replication diff {}", diff); - // Segments are immutable. 
So if the replica has any segments with the same name that differ from the one in the incoming snapshot - // from - // source that means the local copy of the segment has been corrupted/changed in some way and we throw an IllegalStateException to - // fail the shard + final Store.RecoveryDiff diff = snapshot.segmentReplicationDiff(localMetadata); + logger.trace("Replication diff {}", diff); + /* + * Segments are immutable. So if the replica has any segments with the same name that differ from the one in the incoming + * snapshot from source that means the local copy of the segment has been corrupted/changed in some way and we throw an + * IllegalStateException to fail the shard + */ if (diff.different.isEmpty() == false) { getFilesListener.onFailure( new IllegalStateException( @@ -177,15 +181,18 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener listener) { + state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); ActionListener.completeWith(listener, () -> { multiFileWriter.renameAllTempFiles(); final Store store = store(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index f699f0edba842..a79ce195ad83b 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -116,7 +116,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { - + logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); // Checks if received checkpoint is already present and ahead then it replaces old received checkpoint if (latestReceivedCheckpoint.get(replicaShard.shardId()) != null) { if (receivedCheckpoint.isAheadOf(latestReceivedCheckpoint.get(replicaShard.shardId()))) { @@ -139,6 +139,14 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe startReplication(receivedCheckpoint, replicaShard, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication complete, timing data: {}", + replicaShard.shardId().getId(), + state.getReplicationId(), + state.getTimingData() + ) + ); // if we received a checkpoint during the copy event that is ahead of this // try and process it. 
if (latestReceivedCheckpoint.get(replicaShard.shardId()).isAheadOf(replicaShard.getLatestReplicationCheckpoint())) { @@ -154,6 +162,14 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication failed, timing data: {}", + replicaShard.shardId().getId(), + state.getReplicationId(), + state.getTimingData() + ) + ); if (sendShardFailure == true) { logger.error("replication failure", e); replicaShard.failShard("replication failure", e); @@ -172,9 +188,9 @@ public void startReplication( startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); } - public void startReplication(final SegmentReplicationTarget target) { + // pkg-private for integration tests + void startReplication(final SegmentReplicationTarget target) { final long replicationId = onGoingReplications.start(target, recoverySettings.activityTimeout()); - logger.trace(() -> new ParameterizedMessage("Starting replication {}", replicationId)); threadPool.generic().execute(new ReplicationRunner(replicationId)); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 8093b6aee88f9..cc51082639cdb 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -29,6 +29,7 @@ import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -103,7 +104,10 @@ final void publish(IndexShard indexShard) { // we have to execute under the system context so that if security is enabled the sync is authorized threadContext.markAsSystemContext(); PublishCheckpointRequest request = new PublishCheckpointRequest(indexShard.getLatestReplicationCheckpoint()); + final ReplicationCheckpoint checkpoint = request.getCheckpoint(); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); + final ReplicationTimer timer = new ReplicationTimer(); + timer.start(); transportService.sendChildRequest( clusterService.localNode(), transportPrimaryAction, @@ -123,12 +127,23 @@ public String executor() { @Override public void handleResponse(ReplicationResponse response) { + timer.stop(); + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] Completed publishing checkpoint [{}], timing: {}", + indexShard.shardId().getId(), + checkpoint, + timer.time() + ) + ); task.setPhase("finished"); taskManager.unregister(task); } @Override public void handleException(TransportException e) { + timer.stop(); + logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { @@ -151,6 +166,13 @@ public void handleException(TransportException e) { } } 
); + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] Publishing replication checkpoint [{}]", + checkpoint.getShardId().getId(), + checkpoint + ) + ); } } @@ -168,7 +190,7 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh Objects.requireNonNull(request); Objects.requireNonNull(replica); ActionListener.completeWith(listener, () -> { - logger.trace("Checkpoint received on replica {}", request); + logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); if (request.getCheckpoint().getShardId().equals(replica.shardId())) { replicationService.onNewCheckpoint(request.getCheckpoint(), replica); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index abcef1bd91944..6a4e5e449f178 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -23,7 +23,7 @@ * * @opensearch.internal */ -public class ReplicationCheckpoint implements Writeable { +public class ReplicationCheckpoint implements Writeable, Comparable<ReplicationCheckpoint> { private final ShardId shardId; private final long primaryTerm; @@ -107,6 +107,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(segmentInfosVersion); } + @Override + public int compareTo(ReplicationCheckpoint other) { + return this.isAheadOf(other) ? -1 : 1; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -125,10 +130,13 @@ public int hashCode() { } /** - * Checks if other is aheadof current replication point by comparing segmentInfosVersion. Returns true for null + * Checks if the current replication checkpoint is ahead of the `other` replication checkpoint by first comparing + * primaryTerm, followed by segmentInfosVersion. Returns true when `other` is null.
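+ * For example, a checkpoint with a higher primaryTerm is ahead regardless of segmentInfosVersion; when the primary terms are equal, the checkpoint with the higher segmentInfosVersion is ahead.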
*/ public boolean isAheadOf(@Nullable ReplicationCheckpoint other) { - return other == null || segmentInfosVersion > other.getSegmentInfosVersion() || primaryTerm > other.getPrimaryTerm(); + return other == null + || primaryTerm > other.getPrimaryTerm() + || (primaryTerm == other.getPrimaryTerm() && segmentInfosVersion > other.getSegmentInfosVersion()); } @Override diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index bc5f17759d498..d3f0912cab638 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -39,10 +39,12 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -219,6 +221,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.UnaryOperator; import java.util.stream.Collectors; @@ -338,6 +341,7 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; final NamedWriteableRegistry namedWriteableRegistry; + private final AtomicReference runnableTaskListener; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -447,7 +451,8 @@ protected Node( final List> executorBuilders = pluginsService.getExecutorBuilders(settings); - final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); + runnableTaskListener = new AtomicReference<>(); + final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -1095,6 +1100,11 @@ public Node start() throws NodeValidationException { TransportService transportService = injector.getInstance(TransportService.class); transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); + + TaskResourceTrackingService taskResourceTrackingService = injector.getInstance(TaskResourceTrackingService.class); + transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); + runnableTaskListener.set(taskResourceTrackingService); + transportService.start(); assert localNodeFactory.getNode() != null; assert transportService.getLocalNode().equals(localNodeFactory.getNode()) diff --git 
a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java new file mode 100644 index 0000000000000..fca6745167bb4 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Restores data from remote store + * + * @opensearch.api + */ +public final class RestRestoreRemoteStoreAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(POST, "/_remotestore/_restore")); + } + + @Override + public String getName() { + return "restore_remote_store_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + RestoreRemoteStoreRequest restoreRemoteStoreRequest = new RestoreRemoteStoreRequest(); + restoreRemoteStoreRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", restoreRemoteStoreRequest.masterNodeTimeout()) + ); + restoreRemoteStoreRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); + request.applyContentParser(p -> restoreRemoteStoreRequest.source(p.mapOrdered())); + return channel -> client.admin().cluster().restoreRemoteStore(restoreRemoteStoreRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 0995497eba50e..80e025a3651a8 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -185,12 +185,10 @@ import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.InternalAvg; import org.opensearch.search.aggregations.metrics.InternalCardinality; import org.opensearch.search.aggregations.metrics.InternalExtendedStats; -import org.opensearch.search.aggregations.metrics.InternalGeoBounds; import org.opensearch.search.aggregations.metrics.InternalGeoCentroid; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -664,12 +662,6 @@ private ValuesSourceRegistry registerAggregations(List plugins) { .addResultReader(InternalTopHits::new), builder ); - registerAggregation( - new 
AggregationSpec(GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new, GeoBoundsAggregationBuilder.PARSER) - .addResultReader(InternalGeoBounds::new) - .setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators), - builder - ); registerAggregation( new AggregationSpec( GeoCentroidAggregationBuilder.NAME, diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 747b13b031824..4bd95da193668 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -42,7 +42,11 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.ListPitInfo; +import org.opensearch.action.search.PitSearchContextIdForNode; import org.opensearch.action.search.PitSearchContextIdForNode; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; @@ -142,6 +146,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -946,6 +951,21 @@ public PitReaderContext getPitReaderContext(ShardSearchContextId id) { return null; } + /** + * This method returns all active PIT reader contexts + */ + public List getAllPITReaderContexts() { + final List pitContextsInfo = new ArrayList<>(); + for (ReaderContext ctx : activeReaders.values()) { + if (ctx instanceof PitReaderContext) { + final PitReaderContext context = (PitReaderContext) ctx; + ListPitInfo pitInfo = new ListPitInfo(context.getPitId(), context.getCreationTime(), context.getKeepAlive()); + pitContextsInfo.add(pitInfo); + } + } + return pitContextsInfo; + } + final SearchContext createContext( ReaderContext readerContext, ShardSearchRequest request, @@ -1102,22 +1122,6 @@ public DeletePitResponse freeReaderContextsIfFound(List deleteResults = new ArrayList<>(); - for (ReaderContext readerContext : activeReaders.values()) { - if (readerContext instanceof PitReaderContext) { - boolean result = freeReaderContext(readerContext.id()); - DeletePitInfo deletePitInfo = new DeletePitInfo(result, ((PitReaderContext) readerContext).getPitId()); - deleteResults.add(deletePitInfo); - } - } - return new DeletePitResponse(deleteResults); - } - private long getKeepAlive(ShardSearchRequest request) { if (request.scroll() != null) { return getScrollKeepAlive(request.scroll()); diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java index 01e0f95b0d750..382455093309d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java @@ -78,8 +78,6 @@ import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBounds; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import 
org.opensearch.search.aggregations.metrics.GeoCentroid; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.Max; @@ -364,13 +362,6 @@ public static TopHitsAggregationBuilder topHits(String name) { return new TopHitsAggregationBuilder(name); } - /** - * Create a new {@link GeoBounds} aggregation with the given name. - */ - public static GeoBoundsAggregationBuilder geoBounds(String name) { - return new GeoBoundsAggregationBuilder(name); - } - /** * Create a new {@link GeoCentroid} aggregation with the given name. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index 8d036503d1330..f36c4620d5b33 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -54,7 +54,6 @@ import org.opensearch.search.aggregations.metrics.InternalAvg; import org.opensearch.search.aggregations.metrics.InternalCardinality; import org.opensearch.search.aggregations.metrics.InternalExtendedStats; -import org.opensearch.search.aggregations.metrics.InternalGeoBounds; import org.opensearch.search.aggregations.metrics.InternalGeoCentroid; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -191,10 +190,6 @@ public static boolean hasValue(InternalExtendedStats agg) { return agg.getCount() > 0; } - public static boolean hasValue(InternalGeoBounds agg) { - return (agg.topLeft() == null && agg.bottomRight() == null) == false; - } - public static boolean hasValue(InternalGeoCentroid agg) { return agg.centroid() != null && agg.count() > 0; } diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java index 1b2400bdadfa1..98e84136a8847 100644 --- a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java @@ -61,7 +61,7 @@ public Releasable updatePitIdAndKeepAlive(long keepAliveInMillis, String pitId, } public long getCreationTime() { - return this.creationTime.get(); + return this.creationTime.get() == null ? 
0 : this.creationTime.get(); } public void setCreationTime(final long creationTime) { diff --git a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java index fe644e44f297f..898aa2e2c6745 100644 --- a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java @@ -117,6 +117,10 @@ protected long nowInMillis() { return indexShard.getThreadPool().relativeTimeInMillis(); } + public long getKeepAlive() { + return keepAlive.get(); + } + @Override public final void close() { if (closed.compareAndSet(false, true)) { diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index a4d7b2bed516c..828c2f8c78d69 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -51,6 +51,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.ToXContent; import org.opensearch.index.Index; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; @@ -71,6 +72,7 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; +import java.util.Collections; import java.util.Arrays; import java.util.Map; import java.util.function.Function; @@ -85,6 +87,8 @@ * @opensearch.internal */ public class ShardSearchRequest extends TransportRequest implements IndicesRequest { + public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + private final String clusterAlias; private final ShardId shardId; private final int numberOfShards; @@ -501,7 +505,7 @@ public String getClusterAlias() { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers, this::getMetadataSupplier); } @Override @@ -510,6 +514,16 @@ public String getDescription() { return "shardId[" + shardId() + "]"; } + public String getMetadataSupplier() { + StringBuilder sb = new StringBuilder(); + if (source != null) { + sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + } else { + sb.append("source[]"); + } + return sb.toString(); + } + public Rewriteable getRewriteable() { return new RequestRewritable(this); } diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java b/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java index ae2f9e8fab989..ca74942decb50 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java @@ -123,7 +123,7 @@ public IndicesOptions indicesOptions() { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers, this::getMetadataSupplier); } public String 
getDescription() { @@ -137,4 +137,7 @@ public String getDescription() { return sb.toString(); } + public String getMetadataSupplier() { + return shardSearchRequest().getMetadataSupplier(); + } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index a14de23d81d09..417498467622a 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -116,7 +116,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.snapshots.SnapshotUtils.filterIndices; @@ -227,7 +227,7 @@ public ClusterState execute(ClusterState currentState) { logger.warn("Remote store restore is not supported for non-existent index. Skipping: {}", index); continue; } - if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE, false)) { + if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) { throw new IllegalStateException( "cannot restore index [" diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index 522ecac5ef698..d052d374ef1f0 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -34,7 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionResponse; +import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.xcontent.ToXContent; @@ -47,6 +49,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; /** * Current task information @@ -78,6 +81,15 @@ public class Task { private final Map<Long, List<ThreadResourceInfo>> resourceStats; + private final List<NotifyOnceListener<Task>> resourceTrackingCompletionListeners; + + /** + * Keeps track of the number of active resource tracking threads for this task. It is initialized to 1 to track + * the task's own/self thread. When this value becomes 0, all threads have been marked inactive and the resource + * tracking can be stopped for this task. + */ + private final AtomicInteger numActiveResourceTrackingThreads = new AtomicInteger(1); + /** * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style).
*/ @@ -89,7 +101,18 @@ public class Task { private final long startTimeNanos; public Task(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers) { - this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers, new ConcurrentHashMap<>()); + this( + id, + type, + action, + description, + parentTask, + System.currentTimeMillis(), + System.nanoTime(), + headers, + new ConcurrentHashMap<>(), + new ArrayList<>() + ); } public Task( @@ -101,7 +124,8 @@ public Task( long startTime, long startTimeNanos, Map<String, String> headers, - ConcurrentHashMap<Long, List<ThreadResourceInfo>> resourceStats + ConcurrentHashMap<Long, List<ThreadResourceInfo>> resourceStats, + List<NotifyOnceListener<Task>> resourceTrackingCompletionListeners ) { this.id = id; this.type = type; @@ -112,6 +136,7 @@ public Task( this.startTimeNanos = startTimeNanos; this.headers = headers; this.resourceStats = resourceStats; + this.resourceTrackingCompletionListeners = resourceTrackingCompletionListeners; } /** @@ -291,7 +316,8 @@ public void startThreadResourceTracking(long threadId, ResourceStatsType statsTy ); } } - threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics)); + threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); + incrementResourceTrackingThreads(); } /** @@ -331,6 +357,7 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { threadResourceInfo.setActive(false); threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics); + decrementResourceTrackingThreads(); return; } } @@ -338,6 +365,17 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); } + /** + * Individual tasks can override this if they want to support task resource tracking. We just need to make sure that + * the ThreadPool on which the task runs has a runnable wrapper similar to + * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newResizable} + * + * @return true if resource tracking is supported by the task + */ + public boolean supportsResourceTracking() { + return false; + } + /** * Report of the internal status of a task. These can vary wildly from task * to task because each task is implemented differently but we should try @@ -370,4 +408,63 @@ public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOE throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } } + + /** + * Registers a task resource tracking completion listener on this task if resource tracking is still active. + * Returns true on successful subscription, false otherwise. + */ + public boolean addResourceTrackingCompletionListener(NotifyOnceListener<Task> listener) { + if (numActiveResourceTrackingThreads.get() > 0) { + resourceTrackingCompletionListeners.add(listener); + return true; + } + + return false; + } + + /** + * Increments the number of active resource tracking threads. + * + * @return the number of active resource tracking threads. + */ + public int incrementResourceTrackingThreads() { + return numActiveResourceTrackingThreads.incrementAndGet(); + } + + /** + * Decrements the number of active resource tracking threads. + * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's + * own thread as complete).
When the active thread count becomes zero, the onTaskResourceTrackingCompleted method + * is called exactly once on all registered listeners. + * + * Since a task is unregistered after the message is processed, it implies that the threads responsible for producing + * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). + * This ensures that the number of active threads doesn't drop to zero prematurely. + * + * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed + * with the response, any thread usage info captured after the task is unregistered may be irrelevant. + * + * @return the number of active resource tracking threads. + */ + public int decrementResourceTrackingThreads() { + int count = numActiveResourceTrackingThreads.decrementAndGet(); + + if (count == 0) { + List<Exception> listenerExceptions = new ArrayList<>(); + resourceTrackingCompletionListeners.forEach(listener -> { + try { + listener.onResponse(this); + } catch (Exception e1) { + try { + listener.onFailure(e1); + } catch (Exception e2) { + listenerExceptions.add(e2); + } + } + }); + ExceptionsHelper.maybeThrowRuntimeAndSuppress(listenerExceptions); + } + + return count; + } } diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 9bb931ea7f0aa..334cde81dfb6a 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -44,12 +44,15 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; +import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; @@ -57,6 +60,7 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.tasks.consumer.TopNSearchTasksLogger; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpChannel; @@ -74,6 +78,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -91,7 +96,18 @@ public class TaskManager implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); - /** Rest headers that are copied to the task */ + public static final String TASK_RESOURCE_CONSUMERS_ATTRIBUTES = "task_resource_consumers.enabled"; + + public static final Setting<Boolean> TASK_RESOURCE_CONSUMERS_ENABLED = Setting.boolSetting( + TASK_RESOURCE_CONSUMERS_ATTRIBUTES, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Rest headers that are copied to the task + */ private final List<String> taskHeaders; private final
ThreadPool threadPool; @@ -105,6 +121,7 @@ public class TaskManager implements ClusterStateApplier { private final Map banedParents = new ConcurrentHashMap<>(); private TaskResultsService taskResultsService; + private final SetOnce taskResourceTrackingService = new SetOnce<>(); private volatile DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES; @@ -112,10 +129,26 @@ public class TaskManager implements ClusterStateApplier { private final Map channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); private final SetOnce cancellationService = new SetOnce<>(); + private volatile boolean taskResourceConsumersEnabled; + private final Set> taskResourceConsumer; + + public static TaskManager createTaskManagerWithClusterSettings( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { + final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders); + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_CONSUMERS_ENABLED, taskManager::setTaskResourceConsumersEnabled); + return taskManager; + } + public TaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { this.threadPool = threadPool; this.taskHeaders = new ArrayList<>(taskHeaders); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.taskResourceConsumersEnabled = TASK_RESOURCE_CONSUMERS_ENABLED.get(settings); + this.taskResourceConsumer = Set.of(new TopNSearchTasksLogger(settings)); } public void setTaskResultsService(TaskResultsService taskResultsService) { @@ -127,6 +160,14 @@ public void setTaskCancellationService(TaskCancellationService taskCancellationS this.cancellationService.set(taskCancellationService); } + public void setTaskResourceTrackingService(TaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService.set(taskResourceTrackingService); + } + + public void setTaskResourceConsumersEnabled(boolean taskResourceConsumersEnabled) { + this.taskResourceConsumersEnabled = taskResourceConsumersEnabled; + } + /** * Registers a task without parent task */ @@ -152,6 +193,30 @@ public Task register(String type, String action, TaskAwareRequest request) { logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription()); } + if (task.supportsResourceTracking()) { + boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { + @Override + protected void innerOnResponse(Task task) { + // Stop tracking the task once the last thread has been marked inactive. + if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { + taskResourceTrackingService.get().stopTracking(task); + } + } + + @Override + protected void innerOnFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }); + + if (success == false) { + logger.debug( + "failed to register a completion listener as task resource tracking has already completed [taskId={}]", + task.getId() + ); + } + } + if (task instanceof CancellableTask) { registerCancellableTask(task); } else { @@ -204,6 +269,20 @@ public void cancel(CancellableTask task, String reason, Runnable listener) { */ public Task unregister(Task task) { logger.trace("unregister task for id: {}", task.getId()); + + // Decrement the task's self-thread as part of unregistration. 
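+ // For tasks that support resource tracking, attach a completion listener so that tracking stops once the last active thread for the task is marked inactive (see Task#decrementResourceTrackingThreads).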
+ task.decrementResourceTrackingThreads(); + + if (taskResourceConsumersEnabled) { + for (Consumer taskConsumer : taskResourceConsumer) { + try { + taskConsumer.accept(task); + } catch (Exception e) { + logger.error("error encountered when updating the consumer", e); + } + } + } + if (task instanceof CancellableTask) { CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); if (holder != null) { @@ -363,6 +442,7 @@ public int getBanCount() { * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. *

* This method is called when a parent task that has children is cancelled. + * * @return a list of pending cancellable child tasks */ public List setBan(TaskId parentTaskId, String reason) { @@ -450,6 +530,18 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); } + /** + * Takes actions when a task is registered and its execution starts + * + * @param task getting executed. + * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns + */ + public ThreadContext.StoredContext taskExecutionStarted(Task task) { + if (taskResourceTrackingService.get() == null) return () -> {}; + + return taskResourceTrackingService.get().startTracking(task); + } + private static class CancellableTaskHolder { private final CancellableTask task; private boolean finished = false; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java new file mode 100644 index 0000000000000..c3cad117390e4 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import com.sun.management.ThreadMXBean; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; + +/** + * Service that helps track resource usage of tasks running on a node. 
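+ * It implements {@link RunnableTaskExecutionListener} so that thread pools can notify it when a task's runnable starts and finishes on a worker thread.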
+ */ +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class TaskResourceTrackingService implements RunnableTaskExecutionListener { + + private static final Logger logger = LogManager.getLogger(TaskResourceTrackingService.class); + + public static final Setting<Boolean> TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting( + "task_resource_tracking.enabled", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final String TASK_ID = "TASK_ID"; + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + private final ConcurrentMapLong<Task> resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final ThreadPool threadPool; + private volatile boolean taskResourceTrackingEnabled; + + @Inject + public TaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); + this.threadPool = threadPool; + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); + } + + public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) { + this.taskResourceTrackingEnabled = taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingEnabled() { + return taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingSupported() { + return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled(); + } + + /** + * Executes logic only if task supports resource tracking and resource tracking setting is enabled. + *

+ * 1. Starts tracking the task in map of resourceAwareTasks. + * 2. Adds Task Id in thread context to make sure it's available while task is processed across multiple threads. + * + * @param task for which resources needs to be tracked + * @return Autocloseable stored context to restore ThreadContext to the state before this method changed it. + */ + public ThreadContext.StoredContext startTracking(Task task) { + if (task.supportsResourceTracking() == false + || isTaskResourceTrackingEnabled() == false + || isTaskResourceTrackingSupported() == false) { + return () -> {}; + } + + logger.debug("Starting resource tracking for task: {}", task.getId()); + resourceAwareTasks.put(task.getId(), task); + return addTaskIdToThreadContext(task); + } + + /** + * Stops tracking task registered earlier for tracking. + *

+ * It deliberately skips the feature-enabled check to avoid any issues if the setting was disabled while the task was in progress. + *

+ * It's also responsible for stopping tracking of the current thread's resources against this task if not already done. + * This happens when the thread executing the request logic itself calls the unregister method. So in this case, unregister + * happens before the runnable finishes. + * + * @param task task which has finished and doesn't need resource tracking. + */ + public void stopTracking(Task task) { + logger.debug("Stopping resource tracking for task: {}", task.getId()); + try { + if (isCurrentThreadWorkingOnTask(task)) { + taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); + } + } catch (Exception e) { + logger.warn("Failed while trying to mark the task execution on current thread completed.", e); + assert false; + } finally { + resourceAwareTasks.remove(task.getId()); + } + } + + /** + * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these + * and how much resources these have consumed till now. + * + * @param tasks for which resource stats need to be refreshed. + */ + public void refreshResourceStats(Task... tasks) { + if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { + return; + } + + for (Task task : tasks) { + if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { + refreshResourceStats(task); + } + } + } + + private void refreshResourceStats(Task resourceAwareTask) { + try { + logger.debug("Refreshing resource stats for Task: {}", resourceAwareTask.getId()); + List<Long> threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); + threadsWorkingOnTask.forEach( + threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) + ); + } catch (IllegalStateException e) { + logger.debug("Resource stats already updated."); + } + + } + + /** + * Called when a thread starts working on a task's runnable. + * + * @param taskId of the task for which runnable is starting + * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this + * thread + */ + @Override + public void taskExecutionStartedOnThread(long taskId, long threadId) { + try { + final Task task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("Task execution started on thread. Task: {}, Thread: {}", taskId, threadId); + task.startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); + assert false; + } + + } + + /** + * Called when a thread finishes working on a task's runnable. + * + * @param taskId of the task for which runnable is complete + * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread + */ + @Override + public void taskExecutionFinishedOnThread(long taskId, long threadId) { + try { + final Task task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("Task execution finished on thread.
Task: {}, Thread: {}", taskId, threadId); + task.stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); + assert false; + } + } + + public Map getResourceAwareTasks() { + return Collections.unmodifiableMap(resourceAwareTasks); + } + + private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { + ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( + ResourceStats.MEMORY, + threadMXBean.getThreadAllocatedBytes(threadId) + ); + ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); + return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; + } + + private boolean isCurrentThreadWorkingOnTask(Task task) { + long threadId = Thread.currentThread().getId(); + List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); + + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + return true; + } + } + return false; + } + + private List getThreadsWorkingOnTask(Task task) { + List activeThreads = new ArrayList<>(); + for (List threadResourceInfos : task.getResourceStats().values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + activeThreads.add(threadResourceInfo.getThreadId()); + } + } + } + return activeThreads; + } + + /** + * Adds Task Id in the ThreadContext. + *
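+ * The transient entry is read back by TaskAwareRunnable on pool threads, which is how a runnable is associated with its task.
+ *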
+ * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext + * as well. + * + * @param task for which Task Id needs to be added in ThreadContext. + * @return StoredContext reference to restore the ThreadContext from which we created a new one. + * Caller can call context.restore() to get the existing ThreadContext back. + */ + private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { + ThreadContext threadContext = threadPool.getThreadContext(); + ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); + threadContext.putTransient(TASK_ID, task.getId()); + return storedContext; + } + +} diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java index a0b38649b6420..de49d86d1d5c4 100644 --- a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java @@ -17,11 +17,13 @@ * @opensearch.internal */ public class ThreadResourceInfo { + private final long threadId; private volatile boolean isActive = true; private final ResourceStatsType statsType; private final ResourceUsageInfo resourceUsageInfo; - public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + public ThreadResourceInfo(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + this.threadId = threadId; this.statsType = statsType; this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics); } @@ -45,12 +47,16 @@ public ResourceStatsType getStatsType() { return statsType; } + public long getThreadId() { + return threadId; + } + public ResourceUsageInfo getResourceUsageInfo() { return resourceUsageInfo; } @Override public String toString() { - return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive; + return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive + ", threadId=" + threadId; } } diff --git a/server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java b/server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java new file mode 100644 index 0000000000000..1755db3ab4ae8 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks.consumer; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.logging.OpenSearchLogMessage; + +import java.util.HashMap; +import java.util.Map; + +/** + * Search shard task information that will be extracted from Task and converted into + * format that will be logged + * + * @opensearch.internal + */ +public final class SearchShardTaskDetailsLogMessage extends OpenSearchLogMessage { + SearchShardTaskDetailsLogMessage(SearchShardTask task) { + super(prepareMap(task), message(task)); + } + + private static Map prepareMap(SearchShardTask task) { + Map messageFields = new HashMap<>(); + messageFields.put("taskId", task.getId()); + messageFields.put("type", task.getType()); + messageFields.put("action", task.getAction()); + messageFields.put("description", task.getDescription()); + messageFields.put("start_time_millis", task.getStartTime()); + messageFields.put("parentTaskId", task.getParentTaskId()); + messageFields.put("resource_stats", task.getResourceStats()); + messageFields.put("metadata", task.getTaskMetadata()); + return messageFields; + } + + // Message will be used in plaintext logs + private static String message(SearchShardTask task) { + StringBuilder sb = new StringBuilder(); + sb.append("taskId:[") + .append(task.getId()) + .append("], ") + .append("type:[") + .append(task.getType()) + .append("], ") + .append("action:[") + .append(task.getAction()) + .append("], ") + .append("description:[") + .append(task.getDescription()) + .append("], ") + .append("start_time_millis:[") + .append(task.getStartTime()) + .append("], ") + .append("resource_stats:[") + .append(task.getResourceStats()) + .append("], ") + .append("metadata:[") + .append(task.getTaskMetadata()) + .append("]"); + return sb.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java b/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java new file mode 100644 index 0000000000000..dd7e200d7f4b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks.consumer; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.Task; + +import java.util.Comparator; +import java.util.PriorityQueue; +import java.util.Queue; +import java.util.function.Consumer; + +/** + * A simple listener that logs resource information of high memory consuming search tasks + * + * @opensearch.internal + */ +public class TopNSearchTasksLogger implements Consumer { + public static final String TASK_DETAILS_LOG_PREFIX = "task.detailslog"; + public static final String LOG_TOP_QUERIES_SIZE = "cluster.task.consumers.top_n.size"; + public static final String LOG_TOP_QUERIES_FREQUENCY = "cluster.task.consumers.top_n.frequency"; + + private static final Logger SEARCH_TASK_DETAILS_LOGGER = LogManager.getLogger(TASK_DETAILS_LOG_PREFIX + ".search"); + + // number of memory expensive search tasks that are logged + private static final Setting LOG_TOP_QUERIES_SIZE_SETTING = Setting.intSetting( + LOG_TOP_QUERIES_SIZE, + 10, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + // frequency in which memory expensive search tasks are logged + private static final Setting LOG_TOP_QUERIES_FREQUENCY_SETTING = Setting.timeSetting( + LOG_TOP_QUERIES_FREQUENCY, + TimeValue.timeValueSeconds(60L), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final int topQueriesSize; + private final long topQueriesLogFrequencyInNanos; + private final Queue> topQueries; + private long lastReportedTimeInNanos = System.nanoTime(); + + public TopNSearchTasksLogger(Settings settings) { + this.topQueriesSize = LOG_TOP_QUERIES_SIZE_SETTING.get(settings); + this.topQueriesLogFrequencyInNanos = LOG_TOP_QUERIES_FREQUENCY_SETTING.get(settings).getNanos(); + this.topQueries = new PriorityQueue<>(topQueriesSize, Comparator.comparingLong(Tuple::v1)); + } + + /** + * Called when task is unregistered and task has resource stats present. 
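+ * Only SearchShardTask instances are recorded; other task types fall through accept() untouched.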
+ */ + @Override + public void accept(Task task) { + if (task instanceof SearchShardTask) { + recordSearchTask((SearchShardTask) task); + } + } + + private synchronized void recordSearchTask(SearchShardTask searchTask) { + final long memory_in_bytes = searchTask.getTotalResourceUtilization(ResourceStats.MEMORY); + if (System.nanoTime() - lastReportedTimeInNanos >= topQueriesLogFrequencyInNanos) { + publishTopNEvents(); + lastReportedTimeInNanos = System.nanoTime(); + } + if (topQueries.size() >= topQueriesSize && topQueries.peek().v1() < memory_in_bytes) { + // evict the element + topQueries.poll(); + } + if (topQueries.size() < topQueriesSize) { + topQueries.offer(new Tuple<>(memory_in_bytes, searchTask)); + } + } + + private void publishTopNEvents() { + logTopResourceConsumingQueries(); + topQueries.clear(); + } + + private void logTopResourceConsumingQueries() { + for (Tuple topQuery : topQueries) { + SEARCH_TASK_DETAILS_LOGGER.info(new SearchShardTaskDetailsLogMessage(topQuery.v2())); + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/consumer/package-info.java b/server/src/main/java/org/opensearch/tasks/consumer/package-info.java new file mode 100644 index 0000000000000..40219a1cead5b --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/consumer/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Support for adding consumers to consume task related information. + */ +package org.opensearch.tasks.consumer; diff --git a/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java index fd9ca1d3b5f3b..23f8a8979f821 100644 --- a/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java @@ -20,6 +20,7 @@ import java.util.Locale; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicReference; /** * A builder for resizable executors. @@ -30,12 +31,26 @@ public final class ResizableExecutorBuilder extends ExecutorBuilder sizeSetting; private final Setting queueSizeSetting; + private final AtomicReference runnableTaskListener; - ResizableExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) { - this(settings, name, size, queueSize, "thread_pool." + name); + ResizableExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final AtomicReference runnableTaskListener + ) { + this(settings, name, size, queueSize, "thread_pool." 
+ name, runnableTaskListener); } - public ResizableExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) { + public ResizableExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final String prefix, + final AtomicReference runnableTaskListener + ) { super(name); final String sizeKey = settingsKey(prefix, "size"); this.sizeSetting = new Setting<>( @@ -50,6 +65,7 @@ public ResizableExecutorBuilder(final Settings settings, final String name, fina queueSize, new Setting.Property[] { Setting.Property.NodeScope, Setting.Property.Dynamic } ); + this.runnableTaskListener = runnableTaskListener; } @Override @@ -77,7 +93,8 @@ ThreadPool.ExecutorHolder build(final ResizableExecutorSettings settings, final size, queueSize, threadFactory, - threadContext + threadContext, + runnableTaskListener ); final ThreadPool.Info info = new ThreadPool.Info( name(), diff --git a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java new file mode 100644 index 0000000000000..03cd66f80d044 --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.threadpool; + +/** + * Listener for events when a runnable execution starts or finishes on a thread and is aware of the task for which the + * runnable is associated to. + */ +public interface RunnableTaskExecutionListener { + + /** + * Sends an update when ever a task's execution start on a thread + * + * @param taskId of task which has started + * @param threadId of thread which is executing the task + */ + void taskExecutionStartedOnThread(long taskId, long threadId); + + /** + * + * Sends an update when task execution finishes on a thread + * + * @param taskId of task which has finished + * @param threadId of thread which executed the task + */ + void taskExecutionFinishedOnThread(long taskId, long threadId); +} diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java new file mode 100644 index 0000000000000..183b9b2f4cf9a --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.threadpool; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.WrappedRunnable; +import org.opensearch.tasks.TaskManager; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + +import static java.lang.Thread.currentThread; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +/** + * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to + * entities listening to the events. + * + * It's able to associate runnable with a task with the help of task Id available in thread context. + */ +public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + + private final Runnable original; + private final ThreadContext threadContext; + private final AtomicReference runnableTaskListener; + + public TaskAwareRunnable( + final ThreadContext threadContext, + final Runnable original, + final AtomicReference runnableTaskListener + ) { + this.original = original; + this.threadContext = threadContext; + this.runnableTaskListener = runnableTaskListener; + } + + @Override + public void onFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + + @Override + public boolean isForceExecution() { + return original instanceof AbstractRunnable && ((AbstractRunnable) original).isForceExecution(); + } + + @Override + public void onRejection(final Exception e) { + if (original instanceof AbstractRunnable) { + ((AbstractRunnable) original).onRejection(e); + } else { + ExceptionsHelper.reThrowIfNotNull(e); + } + } + + @Override + protected void doRun() throws Exception { + assert runnableTaskListener.get() != null : "Listener should be attached"; + Long taskId = threadContext.getTransient(TASK_ID); + if (Objects.nonNull(taskId)) { + runnableTaskListener.get().taskExecutionStartedOnThread(taskId, currentThread().getId()); + } else { + logger.debug("Task Id not available in thread context. Skipping update. Thread Info: {}", Thread.currentThread()); + } + try { + original.run(); + } finally { + if (Objects.nonNull(taskId)) { + runnableTaskListener.get().taskExecutionFinishedOnThread(taskId, currentThread().getId()); + } + } + } + + @Override + public Runnable unwrap() { + return original; + } +} diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index cc8d81d2a7b4b..928b4871590c6 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -69,6 +69,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -200,6 +201,14 @@ public Collection builders() { ); public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { + this(settings, null, customBuilders); + } + + public ThreadPool( + final Settings settings, + final AtomicReference runnableTaskListener, + final ExecutorBuilder... 
customBuilders + ) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -211,8 +220,11 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui builders.put(Names.WRITE, new FixedExecutorBuilder(settings, Names.WRITE, allocatedProcessors, 10000)); builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, allocatedProcessors, 1000)); builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); - builders.put(Names.SEARCH, new ResizableExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000)); - builders.put(Names.SEARCH_THROTTLED, new ResizableExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100)); + builders.put( + Names.SEARCH, + new ResizableExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000, runnableTaskListener) + ); + builders.put(Names.SEARCH_THROTTLED, new ResizableExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, runnableTaskListener)); builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded // the assumption here is that the listeners should be very lightweight on the listeners side diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index b65b72b745a01..dbd6f651a6b3c 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -37,6 +37,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -86,6 +87,8 @@ public Request newRequest(StreamInput in) throws IOException { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = taskManager.register(channel.getChannelType(), action, request); + ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); + Releasable unregisterTask = () -> taskManager.unregister(task); try { if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { @@ -104,6 +107,7 @@ public void processMessageReceived(Request request, TransportChannel channel) th unregisterTask = null; } finally { Releasables.close(unregisterTask); + contextToRestore.restore(); } } diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 1a280f2475e5d..aaba06196bc59 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -210,7 +210,7 @@ public TransportService( setTracerLogInclude(TransportSettings.TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); - taskManager = createTaskManager(settings, threadPool, taskHeaders); + taskManager = createTaskManager(settings, clusterSettings, threadPool, taskHeaders); 
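+ // Note: clusterSettings may be null here (e.g. in lightweight test setups); the createTaskManager
+ // override below then falls back to the plain TaskManager constructor.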
this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); @@ -246,8 +246,17 @@ public TaskManager getTaskManager() { return taskManager; } - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { - return new TaskManager(settings, threadPool, taskHeaders); + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { + if (clusterSettings != null) { + return TaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); + } else { + return new TaskManager(settings, threadPool, taskHeaders); + } } /** diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java index 7756eb12bb3f4..9bd44185baf24 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java @@ -75,6 +75,9 @@ public synchronized void onTaskUnregistered(Task task) { @Override public void waitForTaskCompletion(Task task) {} + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} + public synchronized List> getEvents() { return Collections.unmodifiableList(new ArrayList<>(events)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java new file mode 100644 index 0000000000000..654d5cde7bb00 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -0,0 +1,653 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.node.tasks; + +import com.sun.management.ThreadMXBean; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; +import org.opensearch.action.NotifyOnceListener; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.nodes.BaseNodeRequest; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskId; +import org.opensearch.tasks.TaskInfo; +import org.opensearch.test.tasks.MockTaskManager; +import org.opensearch.test.tasks.MockTaskManagerListener; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class ResourceAwareTasksTests extends TaskManagerTestCase { + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + public static class ResourceAwareNodeRequest extends BaseNodeRequest { + protected String requestName; + + public ResourceAwareNodeRequest() { + super(); + } + + public ResourceAwareNodeRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + } + + public ResourceAwareNodeRequest(NodesRequest request) { + requestName = request.requestName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(requestName); + } + + @Override + public String getDescription() { + return "ResourceAwareNodeRequest[" + requestName + "]"; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return false; + } + + @Override + public boolean supportsResourceTracking() { + return true; + } + }; + } + } + + public static class NodesRequest extends BaseNodesRequest { + private final String requestName; + + private NodesRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + } + + public NodesRequest(String requestName, String... 
nodesIds) { + super(nodesIds); + this.requestName = requestName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(requestName); + } + + @Override + public String getDescription() { + return "NodesRequest[" + requestName + "]"; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } + }; + } + } + + /** + * Simulates a task which executes work on search executor. + */ + class ResourceAwareNodesAction extends AbstractTestNodesAction { + private final TaskTestContext taskTestContext; + private final boolean blockForCancellation; + + ResourceAwareNodesAction( + String actionName, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + boolean shouldBlock, + TaskTestContext taskTestContext + ) { + super(actionName, threadPool, clusterService, transportService, NodesRequest::new, ResourceAwareNodeRequest::new); + this.taskTestContext = taskTestContext; + this.blockForCancellation = shouldBlock; + } + + @Override + protected ResourceAwareNodeRequest newNodeRequest(NodesRequest request) { + return new ResourceAwareNodeRequest(request); + } + + @Override + protected NodeResponse nodeOperation(ResourceAwareNodeRequest request, Task task) { + assert task.supportsResourceTracking(); + + AtomicLong threadId = new AtomicLong(); + Future result = threadPool.executor(ThreadPool.Names.SEARCH).submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + + @Override + @SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") + protected void doRun() { + taskTestContext.memoryConsumptionWhenExecutionStarts = threadMXBean.getThreadAllocatedBytes( + Thread.currentThread().getId() + ); + threadId.set(Thread.currentThread().getId()); + + // operationStartValidator will be called just before the task execution. + if (taskTestContext.operationStartValidator != null) { + taskTestContext.operationStartValidator.accept(task, threadId.get()); + } + + // operationFinishedValidator will be called just after all task threads are marked inactive and + // the task is unregistered. 
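+ // addResourceTrackingCompletionListener returns false when tracking has already completed; the
+ // fail(...) branch below turns that race into an explicit test failure.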
+ if (taskTestContext.operationFinishedValidator != null) { + boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { + @Override + protected void innerOnResponse(Task task) { + taskTestContext.operationFinishedValidator.accept(task, threadId.get()); + } + + @Override + protected void innerOnFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }); + + if (success == false) { + fail("failed to register a completion listener as task resource tracking has already completed"); + } + } + + Object[] allocation1 = new Object[1000000]; // 4MB + + if (blockForCancellation) { + // Simulate a job that takes forever to finish + // Using periodic checks method to identify that the task was cancelled + try { + boolean taskCancelled = waitUntil(((CancellableTask) task)::isCancelled); + if (taskCancelled) { + throw new TaskCancelledException("Task Cancelled"); + } else { + fail("It should have thrown an exception"); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + + } + + Object[] allocation2 = new Object[1000000]; // 4MB + } + }); + + try { + result.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e.getCause()); + } + + return new NodeResponse(clusterService.localNode()); + } + + @Override + protected NodeResponse nodeOperation(ResourceAwareNodeRequest request) { + throw new UnsupportedOperationException("the task parameter is required"); + } + } + + private TaskTestContext startResourceAwareNodesAction( + TestNode node, + boolean blockForCancellation, + TaskTestContext taskTestContext, + ActionListener listener + ) { + NodesRequest request = new NodesRequest("Test Request", node.getNodeId()); + + taskTestContext.requestCompleteLatch = new CountDownLatch(1); + + ResourceAwareNodesAction action = new ResourceAwareNodesAction( + "internal:resourceAction", + threadPool, + node.clusterService, + node.transportService, + blockForCancellation, + taskTestContext + ); + taskTestContext.mainTask = action.execute(request, listener); + return taskTestContext; + } + + private static class TaskTestContext { + private Task mainTask; + private CountDownLatch requestCompleteLatch; + private BiConsumer operationStartValidator; + private BiConsumer operationFinishedValidator; + private long memoryConsumptionWhenExecutionStarts; + } + + public void testBasicTaskResourceTracking() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { + // Thread has finished working on the task's runnable + assertEquals(0, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + 
assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + long expectedArrayAllocationOverhead = 2 * 4000000; // Task's memory overhead due to array allocations + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + assertMemoryUsageWithinLimits( + actualTaskMemoryOverhead - taskTestContext.memoryConsumptionWhenExecutionStarts, + expectedArrayAllocationOverhead + ); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingDuringTaskCancellation() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { + // Thread has finished working on the task's runnable + assertEquals(0, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + // allocations are completed before the task is cancelled + long expectedArrayAllocationOverhead = 4000000; // Task's memory overhead due to array allocations + long taskCancellationOverhead = 30000; // Task cancellation overhead ~ 30Kb + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + long expectedOverhead = expectedArrayAllocationOverhead + taskCancellationOverhead; + assertMemoryUsageWithinLimits( + actualTaskMemoryOverhead - taskTestContext.memoryConsumptionWhenExecutionStarts, + expectedOverhead + ); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], true, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Cancel main task + CancelTasksRequest request = new CancelTasksRequest(); + request.setReason("Cancelling request to verify Task 
resource tracking behaviour"); + request.setTaskId(new TaskId(testNodes[0].getNodeId(), taskTestContext.mainTask.getId())); + ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertEquals(0, resourceTasks.size()); + assertNull(throwableReference.get()); + assertNotNull(responseReference.get()); + assertEquals(1, responseReference.get().failureCount()); + assertEquals(TaskCancelledException.class, findActualException(responseReference.get().failures().get(0)).getClass()); + } + + public void testTaskResourceTrackingDisabled() throws Exception { + setup(false, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingDisabledWhileTaskInProgress() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + + testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(false); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { + // Thread has finished working on the task's runnable + assertEquals(0, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + long expectedArrayAllocationOverhead = 2 * 4000000; // Task's memory overhead due to array allocations + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + assertMemoryUsageWithinLimits( + actualTaskMemoryOverhead - taskTestContext.memoryConsumptionWhenExecutionStarts, + 
expectedArrayAllocationOverhead + ); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingEnabledWhileTaskInProgress() throws Exception { + setup(false, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + assertEquals(0, resourceTasks.size()); + + testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( + testNodes[0].transportListTasksAction, + new ListTasksRequest().setActions("internal:resourceAction*").setDetailed(true) + ); + + TaskInfo taskInfo = listTasksResponse.getTasks().get(1); + + assertNotNull(taskInfo.getResourceStats()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); + assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getCpuTimeInNanos() > 0); + assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getMemoryInBytes() > 0); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + 
public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskIdPersistsInThreadContext() throws InterruptedException { + setup(true, true); + + final List taskIdsAddedToThreadContext = new ArrayList<>(); + final List taskIdsRemovedFromThreadContext = new ArrayList<>(); + AtomicLong actualTaskIdInThreadContext = new AtomicLong(-1); + AtomicLong expectedTaskIdInThreadContext = new AtomicLong(-2); + + ((MockTaskManager) testNodes[0].transportService.getTaskManager()).addListener(new MockTaskManagerListener() { + @Override + public void waitForTaskCompletion(Task task) {} + + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) { + if (closeableInvoked) { + taskIdsRemovedFromThreadContext.add(task.getId()); + } else { + taskIdsAddedToThreadContext.add(task.getId()); + } + } + + @Override + public void onTaskRegistered(Task task) {} + + @Override + public void onTaskUnregistered(Task task) { + if (task.getAction().equals("internal:resourceAction[n]")) { + expectedTaskIdInThreadContext.set(task.getId()); + actualTaskIdInThreadContext.set(threadPool.getThreadContext().getTransient(TASK_ID)); + } + } + }); + + TaskTestContext taskTestContext = new TaskTestContext(); + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + taskTestContext.requestCompleteLatch.await(); + + assertEquals(expectedTaskIdInThreadContext.get(), actualTaskIdInThreadContext.get()); + assertThat(taskIdsAddedToThreadContext, containsInAnyOrder(taskIdsRemovedFromThreadContext.toArray())); + } + + private void setup(boolean resourceTrackingEnabled, boolean useMockTaskManager) { + Settings settings = Settings.builder() + .put("task_resource_tracking.enabled", resourceTrackingEnabled) + .put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), useMockTaskManager) + .build(); + setupTestNodes(settings); + connectNodes(testNodes[0]); + + runnableTaskListener.set(testNodes[0].taskResourceTrackingService); + } + + private Throwable findActualException(Exception e) { + Throwable throwable = e.getCause(); + while (throwable.getCause() != null) { + throwable = throwable.getCause(); + } + return throwable; + } + + private void assertTasksRequestFinishedSuccessfully(NodesResponse nodesResponse, Throwable throwable) { + assertNull(throwable); + assertNotNull(nodesResponse); + assertEquals(0, nodesResponse.failureCount()); + } + + private void assertMemoryUsageWithinLimits(long actual, long expected) { + // 5% buffer up to 200 KB to account for classloading overhead. 
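+ // Worked example: expected = 8_000_000 bytes -> 5% is 400_000, capped at 200_000, so actual may exceed expected by at most 200 KB.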
+ long maxOverhead = Math.min(200000, expected * 5 / 100); + assertThat(actual, lessThanOrEqualTo(expected + maxOverhead)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 4383b21aa7e74..68cf69e30f8a6 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -53,14 +53,17 @@ import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -74,6 +77,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import static java.util.Collections.emptyMap; @@ -89,10 +93,12 @@ public abstract class TaskManagerTestCase extends OpenSearchTestCase { protected ThreadPool threadPool; protected TestNode[] testNodes; protected int nodesCount; + protected AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); + runnableTaskListener = new AtomicReference<>(); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); } public void setupTestNodes(Settings settings) { @@ -213,11 +219,16 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { Collections.emptySet() ) { @Override - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { return new MockTaskManager(settings, threadPool, taskHeaders); } else { - return super.createTaskManager(settings, threadPool, taskHeaders); + return super.createTaskManager(settings, clusterSettings, threadPool, taskHeaders); } } }; @@ -225,14 +236,22 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool transportService.start(); clusterService = createClusterService(threadPool, discoveryNode.get()); clusterService.addStateApplier(transportService.getTaskManager()); + taskResourceTrackingService = new TaskResourceTrackingService(settings, clusterService.getClusterSettings(), threadPool); + transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); ActionFilters actionFilters = new ActionFilters(emptySet()); - transportListTasksAction = new 
TransportListTasksAction(clusterService, transportService, actionFilters); + transportListTasksAction = new TransportListTasksAction( + clusterService, + transportService, + actionFilters, + taskResourceTrackingService + ); transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters); transportService.acceptIncomingRequests(); } public final ClusterService clusterService; public final TransportService transportService; + public final TaskResourceTrackingService taskResourceTrackingService; private final SetOnce discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java new file mode 100644 index 0000000000000..b52729c1bbfd7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.snapshots.RestoreInfo; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class RestoreRemoteStoreResponseTests extends AbstractXContentTestCase { + + @Override + protected RestoreRemoteStoreResponse createTestInstance() { + if (randomBoolean()) { + String name = "remote_store"; + List indices = new ArrayList<>(); + indices.add("test0"); + indices.add("test1"); + int totalShards = randomIntBetween(1, 1000); + int successfulShards = randomIntBetween(0, totalShards); + return new RestoreRemoteStoreResponse(new RestoreInfo(name, indices, totalShards, successfulShards)); + } else { + return new RestoreRemoteStoreResponse((RestoreInfo) null); + } + } + + @Override + protected RestoreRemoteStoreResponse doParseInstance(XContentParser parser) throws IOException { + return RestoreRemoteStoreResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 4b98870422ce8..202f1b7dcb5b4 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -91,6 +91,7 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Answers.RETURNS_MOCKS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyString; @@ -224,7 +225,7 @@ public void setupAction() { remoteResponseHandler = ArgumentCaptor.forClass(TransportResponseHandler.class); // setup services that will be called by action - transportService = mock(TransportService.class); + transportService = 
mock(TransportService.class, RETURNS_MOCKS); clusterService = mock(ClusterService.class); localIngest = true; // setup nodes for local and remote diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index b44b59b8a4ad5..ad2657517df9a 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -477,6 +477,57 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testExecutePhaseOnShardFailure() throws InterruptedException { + final Index index = new Index("test", UUID.randomUUID().toString()); + + final SearchShardIterator[] shards = IntStream.range(0, 2 + randomInt(3)) + .mapToObj(i -> new SearchShardIterator(null, new ShardId(index, i), List.of("n1", "n2", "n3"), null, null, null)) + .toArray(SearchShardIterator[]::new); + + final AtomicBoolean fail = new AtomicBoolean(true); + final CountDownLatch latch = new CountDownLatch(1); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.setMaxConcurrentShardRequests(5); + + final ArraySearchPhaseResults queryResult = new ArraySearchPhaseResults<>(shards.length); + AbstractSearchAsyncAction action = createAction( + searchRequest, + queryResult, + new ActionListener() { + @Override + public void onResponse(SearchResponse response) {} + + @Override + public void onFailure(Exception e) { + try { + // We end up here only when onPhaseDone() is called (causing NPE) and + // ending up in the onPhaseFailure() callback + if (fail.compareAndExchange(true, false)) { + assertThat(e, instanceOf(SearchPhaseExecutionException.class)); + throw new RuntimeException("Simulated exception"); + } + } finally { + executor.submit(() -> latch.countDown()); + } + } + }, + false, + false, + new AtomicLong(), + shards + ); + action.run(); + assertTrue(latch.await(1, TimeUnit.SECONDS)); + + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, action.buildShardFailures(), null, null); + assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations()); + assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest()); + assertSame(searchResponse.getProfileResults(), internalSearchResponse.profile()); + assertSame(searchResponse.getHits(), internalSearchResponse.hits()); + assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); + } + private static final class PhaseResult extends SearchPhaseResult { PhaseResult(ShardSearchContextId contextId) { this.contextId = contextId; diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 2c65d6ffcc813..a5c6e1c12b79c 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -219,7 +219,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new 
PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -308,7 +308,7 @@ public void sendFreePITContexts( CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -406,7 +406,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -494,7 +494,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java index ec83cb45697d9..433cd9dfa3e89 100644 --- a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java +++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java @@ -8,7 +8,14 @@ package org.opensearch.action.search; +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.junit.Assert; import org.opensearch.Version; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -21,7 +28,10 @@ import org.opensearch.search.internal.ShardSearchContextId; import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import static org.opensearch.test.OpenSearchTestCase.between; import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; @@ -81,4 +91,41 @@ public static String getPitId() { } return SearchContextId.encode(array.asList(), aliasFilters, version); } + + public static void assertUsingGetAllPits(Client client, String id, long creationTime) throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + 
ClusterStateResponse clusterStateResponse = client.admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + Assert.assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id)); + Assert.assertEquals(getPitResponse.getPitInfos().get(0).getCreationTime(), creationTime); + } + + public static void assertGetAllPitsEmpty(Client client) throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + Assert.assertEquals(0, getPitResponse.getPitInfos().size()); + } } diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index e2db4fdbc97ef..7a1d9a6fe963c 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -7,13 +7,13 @@ */ package org.opensearch.action.search; -import org.apache.lucene.search.TotalHits; import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -25,10 +25,6 @@ import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.SearchHit; -import org.opensearch.search.SearchHits; -import org.opensearch.search.aggregations.InternalAggregations; -import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; @@ -109,15 +105,6 @@ public void setupData() { new TaskId(randomLong() + ":" + randomLong()), Collections.emptyMap() ); - InternalSearchResponse response = new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), - 
InternalAggregations.EMPTY, - null, - null, - false, - null, - 1 - ); clusterServiceMock = mock(ClusterService.class); ClusterState state = mock(ClusterState.class); @@ -178,7 +165,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -224,7 +211,11 @@ public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionExce transportService.start(); transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); List deletePitInfos = new ArrayList<>(); @@ -238,7 +229,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -307,7 +312,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -366,7 +371,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -434,7 +439,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( 
transportService, actionFilters, @@ -480,7 +485,11 @@ public void testDeleteAllPitWhenNodeIsDown() { transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); @@ -496,7 +505,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -543,7 +566,11 @@ public void testDeleteAllPitWhenAllNodesAreDown() { SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); t.start(); @@ -554,7 +581,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -600,7 +641,11 @@ public void testDeleteAllPitFailure() { transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextId, + final 
ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 is down"))); @@ -616,7 +661,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 9ec9e656257d6..1195ed2590b1e 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -274,6 +274,43 @@ protected void clusterManagerOperation(Task task, Request request, ClusterState } } + /* The test is copied from testLocalOperationWithoutBlocks() + to validate the backwards compatibility for the deprecated method masterOperation(with task parameter). 
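+ Both the success and failure paths are exercised, selected via a randomized failure flag.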
*/ + public void testDeprecatedMasterOperationWithTaskParameterCanBeCalled() throws ExecutionException, InterruptedException { + final boolean clusterManagerOperationFailure = randomBoolean(); + + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + + final Exception exception = new Exception(); + final Response response = new Response(); + + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); + + new Action("internal:testAction", transportService, clusterService, threadPool) { + @Override + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + if (clusterManagerOperationFailure) { + listener.onFailure(exception); + } else { + listener.onResponse(response); + } + } + }.execute(request, listener); + assertTrue(listener.isDone()); + + if (clusterManagerOperationFailure) { + try { + listener.get(); + fail("Expected exception but returned proper result"); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), equalTo(exception)); + } + } else { + assertThat(listener.get(), equalTo(response)); + } + } + public void testLocalOperationWithBlocks() throws ExecutionException, InterruptedException { final boolean retryableBlock = randomBoolean(); final boolean unblockBeforeTimeout = randomBoolean(); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index 62dae0622eb85..e6da768650088 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -299,12 +299,12 @@ public void testLocalNodeClusterManagerListenerCallbacks() { AtomicBoolean isClusterManager = new AtomicBoolean(); timedClusterApplierService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() { @Override - public void onMaster() { + public void onClusterManager() { isClusterManager.set(true); } @Override - public void offMaster() { + public void offClusterManager() { isClusterManager.set(false); } }); diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 9c70accaca3e4..64286e47b4966 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -48,6 +48,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.sameInstance; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; public class ThreadContextTests extends OpenSearchTestCase { @@ -154,6 +155,15 @@ public void testNewContextWithClearedTransients() { assertEquals(1, threadContext.getResponseHeaders().get("baz").size()); } + public void testStashContextWithPreservedTransients() { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient("foo", "bar"); + threadContext.putTransient(TASK_ID, 1); + threadContext.stashContext(); + assertNull(threadContext.getTransient("foo")); + assertEquals(1, (int) threadContext.getTransient(TASK_ID)); + } + public void testStashWithOrigin() { final String origin = randomAlphaOfLengthBetween(4, 16); final ThreadContext threadContext = new 
ThreadContext(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java index 4a1ecb9661687..3c39ec9f03b2a 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java @@ -62,6 +62,7 @@ import org.opensearch.common.util.set.Sets; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; @@ -205,6 +206,203 @@ public void testShardLockObtainFailedException() { assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } + /** + * Tests that the replica with the highest primary term is selected as target + */ + public void testPreferReplicaWithHighestPrimaryTerm() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 22, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId()) + ); + // Assert node2's allocation id is used + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId2) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that a replica with a null ReplicationCheckpoint is ignored and the replica with the highest checkpoint is selected as target + */ + public void testPreferReplicaWithNullReplicationCheckpoint() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 40, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + // Assert 
node3's allocation id should be used as it has the highest replication checkpoint + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId3) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that when every copy has a null ReplicationCheckpoint, the previous primary is selected as target + */ + public void testPreferReplicaWithAllNullReplicationCheckpoint() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, null, null); + testAllocator.addData(node2, allocId2, false, null, null); + testAllocator.addData(node3, allocId3, true, null, null); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + // Assert node3's allocation id should be used as it was the previous primary + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId3) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that the replica with the highest segment infos version is selected as target when primary terms are equal + */ + public void testPreferReplicaWithHighestSegmentInfoVersion() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 3)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId()) + ); + // Assert node2's allocation id is used + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId2) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that an in-sync replica at a lower checkpoint is preferred over an out-of-sync replica at a higher checkpoint + */ + public void testOutOfSyncHighestRepCheckpointIsIgnored() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), 
CLUSTER_RECOVERED, + allocId1, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + // Assert node3's allocation id is used + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId3) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that the previous primary is preferred over a replica with a higher replication checkpoint + */ + public void testPreferAllocatingPreviousPrimaryWithLowerRepCheckpoint() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, true, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node1.getId()) + ); + // Assert node1's allocation id is used as it was the previous primary, despite its lower replication checkpoint + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId1) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + /** * Tests that when one node returns a ShardLockObtainFailedException and another properly loads the store, it will * select the second node as target */ @@ -219,7 +417,7 @@ public void testShardLockObtainFailedExceptionPreferOtherValidCopies() { allocId2 ); testAllocator.addData(node1, allocId1, randomBoolean(), new ShardLockObtainFailedException(shardId, "test")); - testAllocator.addData(node2, allocId2, randomBoolean(), null); + testAllocator.addData(node2, allocId2, randomBoolean()); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -601,17 +799,42 @@ public TestAllocator clear() { return this; } + + public TestAllocator addData( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint + ) { + return addData(node, allocationId, primary, replicationCheckpoint, null); + 
} + public TestAllocator addData(DiscoveryNode node, String allocationId, boolean primary) { - return addData(node, allocationId, primary, null); + return addData(node, allocationId, primary, ReplicationCheckpoint.empty(shardId), null); } public TestAllocator addData(DiscoveryNode node, String allocationId, boolean primary, @Nullable Exception storeException) { + return addData(node, allocationId, primary, ReplicationCheckpoint.empty(shardId), storeException); + } + + public TestAllocator addData( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + @Nullable Exception storeException + ) { if (data == null) { data = new HashMap<>(); } data.put( node, - new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, allocationId, primary, storeException) + new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards( + node, + allocationId, + primary, + replicationCheckpoint, + storeException + ) ); return this; } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 4b3dc041b9f54..e02eac85beafb 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -41,8 +41,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -57,7 +57,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; -import static org.opensearch.common.settings.IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS; public class IndexSettingsTests extends OpenSearchTestCase { @@ -770,26 +769,111 @@ public void testRemoteStoreExplicitSetting() { "index", Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_REMOTE_STORE, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); assertTrue(settings.isRemoteStoreEnabled()); } + public void testRemoteTranslogStoreDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertFalse(settings.isRemoteTranslogStoreEnabled()); + } + + public void testRemoteTranslogStoreExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertTrue(settings.isRemoteTranslogStoreEnabled()); + } + + public void testRemoteTranslogStoreNullSetting() { + Settings indexSettings = Settings.builder() + .put("index.remote_store.translog.enabled", "null") + .put("index.remote_store.enabled", randomBoolean()) + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> 
IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals("Failed to parse value [null] as only [true] or [false] are allowed.", iae.getMessage()); + } + public void testUpdateRemoteStoreFails() { Set> remoteStoreSettingSet = new HashSet<>(); - remoteStoreSettingSet.add(FEATURE_FLAGGED_INDEX_SETTINGS.get(FeatureFlags.REMOTE_STORE)); + remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING); IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); IllegalArgumentException error = expectThrows( IllegalArgumentException.class, () -> settings.updateSettings( - Settings.builder().put("index.remote_store", randomBoolean()).build(), + Settings.builder().put("index.remote_store.enabled", randomBoolean()).build(), Settings.builder(), Settings.builder(), "index" ) ); - assertEquals(error.getMessage(), "final index setting [index.remote_store], not updateable"); + assertEquals(error.getMessage(), "final index setting [index.remote_store.enabled], not updateable"); + } + + public void testUpdateRemoteTranslogStoreFails() { + Set> remoteStoreSettingSet = new HashSet<>(); + remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING); + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> settings.updateSettings( + Settings.builder().put("index.remote_store.translog.enabled", randomBoolean()).build(), + Settings.builder(), + Settings.builder(), + "index" + ) + ); + assertEquals(error.getMessage(), "final index setting [index.remote_store.translog.enabled], not updateable"); + } + + public void testEnablingRemoteTranslogStoreFailsWhenRemoteSegmentDisabled() { + Settings indexSettings = Settings.builder() + .put("index.remote_store.translog.enabled", true) + .put("index.remote_store.enabled", false) + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals( + "Settings index.remote_store.translog.enabled cannot be enabled when index.remote_store.enabled is set to false", + iae.getMessage() + ); + } + + public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDocument() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.DOCUMENT) + .put("index.remote_store.enabled", true) + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage()); + } + + public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() { + Settings indexSettings = Settings.builder().put("index.remote_store.enabled", true).build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage()); } } diff --git a/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java index 38c8491d79150..75a346e444b73 100644 --- 
a/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java @@ -86,8 +86,8 @@ public static void init() throws IllegalAccessException { @AfterClass public static void cleanup() { - appender.stop(); Loggers.removeAppender(testLogger1, appender); + appender.stop(); } public void testLevelPrecedence() { diff --git a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java index ae159092a4833..ea146ec20b16a 100644 --- a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java @@ -84,9 +84,9 @@ public static void init() throws IllegalAccessException { @AfterClass public static void cleanup() { - appender.stop(); Loggers.removeAppender(queryLog, appender); Loggers.removeAppender(fetchLog, appender); + appender.stop(); } @Override diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 0275066f9af1b..0a6338333bffc 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -65,21 +65,21 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene92Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene94Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService().codec("default"); - assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene94Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService().codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene94Codec.Mode.BEST_COMPRESSION, codec); } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene92Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene94Codec.Mode expected, Codec actual) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(null); iwc.setCodec(actual); @@ -91,7 +91,7 @@ private void assertStoredFieldsCompressionEquals(Lucene92Codec.Mode expected, Co SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, Lucene92Codec.Mode.valueOf(v)); + assertEquals(expected, Lucene94Codec.Mode.valueOf(v)); ir.close(); dir.close(); } diff --git 
a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index 340811352a203..575997dc2609e 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,7 +32,7 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -70,7 +70,7 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - indexWriterConfig.setCodec(new Lucene92Codec() { + indexWriterConfig.setCodec(new Lucene94Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java index 7ddd92ea7b36e..269d89352fb18 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java @@ -16,6 +16,7 @@ import org.opensearch.index.codec.CodecService; import org.opensearch.index.codec.CodecServiceFactory; import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogReader; @@ -66,7 +67,8 @@ public void testCreateEngineConfigFromFactory() { () -> new RetentionLeases(0, 0, Collections.emptyList()), null, null, - false + false, + new InternalTranslogFactory() ); assertNotNull(config.getCodec()); @@ -143,7 +145,8 @@ public void testCreateCodecServiceFromFactory() { () -> new RetentionLeases(0, 0, Collections.emptyList()), null, null, - false + false, + new InternalTranslogFactory() ); assertNotNull(config.getCodec()); } diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java index 1c6d06e9bcc08..1754d6082b86d 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; @@ -102,7 +103,8 @@ private EngineConfig createReadOnlyEngine(IndexSettings indexSettings) { () -> RetentionLeases.EMPTY, null, null, - true + true, + new InternalTranslogFactory() ); } } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java 
b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 675ff860c3334..1fe1a37dedae0 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -12,18 +12,25 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.hamcrest.MatcherAssert; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.Queries; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ParsedDocument; +import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -31,6 +38,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; +import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; public class NRTReplicationEngineTests extends EngineTestCase { @@ -210,6 +219,49 @@ public void testTrimTranslogOps() throws Exception { } } + public void testCommitSegmentInfos() throws Exception { + // This test asserts that NRTReplication#commitSegmentInfos creates a new commit point with the latest checkpoints + // stored in user data. + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "index", + Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() + ); + try ( + final Store nrtEngineStore = createStore(indexSettings, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()) + .stream() + .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX)) + .collect(Collectors.toList()); + for (Engine.Operation op : operations) { + applyOperation(nrtEngine, op); + } + + final SegmentInfos previousInfos = nrtEngine.getLatestSegmentInfos(); + LocalCheckpointTracker localCheckpointTracker = nrtEngine.getLocalCheckpointTracker(); + final long maxSeqNo = localCheckpointTracker.getMaxSeqNo(); + final long processedCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); + nrtEngine.commitSegmentInfos(); + + // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata. 
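+ // (user data is expected to carry the tracker's max seqNo and processed checkpoint)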
+ final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos(); + assertEquals(previousInfos.getGeneration(), latestSegmentInfos.getLastGeneration()); + Map userData = latestSegmentInfos.getUserData(); + assertEquals(processedCheckpoint, localCheckpointTracker.getProcessedCheckpoint()); + assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO))); + assertEquals(processedCheckpoint, Long.parseLong(userData.get(LOCAL_CHECKPOINT_KEY))); + + // read infos from store and assert userdata + final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(nrtEngineStore.directory()); + final SegmentInfos committedInfos = SegmentInfos.readCommit(nrtEngineStore.directory(), lastCommitSegmentsFileName); + userData = committedInfos.getUserData(); + assertEquals(processedCheckpoint, Long.parseLong(userData.get(LOCAL_CHECKPOINT_KEY))); + assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO))); + } + } + private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine, SegmentInfos expectedSegmentInfos) throws IOException { assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint()); diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index 079475d9f3554..d6c89342c9df2 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -178,6 +178,26 @@ public void testFieldsWithFilledArrayShouldThrowException() throws Exception { } } + public void testDotAsFieldName() throws Exception { + String mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(".") + .field("type", "text") + .endObject() + .endObject() + .endObject() + ); + + try { + createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); + fail("Expected MapperParsingException"); + } catch (MapperParsingException e) { + assertThat(e.getMessage(), containsString("Invalid field name")); + } + } + public void testFieldPropertiesArray() throws Exception { String mapping = Strings.toString( XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 4682d35411b78..7d2d8e38d066e 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -45,9 +45,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map groupStats1 = new HashMap<>(); Map groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking 
group stats are correct searchStats1.add(searchStats2); @@ -75,6 +75,9 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getScrollCount()); assertEquals(equalTo, stats.getScrollTimeInMillis()); assertEquals(equalTo, stats.getScrollCurrent()); + assertEquals(equalTo, stats.getPitCount()); + assertEquals(equalTo, stats.getPitTimeInMillis()); + assertEquals(equalTo, stats.getPitCurrent()); assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index b65528ae207fd..8c00ab97a46ea 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2660,7 +2660,7 @@ public void restoreShard( public void testRestoreShardFromRemoteStore() throws IOException { IndexShard target = newStartedShard( true, - Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE, true).build(), + Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(), new InternalEngineFactory() ); @@ -3522,7 +3522,7 @@ public void testCheckpointRefreshListenerWithNull() throws IOException { } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint */ diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java new file mode 100644 index 0000000000000..23371a39871c7 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -0,0 +1,256 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.InternalEngine; +import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationType; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { + + private static final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + + /** + * Test that latestReplicationCheckpoint returns null only for docrep-enabled indices + */ + public void testReplicationCheckpointNullForDocRep() throws IOException { + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "DOCUMENT").put(Settings.EMPTY).build(); + final IndexShard indexShard = newStartedShard(false, indexSettings); + assertNull(indexShard.getLatestReplicationCheckpoint()); + closeShards(indexShard); + } + + /** + * Test that latestReplicationCheckpoint returns a ReplicationCheckpoint for segrep-enabled indices + */ + public void testReplicationCheckpointNotNullForSegRep() throws IOException { + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT").put(Settings.EMPTY).build(); + final IndexShard indexShard = newStartedShard(indexSettings); + final ReplicationCheckpoint replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); + assertNotNull(replicationCheckpoint); + closeShards(indexShard); + } + + public void testSegmentReplication_Index_Update_Delete() throws Exception { + String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; + try (ReplicationGroup shards = createGroup(2, settings, mappings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + + final int numDocs = randomIntBetween(100, 200); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + } + + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + + shards.assertAllEqual(numDocs); + + for (int i = 0; i < numDocs; i++) { + // randomly update docs. 
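+ // roughly half of the docs, chosen per-id via randomBoolean(), are re-indexed with an updated value.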
+ if (randomBoolean()) { + shards.index( + new IndexRequest(index.getName()).id(String.valueOf(i)).source("{ \"foo\" : \"baz\" }", XContentType.JSON) + ); + } + } + + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + final List docs = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docs); + } + for (int i = 0; i < numDocs; i++) { + // randomly delete. + if (randomBoolean()) { + shards.delete(new DeleteRequest(index.getName()).id(String.valueOf(i))); + } + } + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + final List docsAfterDelete = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docsAfterDelete); + } + } + } + + public void testIgnoreShardIdle() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + primary.scheduledRefresh(); + replica.scheduledRefresh(); + + primary.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + replica.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + + // Update the search_idle setting, this will put both shards into search idle. + Settings updatedSettings = Settings.builder() + .put(settings) + .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO) + .build(); + primary.indexSettings().getScopedSettings().applySettings(updatedSettings); + replica.indexSettings().getScopedSettings().applySettings(updatedSettings); + + primary.scheduledRefresh(); + replica.scheduledRefresh(); + + // Shards without segrep will register a new RefreshListener on the engine and return true when registered, + // assert with segrep enabled that awaitShardSearchActive does not register a listener. + primary.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + replica.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + } + } + + /** + * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. + */ + public void testPublishCheckpointOnPrimaryMode() throws IOException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + IndexShard shard = newStartedShard(true); + CheckpointRefreshListener refreshListener = new CheckpointRefreshListener(shard, mock); + refreshListener.afterRefresh(true); + + // verify checkpoint is published + verify(mock, times(1)).publish(any()); + closeShards(shard); + } + + /** + * here we are starting a new primary shard in PrimaryMode initially and starting relocation handoff. Later we complete relocation handoff then shard is no longer + * in PrimaryMode, and we test if the shard does not publish checkpoint after refresh. 
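+ * The checkpoint publisher is mocked, so we verify that publish() is never invoked once the handoff completes.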
+     */
+    public void testPublishCheckpointAfterRelocationHandOff() throws IOException {
+        final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class);
+        IndexShard shard = newStartedShard(true);
+        CheckpointRefreshListener refreshListener = new CheckpointRefreshListener(shard, mock);
+        String id = shard.routingEntry().allocationId().getId();
+
+        // Starting relocation handoff
+        shard.getReplicationTracker().startRelocationHandoff(id);
+
+        // Completing relocation handoff
+        shard.getReplicationTracker().completeRelocationHandoff();
+        refreshListener.afterRefresh(true);
+
+        // verify checkpoint is not published
+        verify(mock, times(0)).publish(any());
+        closeShards(shard);
+    }
+
+    public void testNRTReplicaPromotedAsPrimary() throws Exception {
+        try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            IndexShard oldPrimary = shards.getPrimary();
+            final IndexShard nextPrimary = shards.getReplicas().get(0);
+            final IndexShard replica = shards.getReplicas().get(1);
+
+            // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point.
+            final int numDocs = shards.indexDocs(randomInt(10));
+
+            // refresh and copy the segments over.
+            oldPrimary.refresh("Test");
+            replicateSegments(oldPrimary, shards.getReplicas());
+
+            // at this point both shards should have numDocs persisted and searchable.
+            assertDocCounts(oldPrimary, numDocs, numDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, numDocs, numDocs);
+            }
+
+            // 2. Create ops that are in the replica's xlog, not in the index.
+            // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs
+            // persisted.
+            final int totalDocs = numDocs + shards.indexDocs(randomInt(10));
+
+            assertDocCounts(oldPrimary, totalDocs, totalDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, totalDocs, numDocs);
+            }
+
+            // promote the replica
+            shards.syncGlobalCheckpoint();
+            assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations());
+            shards.promoteReplicaToPrimary(nextPrimary);
+
+            // close and start the oldPrimary as a replica.
+            oldPrimary.close("demoted", false);
+            oldPrimary.store().close();
+            oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId());
+            shards.recoverReplica(oldPrimary);
+
+            assertEquals(NRTReplicationEngine.class, oldPrimary.getEngine().getClass());
+            assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass());
+            assertDocCounts(nextPrimary, totalDocs, totalDocs);
+            assertEquals(0, nextPrimary.translogStats().estimatedNumberOfOperations());
+
+            // refresh and push segments to our other replica.
+            nextPrimary.refresh("test");
+            replicateSegments(nextPrimary, asList(replica));
+
+            for (IndexShard shard : shards) {
+                assertConsistentHistoryBetweenTranslogAndLucene(shard);
+            }
+            final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary());
+            for (IndexShard shard : shards.getReplicas()) {
+                assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery));
+            }
+        }
+    }
+
+    /**
+     * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because
+     * it asserts that point-in-time seqNos line up with the doc counts.
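+     * For example, with 5 docs persisted and 3 searchable, the max and local checkpoint seqNos are 4 and the processed
+     * checkpoint is 2, since seqNos are zero-based.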
+     */
+    private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCount, int expectedSearchableDocCount) throws IOException {
+        assertDocCount(indexShard, expectedSearchableDocCount);
+        // assigned seqNos start at 0, so assert max & local seqNos are 1 less than our persisted doc count.
+        assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getMaxSeqNo());
+        assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getLocalCheckpoint());
+        // processed cp should be 1 less than our searchable doc count.
+        assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint());
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java
index 2ded77d2cecfd..97575248b4ad3 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java
@@ -15,11 +15,13 @@
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.support.PlainBlobMetadata;
+import org.opensearch.common.collect.Set;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.NoSuchFileException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -67,6 +69,24 @@ public void testListAllException() throws IOException {
         assertThrows(IOException.class, () -> remoteDirectory.listAll());
     }
 
+    public void testListFilesByPrefix() throws IOException {
+        Map<String, BlobMetadata> fileNames = Stream.of("abc", "abd", "abe", "abf", "abg")
+            .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100)));
+
+        when(blobContainer.listBlobsByPrefix("ab")).thenReturn(fileNames);
+
+        Collection<String> actualFileNames = remoteDirectory.listFilesByPrefix("ab");
+        Collection<String> expectedFileNames = Set.of("abc", "abd", "abe", "abf", "abg");
+        assertEquals(expectedFileNames, actualFileNames);
+    }
+
+    public void testListFilesByPrefixException() throws IOException {
+        when(blobContainer.listBlobsByPrefix("abc")).thenThrow(new IOException("Error reading blob store"));
+
+        assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefix("abc"));
+        verify(blobContainer).listBlobsByPrefix("abc");
+    }
+
     public void testDeleteFile() throws IOException {
         remoteDirectory.deleteFile("segment_1");
 
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
new file mode 100644
index 0000000000000..4eabfa74625f2
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -0,0 +1,339 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.store;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.junit.Before;
+import org.opensearch.common.collect.Set;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.nio.file.NoSuchFileException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.startsWith;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class RemoteSegmentStoreDirectoryTests extends OpenSearchTestCase {
+    private RemoteDirectory remoteDataDirectory;
+    private RemoteDirectory remoteMetadataDirectory;
+
+    private RemoteSegmentStoreDirectory remoteSegmentStoreDirectory;
+
+    @Before
+    public void setup() throws IOException {
+        remoteDataDirectory = mock(RemoteDirectory.class);
+        remoteMetadataDirectory = mock(RemoteDirectory.class);
+
+        remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(remoteDataDirectory, remoteMetadataDirectory);
+    }
+
+    public void testUploadedSegmentMetadataToString() {
+        RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = new RemoteSegmentStoreDirectory.UploadedSegmentMetadata(
+            "abc",
+            "pqr",
+            "123456"
+        );
+        assertEquals("abc::pqr::123456", metadata.toString());
+    }
+
+    public void testUploadedSegmentMetadataFromString() {
+        RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = RemoteSegmentStoreDirectory.UploadedSegmentMetadata.fromString(
+            "_0.cfe::_0.cfe__uuidxyz::4567"
+        );
+        assertEquals("_0.cfe::_0.cfe__uuidxyz::4567", metadata.toString());
+    }
+
+    public void testGetMetadataFilename() {
+        // Generation 23 is replaced by n due to radix 32
+        assertEquals(
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX + "__12__n__uuid1",
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, "uuid1")
+        );
+    }
+
+    public void testGetPrimaryTermGenerationUuid() {
+        String[] filenameTokens = "abc__12__n__uuid_xyz".split(RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR);
+        assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens));
+        assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens));
+        assertEquals("uuid_xyz", RemoteSegmentStoreDirectory.MetadataFilenameUtils.getUuid(filenameTokens));
+    }
+
+    public void testMetadataFilenameComparator() {
+        List<String> metadataFilenames = new ArrayList<>(
+            List.of(
+                "abc__10__20__uuid1",
+                "abc__12__2__uuid2",
+                "pqr__1__1__uuid0",
+                "abc__3__n__uuid3",
+                "abc__10__8__uuid8",
+                "abc__3__a__uuid4",
+                "abc__3__a__uuid5"
+            )
+        );
+        metadataFilenames.sort(RemoteSegmentStoreDirectory.METADATA_FILENAME_COMPARATOR);
+        assertEquals(
+            List.of(
+                "abc__3__a__uuid4",
+                "abc__3__a__uuid5",
+                "abc__3__n__uuid3",
+                "abc__10__8__uuid8",
+                "abc__10__20__uuid1",
+                "abc__12__2__uuid2",
+                "pqr__1__1__uuid0"
+            ),
+            metadataFilenames
+        );
+    }
+
+    public void testInitException() throws IOException {
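+        // init() discovers remote state by listing metadata files by prefix, so a listing failure should surface to the caller.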
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow(
+            new IOException("Error")
+        );
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.init());
+    }
+
+    public void testInitNoMetadataFile() throws IOException {
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            List.of()
+        );
+
+        remoteSegmentStoreDirectory.init();
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> actualCache = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertEquals(Set.of(), actualCache.keySet());
+    }
+
+    private Map<String, String> getDummyMetadata(String prefix, int commitGeneration) {
+        Map<String, String> metadata = new HashMap<>();
+        metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__qrt::" + randomIntBetween(1000, 5000));
+        metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__zxd::" + randomIntBetween(1000, 5000));
+        metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__yui::" + randomIntBetween(1000, 5000));
+        metadata.put(
+            "segments_" + commitGeneration,
+            "segments_" + commitGeneration + "::segments_" + commitGeneration + "__exv::" + randomIntBetween(1000, 5000)
+        );
+        return metadata;
+    }
+
+    private void populateMetadata() throws IOException {
+        List<String> metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv");
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            metadataFiles
+        );
+
+        IndexInput indexInput = mock(IndexInput.class);
+        Map<String, String> dummyMetadata = getDummyMetadata("_0", 1);
+        when(indexInput.readMapOfStrings()).thenReturn(dummyMetadata);
+        when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput);
+    }
+
+    public void testInit() throws IOException {
+        populateMetadata();
+
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv")
+        );
+
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> actualCache = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertEquals(Set.of("_0.cfe", "_0.cfs", "_0.si", "segments_1"), actualCache.keySet());
+    }
+
+    public void testListAll() throws IOException {
+        populateMetadata();
+
+        assertEquals(Set.of("_0.cfe", "_0.cfs", "_0.si", "segments_1"), Set.of(remoteSegmentStoreDirectory.listAll()));
+    }
+
+    public void testDeleteFile() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertTrue(uploadedSegments.containsKey("_0.si"));
+        assertFalse(uploadedSegments.containsKey("_100.si"));
+
+        remoteSegmentStoreDirectory.deleteFile("_0.si");
+        remoteSegmentStoreDirectory.deleteFile("_100.si");
+
+        verify(remoteDataDirectory).deleteFile(startsWith("_0.si"));
+        verify(remoteDataDirectory, times(0)).deleteFile(startsWith("_100.si"));
+        assertFalse(uploadedSegments.containsKey("_0.si"));
+    }
+
+    public void testDeleteFileException() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(any());
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteFile("_0.si"));
+    }
+
+    public void testFileLength() throws IOException {
+        populateMetadata();
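+        // fileLength should be served by the remote data directory for files present in the uploaded-segments cache.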
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertTrue(uploadedSegments.containsKey("_0.si"));
+
+        when(remoteDataDirectory.fileLength(startsWith("_0.si"))).thenReturn(1234L);
+
+        assertEquals(1234L, remoteSegmentStoreDirectory.fileLength("_0.si"));
+    }
+
+    public void testFileLengthNoSuchFile() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertFalse(uploadedSegments.containsKey("_100.si"));
+        assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.fileLength("_100.si"));
+    }
+
+    public void testCreateOutput() throws IOException {
+        IndexOutput indexOutput = mock(IndexOutput.class);
+        when(remoteDataDirectory.createOutput(startsWith("abc"), eq(IOContext.DEFAULT))).thenReturn(indexOutput);
+
+        assertEquals(indexOutput, remoteSegmentStoreDirectory.createOutput("abc", IOContext.DEFAULT));
+    }
+
+    public void testCreateOutputException() throws IOException {
+        when(remoteDataDirectory.createOutput(startsWith("abc"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error"));
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.createOutput("abc", IOContext.DEFAULT));
+    }
+
+    public void testOpenInput() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        IndexInput indexInput = mock(IndexInput.class);
+        when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenReturn(indexInput);
+
+        assertEquals(indexInput, remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
+    }
+
+    public void testOpenInputNoSuchFile() {
+        assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
+    }
+
+    public void testOpenInputException() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error"));
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
+    }
+
+    public void testCopyFrom() throws IOException {
+        String filename = "_100.si";
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Directory storeDirectory = LuceneTestCase.newDirectory();
+        IndexOutput indexOutput = storeDirectory.createOutput(filename, IOContext.DEFAULT);
+        indexOutput.writeString("Hello World!");
+        CodecUtil.writeFooter(indexOutput);
+        indexOutput.close();
+        storeDirectory.sync(List.of(filename));
+
+        assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+        remoteSegmentStoreDirectory.copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT);
+        assertTrue(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+
+        storeDirectory.close();
+    }
+
+    public void testCopyFromException() throws IOException {
+        String filename = "_100.si";
+        Directory storeDirectory = LuceneTestCase.newDirectory();
+        assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+        doThrow(new IOException("Error")).when(remoteDataDirectory).copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT);
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT));
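+        // The upload failed, so the file must not be recorded in the uploaded-segments cache.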
+        assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+
+        storeDirectory.close();
+    }
+
+    public void testContainsFile() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        // Seeding the cache directly is a shortcut; the alternative would be opening up access to fields in UploadedSegmentMetadata
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegmentMetadataMap = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+        uploadedSegmentMetadataMap.put(
+            "_100.si",
+            new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234")
+        );
+
+        assertTrue(remoteSegmentStoreDirectory.containsFile("_100.si", "1234"));
+        assertFalse(remoteSegmentStoreDirectory.containsFile("_100.si", "2345"));
+        assertFalse(remoteSegmentStoreDirectory.containsFile("_200.si", "1234"));
+    }
+
+    public void testUploadMetadataEmpty() throws IOException {
+        Directory storeDirectory = mock(Directory.class);
+        IndexOutput indexOutput = mock(IndexOutput.class);
+        when(storeDirectory.createOutput(startsWith("metadata__12__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput);
+
+        Collection<String> segmentFiles = List.of("s1", "s2", "s3");
+        assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, storeDirectory, 12L, 24L));
+    }
+
+    public void testUploadMetadataNonEmpty() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Directory storeDirectory = mock(Directory.class);
+        IndexOutput indexOutput = mock(IndexOutput.class);
+        when(storeDirectory.createOutput(startsWith("metadata__12__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput);
+
+        Collection<String> segmentFiles = List.of("_0.si");
+        remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, storeDirectory, 12L, 24L);
+
+        verify(remoteMetadataDirectory).copyFrom(
+            eq(storeDirectory),
+            startsWith("metadata__12__o"),
+            startsWith("metadata__12__o"),
+            eq(IOContext.DEFAULT)
+        );
+        String metadataString = remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get("_0.si").toString();
+        verify(indexOutput).writeMapOfStrings(Map.of("_0.si", metadataString));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java
index 5171f0dfa1d18..234abfba66622 100644
--- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java
@@ -47,7 +47,8 @@ public void testRecoveryFromTranslog() throws IOException {
             () -> tracker,
             translogUUID,
             TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER,
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
         );
         final int docs = randomIntBetween(1, 100);
         for (int i = 0; i < docs; i++) {
@@ -85,7 +86,8 @@ public void onBeginTranslogRecovery() {
                 beginTranslogRecoveryInvoked.set(true);
             }
         },
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
         );
         AtomicInteger opsRecovered = new AtomicInteger();
         int opsRecoveredFromTranslog = translogManager.recoverFromTranslog((snapshot) -> {
@@ -122,7 +124,8 @@ public void testTranslogRollsGeneration() throws IOException {
             () -> tracker,
             translogUUID,
             TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER,
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
        );
        final int docs = randomIntBetween(1, 100);
        for (int i = 0; i < docs; i++) {
@@ -150,7 +153,8 @@ public void testTranslogRollsGeneration() throws IOException {
            () -> new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED),
             translogUUID,
             TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER,
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
         );
         AtomicInteger opsRecovered = new AtomicInteger();
         int opsRecoveredFromTranslog = translogManager.recoverFromTranslog((snapshot) -> {
@@ -183,7 +187,8 @@ public void testTrimOperationsFromTranslog() throws IOException {
             () -> tracker,
             translogUUID,
             TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER,
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
         );
         final int docs = randomIntBetween(1, 100);
         for (int i = 0; i < docs; i++) {
@@ -213,7 +218,8 @@
             () -> new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED),
             translogUUID,
             TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER,
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
         );
         AtomicInteger opsRecovered = new AtomicInteger();
         int opsRecoveredFromTranslog = translogManager.recoverFromTranslog((snapshot) -> {
@@ -260,7 +266,8 @@ public void onAfterTranslogSync() {
                     }
                 }
             },
-            () -> {}
+            () -> {},
+            new InternalTranslogFactory()
         );
         translogManagerAtomicReference.set(translogManager);
         Engine.Index index = indexForDoc(doc);
diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java
index d42e75871a45a..38c55620e1223 100644
--- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java
@@ -155,6 +155,9 @@ public void testCancelReplication() throws IOException {
     }
 
     public void testMultipleReplicasUseSameCheckpoint() throws IOException {
+        IndexShard secondReplica = newShard(primary.shardId(), false);
+        recoverReplica(secondReplica, primary, true);
+
         OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings);
         final CheckpointInfoRequest request = new CheckpointInfoRequest(
             1L,
@@ -172,7 +175,7 @@ public void testMultipleReplicasUseSameCheckpoint() throws IOException {
 
         final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest(
             1L,
-            replica.routingEntry().allocationId().getId(),
+            secondReplica.routingEntry().allocationId().getId(),
             replicaDiscoveryNode,
             testCheckpoint
         );
@@ -187,6 +190,7 @@ public void testMultipleReplicasUseSameCheckpoint() throws IOException {
         assertEquals(0, copyState.refCount());
         assertEquals(0, replications.size());
         assertEquals(0, replications.cachedCopyStateSize());
+        closeShards(secondReplica);
     }
 
     public void testStartCopyWithoutPrepareStep() {
@@ -272,4 +276,40 @@ public void onFailure(Exception e) {
             }
         });
     }
+
+    public void testCancelAllReplicationsForShard() throws IOException {
+        // This tests the case where the primary has multiple ongoing replications.
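+        // Both replication targets share a single cached CopyState; cancelling by primary shard must release every reference.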
+        IndexShard replica_2 = newShard(primary.shardId(), false);
+        recoverReplica(replica_2, primary, true);
+
+        OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings);
+        final CheckpointInfoRequest request = new CheckpointInfoRequest(
+            1L,
+            replica.routingEntry().allocationId().getId(),
+            primaryDiscoveryNode,
+            testCheckpoint
+        );
+
+        final CopyState copyState = replications.prepareForReplication(request, mock(FileChunkWriter.class));
+        assertEquals(1, copyState.refCount());
+
+        final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest(
+            1L,
+            replica_2.routingEntry().allocationId().getId(),
+            replicaDiscoveryNode,
+            testCheckpoint
+        );
+        replications.prepareForReplication(secondRequest, mock(FileChunkWriter.class));
+
+        assertEquals(2, copyState.refCount());
+        assertEquals(2, replications.size());
+        assertEquals(1, replications.cachedCopyStateSize());
+
+        // cancel the primary's ongoing replications.
+        replications.cancel(primary, "Test");
+        assertEquals(0, copyState.refCount());
+        assertEquals(0, replications.size());
+        assertEquals(0, replications.cachedCopyStateSize());
+        closeShards(replica_2);
+    }
 }
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java
index 70061c54d0da2..2c52772649acc 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java
@@ -15,7 +15,9 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.action.ActionListener;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardTestCase;
 import org.opensearch.index.store.StoreFileMetadata;
@@ -41,7 +43,8 @@ public class SegmentReplicationSourceHandlerTests extends IndexShardTestCase {
     @Override
     public void setUp() throws Exception {
         super.setUp();
-        primary = newStartedShard(true);
+        final Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT").put(Settings.EMPTY).build();
+        primary = newStartedShard(true, settings);
         replica = newShard(primary.shardId(), false);
         recoverReplica(replica, primary, true);
         replicaDiscoveryNode = replica.recoveryState().getTargetNode();
@@ -63,6 +66,7 @@ public void testSendFiles() throws IOException {
             chunkWriter,
             threadPool,
             copyState,
+            primary.routingEntry().allocationId().getId(),
             5000,
             1
         );
@@ -100,6 +104,7 @@ public void testSendFiles_emptyRequest() throws IOException {
             chunkWriter,
             threadPool,
             copyState,
+            primary.routingEntry().allocationId().getId(),
             5000,
             1
         );
@@ -138,6 +143,7 @@ public void testSendFileFails() throws IOException {
             chunkWriter,
             threadPool,
             copyState,
+            primary.routingEntry().allocationId().getId(),
             5000,
             1
         );
@@ -175,6 +181,7 @@ public void testReplicationAlreadyRunning() throws IOException {
             chunkWriter,
             threadPool,
             copyState,
+            primary.routingEntry().allocationId().getId(),
             5000,
             1
         );
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
index 8b4bda7de50ad..d3a6d1a97dacc 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
@@ -13,6 +13,7 @@
 import org.mockito.Mockito;
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.ActionListener;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.index.shard.IndexShard;
@@ -50,7 +51,10 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase {
     @Override
     public void setUp() throws Exception {
         super.setUp();
-        final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build();
+        final Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT")
+            .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName())
+            .build();
         final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
         final TransportService transportService = mock(TransportService.class);
@@ -96,8 +100,8 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept
         );
         final SegmentReplicationTarget spy = Mockito.spy(target);
         doAnswer(invocation -> {
-            // setting stage to REPLICATING so transition in markAsDone succeeds on listener completion
-            target.state().setStage(SegmentReplicationState.Stage.REPLICATING);
+            // set up stage correctly so the transition in markAsDone succeeds on listener completion
+            moveTargetToFinalStage(target);
             final ActionListener<Void> listener = invocation.getArgument(0);
             listener.onResponse(null);
             return null;
@@ -119,7 +123,7 @@ public void onReplicationDone(SegmentReplicationState state) {
 
             @Override
             public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) {
-                assertEquals(SegmentReplicationState.Stage.REPLICATING, state.getStage());
+                assertEquals(SegmentReplicationState.Stage.INIT, state.getStage());
                 assertEquals(expectedError, e.getCause());
                 assertTrue(sendShardFailure);
             }
@@ -127,8 +131,6 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept
         );
         final SegmentReplicationTarget spy = Mockito.spy(target);
         doAnswer(invocation -> {
-            // setting stage to REPLICATING so transition in markAsDone succeeds on listener completion
-            target.state().setStage(SegmentReplicationState.Stage.REPLICATING);
             final ActionListener<Void> listener = invocation.getArgument(0);
             listener.onFailure(expectedError);
             return null;
@@ -204,6 +206,23 @@ public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOExc
         closeShard(indexShard, false);
     }
 
+    /**
+     * here we start a new shard in PrimaryMode and verify that an incoming checkpoint is not processed while the shard
+     * remains in PrimaryMode.
+     */
+    public void testRejectCheckpointOnShardPrimaryMode() throws IOException {
+        SegmentReplicationTargetService spy = spy(sut);
+
+        // Starting a new shard in PrimaryMode.
+        IndexShard primaryShard = newStartedShard(true);
+        IndexShard spyShard = spy(primaryShard);
+        doNothing().when(spy).startReplication(any(), any(), any());
+        spy.onNewCheckpoint(aheadCheckpoint, spyShard);
+
+        // Verify that checkpoint is not processed as shard is in PrimaryMode.
+        verify(spy, times(0)).startReplication(any(), any(), any());
+        closeShards(primaryShard);
+    }
+
     public void testReplicationOnDone() throws IOException {
         SegmentReplicationTargetService spy = spy(sut);
         IndexShard spyShard = spy(indexShard);
@@ -250,4 +269,17 @@ public void testBeforeIndexShardClosed_CancelsOngoingReplications() {
         sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY);
         verify(spy, times(1)).cancel(any());
     }
+
+    /**
+     * Move the {@link SegmentReplicationTarget} object through its {@link SegmentReplicationState.Stage} values in order
+     * until the final, non-terminal stage.
+     */
+    private void moveTargetToFinalStage(SegmentReplicationTarget target) {
+        SegmentReplicationState.Stage[] stageValues = SegmentReplicationState.Stage.values();
+        assertEquals(target.state().getStage(), SegmentReplicationState.Stage.INIT);
+        // Skip the first two stages (DONE and INIT) and iterate until the last value
+        for (int i = 2; i < stageValues.length; i++) {
+            target.state().setStage(stageValues[i]);
+        }
+    }
 }
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java
index a0944ee249859..11217a46b3c69 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java
@@ -8,29 +8,52 @@
 
 package org.opensearch.indices.replication;
 
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexFormatTooNewException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexFormatTooNewException;
+import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.store.ByteBuffersDataOutput;
 import org.apache.lucene.store.ByteBuffersIndexOutput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.analysis.MockAnalyzer;
+import org.apache.lucene.tests.util.TestUtil;
 import org.apache.lucene.util.Version;
 import org.junit.Assert;
 import org.mockito.Mockito;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.OpenSearchException;
 import org.opensearch.action.ActionListener;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.IndexSettings;
 import org.opensearch.index.engine.NRTReplicationEngineFactory;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardTestCase;
+import org.opensearch.index.shard.ShardId;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.StoreFileMetadata;
+import org.opensearch.index.store.StoreTests;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.test.DummyShardLock;
+import org.opensearch.test.IndexSettingsModule;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.Random;
+import java.util.Arrays;
 
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -69,7 +92,12 @@ public class SegmentReplicationTargetTests extends IndexShardTestCase {
         0
     );
 
-    SegmentInfos testSegmentInfos;
+    private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings(
+        "index",
+        Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build()
+    );
+
+    private SegmentInfos testSegmentInfos;
 
     @Override
     public void setUp() throws Exception {
@@ -135,6 +163,7 @@ public void getSegmentFiles(
             public void onResponse(Void replicationResponse) {
                 try {
                     verify(spyIndexShard, times(1)).finalizeReplication(any(), anyLong());
+                    segrepTarget.markAsDone();
                 } catch (IOException ex) {
                     Assert.fail();
                 }
@@ -142,7 +171,7 @@ public void onResponse(Void replicationResponse) {
 
             @Override
             public void onFailure(Exception e) {
-                logger.error("Unexpected test error", e);
+                logger.error("Unexpected onFailure", e);
                 Assert.fail();
             }
         });
@@ -186,6 +215,7 @@ public void onResponse(Void replicationResponse) {
             @Override
             public void onFailure(Exception e) {
                 assertEquals(exception, e.getCause().getCause());
+                segrepTarget.fail(new OpenSearchException(e), false);
             }
         });
     }
@@ -228,6 +258,7 @@ public void onResponse(Void replicationResponse) {
             @Override
             public void onFailure(Exception e) {
                 assertEquals(exception, e.getCause().getCause());
+                segrepTarget.fail(new OpenSearchException(e), false);
             }
         });
     }
@@ -272,6 +303,7 @@ public void onResponse(Void replicationResponse) {
             @Override
             public void onFailure(Exception e) {
                 assertEquals(exception, e.getCause());
+                segrepTarget.fail(new OpenSearchException(e), false);
             }
         });
     }
@@ -316,6 +348,7 @@ public void onResponse(Void replicationResponse) {
             @Override
             public void onFailure(Exception e) {
                 assertEquals(exception, e.getCause());
+                segrepTarget.fail(new OpenSearchException(e), false);
             }
         });
     }
@@ -357,14 +390,123 @@ public void onResponse(Void replicationResponse) {
             @Override
             public void onFailure(Exception e) {
                 assert (e instanceof IllegalStateException);
+                segrepTarget.fail(new OpenSearchException(e), false);
+            }
+        });
+    }
+
+    /**
+     * This test ensures that new files generated on the primary (due to delete operations) are not considered missing on the replica
+     * @throws IOException
+     */
+    public void test_MissingFiles_NotCausingFailure() throws IOException {
+        int docCount = 1 + random().nextInt(10);
+        // Generate a list of MetadataSnapshot containing two elements. The second snapshot contains extra files
+        // generated due to delete operations. These two snapshots can then be used in the test to mock the primary shard
+        // snapshot (2nd element, which contains the delete operations) and the replica's existing snapshot (1st element).
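+        // Below, get(0) stands in for the replica's local snapshot and get(1) for the primary's snapshot with the extra .liv file.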
+        List<Store.MetadataSnapshot> storeMetadataSnapshots = generateStoreMetadataSnapshot(docCount);
+
+        SegmentReplicationSource segrepSource = new SegmentReplicationSource() {
+            @Override
+            public void getCheckpointMetadata(
+                long replicationId,
+                ReplicationCheckpoint checkpoint,
+                ActionListener<CheckpointInfoResponse> listener
+            ) {
+                listener.onResponse(
+                    new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1), buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))
+                );
+            }
+
+            @Override
+            public void getSegmentFiles(
+                long replicationId,
+                ReplicationCheckpoint checkpoint,
+                List<StoreFileMetadata> filesToFetch,
+                Store store,
+                ActionListener<GetSegmentFilesResponse> listener
+            ) {
+                listener.onResponse(new GetSegmentFilesResponse(filesToFetch));
+            }
+        };
+        SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock(
+            SegmentReplicationTargetService.SegmentReplicationListener.class
+        );
+
+        segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener));
+        when(segrepTarget.getMetadataSnapshot()).thenReturn(storeMetadataSnapshots.get(0));
+        segrepTarget.startReplication(new ActionListener<Void>() {
+            @Override
+            public void onResponse(Void replicationResponse) {
+                logger.info("No error processing checkpoint info");
+                segrepTarget.markAsDone();
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                logger.error("Unexpected onFailure", e);
+                Assert.fail();
+            }
+        });
+    }
+
+    /**
+     * Generates a list of Store.MetadataSnapshot with two elements, where the second snapshot has extra files due to a
+     * delete operation. Both snapshots are taken from the same store so that identical files share the same checksum.
+     * @param docCount number of documents to index before taking the first snapshot
+     * @return the two snapshots, without and with deletes
+     * @throws IOException on store failures
+     */
+    private List<Store.MetadataSnapshot> generateStoreMetadataSnapshot(int docCount) throws IOException {
+        List<Document> docList = new ArrayList<>();
+        for (int i = 0; i < docCount; i++) {
+            Document document = new Document();
+            String text = new String(new char[] { (char) (97 + i), (char) (97 + i) });
+            document.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+            document.add(new TextField("str", text, Field.Store.YES));
+            docList.add(document);
+        }
+        long seed = random().nextLong();
+        Random random = new Random(seed);
+        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
+        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+        iwc.setUseCompoundFile(true);
+        final ShardId shardId = new ShardId("index", "_na_", 1);
+        Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
+        IndexWriter writer = new IndexWriter(store.directory(), iwc);
+        for (Document d : docList) {
+            writer.addDocument(d);
+        }
+        writer.commit();
+        Store.MetadataSnapshot storeMetadata = store.getMetadata();
+        // Delete one document to generate .liv file
+        writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(docCount))));
+        writer.commit();
+        Store.MetadataSnapshot storeMetadataWithDeletes = store.getMetadata();
+        deleteContent(store.directory());
+        writer.close();
+        store.close();
+        return Arrays.asList(storeMetadata, storeMetadataWithDeletes);
+    }
+
+    private static void deleteContent(Directory directory) throws IOException {
+        final String[] files = directory.listAll();
+        final List<IOException> exceptions = new ArrayList<>();
+        for (String file : files) {
+            try {
+                directory.deleteFile(file);
+            } catch (NoSuchFileException | FileNotFoundException e) {
+                // ignore
+            } catch (IOException e) {
+                exceptions.add(e);
+            }
+        }
+        ExceptionsHelper.rethrowAndSuppress(exceptions);
+    }
+
     @Override
     public void tearDown() throws Exception {
         super.tearDown();
-        segrepTarget.markAsDone();
         closeShards(spyIndexShard, indexShard);
     }
 }
diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java
index 5c3c43af9cb66..b730dc01c4871 100644
--- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java
+++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java
@@ -14,6 +14,7 @@
 import org.opensearch.action.search.CreatePitController;
 import org.opensearch.action.search.CreatePitRequest;
 import org.opensearch.action.search.CreatePitResponse;
+import org.opensearch.action.search.PitTestsUtil;
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.Priority;
@@ -23,6 +24,9 @@
 import org.opensearch.search.builder.PointInTimeBuilder;
 import org.opensearch.search.sort.SortOrder;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.IndicesService;
 
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
@@ -63,6 +67,7 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti
         request.setIndices(new String[] { "index" });
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
+        PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
         client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
         SearchResponse searchResponse = client().prepareSearch("index")
             .setSize(2)
@@ -72,7 +77,11 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti
         SearchService service = getInstanceFromNode(SearchService.class);
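+        // A PIT holds one reader context per shard; the stats checks below cover shard 0 and shard 1 of the two-shard index.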
         assertEquals(2, service.getActiveContexts());
+        validatePitStats("index", 1, 0, 0);
+        validatePitStats("index", 1, 0, 1);
         service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test
+        validatePitStats("index", 0, 1, 0);
+        validatePitStats("index", 0, 1, 1);
     }
 
     public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, InterruptedException {
@@ -86,9 +95,15 @@
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse response = execute.get();
+        PitTestsUtil.assertUsingGetAllPits(client(), response.getId(), response.getCreationTime());
         assertEquals(4, response.getSuccessfulShards());
         assertEquals(4, service.getActiveContexts());
+
+        validatePitStats("index", 1, 0, 0);
+        validatePitStats("index1", 1, 0, 0);
         service.doClose();
+        validatePitStats("index", 0, 1, 0);
+        validatePitStats("index1", 0, 1, 0);
     }
 
     public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, InterruptedException {
@@ -99,7 +114,7 @@
         request.setIndices(new String[] { "index" });
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
-
+        PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
         client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
         SearchResponse searchResponse = client().prepareSearch("index")
             .setSize(2)
@@ -109,7 +124,11 @@
         SearchService service = getInstanceFromNode(SearchService.class);
         assertEquals(2, service.getActiveContexts());
+        validatePitStats("index", 1, 0, 0);
+        validatePitStats("index", 1, 0, 1);
         service.doClose();
+        validatePitStats("index", 0, 1, 0);
+        validatePitStats("index", 0, 1, 1);
     }
 
     public void testCreatePITWithNonExistentIndex() {
@@ -128,7 +147,7 @@
         service.doClose();
     }
 
-    public void testCreatePITOnCloseIndex() {
+    public void testCreatePITOnCloseIndex() throws ExecutionException, InterruptedException {
         createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build());
         client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
         client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
@@ -144,6 +163,7 @@
         SearchService service = getInstanceFromNode(SearchService.class);
         assertEquals(0, service.getActiveContexts());
+        PitTestsUtil.assertGetAllPitsEmpty(client());
         service.doClose();
     }
 
@@ -165,6 +185,7 @@ public void testPitSearchOnDeletedIndex() throws ExecutionException, Interrupted
         });
         assertTrue(ex.getMessage().contains("no such index [index]"));
         SearchService service = getInstanceFromNode(SearchService.class);
+        PitTestsUtil.assertGetAllPitsEmpty(client());
         assertEquals(0, service.getActiveContexts());
         service.doClose();
     }
 
@@ -190,8 +211,12 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx
         request.setIndices(new String[] { "index" });
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
+        PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
         SearchService service = getInstanceFromNode(SearchService.class);
         assertEquals(2, service.getActiveContexts());
+        validatePitStats("index", 1, 0, 0);
+        validatePitStats("index", 1, 0, 1);
+
         client().admin().indices().prepareClose("index").get();
         SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> {
             SearchResponse searchResponse = client().prepareSearch()
@@ -201,6 +226,7 @@
         });
         assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException"));
         assertEquals(0, service.getActiveContexts());
+        PitTestsUtil.assertGetAllPitsEmpty(client());
 
         // PIT reader contexts are lost after close, verifying it with open index api
         client().admin().indices().prepareOpen("index").get();
@@ -239,7 +265,10 @@ public void testMaxOpenPitContexts() throws Exception {
                     + "This limit can be set by changing the [search.max_open_pit_context] setting."
             )
         );
+        final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY);
+        validatePitStats("index", maxPitContexts, 0, 0);
         service.doClose();
+        validatePitStats("index", 0, maxPitContexts, 0);
     }
 
     public void testOpenPitContextsConcurrently() throws Exception {
@@ -285,7 +314,9 @@ public void testOpenPitContextsConcurrently() throws Exception {
             thread.join();
         }
         assertThat(service.getActiveContexts(), equalTo(maxPitContexts));
+        validatePitStats("index", maxPitContexts, 0, 0);
         service.doClose();
+        validatePitStats("index", 0, maxPitContexts, 0);
     }
 
     /**
@@ -314,6 +345,7 @@ public void testPitAfterUpdateIndex() throws Exception {
         request.setIndices(new String[] { "test" });
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
+        PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
         SearchService service = getInstanceFromNode(SearchService.class);
 
         assertThat(
@@ -453,9 +485,12 @@
                     .getTotalHits().value,
                 Matchers.equalTo(0L)
             );
+            validatePitStats("test", 1, 0, 0);
         } finally {
             service.doClose();
             assertEquals(0, service.getActiveContexts());
+            validatePitStats("test", 0, 1, 0);
+            PitTestsUtil.assertGetAllPitsEmpty(client());
         }
     }
 
@@ -467,6 +502,7 @@ public void testConcurrentSearches() throws Exception {
         request.setIndices(new String[] { "index" });
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
+        PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
         Thread[] threads = new Thread[5];
         CountDownLatch latch = new CountDownLatch(threads.length);
 
@@ -495,7 +531,21 @@
         SearchService service = getInstanceFromNode(SearchService.class);
         assertEquals(2, service.getActiveContexts());
+        validatePitStats("index", 1, 0, 0);
+        validatePitStats("index", 1, 0, 1);
         service.doClose();
         assertEquals(0, service.getActiveContexts());
+        validatePitStats("index", 0, 1, 0);
+        validatePitStats("index", 0, 1, 1);
+        PitTestsUtil.assertGetAllPitsEmpty(client());
+    }
+
+    public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException,
+        InterruptedException {
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService indexService = indicesService.indexServiceSafe(resolveIndex(index));
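+        // PIT stats are tracked per shard, so fetch the specific shard's search stats.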
+        IndexShard indexShard = indexService.getShard(shardId);
+        assertEquals(expectedPitCurrent, indexShard.searchStats().getTotal().getPitCurrent());
+        assertEquals(expectedPitCount, indexShard.searchStats().getTotal().getPitCount());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java
index 89607b9201cd9..e69b2cc523638 100644
--- a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java
+++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java
@@ -11,6 +11,8 @@
 import org.junit.After;
 import org.junit.Before;
 import org.opensearch.action.ActionFuture;
+import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.LatchedActionListener;
 import org.opensearch.action.search.CreatePitAction;
@@ -76,6 +78,7 @@ public void testDeletePit() throws Exception {
         execute = client().execute(CreatePitAction.INSTANCE, request);
         pitResponse = execute.get();
         pitIds.add(pitResponse.getId());
+        validatePitStats("index", 10, 0);
         DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds);
         ActionFuture<DeletePitResponse> deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
         DeletePitResponse deletePITResponse = deleteExecute.get();
@@ -84,6 +87,7 @@ public void testDeletePit() throws Exception {
             assertTrue(pitIds.contains(deletePitInfo.getPitId()));
             assertTrue(deletePitInfo.isSuccessful());
         }
+        validatePitStats("index", 0, 10);
         /**
          * Checking deleting the same PIT id again results in succeeded
          */
@@ -102,6 +106,7 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception {
         CreatePitResponse pitResponse = execute.get();
         List<String> pitIds = new ArrayList<>();
         pitIds.add(pitResponse.getId());
+        validatePitStats("index", 5, 0);
 
         /**
          * Delete Pit #1
@@ -113,9 +118,11 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception {
             assertTrue(pitIds.contains(deletePitInfo.getPitId()));
             assertTrue(deletePitInfo.isSuccessful());
         }
+        validatePitStats("index", 0, 5);
         execute = client().execute(CreatePitAction.INSTANCE, request);
         pitResponse = execute.get();
         pitIds.add(pitResponse.getId());
+        validatePitStats("index", 5, 5);
         /**
          * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present)
          */
@@ -126,6 +133,7 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception {
             assertTrue(pitIds.contains(deletePitInfo.getPitId()));
             assertTrue(deletePitInfo.isSuccessful());
         }
+        validatePitStats("index", 0, 10);
     }
 
     public void testDeletePitWithValidAndInvalidIds() throws Exception {
@@ -148,6 +156,8 @@ public void testDeleteAllPits() throws Exception {
         client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get();
         ensureGreen();
         createPitOnIndex("index1");
+        validatePitStats("index", 5, 0);
+        validatePitStats("index1", 5, 0);
         DeletePitRequest deletePITRequest = new DeletePitRequest("_all");
 
         /**
@@ -160,6 +170,8 @@ public void testDeleteAllPits() throws Exception {
             assertThat(deletePitInfo.getPitId(), not(blankOrNullString()));
             assertTrue(deletePitInfo.isSuccessful());
         }
+        validatePitStats("index", 0, 5);
+        validatePitStats("index1", 0, 5);
         client().admin().indices().prepareDelete("index1").get();
     }
 
@@ -181,7 +193,6 @@ public Settings onNodeStopped(String nodeName) throws Exception {
                     DeletePitResponse deletePITResponse = execute.get();
                     for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) {
                         assertTrue(pitIds.contains(deletePitInfo.getPitId()));
-                        assertFalse(deletePitInfo.isSuccessful());
                     }
                 } catch (Exception e) {
                     throw new AssertionError(e);
@@ -205,9 +216,9 @@
     }
 
     public void testDeleteAllPitsWhileNodeDrop() throws Exception {
-        createPitOnIndex("index");
         createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build());
         client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get();
+        createPitOnIndex("index1");
         ensureGreen();
         DeletePitRequest deletePITRequest = new DeletePitRequest("_all");
         internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
@@ -218,7 +229,6 @@ public Settings onNodeStopped(String nodeName) throws Exception {
                     DeletePitResponse deletePITResponse = execute.get();
                     for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) {
                         assertThat(deletePitInfo.getPitId(), not(blankOrNullString()));
-                        assertFalse(deletePitInfo.isSuccessful());
                     }
                 } catch (Exception e) {
                     assertTrue(e.getMessage().contains("Node not connected"));
@@ -226,18 +236,14 @@ public Settings onNodeStopped(String nodeName) throws Exception {
                 return super.onNodeStopped(nodeName);
             }
         });
-        ensureGreen();
         /**
-         * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context
-         * not found exceptions don't result in failures ( as deletion in one node is successful )
+         * When we invoke delete again, it returns success because all readers are already cleared. (The delete-all
+         * succeeded on the node that stayed up, and the restarted node cleared its active contexts on startup.)
         */
        ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
        DeletePitResponse deletePITResponse = execute.get();
-        for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) {
-            assertThat(deletePitInfo.getPitId(), not(blankOrNullString()));
-            assertTrue(deletePitInfo.isSuccessful());
-        }
+        assertEquals(0, deletePITResponse.getDeletePitResults().size());
         client().admin().indices().prepareDelete("index1").get();
     }
 
@@ -330,4 +336,16 @@ public void onFailure(Exception e) {}
         }
     }
 
+    public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount) throws ExecutionException,
+        InterruptedException {
+        IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
+        indicesStatsRequest.indices(index);
+        indicesStatsRequest.all();
+        IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get();
+        long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent();
+        long pitCount = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCount();
+        assertEquals(expectedPitCurrent, pitCurrent);
+        assertEquals(expectedPitCount, pitCount);
+    }
+
 }
diff --git a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java
similarity index 61%
rename from server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java
rename to server/src/test/java/org/opensearch/search/PitMultiNodeTests.java
index 27d8f27add898..29126d786770e 100644
--- a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java
+++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java
@@ -8,18 +8,27 @@
 
 package org.opensearch.search;
 
+import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.opensearch.action.ActionFuture;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.LatchedActionListener;
+import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
+import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
 import org.opensearch.action.search.CreatePitAction;
 import org.opensearch.action.search.CreatePitRequest;
 import org.opensearch.action.search.CreatePitResponse;
 import org.opensearch.action.search.DeletePitAction;
 import org.opensearch.action.search.DeletePitRequest;
 import org.opensearch.action.search.DeletePitResponse;
+import org.opensearch.action.search.GetAllPitNodesRequest;
+import org.opensearch.action.search.GetAllPitNodesResponse;
+import org.opensearch.action.search.GetAllPitsAction;
+import org.opensearch.action.search.PitTestsUtil;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.search.builder.PointInTimeBuilder;
@@ -27,15 +36,19 @@
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
 
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
 import static org.hamcrest.Matchers.containsString;
 import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
@@ -45,7 +58,7 @@
  * Multi node integration tests for PIT creation and search operation with PIT ID.
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class CreatePitMultiNodeTests extends OpenSearchIntegTestCase { +public class PitMultiNodeTests extends OpenSearchIntegTestCase { @Before public void setupIndex() throws ExecutionException, InterruptedException { @@ -70,6 +83,8 @@ public void testPit() throws Exception { .get(); assertEquals(2, searchResponse.getSuccessfulShards()); assertEquals(2, searchResponse.getTotalShards()); + validatePitStats("index", 2, 2); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); } public void testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception { @@ -82,6 +97,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { ExecutionException ex = expectThrows(ExecutionException.class, execute::get); assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); assertTrue(ex.getMessage().contains("Partial shards failure")); + validatePitStats("index", 0, 0); return super.onNodeStopped(nodeName); } }); @@ -95,6 +111,7 @@ public void testCreatePitWhileNodeDropWithAllowPartialCreationTrue() throws Exce public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); assertEquals(1, pitResponse.getSuccessfulShards()); assertEquals(2, pitResponse.getTotalShards()); SearchResponse searchResponse = client().prepareSearch("index") @@ -103,6 +120,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { .get(); assertEquals(1, searchResponse.getSuccessfulShards()); assertEquals(1, searchResponse.getTotalShards()); + validatePitStats("index", 1, 1); return super.onNodeStopped(nodeName); } }); @@ -124,6 +142,8 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertEquals(1, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getSkippedShards()); assertEquals(2, searchResponse.getTotalShards()); + validatePitStats("index", 1, 1); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); return super.onNodeStopped(nodeName); } }); @@ -264,7 +284,7 @@ public void testConcurrentCreatesWithDeletes() throws InterruptedException, Exec AtomicInteger numSuccess = new AtomicInteger(); TestThreadPool testThreadPool = null; try { - testThreadPool = new TestThreadPool(CreatePitMultiNodeTests.class.getName()); + testThreadPool = new TestThreadPool(PitMultiNodeTests.class.getName()); int concurrentRuns = randomIntBetween(20, 50); List<Runnable> operationThreads = new ArrayList<>(); @@ -312,4 +332,144 @@ public void onFailure(Exception e) {} ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); } } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedOpenContexts) throws ExecutionException, + InterruptedException { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + indicesStatsRequest.indices(index); + indicesStatsRequest.all(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); + long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent(); + long openContexts = indicesStatsResponse.getIndex(index).getTotal().search.getOpenContexts(); + assertEquals(expectedPitCurrent, 
pitCurrent); + assertEquals(expectedOpenContexts, openContexts); + } + + public void testGetAllPits() throws Exception { + client().admin().indices().prepareCreate("index1").get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + CreatePitResponse pitResponse1 = client().execute(CreatePitAction.INSTANCE, request).get(); + CreatePitResponse pitResponse2 = client().execute(CreatePitAction.INSTANCE, request).get(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List<DiscoveryNode> nodes = new LinkedList<>(); + for (ObjectCursor<DiscoveryNode> cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture<GetAllPitNodesResponse> execute1 = client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + assertEquals(3, getPitResponse.getPitInfos().size()); + List<String> resultPitIds = getPitResponse.getPitInfos().stream().map(p -> p.getPitId()).collect(Collectors.toList()); + // asserting that we get all unique PIT IDs + Assert.assertTrue(resultPitIds.contains(pitResponse.getId())); + Assert.assertTrue(resultPitIds.contains(pitResponse1.getId())); + Assert.assertTrue(resultPitIds.contains(pitResponse2.getId())); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testGetAllPitsDuringNodeDrop() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(getDiscoveryNodes()); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture<GetAllPitNodesResponse> execute1 = client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + // we still get a pit id from the data node which is up + assertEquals(1, getPitResponse.getPitInfos().size()); + // failure for node drop + assertEquals(1, getPitResponse.failures().size()); + assertTrue(getPitResponse.failures().get(0).getMessage().contains("Failed node")); + return super.onNodeStopped(nodeName); + } + }); + } + + private DiscoveryNode[] getDiscoveryNodes() throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List<DiscoveryNode> nodes = new LinkedList<>(); + for (ObjectCursor<DiscoveryNode> cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { 
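+ // unwrap each hppc ObjectCursor to collect the data nodes listed in the cluster state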
DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + return disNodesArr; + } + + public void testConcurrentGetWithDeletes() throws InterruptedException, ExecutionException { + CreatePitRequest createPitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + List<String> pitIds = new ArrayList<>(); + String id = client().execute(CreatePitAction.INSTANCE, createPitRequest).get().getId(); + pitIds.add(id); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(getDiscoveryNodes()); + AtomicInteger numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(PitMultiNodeTests.class.getName()); + int concurrentRuns = randomIntBetween(20, 50); + + List<Runnable> operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + long randomDeleteThread = randomLongBetween(0, concurrentRuns - 1); + for (int i = 0; i < concurrentRuns; i++) { + int currentThreadIteration = i; + Runnable thread = () -> { + if (currentThreadIteration == randomDeleteThread) { + LatchedActionListener<GetAllPitNodesResponse> listener = new LatchedActionListener<>(new ActionListener<GetAllPitNodesResponse>() { + @Override + public void onResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + if (getAllPitNodesResponse.failures().isEmpty()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest, listener); + } else { + LatchedActionListener<DeletePitResponse> listener = new LatchedActionListener<>(new ActionListener<DeletePitResponse>() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index ecc28470b0eb2..1f824d40eb638 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1414,6 +1414,7 @@ public void testOpenReaderContext() { searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); future.actionGet(); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); } @@ -1428,7 +1429,7 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard ); } - public void testDeletePitReaderContext() { + public void testDeletePitReaderContext() throws ExecutionException, InterruptedException { createIndex("index"); 
SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture<ShardSearchContextId> future = new PlainActionFuture<>(); @@ -1442,6 +1443,8 @@ public void testDeletePitReaderContext() { contextIds.add(pitSearchContextIdForNode); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + validatePitStats("index", 1, 0, 0); DeletePitResponse deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); // assert true for reader context not found @@ -1449,19 +1452,7 @@ public void testDeletePitReaderContext() { assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); // adding this assert to showcase behavior difference assertFalse(searchService.freeReaderContext(future.actionGet())); - } - - public void testDeleteAllPitReaderContexts() { - createIndex("index"); - SearchService searchService = getInstanceFromNode(SearchService.class); - PlainActionFuture<ShardSearchContextId> future = new PlainActionFuture<>(); - searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - future.actionGet(); - searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - future.actionGet(); - assertThat(searchService.getActiveContexts(), equalTo(2)); - searchService.freeAllPitContexts(); - assertThat(searchService.getActiveContexts(), equalTo(0)); + validatePitStats("index", 0, 1, 0); } public void testPitContextMaxKeepAlive() { @@ -1484,9 +1475,10 @@ public void testPitContextMaxKeepAlive() { ex.getMessage() ); assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); } - public void testUpdatePitId() { + public void testUpdatePitId() throws ExecutionException, InterruptedException { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture<ShardSearchContextId> future = new PlainActionFuture<>(); @@ -1506,7 +1498,10 @@ public void testUpdatePitId() { assertTrue(updateResponse.getKeepAlive() == updateRequest.getKeepAlive()); assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + validatePitStats("index", 1, 0, 0); assertTrue(searchService.freeReaderContext(future.actionGet())); + validatePitStats("index", 0, 1, 0); } public void testUpdatePitIdMaxKeepAlive() { @@ -1538,6 +1533,7 @@ public void testUpdatePitIdMaxKeepAlive() { ex.getMessage() ); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); } @@ -1558,5 +1554,15 @@ public void testUpdatePitIdWithInvalidReaderId() { assertEquals("No search context found for id [" + id.getId() + "]", ex.getMessage()); assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); + } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException, + InterruptedException { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex(index)); + IndexShard 
indexShard = indexService.getShard(shardId); + assertEquals(expectedPitCurrent, indexShard.searchStats().getTotal().getPitCurrent()); + assertEquals(expectedPitCount, indexShard.searchStats().getTotal().getPitCount()); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index 421865013a28c..111ce23f8a0cb 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -80,7 +80,6 @@ import org.opensearch.search.aggregations.metrics.InternalSumTests; import org.opensearch.search.aggregations.metrics.InternalAvgTests; import org.opensearch.search.aggregations.metrics.InternalCardinalityTests; -import org.opensearch.search.aggregations.metrics.InternalGeoBoundsTests; import org.opensearch.search.aggregations.metrics.InternalGeoCentroidTests; import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesRanksTests; import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesTests; @@ -142,7 +141,6 @@ private static List> getAggsTests() { aggsTests.add(new InternalStatsBucketTests()); aggsTests.add(new InternalExtendedStatsTests()); aggsTests.add(new InternalExtendedStatsBucketTests()); - aggsTests.add(new InternalGeoBoundsTests()); aggsTests.add(new InternalGeoCentroidTests()); aggsTests.add(new InternalHistogramTests()); aggsTests.add(new InternalDateHistogramTests()); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java deleted file mode 100644 index e132426680fc8..0000000000000 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.search.aggregations.metrics; - -import org.opensearch.search.aggregations.BaseAggregationTestCase; - -public class GeoBoundsTests extends BaseAggregationTestCase<GeoBoundsAggregationBuilder> { - - @Override - protected GeoBoundsAggregationBuilder createTestAggregatorBuilder() { - GeoBoundsAggregationBuilder factory = new GeoBoundsAggregationBuilder(randomAlphaOfLengthBetween(1, 20)); - String field = randomAlphaOfLengthBetween(3, 20); - factory.field(field); - if (randomBoolean()) { - factory.wrapLongitude(randomBoolean()); - } - if (randomBoolean()) { - factory.missing("0,0"); - } - return factory; - } - -} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 39f86942f2c4b..4b8eec70f2c1a 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -203,6 +203,7 @@ import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1754,6 +1755,8 @@ public void onFailure(final Exception e) { final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); + transportService.getTaskManager() + .setTaskResourceTrackingService(new TaskResourceTrackingService(settings, clusterSettings, threadPool)); repositoriesService = new RepositoriesService( settings, clusterService, diff --git a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java index 0f09b0de34206..ab49109eb8247 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.FakeTcpChannel; @@ -59,6 +60,7 @@ import java.util.Set; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -67,10 +69,12 @@ public class TaskManagerTests extends OpenSearchTestCase { private ThreadPool threadPool; + private AtomicReference<RunnableTaskExecutionListener> runnableTaskListener; @Before public void setupThreadPool() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); + runnableTaskListener = new AtomicReference<>(); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); } @After diff --git a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java new file mode 100644 index 0000000000000..8ba23c5d3219c --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java 
@@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; +import org.opensearch.action.search.SearchTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.tasks.ResourceStats.MEMORY; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +public class TaskResourceTrackingServiceTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private TaskResourceTrackingService taskResourceTrackingService; + + @Before + public void setup() { + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), new AtomicReference<>()); + taskResourceTrackingService = new TaskResourceTrackingService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @After + public void terminateThreadPool() { + terminate(threadPool); + } + + public void testThreadContextUpdateOnTrackingStart() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + + Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + + String key = "KEY"; + String value = "VALUE"; + + // Prepare thread context + threadPool.getThreadContext().putHeader(key, value); + threadPool.getThreadContext().putTransient(key, value); + threadPool.getThreadContext().addResponseHeader(key, value); + + ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); + + // All headers should be preserved and Task Id should also be included in thread context + verifyThreadContextFixedHeaders(key, value); + assertEquals((long) threadPool.getThreadContext().getTransient(TASK_ID), task.getId()); + + storedContext.restore(); + + // Post restore only task id should be removed from the thread context + verifyThreadContextFixedHeaders(key, value); + assertNull(threadPool.getThreadContext().getTransient(TASK_ID)); + } + + public void testStopTrackingHandlesCurrentActiveThread() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); + long threadId = Thread.currentThread().getId(); + taskResourceTrackingService.taskExecutionStartedOnThread(task.getId(), threadId); + + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue()); + + taskResourceTrackingService.stopTracking(task); + + // Makes sure stop tracking marks the current active thread inactive and refreshes the resource stats before returning. 
+ assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + assertTrue(task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue() > 0); + } + + private void verifyThreadContextFixedHeaders(String key, String value) { + assertEquals(threadPool.getThreadContext().getHeader(key), value); + assertEquals(threadPool.getThreadContext().getTransient(key), value); + assertEquals(threadPool.getThreadContext().getResponseHeaders().get(key).get(0), value); + } + +} diff --git a/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java b/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java new file mode 100644 index 0000000000000..641fdef4891bd --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks.consumer; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.ResourceStatsType; +import org.opensearch.tasks.ResourceUsageMetric; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class SearchShardTaskDetailsLogMessageTests extends OpenSearchSingleNodeTestCase { + public void testTaskDetailsLogHasJsonFields() { + SearchShardTask task = new SearchShardTask( + 0, + "n/a", + "n/a", + "test", + null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"), + () -> "test_metadata" + ); + SearchShardTaskDetailsLogMessage p = new SearchShardTaskDetailsLogMessage(task); + + assertThat(p.getValueFor("taskId"), equalTo("0")); + assertThat(p.getValueFor("type"), equalTo("n/a")); + assertThat(p.getValueFor("action"), equalTo("n/a")); + assertThat(p.getValueFor("description"), equalTo("test")); + assertThat(p.getValueFor("parentTaskId"), equalTo(null)); + // when no resource information present + assertThat(p.getValueFor("resource_stats"), equalTo("{}")); + assertThat(p.getValueFor("metadata"), equalTo("test_metadata")); + + task.startThreadResourceTracking( + 0, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, 0L), + new ResourceUsageMetric(ResourceStats.CPU, 0L) + ); + task.updateThreadResourceStats( + 0, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, 100), + new ResourceUsageMetric(ResourceStats.CPU, 100) + ); + assertThat( + p.getValueFor("resource_stats"), + equalTo("{0=[{cpu_time_in_nanos=100, memory_in_bytes=100}, stats_type=worker_stats, is_active=true, threadId=0]}") + ); + } +} diff --git a/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java b/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java new file mode 100644 index 0000000000000..a8fd3623ef09d --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks.consumer; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.MockAppender; +import org.opensearch.common.settings.Settings; +import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.ResourceStatsType; +import org.opensearch.tasks.ResourceUsageMetric; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Collections; + +import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_FREQUENCY; +import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_SIZE; + +public class TopNSearchTasksLoggerTests extends OpenSearchSingleNodeTestCase { + static MockAppender appender; + static Logger searchLogger = LogManager.getLogger(TopNSearchTasksLogger.TASK_DETAILS_LOG_PREFIX + ".search"); + + private TopNSearchTasksLogger topNSearchTasksLogger; + + @BeforeClass + public static void init() throws IllegalAccessException { + appender = new MockAppender("trace_appender"); + appender.start(); + Loggers.addAppender(searchLogger, appender); + } + + @AfterClass + public static void cleanup() { + Loggers.removeAppender(searchLogger, appender); + appender.stop(); + } + + public void testLoggerWithTasks() { + final Settings settings = Settings.builder().put(LOG_TOP_QUERIES_SIZE, 1).put(LOG_TOP_QUERIES_FREQUENCY, "0ms").build(); + topNSearchTasksLogger = new TopNSearchTasksLogger(settings); + generateTasks(5); + LogEvent logEvent = appender.getLastEventAndReset(); + assertNotNull(logEvent); + assertEquals(Level.INFO, logEvent.getLevel()); + assertTrue(logEvent.getMessage().getFormattedMessage().contains("cpu_time_in_nanos=300, memory_in_bytes=300")); + } + + public void testLoggerWithoutTasks() { + final Settings settings = Settings.builder().put(LOG_TOP_QUERIES_SIZE, 1).put(LOG_TOP_QUERIES_FREQUENCY, "500ms").build(); + topNSearchTasksLogger = new TopNSearchTasksLogger(settings); + + assertNull(appender.getLastEventAndReset()); + } + + public void testLoggerWithHighFrequency() { + // Set the frequency to a very large value and confirm that nothing gets written to the log file. + final Settings settings = Settings.builder().put(LOG_TOP_QUERIES_SIZE, 1).put(LOG_TOP_QUERIES_FREQUENCY, "10m").build(); + topNSearchTasksLogger = new TopNSearchTasksLogger(settings); + generateTasks(5); + generateTasks(2); + + assertNull(appender.getLastEventAndReset()); + } + + // Generates search tasks and feeds each one to the top N search tasks logger consumer. 
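+ // Task i records i * 100 ns of CPU and i * 100 bytes of memory, so resource usage grows with the task index.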
+ public void generateTasks(int numberOfTasks) { + for (int i = 0; i < numberOfTasks; i++) { + Task task = new SearchShardTask( + i, + "n/a", + "n/a", + "test", + null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"), + () -> "n/a" + ); + task.startThreadResourceTracking( + i, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, 0L), + new ResourceUsageMetric(ResourceStats.CPU, 0L) + ); + task.updateThreadResourceStats( + i, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, i * 100L), + new ResourceUsageMetric(ResourceStats.CPU, i * 100L) + ); + topNSearchTasksLogger.accept(task); + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 5e22a7c145a39..b3f062aef4fbe 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -76,6 +76,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; @@ -139,6 +140,21 @@ protected ReplicationGroup createGroup(int replicas, Settings settings) throws I return new ReplicationGroup(metadata); } + protected ReplicationGroup createGroup(int replicas, Settings settings, EngineFactory engineFactory) throws IOException { + return createGroup(replicas, settings, indexMapping, engineFactory); + } + + protected ReplicationGroup createGroup(int replicas, Settings settings, String mappings, EngineFactory engineFactory) + throws IOException { + IndexMetadata metadata = buildIndexMetadata(replicas, settings, mappings); + return new ReplicationGroup(metadata) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return engineFactory; + } + }; + } + protected IndexMetadata buildIndexMetadata(int replicas) throws IOException { return buildIndexMetadata(replicas, indexMapping); } @@ -191,6 +207,7 @@ protected class ReplicationGroup implements AutoCloseable, Iterable private final AtomicInteger docId = new AtomicInteger(); boolean closed = false; private volatile ReplicationTargets replicationTargets; + private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer( new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index c5ee54450cce2..f446538acccbb 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -33,9 +33,15 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import 
org.apache.lucene.store.IndexInput; +import org.junit.Assert; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; @@ -58,6 +64,7 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -82,6 +89,8 @@ import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.Translog; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; @@ -94,7 +103,14 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; @@ -112,6 +128,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -122,6 +139,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; /** @@ -538,6 +556,7 @@ protected IndexShard newShard( globalCheckpointSyncer, retentionLeaseSyncer, breakerService, + new InternalTranslogFactory(), checkpointPublisher, remoteStore ); @@ -1133,4 +1152,117 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { } }; } + + /** + * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. + * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that + * writes all segments directly to the target. 
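+ * Once replication completes, the listener asserts that each replica's segment metadata and commit user data match the primary's.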
+ */ + public final void replicateSegments(IndexShard primaryShard, List<IndexShard> replicaShards) throws IOException, InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + Store.MetadataSnapshot primaryMetadata; + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); + } + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + + final ReplicationCollection<SegmentReplicationTarget> replicationCollection = new ReplicationCollection<>(logger, threadPool); + final SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + Store store, + ActionListener<GetSegmentFilesResponse> listener + ) { + try ( + final ReplicationCollection.ReplicationRef<SegmentReplicationTarget> replicationRef = replicationCollection.get( + replicationId + ) + ) { + writeFileChunks(replicationRef.get(), primaryShard, filesToFetch.toArray(new StoreFileMetadata[] {})); + } catch (IOException e) { + listener.onFailure(e); + } + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); + } + }; + + for (IndexShard replica : replicaShards) { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + ReplicationCheckpoint.empty(replica.shardId), + replica, + source, + new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + try (final GatedCloseable<SegmentInfos> snapshot = replica.getSegmentInfosSnapshot()) { + final SegmentInfos replicaInfos = snapshot.get(); + final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); + final Store.RecoveryDiff recoveryDiff = primaryMetadata.recoveryDiff(replicaMetadata); + assertTrue(recoveryDiff.missing.isEmpty()); + assertTrue(recoveryDiff.different.isEmpty()); + assertEquals(recoveryDiff.identical.size(), primaryMetadata.size()); + assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + countDownLatch.countDown(); + } + + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.error("Unexpected replication failure in test", e); + Assert.fail("test replication should not fail: " + e); + } + } + ); + replicationCollection.start(target, TimeValue.timeValueMillis(5000)); + target.startReplication(new ActionListener<>() { + @Override + public void onResponse(Void o) { + replicationCollection.markAsDone(target.getId()); + } + + @Override + public void onFailure(Exception e) { + replicationCollection.fail(target.getId(), new OpenSearchException("Segment Replication failed", e), true); + } + }); + } + countDownLatch.await(3, TimeUnit.SECONDS); + } + + private void writeFileChunks(SegmentReplicationTarget target, IndexShard primary, StoreFileMetadata[] files) throws IOException { + for (StoreFileMetadata md : files) { + try (IndexInput in = primary.store().directory().openInput(md.name(), 
IOContext.READONCE)) { + int pos = 0; + while (pos < md.length()) { + int length = between(1, Math.toIntExact(md.length() - pos)); + byte[] buffer = new byte[length]; + in.readBytes(buffer, 0, length); + target.writeFileChunk(md, pos, new BytesArray(buffer), pos + length == md.length(), 0, mock(ActionListener.class)); + pos += length; + } + } + } + } } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java index f138de152a488..a4099d66de28e 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java @@ -117,7 +117,6 @@ import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -129,7 +128,6 @@ import org.opensearch.search.aggregations.metrics.ParsedAvg; import org.opensearch.search.aggregations.metrics.ParsedCardinality; import org.opensearch.search.aggregations.metrics.ParsedExtendedStats; -import org.opensearch.search.aggregations.metrics.ParsedGeoBounds; import org.opensearch.search.aggregations.metrics.ParsedGeoCentroid; import org.opensearch.search.aggregations.metrics.ParsedHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.ParsedHDRPercentiles; @@ -261,7 +259,6 @@ public ReduceContext forFinalReduction() { map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c)); map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c)); - map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c)); map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 8d8df2fec39f9..1ab7785b17f5e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2097,6 +2097,7 @@ protected Collection> getMockPlugins() { if (addMockGeoShapeFieldMapper()) { mocks.add(TestGeoShapeFieldMapperPlugin.class); } + return Collections.unmodifiableList(mocks); } diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index c2e90f0369e6c..478b692fb06ef 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ 
-127,6 +127,7 @@ public void assertAfterTest() throws Exception { /** * Returns the number of data and cluster-manager eligible nodes in the cluster. */ + // TODO: Add abstract keyword after removing the deprecated numDataAndMasterNodes() public int numDataAndClusterManagerNodes() { return numDataAndMasterNodes(); } diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index 54c92f4d519aa..a36dc26685eb4 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -43,6 +43,7 @@ import org.opensearch.gateway.ReplicaShardAllocator; import org.opensearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; import java.util.Collections; @@ -71,6 +72,7 @@ public class TestGatewayAllocator extends GatewayAllocator { Map<String, Map<ShardId, ShardRouting>> knownAllocations = new HashMap<>(); DiscoveryNodes currentNodes = DiscoveryNodes.EMPTY_NODES; + Map<String, ReplicationCheckpoint> shardIdNodeToReplicationCheckPointMap = new HashMap<>(); PrimaryShardAllocator primaryShardAllocator = new PrimaryShardAllocator() { @Override @@ -90,7 +92,8 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR routing -> new NodeGatewayStartedShards( currentNodes.get(routing.currentNodeId()), routing.allocationId().getId(), - routing.primary() + routing.primary(), + getReplicationCheckpoint(shardId, routing.currentNodeId()) ) ) ); @@ -99,6 +102,10 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR } }; + private ReplicationCheckpoint getReplicationCheckpoint(ShardId shardId, String nodeName) { + return shardIdNodeToReplicationCheckPointMap.getOrDefault(getReplicationCheckPointKey(shardId, nodeName), null); + } + ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator() { @Override protected AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> fetchData(ShardRouting shard, RoutingAllocation allocation) { @@ -156,4 +163,12 @@ public void allocateUnassigned( public void addKnownAllocation(ShardRouting shard) { knownAllocations.computeIfAbsent(shard.currentNodeId(), id -> new HashMap<>()).put(shard.shardId(), shard); } + + public String getReplicationCheckPointKey(ShardId shardId, String nodeName) { + return shardId.toString() + "_" + nodeName; + } + + public void addReplicationCheckpoint(ShardId shardId, String nodeName, ReplicationCheckpoint replicationCheckpoint) { + shardIdNodeToReplicationCheckPointMap.putIfAbsent(getReplicationCheckPointKey(shardId, nodeName), replicationCheckpoint); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java index e60871f67ea54..677ec7a0a6600 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; import 
org.opensearch.tasks.TaskManager; @@ -127,6 +128,21 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { super.waitForTaskCompletion(task, untilInNanos); } + @Override + public ThreadContext.StoredContext taskExecutionStarted(Task task) { + for (MockTaskManagerListener listener : listeners) { + listener.taskExecutionStarted(task, false); + } + + ThreadContext.StoredContext storedContext = super.taskExecutionStarted(task); + return () -> { + for (MockTaskManagerListener listener : listeners) { + listener.taskExecutionStarted(task, true); + } + storedContext.restore(); + }; + } + public void addListener(MockTaskManagerListener listener) { listeners.add(listener); } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java index eb8361ac552fc..f15f878995aa2 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java @@ -43,4 +43,7 @@ public interface MockTaskManagerListener { void onTaskUnregistered(Task task); void waitForTaskCompletion(Task task); + + void taskExecutionStarted(Task task, Boolean closeableInvoked); + } diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java index 9b9baebd540c3..c80b120ad0148 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java @@ -261,11 +261,16 @@ private static TransportAddress[] extractTransportAddresses(TransportService tra } @Override - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set<String> taskHeaders) { + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set<String> taskHeaders + ) { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { return new MockTaskManager(settings, threadPool, taskHeaders); } else { - return super.createTaskManager(settings, threadPool, taskHeaders); + return super.createTaskManager(settings, clusterSettings, threadPool, taskHeaders); } } @@ -530,7 +535,6 @@ public void clearCallback() { /** * Adds a new handling behavior that is used when the defined request is received. - * */ public void addRequestHandlingBehavior( String actionName, diff --git a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java index 5f8611d99f0a0..2d97d5bffee01 100644 --- a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java @@ -40,6 +40,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicReference; public class TestThreadPool extends ThreadPool { @@ -47,12 +48,29 @@ public class TestThreadPool extends ThreadPool { private volatile boolean returnRejectingExecutor = false; private volatile ThreadPoolExecutor rejectingExecutor; + public TestThreadPool( + String name, + AtomicReference<RunnableTaskExecutionListener> runnableTaskListener, + ExecutorBuilder<?>... 
customBuilders + ) { + this(name, Settings.EMPTY, runnableTaskListener, customBuilders); + } + public TestThreadPool(String name, ExecutorBuilder<?>... customBuilders) { this(name, Settings.EMPTY, customBuilders); } public TestThreadPool(String name, Settings settings, ExecutorBuilder<?>... customBuilders) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); + this(name, settings, null, customBuilders); + } + + public TestThreadPool( + String name, + Settings settings, + AtomicReference<RunnableTaskExecutionListener> runnableTaskListener, + ExecutorBuilder<?>... customBuilders + ) { + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), runnableTaskListener, customBuilders); + } @Override