Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added security CI workflow and OS build qualifier #358

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
57 changes: 57 additions & 0 deletions .github/workflows/security-tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
name: Security tests
# This workflow is triggered on pull requests to main branch
on:
pull_request:
branches:
- '*'
push:
branches:
- '*'

jobs:
build:
# Job name
name: Build and Run Security tests
runs-on: ubuntu-latest
steps:
# This step uses the setup-java Github action: https://github.com/actions/setup-java
- name: Set Up JDK 11
uses: actions/setup-java@v1
with:
java-version: 11
# This step uses the checkout Github action: https://github.com/actions/checkout
- name: Checkout Branch
uses: actions/checkout@v2
# Security plugin dependency
- name: Checkout security
uses: actions/checkout@v2
with:
repository: 'opensearch-project/security'
path: security
ref: 'main'
- name: Build security
working-directory: ./security
run: |
./gradlew clean build -Dbuild.snapshot=false -x test
cp build/distributions/opensearch-security-*.zip ../src/test/resources/security/plugin/opensearch-security.zip
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do open an issue to replace this build with using a maven snapshot of security and link it back to opensearch-project/opensearch-build#716.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Tracked here: #274

- name: Build and run Replication tests
run: |
ls -al src/test/resources/security/plugin
./gradlew clean release -Dbuild.snapshot=true -PnumNodes=1 -Psecurity=true
- name: Upload failed logs
uses: actions/upload-artifact@v2
if: failure()
with:
name: logs
path: |
build/testclusters/integTest-*/logs/*
build/testclusters/leaderCluster-*/logs/*
build/testclusters/followCluster-*/logs/*
- name: Create Artifact Path
run: |
mkdir -p cross-cluster-replication-artifacts
cp ./build/distributions/*.zip cross-cluster-replication-artifacts
- name: Uploads coverage
with:
fetch-depth: 2
uses: codecov/codecov-action@v1.2.1
22 changes: 14 additions & 8 deletions build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,23 @@ import org.opensearch.gradle.test.RestIntegTestTask

buildscript {
ext {
isSnapshot = "true" == System.getProperty("build.snapshot", "false")
opensearch_version = System.getProperty("opensearch.version", "2.0.0-SNAPSHOT")
isSnapshot = "true" == System.getProperty("build.snapshot", "true")
opensearch_version = System.getProperty("opensearch.version", "2.0.0-alpha1-SNAPSHOT")
buildVersionQualifier = System.getProperty("build.version_qualifier", "alpha1")
// Taken from https://github.com/opensearch-project/alerting/blob/main/build.gradle#L33
// 1.0.0 -> 1.0.0.0, and 1.0.0-SNAPSHOT -> 1.0.0.0-SNAPSHOT
opensearch_build = opensearch_version.replaceAll(/(\.\d)([^\d]*)$/, '$1.0$2')
version_tokens = opensearch_version.tokenize('-')
opensearch_build = version_tokens[0] + '.0'
if (buildVersionQualifier) {
opensearch_build += "-${buildVersionQualifier}"
}
if (isSnapshot) {
opensearch_build += "-SNAPSHOT"
}


// for bwc tests
opensearch_previous_version = System.getProperty("bwc_older_version", "1.1.0")
opensearch_previous_version = System.getProperty("bwc_older_version", "1.3.1")
plugin_previous_version = opensearch_previous_version.replaceAll(/(\.\d)([^\d]*)$/, '$1.0$2')

common_utils_version = System.getProperty("common_utils.version", opensearch_build)
Expand Down Expand Up @@ -71,10 +80,7 @@ plugins {

allprojects {
group = "org.opensearch"
version = "${opensearch_version}" - "-SNAPSHOT" + ".0"
if (isSnapshot) {
version += "-SNAPSHOT"
}
version = "${opensearch_build}"
}

apply plugin: 'java'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ class TransportGetChangesAction @Inject constructor(threadPool: ThreadPool, clus
if(!fetchFromTranslog) {
log.debug("Fetching changes from lucene for ${request.shardId} - from:${request.fromSeqNo}, to:$toSeqNo")
relativeStartNanos = System.nanoTime()
indexShard.newChangesSnapshot("odr", request.fromSeqNo, toSeqNo, true).use { snapshot ->
indexShard.newChangesSnapshot("odr", request.fromSeqNo, toSeqNo, true, true).use { snapshot ->
ops = ArrayList(snapshot.totalOperations())
var op = snapshot.next()
while (op != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,8 +123,7 @@ class TransportReplayChangesAction @Inject constructor(settings: Settings, trans
if (result.resultType == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
waitForMappingUpdate {
// fetch mappings from the leader cluster when applying on PRIMARY...
syncRemoteMapping(request.leaderAlias, request.leaderIndex, request.shardId()!!.indexName,
op.docType())
syncRemoteMapping(request.leaderAlias, request.leaderIndex, request.shardId()!!.indexName)
}
result = primaryShard.applyTranslogOperation(op, Engine.Operation.Origin.PRIMARY)
}
Expand Down Expand Up @@ -156,21 +155,13 @@ class TransportReplayChangesAction @Inject constructor(settings: Settings, trans
return WriteReplicaResult(request, location, null, replicaShard, log)
}

private fun Translog.Operation.docType(): String {
return when (this) {
is Translog.Index -> type()
is Translog.Delete -> type()
else -> TODO("Operation ${opType()} not expected to have a document type")
}
}

/**
* Fetches the index mapping from the leader cluster, applies it to the local cluster's clusterManager and then waits
* for the mapping to become available on the current shard. Should only be called on the primary shard .
*/
private suspend fun syncRemoteMapping(leaderAlias: String, leaderIndex: String,
followerIndex: String, type: String) {
log.debug("Syncing mappings from ${leaderAlias}:${leaderIndex}/${type} -> $followerIndex...")
followerIndex: String) {
log.debug("Syncing mappings from ${leaderAlias}:${leaderIndex} -> $followerIndex...")
val remoteClient = client.getRemoteClusterClient(leaderAlias)
val options = IndicesOptions.strictSingleIndexNoExpandForbidClosed()
val getMappingsRequest = GetMappingsRequest().indices(leaderIndex).indicesOptions(options)
Expand Down Expand Up @@ -212,13 +203,13 @@ class TransportReplayChangesAction @Inject constructor(settings: Settings, trans
return when (opType()!!) {
Translog.Operation.Type.CREATE, Translog.Operation.Type.INDEX -> {
val sourceOp = this as Translog.Index
Translog.Index(sourceOp.type(), sourceOp.id(), sourceOp.seqNo(), operationPrimaryTerm,
Translog.Index(sourceOp.id(), sourceOp.seqNo(), operationPrimaryTerm,
sourceOp.version(), BytesReference.toBytes(sourceOp.source()),
sourceOp.routing(), sourceOp.autoGeneratedIdTimestamp)
}
Translog.Operation.Type.DELETE -> {
val sourceOp = this as Translog.Delete
Translog.Delete(sourceOp.type(), sourceOp.id(), sourceOp.uid(), sourceOp.seqNo(), operationPrimaryTerm,
Translog.Delete(sourceOp.id(), sourceOp.seqNo(), operationPrimaryTerm,
sourceOp.version())
}
Translog.Operation.Type.NO_OP -> {
Expand All @@ -233,7 +224,7 @@ class TransportReplayChangesAction @Inject constructor(settings: Settings, trans
// Unset auto gen timestamp as we use external Id from the leader index
if (opType()!! == Translog.Operation.Type.CREATE || opType()!! == Translog.Operation.Type.INDEX ) {
val sourceOp = this as Translog.Index
return Translog.Index(sourceOp.type(), sourceOp.id(), sourceOp.seqNo(), sourceOp.primaryTerm(),
return Translog.Index(sourceOp.id(), sourceOp.seqNo(), sourceOp.primaryTerm(),
sourceOp.version(), BytesReference.toBytes(sourceOp.source()),
sourceOp.routing(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ class ReplicationMetadataStore constructor(val client: Client, val clusterServic

private suspend fun createIndex(): CreateIndexResponse {
val createIndexReq = CreateIndexRequest(REPLICATION_CONFIG_SYSTEM_INDEX, configStoreSettings())
.mapping(MAPPING_TYPE, REPLICATION_CONFIG_SYSTEM_INDEX_MAPPING, XContentType.JSON)
.mapping(REPLICATION_CONFIG_SYSTEM_INDEX_MAPPING, XContentType.JSON)
return client.suspending(client.admin().indices()::create, defaultContext = true)(createIndexReq)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ class RemoteClusterRestoreLeaderService @Inject constructor(private val indicesS
val store = leaderIndexShard.store()
var metadataSnapshot = Store.MetadataSnapshot.EMPTY
store.performOp({
metadataSnapshot = store.getMetadata(indexCommitRef.indexCommit)
metadataSnapshot = store.getMetadata(indexCommitRef.get())
})

// Identifies the seq no to start the replication operations from
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,20 +11,22 @@

package org.opensearch.replication.repository

import org.apache.lucene.index.IndexCommit
import org.opensearch.replication.util.performOp
import org.apache.lucene.store.IOContext
import org.apache.lucene.store.IndexInput
import org.opensearch.OpenSearchException
import org.opensearch.common.concurrent.GatedCloseable
import org.opensearch.index.engine.Engine
import org.opensearch.index.shard.IndexShard
import org.opensearch.index.store.Store
import java.io.Closeable

class RestoreContext(val restoreUUID: String,
val shard: IndexShard,
val indexCommitRef: Engine.IndexCommitRef,
val metadataSnapshot: Store.MetadataSnapshot,
val replayOperationsFrom: Long): Closeable {
val shard: IndexShard,
val indexCommitRef: GatedCloseable<IndexCommit>,
val metadataSnapshot: Store.MetadataSnapshot,
val replayOperationsFrom: Long): Closeable {

companion object {
private const val INITIAL_FILE_CACHE_CAPACITY = 20
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ class RemoteClusterTranslogService : AbstractLifecycleComponent(){
public fun getHistoryOfOperations(indexShard: IndexShard, startSeqNo: Long, toSeqNo: Long): List<Translog.Operation> {
log.trace("Fetching translog snapshot for $indexShard - from $startSeqNo to $toSeqNo")
// TODO: Revisit the method after closing the issue: https://github.com/opensearch-project/OpenSearch/issues/2482
val snapshot = indexShard.getHistoryOperations(SOURCE_NAME, startSeqNo, toSeqNo)
val snapshot = indexShard.getHistoryOperations(SOURCE_NAME, startSeqNo, toSeqNo, true)

// Total ops to be fetched (both toSeqNo and startSeqNo are inclusive)
val opsSize = toSeqNo - startSeqNo + 1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ class TranslogSequencerTests : OpenSearchTestCase() {
var seqNo = startSeqNo
val changes = randomList(1, randomIntBetween(1, 512)) {
seqNo = seqNo.inc()
Translog.Index("_doc", randomAlphaOfLength(10).toLowerCase(Locale.ROOT), seqNo,
Translog.Index(randomAlphaOfLength(10).toLowerCase(Locale.ROOT), seqNo,
1L, "{}".toByteArray(Charsets.UTF_8))
}
return Pair(GetChangesResponse(changes, startSeqNo.inc(), startSeqNo, -1), seqNo)
Expand Down
Binary file not shown.