diff --git a/.ci/openshift-ci/build-root/e2e-test.sh b/.ci/openshift-ci/build-root/e2e-test.sh
index c739dedb35..79dd16c7af 100644
--- a/.ci/openshift-ci/build-root/e2e-test.sh
+++ b/.ci/openshift-ci/build-root/e2e-test.sh
@@ -84,6 +84,9 @@ echo "===== Environment:"
env
echo "=================="
+echo "Login command:"
+echo "oc login $TEST_CLUSTER_URL -u $SYSADMIN_USERNAME -p $SYSADMIN_PASSWORD --insecure-skip-tls-verify"
+
echo "Calling script runner....."
# Script path is relative to checkout folder
diff --git a/.ci/openshift-ci/build-root/scripts/excluded-directories.txt b/.ci/openshift-ci/build-root/scripts/excluded-directories.txt
new file mode 100644
index 0000000000..7b9e295859
--- /dev/null
+++ b/.ci/openshift-ci/build-root/scripts/excluded-directories.txt
@@ -0,0 +1,3 @@
+# Quickstarts that should not be tested on OpenShift.
+# One per line with no trailing spaces, and make sure to have a newline at the end
+# microprofile-reactive-messaging-kafka
diff --git a/.ci/openshift-ci/build-root/scripts/included-directories.txt b/.ci/openshift-ci/build-root/scripts/included-directories.txt
deleted file mode 100644
index ece97a533e..0000000000
--- a/.ci/openshift-ci/build-root/scripts/included-directories.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-#Quickstarts not yet ported
-cmt
-ee-security
-helloworld-ws
-helloworld
-jaxrs-client
-kitchensink
-numberguess
-remote-helloworld-mdb
-servlet-security
-temperature-converter
-thread-racing
-todo-backend
-websocket-hello
\ No newline at end of file
diff --git a/.ci/openshift-ci/build-root/scripts/openshift-test-runner.sh b/.ci/openshift-ci/build-root/scripts/openshift-test-runner.sh
index 3194b92c3a..3ca0502f81 100755
--- a/.ci/openshift-ci/build-root/scripts/openshift-test-runner.sh
+++ b/.ci/openshift-ci/build-root/scripts/openshift-test-runner.sh
@@ -34,6 +34,11 @@ poll_marker_files() {
wait_marker_files() {
echo "Waiting $seconds. 'oc rsh' in and either 'touch continue' to stop waiting, or 'touch exit' to abort the test run. The latter will result in the test being reported as failed"
+ echo "You can log in to the cluster started by the CI to diagnose problems with the following commands"
+ echo "oc login $TEST_CLUSTER_URL -u $SYSADMIN_USERNAME -p $SYSADMIN_PASSWORD --insecure-skip-tls-verify"
+ #echo "oc get pods"
+ #echo "oc rsh pod/"
+
found_file=$(poll_marker_files $1)
if [ -z "${found_file}" ]; then
echo "Wait timed out - continuing"
@@ -112,7 +117,7 @@ getPrTouchedDirs() {
fi
IFS='/' read -ra parts <<< "${file}"
- if [ "${#parts[@]}" == 1 ]; then
+ if [ "${#parts[@]}" == 1 ] && [ "${parts[0]}" != 'enable-wait' ] && [ "${parts[0]}" != 'continue' ] && [ "${parts[0]}" != '.gitignore' ] ; then
echo "Changed detected in ${file} which is in the root directory. All tests will need to be run."
root_dir_file_changed=1
break
@@ -141,10 +146,9 @@ filterDirectories() {
declare -a tmp
for fileName in "${test_directories[@]}"; do
# Quickstarts that have not been migrated yet
- # TODO once everything has a quickstart_xxx_ci.yml file we can remove the included-directories check
- grep -q "^${fileName}$" included-directories.txt
- is_in_included_txt="$?"
- if [ "${is_in_included_txt}" != "0" ] && [ ! -f "${basedir}/.github/workflows/quickstart_${fileName}_ci.yml" ]; then
+ grep -q "^${fileName}$" excluded-directories.txt
+ is_in_excluded="$?"
+ if [ "${is_in_excluded}" = "0" ] || [ ! -f "${basedir}/.github/workflows/quickstart_${fileName}_ci.yml" ]; then
# echo "Skipping ${fileName}!"
continue
fi
@@ -167,11 +171,6 @@ if [ -f "${basedir}/enable-wait" ]; then
wait_marker_files 3600
popd
fi
-if [ -f "${basedir}/abort" ]; then
- # Add ability to abort test run between quickstart runs
- echo "${basedir}/abort file found. Exiting"
- exit 1
-fi
if [ "${JOB_TYPE}" = "presubmit" ]; then
getPrTouchedDirs
@@ -211,6 +210,11 @@ for fileName in "${test_directories[@]}"; do
echo "${fileName}"
else
runQuickstart "${script_directory}" "${fileName}"
+ if [ -f "${basedir}/abort" ]; then
+ # Add ability to abort test run between quickstart runs
+ echo "${basedir}/abort file found. Exiting"
+ exit 1
+ fi
fi
done
diff --git a/.ci/openshift-ci/build-root/scripts/overridable-functions.sh b/.ci/openshift-ci/build-root/scripts/overridable-functions.sh
index db4aa001ae..e7a0f93a03 100644
--- a/.ci/openshift-ci/build-root/scripts/overridable-functions.sh
+++ b/.ci/openshift-ci/build-root/scripts/overridable-functions.sh
@@ -53,7 +53,7 @@ function helmInstall() {
application="${1}"
helm_set_arguments="$2"
- # '--atomic' waits until the pods are ready, and removes everything if something went wrong
+ # '--wait' waits until the pods are ready
# `--timeout` sets the timeout for the wait.
# https://helm.sh/docs/helm/helm_install/ has more details
# Don't quote ${helm_set_arguments} since then it fails when there are none
@@ -81,4 +81,12 @@ function getHelmSetVariablePrefix() {
function helmInstallFailed() {
# Noop - the main work is done elsewhere
echo ""
+}
+
+# More output when the tests have failed
+# Parameters
+# 1 - application name
+#
+function testsFailed() {
+ echo ""
}
\ No newline at end of file
diff --git a/.ci/openshift-ci/build-root/scripts/qs-overrides/microprofile-reactive-messaging-kafka/overridable-functions.sh b/.ci/openshift-ci/build-root/scripts/qs-overrides/microprofile-reactive-messaging-kafka/overridable-functions.sh
new file mode 100644
index 0000000000..11b64461e6
--- /dev/null
+++ b/.ci/openshift-ci/build-root/scripts/qs-overrides/microprofile-reactive-messaging-kafka/overridable-functions.sh
@@ -0,0 +1,144 @@
+# These functions are 'overridable' in the individual quickstarts.
+# To do so create a ./qs-overrides/${qs_dir}/overridable-functions.sh and override the
+# functions you need to. ${qs_dir} in this case is the same as the name of the quickstart directory
+# that you want to tweak
+
+# Determines the application name to use on OpenShift.
+# The current directory is the quickstart directory.
+# The default is to use the quickstart directory as the name, but in some cases
+# a quickstart may need to shorten the name of the application in order to control
+# the length of the resources created by OpenShift
+#
+# Parameters
+# 1 - the name of the qs directory (not the full path)
+function applicationName() {
+ #echo "${1}"
+    # The full microprofile-reactive-messaging-kafka name results in names of generated resources which are too long for
+ # OpenShift to handle
+ echo "mp-rm-qs"
+}
+
+
+# Installs any prerequisites before doing the Helm install.
+# The current directory is the quickstart directory
+#
+# Parameters
+# 1 - application name
+function installPrerequisites()
+{
+ application="${1}"
+ echo "Creating amq-streams-operator-group"
+
+ oc apply -f - <
+ # We do this check first because it takes a while to appear
+ oc get pods -l app.kubernetes.io/instance='my-cluster',app.kubernetes.io/name='entity-operator' | grep "my-cluster-entity-operator" || continue
+
+ # Wait 10 seconds for all pods to come up, and renter the loop if not
+ oc wait pod -l app.kubernetes.io/instance='my-cluster' --for=condition=Ready --timeout=10s || continue
+
+ # If we got here, everything is up, so we can proceed
+ break
+ done
+}
+
+
+# Cleans any prerequisites after doing the Helm uninstall.
+# The current directory is the quickstart directory
+#
+# Parameters
+# 1 - application name
+function cleanPrerequisites()
+{
+ # TODO There are a few topics created that need cleaning up
+
+ oc delete kafka my-cluster
+ oc delete subscription amq-streams-subscription
+ oc delete operatorgroup amq-streams-operator-group
+ oc delete deployment amq-streams-cluster-operator-v2.5.0-1
+}
diff --git a/.ci/openshift-ci/build-root/scripts/run-quickstart-test-on-openshift.sh b/.ci/openshift-ci/build-root/scripts/run-quickstart-test-on-openshift.sh
index 698d32da20..a8319ede60 100755
--- a/.ci/openshift-ci/build-root/scripts/run-quickstart-test-on-openshift.sh
+++ b/.ci/openshift-ci/build-root/scripts/run-quickstart-test-on-openshift.sh
@@ -145,6 +145,7 @@ fi
echo "Performing Helm install and waiting for completion.... (${additional_arguments})"
# helmInstall is from overridable-functions.sh
helm_install_ret=$(helmInstall "${application}" "${helm_set_arguments}")
+
# For some reason the above sometimes becomes a multi-line string. actual The exit code will be
# on the last line
helm_install_ret=$(echo "${helm_install_ret}"| tail -n 1)
@@ -177,17 +178,14 @@ if [ "${QS_UNSIGNED_SERVER_CERT}" = "1" ]; then
truststore_properties="-Djavax.net.ssl.trustStore=${script_directory}/InstallCert/jssecacerts -Djavax.net.ssl.trustStorePassword=changeit"
fi
-
-# I am using 'integration-test failsafe:verify' here rather than just using 'verify'. The reason for this is
-# plain 'verify' gives an exit status of 0 even when the test fails.
-# If I just use 'failsafe:verify' the proper exit code is returned when the test fails BUT we don't see any output of the test.
-# Using 'integration-test failsafe:verify' I get the proper exit code and output
-# TODO Remove arq-remote once all tests have been migrated
-mvn -B integration-test failsafe:verify -Parq-remote,integration-testing -Dserver.host=https://${route} ${QS_MAVEN_REPOSITORY} ${truststore_properties}
-
+mvn -B verify -Pintegration-testing -Dserver.host=https://${route} ${QS_MAVEN_REPOSITORY} ${truststore_properties}
if [ "$?" != "0" ]; then
- test_status=1
+ echo "Tests failed!"
+ echo "Dumping the application pod(s)"
+ oc logs deployment/"${application}"
+ testsFailed
fi
+
################################################################################################
# Helm uninstall
echo "Running Helm uninstall"
diff --git a/.github/workflows/quickstart_microprofile-reactive-messaging-kafka_ci.yml b/.github/workflows/quickstart_microprofile-reactive-messaging-kafka_ci.yml
new file mode 100644
index 0000000000..bacba424de
--- /dev/null
+++ b/.github/workflows/quickstart_microprofile-reactive-messaging-kafka_ci.yml
@@ -0,0 +1,18 @@
+name: WildFly microprofile-reactive-messaging-kafka Quickstart CI
+
+on:
+ # TEMP
+ push:
+ pull_request:
+ types: [opened, synchronize, reopened, ready_for_review]
+ paths:
+ - 'microprofile-reactive-messaging-kafka/**'
+ - '.github/workflows/quickstart_ci.yml'
+jobs:
+ call-quickstart_ci:
+ uses: ./.github/workflows/quickstart_ci.yml
+ with:
+ QUICKSTART_PATH: microprofile-reactive-messaging-kafka
+ TEST_BOOTABLE_JAR: true
+ # See https://issues.redhat.com/browse/WFLY-18676 for why we are excluding this on Windows for now.
+ MATRIX_OS: '"ubuntu-20.04"'
\ No newline at end of file
diff --git a/.github/workflows/quickstart_microprofile-reactive-messaging-kafka_ci_before.sh b/.github/workflows/quickstart_microprofile-reactive-messaging-kafka_ci_before.sh
new file mode 100755
index 0000000000..a5b7f08ed8
--- /dev/null
+++ b/.github/workflows/quickstart_microprofile-reactive-messaging-kafka_ci_before.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+# This image will be moved to SmallRye at some point
+docker run -d -p 9092:9092 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 quay.io/ogunalp/kafka-native:0.5.0-kafka-3.6.0
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index e06aae9d16..4d8b342d8d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,3 +34,4 @@ build.metadata
*.lock
*.tgz
/**/nbproject/
+.ci/openshift-ci/build-root/scripts/InstallCert/jssecacerts
diff --git a/microprofile-reactive-messaging-kafka/README.adoc b/microprofile-reactive-messaging-kafka/README.adoc
index b4522c66f6..91b3649cd4 100644
--- a/microprofile-reactive-messaging-kafka/README.adoc
+++ b/microprofile-reactive-messaging-kafka/README.adoc
@@ -11,9 +11,8 @@ The `microprofile-reactive-messaging-kafka` quickstart demonstrates the use of t
:standalone-server-type: microprofile
:archiveType: war
:archiveName: {artifactId}
-:reactive-messaging:
-:custom-bootable-jar-layers:
-:uses-h2:
+:helm-install-prerequisites: ../microprofile-reactive-messaging-kafka/helm-install-prerequisites.adoc
+:strimzi-version: v1beta2
== What is it?
@@ -51,8 +50,8 @@ include::../shared-doc/build-and-deploy-the-quickstart.adoc[leveloffset=+1]
// Undeploy the Quickstart
include::../shared-doc/undeploy-the-quickstart.adoc[leveloffset=+1]
-// Run the Arquillian Tests
-include::../shared-doc/run-arquillian-tests.adoc[leveloffset=+1]
+// Server Distribution Testing
+include::../shared-doc/run-integration-tests-with-server-distribution.adoc[leveloffset=+2]
// Run the Quickstart in Red Hat CodeReady Studio or Eclipse
include::../shared-doc/run-the-quickstart-in-jboss-developer-studio.adoc[leveloffset=+1]
@@ -676,8 +675,9 @@ public class RootResource {
DatabaseBean dbBean;
@GET
+ @Path("/db")
@Produces(MediaType.TEXT_PLAIN)
- public String getRootResponse() {
+ public String getDatabaseEntries() {
List entries = dbBean.loadAllTimedEntries();
StringBuffer sb = new StringBuffer();
for (TimedEntry t : entries) {
@@ -888,6 +888,7 @@ Let's check that our application works!
. Make sure xref:start_the_eap_standalone_server[the {productName} server is started] as described above.
. {productName} ships with all the modules to run MicroProfile Reactive Messaging applications with Kafka, but the functionality is not enabled out of the box, so we need to enable it. Run: `$ __{jbossHomeName}__/bin/jboss-cli.sh --connect --file=enable-reactive-messaging.cli` to set everything up. The `enable-reactive-messaging.cli` file used can be found link:enable-reactive-messaging.cli[here].
+*NOTE*: This is only required if running against the downloaded server. If the `microprofile-reactive-messaging-kafka` layer is provisioned, as is done by the `openshift` and `bootable-jar` maven profiles, the Kafka functionality is already enabled.
. Open new terminal and navigate to the root directory of your project.
. Type the following command to build and deploy the project:
@@ -930,7 +931,7 @@ Then we get another section where it is using the randomised order
In both parts of the log we see that all messages reach the `logAllMessages()` method, while only `Hello` and `Kafka` reach the `receiveFromKafka()` method which saves them to the RDBMS.
-To inspect what was stored in the database, go to http://localhost:8080/microprofile-reactive-messaging-kafka in your browser and you should see something like:
+To inspect what was stored in the database, go to http://localhost:8080/microprofile-reactive-messaging-kafka/db in your browser and you should see something like:
[source, options="nowrap"]
----
@@ -963,58 +964,12 @@ data: two
data: three
```
-
-ifdef::EAPXPRelease[]
-// Getting Started with OpenShift
-include::../shared-doc/xp-openshift-getting-started.adoc[leveloffset=+1]
-//Prepare OpenShift for Quickstart Deployment
-include::../shared-doc/xp-create-project.adoc[leveloffset=+2]
-// This does not seem to be needed when using Helm for creating the application
-// Import the Latest {xpaasproduct-shortname} Image Streams and Templates
-// include::../shared-doc/xp-import-imagestreams-templates.adoc[leveloffset=+2]
-
-[[installing-kafka-on-openshift]]
-=== Installing Kafka on OpenShift
-Kafka on OpenShift is provided by AMQ Streams.
-
-// Install AMQ Streams
-include::../shared-doc/xp-install-amq-streams.adoc[leveloffset=+3]
-
-
-// Deploy the {ProductShortName} Source-to-Image (S2I) Quickstart to OpenShift
-[[deploy-to-openshift]]
-== Deploy the {ProductShortName} Source-to-Image (S2I) Quickstart to OpenShift
-
-We will use Helm to install the application. Make sure you have downloaded the https://docs.openshift.com/container-platform/4.9/applications/working_with_helm_charts/installing-helm.html[Helm CLI Tool].
-
-[[xp-deploy-project-amq-streams]]
-=== Deploy the {ProductShortName} Source-to-Image (S2I) Quickstart to OpenShift with AMQ Streams
-This section describes how to deploy the application to OpenShift interacting with Kafka provided by AMQ Streams.
-
-First add the Helm repository, if you have not done so already, as described in {helmRepoUrl}.
-
-Then run the following command to deploy and start the application (note we are using the name `mp-rm-qs` for our application here):
-
-[source,subs="attributes+",options="nowrap"]
-----
-helm install mp-rm-qs -f ./helm-amq-streams.yml {helmChartName} --set build.uri={githubRepoCodeUrl} --set build.ref={WildFlyQuickStartRepoTag}
-----
-
-NOTE: Although the above command will return quite quickly, it will take a while until the application pod is actually brought up. In the OpenShift console you will see a pod whose name starts with mp-rm-qs in the `ErrImagePull` or `ImagePullBackoff` state until the build has completed.
-
-The contents of the link:helm-amq-streams.yml[`helm-amq-streams.yml`] file specify:
-
-* The Galleon layers to use when creating the server under `build/s2i/galleonLayers`
-** `microprofile-reactive-messaging-kafka` contains the MicroProfile Reactive Messaging with Kafka functionality
-** `h2-default-datasource` enables the HSQL database we store data in
-** `cloud-server` provides among other things the web server functionality
-* For both the build (under `build/env`) and the deploy `deploy/env`) parts, we specify environment variables to increase some JVM arguments from the default.
-
-Run `oc get route` to find the URL of our application.
-
-//Bootable JAR
+// Bootable JAR
include::../shared-doc/build-and-run-the-quickstart-with-bootable-jar.adoc[leveloffset=+1]
+// OpenShift
+include::../shared-doc/build-and-run-the-quickstart-with-openshift.adoc[leveloffset=+1]
+
== Conclusion
MicroProfile Reactive Messaging and Reactive Streams Operators allow you to publish to, process and consume streams, optionally backed by Kafka, by adding `@Incoming` and `@Outgoing` annotations to your methods. 'Paired' streams work in memory. To map 'un-paired' streams to be backed by Kafka you need to provide configuration via MicroProfile Config.
diff --git a/microprofile-reactive-messaging-kafka/helm-amq-streams.yml b/microprofile-reactive-messaging-kafka/charts/helm.yaml
similarity index 53%
rename from microprofile-reactive-messaging-kafka/helm-amq-streams.yml
rename to microprofile-reactive-messaging-kafka/charts/helm.yaml
index 157da3e616..cdb2ecd76e 100644
--- a/microprofile-reactive-messaging-kafka/helm-amq-streams.yml
+++ b/microprofile-reactive-messaging-kafka/charts/helm.yaml
@@ -2,22 +2,10 @@
# Will need a diff commit in the upstream-to-product repository
build:
uri: https://github.com/wildfly/quickstart.git
+ ref: main
contextDir: microprofile-reactive-messaging-kafka
- mode: s2i
- s2i:
- galleonLayers:
- - cloud-server
- - h2-default-datasource
- - microprofile-reactive-messaging-kafka
- env:
- - name: MAVEN_OPTS
- value: -XX:MetaspaceSize=256m -XX:MaxMetaspaceSize=256m
deploy:
replicas: 1
env:
- name: MP_MESSAGING_CONNECTOR_SMALLRYE_KAFKA_BOOTSTRAP_SERVERS
value: my-cluster-kafka-bootstrap:9092
- - name: GC_MAX_METASPACE_SIZE
- value: "256"
- - name: GC_METASPACE_SIZE
- value: "96"
diff --git a/microprofile-reactive-messaging-kafka/helm-bootable-jar-amq-streams.yml b/microprofile-reactive-messaging-kafka/helm-bootable-jar-amq-streams.yml
deleted file mode 100644
index e0234b815d..0000000000
--- a/microprofile-reactive-messaging-kafka/helm-bootable-jar-amq-streams.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# TODO Update to point to the released quickstarts and image
-# Will need a diff commit in the upstream-to-product repository
-build:
- uri: https://github.com/wildfly/quickstart.git
- contextDir: microprofile-reactive-messaging-kafka
- mode: bootable-jar
- env:
-# - name: ARTIFACT_DIR
-# value: microprofile-reactive-messaging-kafka/target
- - name: MAVEN_OPTS
- value: -XX:MetaspaceSize=256m -XX:MaxMetaspaceSize=256m
- - name: MAVEN_ARGS_APPEND
- # Use the bootable-jar-openshift profile to ensure that the application
- # can be deployed on OpenShift but disable JKube as the image will be
- # built and deployed by this chart.
- value: -Pbootable-jar-openshift -Djkube.skip=true
-deploy:
- replicas: 1
- env:
- - name: GC_MAX_METASPACE_SIZE
- value: "256"
- - name: GC_METASPACE_SIZE
- value: "96"
- - name: MP_MESSAGING_CONNECTOR_SMALLRYE_KAFKA_BOOTSTRAP_SERVERS
- value: my-cluster-kafka-bootstrap:9092
- volumeMounts:
- - name: reactive-messaging-properties
- mountPath: /etc/config/reactive-messaging-properties
- volumes:
- - name: reactive-messaging-properties
- configMap:
- name: reactive-messaging-properties
diff --git a/microprofile-reactive-messaging-kafka/helm-install-prerequisites.adoc b/microprofile-reactive-messaging-kafka/helm-install-prerequisites.adoc
new file mode 100644
index 0000000000..2f4263bb3f
--- /dev/null
+++ b/microprofile-reactive-messaging-kafka/helm-install-prerequisites.adoc
@@ -0,0 +1,74 @@
+=== Install AMQ Streams on OpenShift
+
+The functionality of this quickstart depends on a running instance of the
+https://access.redhat.com/products/red-hat-amq#streams[AMQ Streams] Operator. AMQ Streams is a Red Hat project based on Apache Kafka. To deploy AMQ Streams in the OpenShift environment:
+
+. Log in to the OpenShift console as the `kubeadmin` user (or any cluster administrator).
+. Navigate to `Operators` -> `OperatorHub`.
+. Search for `AMQ Streams` - click on the 'AMQ Streams' operator.
++
+Install it with the default values and wait for the message telling you it has been installed and is ready for use.
+. In your terminal, run the following command to set up a Kafka cluster called `my-cluster` in your project:
++
+[options="nowrap",subs="+attributes"]
+----
+$ oc apply -f - <Quickstart: microprofile-reactive-messaging-kafka
-
- 30.0.0.Beta1
- 30.0.0.Beta1
-
- 4.3.1
+
+ 29.0.0.Final
+
+ ${version.server}
+ ${version.server}
+ 4.0.0.Final
+ 4.2.0.Final
+ 10.0.0.Final
+
3.0.0
- 1.5.1.Final
- 2.9.1
2.0.0.Final
-
- 30.0.0.Beta1
- 10.0.0.Final
- 1.0.1
- registry.redhat.io/ubi8/openjdk-11:latest
@@ -112,7 +109,7 @@
org.wildfly.bom
wildfly-microprofile
- ${version.microprofile.bom}
+ ${version.bom.microprofile}
pom
import
@@ -120,7 +117,7 @@
org.wildfly.bom
wildfly-ee-with-tools
- ${version.server.bom}
+ ${version.bom.ee}
pom
import
@@ -205,42 +202,35 @@
test
-
+
- org.springframework.kafka
- spring-kafka-test
+ org.wildfly.arquillian
+ wildfly-arquillian-common
test
- ${version.org.springframework.kafka}
-
- io.rest-assured
- rest-assured
+ org.eclipse.microprofile.rest.client
+ microprofile-rest-client-api
test
- ${version.io.rest-assured}
- org.wildfly.arquillian
- wildfly-arquillian-common
+ org.apache.httpcomponents
+ httpclient
test
- org.jboss
- jboss-dmr
+ org.jboss.logging
+ commons-logging-jboss-logging
test
- ${version.org.jboss.jboss-dmr}
+
io.smallrye.config
smallrye-config-core
${version.io.smallrye-config}
test
-
- org.eclipse.microprofile.rest.client
- microprofile-rest-client-api
- test
-
+
org.jboss.resteasy.microprofile
microprofile-rest-client-base
@@ -254,25 +244,23 @@
test
-
- org.apache.httpcomponents
- httpclient
- test
-
-
- org.jboss.logging
- commons-logging-jboss-logging
- test
-
-
-
- org.jboss.arquillian.junit
- arquillian-junit-container
- test
-
+
+
+
+ org.wildfly.plugins
+ wildfly-maven-plugin
+ ${version.plugin.wildfly}
+
+
+ org.wildfly.plugins
+ wildfly-jar-maven-plugin
+ ${version.plugin.wildfly-jar}
+
+
+
org.asciidoctor
@@ -280,8 +268,8 @@
- ${version.server.bom}
- ${version.microprofile.bom}
+ ${version.bom.ee}
+ ${version.bom.microprofile}
@@ -289,65 +277,6 @@
-
-
- arq-managed
-
-
- org.wildfly.arquillian
- wildfly-arquillian-container-managed
- test
-
-
-
-
-
- org.apache.maven.plugins
- maven-failsafe-plugin
- ${version.failsafe.plugin}
-
-
-
- integration-test
- verify
-
-
-
-
-
-
-
-
-
- arq-remote
-
-
- org.wildfly.arquillian
- wildfly-arquillian-container-remote
- test
-
-
-
-
-
- org.apache.maven.plugins
- maven-failsafe-plugin
- ${version.failsafe.plugin}
-
-
-
- integration-test
- verify
-
-
-
-
-
-
-
bootable-jar
@@ -355,9 +284,8 @@
org.wildfly.plugins
wildfly-jar-maven-plugin
- ${version.wildfly-jar.maven.plugin}
- wildfly@maven(org.jboss.universe:community-universe)#${version.server.bootable-jar}
+ wildfly@maven(org.jboss.universe:community-universe)#${version.server}
cloud-server
h2-default-datasource
@@ -378,25 +306,29 @@
+
- bootable-jar-openshift
+ openshift
org.wildfly.plugins
- wildfly-jar-maven-plugin
- ${version.wildfly-jar.maven.plugin}
+ wildfly-maven-plugin
- wildfly@maven(org.jboss.universe:community-universe)#${version.server.bootable-jar}
+
+
+ org.wildfly:wildfly-galleon-pack:${version.server}
+
+
+ org.wildfly.cloud:wildfly-cloud-galleon-pack:${version.pack.cloud}
+
+
cloud-server
h2-default-datasource
microprofile-reactive-messaging-kafka
-
- true
-
-
+ ROOT.war
@@ -406,38 +338,31 @@
+
+
+
+
+
+ integration-testing
+
+
- org.eclipse.jkube
- openshift-maven-plugin
- ${version.jkube.maven.plugin}
+ org.apache.maven.plugins
+ maven-failsafe-plugin
+
+
+ **/BasicRuntimeIT
+ **/ReactiveMessagingKafkaIT
+
+
- resource
- build
+ integration-test
+ verify
-
-
-
-
- NodePort
-
-
-
-
-
-
- 256
- 96
- my-cluster-kafka-bootstrap:9092
-
-
-
diff --git a/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/MessagingBean.java b/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/MessagingBean.java
index 34e76dc8f6..e4c96e4eac 100644
--- a/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/MessagingBean.java
+++ b/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/MessagingBean.java
@@ -66,33 +66,45 @@ public PublisherBuilder filter(PublisherBuilder messages) {
@Incoming("sender")
@Outgoing("to-kafka")
public Message sendToKafka(String msg) {
- TimedEntry te = new TimedEntry(new Timestamp(System.currentTimeMillis()), msg);
- Message m = Message.of(te);
- // Just use the hash as the Kafka key for this example
- int key = te.getMessage().hashCode();
-
- // Create Metadata containing the Kafka key
- OutgoingKafkaRecordMetadata md = OutgoingKafkaRecordMetadata
- .builder()
- .withKey(key)
- .build();
-
- // The returned message will have the metadata added
- return KafkaMetadataUtil.writeOutgoingKafkaMetadata(m, md);
+ try {
+ System.out.println("TEMP (sendToKafka) " + msg);
+ TimedEntry te = new TimedEntry(new Timestamp(System.currentTimeMillis()), msg);
+ Message m = Message.of(te);
+ // Just use the hash as the Kafka key for this example
+ int key = te.getMessage().hashCode();
+
+ // Create Metadata containing the Kafka key
+ OutgoingKafkaRecordMetadata md = OutgoingKafkaRecordMetadata
+ .builder()
+ .withKey(key)
+ .build();
+
+ // The returned message will have the metadata added
+ return KafkaMetadataUtil.writeOutgoingKafkaMetadata(m, md);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
}
@Incoming("from-kafka")
public CompletionStage receiveFromKafka(Message message) {
- TimedEntry payload = message.getPayload();
-
- IncomingKafkaRecordMetadata md = KafkaMetadataUtil.readIncomingKafkaMetadata(message).get();
- String msg =
- "Received from Kafka, storing it in database\n" +
- "\t%s\n" +
- "\tkey: %d; partition: %d, topic: %s";
- msg = String.format(msg, payload, md.getKey(), md.getPartition(), md.getTopic());
- System.out.println(msg);
- dbBean.store(payload);
- return message.ack();
+ try {
+ TimedEntry payload = message.getPayload();
+ System.out.println("TEMP (receiveFromKafka) " + payload);
+
+ IncomingKafkaRecordMetadata md = KafkaMetadataUtil.readIncomingKafkaMetadata(message).get();
+ String msg =
+ "Received from Kafka, storing it in database\n" +
+ "\t%s\n" +
+ "\tkey: %d; partition: %d, topic: %s";
+ msg = String.format(msg, payload, md.getKey(), md.getPartition(), md.getTopic());
+ System.out.println(msg);
+ dbBean.store(payload);
+ return message.ack();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
}
}
\ No newline at end of file
diff --git a/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/RootResource.java b/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/RootResource.java
index a1621a2b2e..156a222ab0 100644
--- a/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/RootResource.java
+++ b/microprofile-reactive-messaging-kafka/src/main/java/org/wildfly/quickstarts/microprofile/reactive/messaging/RootResource.java
@@ -31,8 +31,9 @@ public class RootResource {
DatabaseBean dbBean;
@GET
+ @Path("/db")
@Produces(MediaType.TEXT_PLAIN)
- public String getRootResponse() {
+ public String getDatabaseEntries() {
List entries = dbBean.loadAllTimedEntries();
StringBuffer sb = new StringBuffer();
for (TimedEntry t : entries) {
@@ -41,4 +42,10 @@ public String getRootResponse() {
}
return sb.toString();
}
+
+ @GET
+ @Produces(MediaType.TEXT_PLAIN)
+ public String getRootResponse() {
+ return "MicroProfile Reactive Messaging with Kafka quickstart deployed successfully. You can find the available operations in the included README file.";
+ }
}
diff --git a/microprofile-reactive-messaging-kafka/src/main/jkube/route.yml b/microprofile-reactive-messaging-kafka/src/main/jkube/route.yml
deleted file mode 100644
index a33a4056a7..0000000000
--- a/microprofile-reactive-messaging-kafka/src/main/jkube/route.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-spec:
- tls:
- insecureEdgeTerminationPolicy: Redirect
- termination: edge
- to:
- kind: Service
- name: microprofile-reactive-messaging-kafka
diff --git a/microprofile-reactive-messaging-kafka/src/main/resources/META-INF/microprofile-config.properties b/microprofile-reactive-messaging-kafka/src/main/resources/META-INF/microprofile-config.properties
index 4c0c3f9968..cb2d941e66 100644
--- a/microprofile-reactive-messaging-kafka/src/main/resources/META-INF/microprofile-config.properties
+++ b/microprofile-reactive-messaging-kafka/src/main/resources/META-INF/microprofile-config.properties
@@ -14,3 +14,6 @@ mp.messaging.incoming.from-kafka.key.deserializer=org.apache.kafka.common.serial
# Configure Kafka group.id to prevent warn message - if not set, some default value is generated automatically.
mp.messaging.connector.smallrye-kafka.group.id="microprofile-reactive-messaging-kafka-group-id"
+# Needed as per https://github.com/smallrye/smallrye-reactive-messaging/issues/845 since the consumer
+# joins after the messages are sent
+mp.messaging.incoming.from-kafka.auto.offset.reset=earliest
diff --git a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/BasicRuntimeIT.java b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/BasicRuntimeIT.java
new file mode 100644
index 0000000000..7aff4b1357
--- /dev/null
+++ b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/BasicRuntimeIT.java
@@ -0,0 +1,27 @@
+package org.wildfly.quickstarts.microprofile.reactive.messaging.test;
+
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.wildfly.quickstarts.microprofile.reactive.messaging.test.TestUtils.getServerHost;
+
+public class BasicRuntimeIT {
+
+ private final CloseableHttpClient httpClient = HttpClientBuilder.create().build();
+ @Test
+ public void testHTTPEndpointIsAvailable() throws IOException {
+ HttpGet httpGet = new HttpGet(getServerHost());
+ CloseableHttpResponse httpResponse = httpClient.execute(httpGet);
+
+ assertEquals("Successful call", 200, httpResponse.getStatusLine().getStatusCode());
+
+ httpResponse.close();
+
+ }
+}
diff --git a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/EnableReactiveExtensionsSetupTask.java b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/EnableReactiveExtensionsSetupTask.java
deleted file mode 100644
index 46b31ae43c..0000000000
--- a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/EnableReactiveExtensionsSetupTask.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * JBoss, Home of Professional Open Source.
- * Copyright 2020, Red Hat, Inc., and individual contributors
- * as indicated by the @author tags. See the copyright.txt file in the
- * distribution for a full listing of individual contributors.
- *
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This software is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-
-package org.wildfly.quickstarts.microprofile.reactive.messaging.test;
-
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-
-import org.jboss.as.arquillian.api.ServerSetupTask;
-import org.jboss.as.arquillian.container.ManagementClient;
-import org.jboss.as.controller.client.ModelControllerClient;
-import org.jboss.as.controller.client.helpers.ClientConstants;
-import org.jboss.dmr.ModelNode;
-import org.junit.Assert;
-import org.xnio.IoUtils;
-
-/**
- * @author Kabir Khan
- */
-public class EnableReactiveExtensionsSetupTask implements ServerSetupTask {
- private static final String MODULE_REACTIVE_MESSAGING = "org.wildfly.extension.microprofile.reactive-messaging-smallrye";
- private static final String MODULE_REACTIVE_STREAMS_OPERATORS = "org.wildfly.extension.microprofile.reactive-streams-operators-smallrye";
- private static final String SUBSYSTEM_REACTIVE_MESSAGING = "microprofile-reactive-messaging-smallrye";
- private static final String SUBSYSTEM_REACTIVE_STREAMS_OPERATORS = "microprofile-reactive-streams-operators-smallrye";
- public static final int TIMEOUT = 30000;
-
- public static final String ADD = "add";
- public static final String ADDRESS = "address";
- public static final String ADMIN_ONLY = "admin-only";
- public static final String CHILD_TYPE = "child-type";
- public static final String EXTENSION = "extension";
- public static final String FAILED = "failed";
- public static final String NAME = "name";
- public static final String OPERATION = "operation";
- public static final String OUTCOME = "outcome";
- public static final String READ_ATTRIBUTE = "read-attribute";
- public static final String READ_CHILDREN_NAMES = "read-children-names";
- public static final String RELOAD = "reload";
- public static final String RELOAD_REQUIRED = "reload-required";
- public static final String REMOVE = "remove";
- public static final String RESULT = "result";
- public static final String RUNNING = "running";
- public static final String SERVER_CONFIG = "server-config";
- public static final String SERVER_STATE = "server-state";
- public static final String SUBSYSTEM = "subsystem";
- public static final String SUCCESS = "success";
-
- List removeOps = new ArrayList<>();
-
- public EnableReactiveExtensionsSetupTask() {
- }
-
- @Override
- public void setup(ManagementClient managementClient, String containerId) throws Exception {
- boolean rsoExt = !containsChild(managementClient, EXTENSION, MODULE_REACTIVE_STREAMS_OPERATORS);
- boolean rsoSs = !containsChild(managementClient, SUBSYSTEM, SUBSYSTEM_REACTIVE_STREAMS_OPERATORS);
- boolean rmExt = !containsChild(managementClient, EXTENSION, MODULE_REACTIVE_MESSAGING);
- boolean rmSs = !containsChild(managementClient, SUBSYSTEM, SUBSYSTEM_REACTIVE_MESSAGING);
-
- if (rsoExt) {
- addExtension(managementClient, MODULE_REACTIVE_STREAMS_OPERATORS);
- removeOps.add(createRemoveExtension(MODULE_REACTIVE_STREAMS_OPERATORS));
- }
- if (rmExt) {
- addExtension(managementClient, MODULE_REACTIVE_MESSAGING);
- removeOps.add(createRemoveExtension(MODULE_REACTIVE_MESSAGING));
- }
- if (rsoSs) {
- addSubsystem(managementClient, SUBSYSTEM_REACTIVE_STREAMS_OPERATORS);
- removeOps.add(createRemoveSubsystem(SUBSYSTEM_REACTIVE_STREAMS_OPERATORS));
- }
- if (rmSs) {
- addSubsystem(managementClient, SUBSYSTEM_REACTIVE_MESSAGING);
- removeOps.add(createRemoveSubsystem(SUBSYSTEM_REACTIVE_MESSAGING));
- }
-
- reloadIfRequired(managementClient);
- }
-
- @Override
- public void tearDown(ManagementClient managementClient, String s) throws Exception {
- for (ListIterator it = removeOps.listIterator(removeOps.size()); it.hasPrevious(); ) {
- ModelNode op = it.previous();
- executeOperation(managementClient, op);
- }
-
- reloadIfRequired(managementClient);
- }
-
- private boolean containsChild(ManagementClient managementClient, String childType, String childName) throws Exception {
- ModelNode op = new ModelNode();
- op.get(OPERATION).set(READ_CHILDREN_NAMES);
- op.get(CHILD_TYPE).set(childType);
- ModelNode result = executeOperation(managementClient, op);
- List names = result.asList();
- for (ModelNode name : names) {
- if (name.asString().equals(childName)) {
- return true;
- }
- }
- return false;
- }
-
- private void addExtension(ManagementClient managementClient, String name) throws Exception {
- ModelNode op = createOperation(getExtensionAddress(name), ADD);
- executeOperation(managementClient, op);
- }
-
- private void addSubsystem(ManagementClient managementClient, String name) throws Exception {
- ModelNode op = createOperation(getSubsystemAddress(name), ADD);
- executeOperation(managementClient, op);
- }
-
- private ModelNode createRemoveExtension(String name) throws Exception {
- return createOperation(getExtensionAddress(name), REMOVE);
- }
-
- private ModelNode createRemoveSubsystem(String name) throws Exception {
- return createOperation(getSubsystemAddress(name), REMOVE);
- }
-
- private ModelNode createOperation(ModelNode address, String operationName) throws Exception {
- ModelNode op = new ModelNode();
- op.get(ADDRESS).set(address);
- op.get(OPERATION).set(operationName);
- return op;
- }
-
- private ModelNode executeOperation(ManagementClient managementClient, ModelNode op) throws Exception{
- ModelNode result = managementClient.getControllerClient().execute(op);
- if (!result.get(OUTCOME).asString().equals(SUCCESS)) {
- throw new IllegalStateException(result.asString());
- }
- return result.get(RESULT);
- }
-
- private void reloadIfRequired(final ManagementClient managementClient) throws Exception {
- String runningState = getContainerRunningState(managementClient);
- if (RELOAD_REQUIRED.equalsIgnoreCase(runningState)) {
- executeReloadAndWaitForCompletion(managementClient);
- } else {
- Assert.assertEquals("Server state 'running' is expected", RUNNING, runningState);
- }
- }
-
- private String getContainerRunningState(ManagementClient managementClient) throws IOException {
- return getContainerRunningState(managementClient.getControllerClient());
- }
-
- /**
- * Gets the current value of the server root resource's {@code server-state} attribute.
- * @param modelControllerClient client to use to read the state
- * @return the server state. Will not be {@code null}.
- * @throws IOException if there is an IO problem reading the state
- */
- private String getContainerRunningState(ModelControllerClient modelControllerClient) throws IOException {
- ModelNode operation = new ModelNode();
- operation.get(ADDRESS).setEmptyList();
- operation.get(OPERATION).set(READ_ATTRIBUTE);
- operation.get(NAME).set(SERVER_STATE);
- ModelNode rsp = modelControllerClient.execute(operation);
- return SUCCESS.equals(rsp.get(OUTCOME).asString()) ? rsp.get(RESULT).asString() : FAILED;
- }
-
- private void executeReloadAndWaitForCompletion(ManagementClient managementClient) {
- executeReload(managementClient.getControllerClient(), false, null);
- waitForLiveServerToReload(TIMEOUT, managementClient.getMgmtAddress(), managementClient.getMgmtPort());
- }
-
- private void executeReload(ModelControllerClient client, boolean adminOnly, String serverConfig) {
- ModelNode operation = new ModelNode();
- operation.get(ADDRESS).setEmptyList();
- operation.get(OPERATION).set(RELOAD);
- operation.get(ADMIN_ONLY).set(adminOnly);
- if(serverConfig != null) {
- operation.get(SERVER_CONFIG).set(serverConfig);
- }
- try {
- ModelNode result = client.execute(operation);
- if (!SUCCESS.equals(result.get(ClientConstants.OUTCOME).asString())) {
- fail("Reload operation didn't finish successfully: " + result.asString());
- }
- } catch(IOException e) {
- final Throwable cause = e.getCause();
- if (!(cause instanceof ExecutionException) && !(cause instanceof CancellationException)) {
- throw new RuntimeException(e);
- } // else ignore, this might happen if the channel gets closed before we got the response
- }
- }
-
- private void waitForLiveServerToReload(int timeout, String serverAddress, int serverPort) {
- int adjustedTimeout = timeout;
- long start = System.currentTimeMillis();
- ModelNode operation = new ModelNode();
- operation.get(ADDRESS).setEmptyList();
- operation.get(OPERATION).set(READ_ATTRIBUTE);
- operation.get(NAME).set(SERVER_STATE);
- while (System.currentTimeMillis() - start < adjustedTimeout) {
- //do the sleep before we check, as the attribute state may not change instantly
- //also reload generally takes longer than 100ms anyway
- try {
- Thread.sleep(100);
- } catch (InterruptedException e) {
- }
- try {
- ModelControllerClient liveClient = ModelControllerClient.Factory.create(
- serverAddress, serverPort);
- try {
- ModelNode result = liveClient.execute(operation);
- if (RUNNING.equals(result.get(RESULT).asString())) {
- return;
- }
- } catch (IOException e) {
- // ignore
- } finally {
- IoUtils.safeClose(liveClient);
- }
- } catch (UnknownHostException e) {
- throw new RuntimeException(e);
- }
- }
- fail("Live Server did not reload in the imparted time of " +
- adjustedTimeout + "(" + timeout + ") milliseconds");
- }
-
- private ModelNode getExtensionAddress(String extension) {
- ModelNode addr = new ModelNode();
- addr.add(EXTENSION, extension);
- return addr;
- }
-
- private ModelNode getSubsystemAddress(String subsystem) {
- ModelNode addr = new ModelNode();
- addr.add(SUBSYSTEM, subsystem);
- return addr;
- }
-
-}
diff --git a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/ReactiveMessagingKafkaIT.java b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/ReactiveMessagingKafkaIT.java
index 31134b2247..04cc5973f7 100644
--- a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/ReactiveMessagingKafkaIT.java
+++ b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/ReactiveMessagingKafkaIT.java
@@ -22,62 +22,38 @@
package org.wildfly.quickstarts.microprofile.reactive.messaging.test;
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.eclipse.microprofile.rest.client.RestClientBuilder;
-import org.jboss.arquillian.container.test.api.Deployment;
-import org.jboss.arquillian.container.test.api.RunAsClient;
-import org.jboss.arquillian.junit.Arquillian;
-import org.jboss.arquillian.test.api.ArquillianResource;
-import org.jboss.as.arquillian.api.ServerSetup;
-import org.jboss.shrinkwrap.api.ShrinkWrap;
-import org.jboss.shrinkwrap.api.asset.EmptyAsset;
-import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.junit.Assert;
import org.junit.Test;
-import org.junit.runner.RunWith;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
-import org.wildfly.quickstarts.microprofile.reactive.messaging.MessagingBean;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
+import static org.wildfly.quickstarts.microprofile.reactive.messaging.test.TestUtils.getServerHost;
/**
* @author Kabir Khan
*/
-@RunWith(Arquillian.class)
-@RunAsClient
-@ServerSetup({RunKafkaSetupTask.class, EnableReactiveExtensionsSetupTask.class})
public class ReactiveMessagingKafkaIT {
- @ArquillianResource
- URL url;
private final CloseableHttpClient httpClient = HttpClientBuilder.create().build();
private static final long TIMEOUT = 30000;
- @Deployment
- public static WebArchive getDeployment() {
- final WebArchive webArchive = ShrinkWrap.create(WebArchive.class, "reactive-messaging-kafka-tx.war")
- .addAsWebInfResource(EmptyAsset.INSTANCE, "beans.xml")
- .addPackage(MessagingBean.class.getPackage())
- .addAsWebInfResource("META-INF/persistence.xml", "classes/META-INF/persistence.xml")
- .addAsWebInfResource("META-INF/microprofile-config.properties", "classes/META-INF/microprofile-config.properties");
-
- return webArchive;
- }
-
@Test
- public void test() throws Throwable {
- HttpGet httpGet = new HttpGet(url.toExternalForm());
+ public void testDbEntries() throws Throwable {
+ HttpGet httpGet = new HttpGet(getServerHost() + "/db");
long end = System.currentTimeMillis() + TIMEOUT;
boolean done = false;
while (!done) {
@@ -91,7 +67,7 @@ public void test() throws Throwable {
@Test
public void testUserApi() throws Throwable {
final UserClient client = RestClientBuilder.newBuilder()
- .baseUrl(url)
+ .baseUrl(new URL(getServerHost()))
.build(UserClient.class);
final ListSubscriber taskA = new ListSubscriber(new CountDownLatch(3));
diff --git a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/RunKafkaSetupTask.java b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/RunKafkaSetupTask.java
deleted file mode 100644
index 7c979b1bfb..0000000000
--- a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/RunKafkaSetupTask.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * JBoss, Home of Professional Open Source.
- * Copyright 2020, Red Hat, Inc., and individual contributors
- * as indicated by the @author tags. See the copyright.txt file in the
- * distribution for a full listing of individual contributors.
- *
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This software is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-
-package org.wildfly.quickstarts.microprofile.reactive.messaging.test;
-
-import java.io.IOException;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
-
-import org.jboss.as.arquillian.api.ServerSetupTask;
-import org.jboss.as.arquillian.container.ManagementClient;
-import org.springframework.kafka.test.EmbeddedKafkaBroker;
-
-/**
- * @author Kabir Khan
- */
-public class RunKafkaSetupTask implements ServerSetupTask {
- EmbeddedKafkaBroker broker;
- Path kafkaDir;
- @Override
- public void setup(ManagementClient managementClient, String s) throws Exception {
- Path target = Paths.get("target").toAbsolutePath().normalize();
- kafkaDir = Files.createTempDirectory(target, "kafka");
-
- Files.createDirectories(kafkaDir);
-
- broker = new EmbeddedKafkaBroker(1, true, 1, "testing")
- .zkPort(2181)
- .kafkaPorts(9092)
- .brokerProperty("log.dir", kafkaDir.toString())
- .brokerProperty("num.partitions", 1)
- .brokerProperty("offsets.topic.num.partitions", 5);
-
- broker.afterPropertiesSet();
- }
-
- @Override
- public void tearDown(ManagementClient managementClient, String s) throws Exception {
- try {
- if (broker != null) {
- broker.destroy();
- }
- } finally {
- try {
- if (!Files.exists(kafkaDir)) {
- return;
- }
- Files.walkFileTree(kafkaDir, new SimpleFileVisitor() {
- @Override
- public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
- if (!Files.isDirectory(file)) {
- Files.delete(file);
- }
- return super.visitFile(file, attrs);
- }
-
- @Override
- public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
- Files.delete(dir);
- return super.postVisitDirectory(dir, exc);
- }
- });
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- }
-}
diff --git a/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/TestUtils.java b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/TestUtils.java
new file mode 100644
index 0000000000..483a7cda51
--- /dev/null
+++ b/microprofile-reactive-messaging-kafka/src/test/java/org/wildfly/quickstarts/microprofile/reactive/messaging/test/TestUtils.java
@@ -0,0 +1,16 @@
+package org.wildfly.quickstarts.microprofile.reactive.messaging.test;
+
+public class TestUtils {
+ static final String DEFAULT_SERVER_HOST = "http://localhost:8080/microprofile-reactive-messaging-kafka";
+
+ static String getServerHost() {
+ String serverHost = System.getenv("SERVER_HOST");
+ if (serverHost == null) {
+ serverHost = System.getProperty("server.host");
+ }
+ if (serverHost == null) {
+ serverHost = DEFAULT_SERVER_HOST;
+ }
+ return serverHost;
+ }
+}
diff --git a/shared-doc/build-and-run-the-quickstart-with-openshift.adoc b/shared-doc/build-and-run-the-quickstart-with-openshift.adoc
index 94a9d68f53..c1bcc56f3b 100644
--- a/shared-doc/build-and-run-the-quickstart-with-openshift.adoc
+++ b/shared-doc/build-and-run-the-quickstart-with-openshift.adoc
@@ -4,6 +4,13 @@
include::../shared-doc/build-the-quickstart-for-openshift.adoc[leveloffset=+1]
// Getting Started with Helm
include::../shared-doc/helm-getting-started-overview.adoc[leveloffset=+1]
+
+ifdef::helm-install-prerequisites[]
+// Additional steps needed before deploying in Helm
+[[deploy_helm_prerequisites]]
+include::{helm-install-prerequisites}[leveloffset=+1]
+endif::helm-install-prerequisites[]
+
//Prepare Helm for Quickstart Deployment
include::../shared-doc/helm-deploy-project.adoc[leveloffset=+1]
// Testing on Openshift