diff --git a/README.md b/README.md index 44d17800..9b2fa341 100644 --- a/README.md +++ b/README.md @@ -220,7 +220,7 @@ spec: ``` Available options : -- **spec.resourceType**: TOPIC, GROUP, CONNECT +- **spec.resourceType**: TOPIC, GROUP, CONNECT, CONNECT_CLUSTER - **spec.resourcePatternType**: PREFIXED, LITERAL - **spec.permission**: READ, WRITE @@ -261,6 +261,31 @@ user@local:/home/user$ kafkactl apply -f connector.yml Success Connector/test.connect1 (created) ``` +### Connect Cluster + +This resource declares a Connect cluster that has been self-deployed so namespace are autonomous to deploy connectors on it +without any Ns4Kafka outage. + +```yaml +--- +apiVersion: v1 +kind: ConnectCluster +metadata: + name: test.myConnectCluster +spec: + url: http://localhost:8083 + username: myUsername + password: myPassword +``` + +```bash +user@local:/home/user$ kafkactl apply -f connect-cluster.yml +Success ConnectCluster/test.myConnectCluster (created) +``` + +**metadata.name** should not collide with the name of a Connect cluster declared in the Ns4Kafka configuration. +An error message will be thrown otherwise. + ### Kafka Streams This resource only grants the necessary Kafka ACLs for your Kafka Stream to work properly (if you have internal topics). It doesn’t do anything with your actual Kafka Stream code or Kafka Stream deployment. @@ -779,7 +804,7 @@ Success Namespace/test (changed) It is possible to define quotas on a namespace. Ideal for clusters with limited resources! -A namespace with quotas will not be able to exceed the limits enforced by the quotas. +A namespace with quotas will not be able to exceed the limits enforced by these quotas. 
```yaml apiVersion: v1 diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerConfig.java b/api/src/main/java/com/michelin/ns4kafka/config/AkhqClaimProviderControllerConfig.java similarity index 89% rename from api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerConfig.java rename to api/src/main/java/com/michelin/ns4kafka/config/AkhqClaimProviderControllerConfig.java index 6cd7c1b0..0c51922c 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerConfig.java +++ b/api/src/main/java/com/michelin/ns4kafka/config/AkhqClaimProviderControllerConfig.java @@ -1,4 +1,4 @@ -package com.michelin.ns4kafka.controllers; +package com.michelin.ns4kafka.config; import io.micronaut.context.annotation.ConfigurationProperties; import lombok.Getter; diff --git a/api/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorConfig.java b/api/src/main/java/com/michelin/ns4kafka/config/KafkaAsyncExecutorConfig.java similarity index 98% rename from api/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorConfig.java rename to api/src/main/java/com/michelin/ns4kafka/config/KafkaAsyncExecutorConfig.java index d99676cd..a1440630 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorConfig.java +++ b/api/src/main/java/com/michelin/ns4kafka/config/KafkaAsyncExecutorConfig.java @@ -1,4 +1,4 @@ -package com.michelin.ns4kafka.services.executors; +package com.michelin.ns4kafka.config; import io.micronaut.context.annotation.ConfigurationProperties; import io.micronaut.context.annotation.EachProperty; diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreConfig.java b/api/src/main/java/com/michelin/ns4kafka/config/KafkaStoreConfig.java similarity index 90% rename from api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreConfig.java rename to 
api/src/main/java/com/michelin/ns4kafka/config/KafkaStoreConfig.java index 2af44746..5b522550 100644 --- a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreConfig.java +++ b/api/src/main/java/com/michelin/ns4kafka/config/KafkaStoreConfig.java @@ -1,7 +1,6 @@ -package com.michelin.ns4kafka.repositories.kafka; +package com.michelin.ns4kafka.config; import io.micronaut.context.annotation.ConfigurationProperties; -import io.micronaut.context.annotation.Property; import io.micronaut.core.convert.format.MapFormat; import java.util.Map; diff --git a/api/src/main/java/com/michelin/ns4kafka/security/SecurityConfig.java b/api/src/main/java/com/michelin/ns4kafka/config/SecurityConfig.java similarity index 81% rename from api/src/main/java/com/michelin/ns4kafka/security/SecurityConfig.java rename to api/src/main/java/com/michelin/ns4kafka/config/SecurityConfig.java index 8f9fe046..6515cc26 100644 --- a/api/src/main/java/com/michelin/ns4kafka/security/SecurityConfig.java +++ b/api/src/main/java/com/michelin/ns4kafka/config/SecurityConfig.java @@ -1,4 +1,4 @@ -package com.michelin.ns4kafka.security; +package com.michelin.ns4kafka.config; import com.michelin.ns4kafka.security.local.LocalUser; import io.micronaut.context.annotation.ConfigurationProperties; @@ -13,4 +13,5 @@ public class SecurityConfig { private List localUsers; private String adminGroup; + private String aes256EncryptionKey; } diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/AccessControlListController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/AccessControlListController.java index afce380f..e94d1644 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/AccessControlListController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/AccessControlListController.java @@ -1,10 +1,13 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import 
com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.AccessControlEntryService; import com.michelin.ns4kafka.services.NamespaceService; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.annotation.*; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java index 9d62c6ef..d1e68364 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.config.AkhqClaimProviderControllerConfig; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.services.AccessControlEntryService; import com.michelin.ns4kafka.services.NamespaceService; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java index 15555e20..61f1f372 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java @@ -40,7 +40,7 @@ public class ApiResourcesController { .kind("Connector") .namespaced(true) .synchronizable(true) - .path("connects") + .path("connectors") .names(List.of("connects", "connect", "co")) .build(); @@ -99,6 +99,17 @@ public class ApiResourcesController { .names(List.of("resource-quotas", "resource-quota", "quotas", "quota", "qu")) .build(); + /** + * Connect worker 
resource definition + */ + public static final ResourceDefinition CONNECT_CLUSTER = ResourceDefinition.builder() + .kind("ConnectCluster") + .namespaced(true) + .synchronizable(false) + .path("connect-clusters") + .names(List.of("connect-clusters", "connect-cluster", "cc")) + .build(); + /** * Namespace resource definition */ @@ -129,6 +140,7 @@ public List list(@Nullable Authentication authentication) { KSTREAM, ROLE_BINDING, RESOURCE_QUOTA, + CONNECT_CLUSTER, TOPIC, NAMESPACE, SCHEMA diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectClusterController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectClusterController.java new file mode 100644 index 00000000..d1dd7e68 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectClusterController.java @@ -0,0 +1,142 @@ +package com.michelin.ns4kafka.controllers; + +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.models.Namespace; +import com.michelin.ns4kafka.models.connector.Connector; +import com.michelin.ns4kafka.services.ConnectClusterService; +import com.michelin.ns4kafka.services.ConnectorService; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.HttpStatus; +import io.micronaut.http.annotation.*; +import io.micronaut.scheduling.TaskExecutors; +import io.micronaut.scheduling.annotation.ExecuteOn; +import io.swagger.v3.oas.annotations.tags.Tag; + +import javax.inject.Inject; +import javax.validation.Valid; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +@Tag(name = "Connect Clusters") +@Controller(value = "/api/namespaces/{namespace}/connect-clusters") 
+@ExecuteOn(TaskExecutors.IO) +public class ConnectClusterController extends NamespacedResourceController { + @Inject + ConnectClusterService connectClusterService; + + @Inject + ConnectorService connectorService; + + /** + * Get all the Connect workers by namespace + * @param namespace The namespace + * @return A list of connectors + */ + @Get + public List list(String namespace) { + return connectClusterService.findAllByNamespaceOwner(getNamespace(namespace)); + } + + /** + * Get the last version of a connector by namespace and name + * @param namespace The namespace + * @param connectCluster The name + * @return A Connect worker + */ + @Get("/{connectCluster}") + public Optional getConnectCluster(String namespace, String connectCluster) { + return connectClusterService.findByNamespaceAndNameOwner(getNamespace(namespace), connectCluster); + } + + /** + * Publish a Connect worker + * @param namespace The namespace + * @param connectCluster The connect worker + * @param dryrun Does the creation is a dry run + * @return The created role binding + */ + @Post("/{?dryrun}") + HttpResponse apply(String namespace, @Body @Valid ConnectCluster connectCluster, @QueryValue(defaultValue = "false") boolean dryrun) throws Exception { + Namespace ns = getNamespace(namespace); + + List validationErrors = new ArrayList<>(); + if (!connectClusterService.isNamespaceOwnerOfConnectCluster(ns, connectCluster.getMetadata().getName())) { + validationErrors.add(String.format("Namespace not owner of this Connect cluster %s.", connectCluster.getMetadata().getName())); + } + + validationErrors.addAll(connectClusterService.validateConnectClusterCreation(connectCluster)); + + if (!validationErrors.isEmpty()) { + throw new ResourceValidationException(validationErrors, connectCluster.getKind(), connectCluster.getMetadata().getName()); + } + + connectCluster.getMetadata().setCreationTimestamp(Date.from(Instant.now())); + connectCluster.getMetadata().setCluster(ns.getMetadata().getCluster()); + 
connectCluster.getMetadata().setNamespace(ns.getMetadata().getName()); + + Optional existingConnectCluster = connectClusterService.findByNamespaceAndNameOwner(ns, connectCluster.getMetadata().getName()); + if (existingConnectCluster.isPresent() && existingConnectCluster.get().equals(connectCluster)) { + return formatHttpResponse(existingConnectCluster.get(), ApplyStatus.unchanged); + } + + ApplyStatus status = existingConnectCluster.isPresent() ? ApplyStatus.changed : ApplyStatus.created; + if (dryrun) { + return formatHttpResponse(connectCluster, status); + } + + sendEventLog(connectCluster.getKind(), connectCluster.getMetadata(), status, existingConnectCluster.map(ConnectCluster::getSpec).orElse(null), + connectCluster.getSpec()); + + return formatHttpResponse(connectClusterService.create(connectCluster), status); + } + + /** + * Delete Connect cluster by the given name + * @param namespace The current namespace + * @param connectCluster The current connect cluster name to delete + * @param dryrun Run in dry mode or not + * @return A HTTP response + */ + @Status(HttpStatus.NO_CONTENT) + @Delete("/{connectCluster}{?dryrun}") + public HttpResponse delete(String namespace, String connectCluster, @QueryValue(defaultValue = "false") boolean dryrun) { + Namespace ns = getNamespace(namespace); + + List validationErrors = new ArrayList<>(); + if (!connectClusterService.isNamespaceOwnerOfConnectCluster(ns, connectCluster)) { + validationErrors.add(String.format("Namespace not owner of this Connect cluster %s.", connectCluster)); + } + + List connectors = connectorService.findAllByConnectCluster(ns, connectCluster); + if (!connectors.isEmpty()) { + validationErrors.add(String.format("The Connect cluster %s has %s deployed connector(s): %s. 
Please remove the associated connector(s) before deleting it.", connectCluster, connectors.size(), + connectors.stream().map(connector -> connector.getMetadata().getName()).collect(Collectors.joining(", ")))); + } + + if (!validationErrors.isEmpty()) { + throw new ResourceValidationException(validationErrors, "ConnectCluster", connectCluster); + } + + Optional optionalConnectCluster = connectClusterService.findByNamespaceAndNameOwner(ns, connectCluster); + if (optionalConnectCluster.isEmpty()) { + return HttpResponse.notFound(); + } + + if (dryrun) { + return HttpResponse.noContent(); + } + + ConnectCluster connectClusterToDelete = optionalConnectCluster.get(); + sendEventLog(connectClusterToDelete.getKind(), connectClusterToDelete.getMetadata(), ApplyStatus.deleted, connectClusterToDelete.getSpec(), null); + + connectClusterService.delete(connectClusterToDelete); + return HttpResponse.noContent(); + } +} diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java similarity index 86% rename from api/src/main/java/com/michelin/ns4kafka/controllers/ConnectController.java rename to api/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java index 223b166b..bce39b9c 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java @@ -1,10 +1,13 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.connector.ChangeConnectorState; import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.models.Namespace; -import com.michelin.ns4kafka.services.KafkaConnectService; +import com.michelin.ns4kafka.services.ConnectorService; import com.michelin.ns4kafka.services.ResourceQuotaService; +import 
com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.MutableHttpResponse; @@ -22,10 +25,10 @@ import java.util.Optional; import java.util.stream.Collectors; -@Tag(name = "Connects") -@Controller(value = "/api/namespaces/{namespace}/connects") +@Tag(name = "Connectors") +@Controller(value = "/api/namespaces/{namespace}/connectors") @ExecuteOn(TaskExecutors.IO) -public class ConnectController extends NamespacedResourceController { +public class ConnectorController extends NamespacedResourceController { /** * Message threw when namespace is not owner of the given connector */ @@ -35,7 +38,7 @@ public class ConnectController extends NamespacedResourceController { * Connector service */ @Inject - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; /** * The resource quota service @@ -50,7 +53,7 @@ public class ConnectController extends NamespacedResourceController { */ @Get public List list(String namespace) { - return kafkaConnectService.findAllForNamespace(getNamespace(namespace)); + return connectorService.findAllForNamespace(getNamespace(namespace)); } /** @@ -61,7 +64,7 @@ public List list(String namespace) { */ @Get("/{connector}") public Optional getConnector(String namespace, String connector) { - return kafkaConnectService.findByName(getNamespace(namespace), connector); + return connectorService.findByName(getNamespace(namespace), connector); } /** @@ -77,12 +80,12 @@ public Single> deleteConnector(String namespace, String conne Namespace ns = getNamespace(namespace); // Validate ownership - if (!kafkaConnectService.isNamespaceOwnerOfConnect(ns, connector)) { + if (!connectorService.isNamespaceOwnerOfConnect(ns, connector)) { return Single.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector)), "Connector", connector)); } - 
Optional optionalConnector = kafkaConnectService.findByName(ns, connector); + Optional optionalConnector = connectorService.findByName(ns, connector); if (optionalConnector.isEmpty()) { return Single.just(HttpResponse.notFound()); } @@ -98,7 +101,7 @@ public Single> deleteConnector(String namespace, String conne connectorToDelete.getSpec(), null); - return kafkaConnectService + return connectorService .delete(ns, optionalConnector.get()) .map(httpResponse -> HttpResponse.noContent()); } @@ -115,7 +118,7 @@ public Single> apply(String namespace, @Valid @Body Conn Namespace ns = getNamespace(namespace); // Validate ownership - if (!kafkaConnectService.isNamespaceOwnerOfConnect(ns, connector.getMetadata().getName())) { + if (!connectorService.isNamespaceOwnerOfConnect(ns, connector.getMetadata().getName())) { return Single.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector.getMetadata().getName())), connector.getKind(), connector.getMetadata().getName())); } @@ -131,14 +134,14 @@ public Single> apply(String namespace, @Valid @Body Conn connector.getSpec().getConfig().put("name", connector.getMetadata().getName()); // Validate locally - return kafkaConnectService.validateLocally(ns, connector) + return connectorService.validateLocally(ns, connector) .flatMap(validationErrors -> { if (!validationErrors.isEmpty()) { return Single.error(new ResourceValidationException(validationErrors, connector.getKind(), connector.getMetadata().getName())); } // Validate against connect rest API /validate - return kafkaConnectService.validateRemotely(ns, connector) + return connectorService.validateRemotely(ns, connector) .flatMap(remoteValidationErrors -> { if (!remoteValidationErrors.isEmpty()) { return Single.error(new ResourceValidationException(remoteValidationErrors, connector.getKind(), connector.getMetadata().getName())); @@ -152,7 +155,7 @@ public Single> apply(String namespace, @Valid @Body Conn .state(Connector.TaskState.UNASSIGNED) 
.build()); - Optional existingConnector = kafkaConnectService.findByName(ns, connector.getMetadata().getName()); + Optional existingConnector = connectorService.findByName(ns, connector.getMetadata().getName()); if (existingConnector.isPresent() && existingConnector.get().equals(connector)) { return Single.just(formatHttpResponse(existingConnector.get(), ApplyStatus.unchanged)); } @@ -174,7 +177,7 @@ public Single> apply(String namespace, @Valid @Body Conn sendEventLog(connector.getKind(), connector.getMetadata(), status, existingConnector.map(Connector::getSpec).orElse(null), connector.getSpec()); - return Single.just(formatHttpResponse(kafkaConnectService.createOrUpdate(connector), status)); + return Single.just(formatHttpResponse(connectorService.createOrUpdate(connector), status)); }); }); } @@ -190,12 +193,12 @@ public Single> apply(String namespace, @Valid @Body Conn public Single> changeState(String namespace, String connector, @Body @Valid ChangeConnectorState changeConnectorState) { Namespace ns = getNamespace(namespace); - if (!kafkaConnectService.isNamespaceOwnerOfConnect(ns, connector)) { + if (!connectorService.isNamespaceOwnerOfConnect(ns, connector)) { return Single.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector)), "Connector", connector)); } - Optional optionalConnector = kafkaConnectService.findByName(ns, connector); + Optional optionalConnector = connectorService.findByName(ns, connector); if (optionalConnector.isEmpty()) { return Single.just(HttpResponse.notFound()); @@ -204,13 +207,13 @@ public Single> changeState(String name Single> response; switch (changeConnectorState.getSpec().getAction()) { case restart: - response = kafkaConnectService.restart(ns, optionalConnector.get()); + response = connectorService.restart(ns, optionalConnector.get()); break; case pause: - response = kafkaConnectService.pause(ns, optionalConnector.get()); + response = connectorService.pause(ns, optionalConnector.get()); 
break; case resume: - response = kafkaConnectService.resume(ns, optionalConnector.get()); + response = connectorService.resume(ns, optionalConnector.get()); break; default: return Single.error(new IllegalStateException("Unspecified action " + changeConnectorState.getSpec().getAction())); @@ -249,7 +252,7 @@ public Single> changeState(String name @Post("/_/import{?dryrun}") public Single> importResources(String namespace, @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); - return kafkaConnectService.listUnsynchronizedConnectors(ns) + return connectorService.listUnsynchronizedConnectors(ns) .map(unsynchronizedConnectors -> { unsynchronizedConnectors.forEach(connector -> { connector.getMetadata().setCreationTimestamp(Date.from(Instant.now())); @@ -265,7 +268,7 @@ public Single> importResources(String namespace, @QueryValue(def .stream() .map(connector -> { sendEventLog(connector.getKind(), connector.getMetadata(), ApplyStatus.created, null, connector.getSpec()); - return kafkaConnectService.createOrUpdate(connector); + return connectorService.createOrUpdate(connector); }) .collect(Collectors.toList()); }); diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java index d3965a7c..a907945d 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java @@ -1,9 +1,12 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsets; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsetsResponse; import com.michelin.ns4kafka.services.ConsumerGroupService; +import 
com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.annotation.Body; import io.micronaut.http.annotation.Controller; import io.micronaut.http.annotation.Post; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java index d43392cf..7ab1f333 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java @@ -4,6 +4,7 @@ import com.michelin.ns4kafka.models.Status.StatusDetails; import com.michelin.ns4kafka.models.Status.StatusPhase; import com.michelin.ns4kafka.models.Status.StatusReason; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpRequest; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java index 81fb24d2..d3fda9e2 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java @@ -1,8 +1,11 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NonNamespacedResourceController; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.NamespaceService; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.annotation.*; import io.swagger.v3.oas.annotations.tags.Tag; diff --git 
a/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceQuotaController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceQuotaController.java index 418a5932..50bab24c 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceQuotaController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceQuotaController.java @@ -1,9 +1,12 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.quota.ResourceQuota; import com.michelin.ns4kafka.models.quota.ResourceQuotaResponse; import com.michelin.ns4kafka.services.ResourceQuotaService; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.annotation.*; @@ -18,7 +21,7 @@ import java.util.List; import java.util.Optional; -@Tag(name = "Resource Quota") +@Tag(name = "Resource Quotas") @Controller(value = "/api/namespaces/{namespace}/resource-quotas") @ExecuteOn(TaskExecutors.IO) public class ResourceQuotaController extends NamespacedResourceController { diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java index 4d0b8fd2..0b363290 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java @@ -1,9 +1,12 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.RoleBinding; import com.michelin.ns4kafka.services.RoleBindingService; +import 
com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.annotation.*; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java index aaad335a..1753b58d 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java @@ -1,11 +1,14 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.schema.Schema; import com.michelin.ns4kafka.models.schema.SchemaCompatibilityState; import com.michelin.ns4kafka.models.schema.SchemaList; import com.michelin.ns4kafka.services.SchemaService; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.annotation.*; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java index ebaa83cf..bb7027f3 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java @@ -1,8 +1,11 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.KafkaStream; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.services.StreamService; +import 
com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.annotation.*; @@ -15,7 +18,7 @@ import java.util.List; import java.util.Optional; -@Tag(name = "Stream") +@Tag(name = "Streams") @Controller(value = "/api/namespaces/{namespace}/streams") public class StreamController extends NamespacedResourceController { /** diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/TopicController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/TopicController.java index 0392d216..d7c27acd 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/TopicController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/TopicController.java @@ -1,10 +1,13 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.DeleteRecordsResponse; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.services.ResourceQuotaService; import com.michelin.ns4kafka.services.TopicService; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.annotation.*; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/UserController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/UserController.java index ddcee39a..4aebceb3 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/UserController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/UserController.java @@ -1,9 +1,12 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import 
com.michelin.ns4kafka.models.KafkaUserResetPassword; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.services.executors.UserAsyncExecutor; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.ApplicationContext; import io.micronaut.http.HttpResponse; import io.micronaut.http.annotation.Controller; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/NamespacedResourceController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java similarity index 93% rename from api/src/main/java/com/michelin/ns4kafka/controllers/NamespacedResourceController.java rename to api/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java index 9b8faecd..92f27a8b 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/NamespacedResourceController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java @@ -1,4 +1,4 @@ -package com.michelin.ns4kafka.controllers; +package com.michelin.ns4kafka.controllers.generic; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.services.NamespaceService; diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/NonNamespacedResourceController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java similarity index 64% rename from api/src/main/java/com/michelin/ns4kafka/controllers/NonNamespacedResourceController.java rename to api/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java index 727a561a..919c07ba 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/NonNamespacedResourceController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java 
@@ -1,8 +1,8 @@ -package com.michelin.ns4kafka.controllers; +package com.michelin.ns4kafka.controllers.generic; /** * Base Controller for all NonNamespaced resources */ -public abstract class NonNamespacedResourceController extends ResourceController{ +public abstract class NonNamespacedResourceController extends ResourceController { } diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceController.java b/api/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java similarity index 81% rename from api/src/main/java/com/michelin/ns4kafka/controllers/ResourceController.java rename to api/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java index 8dd1f3d4..7aa24eb7 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceController.java +++ b/api/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java @@ -1,8 +1,9 @@ -package com.michelin.ns4kafka.controllers; +package com.michelin.ns4kafka.controllers.generic; import com.michelin.ns4kafka.models.AuditLog; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpResponse; import io.micronaut.security.utils.SecurityService; @@ -12,6 +13,7 @@ import java.util.Date; public abstract class ResourceController { + private static final String STATUS_HEADER = "X-Ns4kafka-Result"; @Inject public SecurityService securityService; @@ -19,10 +21,8 @@ public abstract class ResourceController { @Inject public ApplicationEventPublisher applicationEventPublisher; - public final String statusHeaderName = "X-Ns4kafka-Result"; - public HttpResponse formatHttpResponse(T body, ApplyStatus status) { - return HttpResponse.ok(body).header(statusHeaderName, status.toString()); + return HttpResponse.ok(body).header(STATUS_HEADER, 
status.toString()); } public void sendEventLog(String kind, ObjectMeta metadata, ApplyStatus operation, Object before, Object after) { diff --git a/api/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java b/api/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java index 256b16e5..796d4e08 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java @@ -13,26 +13,13 @@ @AllArgsConstructor @Data public class AccessControlEntry { - /** - * The API version - */ private final String apiVersion = "v1"; - - /** - * The resource kind - */ private final String kind = "AccessControlEntry"; - /** - * The resource metadata - */ @Valid @NotNull private ObjectMeta metadata; - /** - * The resource specification - */ @Valid @NotNull private AccessControlEntrySpec spec; @@ -43,62 +30,37 @@ public class AccessControlEntry { @NoArgsConstructor @Data public static class AccessControlEntrySpec { - /** - * The ACL type - */ @NotNull protected ResourceType resourceType; - /** - * The resource name - */ @NotNull @NotBlank protected String resource; - /** - * The pattern type - */ @NotNull protected ResourcePatternType resourcePatternType; - /** - * The permission type - */ @NotNull protected Permission permission; - /** - * The grantee - */ @NotBlank @NotNull protected String grantedTo; } - /** - * The resource types - * It's important to follow the same naming as {@link org.apache.kafka.common.resource.ResourceType} - */ public enum ResourceType { TOPIC, GROUP, CONNECT, + CONNECT_CLUSTER, SCHEMA } - /** - * The resource patterns - * It's important to follow the same naming as {@link org.apache.kafka.common.resource.ResourcePattern} - */ public enum ResourcePatternType { LITERAL, PREFIXED } - /** - * The permissions - */ public enum Permission { OWNER, READ, diff --git a/api/src/main/java/com/michelin/ns4kafka/models/AuditLog.java 
b/api/src/main/java/com/michelin/ns4kafka/models/AuditLog.java index 44c8d33d..85b222c4 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/AuditLog.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/AuditLog.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.models; import com.fasterxml.jackson.annotation.JsonFormat; -import com.michelin.ns4kafka.controllers.ApplyStatus; +import com.michelin.ns4kafka.utils.enums.ApplyStatus; import lombok.AllArgsConstructor; import lombok.Data; @@ -10,9 +10,9 @@ @Data @AllArgsConstructor public class AuditLog { - private String user; private boolean admin; + @JsonFormat(shape = JsonFormat.Shape.STRING) private Date date; private String kind; @@ -20,5 +20,4 @@ public class AuditLog { private ApplyStatus operation; private Object before; private Object after; - } diff --git a/api/src/main/java/com/michelin/ns4kafka/models/ConnectCluster.java b/api/src/main/java/com/michelin/ns4kafka/models/ConnectCluster.java new file mode 100644 index 00000000..ec249024 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/models/ConnectCluster.java @@ -0,0 +1,38 @@ +package com.michelin.ns4kafka.models; + +import io.micronaut.core.annotation.Introspected; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import javax.validation.Valid; +import javax.validation.constraints.NotNull; + +@Data +@Builder +@Introspected +@NoArgsConstructor +@AllArgsConstructor +public class ConnectCluster { + private final String apiVersion = "v1"; + private final String kind = "ConnectCluster"; + + @Valid + @NotNull + private ObjectMeta metadata; + + @NotNull + private ConnectClusterSpec spec; + + @Builder + @AllArgsConstructor + @NoArgsConstructor + @Data + public static class ConnectClusterSpec { + @NotNull + String url; + String username; + String password; + } +} diff --git a/api/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java 
b/api/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java index 16810f75..bea9022a 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java @@ -6,32 +6,19 @@ import javax.validation.Valid; import javax.validation.constraints.NotNull; -@Introspected -@Builder @Getter +@Builder +@Introspected @NoArgsConstructor @AllArgsConstructor public class DeleteRecordsResponse { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Resource kind - */ private final String kind = "DeleteRecordsResponse"; - /** - * Resource metadata - */ @Valid @NotNull private ObjectMeta metadata; - /** - * Resource specifications - */ @Valid @NotNull private DeleteRecordsResponseSpec spec; @@ -43,19 +30,8 @@ public class DeleteRecordsResponse { @Getter @ToString public static class DeleteRecordsResponseSpec { - /** - * The topic that was reset - */ private String topic; - - /** - * The partition that was reset - */ private int partition; - - /** - * The new offset - */ private Long offset; } } diff --git a/api/src/main/java/com/michelin/ns4kafka/models/KafkaCluster.java b/api/src/main/java/com/michelin/ns4kafka/models/KafkaCluster.java index 1693a69f..f4a2b1a1 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/KafkaCluster.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/KafkaCluster.java @@ -6,9 +6,9 @@ import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.security.scram.internals.ScramMechanism; -@NoArgsConstructor @Getter @Setter +@NoArgsConstructor public class KafkaCluster { private String name; private String boostrapServers; @@ -16,5 +16,4 @@ public class KafkaCluster { private ScramMechanism scramMechanism; private String username; private String password; - } diff --git a/api/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java 
b/api/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java index 5cebe8bb..ef58c6aa 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java @@ -6,15 +6,15 @@ import javax.validation.Valid; import javax.validation.constraints.NotNull; -@Introspected +@Data @Builder +@Introspected @NoArgsConstructor @AllArgsConstructor -@Data public class KafkaStream { - private final String apiVersion = "v1"; private final String kind = "KafkaStream"; + @Valid @NotNull private ObjectMeta metadata; diff --git a/api/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java b/api/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java index 74f9c7e3..c8bf6311 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java @@ -3,18 +3,16 @@ import io.micronaut.core.annotation.Introspected; import lombok.*; -@Introspected +@Getter +@Setter @Builder +@Introspected @NoArgsConstructor @AllArgsConstructor -@Getter -@Setter public class KafkaUserResetPassword { private final String apiVersion = "v1"; private final String kind = "KafkaUserResetPassword"; - private ObjectMeta metadata; - private KafkaUserResetPasswordSpec spec; @Introspected diff --git a/api/src/main/java/com/michelin/ns4kafka/models/Namespace.java b/api/src/main/java/com/michelin/ns4kafka/models/Namespace.java index 8de62769..db8bc352 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/Namespace.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/Namespace.java @@ -13,11 +13,11 @@ import javax.validation.constraints.NotNull; import java.util.List; -@Introspected +@Data @Builder -@AllArgsConstructor +@Introspected @NoArgsConstructor -@Data +@AllArgsConstructor public class Namespace { private final String apiVersion = "v1"; private final String kind = "Namespace"; @@ -30,10 +30,10 @@ 
public class Namespace { @NotNull private NamespaceSpec spec; + @Data @Builder @AllArgsConstructor @NoArgsConstructor - @Data public static class NamespaceSpec { @NotBlank private String kafkaUser; diff --git a/api/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java b/api/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java index 6409e018..e1853e54 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java @@ -9,11 +9,11 @@ import java.util.Date; import java.util.Map; -@Introspected +@Data @Builder -@AllArgsConstructor +@Introspected @NoArgsConstructor -@Data +@AllArgsConstructor public class ObjectMeta { @NotBlank @Pattern(regexp = "^[a-zA-Z0-9_.-]+$") diff --git a/api/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java b/api/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java index bacb82c4..536082e6 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java @@ -19,7 +19,6 @@ @NoArgsConstructor @Data public class RoleBinding { - private final String apiVersion = "v1"; private final String kind = "RoleBinding"; @@ -36,7 +35,6 @@ public class RoleBinding { @NoArgsConstructor @Data public static class RoleBindingSpec { - @Valid @NotNull private Role role; @@ -51,7 +49,6 @@ public static class RoleBindingSpec { @NoArgsConstructor @Data public static class Role { - @NotNull @NotEmpty private Collection resourceTypes; @@ -66,7 +63,6 @@ public static class Role { @NoArgsConstructor @Data public static class Subject { - @NotNull private SubjectType subjectType; diff --git a/api/src/main/java/com/michelin/ns4kafka/models/Status.java b/api/src/main/java/com/michelin/ns4kafka/models/Status.java index 983bf552..475c8bba 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/Status.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/Status.java @@ -8,22 +8,18 @@ 
import java.util.List; -@Introspected +@Data @Builder +@Introspected @NoArgsConstructor @AllArgsConstructor -@Data public class Status { private final String apiVersion = "v1"; private final String kind = "Status"; - private StatusPhase status; - private String message; private StatusReason reason; - private StatusDetails details; - private int code; @Builder @@ -54,5 +50,4 @@ public enum StatusReason { MethodNotAllowed, InternalError } - } diff --git a/api/src/main/java/com/michelin/ns4kafka/models/Topic.java b/api/src/main/java/com/michelin/ns4kafka/models/Topic.java index 063022a4..487916e5 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/Topic.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/Topic.java @@ -11,38 +11,22 @@ import java.util.Date; import java.util.Map; -@Introspected +@Data @Builder +@Introspected @NoArgsConstructor @AllArgsConstructor -@Data public class Topic { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Kind of resource - */ private final String kind = "Topic"; - /** - * Schema metadata - */ @Valid @NotNull private ObjectMeta metadata; - /** - * Topic specifications - */ @NotNull private TopicSpec spec; - /** - * Topic status - */ @EqualsAndHashCode.Exclude private TopicStatus status; @@ -51,19 +35,8 @@ public class Topic { @NoArgsConstructor @Data public static class TopicSpec { - /** - * Replication factor - */ private int replicationFactor; - - /** - * Partitions quantity - */ private int partitions; - - /** - * Topic configuration - */ private Map configs; } @@ -74,19 +47,9 @@ public static class TopicSpec { @Setter @Schema(description = "Server-side", accessMode = Schema.AccessMode.READ_ONLY) public static class TopicStatus { - /** - * Topic phase - */ private TopicPhase phase; - - /** - * Message - */ private String message; - /** - * Last updated time - */ @JsonFormat(shape = JsonFormat.Shape.STRING) private Date lastUpdateTime; diff --git 
a/api/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java b/api/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java index 6d038f5d..6a104194 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java @@ -19,9 +19,11 @@ public class ChangeConnectorState { private final String apiVersion = "v1"; private final String kind = "ChangeConnectorState"; + @Valid @NotNull private ObjectMeta metadata; + @Valid @NotNull private ChangeConnectorStateSpec spec; @@ -35,9 +37,6 @@ public class ChangeConnectorState { public static class ChangeConnectorStateSpec { @NotNull private ConnectorAction action; - //TODO - // connectCluster - // taskId } @Introspected diff --git a/api/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java b/api/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java index e3d70344..c7b537ad 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java @@ -21,6 +21,7 @@ public class Connector { private final String apiVersion = "v1"; private final String kind = "Connector"; + @Valid @NotNull private ObjectMeta metadata; @@ -53,8 +54,8 @@ public static class ConnectorSpec { public static class ConnectorStatus { private TaskState state; private String worker_id; - private List tasks; + @JsonFormat(shape = JsonFormat.Shape.STRING) private Date lastUpdateTime; diff --git a/api/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java b/api/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java index 4a0a69ea..6d70d7b4 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java +++ 
b/api/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java @@ -14,26 +14,13 @@ @AllArgsConstructor @Data public class ConsumerGroupResetOffsets { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Resource kind - */ private final String kind = "ConsumerGroupResetOffsets"; - /** - * Resource metadata - */ @Valid @NotNull private ObjectMeta metadata; - /** - * Resource specifications - */ @Valid @NotNull private ConsumerGroupResetOffsetsSpec spec; @@ -46,36 +33,22 @@ public class ConsumerGroupResetOffsets { @Setter @ToString public static class ConsumerGroupResetOffsetsSpec { - /** - * The topic to reset offsets - */ @NotNull @NotBlank private String topic; - /** - * The method used to reset offsets - */ @NotNull private ResetOffsetsMethod method; - - /** - * Additional options for offsets reset - */ private String options; } - /** - * All reset offsets method - */ @Introspected public enum ResetOffsetsMethod { TO_EARLIEST, TO_LATEST, TO_DATETIME, // string:yyyy-MM-ddTHH:mm:SS.sss BY_DURATION, - SHIFT_BY, // int + SHIFT_BY, TO_OFFSET - // FROM_FILE map spec; diff --git a/api/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java b/api/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java index 0f44357f..1d9dbc47 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java @@ -13,26 +13,13 @@ @NoArgsConstructor @AllArgsConstructor public class ResourceQuotaResponse { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Resource kind - */ private final String kind = "ResourceQuotaResponse"; - /** - * Resource quota metadata - */ @Valid @NotNull private ObjectMeta metadata; - /** - * Resource specifications - */ @Valid @NotNull private ResourceQuotaResponseSpec spec; @@ -44,25 +31,9 @@ public class ResourceQuotaResponse { 
@AllArgsConstructor @NoArgsConstructor public static class ResourceQuotaResponseSpec { - /** - * The count quota for topics - */ private String countTopic; - - /** - * The count quota for partitions - */ private String countPartition; - - /** - * The disk quota for topics - */ private String diskTopic; - - /** - * The count quota for connectors - */ private String countConnector; - } } diff --git a/api/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java b/api/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java index ae49a9bf..7b98eee2 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java @@ -14,26 +14,13 @@ @NoArgsConstructor @AllArgsConstructor public class Schema { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Kind of resource - */ private final String kind = "Schema"; - /** - * Schema metadata - */ @Valid @NotNull private ObjectMeta metadata; - /** - * Schema specifications - */ @Valid @NotNull private SchemaSpec spec; @@ -43,36 +30,15 @@ public class Schema { @NoArgsConstructor @Data public static class SchemaSpec { - /** - * Schema ID - */ private Integer id; - - /** - * Schema version - */ private Integer version; - - /** - * Content of the schema - */ private String schema; - /** - * Schema type - */ @Builder.Default private SchemaType schemaType = SchemaType.AVRO; - /** - * Schema compatibility - */ @Builder.Default private Compatibility compatibility = Compatibility.GLOBAL; - - /** - * References list - */ private List references; @Builder @@ -87,9 +53,6 @@ public static class Reference { } } - /** - * Schema compatibility - */ @Introspected public enum Compatibility { GLOBAL, @@ -102,9 +65,6 @@ public enum Compatibility { NONE } - /** - * Schema type - */ @Introspected public enum SchemaType { AVRO, diff --git a/api/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java 
b/api/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java index 2cd0b9c0..6d0255b8 100644 --- a/api/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java +++ b/api/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java @@ -16,19 +16,9 @@ @NoArgsConstructor @AllArgsConstructor public class SchemaList { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Kind of resource - */ private final String kind = "SchemaList"; - /** - * Schema metadata - */ @Valid @NotNull private ObjectMeta metadata; diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java b/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java new file mode 100644 index 00000000..a18c6c18 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java @@ -0,0 +1,12 @@ +package com.michelin.ns4kafka.repositories; + +import com.michelin.ns4kafka.models.ConnectCluster; + +import java.util.List; + +public interface ConnectClusterRepository { + List findAll(); + List findAllForCluster(String cluster); + ConnectCluster create(ConnectCluster connectCluster); + void delete(ConnectCluster connectCluster); +} diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java b/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java index 02e37bb1..f54c0330 100644 --- a/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java @@ -6,8 +6,6 @@ public interface ConnectorRepository { List findAllForCluster(String cluster); - Connector create(Connector connector); - void delete(Connector connector); } diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java b/api/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java index abcd9b6d..a05e436d 100644 --- 
a/api/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java @@ -7,9 +7,7 @@ public interface NamespaceRepository { List findAllForCluster(String cluster); - Namespace createNamespace(Namespace namespace); Optional findByName(String namespace); - void delete(Namespace namespace); } diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java new file mode 100644 index 00000000..decb77a5 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java @@ -0,0 +1,59 @@ +package com.michelin.ns4kafka.repositories.kafka; + +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.repositories.ConnectClusterRepository; +import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.context.annotation.Value; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; + +import javax.inject.Singleton; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +@Singleton +@KafkaListener( + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED +) +public class KafkaConnectClusterRepository extends KafkaStore implements ConnectClusterRepository { + public KafkaConnectClusterRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.connect-workers") String kafkaTopic, + @KafkaClient("connect-workers") Producer kafkaProducer) { + super(kafkaTopic, kafkaProducer); + } + + @Override + public List findAll() { + return new ArrayList<>(getKafkaStore().values()); + } + + @Override + public List findAllForCluster(String cluster) { + return getKafkaStore().values().stream() + .filter(connectCluster -> 
connectCluster.getMetadata().getCluster().equals(cluster)) + .collect(Collectors.toList()); + } + + @Override + public ConnectCluster create(ConnectCluster connectCluster) { + return this.produce(getMessageKey(connectCluster), connectCluster); + } + + @Override + public void delete(ConnectCluster connectCluster) { + this.produce(getMessageKey(connectCluster),null); + } + + @Override + @Topic(value = "${ns4kafka.store.kafka.topics.prefix}.connect-workers") + void receive(ConsumerRecord record) { + super.receive(record); + } + + @Override + String getMessageKey(ConnectCluster connectCluster) { + return connectCluster.getMetadata().getNamespace() + "/" + connectCluster.getMetadata().getName(); + } +} diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java index 8e57a322..5fa02b78 100644 --- a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java @@ -24,10 +24,11 @@ public KafkaConnectorRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.co } @Override - String getMessageKey(Connector roleBinding) { - return roleBinding.getMetadata().getNamespace() + "/" + roleBinding.getMetadata().getName(); + String getMessageKey(Connector connector) { + return connector.getMetadata().getNamespace() + "/" + connector.getMetadata().getName(); } + @Override @Topic(value = "${ns4kafka.store.kafka.topics.prefix}.connectors") void receive(ConsumerRecord record) { super.receive(record); @@ -49,5 +50,4 @@ public List findAllForCluster(String cluster) { .filter(connector -> connector.getMetadata().getCluster().equals(cluster)) .collect(Collectors.toList()); } - } diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java 
b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java index 1dfd64d9..ccd7b068 100644 --- a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java @@ -23,7 +23,7 @@ public class KafkaResourceQuotaRepository extends KafkaStore impl * @param kafkaProducer The resource quota producer */ public KafkaResourceQuotaRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.resource-quotas") String kafkaTopic, - @KafkaClient("role-resource-quotas") Producer kafkaProducer) { + @KafkaClient("resource-quotas") Producer kafkaProducer) { super(kafkaTopic, kafkaProducer); } diff --git a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java index 587ead65..eadfb38e 100644 --- a/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java +++ b/api/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.repositories.kafka; +import com.michelin.ns4kafka.config.KafkaStoreConfig; import io.micronaut.context.ApplicationContext; import io.micronaut.context.annotation.Property; import io.micronaut.scheduling.TaskExecutors; @@ -39,7 +40,8 @@ public abstract class KafkaStore { @Inject ApplicationContext applicationContext; @Inject AdminClient adminClient; - @Inject KafkaStoreConfig kafkaStoreConfig; + @Inject + KafkaStoreConfig kafkaStoreConfig; @Inject @Named(TaskExecutors.SCHEDULED) TaskScheduler taskScheduler; diff --git a/api/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java b/api/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java index cbc94434..ee1ddeb1 100644 --- a/api/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java +++ 
b/api/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.security; +import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.models.RoleBinding; import com.michelin.ns4kafka.repositories.NamespaceRepository; import com.michelin.ns4kafka.repositories.RoleBindingRepository; diff --git a/api/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java b/api/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java index 332a7fb9..f35dd12f 100644 --- a/api/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java +++ b/api/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.security.gitlab; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; -import com.michelin.ns4kafka.security.SecurityConfig; +import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.services.RoleBindingService; import edu.umd.cs.findbugs.annotations.Nullable; import io.micronaut.http.HttpRequest; diff --git a/api/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java b/api/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java index f5a96d6f..c9b99f0c 100644 --- a/api/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java +++ b/api/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.security.local; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; -import com.michelin.ns4kafka.security.SecurityConfig; +import com.michelin.ns4kafka.config.SecurityConfig; import edu.umd.cs.findbugs.annotations.Nullable; import io.micronaut.http.HttpRequest; import io.micronaut.security.authentication.*; diff --git 
a/api/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java b/api/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java index 0e72ad21..f9b99591 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java @@ -4,13 +4,14 @@ import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.repositories.AccessControlEntryRepository; import com.michelin.ns4kafka.services.executors.AccessControlEntryAsyncExecutor; -import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; import javax.inject.Inject; import javax.inject.Singleton; -import java.util.*; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; @Singleton @@ -36,9 +37,9 @@ public class AccessControlEntryService { public List validate(AccessControlEntry accessControlEntry, Namespace namespace) { List validationErrors = new ArrayList<>(); - // Which resource can be granted cross namespaces ? TOPIC + // Which resource can be granted cross namespaces List allowedResourceTypes = - List.of(AccessControlEntry.ResourceType.TOPIC); + List.of(AccessControlEntry.ResourceType.TOPIC, AccessControlEntry.ResourceType.CONNECT_CLUSTER); // Which permission can be granted cross namespaces ? 
READ, WRITE // Only admin can grant OWNER @@ -59,7 +60,6 @@ public List validate(AccessControlEntry accessControlEntry, Namespace na } if (!allowedPermissions.contains(accessControlEntry.getSpec().getPermission())) { - validationErrors.add("Invalid value " + accessControlEntry.getSpec().getPermission() + " for permission: Value must be one of [" + allowedPermissions.stream().map(Object::toString).collect(Collectors.joining(", ")) + diff --git a/api/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java b/api/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java new file mode 100644 index 00000000..3d304925 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java @@ -0,0 +1,188 @@ +package com.michelin.ns4kafka.services; + +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.SecurityConfig; +import com.michelin.ns4kafka.models.AccessControlEntry; +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.models.Namespace; +import com.michelin.ns4kafka.repositories.ConnectClusterRepository; +import com.michelin.ns4kafka.utils.EncryptionUtils; +import com.nimbusds.jose.JOSEException; +import io.micronaut.http.HttpRequest; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.HttpStatus; +import io.micronaut.http.MutableHttpRequest; +import io.micronaut.http.client.RxHttpClient; +import io.micronaut.http.client.annotation.Client; +import io.micronaut.http.client.exceptions.HttpClientException; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + +import javax.inject.Inject; +import javax.inject.Singleton; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +@Slf4j +@Singleton +public class ConnectClusterService { + @Inject + 
AccessControlEntryService accessControlEntryService; + + @Inject + ConnectClusterRepository connectClusterRepository; + + @Inject + List kafkaAsyncExecutorConfig; + + @Inject + SecurityConfig securityConfig; + + @Inject + @Client("/") + RxHttpClient httpClient; + + /** + * Find all self deployed Connect clusters + * @return A list of Connect clusters + */ + public List findAll() { + return connectClusterRepository.findAll(); + } + + /** + * Find all self deployed Connect clusters for a given namespace with a given list of permissions + * @param namespace The namespace + * @param permissions The list of permission to filter on + * @return A list of Connect clusters + */ + public List findAllByNamespace(Namespace namespace, List permissions) { + List acls = accessControlEntryService.findAllGrantedToNamespace(namespace).stream() + .filter(acl -> permissions.contains(acl.getSpec().getPermission())) + .filter(acl -> acl.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .collect(Collectors.toList()); + + return connectClusterRepository.findAllForCluster(namespace.getMetadata().getCluster()) + .stream() + .filter(connector -> acls.stream().anyMatch(accessControlEntry -> { + switch (accessControlEntry.getSpec().getResourcePatternType()) { + case PREFIXED: + return connector.getMetadata().getName().startsWith(accessControlEntry.getSpec().getResource()); + case LITERAL: + return connector.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); + } + + return false; + })) + .collect(Collectors.toList()); + } + + /** + * Find all self deployed Connect clusters whose namespace is owner + * @param namespace The namespace + * @return The list of owned Connect cluster + */ + public List findAllByNamespaceOwner(Namespace namespace) { + return findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER)) + .stream() + .map(connectCluster -> ConnectCluster.builder() + .metadata(connectCluster.getMetadata()) + 
.spec(ConnectCluster.ConnectClusterSpec.builder() + .url(connectCluster.getSpec().getUrl()) + .username(connectCluster.getSpec().getUsername()) + .password(EncryptionUtils.decryptAES256GCM(connectCluster.getSpec().getPassword(), securityConfig.getAes256EncryptionKey())) + .build()) + .build()) + .collect(Collectors.toList()); + } + + /** + * Find all self deployed Connect clusters whose namespace has write access + * @param namespace The namespace + * @return The list of Connect cluster with write access + */ + public List findAllByNamespaceWrite(Namespace namespace) { + return findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER, AccessControlEntry.Permission.WRITE)); + } + + /** + * Find a self deployed Connect cluster by namespace and name with owner rights + * @param namespace The namespace + * @param connectClusterName The connect worker name + * @return An optional connect worker + */ + public Optional findByNamespaceAndNameOwner(Namespace namespace, String connectClusterName) { + return findAllByNamespaceOwner(namespace) + .stream() + .filter(connectCluster -> connectCluster.getMetadata().getName().equals(connectClusterName)) + .findFirst(); + } + + /** + * Create a given connect worker + * @param connectCluster The connect worker + * @return The created connect worker + */ + public ConnectCluster create(ConnectCluster connectCluster) throws IOException, JOSEException { + if (StringUtils.isNotBlank(connectCluster.getSpec().getPassword())) { + connectCluster.getSpec() + .setPassword(EncryptionUtils.encryptAES256GCM(connectCluster.getSpec().getPassword(), securityConfig.getAes256EncryptionKey())); + } + return connectClusterRepository.create(connectCluster); + } + + /** + * Validate the given connect worker configuration for creation + * @param connectCluster The connect worker to validate + * @return A list of validation errors + */ + public List validateConnectClusterCreation(ConnectCluster connectCluster) { + List errors = new 
ArrayList<>(); + + if (kafkaAsyncExecutorConfig.stream().anyMatch(cluster -> + cluster.getConnects().entrySet().stream().anyMatch(entry -> entry.getKey().equals(connectCluster.getMetadata().getName())))) { + errors.add(String.format("A Connect cluster is already defined globally with the name %s. Please provide a different name.", connectCluster.getMetadata().getName())); + } + + try { + MutableHttpRequest request = HttpRequest.GET(new URL(connectCluster.getSpec().getUrl()) + "/connectors?expand=info&expand=status"); + if (StringUtils.isNotBlank(connectCluster.getSpec().getUsername()) && StringUtils.isNotBlank(connectCluster.getSpec().getPassword())){ + request.basicAuth(connectCluster.getSpec().getUsername(), connectCluster.getSpec().getPassword()); + } + HttpResponse response = httpClient.exchange(request).blockingFirst(); + if (!response.getStatus().equals(HttpStatus.OK)) { + errors.add(String.format("The Connect cluster %s is not healthy (HTTP code %s).", connectCluster.getMetadata().getName(), response.getStatus().getCode())); + } + } catch (MalformedURLException e) { + errors.add(String.format("The Connect cluster %s has a malformed URL \"%s\".", connectCluster.getMetadata().getName(), connectCluster.getSpec().getUrl())); + } catch (HttpClientException e) { + errors.add(String.format("The following error occurred trying to check the Connect cluster %s health: %s.", connectCluster.getMetadata().getName(), e.getMessage())); + } + + return errors; + } + + /** + * Delete a given Connect cluster + * @param connectCluster The Connect cluster + */ + public void delete(ConnectCluster connectCluster) { + connectClusterRepository.delete(connectCluster); + } + + /** + * Is given namespace owner of the given connect worker + * @param namespace The namespace + * @param connectCluster The connect cluster + * @return true if it is, false otherwise + */ + public boolean isNamespaceOwnerOfConnectCluster(Namespace namespace, String connectCluster) { + return 
accessControlEntryService.isNamespaceOwnerOfResource(namespace.getMetadata().getName(), + AccessControlEntry.ResourceType.CONNECT_CLUSTER, connectCluster); + } +} diff --git a/api/src/main/java/com/michelin/ns4kafka/services/KafkaConnectService.java b/api/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java similarity index 81% rename from api/src/main/java/com/michelin/ns4kafka/services/KafkaConnectService.java rename to api/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java index 40342f8c..04595cec 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/KafkaConnectService.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java @@ -4,8 +4,8 @@ import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.repositories.ConnectorRepository; -import com.michelin.ns4kafka.services.connect.KafkaConnectClientProxy; -import com.michelin.ns4kafka.services.connect.client.KafkaConnectClient; +import com.michelin.ns4kafka.services.connect.ConnectorClientProxy; +import com.michelin.ns4kafka.services.connect.client.ConnectorClient; import com.michelin.ns4kafka.services.connect.client.entities.ConnectorSpecs; import com.michelin.ns4kafka.services.executors.ConnectorAsyncExecutor; import io.micronaut.context.ApplicationContext; @@ -22,35 +22,26 @@ import java.util.Locale; import java.util.Optional; import java.util.stream.Collectors; - +import java.util.stream.Stream; @Slf4j @Singleton -public class KafkaConnectService { - /** - * The ACL service - */ +public class ConnectorService { @Inject AccessControlEntryService accessControlEntryService; - /** - * The connector HTTP client - */ @Inject - KafkaConnectClient kafkaConnectClient; + ConnectorClient connectorClient; - /** - * The connector repository - */ @Inject ConnectorRepository connectorRepository; - /** - * The application context - */ @Inject ApplicationContext applicationContext; + @Inject + 
ConnectClusterService connectClusterService; + /** * Find all connectors by given namespace * @param namespace The namespace @@ -61,7 +52,6 @@ public List findAllForNamespace(Namespace namespace) { return connectorRepository.findAllForCluster(namespace.getMetadata().getCluster()) .stream() .filter(connector -> acls.stream().anyMatch(accessControlEntry -> { - //need to check accessControlEntry.Permission, we want OWNER if (accessControlEntry.getSpec().getPermission() != AccessControlEntry.Permission.OWNER) { return false; } @@ -78,6 +68,19 @@ public List findAllForNamespace(Namespace namespace) { .collect(Collectors.toList()); } + /** + * Find all connectors by given namespace and Connect cluster + * @param namespace The namespace + * @param connectCluster The Connect cluster + * @return A list of connectors + */ + public List findAllByConnectCluster(Namespace namespace, String connectCluster) { + return connectorRepository.findAllForCluster(namespace.getMetadata().getCluster()) + .stream() + .filter(connector -> connector.getSpec().getConnectCluster().equals(connectCluster)) + .collect(Collectors.toList()); + } + /** * Find a connector by namespace and name * @param namespace The namespace @@ -99,18 +102,25 @@ public Optional findByName(Namespace namespace, String connector) { */ public Single> validateLocally(Namespace namespace, Connector connector) { // Check whether target Connect Cluster is allowed for this namespace - if(!namespace.getSpec().getConnectClusters().contains(connector.getSpec().getConnectCluster())){ - String allowedConnectClusters = String.join(", ",namespace.getSpec().getConnectClusters()); + List selfDeployedConnectClusters = connectClusterService.findAllByNamespaceWrite(namespace) + .stream() + .map(connectCluster -> connectCluster.getMetadata().getName()) + .collect(Collectors.toList()); + + if (!namespace.getSpec().getConnectClusters().contains(connector.getSpec().getConnectCluster()) && + 
!selfDeployedConnectClusters.contains(connector.getSpec().getConnectCluster())) { + String allowedConnectClusters = Stream.concat(namespace.getSpec().getConnectClusters().stream(), selfDeployedConnectClusters.stream()).collect(Collectors.joining(", ")); return Single.just( - List.of("Invalid value " + connector.getSpec().getConnectCluster() + " for spec.connectCluster: Value must be one of ["+allowedConnectClusters+"]")); + List.of("Invalid value " + connector.getSpec().getConnectCluster() + " for spec.connectCluster: Value must be one of [" + allowedConnectClusters + "]")); } // If class doesn't exist, no need to go further - if (StringUtils.isEmpty(connector.getSpec().getConfig().get("connector.class"))) + if (StringUtils.isEmpty(connector.getSpec().getConfig().get("connector.class"))) { return Single.just(List.of("Invalid value for spec.config.'connector.class': Value must be non-null")); + } // Connector type exists on this target connect cluster ? - return kafkaConnectClient.connectPlugins(KafkaConnectClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), + return connectorClient.connectPlugins(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster()) .map(connectorPluginInfos -> { Optional connectorType = connectorPluginInfos @@ -145,12 +155,8 @@ public boolean isNamespaceOwnerOfConnect(Namespace namespace, String connect) { * @return A list of errors */ public Single> validateRemotely(Namespace namespace, Connector connector) { - // Calls the "validate" endpoints and returns the validation error messages if any - return kafkaConnectClient.validate( - KafkaConnectClientProxy.PROXY_SECRET, - namespace.getMetadata().getCluster(), - connector.getSpec().getConnectCluster(), - connector.getSpec().getConfig().get("connector.class"), + return connectorClient.validate(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), + connector.getSpec().getConnectCluster(), 
connector.getSpec().getConfig().get("connector.class"), ConnectorSpecs.builder() .config(connector.getSpec().getConfig()) .build()) @@ -176,7 +182,7 @@ public Connector createOrUpdate(Connector connector) { * @param connector The connector */ public Single> delete(Namespace namespace, Connector connector) { - return kafkaConnectClient.delete(KafkaConnectClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), + return connectorClient.delete(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) .defaultIfEmpty(HttpResponse.noContent()) .flatMapSingle(httpResponse -> { @@ -222,11 +228,11 @@ public Single> listUnsynchronizedConnectors(Namespace namespace) * @return An HTTP response */ public Single> restart(Namespace namespace, Connector connector) { - return kafkaConnectClient.status(KafkaConnectClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), + return connectorClient.status(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) .flatMap(status -> { Observable> observable = Observable.fromIterable(status.tasks()) - .flatMapSingle(task -> kafkaConnectClient.restart(KafkaConnectClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), + .flatMapSingle(task -> connectorClient.restart(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName(), task.id())) .map(restartedTasks -> { log.info("Success restarting connector [{}] on namespace [{}] connect [{}]", @@ -248,7 +254,7 @@ public Single> restart(Namespace namespace, Connector connect * @return An HTTP response */ public Single> pause(Namespace namespace, Connector connector) { - return kafkaConnectClient.pause(KafkaConnectClientProxy.PROXY_SECRET, 
namespace.getMetadata().getCluster(), + return connectorClient.pause(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) .map(pause -> { log.info("Success pausing Connector [{}] on Namespace [{}] Connect [{}]", @@ -267,7 +273,7 @@ public Single> pause(Namespace namespace, Connector connector * @return An HTTP response */ public Single> resume(Namespace namespace, Connector connector) { - return kafkaConnectClient.resume(KafkaConnectClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), + return connectorClient.resume(ConnectorClientProxy.PROXY_SECRET, namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) .map(resume -> { log.info("Success resuming Connector [{}] on Namespace [{}] Connect [{}]", diff --git a/api/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java b/api/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java index 8d87b11f..822ec9a1 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java @@ -2,7 +2,7 @@ import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.repositories.NamespaceRepository; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import javax.inject.Inject; import javax.inject.Singleton; @@ -26,7 +26,7 @@ public class NamespaceService { @Inject AccessControlEntryService accessControlEntryService; @Inject - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; /** * Namespace validation in case of new namespace @@ -91,7 +91,7 @@ public List listAllNamespaceResources(Namespace namespace){ return Stream.of( topicService.findAllForNamespace(namespace).stream() .map(topic -> 
topic.getKind()+"/"+topic.getMetadata().getName()), - kafkaConnectService.findAllForNamespace(namespace).stream() + connectorService.findAllForNamespace(namespace).stream() .map(connector -> connector.getKind()+"/"+connector.getMetadata().getName()), accessControlEntryService.findAllForNamespace(namespace).stream() .map(ace -> ace.getKind()+"/"+ace.getMetadata().getName()), diff --git a/api/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java b/api/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java index 7c4d6f5c..5a65cb83 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java @@ -53,7 +53,7 @@ public class ResourceQuotaService { * Connector service */ @Inject - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; /** * Find a resource quota by namespace @@ -184,7 +184,7 @@ public long getCurrentDiskTopics(Namespace namespace) { * @return The number of topics */ public long getCurrentCountConnectors(Namespace namespace) { - return kafkaConnectService.findAllForNamespace(namespace).size(); + return connectorService.findAllForNamespace(namespace).size(); } /** diff --git a/api/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/api/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 116e93ad..9c2ec10c 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -4,7 +4,7 @@ import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.repositories.TopicRepository; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; 
import io.micronaut.inject.qualifiers.Qualifiers; diff --git a/api/src/main/java/com/michelin/ns4kafka/services/connect/ConnectorClientProxy.java b/api/src/main/java/com/michelin/ns4kafka/services/connect/ConnectorClientProxy.java new file mode 100644 index 00000000..b95d7b69 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/services/connect/ConnectorClientProxy.java @@ -0,0 +1,160 @@ +package com.michelin.ns4kafka.services.connect; + +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig.ConnectConfig; +import com.michelin.ns4kafka.config.SecurityConfig; +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.services.ConnectClusterService; +import com.michelin.ns4kafka.utils.EncryptionUtils; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; +import io.micronaut.core.async.publisher.Publishers; +import io.micronaut.core.util.StringUtils; +import io.micronaut.http.HttpHeaders; +import io.micronaut.http.HttpRequest; +import io.micronaut.http.MutableHttpRequest; +import io.micronaut.http.MutableHttpResponse; +import io.micronaut.http.annotation.Filter; +import io.micronaut.http.client.ProxyHttpClient; +import io.micronaut.http.filter.OncePerRequestHttpServerFilter; +import io.micronaut.http.filter.ServerFilterChain; +import lombok.extern.slf4j.Slf4j; +import org.reactivestreams.Publisher; + +import javax.inject.Inject; +import java.net.URI; +import java.util.List; +import java.util.Optional; +import java.util.UUID; + +@Slf4j +@Filter(ConnectorClientProxy.PROXY_PREFIX + "/**") +public class ConnectorClientProxy extends OncePerRequestHttpServerFilter { + /** + * Prefix used to filter request to Connect clusters. 
It'll be replaced by + * the Connect cluster URL of the given cluster + */ + public static final String PROXY_PREFIX = "/connect-proxy"; + + /** + * A header that contains the Kafka cluster + */ + public static final String PROXY_HEADER_KAFKA_CLUSTER = "X-Kafka-Cluster"; + + /** + * A header that contains the Connect cluster name + */ + public static final String PROXY_HEADER_CONNECT_CLUSTER = "X-Connect-Cluster"; + + /** + * A header that contains a secret for the request + */ + public static final String PROXY_HEADER_SECRET = "X-Proxy-Secret"; + + /** + * Generate a secret + */ + public static final String PROXY_SECRET = UUID.randomUUID().toString(); + + @Inject + ProxyHttpClient client; + + @Inject + List kafkaAsyncExecutorConfigs; + + @Inject + ConnectClusterService connectClusterService; + + @Inject + SecurityConfig securityConfig; + + /** + * Filter requests + * @param request The request to filter + * @param chain The servlet chain + * @return A modified request + */ + @Override + public Publisher> doFilterOnce(HttpRequest request, ServerFilterChain chain) { + // Check call is initiated from Micronaut and not from outside + if (!request.getHeaders().contains(ConnectorClientProxy.PROXY_HEADER_SECRET)) { + return Publishers.just(new ResourceValidationException(List.of("Missing required header " + ConnectorClientProxy.PROXY_HEADER_SECRET), null, null)); + } + + String secret = request.getHeaders().get(ConnectorClientProxy.PROXY_HEADER_SECRET); + if (!PROXY_SECRET.equals(secret)) { + return Publishers.just(new ResourceValidationException(List.of("Invalid value " + secret + " for header " + ConnectorClientProxy.PROXY_HEADER_SECRET), null, null)); + } + + if (!request.getHeaders().contains(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER)) { + return Publishers.just(new ResourceValidationException(List.of("Missing required header " + ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER), null, null)); + } + + if 
(!request.getHeaders().contains(ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER)) { + return Publishers.just(new ResourceValidationException(List.of("Missing required header " + ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER), null, null)); + } + + String kafkaCluster = request.getHeaders().get(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER); + String connectCluster = request.getHeaders().get(ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER); + + // Get config of the kafkaCluster + Optional config = kafkaAsyncExecutorConfigs.stream() + .filter(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster)) + .findFirst(); + + if (config.isEmpty()) { + return Publishers.just(new ResourceValidationException(List.of("Kafka Cluster [" + kafkaCluster + "] not found"),null,null)); + } + + Optional connectClusterOptional = connectClusterService.findAll() + .stream() + .filter(researchConnectCluster -> researchConnectCluster.getMetadata().getName().equals(connectCluster)) + .findFirst(); + + if (connectClusterOptional.isPresent()) { + log.debug("Self deployed Connect cluster {} found in namespace {}", connectCluster, connectClusterOptional.get().getMetadata().getNamespace()); + return client.proxy(mutateKafkaConnectRequest(request, connectClusterOptional.get().getSpec().getUrl(), + connectClusterOptional.get().getSpec().getUsername(), + EncryptionUtils.decryptAES256GCM(connectClusterOptional.get().getSpec().getPassword(), securityConfig.getAes256EncryptionKey()))); + } + + ConnectConfig connectConfig = config.get().getConnects().get(connectCluster); + if (connectConfig == null) { + return Publishers.just(new ResourceValidationException(List.of("Connect cluster [" + connectCluster + "] not found"), null, null)); + } + + log.debug("Connect cluster {} found in Ns4Kafka configuration", connectCluster); + + return client.proxy(mutateKafkaConnectRequest(request, connectConfig.getUrl(), + connectConfig.getBasicAuthUsername(), + 
connectConfig.getBasicAuthPassword())); + } + + /** + * Mutate the prefixed request to the required Connect cluster, either from the Ns4Kafka configuration or from a self-deployed + * Connect cluster configuration + * @param request The request to modify + * @param url The Connect cluster URL + * @param username The Connect cluster username + * @param password The Connect cluster password + * @return The modified request + */ + public MutableHttpRequest mutateKafkaConnectRequest(HttpRequest request, String url, String username, String password) { + URI newURI = URI.create(url); + + MutableHttpRequest mutableHttpRequest = request.mutate() + .uri(b -> b + .scheme(newURI.getScheme()) + .host(newURI.getHost()) + .port(newURI.getPort()) + .replacePath(StringUtils.prependUri( + newURI.getPath(), + request.getPath().substring(ConnectorClientProxy.PROXY_PREFIX.length()) + )) + ) + .basicAuth(username, password); + + // Micronaut resets Host later on with proper value. + mutableHttpRequest.getHeaders().remove(HttpHeaders.HOST); + return mutableHttpRequest; + } +} diff --git a/api/src/main/java/com/michelin/ns4kafka/services/connect/KafkaConnectClientProxy.java b/api/src/main/java/com/michelin/ns4kafka/services/connect/KafkaConnectClientProxy.java deleted file mode 100644 index ccfeab6f..00000000 --- a/api/src/main/java/com/michelin/ns4kafka/services/connect/KafkaConnectClientProxy.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.michelin.ns4kafka.services.connect; - -import com.michelin.ns4kafka.controllers.ResourceValidationException; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig.ConnectConfig; -import io.micronaut.core.async.publisher.Publishers; -import io.micronaut.core.util.StringUtils; -import io.micronaut.http.HttpHeaders; -import io.micronaut.http.HttpRequest; -import io.micronaut.http.MutableHttpRequest; -import io.micronaut.http.MutableHttpResponse; -import 
io.micronaut.http.annotation.Filter; -import io.micronaut.http.client.ProxyHttpClient; -import io.micronaut.http.filter.OncePerRequestHttpServerFilter; -import io.micronaut.http.filter.ServerFilterChain; -import org.reactivestreams.Publisher; - -import javax.inject.Inject; -import java.net.URI; -import java.util.List; -import java.util.Optional; -import java.util.UUID; - -@Filter(KafkaConnectClientProxy.PROXY_PREFIX + "/**") -public class KafkaConnectClientProxy extends OncePerRequestHttpServerFilter { - public static final String PROXY_PREFIX = "/connect-proxy"; - public static final String PROXY_HEADER_KAFKA_CLUSTER = "X-Kafka-Cluster"; - public static final String PROXY_HEADER_CONNECT_CLUSTER = "X-Connect-Cluster"; - - // This UUID prevents anyone to access this filter directly and bypassing ConnectController and ConnectService. - // Only Micronaut can call this filter successfully - public static final String PROXY_HEADER_SECRET = "X-Proxy-Secret"; - public static final String PROXY_SECRET = UUID.randomUUID().toString(); - - @Inject - ProxyHttpClient client; - @Inject - List kafkaAsyncExecutorConfigs; - - @Override - public Publisher> doFilterOnce(HttpRequest request, ServerFilterChain chain) { - // check call is initiated from micronaut and not from outisde - if (!request.getHeaders().contains(KafkaConnectClientProxy.PROXY_HEADER_SECRET)) { - return Publishers.just(new ResourceValidationException(List.of("Missing required Header " + KafkaConnectClientProxy.PROXY_HEADER_SECRET), null, null)); - } - String secret = request.getHeaders().get(KafkaConnectClientProxy.PROXY_HEADER_SECRET); - if (!PROXY_SECRET.equals(secret)) { - return Publishers.just(new ResourceValidationException(List.of("Invalid value " + secret + " for Header " + KafkaConnectClientProxy.PROXY_HEADER_SECRET), null, null)); - } - // retrieve the connectConfig based on Header - if (!request.getHeaders().contains(KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER)) { - return Publishers.just(new 
ResourceValidationException(List.of("Missing required Header " + KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER), null, null)); - } - if (!request.getHeaders().contains(KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER)) { - return Publishers.just(new ResourceValidationException(List.of("Missing required Header " + KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER), null, null)); - } - - String kafkaCluster = request.getHeaders().get(KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER); - String connectCluster = request.getHeaders().get(KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER); - - // get config of the kafkaCluster - Optional config = kafkaAsyncExecutorConfigs.stream() - .filter(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster)) - .findFirst(); - if (config.isEmpty()) { - return Publishers.just(new ResourceValidationException(List.of("Kafka Cluster [" + kafkaCluster + "] not found"),null,null)); - } - - // get the good connect config - ConnectConfig connectConfig = config.get().getConnects().get(connectCluster); - if (connectConfig == null) { - return Publishers.just(new ResourceValidationException(List.of("Connect Cluster [" + connectCluster + "] not found"), null, null)); - } - - // mutate the request with proper URL and Authent - HttpRequest mutatedRequest = mutateKafkaConnectRequest(request, connectConfig); - // call it - return client.proxy(mutatedRequest); - // If required to modify the response, use this - /* return Publishers.map(client.proxy(mutatedRequest), - response -> response.header("X-My-Response-Header", "YYY"));*/ - } - - public MutableHttpRequest mutateKafkaConnectRequest(HttpRequest request, KafkaAsyncExecutorConfig.ConnectConfig connectConfig) { - - URI newURI = URI.create(connectConfig.getUrl()); - MutableHttpRequest mutableHttpRequest = request.mutate() - .uri(b -> b - .scheme(newURI.getScheme()) - .host(newURI.getHost()) - .port(newURI.getPort()) - .replacePath(StringUtils.prependUri( - 
newURI.getPath(), - request.getPath().substring(KafkaConnectClientProxy.PROXY_PREFIX.length()) - )) - ) - .basicAuth(connectConfig.getBasicAuthUsername(), connectConfig.getBasicAuthPassword()); - - // Micronaut resets Host later on with proper value. - mutableHttpRequest.getHeaders().remove(HttpHeaders.HOST); - return mutableHttpRequest; - } -} diff --git a/api/src/main/java/com/michelin/ns4kafka/services/connect/client/ConnectorClient.java b/api/src/main/java/com/michelin/ns4kafka/services/connect/client/ConnectorClient.java new file mode 100644 index 00000000..cc319f3d --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/services/connect/client/ConnectorClient.java @@ -0,0 +1,80 @@ +package com.michelin.ns4kafka.services.connect.client; + +import com.michelin.ns4kafka.services.connect.ConnectorClientProxy; +import com.michelin.ns4kafka.services.connect.client.entities.*; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.annotation.*; +import io.micronaut.http.client.annotation.Client; +import io.reactivex.Maybe; +import io.reactivex.Single; + +import java.util.List; +import java.util.Map; + +@Client(value = ConnectorClientProxy.PROXY_PREFIX) +public interface ConnectorClient { + @Get("/connectors?expand=info&expand=status") + Single> listAll( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster); + + @Put("/connector-plugins/{connectorClass}/config/validate") + Single validate( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connectorClass, + @Body ConnectorSpecs connectorSpec); + + @Put("/connectors/{connector}/config") + Single 
createOrUpdate( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connector, + @Body ConnectorSpecs connectorSpec); + + @Delete("/connectors/{connector}") + Maybe> delete( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connector); + + + @Get("/connector-plugins") + Single> connectPlugins( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster); + + @Get("/connectors/{connector}/status") + Single status( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connector); + + @Post("/connectors/{connector}/tasks/{taskId}/restart") + Single> restart( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connector, + int taskId); + + @Put("/connectors/{connector}/pause") + Single> pause( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connector); + + 
@Put("/connectors/{connector}/resume") + Single> resume( + @Header(value = ConnectorClientProxy.PROXY_HEADER_SECRET) String secret, + @Header(value = ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, + @Header(value = ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, + String connector); +} diff --git a/api/src/main/java/com/michelin/ns4kafka/services/connect/client/KafkaConnectClient.java b/api/src/main/java/com/michelin/ns4kafka/services/connect/client/KafkaConnectClient.java deleted file mode 100644 index 2066975a..00000000 --- a/api/src/main/java/com/michelin/ns4kafka/services/connect/client/KafkaConnectClient.java +++ /dev/null @@ -1,80 +0,0 @@ -package com.michelin.ns4kafka.services.connect.client; - -import com.michelin.ns4kafka.services.connect.KafkaConnectClientProxy; -import com.michelin.ns4kafka.services.connect.client.entities.*; -import io.micronaut.http.HttpResponse; -import io.micronaut.http.annotation.*; -import io.micronaut.http.client.annotation.Client; -import io.reactivex.Maybe; -import io.reactivex.Single; - -import java.util.List; -import java.util.Map; - -@Client(value = KafkaConnectClientProxy.PROXY_PREFIX) -public interface KafkaConnectClient { - @Get("/connectors?expand=info&expand=status") - Single> listAll( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster); - - @Put("/connector-plugins/{connectorClass}/config/validate") - Single validate( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connectorClass, - @Body ConnectorSpecs connectorSpec); - - 
@Put("/connectors/{connector}/config") - Single createOrUpdate( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connector, - @Body ConnectorSpecs connectorSpec); - - @Delete("/connectors/{connector}") - Maybe> delete( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connector); - - - @Get("/connector-plugins") - Single> connectPlugins( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster); - - @Get("/connectors/{connector}/status") - Single status( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connector); - - @Post("/connectors/{connector}/tasks/{taskId}/restart") - Single> restart( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connector, - int taskId); - - @Put("/connectors/{connector}/pause") - Single> pause( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = 
KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connector); - - @Put("/connectors/{connector}/resume") - Single> resume( - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_SECRET) String secret, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER) String cluster, - @Header(value = KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER) String connectCluster, - String connector); -} diff --git a/api/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java b/api/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java index 219132a3..be762789 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java @@ -1,12 +1,13 @@ package com.michelin.ns4kafka.services.executors; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.KafkaStream; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.repositories.NamespaceRepository; import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; import com.michelin.ns4kafka.services.AccessControlEntryService; -import com.michelin.ns4kafka.services.KafkaConnectService; +import com.michelin.ns4kafka.services.ConnectorService; import com.michelin.ns4kafka.services.StreamService; import io.micronaut.context.annotation.EachBean; import lombok.extern.slf4j.Slf4j; @@ -55,7 +56,7 @@ public class AccessControlEntryAsyncExecutor { * The Kafka Connect service */ @Inject - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; /** * The namespace repository diff --git a/api/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java 
b/api/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java index e7735e5f..6980234a 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java @@ -1,10 +1,12 @@ package com.michelin.ns4kafka.services.executors; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.repositories.ConnectorRepository; -import com.michelin.ns4kafka.services.connect.KafkaConnectClientProxy; -import com.michelin.ns4kafka.services.connect.client.KafkaConnectClient; +import com.michelin.ns4kafka.services.ConnectClusterService; +import com.michelin.ns4kafka.services.connect.ConnectorClientProxy; +import com.michelin.ns4kafka.services.connect.client.ConnectorClient; import com.michelin.ns4kafka.services.connect.client.entities.ConnectorSpecs; import com.michelin.ns4kafka.services.connect.client.entities.ConnectorStatus; import io.micronaut.context.annotation.EachBean; @@ -19,27 +21,22 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import java.util.stream.Stream; @Slf4j @EachBean(KafkaAsyncExecutorConfig.class) @Singleton public class ConnectorAsyncExecutor { - /** - * The managed clusters config - */ private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; - /** - * The connector repository - */ @Inject private ConnectorRepository connectorRepository; - /** - * The Kafka Connect client - */ @Inject - private KafkaConnectClient kafkaConnectClient; + private ConnectorClient connectorClient; + + @Inject + private ConnectClusterService connectClusterService; /** * Constructor @@ -63,8 +60,13 @@ public void run() { * For each connect cluster, start the synchronization of connectors */ private void synchronizeConnectors() { - 
kafkaAsyncExecutorConfig.getConnects() - .forEach((s, connectConfig) -> synchronizeConnectCluster(s)); + List selfDeclaredConnectClusterNames = connectClusterService.findAll() + .stream() + .map(connectCluster -> connectCluster.getMetadata().getName()) + .collect(Collectors.toList()); + + Stream.concat(kafkaAsyncExecutorConfig.getConnects().keySet().stream(), selfDeclaredConnectClusterNames.stream()) + .forEach(this::synchronizeConnectCluster); } /** @@ -73,8 +75,7 @@ private void synchronizeConnectors() { */ private void synchronizeConnectCluster(String connectCluster) { log.debug("Starting Connector synchronization for Kafka cluster {} and Connect cluster {}", - kafkaAsyncExecutorConfig.getName(), - connectCluster); + kafkaAsyncExecutorConfig.getName(), connectCluster); collectBrokerConnectors(connectCluster) .subscribe(new ConsumerSingleObserver<>(brokerConnectors -> { @@ -130,9 +131,9 @@ private void synchronizeConnectCluster(String connectCluster) { * @return A list of connectors */ public Single> collectBrokerConnectors(String connectCluster) { - return kafkaConnectClient.listAll(KafkaConnectClientProxy.PROXY_SECRET, kafkaAsyncExecutorConfig.getName(), connectCluster) + return connectorClient.listAll(ConnectorClientProxy.PROXY_SECRET, kafkaAsyncExecutorConfig.getName(), connectCluster) .map(connectors -> { - log.debug("Connectors found on Connect Cluster {} : {}", connectCluster, connectors.size()); + log.debug("Connectors found on Connect cluster {} : {}", connectCluster, connectors.size()); return connectors .values() @@ -171,7 +172,7 @@ private List collectNs4KafkaConnectors(String connectCluster) { .stream() .filter(connector -> connector.getSpec().getConnectCluster().equals(connectCluster)) .collect(Collectors.toList()); - log.debug("Connectors found on Ns4kafka for Connect Cluster {}: {}", connectCluster, connectorList.size()); + log.debug("Connectors found on Ns4kafka for Connect cluster {}: {}", connectCluster, connectorList.size()); return 
connectorList; } @@ -200,7 +201,7 @@ private boolean connectorsAreSame(Connector expected, Connector actual) { * @param connector The connector to deploy */ private void deployConnector(Connector connector) { - kafkaConnectClient.createOrUpdate(KafkaConnectClientProxy.PROXY_SECRET, kafkaAsyncExecutorConfig.getName(), + connectorClient.createOrUpdate(ConnectorClientProxy.PROXY_SECRET, kafkaAsyncExecutorConfig.getName(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName(), ConnectorSpecs.builder().config(connector.getSpec().getConfig()).build()) .subscribe(new ConsumerSingleObserver<>(httpResponse -> log.info("Success deploying Connector [{}] on Kafka [{}] Connect [{}]", diff --git a/api/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java b/api/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java index 4f2d093b..686eaaf8 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.services.executors; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import io.micronaut.context.annotation.EachBean; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; diff --git a/api/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/api/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index df0b890d..b5a46f40 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.services.executors; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; import 
com.michelin.ns4kafka.repositories.TopicRepository; @@ -157,7 +158,6 @@ public Map collectBrokerTopicsFromNames(List topicNames) .map(s -> new ConfigResource(ConfigResource.Type.TOPIC, s)) .collect(Collectors.toList()) ) - //.describeConfigs(List.of(new ConfigResource(ConfigResource.Type.TOPIC,"*"))) .all() .get(30, TimeUnit.SECONDS) .entrySet() diff --git a/api/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java b/api/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java index 1b0a1974..492bbbea 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java @@ -1,6 +1,7 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.controllers.ResourceValidationException; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import com.michelin.ns4kafka.repositories.NamespaceRepository; import io.micronaut.context.annotation.EachBean; import lombok.extern.slf4j.Slf4j; diff --git a/api/src/main/java/com/michelin/ns4kafka/services/schema/KafkaSchemaRegistryClientProxy.java b/api/src/main/java/com/michelin/ns4kafka/services/schema/KafkaSchemaRegistryClientProxy.java index c57898ed..891ccc0a 100644 --- a/api/src/main/java/com/michelin/ns4kafka/services/schema/KafkaSchemaRegistryClientProxy.java +++ b/api/src/main/java/com/michelin/ns4kafka/services/schema/KafkaSchemaRegistryClientProxy.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.services.schema; -import com.michelin.ns4kafka.controllers.ResourceValidationException; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import io.micronaut.core.async.publisher.Publishers; import 
io.micronaut.core.util.StringUtils; import io.micronaut.http.HttpHeaders; @@ -24,7 +24,7 @@ public class KafkaSchemaRegistryClientProxy extends OncePerRequestHttpServerFilter { /** * Schema registry prefix used to filter request to schema registries. It'll be replaced by - * the schema registry URL of the + * the schema registry URL of the given cluster */ public static final String SCHEMA_REGISTRY_PREFIX = "/schema-registry-proxy"; @@ -57,7 +57,6 @@ public class KafkaSchemaRegistryClientProxy extends OncePerRequestHttpServerFilt /** * Filter requests - * * @param request The request to filter * @param chain The servlet chain * @return A modified request @@ -74,7 +73,6 @@ public Publisher> doFilterOnce(HttpRequest request, Se return Publishers.just(new ResourceValidationException(List.of("Invalid value " + secret + " for header " + KafkaSchemaRegistryClientProxy.PROXY_HEADER_SECRET), null, null)); } - // Retrieve the connectConfig based on Header if (!request.getHeaders().contains(KafkaSchemaRegistryClientProxy.PROXY_HEADER_KAFKA_CLUSTER)) { return Publishers.just(new ResourceValidationException(List.of("Missing required header " + KafkaSchemaRegistryClientProxy.PROXY_HEADER_KAFKA_CLUSTER), null, null)); } @@ -99,7 +97,6 @@ public Publisher> doFilterOnce(HttpRequest request, Se /** * Mutate a request to the Schema Registry by modifying the base URI by the Schema Registry URI from the * cluster config - * * @param request The request to modify * @param config The configuration used to modify the request * @return The modified request diff --git a/api/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java b/api/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java new file mode 100644 index 00000000..9be98ed8 --- /dev/null +++ b/api/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java @@ -0,0 +1,82 @@ +package com.michelin.ns4kafka.utils; + +import com.nimbusds.jose.*; +import com.nimbusds.jose.crypto.AESDecrypter; +import 
com.nimbusds.jose.crypto.AESEncrypter; +import com.nimbusds.jose.util.Base64URL; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +@Slf4j +public class EncryptionUtils { + /** + * Constructor + */ + private EncryptionUtils() { } + + /** + * Encrypt given text with the given key to AES256 GCM then encode it to Base64 + * @param clearText The text to encrypt + * @param key The key encryption key (KEK) + * @return The encrypted password + */ + public static String encryptAES256GCM(String clearText, String key) { + try { + if (!StringUtils.isNotBlank(clearText)) { + return clearText; + } + + AESEncrypter encrypter = new AESEncrypter(key.getBytes(StandardCharsets.UTF_8)); + JWECryptoParts encryptedData = encrypter.encrypt(new JWEHeader(JWEAlgorithm.A256KW, EncryptionMethod.A256GCM), + clearText.getBytes(StandardCharsets.UTF_8)); + + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + outputStream.write(encryptedData.getEncryptedKey().decode()); + outputStream.write(encryptedData.getInitializationVector().decode()); + outputStream.write(encryptedData.getAuthenticationTag().decode()); + outputStream.write(encryptedData.getCipherText().decode()); + + return Base64URL.encode(outputStream.toByteArray()).toString(); + } catch (JOSEException | IOException e) { + log.error("An error occurred during Connect cluster password encryption", e); + } + + return clearText; + } + + /** + * Decrypt given text with the given key from AES256 GCM + * @param encryptedText The text to decrypt + * @param key The key encryption key (KEK) + * @return The decrypted text + */ + public static String decryptAES256GCM(String encryptedText, String key) { + try { + if (!StringUtils.isNotBlank(encryptedText)) { + return encryptedText; + } + + AESDecrypter decrypter = new 
AESDecrypter(key.getBytes(StandardCharsets.UTF_8)); + byte[] encryptedData = Base64URL.from(encryptedText).decode(); + + Base64URL encryptedKey = Base64URL.encode(Arrays.copyOfRange(encryptedData, 0, 40)); + Base64URL iv = Base64URL.encode(Arrays.copyOfRange(encryptedData, 40, 52)); + Base64URL auth = Base64URL.encode(Arrays.copyOfRange(encryptedData, 52, 68)); + Base64URL text = Base64URL.encode(Arrays.copyOfRange(encryptedData, 68, encryptedData.length)); + + byte[] clearTextAsBytes = decrypter.decrypt(new JWEHeader(JWEAlgorithm.A256KW, EncryptionMethod.A256GCM), + encryptedKey, iv, text, auth); + + return new String(clearTextAsBytes); + } catch (JOSEException e) { + log.error("An error occurred during Connect cluster password decryption", e); + } + + return encryptedText; + } +} diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ApplyStatus.java b/api/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java similarity index 61% rename from api/src/main/java/com/michelin/ns4kafka/controllers/ApplyStatus.java rename to api/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java index 0f417c47..3f24f633 100644 --- a/api/src/main/java/com/michelin/ns4kafka/controllers/ApplyStatus.java +++ b/api/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java @@ -1,4 +1,4 @@ -package com.michelin.ns4kafka.controllers; +package com.michelin.ns4kafka.utils.enums; public enum ApplyStatus { created, changed, unchanged, deleted diff --git a/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceValidationException.java b/api/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java similarity index 91% rename from api/src/main/java/com/michelin/ns4kafka/controllers/ResourceValidationException.java rename to api/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java index 8d1b9837..d3714440 100644 --- 
a/api/src/main/java/com/michelin/ns4kafka/controllers/ResourceValidationException.java +++ b/api/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java @@ -1,4 +1,4 @@ -package com.michelin.ns4kafka.controllers; +package com.michelin.ns4kafka.utils.exceptions; import lombok.Getter; diff --git a/api/src/main/resources/application.yml b/api/src/main/resources/application.yml index 40496237..f478640a 100644 --- a/api/src/main/resources/application.yml +++ b/api/src/main/resources/application.yml @@ -1,8 +1,7 @@ micronaut: -# BEGIN ThreadPoolOptimization -# https://docs.micronaut.io/latest/guide/#clientConfiguration -# Moves HttpClient calls to a different ThreadPool -# This is mainly for Kafka Connect calls + application: + name: ns4kafka + netty: event-loops: default: @@ -13,6 +12,7 @@ micronaut: schema: num-threads: 4 prefer-native-transport: true + http: services: /connect-proxy: @@ -20,9 +20,6 @@ micronaut: /schema-registry-proxy: event-loop-group: schema -# END ThreadPoolOptimization - application: - name: ns4kafka security: enabled: true authentication: bearer @@ -59,6 +56,7 @@ micronaut: http-method: GET access: - isAnonymous() + router: static-resources: swagger: @@ -145,10 +143,9 @@ ns4kafka: min.compaction.lag.ms: "0" max.compaction.lag.ms: "604800000" segment.ms: "600000" - # Logs the actions performed by users when changes are made (created/deleted/changed) + log: - console: # + console: enabled: true - kafka: # + kafka: enabled: false - #topic: ns4kafka.audit-log diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/AccessControlListControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/AccessControlListControllerTest.java index e5fa4537..7145b3d5 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/AccessControlListControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/AccessControlListControllerTest.java @@ -6,6 +6,7 @@ import 
com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.AccessControlEntryService; import com.michelin.ns4kafka.services.NamespaceService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java index 5f8864f0..5af01347 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.config.AkhqClaimProviderControllerConfig; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java new file mode 100644 index 00000000..72ab79da --- /dev/null +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java @@ -0,0 +1,494 @@ +package com.michelin.ns4kafka.controllers; + +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.models.Namespace; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.connector.Connector; +import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; +import com.michelin.ns4kafka.services.ConnectClusterService; +import com.michelin.ns4kafka.services.ConnectorService; +import com.michelin.ns4kafka.services.NamespaceService; +import 
com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; +import com.michelin.ns4kafka.validation.TopicValidator; +import io.micronaut.context.event.ApplicationEventPublisher; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.HttpStatus; +import io.micronaut.security.utils.SecurityService; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentMatchers; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.List; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +@ExtendWith(MockitoExtension.class) +class ConnectClusterControllerTest { + @Mock + SecurityService securityService; + + @Mock + NamespaceService namespaceService; + + @Mock + ConnectClusterService connectClusterService; + + @Mock + ConnectorService connectorService; + + @InjectMocks + ConnectClusterController connectClusterController; + + @Mock + ApplicationEventPublisher applicationEventPublisher; + + /** + * Test connect clusters listing when namespace is empty + */ + @Test + void listEmptyConnectClusters() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + + List actual = connectClusterController.list("test"); + Assertions.assertTrue(actual.isEmpty()); + } + + /** + * Test connect clusters listing + */ + @Test + void listMultipleConnectClusters() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + 
Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of( + ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(), + ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster2") + .build()) + .build())); + + List actual = connectClusterController.list("test"); + Assertions.assertEquals(2, actual.size()); + } + + /** + * Test get connect cluster by name when it does not exist + */ + @Test + void getConnectClusterEmpty() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.findByNamespaceAndNameOwner(ns, "missing")) + .thenReturn(Optional.empty()); + + Optional actual = connectClusterController.getConnectCluster("test", "missing"); + Assertions.assertTrue(actual.isEmpty()); + } + + /** + * Test get connect cluster by name + */ + @Test + void getConnectCluster() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")) + .thenReturn(Optional.of( + ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build())); + + Optional actual = connectClusterController.getConnectCluster("test", "connect-cluster"); + Assertions.assertTrue(actual.isPresent()); + Assertions.assertEquals("connect-cluster", actual.get().getMetadata().getName()); + } + + /** + * Test connect cluster deletion when namespace is not owner + */ + @Test + void deleteConnectClusterNotOwned() { + Namespace ns = Namespace.builder() + 
.metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) + .thenReturn(false); + + Assertions.assertThrows(ResourceValidationException.class, + () -> connectClusterController.delete("test", "connect-cluster", false)); + } + + /** + * Test connect cluster deletion when not found + */ + @Test + void deleteConnectClusterNotFound() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) + .thenReturn(true); + Mockito.when(connectClusterService.findByNamespaceAndNameOwner(ns,"connect-cluster")) + .thenReturn(Optional.empty()); + + HttpResponse actual = connectClusterController.delete("test", "connect-cluster", false); + Assertions.assertEquals(HttpStatus.NOT_FOUND, actual.getStatus()); + } + + /** + * Test connect cluster deletion when namespace is owner + */ + @Test + void deleteConnectClusterOwned() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) + .thenReturn(true); + Mockito.when(connectorService.findAllByConnectCluster(ns,"connect-cluster")) + .thenReturn(List.of()); + Mockito.when(connectClusterService.findByNamespaceAndNameOwner(ns,"connect-cluster")) + .thenReturn(Optional.of(connectCluster)); + 
doNothing().when(connectClusterService).delete(connectCluster); + when(securityService.username()).thenReturn(Optional.of("test-user")); + when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); + doNothing().when(applicationEventPublisher).publishEvent(any()); + + HttpResponse actual = connectClusterController.delete("test", "connect-cluster", false); + Assertions.assertEquals(HttpStatus.NO_CONTENT, actual.getStatus()); + } + + /** + * Test connect cluster deletion in dry run mode + */ + @Test + void deleteConnectClusterOwnedDryRun() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) + .thenReturn(true); + Mockito.when(connectorService.findAllByConnectCluster(ns,"connect-cluster")) + .thenReturn(List.of()); + Mockito.when(connectClusterService.findByNamespaceAndNameOwner(ns,"connect-cluster")) + .thenReturn(Optional.of(connectCluster)); + + HttpResponse actual = connectClusterController.delete("test", "connect-cluster", true); + Assertions.assertEquals(HttpStatus.NO_CONTENT, actual.getStatus()); + + verify(connectClusterService, never()).delete(any()); + } + + /** + * Test connect cluster deletion when it has connectors deployed on it + */ + @Test + void deleteConnectClusterWithConnectors() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + 
Mockito.when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) + .thenReturn(true); + Mockito.when(connectorService.findAllByConnectCluster(ns,"connect-cluster")) + .thenReturn(List.of(connector)); + + ResourceValidationException result = Assertions.assertThrows(ResourceValidationException.class, + () -> connectClusterController.delete("test", "connect-cluster", false)); + + Assertions.assertEquals(1, result.getValidationErrors().size()); + Assertions.assertEquals("The Connect cluster connect-cluster has 1 deployed connector(s): connect1. Please remove the associated connector(s) before deleting it.", result.getValidationErrors().get(0)); + } + + /** + * Validate Connect cluster creation + */ + @Test + void createNewConnectCluster() throws Exception { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); + when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(List.of()); + when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.empty()); + when(securityService.username()).thenReturn(Optional.of("test-user")); + when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); + doNothing().when(applicationEventPublisher).publishEvent(any()); + + when(connectClusterService.create(connectCluster)).thenReturn(connectCluster); + + HttpResponse response = connectClusterController.apply("test", connectCluster, false); + ConnectCluster actual = response.body(); 
+ + Assertions.assertEquals("created", response.header("X-Ns4kafka-Result")); + assertEquals("connect-cluster", actual.getMetadata().getName()); + } + + /** + * Validate Connect cluster creation being not owner + */ + @Test + void createNewConnectClusterNotOwner() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(false); + when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(List.of()); + + ResourceValidationException result = Assertions.assertThrows(ResourceValidationException.class, + () -> connectClusterController.apply("test", connectCluster, false)); + + Assertions.assertEquals(1, result.getValidationErrors().size()); + Assertions.assertEquals("Namespace not owner of this Connect cluster connect-cluster.", result.getValidationErrors().get(0)); + } + + /** + * Validate Connect cluster creation being not owner + */ + @Test + void createNewConnectClusterValidationError() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); + 
when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(List.of("Error occurred")); + + ResourceValidationException result = Assertions.assertThrows(ResourceValidationException.class, + () -> connectClusterController.apply("test", connectCluster, false)); + + Assertions.assertEquals(1, result.getValidationErrors().size()); + Assertions.assertEquals("Error occurred", result.getValidationErrors().get(0)); + } + + /** + * Validate Connect cluster updated when unchanged + */ + @Test + void updateConnectClusterUnchanged() throws Exception { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); + when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(List.of()); + when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.of(connectCluster)); + + HttpResponse response = connectClusterController.apply("test", connectCluster, false); + ConnectCluster actual = response.body(); + + Assertions.assertEquals("unchanged", response.header("X-Ns4kafka-Result")); + verify(connectClusterService, never()).create(ArgumentMatchers.any()); + assertEquals(connectCluster, actual); + } + + /** + * Validate Connect cluster updated when changed + */ + @Test + void updateConnectClusterChanged() throws Exception { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + 
.topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + ConnectCluster connectClusterChanged = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://before") + .build()) + .build(); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); + when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(List.of()); + when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.of(connectClusterChanged)); + when(connectClusterService.create(connectCluster)).thenReturn(connectCluster); + + HttpResponse response = connectClusterController.apply("test", connectCluster, false); + ConnectCluster actual = response.body(); + + Assertions.assertEquals("changed", response.header("X-Ns4kafka-Result")); + assertEquals("connect-cluster", actual.getMetadata().getName()); + } + + /** + * Validate Connect cluster creation in dry run mode + */ + @Test + void createConnectClusterDryRun() throws Exception { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); + 
when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(List.of()); + when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.empty()); + + HttpResponse response = connectClusterController.apply("test", connectCluster, true); + + Assertions.assertEquals("created", response.header("X-Ns4kafka-Result")); + verify(connectClusterService, never()).create(connectCluster); + } +} diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java similarity index 79% rename from api/src/test/java/com/michelin/ns4kafka/controllers/ConnectControllerTest.java rename to api/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java index 8826ba7e..87266964 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java @@ -5,9 +5,10 @@ import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; -import com.michelin.ns4kafka.services.KafkaConnectService; +import com.michelin.ns4kafka.services.ConnectorService; import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.ResourceQuotaService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; @@ -29,12 +30,12 @@ import static org.mockito.Mockito.*; @ExtendWith(MockitoExtension.class) -class ConnectControllerTest { +class ConnectorControllerTest { /** * Connector service */ @Mock - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; /** * Namespace service @@ -58,7 +59,7 @@ class ConnectControllerTest { * 
Connector controller */ @InjectMocks - ConnectController connectController; + ConnectorController connectorController; /** @@ -81,10 +82,10 @@ void listEmptyConnectors() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of()); - List actual = connectController.list("test"); + List actual = connectorController.list("test"); Assertions.assertTrue(actual.isEmpty()); } @@ -102,12 +103,12 @@ void listMultipleConnectors() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); - List actual = connectController.list("test"); + List actual = connectorController.list("test"); Assertions.assertEquals(2, actual.size()); } @@ -125,10 +126,10 @@ void getConnectorEmpty() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.findByName(ns, "missing")) + Mockito.when(connectorService.findByName(ns, "missing")) .thenReturn(Optional.empty()); - Optional actual = connectController.getConnector("test", "missing"); + Optional actual = connectorController.getConnector("test", "missing"); Assertions.assertTrue(actual.isEmpty()); } @@ -146,11 +147,11 @@ void getConnector() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.findByName(ns, "connect1")) + Mockito.when(connectorService.findByName(ns, "connect1")) .thenReturn(Optional.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build())); - Optional actual = 
connectController.getConnector("test", "connect1"); + Optional actual = connectorController.getConnector("test", "connect1"); Assertions.assertTrue(actual.isPresent()); Assertions.assertEquals("connect1", actual.get().getMetadata().getName()); } @@ -169,10 +170,10 @@ void deleteConnectorNotOwned() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(false); - connectController.deleteConnector("test", "connect1", false) + connectorController.deleteConnector("test", "connect1", false) .test() .assertError(ResourceValidationException.class) .assertError(error -> ((ResourceValidationException) error).getValidationErrors().size() == 1) @@ -195,17 +196,17 @@ void deleteConnectorOwned() { Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.of(connector)); - Mockito.when(kafkaConnectService.delete(ns,connector)) + Mockito.when(connectorService.delete(ns,connector)) .thenReturn(Single.just(HttpResponse.noContent())); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); - connectController.deleteConnector("test", "connect1", false) + connectorController.deleteConnector("test", "connect1", false) .test() .assertNoErrors() .assertValue(response -> 
response.getStatus().equals(HttpStatus.NO_CONTENT)); @@ -226,17 +227,44 @@ void deleteConnectorOwnedDryRun() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.of(connector)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - connectController.deleteConnector("test", "connect1", true) + connectorController.deleteConnector("test", "connect1", true) .test() .assertNoErrors() .assertValue(response -> response.getStatus().equals(HttpStatus.NO_CONTENT)); - verify(kafkaConnectService, never()).delete(any(), any()); + verify(connectorService, never()).delete(any(), any()); + } + + /** + * Test connector deletion when connector is not found + */ + @Test + void deleteConnectorNotFound() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); + + Mockito.when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + Mockito.when(connectorService.findByName(ns,"connect1")) + .thenReturn(Optional.empty()); + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) + .thenReturn(true); + + connectorController.deleteConnector("test", "connect1", true) + .test() + .assertNoErrors() + .assertValue(response -> response.getStatus().equals(HttpStatus.NOT_FOUND)); + + verify(connectorService, never()).delete(any(), any()); } /** @@ -254,10 +282,10 @@ void createConnectorNotOwner() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(false); - connectController.apply("test", connector, false) + 
connectorController.apply("test", connector, false) .test() .assertError(ResourceValidationException.class) .assertError(error -> ((ResourceValidationException) error).getValidationErrors().size() == 1) @@ -283,12 +311,12 @@ void createConnectorLocalErrors() { .build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.validateLocally(ns, connector)) + Mockito.when(connectorService.validateLocally(ns, connector)) .thenReturn(Single.just(List.of("Local Validation Error 1"))); - connectController.apply("test", connector, false) + connectorController.apply("test", connector, false) .test() .assertError(ResourceValidationException.class) .assertError(error -> ((ResourceValidationException) error).getValidationErrors().size() == 1) @@ -315,14 +343,14 @@ void createConnectorRemoteErrors() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.validateLocally(ns, connector)) + Mockito.when(connectorService.validateLocally(ns, connector)) .thenReturn(Single.just(List.of())); - Mockito.when(kafkaConnectService.validateRemotely(ns, connector)) + Mockito.when(connectorService.validateRemotely(ns, connector)) .thenReturn(Single.just(List.of("Remote Validation Error 1"))); - connectController.apply("test", connector, false) + connectorController.apply("test", connector, false) .test() .assertError(ResourceValidationException.class) .assertError(error -> ((ResourceValidationException) error).getValidationErrors().size() == 1) @@ -353,20 +381,20 @@ void createConnectorSuccess() { .build(); 
when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - when(kafkaConnectService.validateLocally(ns, connector)) + when(connectorService.validateLocally(ns, connector)) .thenReturn(Single.just(List.of())); - when(kafkaConnectService.validateRemotely(ns, connector)) + when(connectorService.validateRemotely(ns, connector)) .thenReturn(Single.just(List.of())); when(resourceQuotaService.validateConnectorQuota(any())).thenReturn(List.of()); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); - when(kafkaConnectService.createOrUpdate(connector)) + when(connectorService.createOrUpdate(connector)) .thenReturn(expected); - connectController.apply("test", connector, false) + connectorController.apply("test", connector, false) .test() .assertValue(response -> Objects.equals(response.header("X-Ns4kafka-Result"), "created")) .assertValue(response -> response.getBody().isPresent() @@ -391,15 +419,15 @@ void createConnectorFailQuotaValidation() { .build(); when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - when(kafkaConnectService.validateLocally(ns, connector)) + when(connectorService.validateLocally(ns, connector)) .thenReturn(Single.just(List.of())); - when(kafkaConnectService.validateRemotely(ns, connector)) + when(connectorService.validateRemotely(ns, connector)) .thenReturn(Single.just(List.of())); when(resourceQuotaService.validateConnectorQuota(ns)).thenReturn(List.of("Quota error")); - connectController.apply("test", connector, false) + 
connectorController.apply("test", connector, false) .test() .assertError(ResourceValidationException.class) .assertError(error -> ((ResourceValidationException) error).getValidationErrors().size() == 1) @@ -433,22 +461,22 @@ void createConnectorSuccessAlreadyExists() { .build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.validateLocally(ns, connector)) + Mockito.when(connectorService.validateLocally(ns, connector)) .thenReturn(Single.just(List.of())); - Mockito.when(kafkaConnectService.validateRemotely(ns, connector)) + Mockito.when(connectorService.validateRemotely(ns, connector)) .thenReturn(Single.just(List.of())); - Mockito.when(kafkaConnectService.findByName(ns, "connect1")) + Mockito.when(connectorService.findByName(ns, "connect1")) .thenReturn(Optional.of(connector)); - connectController.apply("test", connector, false) + connectorController.apply("test", connector, false) .test() .assertValue(response -> Objects.equals(response.header("X-Ns4kafka-Result"), "unchanged")) .assertValue(response -> response.getBody().isPresent() && response.getBody().get().getStatus().getState().equals(expected.getStatus().getState())); - verify(kafkaConnectService,never()).createOrUpdate(ArgumentMatchers.any()); + verify(connectorService,never()).createOrUpdate(ArgumentMatchers.any()); } /** @@ -478,21 +506,21 @@ void createConnectorSuccessChanged() { .build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.validateLocally(ns, connector)) + Mockito.when(connectorService.validateLocally(ns, connector)) 
.thenReturn(Single.just(List.of())); - Mockito.when(kafkaConnectService.validateRemotely(ns, connector)) + Mockito.when(connectorService.validateRemotely(ns, connector)) .thenReturn(Single.just(List.of())); - Mockito.when(kafkaConnectService.findByName(ns, "connect1")) + Mockito.when(connectorService.findByName(ns, "connect1")) .thenReturn(Optional.of(connectorOld)); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); - Mockito.when(kafkaConnectService.createOrUpdate(connector)) + Mockito.when(connectorService.createOrUpdate(connector)) .thenReturn(expected); - connectController.apply("test", connector, false) + connectorController.apply("test", connector, false) .test() .assertValue(response -> Objects.equals(response.header("X-Ns4kafka-Result"), "changed")) .assertValue(response -> response.getBody().isPresent() @@ -517,18 +545,18 @@ void createConnectorDryRun() { .build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.validateLocally(ns, connector)) + Mockito.when(connectorService.validateLocally(ns, connector)) .thenReturn(Single.just(List.of())); - Mockito.when(kafkaConnectService.validateRemotely(ns, connector)) + Mockito.when(connectorService.validateRemotely(ns, connector)) .thenReturn(Single.just(List.of())); - connectController.apply("test", connector, true) + connectorController.apply("test", connector, true) .test() .assertValue(response -> Objects.equals(response.header("X-Ns4kafka-Result"), "created")); - verify(kafkaConnectService, never()).createOrUpdate(connector); + verify(connectorService, never()).createOrUpdate(connector); } /** @@ -548,13 
+576,13 @@ void importConnector() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - when(kafkaConnectService.listUnsynchronizedConnectors(ns)) + when(connectorService.listUnsynchronizedConnectors(ns)) .thenReturn(Single.just(List.of(connector1, connector2))); - when(kafkaConnectService.createOrUpdate(connector1)).thenReturn(connector1); - when(kafkaConnectService.createOrUpdate(connector2)).thenReturn(connector2); + when(connectorService.createOrUpdate(connector1)).thenReturn(connector1); + when(connectorService.createOrUpdate(connector2)).thenReturn(connector2); - connectController.importResources("test", false) + connectorController.importResources("test", false) .test() .assertValue(response -> response.stream().anyMatch(c -> c.getMetadata().getName().equals("connect1"))) .assertValue(response -> response.stream().anyMatch(c -> c.getMetadata().getName().equals("connect2"))) @@ -579,18 +607,18 @@ void importConnectorDryRun() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - when(kafkaConnectService.listUnsynchronizedConnectors(ns)) + when(connectorService.listUnsynchronizedConnectors(ns)) .thenReturn(Single.just(List.of(connector1, connector2))); - connectController.importResources("test", true) + connectorController.importResources("test", true) .test() .assertValue(response -> response.stream().anyMatch(c -> c.getMetadata().getName().equals("connect1"))) .assertValue(response -> response.stream().anyMatch(c -> c.getMetadata().getName().equals("connect2"))) .assertValue(response -> response.stream().noneMatch(c -> c.getMetadata().getName().equals("connect3"))); - verify(kafkaConnectService, never()).createOrUpdate(connector1); - verify(kafkaConnectService, never()).createOrUpdate(connector2); - verify(kafkaConnectService, never()).createOrUpdate(connector3); + verify(connectorService, never()).createOrUpdate(connector1); + verify(connectorService, never()).createOrUpdate(connector2); + 
verify(connectorService, never()).createOrUpdate(connector3); } /** @@ -607,7 +635,7 @@ void restartConnectorNotOwned() { Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(false); ChangeConnectorState restart = ChangeConnectorState.builder() @@ -615,7 +643,7 @@ void restartConnectorNotOwned() { .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) .build(); - connectController.changeState("test", "connect1", restart) + connectorController.changeState("test", "connect1", restart) .test() .assertError(ResourceValidationException.class) .assertError(error -> ((ResourceValidationException) error).getValidationErrors().size() == 1) @@ -636,9 +664,9 @@ void restartConnectorNotExists() { .build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.empty()); ChangeConnectorState restart = ChangeConnectorState.builder() @@ -646,11 +674,11 @@ void restartConnectorNotExists() { .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) .build(); - connectController.changeState("test", "connect1", restart) + connectorController.changeState("test", "connect1", restart) .test() .assertValue(response -> response.getStatus().equals(HttpStatus.NOT_FOUND)); - verify(kafkaConnectService,never()).restart(ArgumentMatchers.any(), ArgumentMatchers.any()); + verify(connectorService,never()).restart(ArgumentMatchers.any(), 
ArgumentMatchers.any()); } /** @@ -667,11 +695,11 @@ void restartConnectorException() { Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.of(connector)); - Mockito.when(kafkaConnectService.restart(ArgumentMatchers.any(),ArgumentMatchers.any())) + Mockito.when(connectorService.restart(ArgumentMatchers.any(),ArgumentMatchers.any())) .thenReturn(Single.error(new HttpClientResponseException("Rebalancing", HttpResponse.status(HttpStatus.CONFLICT)))); ChangeConnectorState restart = ChangeConnectorState.builder() @@ -679,7 +707,7 @@ void restartConnectorException() { .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) .build(); - connectController.changeState("test", "connect1", restart) + connectorController.changeState("test", "connect1", restart) .test() .assertValue(response -> response.getBody().isPresent() && !response.getBody().get().getStatus().isSuccess() @@ -701,11 +729,11 @@ void restartConnectorOwned() { Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.of(connector)); - 
Mockito.when(kafkaConnectService.restart(ArgumentMatchers.any(),ArgumentMatchers.any())) + Mockito.when(connectorService.restart(ArgumentMatchers.any(),ArgumentMatchers.any())) .thenReturn(Single.just(HttpResponse.noContent())); ChangeConnectorState changeConnectorState = ChangeConnectorState.builder() @@ -713,7 +741,7 @@ void restartConnectorOwned() { .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) .build(); - connectController.changeState("test", "connect1", changeConnectorState) + connectorController.changeState("test", "connect1", changeConnectorState) .test() .assertValue(response -> response.getBody().isPresent() && response.getBody().get().getStatus().isSuccess()) @@ -736,11 +764,11 @@ void pauseConnectorOwned() { Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.of(connector)); - Mockito.when(kafkaConnectService.pause(ArgumentMatchers.any(),ArgumentMatchers.any())) + Mockito.when(connectorService.pause(ArgumentMatchers.any(),ArgumentMatchers.any())) .thenReturn(Single.just(HttpResponse.noContent())); ChangeConnectorState changeConnectorState = ChangeConnectorState.builder() @@ -751,7 +779,7 @@ void pauseConnectorOwned() { .build()) .build(); - connectController.changeState("test", "connect1", changeConnectorState) + connectorController.changeState("test", "connect1", changeConnectorState) .test() .assertValue(response -> response.getBody().isPresent() && response.getBody().get().getStatus().isSuccess()) @@ -773,11 +801,11 @@ void 
resumeConnectorOwned() { Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Mockito.when(namespaceService.findByName("test")) .thenReturn(Optional.of(ns)); - Mockito.when(kafkaConnectService.isNamespaceOwnerOfConnect(ns, "connect1")) + Mockito.when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) .thenReturn(true); - Mockito.when(kafkaConnectService.findByName(ns,"connect1")) + Mockito.when(connectorService.findByName(ns,"connect1")) .thenReturn(Optional.of(connector)); - Mockito.when(kafkaConnectService.resume(ArgumentMatchers.any(),ArgumentMatchers.any())) + Mockito.when(connectorService.resume(ArgumentMatchers.any(),ArgumentMatchers.any())) .thenReturn(Single.just(HttpResponse.noContent())); ChangeConnectorState changeConnectorState = ChangeConnectorState.builder() @@ -788,7 +816,7 @@ void resumeConnectorOwned() { .build()) .build(); - connectController.changeState("test", "connect1", changeConnectorState) + connectorController.changeState("test", "connect1", changeConnectorState) .test() .assertValue(response -> response.getBody().isPresent() && response.getBody().get().getStatus().isSuccess()) diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java index 08b0f0f6..4612a8ee 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java @@ -9,6 +9,7 @@ import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.ConsumerGroupService; import com.michelin.ns4kafka.services.NamespaceService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.security.utils.SecurityService; import 
org.apache.kafka.common.TopicPartition; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java index eb86d73a..b95da542 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.controllers; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpMethod; import io.micronaut.http.HttpRequest; import io.micronaut.http.HttpStatus; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java index a3fc29a5..9203049f 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java @@ -4,6 +4,7 @@ import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.NamespaceService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.security.utils.SecurityService; import org.junit.jupiter.api.Assertions; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java index e799d831..74d4cd3b 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java @@ -7,6 +7,7 @@ import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import 
com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.ResourceQuotaService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java index 93f0acb2..e094982b 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java @@ -8,6 +8,7 @@ import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.SchemaService; import com.michelin.ns4kafka.services.schema.client.entities.SchemaCompatibilityResponse; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; diff --git a/api/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java index 792810b0..c8b5c73c 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java @@ -6,6 +6,7 @@ import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.StreamService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; diff --git 
a/api/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/api/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 5a09f53f..6b63627d 100644 --- a/api/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -9,6 +9,7 @@ import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.ResourceQuotaService; import com.michelin.ns4kafka.services.TopicService; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import com.michelin.ns4kafka.validation.TopicValidator; import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpResponse; diff --git a/api/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java b/api/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java index 76478d83..b36a6a91 100644 --- a/api/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java @@ -22,15 +22,13 @@ @MicronautTest @Property(name = "micronaut.security.gitlab.enabled", value = "false") -public class ApiResourcesTest extends AbstractIntegrationTest { - +class ApiResourcesTest extends AbstractIntegrationTest { @Inject @Client("/") RxHttpClient client; @Test void asAdmin() { - UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); HttpResponse response = client.exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class).blockingFirst(); @@ -40,8 +38,7 @@ void asAdmin() { Argument.listOf(ApiResourcesController.ResourceDefinition.class) ).blockingFirst(); - Assertions.assertEquals(8, resources.size()); - + Assertions.assertEquals(9, resources.size()); } @Test @@ -53,7 +50,7 @@ void asAnonymous() { Argument.listOf(ApiResourcesController.ResourceDefinition.class) 
).blockingFirst(); - Assertions.assertEquals(8, resources.size()); + Assertions.assertEquals(9, resources.size()); } @Test diff --git a/api/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java b/api/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java index c8f0567f..1baa0492 100644 --- a/api/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java @@ -246,9 +246,9 @@ void deployConnectors() throws InterruptedException, MalformedURLException { client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to)).blockingFirst(); topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects").bearerAuth(token).body(connectorWithNullParameter)).blockingFirst(); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects").bearerAuth(token).body(connectorWithEmptyParameter)).blockingFirst(); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects").bearerAuth(token).body(connectorWithFillParameter)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(connectorWithNullParameter)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(connectorWithEmptyParameter)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(connectorWithFillParameter)).blockingFirst(); connectorAsyncExecutorList.forEach(ConnectorAsyncExecutor::run); Thread.sleep(2000); @@ -327,7 +327,7 @@ void updateConnectorsWithNullProperty() throws InterruptedException, MalformedUR client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to)).blockingFirst(); 
topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects").bearerAuth(token).body(updateConnector)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(updateConnector)).blockingFirst(); connectorAsyncExecutorList.forEach(ConnectorAsyncExecutor::run); Thread.sleep(2000); @@ -375,7 +375,7 @@ void restartConnector() throws InterruptedException { client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to)).blockingFirst(); topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects").bearerAuth(token).body(co)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(co)).blockingFirst(); connectorAsyncExecutorList.forEach(ConnectorAsyncExecutor::run); Thread.sleep(2000); @@ -384,7 +384,7 @@ void restartConnector() throws InterruptedException { .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) .build(); - HttpResponse actual = client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects/ns1-co1/change-state").bearerAuth(token).body(restartState), ChangeConnectorState.class).blockingFirst(); + HttpResponse actual = client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co1/change-state").bearerAuth(token).body(restartState), ChangeConnectorState.class).blockingFirst(); Assertions.assertEquals(HttpStatus.OK, actual.status()); } @@ -428,7 +428,7 @@ void PauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to)).blockingFirst(); 
topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects").bearerAuth(token).body(co)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(co)).blockingFirst(); connectorAsyncExecutorList.forEach(ConnectorAsyncExecutor::run); Thread.sleep(2000); @@ -437,7 +437,7 @@ void PauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio .metadata(ObjectMeta.builder().name("ns1-co2").build()) .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.pause).build()) .build(); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects/ns1-co2/change-state").bearerAuth(token).body(pauseState)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co2/change-state").bearerAuth(token).body(pauseState)).blockingFirst(); Thread.sleep(2000); // verify paused directly on connect cluster @@ -452,7 +452,7 @@ void PauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio .metadata(ObjectMeta.builder().name("ns1-co2").build()) .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.resume).build()) .build(); - client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connects/ns1-co2/change-state").bearerAuth(token).body(resumeState)).blockingFirst(); + client.exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co2/change-state").bearerAuth(token).body(resumeState)).blockingFirst(); Thread.sleep(2000); // verify resumed directly on connect cluster diff --git a/api/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java b/api/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java index 9e0c77a1..c7234bba 100644 --- 
a/api/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java @@ -2,7 +2,6 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.michelin.ns4kafka.controllers.AkhqClaimProviderController; -import com.michelin.ns4kafka.controllers.ResourceValidationException; import com.michelin.ns4kafka.models.*; import com.michelin.ns4kafka.models.AccessControlEntry.AccessControlEntrySpec; import com.michelin.ns4kafka.models.AccessControlEntry.Permission; diff --git a/api/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java b/api/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java index f26c2d37..538634c0 100644 --- a/api/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.security; +import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.RoleBinding; import com.michelin.ns4kafka.security.gitlab.GitlabAuthenticationProvider; diff --git a/api/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java b/api/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java index a3c2a0e0..27094c5f 100644 --- a/api/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.security; +import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.security.local.LocalUser; import com.michelin.ns4kafka.security.local.LocalUserAuthenticationProvider; import io.micronaut.security.authentication.AuthenticationResponse; diff --git 
a/api/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java b/api/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java index 3bd87bd5..7eae06b6 100644 --- a/api/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java @@ -1,5 +1,6 @@ package com.michelin.ns4kafka.security; +import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.RoleBinding; diff --git a/api/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java b/api/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java new file mode 100644 index 00000000..7215349c --- /dev/null +++ b/api/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java @@ -0,0 +1,406 @@ +package com.michelin.ns4kafka.services; + +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.SecurityConfig; +import com.michelin.ns4kafka.models.AccessControlEntry; +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.models.Namespace; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.repositories.ConnectClusterRepository; +import com.nimbusds.jose.JOSEException; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.MutableHttpRequest; +import io.micronaut.http.client.RxHttpClient; +import io.micronaut.http.client.annotation.Client; +import io.micronaut.http.client.exceptions.HttpClientException; +import io.reactivex.Flowable; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import 
org.mockito.junit.jupiter.MockitoExtension; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Stream; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +class ConnectClusterServiceTest { + @Mock + ConnectClusterRepository connectClusterRepository; + + @Mock + AccessControlEntryService accessControlEntryService; + + @Mock + List kafkaAsyncExecutorConfigList; + + @Mock + SecurityConfig securityConfig; + + @InjectMocks + ConnectClusterService connectClusterService; + + @Mock + @Client("/") + RxHttpClient httpClient; + + /** + * Test find all + */ + @Test + void findAllEmpty() { + Mockito.when(connectClusterRepository.findAll()).thenReturn(List.of()); + List actual = connectClusterRepository.findAll(); + + Assertions.assertTrue(actual.isEmpty()); + } + + /** + * Test find all + */ + @Test + void findAll() { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + Mockito.when(connectClusterRepository.findAll()).thenReturn(List.of(connectCluster)); + List actual = connectClusterService.findAll(); + + Assertions.assertEquals(1L, actual.size()); + } + + /** + * Test find all for namespace + */ + @Test + void findAllForNamespace() { + Namespace namespace = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + ConnectCluster connectClusterTwo = ConnectCluster.builder() + 
.metadata(ObjectMeta.builder().name("prefix2.connect-two") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + ConnectCluster connectClusterThree = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + ConnectCluster connectClusterFour = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("not-owner") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + when(connectClusterRepository.findAllForCluster("local")) + .thenReturn(List.of(connectCluster, connectClusterTwo, connectClusterThree, connectClusterFour)); + + when(accessControlEntryService.findAllGrantedToNamespace(namespace)) + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix2.connect-two") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.READ) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix3.") + .build()) + .build(), + AccessControlEntry.builder() + 
.spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("topic.") + .build()) + .build() + )); + + List actual = connectClusterService.findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER)); + + Assertions.assertEquals(2, actual.size()); + // contains + Assertions.assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix.connect-cluster"))); + Assertions.assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix2.connect-two"))); + // doesn't contain + Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("not-owner"))); + Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix3.connect-cluster"))); + } + + /** + * Test find by namespace and name + */ + @Test + void findByNamespaceAndName() { + Namespace namespace = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + when(connectClusterRepository.findAllForCluster("local")) + .thenReturn(List.of(connectCluster)); + + when(accessControlEntryService.findAllGrantedToNamespace(namespace)) + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + 
.resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + Optional actual = connectClusterService.findByNamespaceAndNameOwner(namespace, "prefix.connect-cluster"); + + Assertions.assertTrue(actual.isPresent()); + Assertions.assertEquals("prefix.connect-cluster", actual.get().getMetadata().getName()); + } + + /** + * Test find by namespace and name empty response + */ + @Test + void findByNamespaceAndNameEmpty() { + Namespace namespace = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + when(connectClusterRepository.findAllForCluster("local")) + .thenReturn(List.of(connectCluster)); + + when(accessControlEntryService.findAllGrantedToNamespace(namespace)) + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + Optional actual = connectClusterService.findByNamespaceAndNameOwner(namespace, "does-not-exist"); + + Assertions.assertTrue(actual.isEmpty()); + } + + /** + * Test creation + */ + @Test + void create() throws IOException, JOSEException { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + 
when(connectClusterRepository.create(connectCluster)).thenReturn(connectCluster); + + ConnectCluster actual = connectClusterService.create(connectCluster); + Assertions.assertEquals(actual, connectCluster); + } + + + /** + * Test creation with encrypted credentials + */ + @Test + void createCredentialsEncrypted() throws IOException, JOSEException { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("myUsername") + .password("myPassword") + .build()) + .build(); + + when(connectClusterRepository.create(connectCluster)).thenReturn(connectCluster); + when(securityConfig.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); + + connectClusterService.create(connectCluster); + Assertions.assertNotEquals("myPassword", connectCluster.getSpec().getPassword()); + } + + /** + * Test validate connect cluster creation when Connect cluster is already defined in the + * Ns4Kafka configuration + */ + @Test + void validateConnectClusterCreationAlreadyDefined() { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + KafkaAsyncExecutorConfig kafka = new KafkaAsyncExecutorConfig("local"); + kafka.setConnects(Map.of("test-connect", new KafkaAsyncExecutorConfig.ConnectConfig())); + when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of(kafka)); + when(httpClient.exchange(any(MutableHttpRequest.class))).thenReturn(Flowable.just(HttpResponse.ok())); + + List errors = connectClusterService.validateConnectClusterCreation(connectCluster); + + Assertions.assertEquals(1L, errors.size()); + Assertions.assertEquals("A Connect cluster is already defined globally with the name test-connect. 
Please provide a different name.", errors.get(0)); + } + + /** + * Test validate connect cluster creation when Connect cluster is down + */ + @Test + void validateConnectClusterCreationDown() { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("username") + .password("password") + .build()) + .build(); + + when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + when(httpClient.exchange(any(MutableHttpRequest.class))).thenReturn(Flowable.just(HttpResponse.serverError())); + + List errors = connectClusterService.validateConnectClusterCreation(connectCluster); + + Assertions.assertEquals(1L, errors.size()); + Assertions.assertEquals("The Connect cluster test-connect is not healthy (HTTP code 500).", errors.get(0)); + } + + /** + * Test validate connect cluster creation malformed URL + */ + @Test + void validateConnectClusterCreationMalformedUrl() { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("malformed-url") + .build()) + .build(); + + when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + + List errors = connectClusterService.validateConnectClusterCreation(connectCluster); + + Assertions.assertEquals(1L, errors.size()); + Assertions.assertEquals("The Connect cluster test-connect has a malformed URL \"malformed-url\".", errors.get(0)); + } + + /** + * Test validate connect cluster creation throws http client exception + */ + @Test + void validateConnectClusterCreationHttpClientException() { + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); + + 
when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + when(httpClient.exchange(any(MutableHttpRequest.class))) + .thenThrow(new HttpClientException("Error")); + + List errors = connectClusterService.validateConnectClusterCreation(connectCluster); + + Assertions.assertEquals(1L, errors.size()); + Assertions.assertEquals("The following error occurred trying to check the Connect cluster test-connect health: Error.", errors.get(0)); + } +} diff --git a/api/src/test/java/com/michelin/ns4kafka/services/KafkaConnectClientProxyTest.java b/api/src/test/java/com/michelin/ns4kafka/services/ConnectorClientProxyTest.java similarity index 57% rename from api/src/test/java/com/michelin/ns4kafka/services/KafkaConnectClientProxyTest.java rename to api/src/test/java/com/michelin/ns4kafka/services/ConnectorClientProxyTest.java index e399a024..b4684f3a 100644 --- a/api/src/test/java/com/michelin/ns4kafka/services/KafkaConnectClientProxyTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/services/ConnectorClientProxyTest.java @@ -1,9 +1,13 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.controllers.ResourceValidationException; -import com.michelin.ns4kafka.services.connect.KafkaConnectClientProxy; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig.ConnectConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig.ConnectConfig; +import com.michelin.ns4kafka.config.SecurityConfig; +import com.michelin.ns4kafka.models.ConnectCluster; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.services.connect.ConnectorClientProxy; +import com.michelin.ns4kafka.utils.EncryptionUtils; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.core.async.publisher.Publishers; import io.micronaut.http.*; import 
io.micronaut.http.client.ProxyHttpClient; @@ -24,22 +28,29 @@ import java.util.stream.Stream; @ExtendWith(MockitoExtension.class) -public class KafkaConnectClientProxyTest { +class ConnectorClientProxyTest { @Mock ProxyHttpClient client; + @Mock List kafkaAsyncExecutorConfigs; + @Mock + ConnectClusterService connectClusterService; + + @Mock + SecurityConfig securityConfig; + @InjectMocks - KafkaConnectClientProxy proxy; + ConnectorClientProxy proxy; @Test - void doFilterMissingHeader_Secret() { + void doFilterMissingHeaderSecret() { MutableHttpRequest request = HttpRequest .GET("http://localhost/connect-proxy/connectors") .header("X-Unused", "123"); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -49,7 +60,7 @@ void doFilterMissingHeader_Secret() { subscriber.assertError(throwable -> ((ResourceValidationException)throwable) .getValidationErrors() - .contains("Missing required Header X-Proxy-Secret") + .contains("Missing required header X-Proxy-Secret") ); } @@ -59,7 +70,7 @@ void doFilterWrongSecret() { .GET("http://localhost/connect-proxy/connectors") .header("X-Proxy-Secret", "123"); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -69,17 +80,17 @@ void doFilterWrongSecret() { subscriber.assertError(throwable -> ((ResourceValidationException)throwable) .getValidationErrors() - .contains("Invalid value 123 for Header X-Proxy-Secret") + .contains("Invalid value 123 for header X-Proxy-Secret") ); } @Test - void doFilterMissingHeader_KafkaCluster() { + void doFilterMissingHeaderKafkaCluster() { MutableHttpRequest request = HttpRequest .GET("http://localhost/connect-proxy/connectors") - 
.header("X-Proxy-Secret", KafkaConnectClientProxy.PROXY_SECRET) + .header("X-Proxy-Secret", ConnectorClientProxy.PROXY_SECRET) .header("X-Unused", "123"); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -89,17 +100,17 @@ void doFilterMissingHeader_KafkaCluster() { subscriber.assertError(throwable -> ((ResourceValidationException)throwable) .getValidationErrors() - .contains("Missing required Header X-Kafka-Cluster") + .contains("Missing required header X-Kafka-Cluster") ); } @Test - void doFilterMissingHeader_ConnectCluster() { + void doFilterMissingHeaderConnectCluster() { MutableHttpRequest request = HttpRequest .GET("http://localhost/connect-proxy/connectors") - .header("X-Proxy-Secret", KafkaConnectClientProxy.PROXY_SECRET) - .header(KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local"); + .header("X-Proxy-Secret", ConnectorClientProxy.PROXY_SECRET) + .header(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local"); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -109,7 +120,7 @@ void doFilterMissingHeader_ConnectCluster() { subscriber.assertError(throwable -> ((ResourceValidationException)throwable) .getValidationErrors() - .contains("Missing required Header X-Connect-Cluster") + .contains("Missing required header X-Connect-Cluster") ); } @@ -117,12 +128,12 @@ void doFilterMissingHeader_ConnectCluster() { void doFilterWrongKafkaCluster() { MutableHttpRequest request = HttpRequest .GET("http://localhost/connect-proxy/connectors") - .header("X-Proxy-Secret", KafkaConnectClientProxy.PROXY_SECRET) - .header(KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local") - 
.header(KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "local-name"); + .header("X-Proxy-Secret", ConnectorClientProxy.PROXY_SECRET) + .header(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local") + .header(ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "local-name"); Mockito.when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.empty()); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -138,9 +149,9 @@ void doFilterWrongKafkaCluster() { void doFilterWrongConnectCluster() { MutableHttpRequest request = HttpRequest .GET("http://localhost/connect-proxy/connectors") - .header("X-Proxy-Secret", KafkaConnectClientProxy.PROXY_SECRET) - .header(KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local") - .header(KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "local-name"); + .header("X-Proxy-Secret", ConnectorClientProxy.PROXY_SECRET) + .header(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local") + .header(ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "local-name"); KafkaAsyncExecutorConfig config = new KafkaAsyncExecutorConfig("local"); ConnectConfig connectConfig = new KafkaAsyncExecutorConfig.ConnectConfig(); config.setConnects(Map.of("invalid-name",connectConfig)); @@ -148,7 +159,7 @@ void doFilterWrongConnectCluster() { Mockito.when(kafkaAsyncExecutorConfigs.stream()) .thenReturn(Stream.of(config)); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -157,21 +168,20 @@ void doFilterWrongConnectCluster() { subscriber.assertError(throwable -> ((ResourceValidationException)throwable) .getValidationErrors() - .contains("Connect Cluster [local-name] not found") 
+ .contains("Connect cluster [local-name] not found") ); } @Test void doFilterSuccess() { - - MutableHttpRequest request = new MutableSimpleHttpRequest("http://localhost/connect-proxy/connectors") - .header("X-Proxy-Secret", KafkaConnectClientProxy.PROXY_SECRET) - .header(KafkaConnectClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local") - .header(KafkaConnectClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "local-name"); + MutableHttpRequest request = new MutableSimpleHttpRequest<>("http://localhost/connect-proxy/connectors") + .header("X-Proxy-Secret", ConnectorClientProxy.PROXY_SECRET) + .header(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER, "local") + .header(ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "local-name"); KafkaAsyncExecutorConfig config1 = new KafkaAsyncExecutorConfig("local"); ConnectConfig connectConfig = new KafkaAsyncExecutorConfig.ConnectConfig(); - connectConfig.setUrl("http://target/"); + connectConfig.setUrl("https://target/"); config1.setConnects(Map.of("local-name",connectConfig)); // Should not interfere KafkaAsyncExecutorConfig config2 = new KafkaAsyncExecutorConfig("not-match"); @@ -181,7 +191,49 @@ void doFilterSuccess() { Mockito.when(client.proxy(ArgumentMatchers.any(MutableHttpRequest.class))) .thenReturn(Publishers.just(HttpResponse.ok())); - TestSubscriber> subscriber = new TestSubscriber(); + TestSubscriber> subscriber = new TestSubscriber<>(); + Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); + + mutableHttpResponsePublisher.subscribe(subscriber); + subscriber.awaitTerminalEvent(); + + subscriber.assertValueCount(1); + subscriber.assertValue(mutableHttpResponse -> mutableHttpResponse.status() == HttpStatus.OK); + } + + @Test + void doFilterSuccessSelfDeployedConnectCluster() { + MutableHttpRequest request = new MutableSimpleHttpRequest<>("http://localhost/connect-proxy/connectors") + .header("X-Proxy-Secret", ConnectorClientProxy.PROXY_SECRET) + .header(ConnectorClientProxy.PROXY_HEADER_KAFKA_CLUSTER, 
"local") + .header(ConnectorClientProxy.PROXY_HEADER_CONNECT_CLUSTER, "connect-cluster"); + + KafkaAsyncExecutorConfig config1 = new KafkaAsyncExecutorConfig("local"); + ConnectConfig connectConfig = new KafkaAsyncExecutorConfig.ConnectConfig(); + connectConfig.setUrl("https://target/"); + config1.setConnects(Map.of("local-name",connectConfig)); + KafkaAsyncExecutorConfig config2 = new KafkaAsyncExecutorConfig("not-match"); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://my-custom-connect-cluster") + .username("myUsername") + .password(EncryptionUtils.encryptAES256GCM("myPassword", "changeitchangeitchangeitchangeit")) + .build()) + .build(); + + Mockito.when(kafkaAsyncExecutorConfigs.stream()) + .thenReturn(Stream.of(config1, config2)); + Mockito.when(connectClusterService.findAll()) + .thenReturn(List.of(connectCluster)); + Mockito.when(client.proxy(ArgumentMatchers.any(MutableHttpRequest.class))) + .thenReturn(Publishers.just(HttpResponse.ok())); + Mockito.when(securityConfig.getAes256EncryptionKey()) + .thenReturn("changeitchangeitchangeitchangeit"); + + TestSubscriber> subscriber = new TestSubscriber<>(); Publisher> mutableHttpResponsePublisher = proxy.doFilterOnce(request, null); mutableHttpResponsePublisher.subscribe(subscriber); @@ -193,55 +245,44 @@ void doFilterSuccess() { @Test void testMutateKafkaConnectRequest() { - MutableHttpRequest request = new MutableSimpleHttpRequest("http://localhost/connect-proxy/connectors"); - KafkaAsyncExecutorConfig.ConnectConfig config = new KafkaAsyncExecutorConfig.ConnectConfig(); - config.setUrl("http://target/"); + MutableHttpRequest request = new MutableSimpleHttpRequest<>("http://localhost/connect-proxy/connectors"); - MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, config); + MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, 
"https://target/", null, null); - Assertions.assertEquals("http://target/connectors", actual.getUri().toString()); + Assertions.assertEquals("https://target/connectors", actual.getUri().toString()); } @Test - void testMutateKafkaConnectRequestEmptyHostHeader_fix107() { - // https://github.com/michelin/ns4kafka/issues/107 - MutableHttpRequest request = new MutableSimpleHttpRequest("http://localhost/connect-proxy/connectors"); + void testMutateKafkaConnectRequestEmptyHostHeader() { + MutableHttpRequest request = new MutableSimpleHttpRequest<>("http://localhost/connect-proxy/connectors"); request.header("Host","value"); - KafkaAsyncExecutorConfig.ConnectConfig config = new KafkaAsyncExecutorConfig.ConnectConfig(); - config.setUrl("http://target/"); - MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, config); + MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, "https://target/", null, null); - Assertions.assertEquals("http://target/connectors", actual.getUri().toString()); + Assertions.assertEquals("https://target/connectors", actual.getUri().toString()); Assertions.assertTrue(actual.getHeaders().getAll("Host").isEmpty(), "Host header should be unset"); } @Test void testMutateKafkaConnectRequestRewrite() { - MutableHttpRequest request = new MutableSimpleHttpRequest("http://localhost/connect-proxy/connectors"); - KafkaAsyncExecutorConfig.ConnectConfig config = new KafkaAsyncExecutorConfig.ConnectConfig(); - config.setUrl("http://target/rewrite"); + MutableHttpRequest request = new MutableSimpleHttpRequest<>("http://localhost/connect-proxy/connectors"); - MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, config); + MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, "https://target/rewrite", null, null); - Assertions.assertEquals("http://target/rewrite/connectors", actual.getUri().toString()); + Assertions.assertEquals("https://target/rewrite/connectors", actual.getUri().toString()); } @Test - void 
testMutateKafkaConnectRequestAuthent() { - MutableHttpRequest request = new MutableSimpleHttpRequest("http://localhost/connect-proxy/connectors"); - KafkaAsyncExecutorConfig.ConnectConfig config = new KafkaAsyncExecutorConfig.ConnectConfig(); - config.setUrl("http://target/"); - config.setBasicAuthUsername("toto"); - config.setBasicAuthPassword("titi"); + void testMutateKafkaConnectRequestAuthentication() { + MutableHttpRequest request = new MutableSimpleHttpRequest<>("http://localhost/connect-proxy/connectors"); - MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, config); + MutableHttpRequest actual = proxy.mutateKafkaConnectRequest(request, "https://target/", "toto", "titi"); - Assertions.assertEquals("http://target/connectors", actual.getUri().toString()); + Assertions.assertEquals("https://target/connectors", actual.getUri().toString()); Assertions.assertEquals("Basic dG90bzp0aXRp", actual.getHeaders().get(HttpHeaders.AUTHORIZATION)); } - public class MutableSimpleHttpRequest extends SimpleHttpRequest{ + public static class MutableSimpleHttpRequest extends SimpleHttpRequest{ @Override public MutableHttpRequest mutate() { diff --git a/api/src/test/java/com/michelin/ns4kafka/services/KafkaConnectServiceTest.java b/api/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java similarity index 85% rename from api/src/test/java/com/michelin/ns4kafka/services/KafkaConnectServiceTest.java rename to api/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java index 2cf7c122..0fe002f4 100644 --- a/api/src/test/java/com/michelin/ns4kafka/services/KafkaConnectServiceTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java @@ -1,13 +1,14 @@ package com.michelin.ns4kafka.services; import com.michelin.ns4kafka.models.AccessControlEntry; +import com.michelin.ns4kafka.models.ConnectCluster; import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.models.Namespace; 
import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.repositories.ConnectorRepository; -import com.michelin.ns4kafka.services.connect.KafkaConnectClientProxy; -import com.michelin.ns4kafka.services.connect.client.KafkaConnectClient; +import com.michelin.ns4kafka.services.connect.ConnectorClientProxy; +import com.michelin.ns4kafka.services.connect.client.ConnectorClient; import com.michelin.ns4kafka.services.connect.client.entities.*; import com.michelin.ns4kafka.services.executors.ConnectorAsyncExecutor; import com.michelin.ns4kafka.validation.ConnectValidator; @@ -35,36 +36,24 @@ import static org.mockito.Mockito.*; @ExtendWith(MockitoExtension.class) -class KafkaConnectServiceTest { - /** - * Mocked ACL service - */ +class ConnectorServiceTest { @Mock AccessControlEntryService accessControlEntryService; - /** - * Mocked Kafka connector client - */ @Mock - KafkaConnectClient kafkaConnectClient; + ConnectorClient connectorClient; - /** - * Mocked connector repository - */ @Mock ConnectorRepository connectorRepository; - /** - * Mocked application context - */ @Mock ApplicationContext applicationContext; - /** - * Mocked kafka connect service - */ @InjectMocks - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; + + @Mock + ConnectClusterService connectClusterService; /** * Test to find all connectors by namespace when there is no connector @@ -84,7 +73,7 @@ void findByNamespaceNone() { Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of()); - List actual = kafkaConnectService.findAllForNamespace(ns); + List actual = connectorService.findAllForNamespace(ns); Assertions.assertTrue(actual.isEmpty()); } @@ -172,7 +161,7 @@ void findByNamespaceMultiple() { Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of(c1, c2, c3, c4, c5)); - List actual = kafkaConnectService.findAllForNamespace(ns); + List actual 
= connectorService.findAllForNamespace(ns); Assertions.assertEquals(3, actual.size()); // contains @@ -203,7 +192,7 @@ void findByNameNotFound() { Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of()); - Optional actual = kafkaConnectService.findByName(ns, "ns-connect1"); + Optional actual = connectorService.findByName(ns, "ns-connect1"); Assertions.assertTrue(actual.isEmpty()); } @@ -274,12 +263,71 @@ void findByNameFound() { Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of(c1, c2, c3)); - Optional actual = kafkaConnectService.findByName(ns, "ns-connect1"); + Optional actual = connectorService.findByName(ns, "ns-connect1"); Assertions.assertTrue(actual.isPresent()); Assertions.assertEquals("ns-connect1", actual.get().getMetadata().getName()); } + /** + * Test find all by namespace and connect cluster + */ + @Test + void findAllByConnectCluster() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); + + Connector c1 = Connector.builder() + .metadata(ObjectMeta.builder().name("ns-connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster") + .build()) + .build(); + + Connector c2 = Connector.builder() + .metadata(ObjectMeta.builder().name("ns-connect2").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster2") + .build()) + .build(); + + Connector c3 = Connector.builder() + .metadata(ObjectMeta.builder().name("other-connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster3") + .build()) + .build(); + + Connector c4 = Connector.builder() + .metadata(ObjectMeta.builder().name("other-connect2").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster4") + .build()) + .build(); + + Connector c5 = 
Connector.builder() + .metadata(ObjectMeta.builder().name("ns2-connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster5") + .build()) + .build(); + + Mockito.when(connectorRepository.findAllForCluster("local")) + .thenReturn(List.of(c1, c2, c3, c4, c5)); + + List actual = connectorService.findAllByConnectCluster(ns, "connect-cluster"); + + Assertions.assertEquals(1, actual.size()); + Assertions.assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns-connect1"))); + } + /** * Test to validate the configuration of a connector when the KConnect cluster is invalid */ @@ -309,7 +357,8 @@ void validateLocallyInvalidConnectCluster() { .build()) .build(); - kafkaConnectService.validateLocally(ns, connector) + when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn(List.of()); + connectorService.validateLocally(ns, connector) .test() .assertValue(response -> response.size() == 1) .assertValue(response -> response.get(0).equals("Invalid value wrong for spec.connectCluster: Value must be one of [local-name]")); @@ -337,7 +386,7 @@ void validateLocallyNoClassName() { .build()) .build(); - kafkaConnectService.validateLocally(ns, connector) + connectorService.validateLocally(ns, connector) .test() .assertValue(response -> response.size() == 1) .assertValue(response -> response.get(0).equals("Invalid value for spec.config.'connector.class': Value must be non-null")); @@ -364,10 +413,10 @@ void validateLocallyInvalidClassName() { .connectClusters(List.of("local-name")) .build()) .build(); - Mockito.when(kafkaConnectClient.connectPlugins(KafkaConnectClientProxy.PROXY_SECRET, "local", "local-name")) + Mockito.when(connectorClient.connectPlugins(ConnectorClientProxy.PROXY_SECRET, "local", "local-name")) .thenReturn(Single.just(List.of())); - kafkaConnectService.validateLocally(ns, connector) + connectorService.validateLocally(ns, connector) .test() .assertValue(response -> response.size() == 1) 
.assertValue(response -> response.get(0).equals("Failed to find any class that implements Connector and which name matches org.apache.kafka.connect.file.FileStreamSinkConnector")); @@ -400,10 +449,10 @@ void validateLocallyValidationErrors() { .connectClusters(List.of("local-name")) .build()) .build(); - Mockito.when(kafkaConnectClient.connectPlugins(KafkaConnectClientProxy.PROXY_SECRET, "local", "local-name")) + Mockito.when(connectorClient.connectPlugins(ConnectorClientProxy.PROXY_SECRET, "local", "local-name")) .thenReturn(Single.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1")))); - kafkaConnectService.validateLocally(ns, connector) + connectorService.validateLocally(ns, connector) .test() .assertValue(response -> response.size() == 1) .assertValue(response -> response.get(0).equals("Invalid value null for configuration missing.field: Value must be non-null")); @@ -436,10 +485,52 @@ void validateLocallySuccess() { .connectClusters(List.of("local-name")) .build()) .build(); - Mockito.when(kafkaConnectClient.connectPlugins(KafkaConnectClientProxy.PROXY_SECRET, "local", "local-name")) + Mockito.when(connectorClient.connectPlugins(ConnectorClientProxy.PROXY_SECRET, "local", "local-name")) + .thenReturn(Single.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1")))); + + connectorService.validateLocally(ns, connector) + .test() + .assertValue(List::isEmpty); + } + + /** + * Test to validate the configuration of a connector + */ + @Test + void validateLocallySuccessWithSelfDeployedConnectCluster() { + Connector connector = Connector.builder() + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); + + Namespace ns = Namespace.builder() + 
.metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectValidator(ConnectValidator.builder() + .classValidationConstraints(Map.of()) + .sinkValidationConstraints(Map.of()) + .sourceValidationConstraints(Map.of()) + .validationConstraints(Map.of()) + .build()) + .connectClusters(List.of()) + .build()) + .build(); + + when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn(List.of(ConnectCluster.builder() + .metadata(ObjectMeta.builder() + .name("local-name") + .build()) + .build())); + when(connectorClient.connectPlugins(ConnectorClientProxy.PROXY_SECRET, "local", "local-name")) .thenReturn(Single.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1")))); - kafkaConnectService.validateLocally(ns, connector) + connectorService.validateLocally(ns, connector) .test() .assertValue(List::isEmpty); } @@ -471,7 +562,7 @@ void validateRemotelyErrors() { List.of(new ConfigInfo(new ConfigKeyInfo(null, null, false, null, null, null, null, 0, null, null, null), new ConfigValueInfo(null, null, null, List.of("error_message"), true)))); - Mockito.when(kafkaConnectClient.validate( + Mockito.when(connectorClient.validate( ArgumentMatchers.anyString(), ArgumentMatchers.eq("local"), ArgumentMatchers.eq("local-name"), @@ -479,7 +570,7 @@ void validateRemotelyErrors() { ArgumentMatchers.any())) .thenReturn(Single.just(configInfos)); - kafkaConnectService.validateRemotely(ns, connector) + connectorService.validateRemotely(ns, connector) .test() .assertValue(response -> response.size() == 1) .assertValue(response -> response.contains("error_message")); @@ -509,7 +600,7 @@ void validateRemotelySuccess() { .build(); ConfigInfos configInfos = new ConfigInfos("name", 1, List.of(), List.of()); - Mockito.when(kafkaConnectClient.validate( + Mockito.when(connectorClient.validate( ArgumentMatchers.anyString(), ArgumentMatchers.eq("local"), 
ArgumentMatchers.eq("local-name"), @@ -517,7 +608,7 @@ void validateRemotelySuccess() { ArgumentMatchers.any())) .thenReturn(Single.just(configInfos)); - kafkaConnectService.validateRemotely(ns, connector) + connectorService.validateRemotely(ns, connector) .test() .assertValue(List::isEmpty); } @@ -594,7 +685,7 @@ void listUnsynchronizedNoExistingConnectors() { Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of()); - kafkaConnectService.listUnsynchronizedConnectors(ns) + connectorService.listUnsynchronizedConnectors(ns) .test() .assertValue(response -> response.size() == 3) .assertValue(response -> response.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns-connect1"))) @@ -675,11 +766,10 @@ void listUnsynchronizedAllExistingConnectors() { .build() )); - // all connects exists into ns4kfk Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of(c1, c2, c3, c4)); - kafkaConnectService.listUnsynchronizedConnectors(ns) + connectorService.listUnsynchronizedConnectors(ns) .test() .assertValue(response -> response.size() == 0); } @@ -763,7 +853,7 @@ void listUnsynchronizedPartialExistingConnectors() { Mockito.when(connectorRepository.findAllForCluster("local")) .thenReturn(List.of(c1)); - kafkaConnectService.listUnsynchronizedConnectors(ns) + connectorService.listUnsynchronizedConnectors(ns) .test() .assertValue(response -> response.size() == 2) .assertValue(response -> response.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns-connect2"))) @@ -792,16 +882,16 @@ void deleteConnectorSuccess() { .spec(Connector.ConnectorSpec.builder().connectCluster("local-name").build()) .build(); - when(kafkaConnectClient.delete(KafkaConnectClientProxy.PROXY_SECRET, ns.getMetadata().getCluster(), + when(connectorClient.delete(ConnectorClientProxy.PROXY_SECRET, ns.getMetadata().getCluster(), "local-name", "ns-connect1")).thenReturn(Maybe.just(HttpResponse.ok())); 
doNothing().when(connectorRepository).delete(connector); - kafkaConnectService.delete(ns, connector) + connectorService.delete(ns, connector) .test() .assertValue(response -> response.getStatus().equals(HttpStatus.OK)); - verify(kafkaConnectClient, times(1)).delete(KafkaConnectClientProxy.PROXY_SECRET, ns.getMetadata().getCluster(), + verify(connectorClient, times(1)).delete(ConnectorClientProxy.PROXY_SECRET, ns.getMetadata().getCluster(), "local-name", "ns-connect1"); verify(connectorRepository, times(1)).delete(connector); @@ -827,10 +917,10 @@ void deleteConnectorConnectClusterError() { .spec(Connector.ConnectorSpec.builder().connectCluster("local-name").build()) .build(); - when(kafkaConnectClient.delete(KafkaConnectClientProxy.PROXY_SECRET, ns.getMetadata().getCluster(), + when(connectorClient.delete(ConnectorClientProxy.PROXY_SECRET, ns.getMetadata().getCluster(), "local-name", "ns-connect1")).thenReturn(Maybe.error(new HttpClientResponseException("Error", HttpResponse.serverError()))); - kafkaConnectService.delete(ns, connector) + connectorService.delete(ns, connector) .test() .assertError(HttpClientResponseException.class); diff --git a/api/src/test/java/com/michelin/ns4kafka/services/KafkaSchemaRegistryClientProxyTest.java b/api/src/test/java/com/michelin/ns4kafka/services/KafkaSchemaRegistryClientProxyTest.java index e28a98b9..68cb0dcb 100644 --- a/api/src/test/java/com/michelin/ns4kafka/services/KafkaSchemaRegistryClientProxyTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/services/KafkaSchemaRegistryClientProxyTest.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.controllers.ResourceValidationException; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.services.schema.KafkaSchemaRegistryClientProxy; import 
io.micronaut.http.*; import io.micronaut.http.client.ProxyHttpClient; diff --git a/api/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java b/api/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java index 4b6fd13d..4b195e24 100644 --- a/api/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java @@ -4,8 +4,8 @@ import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.repositories.NamespaceRepository; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig.ConnectConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig.ConnectConfig; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -30,7 +30,7 @@ public class NamespaceServiceTest { @Mock AccessControlEntryService accessControlEntryService; @Mock - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; @Mock List kafkaAsyncExecutorConfigList; @@ -285,7 +285,7 @@ void listAllNamespaceResourcesEmpty() { Mockito.when(topicService.findAllForNamespace(ns)) .thenReturn(List.of()); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of()); Mockito.when(roleBindingService.list("namespace")) .thenReturn(List.of()); @@ -319,7 +319,7 @@ void listAllNamespaceResourcesTopic() { Mockito.when(topicService.findAllForNamespace(ns)) .thenReturn(List.of(topic)); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of()); Mockito.when(roleBindingService.list("namespace")) 
.thenReturn(List.of()); @@ -354,7 +354,7 @@ void listAllNamespaceResourcesConnect() { Mockito.when(topicService.findAllForNamespace(ns)) .thenReturn(List.of()); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of(connector)); Mockito.when(roleBindingService.list("namespace")) .thenReturn(List.of()); @@ -389,7 +389,7 @@ void listAllNamespaceResourcesRoleBinding() { Mockito.when(topicService.findAllForNamespace(ns)) .thenReturn(List.of()); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of()); Mockito.when(roleBindingService.list("namespace")) .thenReturn(List.of(rb)); @@ -424,7 +424,7 @@ void listAllNamespaceResourcesAccessControlEntry() { Mockito.when(topicService.findAllForNamespace(ns)) .thenReturn(List.of()); - Mockito.when(kafkaConnectService.findAllForNamespace(ns)) + Mockito.when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of()); Mockito.when(roleBindingService.list("namespace")) .thenReturn(List.of()); diff --git a/api/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java b/api/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java index 87d6088f..0b643973 100644 --- a/api/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java @@ -46,7 +46,7 @@ class ResourceQuotaServiceTest { * Connect service */ @Mock - KafkaConnectService kafkaConnectService; + ConnectorService connectorService; /** * Test get quota by namespace when it is defined @@ -492,7 +492,7 @@ void validateNewQuotaAgainstCurrentResourceForCountConnectors() { .spec(Map.of(COUNT_CONNECTORS.toString(), "1")) .build(); - when(kafkaConnectService.findAllForNamespace(ns)) + when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( 
Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); @@ -612,7 +612,7 @@ void getCurrentUsedResourceForCountConnectors() { .build()) .build(); - when(kafkaConnectService.findAllForNamespace(ns)) + when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); @@ -969,7 +969,7 @@ void validateConnectorQuota() { when(resourceQuotaRepository.findForNamespace("namespace")) .thenReturn(Optional.of(resourceQuota)); - when(kafkaConnectService.findAllForNamespace(ns)) + when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); @@ -1025,7 +1025,7 @@ void validateConnectorQuotaExceed() { when(resourceQuotaRepository.findForNamespace("namespace")) .thenReturn(Optional.of(resourceQuota)); - when(kafkaConnectService.findAllForNamespace(ns)) + when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); @@ -1096,7 +1096,7 @@ void toResponse() { when(topicService.findAllForNamespace(ns)) .thenReturn(List.of(topic1, topic2, topic3)); - when(kafkaConnectService.findAllForNamespace(ns)) + when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); @@ -1159,7 +1159,7 @@ void toResponseNoQuota() { when(topicService.findAllForNamespace(ns)) 
.thenReturn(List.of(topic1, topic2, topic3)); - when(kafkaConnectService.findAllForNamespace(ns)) + when(connectorService.findAllForNamespace(ns)) .thenReturn(List.of( Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); diff --git a/api/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/api/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index b0e2f4de..8ee35376 100644 --- a/api/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/api/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -6,7 +6,7 @@ import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.repositories.TopicRepository; -import com.michelin.ns4kafka.services.executors.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; diff --git a/api/src/test/java/com/michelin/ns4kafka/utils/EncryptionUtilsTest.java b/api/src/test/java/com/michelin/ns4kafka/utils/EncryptionUtilsTest.java new file mode 100644 index 00000000..c402052f --- /dev/null +++ b/api/src/test/java/com/michelin/ns4kafka/utils/EncryptionUtilsTest.java @@ -0,0 +1,52 @@ +package com.michelin.ns4kafka.utils; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import static org.apache.commons.lang3.StringUtils.EMPTY; + +class EncryptionUtilsTest { + /** + * Validate encryption/decryption when given text is null + */ + @Test + void validateEncryptAndDecryptAES256GCMNullText() { + String keyEncryptionKey = "myKeyEncryptionKeyWrongSize"; + + String stillNullText = EncryptionUtils.encryptAES256GCM(null, keyEncryptionKey); + Assertions.assertNull(stillNullText); + } + + 
/** + * Validate encryption/decryption when given text is blank + */ + @Test + void validateEncryptAndDecryptAES256GCMBlankText() { + String keyEncryptionKey = "myKeyEncryptionKeyWrongSize"; + + String stillBlankText = EncryptionUtils.encryptAES256GCM(EMPTY, keyEncryptionKey); + Assertions.assertEquals(EMPTY, stillBlankText); + } + + /** + * Validate encryption/decryption is not working when the KEK has wrong key size + */ + @Test + void validateEncryptAndDecryptAES256GCMWrongKeySize() { + String clearText = "myClearText"; + String keyEncryptionKey = "myKeyEncryptionKeyWrongSize"; + String myClearText = EncryptionUtils.encryptAES256GCM(clearText, keyEncryptionKey); + + Assertions.assertEquals(clearText, myClearText); + } + + @Test + void validateEncryptAndDecryptAES256GCM() { + String clearText = "myClearText"; + String keyEncryptionKey = "olDeandATEDiCenSiTurThrepASTrole"; + String encryptedText = EncryptionUtils.encryptAES256GCM(clearText, keyEncryptionKey); + String clearTextDecrypted = EncryptionUtils.decryptAES256GCM(encryptedText, keyEncryptionKey); + + Assertions.assertEquals(clearText, clearTextDecrypted); + } +} diff --git a/cli/src/main/resources/application.yml b/cli/src/main/resources/application.yml index 8d856478..5fedd7f8 100644 --- a/cli/src/main/resources/application.yml +++ b/cli/src/main/resources/application.yml @@ -40,6 +40,11 @@ kafkactl: - "CLASS:/spec/config/connector.class" - "TOPICS:/spec/config/topics" - "AGE:/metadata/creationTimestamp%AGO" + ConnectCluster: + - "CLUSTER:/metadata/name" + - "URL:/spec/url" + - "USERNAME:/spec/username" + - "PASSWORD:/spec/password" RoleBinding: - "ROLEBINDING:/metadata/name" - "GROUP:/spec/subject/subjectName" diff --git a/kafkactl.bat b/kafkactl.bat index 15634e40..04530630 100644 --- a/kafkactl.bat +++ b/kafkactl.bat @@ -1,2 +1,2 @@ @ECHO OFF -%JAVA_HOME%\bin\java -jar .\cli\build\libs\kafkactl-1.9.1-SNAPSHOT.jar %* +%JAVA_HOME%\bin\java -jar .\cli\build\libs\kafkactl-1.9.2-SNAPSHOT.jar %*