docs: example of using this module in multi-cluster mode with Istio service mesh

Signed-off-by: Ali Mukadam <ali.mukadam@oracle.com>
hyder committed Feb 22, 2024
1 parent a1fdfcb commit f4cdb00
Showing 20 changed files with 1,139 additions and 0 deletions.
263 changes: 263 additions & 0 deletions examples/istio-mc/README.md
@@ -0,0 +1,263 @@
# Multi-region service mesh with Istio and OKE

## Assumptions

1. A pair of OKE clusters in 2 different OCI regions will be used.
2. The OKE clusters will use private control planes.
3. The topology model used is [Multi-Primary on different networks](https://istio.io/latest/docs/setup/install/multicluster/multi-primary_multi-network/).

   ![Multi-primary on multiple networks](docs/assets/multi-primary%20multi-networks.png)

4. This example uses self-signed certificates.

## Create the OKE Clusters

1. Copy `terraform.tfvars.example` to `terraform.tfvars` and provide the necessary values as detailed in steps 2-6.

2. Configure the provider parameters:

```
# provider
api_fingerprint      = ""
api_private_key_path = "~/.oci/oci_rsa.pem"
home_region          = "ashburn"
tenancy_id           = "ocid1.tenancy.oc1.."
user_id              = "ocid1.user.oc1.."
compartment_id       = "ocid1.compartment.oc1.."
```

3. Configure an ssh key pair:

```
# ssh
ssh_private_key_path = "~/.ssh/id_rsa"
ssh_public_key_path  = "~/.ssh/id_rsa.pub"
```

4. Configure your clusters' regions and network CIDRs:

```
# clusters
clusters = {
  c1 = { region = "sydney", vcn = "10.1.0.0/16", pods = "10.201.0.0/16", services = "10.101.0.0/16", enabled = true }
  c2 = { region = "melbourne", vcn = "10.2.0.0/16", pods = "10.202.0.0/16", services = "10.102.0.0/16", enabled = true }
}
```

5. Configure additional parameters if necessary:

```
kubernetes_version = "v1.28.2"
cluster_type = "basic"
oke_control_plane = "private"
```

6. Configure your node pools:

```
nodepools = {
  np1 = {
    shape            = "VM.Standard.E4.Flex",
    ocpus            = 2,
    memory           = 64,
    size             = 2,
    boot_volume_size = 150,
  }
}
```

7. Run Terraform to create your clusters:

```
terraform apply --auto-approve
```

8. Once the Dynamic Routing Gateways (DRGs) and Remote Peering Connections (RPCs) have been created, use the OCI console to establish a connection between them.
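
Alternatively, the RPC peering can be established with the OCI CLI. A minimal sketch, assuming the RPC OCIDs from both regions are at hand (the OCIDs and region below are placeholders):

```
# Run once from either side; the peering is symmetric.
oci network remote-peering-connection connect \
  --remote-peering-connection-id "<c1 RPC OCID>" \
  --peer-id "<c2 RPC OCID>" \
  --peer-region-name ap-melbourne-1
```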

## Install Istio

1. Terraform will output an SSH convenience command. Use it to connect to the operator host:

```
ssh_to_operator = "ssh -o ProxyCommand='ssh -W %h:%p -i ~/.ssh/id_rsa opc@<bastion_ip>' -i ~/.ssh/id_rsa opc@<operator_ip>"
```

2. Verify connectivity to both clusters. In the commands below, `ktx` switches the kubectl context and `k` is shorthand for `kubectl` on the operator host:

```
for cluster in c1 c2; do
  ktx $cluster
  k get nodes
done
```

3. Generate certs for each cluster:

```
export ISTIO_HOME=/home/opc/istio-1.20.2
cd $ISTIO_HOME/tools/certs
make -f Makefile.selfsigned.mk c1-cacerts
make -f Makefile.selfsigned.mk c2-cacerts
```
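
The make targets write each cluster's intermediate CA into its own directory; a quick sanity check before proceeding:

```
# Each directory should contain: ca-cert.pem, ca-key.pem, cert-chain.pem and root-cert.pem.
ls c1 c2
```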

4. Create and label the istio-system namespace in each cluster:

```
for cluster in c1 c2; do
  ktx $cluster
  k create ns istio-system
  k label namespace istio-system topology.istio.io/network=$cluster
done
```
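
The `topology.istio.io/network` label must match the `network` value in each cluster's IstioOperator configuration (see the sketch after step 6); Istio uses it to decide when traffic must cross the east-west gateways.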

5. In each cluster, create a secret in the istio-system namespace containing the certificates. Run this from `$ISTIO_HOME/tools/certs` so the relative paths resolve:

```
for cluster in c1 c2; do
  ktx $cluster
  kubectl create secret generic cacerts -n istio-system \
    --from-file=$cluster/ca-cert.pem \
    --from-file=$cluster/ca-key.pem \
    --from-file=$cluster/root-cert.pem \
    --from-file=$cluster/cert-chain.pem
done
```

6. Install Istio in both clusters:

```
for cluster in c1 c2; do
  ktx $cluster
  istioctl install --set profile=default -f $HOME/$cluster.yaml
done
```
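
The per-cluster IstioOperator files `$HOME/c1.yaml` and `$HOME/c2.yaml` are not shown in this excerpt. For the multi-primary on different networks model they would take roughly the shape below — a sketch based on the upstream Istio docs, where `mesh1` is an assumed mesh name and the `network` values match the namespace labels applied in step 4:

```
# Illustrative only: per-cluster IstioOperator config for c1 (mirror it for c2).
cat <<EOF > $HOME/c1.yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: c1
      network: c1
EOF
```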

7. Verify the Istio installation in both clusters:

```
for cluster in c1 c2; do
  ktx $cluster
  istioctl verify-install
done
```

8. Check if the load balancers have been properly provisioned:

```
for cluster in c1 c2; do
  ktx $cluster
  k -n istio-system get svc
done
```
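
Each gateway service should have an external IP assigned before continuing. The output should look roughly like this (IPs illustrative; the east-west gateway is assumed to be defined in the per-cluster install config):

```
NAME                    TYPE           CLUSTER-IP   EXTERNAL-IP   PORT(S)
istio-eastwestgateway   LoadBalancer   10.101.x.x   <public IP>   15021,15443,...
istio-ingressgateway    LoadBalancer   10.101.x.x   <public IP>   15021,80,443,...
istiod                  ClusterIP      10.101.x.x   <none>        15010,15012,443,15014
```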

9. Check if Istio pods are running:

```
for cluster in c1 c2; do
  ktx $cluster
  k -n istio-system get pods
done
```

10. Create a Gateway to expose all services through the east-west gateway:

```
cd $ISTIO_HOME
for cluster in c1 c2; do
  ktx $cluster
  k apply -f samples/multicluster/expose-services.yaml
done
```
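
For reference, the upstream sample defines a `Gateway` named `cross-network-gateway` that selects the east-west gateway (`istio: eastwestgateway`) and exposes port 15443 with TLS mode `AUTO_PASSTHROUGH` for all `*.local` hosts, so services do not need to be exposed individually.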

11. Set the environment variables used to verify multi-cluster connectivity:

```
export CTX_CLUSTER1=c1
export CTX_CLUSTER2=c2
```

12. Enable endpoint discovery by creating a remote secret for each cluster and applying it to its peer:

```
istioctl create-remote-secret \
  --context="${CTX_CLUSTER1}" \
  --name="${CTX_CLUSTER1}" | \
  kubectl apply -f - --context="${CTX_CLUSTER2}"
istioctl create-remote-secret \
  --context="${CTX_CLUSTER2}" \
  --name="${CTX_CLUSTER2}" | \
  kubectl apply -f - --context="${CTX_CLUSTER1}"
```
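
Each cluster should now hold a remote secret pointing at its peer. A quick check, assuming the `istio/multiCluster=true` label that `create-remote-secret` applies:

```
for cluster in c1 c2; do
  ktx $cluster
  k -n istio-system get secrets -l istio/multiCluster=true
done
```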

## Verify cross-cluster connectivity

1. Deploy the HelloWorld Service in both clusters:

```
for cluster in c1 c2; do
  kubectl create --context="${cluster}" namespace sample
  kubectl label --context="${cluster}" namespace sample istio-injection=enabled
  kubectl apply --context="${cluster}" -f samples/helloworld/helloworld.yaml -l service=helloworld -n sample
done
```

2. Deploy v1 to cluster c1:

```
kubectl apply --context="${CTX_CLUSTER1}" \
  -f samples/helloworld/helloworld.yaml \
  -l version=v1 -n sample
kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=helloworld
```

3. Deploy v2 to cluster c2:

```
kubectl apply --context="${CTX_CLUSTER2}" \
  -f samples/helloworld/helloworld.yaml \
  -l version=v2 -n sample
kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=helloworld
```

4. Deploy the sleep client pod in both clusters:

```
kubectl apply --context="${CTX_CLUSTER1}" \
  -f samples/sleep/sleep.yaml -n sample
kubectl apply --context="${CTX_CLUSTER2}" \
  -f samples/sleep/sleep.yaml -n sample
```

5. Generate traffic from c1. Responses should alternate between v1 (served from c1) and v2 (served from c2):

```
for i in $(seq 1 100); do
  kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \
    "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sS helloworld.sample:5000/hello
done
```
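
If everything is wired up, the output alternates between the two versions, along these lines (pod name suffixes are illustrative):

```
Hello version: v2, instance: helloworld-v2-758dd55874-6x6xv
Hello version: v1, instance: helloworld-v1-86f77cd7bd-cpxhv
Hello version: v2, instance: helloworld-v2-758dd55874-6x6xv
```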

6. Generate traffic from c2. Responses should again alternate between v1 (c1) and v2 (c2):

```
for i in $(seq 1 100); do
  kubectl exec --context="${CTX_CLUSTER2}" -n sample -c sleep \
    "$(kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l \
    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sS helloworld.sample:5000/hello
done
```

7. If both loops return responses from both versions, cross-cluster connectivity is verified.

114 changes: 114 additions & 0 deletions examples/istio-mc/c1.tf
@@ -0,0 +1,114 @@
# Copyright (c) 2024 Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl

module "c1" {

source = "oracle-terraform-modules/oke/oci"
version = "5.1.1"

count = lookup(lookup(var.clusters, "c1"), "enabled") ? 1 : 0

home_region = lookup(local.regions, var.home_region)

region = lookup(local.regions, lookup(lookup(var.clusters, "c1"), "region"))

tenancy_id = var.tenancy_id

# general oci parameters
compartment_id = var.compartment_id

# ssh keys
ssh_private_key_path = var.ssh_private_key_path
ssh_public_key_path = var.ssh_public_key_path

# networking
create_drg = var.oke_control_plane == "private" ? true : false
drg_display_name = "c1"

remote_peering_connections = var.oke_control_plane == "private" ? {
for k, v in var.clusters : "rpc-to-${k}" => {} if k != "c1"
} : {}

nat_gateway_route_rules = var.oke_control_plane == "private" ? [
for k, v in var.clusters :
{
destination = lookup(v, "vcn")
destination_type = "CIDR_BLOCK"
network_entity_id = "drg"
description = "Routing to allow connectivity to ${title(k)} cluster"
} if k != "c1"
] : []

vcn_cidrs = [lookup(lookup(var.clusters, "c1"), "vcn")]
vcn_dns_label = "c1"
vcn_name = "c1"

#subnets
subnets = {
bastion = { newbits = 13, netnum = 0, dns_label = "bastion" }
operator = { newbits = 13, netnum = 1, dns_label = "operator" }
cp = { newbits = 13, netnum = 2, dns_label = "cp" }
int_lb = { newbits = 11, netnum = 16, dns_label = "ilb" }
pub_lb = { newbits = 11, netnum = 17, dns_label = "plb" }
workers = { newbits = 2, netnum = 1, dns_label = "workers" }
pods = { newbits = 2, netnum = 2, dns_label = "pods" }
}

# bastion host
create_bastion = true
bastion_allowed_cidrs = ["0.0.0.0/0"]
bastion_upgrade = false

# operator host
create_operator = true
operator_upgrade = false
create_iam_resources = true
create_iam_operator_policy = "always"
operator_install_k9s = true

# oke cluster options
cluster_name = "c1"
cluster_type = var.cluster_type
cni_type = var.preferred_cni
control_plane_is_public = var.oke_control_plane == "public"
control_plane_allowed_cidrs = [local.anywhere]
kubernetes_version = var.kubernetes_version
pods_cidr = lookup(lookup(var.clusters, "c1"), "pods")
services_cidr = lookup(lookup(var.clusters, "c1"), "services")


# node pools
allow_worker_ssh_access = true
kubeproxy_mode = "iptables"
worker_pool_mode = "node-pool"
worker_pools = var.nodepools
worker_cloud_init = local.worker_cloud_init
worker_image_type = "oke"

# oke load balancers
load_balancers = "both"
preferred_load_balancer = "public"

allow_rules_internal_lb = {
for p in local.service_mesh_ports :

format("Allow ingress to port %v", p) => {
protocol = local.tcp_protocol, port = p, source = lookup(lookup(var.clusters, "c2"), "vcn"), source_type = local.rule_type_cidr,
}
}

allow_rules_public_lb = {
for p in local.public_lb_allowed_ports :

format("Allow ingress to port %v", p) => {
protocol = local.tcp_protocol, port = p, source = "0.0.0.0/0", source_type = local.rule_type_cidr,
}
}

user_id = var.user_id

providers = {
oci = oci.c1
oci.home = oci.home
}
}
