
# Kubernetes


## Helm

Use https://artifacthub.io/ to find packages.
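
If you prefer to stay in the terminal, `helm search hub` queries Artifact Hub directly; for example:

```powershell
# search Artifact Hub for charts matching a keyword
helm search hub grafana;
```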

!!! tip Minikube

```powershell
# minikube delete;
minikube start --container-runtime=containerd --mount --mount-string "C:\!\minikube\:/minikube-host" --cpus='max' --memory='max';
# minikube node add;
minikube tunnel --cleanup;
```

!!! tip Common Console

```powershell
kubectl run -n observability -i --tty --image cilerler/efe:latest myconsole --restart=Never --wait --rm bash;
```
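
While the console pod is running, it can also be driven from a second terminal for ad-hoc checks against in-cluster services; a sketch, assuming the image ships `curl` and the MinIO tenant from the later steps is installed:

```powershell
# probe the MinIO liveness endpoint through the running console pod
kubectl --namespace observability exec myconsole -- curl -s http://minio1-hl.observability.svc.cluster.local:9000/minio/health/live;
```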

### Helm Repositories

```powershell
helm repo add stable                https://charts.helm.sh/stable;
helm repo add incubator             https://charts.helm.sh/incubator;
helm repo add kubernetes-dashboard  https://kubernetes.github.io/dashboard;
helm repo add hashicorp             https://helm.releases.hashicorp.com;
helm repo add jetstack              https://charts.jetstack.io;
helm repo add prometheus-community  https://prometheus-community.github.io/helm-charts;
helm repo add minio                 https://operator.min.io/;
helm repo add grafana               https://grafana.github.io/helm-charts;
helm repo add ingress-nginx         https://kubernetes.github.io/ingress-nginx;
helm repo add dapr                  https://dapr.github.io/helm-charts;
helm repo add bitnami               https://charts.bitnami.com/bitnami;
helm repo add kubevious             https://helm.kubevious.io;
helm repo add elastic               https://helm.elastic.co;
helm repo add argo                  https://argoproj.github.io/argo-helm;
helm repo update;
helm search repo bitnami;
```

```powershell
helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace=kube-system --set extraArgs="{--enable-skip-login}";
```

!!! info Port Forwarding

```powershell
kubectl --namespace kube-system port-forward $(kubectl get pods --namespace kube-system -l "app.kubernetes.io/name=kubernetes-dashboard,app.kubernetes.io/instance=kubernetes-dashboard" -o jsonpath="{.items[0].metadata.name}") 8443:8443
```

!!! tip Link

http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:https/proxy/
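
The proxy URL above only resolves while `kubectl proxy` is running in a separate terminal:

```powershell
# expose the Kubernetes API on localhost:8001 so the dashboard proxy link works
kubectl proxy;
```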

```powershell
helm install kubevious kubevious/kubevious --namespace observability --create-namespace --atomic --version 1.1.1;
```

!!! info Port Forwarding

```powershell    
kubectl --namespace observability port-forward service/kubevious-ui-clusterip 8080:80;
```

```powershell
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --wait --set installCRDs=true;
kubectl apply --filename .\selfsigned-cert.yaml;
kubectl apply --filename .\selfsigned-issuer.yaml;
helm install cert-manager-trust jetstack/cert-manager-trust --namespace cert-manager --wait;
kubectl apply --filename .\selfsigned-trust.yaml;
```

debug

```powershell
$(kubectl get secret selfsigned-tls --namespace=cert-manager -o jsonpath="{.data.tls\.key}" | ForEach-Object {[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($_))}) > .\certificate\tls.key;
$(kubectl get secret selfsigned-tls --namespace=cert-manager -o jsonpath="{.data.tls\.crt}" | ForEach-Object {[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($_))}) > .\certificate\tls.crt;

# verification crt
kubectl get certificate;
kubectl get certificaterequest;
kubectl describe certificate --namespace default;
kubectl get secret selfsigned-tls -o yaml;
kubectl describe clusterissuer selfsigned-issuer;
kubectl describe issuer selfsigned-issuer;
openssl x509 -in .\certificate\tls.crt -text -noout;

# verification bundle
kubectl get bundle;
kubectl get cm -A --field-selector=metadata.name=selfsigned-bundle;
kubectl get cm --namespace kube-system selfsigned-bundle -o jsonpath="{.data.ca\.crt}";
```

selfsigned-cert.yaml

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: selfsigned-cert
  namespace: cert-manager
spec:
  isCA: true
  commonName: my-selfsigned-ca
  secretName: selfsigned-tls
  privateKey:
    algorithm: ECDSA
    size: 256
  issuerRef:
    kind: ClusterIssuer
    name: selfsigned-issuer
  dnsNames:
  - "*.*.svc.cluster.local"
```

selfsigned-issuer.yaml

```yaml
# apiVersion: cert-manager.io/v1
# kind: Issuer
# metadata:
#   name: selfsigned-issuer
#   namespace: default
# spec:
#   selfSigned: {}
# ---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned-issuer
spec:
  selfSigned: {}
```

selfsigned-trust.yaml

```yaml
apiVersion: trust.cert-manager.io/v1alpha1
kind: Bundle
metadata:
  name: selfsigned-bundle
  namespace: cert-manager
spec:
  sources:
  - secret:
      name: "selfsigned-tls"
      key: "ca.crt"
  target:
    configMap:
      key: "ca.crt"
    # namespaceSelector:
    #   matchLabels:
    #     linkerd.io/inject: "enabled"
```

deployment.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myconsole
  namespace: observability
spec:
  selector:
    matchLabels:
      app: myconsole
  replicas: 1
  template:
    metadata:
      labels:
        app: myconsole
    spec:
      containers:
        - name: myconsole
          image: cilerler/efe:latest
          command: ["/bin/sh", "-c", "tail -f /dev/null"]
          volumeMounts:
            - name: selfsigned-volume
              mountPath: /etc/ssl/certs/selfsigned-bundle-ca
            # - name: minio-volume
            #   mountPath: /etc/ssl/certs/minio1-tls
      volumes:
        - name: selfsigned-volume
          configMap:
            name: "selfsigned-bundle"
        # - name: minio-volume
        #   secret:
        #     secretName: "minio1-tls"
```
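
A quick sanity check that the trust bundle actually lands in the pod, assuming the deployment above has been applied to the `observability` namespace:

```powershell
kubectl apply --filename .\deployment.yaml;
# the mounted ConfigMap should contain the ca.crt published by the Bundle
kubectl --namespace observability exec deploy/myconsole -- ls /etc/ssl/certs/selfsigned-bundle-ca;
```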

```powershell
helm install ingress-nginx ingress-nginx/ingress-nginx --namespace default --set controller.metrics.enabled=true;
kubectl apply --filename .\ingress.yaml;
```

ingress.yaml

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: local-ingress
  annotations:
    cert-manager.io/cluster-issuer: selfsigned-issuer
    # optional allow max file upload size 100M
    nginx.ingress.kubernetes.io/client-body-buffer-size: 100M
    nginx.ingress.kubernetes.io/proxy-body-size: 100M
spec:
  rules:
    - host: minikube.internal
      http:
        paths:
          - backend:
              service:
                name: minio1-console
                port:
                  number: 9090
            path: /
            pathType: "Prefix"
  tls:
    - hosts:
        - minikube.internal
      secretName: selfsigned-tls
```
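
Once the ingress is applied (and the hosts entry from the warning below is in place), a minimal check from the host, assuming `minikube tunnel` is running:

```powershell
kubectl get ingress local-ingress;
# --insecure because the certificate is self-signed
curl.exe --insecure https://minikube.internal/;
```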

!!! warning Hosts file update required!

Add `127.0.0.1 minikube.internal` to your hosts file at `C:\Windows\System32\drivers\etc\hosts`.
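
From an elevated PowerShell session the entry can be appended like this:

```powershell
Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "127.0.0.1 minikube.internal";
```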

```powershell
helm install vault hashicorp/vault --set "injector.enabled=false" --namespace default;
```

reference

```powershell
# kubectl exec -ti vault-0 -- vault operator init;
# kubectl exec -ti vault-0 -- vault operator unseal <KEY_1>;
# kubectl exec -ti vault-0 -- vault operator unseal <KEY_2>;
# kubectl exec -ti vault-0 -- vault operator unseal <KEY_3>;
# kubectl exec -ti vault-0 -- vault login <ROOT_TOKEN>;

kubectl exec -ti vault-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json > init-keys.json;
kubectl exec -ti vault-0 -- vault operator unseal $(cat init-keys.json | jq -r ".unseal_keys_b64[]");
kubectl exec -ti vault-0 -- vault login $(cat init-keys.json | jq -r ".root_token");
kubectl exec -ti vault-0 -- vault secrets enable pki;
kubectl exec -ti vault-0 -- vault secrets tune -max-lease-ttl=8760h pki;
kubectl exec -ti vault-0 -- vault write pki/root/generate/internal common_name=example.com ttl=8760h;
kubectl exec -ti vault-0 -- vault write pki/config/urls issuing_certificates="http://vault.default.svc.cluster.local:8200/v1/pki/ca" crl_distribution_points="http://vault.default.svc.cluster.local:8200/v1/pki/crl";
kubectl exec -ti vault-0 -- vault write pki/roles/example-dot-com allowed_domains=example.com allow_subdomains=true max_ttl=72h;
kubectl exec --stdin=true --tty=true vault-0 -- bash;
vault policy write pki - <<EOF
path "pki*"                        { capabilities = ["read", "list"] }
path "pki/sign/example-dot-com"    { capabilities = ["create", "update"] }
path "pki/issue/example-dot-com"   { capabilities = ["create"] }
EOF
kubectl exec -ti vault-0 -- vault auth enable kubernetes;
kubectl exec --stdin=true --tty=true vault-0 -- bash;
vault write auth/kubernetes/config kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443";
kubectl exec -ti vault-0 -- vault write auth/kubernetes/role/issuer bound_service_account_names=issuer bound_service_account_namespaces=default policies=pki ttl=20m;
kubectl create serviceaccount issuer;
kubectl apply -f issuer-secret.yaml;
kubectl apply --filename vault-issuer.yaml;
kubectl apply --filename example-com-cert.yaml;
kubectl describe certificate.cert-manager example-com;
kubectl get issuers vault-issuer --namespace default -o wide;
```

issuer-secret.yaml

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: issuer-token-lmzpj
  annotations:
    kubernetes.io/service-account.name: issuer
type: kubernetes.io/service-account-token
```

vault-issuer.yaml

```yaml
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: vault-issuer
  namespace: default
spec:
  vault:
    server: http://vault.default:8200
    path: pki/sign/example-dot-com
    auth:
      kubernetes:
        mountPath: /v1/auth/kubernetes
        role: issuer
        secretRef:
          name: issuer-token-lmzpj
          key: token
```

example-com-cert.yaml

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-com
  namespace: default
spec:
  secretName: example-com-tls
  issuerRef:
    name: vault-issuer
  commonName: www.example.com
  dnsNames:
  - www.example.com
```
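
After applying the manifests, a quick way to confirm the Vault-backed certificate was issued:

```powershell
kubectl get certificate example-com --namespace default;
# the signed certificate and key end up in this secret
kubectl get secret example-com-tls --namespace default;
```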

```powershell
helm install minio-operator minio/operator --namespace minio-operator --create-namespace --wait;
helm install minio-observability minio/tenant --namespace observability --create-namespace --wait --set tenant.certificate.requestAutoCert=false,tenant.buckets[0].name=loki,tenant.buckets[1].name=tempo,tenant.buckets[2].name=mimir-block,tenant.buckets[3].name=mimir-alert,tenant.buckets[4].name=mimir-ruler;
```

!!! info Port Forwarding

```powershell
kubectl --namespace observability port-forward svc/minio1-console 9090:9090;
```

!!! help default credentials

```text
user `minio` 
pass `minio123`
```
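
To confirm the buckets were created, one option is the MinIO client against a port-forwarded tenant service; a sketch, assuming `mc` is installed locally:

```powershell
kubectl --namespace observability port-forward svc/minio1-hl 9000:9000;
# in a second terminal
mc alias set minikube http://localhost:9000 minio minio123;
mc ls minikube;
```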

```powershell
helm install prometheus prometheus-community/prometheus --namespace observability --create-namespace --set server.remoteWrite[0].url="http://mimir-distributor.observability.svc.cluster.local:8080/api/v1/push";
```

!!! info Port Forwarding

```powershell
kubectl --namespace observability port-forward $(kubectl get pods --namespace observability -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}") 9090
```

```powershell
helm install promtail grafana/promtail --namespace observability --create-namespace --set config.clients[0].url="http://loki-loki-distributed-gateway.observability.svc.cluster.local/loki/api/v1/push";
helm install loki grafana/loki-distributed --namespace observability --create-namespace --set loki.storage.bucketNames="loki",loki.storage.type="s3",loki.storage.s3.endpoint="minio1-hl.observability.svc.cluster.local:9000",loki.storage.s3.accessKeyId="minio",loki.storage.s3.secretAccessKey="minio123",loki.storage.s3.insecure=true -f .\configs\loki.yml;
helm install tempo grafana/tempo-distributed --namespace observability --create-namespace --set storage.trace.backend="s3",storage.trace.s3.bucket="tempo",storage.trace.s3.endpoint="minio1-hl.observability.svc.cluster.local:9000",storage.trace.s3.access_key="minio",storage.trace.s3.secret_key="minio123",storage.trace.s3.insecure=true;
helm install mimir grafana/mimir-distributed --namespace observability --create-namespace --wait --set minio.enabled=true,mimir.structuredConfig.blocks_storage.s3.bucket_name="mimir-block",mimir.structuredConfig.blocks_storage.backend="s3",mimir.structuredConfig.blocks_storage.s3.endpoint="minio1-hl.observability.svc.cluster.local:9000",mimir.structuredConfig.blocks_storage.s3.access_key_id="minio",mimir.structuredConfig.blocks_storage.s3.secret_access_key="minio123",mimir.structuredConfig.blocks_storage.s3.insecure=true,mimir.structuredConfig.alertmanager_storage.s3.bucket_name="mimir-alert",mimir.structuredConfig.alertmanager_storage.backend="s3",mimir.structuredConfig.alertmanager_storage.s3.endpoint="minio1-hl.observability.svc.cluster.local:9000",mimir.structuredConfig.alertmanager_storage.s3.access_key_id="minio",mimir.structuredConfig.alertmanager_storage.s3.secret_access_key="minio123",mimir.structuredConfig.alertmanager_storage.s3.insecure=true,mimir.structuredConfig.ruler_storage.s3.bucket_name="mimir-ruler",mimir.structuredConfig.ruler_storage.backend="s3",mimir.structuredConfig.ruler_storage.s3.endpoint="minio1-hl.observability.svc.cluster.local:9000",mimir.structuredConfig.ruler_storage.s3.access_key_id="minio",mimir.structuredConfig.ruler_storage.s3.secret_access_key="minio123",mimir.structuredConfig.ruler_storage.s3.insecure=true;
helm install grafana grafana/grafana --namespace observability --set plugins="{raintank-worldping-app,grafana-azure-data-explorer-datasource,marcusolsson-json-datasource}",persistence.enabled=true,service.port=3000,service.type=LoadBalancer,extraVolumeMounts[0].hostPath="/minikube-host/dashboards" -f ./configs/grafana.yml;
```

!!! info Port Forwarding

```powershell
kubectl --namespace observability port-forward $(kubectl get pods --namespace observability -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}") 3000
```

!!! help default credentials

```powershell
# user `admin`
kubectl get secret --namespace observability grafana -o jsonpath="{.data.admin-password}" | ForEach-Object {[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($_))};
```

grafana.yml

```yaml
apiVersion: 1
# persistence:
#   enabled: true
# service:
#   type: LoadBalancer
#   port: 3000
# plugins: ['raintank-worldping-app', 'grafana-azure-data-explorer-datasource', 'marcusolsson-json-datasource']
extraVolumeMounts:
- name: host-mount
  readOnly: false
  mountPath: /var/lib/grafana/dashboards
  # hostPath: /minikube-host/dashboards
dashboardProviders:
  dashboardproviders.yaml:
    apiVersion: 1
    providers:
    - name: 'local'
      orgId: 1
      folder: ''
      type: file
      disableDeletion: false
      editable: true
      options:
        path: /var/lib/grafana/dashboards
        foldersFromFilesStructure: true
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
    - name: Prometheus
      uid: prometheus
      type: prometheus
      access: proxy
      orgId: 1
      url: http://prometheus-server.observability.svc.cluster.local:80
      basicAuth: false
      isDefault: false
      version: 1
      editable: false
    - name: Tempo
      uid: tempo
      type: tempo
      access: proxy
      orgId: 1
      url: http://tempo.observability.svc.cluster.local:3100
      basicAuth: false
      isDefault: true
      version: 1
      editable: false
      apiVersion: 1
      jsonData:
        httpMethod: GET
        tracesToLogs:
          datasourceUid: 'loki'
          tags: ['job', 'instance', 'pod', 'namespace']
          mappedTags: [{ key: 'service.name', value: 'service' }]
          mapTagNamesEnabled: false
          spanStartTimeShift: '1h'
          spanEndTimeShift: '1h'
          filterByTraceID: false
          filterBySpanID: false
        lokiSearch:
          datasourceUid: 'loki'
    - name: Loki
      uid: loki
      type: loki
      access: proxy
      url: http://loki-loki-distributed-gateway.observability.svc.cluster.local:80
      jsonData:
        derivedFields:
          - datasourceUid: tempo
            matcherRegex: "TraceId=(\\w+)"
            name: TraceId
            url: "$${__value.raw}"
        httpHeaderName1: "X-Scope-OrgID"
      secureJsonData:
        httpHeaderValue1: "tenant1"
```

```powershell
helm install rabbitmq bitnami/rabbitmq --namespace default --set image.tag=3.8.9-debian-10-r20,auth.user=user,auth.password=user,service.port=5672,service.metricsPort=9419,metrics.enabled=true,rabbitmq.plugins="rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_shovel rabbitmq_shovel_management rabbitmq_management_themes";
helm install redis bitnami/redis --namespace default --set image.tag=6.0.9-debian-10-r0,master.service.port=6379,metrics.enabled=true,metrics.port=9121,usePassword=false;
helm install dapr dapr/dapr --namespace dapr-system --wait --create-namespace;
```
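
Before moving on, check that the pods from these releases are running:

```powershell
kubectl get pods --namespace default;
kubectl get pods --namespace dapr-system;
```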

!!! tip Link

http://localhost:8001/api/v1/namespaces/dapr-system/services/dapr-dashboard:8080/proxy/

```powershell
helm install mssql-linux stable/mssql-linux --namespace default --set image.repository=cilerler/mssql-server-linux,image.tag=2017-CU16,service.port=1433,service.type=LoadBalancer,persistence.enabled=true,acceptEula.value=Y,edition.value=Developer,agent.enabled=true;
kubectl get secret --namespace default mssql-linux-secret -o jsonpath="{.data.sapassword}" | ForEach-Object {[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($_))};
helm upgrade mssql-linux stable/mssql-linux --namespace default --reuse-values --recreate-pods --wait --set image.repository=mcr.microsoft.com/mssql/server,image.tag=2017-CU16-ubuntu;
# kubectl run mssqlcli --image=mcr.microsoft.com/mssql-tools -it --restart=Never --rm=true -- /bin/bash
# sqlcmd -S mssql-linux.default,1433 -U sa
```

## cilerler/prometheus-sql-exporter

```powershell
helm install prometheus-sql-exporter cilerler/prometheus-sql-exporter --namespace default --set-string podAnnotations."prometheus\.io/scrape"=true,podAnnotations."prometheus\.io/port"=9399;
```

```powershell
helm install my-release bitnami/mongodb;
helm install cerebro stable/cerebro --namespace default --set image.tag=latest,service.port=9000,service.type=LoadBalancer;
helm install elasticsearch elastic/elasticsearch --namespace default --set ingress.enabled=true,service.type=LoadBalancer,antiAffinity=soft,resources.requests.cpu=100m,resources.requests.memory=512M,resources.limits.cpu=300m,resources.limits.memory=1024M,volumeClaimTemplate.storageClassName=hostpath,volumeClaimTemplate.resources.requests.storage=100M,esJavaOpts="-Xmx128m -Xms128m" --version 6.5.0;
helm install kibana elastic/kibana --namespace default --set ingress.enabled=true,service.port=5601,service.type=LoadBalancer,resources.requests.cpu=100m,resources.requests.memory=512M,resources.limits.cpu=1000m,resources.limits.memory=512M --version 6.5.0;
```
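
A quick health probe once Elasticsearch is up; a sketch, assuming the chart's default service name `elasticsearch-master`:

```powershell
kubectl --namespace default port-forward svc/elasticsearch-master 9200:9200;
# in a second terminal
curl.exe "http://localhost:9200/_cluster/health?pretty";
```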