-
Notifications
You must be signed in to change notification settings - Fork 0
/
setup.sh
executable file
·86 lines (60 loc) · 2.46 KB
/
setup.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
#!/bin/bash
# Bootstrap a single-node Docker Swarm "fenrir" deployment over a Tailscale
# network. NOTE: exported variables only persist in the caller's shell if this
# script is sourced (`. ./setup.sh`), not executed.
set -euo pipefail

# IPv4 address expected; capture once so every derived value agrees.
TSC_IP=$(tailscale ip | awk 'NR == 1 {print; exit}')
# First two octets (e.g. "100.64" from "100.64.1.2"); used for the overlay subnet.
TSC_IP_BASE=${TSC_IP%.*}
TSC_IP_BASE=${TSC_IP_BASE%.*}
export TSC_IP TSC_IP_BASE
FENRIR_DOCKER_NETWORK="$(hostname)-network"
FENRIR_ROOT_DIR=$(pwd)
export FENRIR_DOCKER_NETWORK FENRIR_ROOT_DIR
# init docker swarm (exits non-zero if this node is already in a swarm)
docker swarm init --advertise-addr "${TSC_IP}"
# this node just initialised the swarm, so its own node ID is the leader's
DOCKER_LEADER_NODE_ID=$(docker info -f '{{.Swarm.NodeID}}')
export DOCKER_LEADER_NODE_ID
export DEFAULT_PSG_VER=17
# Create the host directories that the stacks bind-mount as volumes
# (kafka, metabase, devbox, portainer, postgres, tailscale, spark).
mkdir -p \
  kafka/volumes/zookeeper/data \
  kafka/volumes/zookeeper/logs \
  kafka/volumes/broker-0/data \
  kafka/volumes/connect/jars \
  analytics/volumes/metabase/data \
  devbox/volumes/data \
  portainer/volumes/data \
  "postgres/volumes/${DEFAULT_PSG_VER}" \
  tailscale/volumes/data \
  spark/volumes/data \
  spark/volumes/jars \
  spark/volumes/logs
# Label the leader node so stack placement constraints can target it.
for label in kafka dev airflow analytics postgres spark; do
  docker node update --label-add "${label}=true" "${DOCKER_LEADER_NODE_ID}"
done
# Attachable overlay network spanning the Tailscale /16, shared by all stacks.
# Reuses FENRIR_DOCKER_NETWORK (exported above) instead of recomputing the name.
# NOTE(review): '--scope global' kept from the original — overlay networks are
# normally 'swarm'-scoped; confirm the daemon accepts this value.
docker network create \
  --driver overlay \
  --subnet="${TSC_IP_BASE}.0.0/16" \
  --gateway="${TSC_IP}" \
  --attachable \
  --label layer=core \
  --scope global \
  "${FENRIR_DOCKER_NETWORK}"
# To reference host-shell variables from within a docker swarm stack YAML file
# (e.g. ports: \n\t published: ${VARIABLE}), import .env as env_file in
# kafka-stack.yml, or export the file into the environment first:
# set -a; . ./.env; set +a
# https://stackoverflow.com/a/58670417
# https://stackoverflow.com/a/58082993 also
# Deploy each stack in order; --detach=false blocks until services converge.
# Each entry is "<compose-file relative to FENRIR_ROOT_DIR>:<stack name>".
for spec in \
  kafka/kafka-stack.yml:kafka \
  portainer/portainer-stack.yml:portainer \
  postgres/postgres-stack.yml:postgres \
  analytics/metabase-stack.yml:metabase \
  airflow/airflow-stack.yml:airflow \
  devbox/devbox-stack.yml:devbox \
  spark/spark-stack.yml:spark
do
  docker stack deploy -c "${FENRIR_ROOT_DIR}/${spec%%:*}" "${spec##*:}" --detach=false
done