Merge pull request #284 from contiv/master
Merge master into release-1.1
dseevr authored Nov 1, 2017
2 parents 42991e4 + ac54a9f commit de1fa19
Showing 9 changed files with 293 additions and 163 deletions.
10 changes: 5 additions & 5 deletions Makefile
@@ -19,7 +19,7 @@ cluster-legacy-swarm: vagrant-clean
@bash ./scripts/vagrantup.sh legacy-swarm

# Brings up a demo cluster to install Contiv on with swarm, centos.
cluster-swarm-mode: vagrant-clean
cluster-swarm-mode: vagrant-clean
@bash ./scripts/vagrantup.sh swarm-mode

# Brings up a demo cluster to install Contiv on with kubeadm, centos.
@@ -50,15 +50,15 @@ demo-kubeadm:
# of the demo Contiv Admin Console which was set up
# BUILD_VERSION must be setup to use a specific build, e.g.
# export BUILD_VERSION=1.0.0-beta.3
# Or run make as BUILD_VERSION=1.0.0-beta.3 make demo-k8s
# Or run make as BUILD_VERSION=1.0.0-beta.3 make demo-legacy-swarm
demo-legacy-swarm:
BUILD_VERSION=$(rel_ver) make cluster-legacy-swarm
BUILD_VERSION=$(rel_ver) make install-test-legacy-swarm

vagrant-clean:
cd cluster && vagrant destroy -f
@bash ./scripts/vbcleanup.sh

# Create a build and test the release installation on a vagrant cluster
# TODO: The vagrant part of this can be optimized by taking snapshots instead
# of creating a new set of VMs for each case
@@ -83,15 +83,15 @@ release-test-legacy-swarm: build
release-test-kubelegacy: build
# Test k8s ansible (centos by default)
make cluster-kubeadm
make install-test-kube-legacy
make install-test-kube-legacy

# shfmt reformats all shell scripts in this repo
shfmt:
go get github.com/contiv-experimental/sh/cmd/shfmt
find . -type f -name "*.sh" -print0 | xargs -0 shfmt -w

# Test the installation on the provided cluster. This is for bare-metal and other
# setups where the cluster is created using non-vagrant mechanisms.
# setups where the cluster is created using non-vagrant mechanisms.
# Clusters need to have k8s installed for kubernetes kubeadm based mechanism and
# docker installed on the master node for all others.
install-test-swarm-mode:
26 changes: 13 additions & 13 deletions install/ansible/env.json
@@ -1,20 +1,20 @@
{
"docker_version":"__DOCKER_VERSION__",
"aci_gw_image":"contiv/aci-gw:__ACI_GW_VERSION__",
"contiv_network_version":"__CONTIV_VERSION__",
"env": "{}",
{
"docker_version": "__DOCKER_VERSION__",
"aci_gw_image": "contiv/aci-gw:__ACI_GW_VERSION__",
"contiv_network_version": "__CONTIV_VERSION__",
"env": {},
"etcd_peers_group": "netplugin-master",
"service_vip": "__NETMASTER_IP__",
"validate_certs": "no",
"validate_certs": false,
"cluster_store": "__CLUSTER_STORE__",
"auth_proxy_image": "contiv/auth_proxy:__API_PROXY_VERSION__",
"docker_reset_container_state": "False",
"docker_reset_image_state": "False",
"etcd_cleanup_state": "False",
"auth_proxy_local_install": "False",
"contiv_network_local_install": "False",
"vxlan_port": "4789",
"docker_reset_container_state": __DOCKER_RESET_CONTAINER_STATE__,
"docker_reset_image_state": __DOCKER_RESET_IMAGE_STATE__,
"etcd_cleanup_state": __ETCD_CLEANUP_STATE__,
"auth_proxy_local_install": __AUTH_PROXY_LOCAL_INSTALL__,
"contiv_network_local_install": __CONTIV_NETWORK_LOCAL_INSTALL__,
"vxlan_port": __VXLAN_PORT__,
"netctl_url": "http://__NETMASTER_IP__:9999",
"contiv_v2plugin_install": "False",
"contiv_v2plugin_install": __CONTIV_V2PLUGIN_INSTALL__,
"contiv_v2plugin_image": "contiv/v2plugin:__CONTIV_V2PLUGIN_VERSION__"
}
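
A note on the env.json rewrite above: the old quoted string defaults ("False", "4789") become __PLACEHOLDER__ tokens that install.sh and uninstall.sh substitute with unquoted JSON values, so Ansible receives native booleans and numbers rather than strings. A minimal shell sketch of rendering and checking such a file, assuming default values for every flag (the concrete values and the json.tool check are illustrative, not part of this repository):

cp install/ansible/env.json /tmp/env.rendered.json
# Substitute each token with an unquoted JSON value; these defaults are
# examples only; the real scripts derive them from command-line options.
sed -i.bak \
    -e 's#__DOCKER_RESET_CONTAINER_STATE__#false#g' \
    -e 's#__DOCKER_RESET_IMAGE_STATE__#false#g' \
    -e 's#__ETCD_CLEANUP_STATE__#false#g' \
    -e 's#__AUTH_PROXY_LOCAL_INSTALL__#false#g' \
    -e 's#__CONTIV_NETWORK_LOCAL_INSTALL__#false#g' \
    -e 's#__VXLAN_PORT__#4789#g' \
    -e 's#__CONTIV_V2PLUGIN_INSTALL__#false#g' \
    /tmp/env.rendered.json
# After substitution the booleans and the port are bare JSON values, e.g.
#   "docker_reset_container_state": false,
#   "vxlan_port": 4789,
# and the whole file should parse as strict JSON:
python -m json.tool /tmp/env.rendered.json >/dev/null && echo "env.json renders to valid JSON"
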
101 changes: 52 additions & 49 deletions install/ansible/install.sh
@@ -1,6 +1,6 @@
#!/bin/sh

set -euo pipefail
set -xeuo pipefail

# This scripts runs in a container with ansible installed.
. ./install/ansible/install_defaults.sh
@@ -39,38 +39,38 @@ error_ret() {

while getopts ":n:a:im:d:v:ps:" opt; do
case $opt in
n)
netmaster=$OPTARG
;;
a)
ans_opts=$OPTARG
;;
i)
install_scheduler=true
;;
m)
contiv_network_mode=$OPTARG
;;
d)
fwd_mode=$OPTARG
;;
v)
aci_image=$OPTARG
;;
p)
contiv_v2plugin_install=true
;;
s)
cluster_store=$OPTARG
install_etcd=false
;;
:)
echo "An argument required for $OPTARG was not passed"
usage
;;
?)
usage
;;
n)
netmaster=$OPTARG
;;
a)
ans_opts=$OPTARG
;;
i)
install_scheduler=true
;;
m)
contiv_network_mode=$OPTARG
;;
d)
fwd_mode=$OPTARG
;;
v)
aci_image=$OPTARG
;;
p)
contiv_v2plugin_install=true
;;
s)
cluster_store=$OPTARG
install_etcd=false
;;
:)
echo "An argument required for $OPTARG was not passed"
usage
;;
?)
usage
;;
esac
done

@@ -128,23 +128,29 @@ if [ "$cluster_store" == "" ]; then
cluster_store="etcd://localhost:2379"
fi

sed -i.bak "s#.*service_vip.*#\"service_vip\":\"$service_vip\",#g" "$env_file"
sed -i.bak "s#.*netctl_url.*#\"netctl_url\":\"http://$service_vip:9999\",#g" "$env_file"
sed -i.bak "s#.*cluster_store.*#\"cluster_store\":\"$cluster_store\",#g" "$env_file"
sed -i.bak 's#__NETMASTER_IP__#'"$service_vip"'#g' "$env_file"
sed -i.bak 's#__CLUSTER_STORE__#'"$cluster_store"'#g' "$env_file"
sed -i.bak 's#__DOCKER_RESET_CONTAINER_STATE__#false#g' "$env_file"
sed -i.bak 's#__DOCKER_RESET_IMAGE_STATE__#false#g' "$env_file"
sed -i.bak 's#__ETCD_CLEANUP_STATE__#false#g' "$env_file"
sed -i.bak 's#__AUTH_PROXY_LOCAL_INSTALL__#false#g' "$env_file"
sed -i.bak 's#__CONTIV_NETWORK_LOCAL_INSTALL__#false#g' "$env_file"

# Copy certs
cp /var/contiv/cert.pem /ansible/roles/auth_proxy/files/
cp /var/contiv/key.pem /ansible/roles/auth_proxy/files/

if [ "$aci_image" != "" ]; then
sed -i.bak "s#.*aci_gw_image.*#\"aci_gw_image\":\"$aci_image\",#g" "$env_file"
sed -i.bak 's#__ACI_GW_VERSION__#'"$aci_image"'#g' "$env_file"
fi
if [ "$contiv_v2plugin_install" == "true" ]; then
sed -i.bak "s#.*vxlan_port.*#\"vxlan_port\":\"8472\",#g" "$env_file"
sed -i.bak "s#.*contiv_v2plugin_install.*#\"contiv_v2plugin_install\":\"True\",#g" "$env_file"
# docker uses 4789 port for container ingress network, uses 8472 by default to avoid conflicting
# https://docs.docker.com/engine/swarm/ingress/
sed -i.bak 's#__VXLAN_PORT__#8472#g' "$env_file"
sed -i.bak 's#__CONTIV_V2PLUGIN_INSTALL__#true#g' "$env_file"
else
sed -i.bak "s#.*vxlan_port.*#\"vxlan_port\":\"4789\",#g" "$env_file"
sed -i.bak "s#.*contiv_v2plugin_install.*#\"contiv_v2plugin_install\":\"False\",#g" "$env_file"
sed -i.bak 's#__VXLAN_PORT__#4789#g' "$env_file"
sed -i.bak 's#__CONTIV_V2PLUGIN_INSTALL__#false#g' "$env_file"
fi

echo "Installing Contiv"
@@ -170,14 +176,11 @@ echo '- include: install_auth_proxy.yml' >>$ansible_path/install_plays.yml
log_file_name="contiv_install_$(date -u +%m-%d-%Y.%H-%M-%S.UTC).log"
log_file="/var/contiv/$log_file_name"

# Ansible needs unquoted booleans but we need quoted booleans for json parsing.
# So remove quotes before sending to ansible and add them back after.
sed -i.bak "s#\"True\"#True#gI" "$env_file"
sed -i.bak "s#\"False\"#False#gI" "$env_file"
ansible-playbook $ans_opts -i "$host_inventory" -e "$(cat $env_file)" $ansible_path/install_plays.yml | tee $log_file
sed -i.bak "s#True#\"True\"#gI" "$env_file"
sed -i.bak "s#False#\"False\"#gI" "$env_file"
rm -rf "$env_file.bak*"
echo "Ansible extra vars from env.json:"
cat "$env_file"
# run playbook
ansible-playbook $ans_opts -i "$host_inventory" -e@"$env_file" $ansible_path/install_plays.yml | tee $log_file
rm -rf "$env_file.bak"

unreachable=$(grep "PLAY RECAP" -A 9999 $log_file | awk -F "unreachable=" '{print $2}' | awk '{print $1}' | grep -v "0" | xargs)
failed=$(grep "PLAY RECAP" -A 9999 $log_file | awk -F "failed=" '{print $2}' | awk '{print $1}' | grep -v "0" | xargs)
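
The other functional change in install.sh is how the rendered env.json reaches Ansible. The old flow inlined the file with -e "$(cat $env_file)" and had to sed-strip the quotes around "True"/"False" before the run and put them back afterwards; the new flow passes the file by reference with -e@, which tells ansible-playbook to load extra vars from the file itself. A minimal sketch of that invocation, with an illustrative inventory path, play file, and log path:

# "@" makes ansible-playbook read extra vars straight from the JSON file,
# so the booleans and numbers rendered into env.json stay unquoted and no
# quote/unquote round-trip is needed around the playbook run.
env_file=install/ansible/env.json
ansible-playbook -i ./contiv_hosts -e@"$env_file" ansible/install_plays.yml | tee /tmp/contiv_install.log

The vxlan_port token follows the same pattern: the v2plugin path renders it as 8472 because Docker swarm's ingress network already claims 4789, while the non-v2plugin path keeps the standard 4789.
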
4 changes: 2 additions & 2 deletions install/ansible/install_swarm.sh
@@ -34,7 +34,7 @@ Mandatory Options:
-f string Configuration file (cfg.yml) listing the hostnames with the control and data interfaces and optionally ACI parameters
-e string SSH key to connect to the hosts
-u string SSH User
-i Install the scheduler stack
-i Install the scheduler stack
-p Install v2plugin
Additional Options:
@@ -51,7 +51,7 @@ Additional parameters can also be updated in install/ansible/env.json file.
Examples:
1. Install Contiv with Docker Swarm on hosts specified by cfg.yml.
1. Install Contiv with Docker Swarm on hosts specified by cfg.yml.
./install/ansible/install_swarm.sh -f cfg.yml -e ~/ssh_key -u admin -i
2. Install Contiv on hosts specified by cfg.yml. Docker should be pre-installed on the hosts.
111 changes: 56 additions & 55 deletions install/ansible/uninstall.sh
@@ -1,6 +1,6 @@
#!/bin/sh

set -euo pipefail
set -xeuo pipefail
# This scripts runs in a container with ansible installed.
. ./install/ansible/install_defaults.sh

@@ -40,43 +40,43 @@ error_ret() {

while getopts ":n:a:ipm:d:v:rgs:" opt; do
case $opt in
n)
netmaster=$OPTARG
;;
a)
ans_opts=$OPTARG
;;
i)
uninstall_scheduler=true
;;
p)
uninstall_v2plugin=true
;;
m)
contiv_network_mode=$OPTARG
;;
d)
fwd_mode=$OPTARG
;;
v)
aci_image=$OPTARG
;;
s)
cluster_store=$OPTARG
;;
r)
reset="true"
;;
g)
reset_images="true"
;;
:)
echo "An argument required for $OPTARG was not passed"
usage
;;
?)
usage
;;
n)
netmaster=$OPTARG
;;
a)
ans_opts=$OPTARG
;;
i)
uninstall_scheduler=true
;;
p)
uninstall_v2plugin=true
;;
m)
contiv_network_mode=$OPTARG
;;
d)
fwd_mode=$OPTARG
;;
v)
aci_image=$OPTARG
;;
s)
cluster_store=$OPTARG
;;
r)
reset="true"
;;
g)
reset_images="true"
;;
:)
echo "An argument required for $OPTARG was not passed"
usage
;;
?)
usage
;;
esac
done

@@ -105,7 +105,7 @@ env_file=install/ansible/env.json

echo "Verifying ansible reachability"
ansible all -vvv $ans_opts -i $host_inventory -m setup -a 'filter=ansible_distribution*' | tee $inventory_log
if [ egrep 'FAIL|UNREACHABLE' $inventory_log > /dev/null ]; then
if [ egrep 'FAIL|UNREACHABLE' $inventory_log ] >/dev/null; then
echo "WARNING Some of the hosts are not accessible via passwordless SSH"
echo " "
echo "This means either the host is unreachable or passwordless SSH is not"
@@ -128,20 +128,24 @@ if [ "$cluster_store" == "" ]; then
cluster_store="etcd://$service_vip:2379"
fi

sed -i.bak "s#.*service_vip.*#\"service_vip\":\"$service_vip\",#g" "$env_file"
sed -i.bak "s#.*cluster_store.*#\"cluster_store\":\"$cluster_store\",#g" "$env_file"
sed -i.bak 's#__NETMASTER_IP__#'"$service_vip"'#g' "$env_file"
sed -i.bak 's#__CLUSTER_STORE__#'"$cluster_store"'#g' "$env_file"

sed -i.bak "s/.*docker_reset_container_state.*/\"docker_reset_container_state\":$reset,/g" $env_file
sed -i.bak "s/.*docker_reset_image_state.*/\"docker_reset_image_state\":$reset_images,/g" $env_file
sed -i.bak "s/.*etcd_cleanup_state.*/\"etcd_cleanup_state\":$reset,/g" $env_file
sed -i.bak 's#__DOCKER_RESET_CONTAINER_STATE__#'"$reset"'#g' "$env_file"
sed -i.bak 's#__DOCKER_RESET_IMAGE_STATE__#'"$reset_images"'#g' "$env_file"
sed -i.bak 's#__ETCD_CLEANUP_STATE__#'"$reset"'#g' "$env_file"
sed -i.bak 's#__AUTH_PROXY_LOCAL_INSTALL__#false#g' "$env_file"
sed -i.bak 's#__CONTIV_NETWORK_LOCAL_INSTALL__#false#g' "$env_file"
sed -i.bak 's#__AUTH_PROXY_LOCAL_INSTALL__#false#g' "$env_file"
sed -i.bak 's#__CONTIV_NETWORK_LOCAL_INSTALL__#false#g' "$env_file"

if [ "$aci_image" != "" ]; then
sed -i.bak "s#.*aci_gw_image.*#\"aci_gw_image\":\"$aci_image\",#g" "$env_file"
sed -i.bak 's#__ACI_GW_VERSION__#'"$aci_image"'#g' "$env_file"
fi
if [ "$uninstall_v2plugin" == "true" ]; then
sed -i.bak "s#.*contiv_v2plugin_install.*#\"contiv_v2plugin_install\":\"True\",#g" "$env_file"
sed -i.bak 's#__CONTIV_V2PLUGIN_INSTALL__#true#g' "$env_file"
else
sed -i.bak "s#.*contiv_v2plugin_install.*#\"contiv_v2plugin_install\":\"False\",#g" "$env_file"
sed -i.bak 's#__CONTIV_V2PLUGIN_INSTALL__#false#g' "$env_file"
fi

echo "Uninstalling Contiv"
@@ -163,14 +167,11 @@ fi
log_file_name="contiv_uninstall_$(date -u +%m-%d-%Y.%H-%M-%S.UTC).log"
log_file="/var/contiv/$log_file_name"

# Ansible needs unquoted booleans but we need quoted booleans for json parsing.
# So remove quotes before sending to ansible and add them back after.
sed -i.bak "s#\"True\"#True#gI" "$env_file"
sed -i.bak "s#\"False\"#False#gI" "$env_file"
ansible-playbook $ans_opts -i "$host_inventory" -e "$(cat $env_file)" $ansible_path/uninstall_plays.yml | tee $log_file
sed -i.bak "s#True#\"True\"#gI" "$env_file"
sed -i.bak "s#False#\"False\"#gI" "$env_file"
rm -rf "$env_file.bak*"
echo "Ansible extra vars from env.json:"
cat "$env_file"
ansible-playbook $ans_opts -i "$host_inventory" -e@"$env_file" $ansible_path/uninstall_plays.yml | tee $log_file

rm -rf "$env_file.bak"

unreachable=$(grep "PLAY RECAP" -A 9999 $log_file | awk -F "unreachable=" '{print $2}' | awk '{print $1}' | grep -v "0" | xargs)
failed=$(grep "PLAY RECAP" -A 9999 $log_file | awk -F "failed=" '{print $2}' | awk '{print $1}' | grep -v "0" | xargs)
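
Both install.sh and uninstall.sh end by parsing Ansible's PLAY RECAP to detect unreachable hosts and failed tasks. A small illustration of what the grep/awk pipeline extracts; the recap line below is a fabricated sample, not output from this run:

# Fabricated example of one PLAY RECAP host line:
recap="node1 : ok=42 changed=10 unreachable=0 failed=1"
# Take everything after "unreachable=" / "failed=", then the first field.
echo "$recap" | awk -F "unreachable=" '{print $2}' | awk '{print $1}'   # -> 0
echo "$recap" | awk -F "failed=" '{print $2}' | awk '{print $1}'        # -> 1
# The scripts then filter with grep -v "0", so non-zero counts (like the
# failed=1 above) survive and are reported after the playbook run.
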
[Diffs for the remaining 4 changed files did not load.]
