diff --git a/Makefile b/Makefile index a69f57d..703aab2 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ export CONTIV_INSTALLER_VERSION ?= $(BUILD_VERSION) # downloaded and built assets intended to go in installer by build.sh export CONTIV_ARTIFACT_STAGING := $(PWD)/artifact_staging # some assets are retrieved from GitHub, this is the default version to fetch -export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.1.7 +export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.2.0 export CONTIV_ACI_GW_VERSION ?= latest export NETPLUGIN_OWNER ?= contiv # setting NETPLUGIN_BRANCH compiles that commit on demand, @@ -107,10 +107,15 @@ release-test-swarm-mode: build make cluster-swarm-mode make install-test-swarm-mode +# create k8s release testing image (does not contain ansible) +k8s-build: prepare-netplugin-images assemble-build + +prepare-netplugin-images: + @bash ./scripts/prepare_netplugin_images.sh # Create a build and test the release installation on a vagrant cluster # TODO: The vagrant part of this can be optimized by taking snapshots instead # of creating a new set of VMs for each case -release-test-kubeadm: build +release-test-kubeadm: k8s-build # Test kubeadm (centos by default) make cluster-kubeadm make install-test-kubeadm @@ -152,4 +157,4 @@ install-test-legacy-swarm: ci: release-test-kubeadm ci-old: release-test-swarm-mode release-test-kubeadm release-test-legacy-swarm -.PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy +.PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy k8s-build prepare-netplugin-images diff --git a/cluster/docker17/bootstrap_centos.sh b/cluster/docker17/bootstrap_centos.sh index cafc25c..42a0651 100755 --- a/cluster/docker17/bootstrap_centos.sh +++ b/cluster/docker17/bootstrap_centos.sh @@ -9,8 +9,8 @@ fi yum install -y yum-utils yum-config-manager \ - --add-repo \ - https://download.docker.com/linux/centos/docker-ce.repo + --add-repo \ + https://download.docker.com/linux/centos/docker-ce.repo yum makecache fast yum -y install docker-ce diff --git a/cluster/docker17/centos_docker_install.sh b/cluster/docker17/centos_docker_install.sh index 1f454ed..b8c0656 100755 --- a/cluster/docker17/centos_docker_install.sh +++ b/cluster/docker17/centos_docker_install.sh @@ -6,8 +6,8 @@ set -euo pipefail if [ $EUID -ne 0 ]; then - echo "Please run this script as root user" - exit 1 + echo "Please run this script as root user" + exit 1 fi # Install pre-reqs @@ -16,22 +16,21 @@ yum install -y yum-utils device-mapper-persistent-data lvm2 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - # Install Docker -# If you require a specific version, comment out the first line and uncomment +# If you require a specific version, comment out the first line and uncomment # the other one. Fill in the version you want. 
yum -y install docker-ce #sudo yum install docker-ce-<VERSION> # Post-install steps -# add admin user to docker group +# add admin user to docker group usermod -aG docker $SUDO_USER # add /etc/docker/ if it doesn't exist mkdir -p /etc/docker # add (and create) daemon.json with entry for storage-device -cat << EOF >> /etc/docker/daemon.json +cat <<EOF >>/etc/docker/daemon.json { "storage-driver": "devicemapper" } EOF diff --git a/cluster/docker17/master.sh b/cluster/docker17/master.sh index d7ea625..93f8763 100644 --- a/cluster/docker17/master.sh +++ b/cluster/docker17/master.sh @@ -1,5 +1,5 @@ docker swarm init --advertise-addr $1 -docker swarm join-token manager | \ - grep -A 20 "docker swarm join" > $2/manager.sh -docker swarm join-token worker | \ - grep -A 20 "docker swarm join" > $2/worker.sh +docker swarm join-token manager | + grep -A 20 "docker swarm join" >$2/manager.sh +docker swarm join-token worker | + grep -A 20 "docker swarm join" >$2/worker.sh diff --git a/cluster/k8s1.6/k8smaster.sh b/cluster/k8s1.6/k8smaster.sh index d1a5c96..a294f18 100644 --- a/cluster/k8s1.6/k8smaster.sh +++ b/cluster/k8s1.6/k8smaster.sh @@ -2,6 +2,6 @@ kubeadm init --token=$1 --apiserver-advertise-address=$2 --skip-preflight-checks if [ "$#" -eq 4 ]; then cp /etc/kubernetes/admin.conf /home/$4 chown $(id -u $4):$(id -g $4) /home/$4/admin.conf - echo "export KUBECONFIG=/home/$4/admin.conf" >> /home/$4/.$(basename $SHELL)rc - echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.$(basename $SHELL)rc -fi \ No newline at end of file + echo "export KUBECONFIG=/home/$4/admin.conf" >>/home/$4/.$(basename $SHELL)rc + echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>~/.$(basename $SHELL)rc +fi diff --git a/cluster/k8s1.8/k8smaster.sh b/cluster/k8s1.8/k8smaster.sh index d1a5c96..a294f18 100644 --- a/cluster/k8s1.8/k8smaster.sh +++ b/cluster/k8s1.8/k8smaster.sh @@ -2,6 +2,6 @@ kubeadm init --token=$1 --apiserver-advertise-address=$2 --skip-preflight-checks if [ "$#" -eq 4 ]; then cp /etc/kubernetes/admin.conf /home/$4 chown $(id -u $4):$(id -g $4) /home/$4/admin.conf - echo "export KUBECONFIG=/home/$4/admin.conf" >> /home/$4/.$(basename $SHELL)rc - echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.$(basename $SHELL)rc -fi \ No newline at end of file + echo "export KUBECONFIG=/home/$4/admin.conf" >>/home/$4/.$(basename $SHELL)rc + echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>~/.$(basename $SHELL)rc +fi diff --git a/install/ansible/env.json b/install/ansible/env.json index 35c541b..e2ddbdf 100644 --- a/install/ansible/env.json +++ b/install/ansible/env.json @@ -6,7 +6,8 @@ "etcd_peers_group": "netplugin-master", "service_vip": "__NETMASTER_IP__", "validate_certs": false, - "cluster_store": "__CLUSTER_STORE__", + "cluster_store_driver": "__CLUSTER_STORE_TYPE__", + "cluster_store_url": "__CLUSTER_STORE_URLS__", "auth_proxy_image": "contiv/auth_proxy:__API_PROXY_VERSION__", "docker_reset_container_state": __DOCKER_RESET_CONTAINER_STATE__, "docker_reset_image_state": __DOCKER_RESET_IMAGE_STATE__, diff --git a/install/ansible/install.sh b/install/ansible/install.sh index fccfee7..cf35a70 100644 --- a/install/ansible/install.sh +++ b/install/ansible/install.sh @@ -37,7 +37,7 @@ error_ret() { exit 1 } -while getopts ":n:a:im:d:v:ps:" opt; do +while getopts ":n:a:im:d:v:pe:c:s:" opt; do case $opt in n) netmaster=$OPTARG ;; @@ -67,8 +67,29 @@ while getopts ":n:a:im:d:v:ps:" opt; do p) contiv_v2plugin_install=true ;; + e) + # etcd endpoint option + cluster_store_type=etcd + cluster_store_urls=$OPTARG + install_etcd=false + ;;
+ c) + # consul endpoint option + cluster_store_type=consul + cluster_store_urls=$OPTARG + install_etcd=false + ;; s) - cluster_store=$OPTARG + # backward compatibility + echo "-s option has been deprecated, use -e or -c instead" + cluster_store=$OPTARG + if [[ "$cluster_store" =~ ^etcd://.+ ]]; then + cluster_store_type=etcd + cluster_store_urls=$(echo $cluster_store | sed s/etcd/http/) + elif [[ "$cluster_store" =~ ^consul://.+ ]]; then + cluster_store_type=consul + cluster_store_urls=$(echo $cluster_store | sed s/consul/http/) + fi install_etcd=false ;; :) @@ -88,6 +109,15 @@ mkdir -p "$inventory" host_inventory="$inventory/contiv_hosts" node_info="$inventory/contiv_nodes" +# TODO: use python to generate the inventory +# This python-generated inventory contains +# 1. groups and hosts +# 2. ssh info for each host +# 3. control interface for each host +# 4. data interface for each host +# 5. aci info +# 6. fwd_mode(bridge/routing), net_mode(vlan/vxlan), contiv_network_mode(standalone/aci) +# then the sed calls against env_file below set the rest; the two steps should be combined into one ./install/genInventoryFile.py "$contiv_config" "$host_inventory" "$node_info" $contiv_network_mode $fwd_mode if [ "$netmaster" = "" ]; then @@ -131,13 +161,15 @@ if [ "$service_vip" == "" ]; then service_vip=$netmaster fi -if [ "$cluster_store" == "" ]; then - cluster_store="etcd://localhost:2379" +if [ "${cluster_store_urls:-}" = "" ]; then + cluster_store_type="etcd" + cluster_store_urls="http://localhost:2379" fi # variables already replaced by build.sh will not pattern match sed -i.bak 's#__NETMASTER_IP__#'"$service_vip"'#g' "$env_file" -sed -i.bak 's#__CLUSTER_STORE__#'"$cluster_store"'#g' "$env_file" +sed -i.bak 's#__CLUSTER_STORE_TYPE__#'"$cluster_store_type"'#g' "$env_file" +sed -i.bak 's#__CLUSTER_STORE_URLS__#'"$cluster_store_urls"'#g' "$env_file" sed -i.bak 's#__DOCKER_RESET_CONTAINER_STATE__#false#g' "$env_file" sed -i.bak 's#__DOCKER_RESET_IMAGE_STATE__#false#g' "$env_file" sed -i.bak 's#__ETCD_CLEANUP_STATE__#false#g' "$env_file" @@ -205,8 +237,6 @@ if [ "$unreachable" = "" ] && [ "$failed" = "" ]; then echo "Please export DOCKER_HOST=tcp://$netmaster:2375 in your shell before proceeding" echo "Contiv UI is available at https://$netmaster:10000" echo "Please use the first run wizard or configure the setup as follows:" - echo " Configure forwarding mode (optional, default is bridge)." - echo " netctl global set --fwd-mode routing" echo " Configure ACI mode (optional)" echo " netctl global set --fabric-mode aci --vlan-range -" echo " Create a default network" diff --git a/install/ansible/install_swarm.sh b/install/ansible/install_swarm.sh index dbddcaf..d7dc97e 100755 --- a/install/ansible/install_swarm.sh +++ b/install/ansible/install_swarm.sh @@ -73,53 +73,53 @@ mkdir -p "$src_conf_path" cluster_param="" while getopts ":f:n:a:e:ipm:d:v:u:c:k:s:" opt; do case $opt in - f) - cp "$OPTARG" "$host_contiv_config" - ;; - n) - netmaster=$OPTARG - ;; - a) - ans_opts="$OPTARG" - ;; - e) - ans_key=$OPTARG - ;; - u) - ans_user=$OPTARG - ;; - m) - contiv_network_mode=$OPTARG - ;; - d) - fwd_mode=$OPTARG - ;; - v) - aci_image=$OPTARG - ;; - s) - cluster_param="-s $OPTARG" - ;; - - i) - install_scheduler="-i" - ;; - p) - v2plugin_param="-p" - ;; - c) - cp "$OPTARG" "$host_tls_cert" - ;; - k) - cp "$OPTARG" "$host_tls_key" - ;; - :) - echo "An argument required for $OPTARG was not passed" - usage - ;; - ?) 
- usage - ;; + f) + cp "$OPTARG" "$host_contiv_config" + ;; + n) + netmaster=$OPTARG + ;; + a) + ans_opts="$OPTARG" + ;; + e) + ans_key=$OPTARG + ;; + u) + ans_user=$OPTARG + ;; + m) + contiv_network_mode=$OPTARG + ;; + d) + fwd_mode=$OPTARG + ;; + v) + aci_image=$OPTARG + ;; + s) + cluster_param="-s $OPTARG" + ;; + + i) + install_scheduler="-i" + ;; + p) + v2plugin_param="-p" + ;; + c) + cp "$OPTARG" "$host_tls_cert" + ;; + k) + cp "$OPTARG" "$host_tls_key" + ;; + :) + echo "An argument required for $OPTARG was not passed" + usage + ;; + ?) + usage + ;; esac done @@ -148,7 +148,7 @@ fi if [ "$ans_opts" == "" ]; then ans_opts="--private-key $def_ans_key -u $ans_user" else - ans_opts+=" --private-key $def_ans_key -u $ans_user" + ans_opts+=" --private-key $def_ans_key -u $ans_user" fi # Generate SSL certs for auth proxy @@ -172,4 +172,6 @@ mounts[5]="$src_conf_path:$container_conf_path:Z" mounts[6]="-v" mounts[7]="$(pwd)/contiv_cache:/var/contiv_cache:Z" set -x -docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param $cluster_param $v2plugin_param +docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh \ + $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode \ + -d $fwd_mode $aci_param $cluster_param $v2plugin_param diff --git a/install/ansible/uninstall_swarm.sh b/install/ansible/uninstall_swarm.sh index 874c186..c96d46d 100755 --- a/install/ansible/uninstall_swarm.sh +++ b/install/ansible/uninstall_swarm.sh @@ -65,57 +65,57 @@ mkdir -p "$src_conf_path" cluster_param="" while getopts ":f:n:a:e:ipm:d:v:u:rgs:" opt; do case $opt in - f) - cp "$OPTARG" "$host_contiv_config" - ;; - n) - netmaster=$OPTARG - ;; - a) - ans_opts="$OPTARG" - ;; - e) - ans_key=$OPTARG - ;; - u) - ans_user=$OPTARG - ;; - m) - contiv_network_mode=$OPTARG - ;; - d) - fwd_mode=$OPTARG - ;; - v) - aci_image=$OPTARG - ;; - s) - cluster_param="-s $OPTARG" - ;; - - i) - echo "Uninstalling docker will fail if the uninstallation is being run from a node in the cluster." - echo "Press Ctrl+C to cancel the uininstall and start it from a host outside the cluster." - echo "Uninstalling Contiv, Docker and Swarm in 20 seconds" - sleep 20 - uninstall_scheduler="-i" - ;; - p) - uninstall_v2plugin="-p" - ;; - r) - reset_params="-r $reset_params" - ;; - g) - reset_params="-g $reset_params" - ;; - :) - echo "An argument required for $OPTARG was not passed" - usage - ;; - ?) - usage - ;; + f) + cp "$OPTARG" "$host_contiv_config" + ;; + n) + netmaster=$OPTARG + ;; + a) + ans_opts="$OPTARG" + ;; + e) + ans_key=$OPTARG + ;; + u) + ans_user=$OPTARG + ;; + m) + contiv_network_mode=$OPTARG + ;; + d) + fwd_mode=$OPTARG + ;; + v) + aci_image=$OPTARG + ;; + s) + cluster_param="-s $OPTARG" + ;; + + i) + echo "Uninstalling docker will fail if the uninstallation is being run from a node in the cluster." + echo "Press Ctrl+C to cancel the uninstall and start it from a host outside the cluster." + echo "Uninstalling Contiv, Docker and Swarm in 20 seconds" + sleep 20 + uninstall_scheduler="-i" + ;; + p) + uninstall_v2plugin="-p" + ;; + r) + reset_params="-r $reset_params" + ;; + g) + reset_params="-g $reset_params" + ;; + :) + echo "An argument required for $OPTARG was not passed" + usage + ;; + ?) 
+ usage + ;; esac done @@ -145,7 +145,7 @@ if [ "$ans_opts" == "" ]; then ans_opts="--private-key $def_ans_key -u $ans_user" else # escape each word in the array and put spaces between the words - ans_opts+=" --private-key $def_ans_key -u $ans_user" + ans_opts+=" --private-key $def_ans_key -u $ans_user" fi echo "Starting the uninstaller container" image_name="__CONTIV_INSTALL_VERSION__" diff --git a/install/genInventoryFile.py b/install/genInventoryFile.py index 8c4263d..403bf53 100644 --- a/install/genInventoryFile.py +++ b/install/genInventoryFile.py @@ -64,6 +64,8 @@ def writeInventory(self, outFd, groupName, groupRole): def writeGlobalVars(self, outFd): outFd.write("[" + "all:vars]\n") var_line = "fwd_mode={}\n".format(self.fwdMode) + net_mode = "vlan" if self.fwdMode == "bridge" else "vxlan" + var_line += "net_mode=%s\n" % net_mode outFd.write(var_line) var_line = "contiv_network_mode={}\n".format(self.networkMode) outFd.write(var_line) @@ -95,7 +97,7 @@ def writeGlobalVars(self, outFd): # if no leaf was found, treat as error if leafCount == 0: self.handleMissing("APIC_LEAF_NODES", self.cfgFile, outFd); - + leafStr += "\n" outFd.write(leafStr) diff --git a/install/k8s/rbac/aci_gw.yaml b/install/k8s/configs/aci_gw.yaml similarity index 100% rename from install/k8s/rbac/aci_gw.yaml rename to install/k8s/configs/aci_gw.yaml diff --git a/install/k8s/k8s1.4/cleanup.yaml b/install/k8s/configs/cleanup.yaml similarity index 100% rename from install/k8s/k8s1.4/cleanup.yaml rename to install/k8s/configs/cleanup.yaml diff --git a/install/k8s/rbac/contiv-grafana.yml b/install/k8s/configs/contiv-grafana.yml similarity index 100% rename from install/k8s/rbac/contiv-grafana.yml rename to install/k8s/configs/contiv-grafana.yml diff --git a/install/k8s/rbac/contiv-prometheus.yml b/install/k8s/configs/contiv-prometheus.yml similarity index 100% rename from install/k8s/rbac/contiv-prometheus.yml rename to install/k8s/configs/contiv-prometheus.yml diff --git a/install/k8s/rbac/contiv.yaml b/install/k8s/configs/contiv.yaml similarity index 72% rename from install/k8s/rbac/contiv.yaml rename to install/k8s/configs/contiv.yaml index 14923f3..696d247 100644 --- a/install/k8s/rbac/contiv.yaml +++ b/install/k8s/configs/contiv.yaml @@ -1,5 +1,4 @@ --- - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: @@ -12,7 +11,9 @@ subjects: - kind: ServiceAccount name: contiv-netplugin namespace: kube-system + --- + kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: @@ -20,24 +21,31 @@ metadata: namespace: kube-system rules: - apiGroups: - - "" + - "" + - extensions resources: + - endpoints + - nodes + - namespaces + - networkpolicies - pods - verbs: - - get - - apiGroups: - - "" - resources: - services - - endpoints verbs: - watch + - list + - update + - get + --- + apiVersion: v1 kind: ServiceAccount metadata: name: contiv-netplugin namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + --- apiVersion: rbac.authorization.k8s.io/v1beta1 @@ -52,7 +60,9 @@ subjects: - kind: ServiceAccount name: contiv-netmaster namespace: kube-system + --- + kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: @@ -71,12 +81,16 @@ rules: - watch - list - update + --- + apiVersion: v1 kind: ServiceAccount metadata: name: contiv-netmaster namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" --- @@ -89,16 +103,21 @@ metadata: name: contiv-config namespace: kube-system data: - cluster_store: "etcd://__NETMASTER_IP__:6666" - 
vlan_if: "__VLAN_IF__" + contiv_mode: kubernetes + contiv_fwdmode: routing + contiv_netmode: vxlan + # The location of your cluster store. This is set to the + # advertise-client value below from the contiv-etcd service. + # Change it to an external etcd/consul instance if required. + contiv_etcd: "http://__NETMASTER_IP__:6666" # The CNI network configuration to install on each node. - cni_config: |- + contiv_cni_config: |- { "cniVersion": "0.1.0", "name": "contiv-net", "type": "contivk8s" } - config: |- + contiv_k8s_config: |- { "K8S_API_SERVER": "https://__NETMASTER_IP__:6443", "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", @@ -107,6 +126,7 @@ data: "K8S_TOKEN": "", "SVC_SUBNET": "10.96.0.0/12" } + --- # This manifest installs contiv-netplugin container, as well @@ -139,9 +159,6 @@ spec: effect: NoSchedule serviceAccountName: contiv-netplugin containers: - # Runs netplugin container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - name: netplugin-exporter image: contiv/stats env: @@ -149,38 +166,48 @@ spec: valueFrom: configMapKeyRef: name: contiv-config - key: cluster_store + key: contiv_etcd - name: EXPORTER_MODE value: 'netplugin' - name: contiv-netplugin image: contiv/netplugin:__CONTIV_VERSION__ - args: - - -pkubernetes env: - - name: VLAN_IF + - name: CONTIV_ROLE + value: netplugin + - name: CONTIV_NETPLUGIN_MODE valueFrom: configMapKeyRef: name: contiv-config - key: vlan_if - - name: VTEP_IP + key: contiv_mode + - name: CONTIV_NETPLUGIN_VTEP_IP valueFrom: fieldRef: fieldPath: status.podIP - - name: CONTIV_ETCD + - name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: contiv-config - key: cluster_store + key: contiv_etcd - name: CONTIV_CNI_CONFIG valueFrom: configMapKeyRef: name: contiv-config - key: cni_config - - name: CONTIV_CONFIG + key: contiv_cni_config + - name: CONTIV_K8S_CONFIG valueFrom: configMapKeyRef: name: contiv-config - key: config + key: contiv_k8s_config + - name: CONTIV_NETPLUGIN_FORWARD_MODE + valueFrom: + configMapKeyRef: + name: contiv-config + key: contiv_fwdmode + - name: CONTIV_NETPLUGIN_NET_MODE + valueFrom: + configMapKeyRef: + name: contiv-config + key: contiv_netmode securityContext: privileged: true volumeMounts: @@ -196,6 +223,9 @@ spec: - mountPath: /var/contiv name: var-contiv readOnly: false + - mountPath: /var/log/contiv + name: var-log-contiv + readOnly: false - mountPath: /opt/cni/bin name: cni-bin-dir readOnly: false @@ -216,13 +246,15 @@ spec: - name: var-contiv hostPath: path: /var/contiv - # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: etc-cni-dir hostPath: path: /etc/cni/net.d/ + - name: var-log-contiv + hostPath: + path: /var/log/contiv --- # This manifest deploys the Contiv API Server on Kubernetes. 
@@ -265,44 +297,65 @@ spec: valueFrom: configMapKeyRef: name: contiv-config - key: cluster_store + key: contiv_etcd - name: EXPORTER_MODE value: 'netmaster' - name: contiv-netmaster image: contiv/netplugin:__CONTIV_VERSION__ - args: - - -m - - -pkubernetes env: - - name: CONTIV_ETCD + - name: CONTIV_ROLE + value: netmaster + - name: CONTIV_NETMASTER_MODE + valueFrom: + configMapKeyRef: + name: contiv-config + key: contiv_mode + - name: CONTIV_NETMASTER_ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: contiv-config - key: cluster_store - - name: CONTIV_CONFIG + key: contiv_etcd + - name: CONTIV_K8S_CONFIG valueFrom: configMapKeyRef: name: contiv-config - key: config + key: contiv_k8s_config + - name: CONTIV_NETMASTER_FORWARD_MODE + valueFrom: + configMapKeyRef: + name: contiv-config + key: contiv_fwdmode + - name: CONTIV_NETMASTER_NET_MODE + valueFrom: + configMapKeyRef: + name: contiv-config + key: contiv_netmode volumeMounts: - mountPath: /var/contiv name: var-contiv readOnly: false + - mountPath: /var/log/contiv + name: var-log-contiv + readOnly: false + - name: contiv-api-proxy - image: contiv/auth_proxy:__CONTIV_VERSION__ + image: contiv/auth_proxy:__API_PROXY_VERSION__ args: - --tls-key-file=/var/contiv/auth_proxy_key.pem - --tls-certificate=/var/contiv/auth_proxy_cert.pem - - --data-store-address=$(CONTIV_ETCD) + - --data-store-address=$(STORE_URL) + - --data-store-driver=$(STORE_DRIVER) - --netmaster-address=localhost:9999 env: - name: NO_NETMASTER_STARTUP_CHECK value: "0" - - name: CONTIV_ETCD + - name: STORE_URL valueFrom: configMapKeyRef: name: contiv-config - key: cluster_store + key: contiv_etcd + - name: STORE_DRIVER + value: etcd securityContext: privileged: false volumeMounts: @@ -314,4 +367,6 @@ spec: - name: var-contiv hostPath: path: /var/contiv ---- + - name: var-log-contiv + hostPath: + path: /var/log/contiv diff --git a/install/k8s/configs/etcd.yaml b/install/k8s/configs/etcd.yaml new file mode 100644 index 0000000..d1aa30a --- /dev/null +++ b/install/k8s/configs/etcd.yaml @@ -0,0 +1,99 @@ +--- +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: contiv-etcd + namespace: kube-system + labels: + k8s-app: contiv-etcd +spec: + selector: + matchLabels: + k8s-app: contiv-etcd + template: + metadata: + labels: + k8s-app: contiv-etcd + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + hostPID: true + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + initContainers: + - name: contiv-etcd-init + image: ferest/etcd-initer:latest + imagePullPolicy: Always + env: + - name: ETCD_INIT_ARGSFILE + value: /etc/contiv/etcd/contiv-etcd-args + - name: ETCD_INIT_LISTEN_PORT + value: '6666' + - name: ETCD_INIT_PEER_PORT + value: '6667' + - name: ETCD_INIT_CLUSTER + value: 'contiv0=http://__NETMASTER_IP__:6667' + - name: ETCD_INIT_DATA_DIR + value: /var/lib/etcd/contiv-data + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: /etc/contiv/etcd + containers: + - name: contiv-etcd + image: quay.io/coreos/etcd:__ETCD_VERSION__ + command: + - sh + - -c + - "/usr/local/bin/etcd $(cat $ETCD_INIT_ARGSFILE)" + env: + - name: ETCD_INIT_ARGSFILE + value: /etc/contiv/etcd/contiv-etcd-args + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: /etc/contiv/etcd + - name: contiv-etcd-data-dir + mountPath: /var/lib/etcd/contiv-data + volumes: + - name: contiv-etcd-data-dir + hostPath: + path: /var/lib/etcd/contiv-data + - name: contiv-etcd-conf-dir + hostPath: + 
path: /etc/contiv/etcd + +--- + +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: contiv-etcd-proxy + namespace: kube-system + labels: + k8s-app: contiv-etcd-proxy +spec: + selector: + matchLabels: + k8s-app: contiv-etcd-proxy + template: + metadata: + labels: + k8s-app: contiv-etcd-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + hostPID: true + containers: + - name: contiv-etcd-proxy + image: quay.io/coreos/etcd:__ETCD_VERSION__ + env: + - name: ETCD_LISTEN_CLIENT_URLS + value: 'http://127.0.0.1:6666' + - name: ETCD_PROXY + value: "on" + - name: ETCD_INITIAL_CLUSTER + value: 'contiv0=http://__NETMASTER_IP__:6667' diff --git a/install/k8s/rbac/prometheus.yml b/install/k8s/configs/prometheus.yml similarity index 100% rename from install/k8s/rbac/prometheus.yml rename to install/k8s/configs/prometheus.yml diff --git a/install/k8s/install.sh b/install/k8s/install.sh index 545397b..36d4ff0 100755 --- a/install/k8s/install.sh +++ b/install/k8s/install.sh @@ -8,17 +8,10 @@ if [ $EUID -ne 0 ]; then exit 1 fi -if [ -e /etc/kubernetes/admin.conf ] -then - kubectl="kubectl --kubeconfig /etc/kubernetes/admin.conf" +if [ -e /etc/kubernetes/admin.conf ]; then + kubectl="kubectl --kubeconfig /etc/kubernetes/admin.conf" else - kubectl="kubectl" -fi -k8sversion=$($kubectl version --short | grep "Server Version") -if [[ "$k8sversion" == *"v1.4"* ]] || [[ "$k8sversion" == *"v1.5"* ]]; then - k8sfolder="k8s1.4" -else - k8sfolder="rbac" + kubectl="kubectl" fi # @@ -26,7 +19,7 @@ fi # # If an etcd or consul cluster store is not provided, we will start an etcd instance -cluster_store="" +cluster_store_urls="" # Netmaster address netmaster="" @@ -64,7 +57,10 @@ Mandatory Options: -n string DNS name/IP address of the host to be used as the net master service VIP. Additional Options: --s string External cluster store to be used to store contiv data. This can be an etcd or consul server. + +-e string external etcd endpoints for cluster store +-c string external consul endpoints for cluster store +-s string (DEPRECATED) External cluster store to be used to store contiv data. This can be an etcd or consul server. -v string Data plane interface -w string Forwarding mode (“routing” or “bridge”). Default mode is “bridge” -c string Configuration file for netplugin @@ -79,7 +75,7 @@ Additional Options for ACI: -p string Password to connect to the APIC -l string APIC leaf node -d string APIC physical domain --e string APIC EPG bridge domain +-b string APIC EPG bridge domain -m string APIC contracts unrestricted mode Examples: @@ -102,7 +98,6 @@ EOF exit 1 } - # this function copies $1 to $2 if the full paths to $1 and $2 (as determined by # `realpath`) are different. this allows people to specify a certificate, key, etc. # which was moved into place by a previous installer run. 
@@ -121,66 +116,87 @@ error_ret() { exit 1 } -while getopts ":s:n:v:w:t:k:a:u:p:l:d:e:m:y:z:g:i:" opt; do +while getopts ":c:e:s:n:v:w:t:k:a:u:p:l:d:b:m:y:z:g:i:" opt; do case $opt in - s) - cluster_store=$OPTARG - ;; - n) - netmaster=$OPTARG - ;; - v) - vlan_if=$OPTARG - ;; - w) - fwd_mode=$OPTARG - ;; - t) - tls_cert=$OPTARG - ;; - k) - tls_key=$OPTARG - ;; - a) - apic_url=$OPTARG - ;; - u) - apic_username=$OPTARG - ;; - p) - apic_password=$OPTARG - ;; - l) - apic_leaf_node=$OPTARG - ;; - d) - apic_phys_domain=$OPTARG - ;; - e) - apic_epg_bridge_domain=$OPTARG - ;; - m) - apic_contracts_unrestricted_mode=$OPTARG - ;; - y) - aci_key=$OPTARG - ;; - z) - apic_cert_dn=$OPTARG - ;; - g) - infra_gateway=$OPTARG - ;; - i) - infra_subnet=$OPTARG - ;; - :) - echo "An argument required for $OPTARG was not passed" - usage - ;; - ?) - usage - ;; + e) + # etcd endpoint option + cluster_store_type=etcd + cluster_store_urls=$OPTARG + install_etcd=false + ;; + c) + # consul endpoint option + cluster_store_type=consul + cluster_store_urls=$OPTARG + install_etcd=false + ;; + s) + # backward compatibility + echo "-s option has been deprecated, use -e or -c instead" + cluster_store=$OPTARG + if [[ "$cluster_store" =~ ^etcd://.+ ]]; then + cluster_store_type=etcd + cluster_store_urls=$(echo $cluster_store | sed s/etcd/http/) + elif [[ "$cluster_store" =~ ^consul://.+ ]]; then + cluster_store_type=consul + cluster_store_urls=$(echo $cluster_store | sed s/consul/http/) + fi + ;; + n) + netmaster=$OPTARG + ;; + v) + vlan_if=$OPTARG + ;; + w) + fwd_mode=$OPTARG + ;; + t) + tls_cert=$OPTARG + ;; + k) + tls_key=$OPTARG + ;; + a) + apic_url=$OPTARG + ;; + u) + apic_username=$OPTARG + ;; + p) + apic_password=$OPTARG + ;; + l) + apic_leaf_node=$OPTARG + ;; + d) + apic_phys_domain=$OPTARG + ;; + b) + apic_epg_bridge_domain=$OPTARG + ;; + m) + apic_contracts_unrestricted_mode=$OPTARG + ;; + y) + aci_key=$OPTARG + ;; + z) + apic_cert_dn=$OPTARG + ;; + g) + infra_gateway=$OPTARG + ;; + i) + infra_subnet=$OPTARG + ;; + :) + echo "An argument required for $OPTARG was not passed" + usage + ;; + ?) 
+ usage + ;; esac done @@ -207,16 +223,20 @@ contiv_yaml="./.contiv.yaml" rm -f $contiv_yaml # Create the new config file from the templates -contiv_yaml_template="./install/k8s/$k8sfolder/contiv.yaml" -contiv_etcd_template="./install/k8s/$k8sfolder/etcd.yaml" -contiv_aci_gw_template="./install/k8s/$k8sfolder/aci_gw.yaml" +contiv_yaml_template="./install/k8s/configs/contiv.yaml" +contiv_etcd_template="./install/k8s/configs/etcd.yaml" +contiv_aci_gw_template="./install/k8s/configs/aci_gw.yaml" cat $contiv_yaml_template >>$contiv_yaml -if [ "$cluster_store" = "" ]; then +if [ "$cluster_store_urls" = "" ]; then cat $contiv_etcd_template >>$contiv_yaml -else - sed -i.bak "s#cluster_store:.*#cluster_store: \"$cluster_store\"#g" $contiv_yaml +elif [ "$cluster_store_type" = "etcd" ]; then + sed -i.bak "s#contiv_etcd:.*#contiv_etcd: \"$cluster_store_urls\"#g" $contiv_yaml +elif [ "$cluster_store_type" = "consul" ]; then + sed -i.bak "s#contiv_etcd:.*#contiv_consul: \"$cluster_store_urls\"#g" $contiv_yaml + # change auth_proxy + sed -i.bak "s#value: etcd#value: consul#g" $contiv_yaml fi if [ "$apic_url" != "" ]; then @@ -280,8 +300,9 @@ sleep 5 # extract netctl from netplugin container echo "Extracting netctl from netplugin container" netplugin_version=$( - sed '/contiv_network_version/!d;s/.*\: \?"\(.*\)".*/\1/' \ - install/ansible/env.json) + sed '/contiv_network_version/!d;s/.*\: \?"\(.*\)".*/\1/' \ + install/ansible/env.json +) docker rm netplugin-tmp >/dev/null 2>/dev/null || : c_id=$(docker create --name netplugin-tmp contiv/netplugin:$netplugin_version) docker cp ${c_id}:/contiv/bin/netctl /usr/bin @@ -294,7 +315,7 @@ set +e for i in {0..150}; do sleep 2 # check contiv netmaster pods - $kubectl get pods -n kube-system | grep -v "Running" | grep -q ^contiv-netmaster && continue + $kubectl get pods -n kube-system | grep -v "Running" | grep -q ^contiv-netmaster && continue # check that netmaster is available netctl tenant ls >/dev/null 2>&1 || continue break @@ -304,20 +325,14 @@ done set -e -if [ "$fwd_mode" == "routing" ]; then - netctl global set --fwd-mode $fwd_mode || true - netctl net ls -q | grep -q -w "contivh1" || netctl net create -n infra -s $infra_subnet -g $infra_gateway contivh1 - - # Restart netplugin to allow fwdMode change - $kubectl -n kube-system delete daemonset contiv-netplugin - $kubectl apply -f $contiv_yaml -fi +# fwd mode has to be routing to make it work with vxlan +netctl net ls -q | grep -q -w "contivh1" || netctl net create -n infra -s $infra_subnet -g $infra_gateway contivh1 set +e for i in {0..150}; do sleep 2 # check contiv pods - $kubectl get pods -n kube-system --request-timeout=1s | grep -v "Running" | grep -q ^contiv && continue + $kubectl get pods -n kube-system --request-timeout=1s | grep -v "Running" | grep -q ^contiv && continue # check netplugin status curl -s localhost:9090/inspect/driver | grep -wq FwdMode || continue break @@ -327,7 +342,6 @@ done set -e - echo "Installation is complete" echo "=========================================================" echo " " diff --git a/install/k8s/k8s1.4/aci_gw.yaml b/install/k8s/k8s1.4/aci_gw.yaml deleted file mode 100644 index f185be9..0000000 --- a/install/k8s/k8s1.4/aci_gw.yaml +++ /dev/null @@ -1,53 +0,0 @@ - -# This manifest installs contiv-aci-gw container on -# each master and worker node in a Kubernetes cluster. 
-kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: contiv-aci-gw - namespace: kube-system - labels: - k8s-app: contiv-aci-gw -spec: - selector: - matchLabels: - k8s-app: contiv-aci-gw - template: - metadata: - labels: - k8s-app: contiv-aci-gw - spec: - hostNetwork: true - containers: - # Runs aci-gw container on each Kubernetes node. - - name: contiv-aci-gw - image: contiv/aci-gw:__ACI_GW_VERSION__ - env: - - name: APIC_URL - value: "__APIC_URL__" - - name: APIC_USERNAME - value: "__APIC_USERNAME__" - - name: APIC_PASSWORD - value: "__APIC_PASSWORD__" - - name: APIC_CERT_DN - value: "__APIC_CERT_DN__" - - name: APIC_LEAF_NODE - value: "__APIC_LEAF_NODE__" - - name: APIC_PHYS_DOMAIN - value: "__APIC_PHYS_DOMAIN__" - - name: APIC_EPG_BRIDGE_DOMAIN - value: "__APIC_EPG_BRIDGE_DOMAIN__" - - name: APIC_CONTRACTS_UNRESTRICTED_MODE - value: "__APIC_CONTRACTS_UNRESTRICTED_MODE__" - securityContext: - privileged: false - volumeMounts: - - mountPath: /aciconfig - name: aci-config - volumes: - - name: aci-config - secret: - secretName: aci.key - ---- - diff --git a/install/k8s/k8s1.4/contiv.yaml b/install/k8s/k8s1.4/contiv.yaml deleted file mode 100644 index 1777dbc..0000000 --- a/install/k8s/k8s1.4/contiv.yaml +++ /dev/null @@ -1,290 +0,0 @@ ---- -# This ConfigMap is used to configure a self-hosted Contiv installation. -# It can be used with an external cluster store(etcd or consul) or used -# with the etcd instance being installed as contiv-etcd -kind: ConfigMap -apiVersion: v1 -metadata: - name: contiv-config - namespace: kube-system -data: - # The location of your cluster store. This is set to the - # avdertise-client value below from the contiv-etcd service. - # Change it to an external etcd/consul instance if required. - cluster_store: "etcd://__NETMASTER_IP__:6666" - # The CNI network configuration to install on each node. - cni_config: |- - { - "cniVersion": "0.1.0", - "name": "contiv-net", - "type": "contivk8s" - } - config: |- - { - "K8S_API_SERVER": "https://__NETMASTER_IP__:6443", - "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - "K8S_KEY": "", - "K8S_CERT": "", - "K8S_TOKEN": "", - "SVC_SUBNET": "10.96.0.0/12" - } ---- - -# This manifest installs contiv-netplugin container, as well -# as the Contiv CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: contiv-netplugin - namespace: kube-system - labels: - k8s-app: contiv-netplugin -spec: - selector: - matchLabels: - k8s-app: contiv-netplugin - template: - metadata: - labels: - k8s-app: contiv-netplugin - spec: - hostNetwork: true - hostPID: true - containers: - # Runs netplugin container on each Kubernetes node. This - # container programs network policy and routes on each - # host. 
- - name: contiv-netplugin - image: contiv/netplugin:__CONTIV_VERSION__ - args: - - -pkubernetes - - -x - env: - - name: VLAN_IF - value: __VLAN_IF__ - - name: VTEP_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: CONTIV_ETCD - valueFrom: - configMapKeyRef: - name: contiv-config - key: cluster_store - - name: CONTIV_CNI_CONFIG - valueFrom: - configMapKeyRef: - name: contiv-config - key: cni_config - - name: CONTIV_CONFIG - valueFrom: - configMapKeyRef: - name: contiv-config - key: config - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/openvswitch - name: etc-openvswitch - readOnly: false - - mountPath: /lib/modules - name: lib-modules - readOnly: false - - mountPath: /var/run - name: var-run - readOnly: false - - mountPath: /var/contiv - name: var-contiv - readOnly: false - - mountPath: /etc/kubernetes/pki - name: etc-kubernetes-pki - readOnly: false - - mountPath: /etc/kubernetes/ssl - name: etc-kubernetes-ssl - readOnly: false - - mountPath: /opt/cni/bin - name: cni-bin-dir - readOnly: false - - mountPath: /etc/cni/net.d/ - name: etc-cni-dir - readOnly: false - volumes: - # Used by contiv-netplugin - - name: etc-openvswitch - hostPath: - path: /etc/openvswitch - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run - hostPath: - path: /var/run - - name: var-contiv - hostPath: - path: /var/contiv - - name: etc-kubernetes-pki - hostPath: - path: /etc/kubernetes/pki - - name: etc-kubernetes-ssl - hostPath: - path: /etc/kubernetes/ssl - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: etc-cni-dir - hostPath: - path: /etc/cni/net.d/ ---- - -# This manifest deploys the Contiv API Server on Kubernetes. -apiVersion: extensions/v1beta1 -kind: ReplicaSet -metadata: - name: contiv-netmaster - namespace: kube-system - labels: - k8s-app: contiv-netmaster -spec: - # The netmaster should have 1, 3, 5 nodes of which one is active at any given time. - # More nodes are desired in a production environment for HA. - replicas: 1 - template: - metadata: - name: contiv-netmaster - namespace: kube-system - labels: - k8s-app: contiv-netmaster - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # Only run this pod on the master. - nodeSelector: - kubeadm.alpha.kubernetes.io/role: master - # The netmaster must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. 
- hostNetwork: true - hostPID: true - containers: - - name: contiv-netmaster - image: contiv/netplugin:__CONTIV_VERSION__ - args: - - -m - - -pkubernetes - env: - - name: CONTIV_ETCD - valueFrom: - configMapKeyRef: - name: contiv-config - key: cluster_store - - name: CONTIV_CONFIG - valueFrom: - configMapKeyRef: - name: contiv-config - key: config - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/openvswitch - name: etc-openvswitch - readOnly: false - - mountPath: /lib/modules - name: lib-modules - readOnly: false - - mountPath: /var/run - name: var-run - readOnly: false - - mountPath: /var/contiv - name: var-contiv - readOnly: false - - mountPath: /etc/kubernetes/ssl - name: etc-kubernetes-ssl - readOnly: false - - mountPath: /opt/cni/bin - name: cni-bin-dir - readOnly: false - volumes: - # Used by contiv-netmaster - - name: etc-openvswitch - hostPath: - path: /etc/openvswitch - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run - hostPath: - path: /var/run - - name: var-contiv - hostPath: - path: /var/contiv - - name: etc-kubernetes-ssl - hostPath: - path: /etc/kubernetes/ssl - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin ---- - -# This manifest deploys the Contiv API Proxy Server on Kubernetes. -apiVersion: extensions/v1beta1 -kind: ReplicaSet -metadata: - name: contiv-api-proxy - namespace: kube-system - labels: - k8s-app: contiv-api-proxy -spec: - # The API proxy should have 1, 3, 5 nodes of which one is active at any given time. - # More nodes are desired in a production environment for HA. - replicas: 1 - template: - metadata: - name: contiv-api-proxy - namespace: kube-system - labels: - k8s-app: contiv-api-proxy - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # Only run this pod on the master. - nodeSelector: - kubeadm.alpha.kubernetes.io/role: master - # The API proxy must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - hostPID: true - containers: - - name: contiv-api-proxy - image: contiv/auth_proxy:__CONTIV_VERSION__ - args: - - --tls-key-file=/var/contiv/auth_proxy_key.pem - - --tls-certificate=/var/contiv/auth_proxy_cert.pem - - --data-store-address=$(CONTIV_ETCD) - - --netmaster-address=__NETMASTER_IP__:9999 - env: - - name: NO_NETMASTER_STARTUP_CHECK - value: "0" - - name: CONTIV_ETCD - valueFrom: - configMapKeyRef: - name: contiv-config - key: cluster_store - securityContext: - privileged: false - volumeMounts: - - mountPath: /var/contiv - name: var-contiv - readOnly: false - volumes: - - name: var-contiv - hostPath: - path: /var/contiv - ---- diff --git a/install/k8s/k8s1.4/etcd.yaml b/install/k8s/k8s1.4/etcd.yaml deleted file mode 100644 index 166c6ed..0000000 --- a/install/k8s/k8s1.4/etcd.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# This manifest installs the Contiv etcd on the kubeadm master. -# If using an external etcd instance, this can be deleted. This uses a DaemonSet -# to force it to run on the master even when the master isn't schedulable, and uses -# nodeSelector to ensure it only runs on the master. 
-apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: contiv-etcd - namespace: kube-system - labels: - k8s-app: contiv-etcd -spec: - template: - metadata: - labels: - k8s-app: contiv-etcd - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # Only run this pod on the master. - nodeSelector: - kubeadm.alpha.kubernetes.io/role: master - hostNetwork: true - containers: - - name: contiv-etcd - image: quay.io/coreos/etcd:__ETCD_VERSION__ - args: - - --name - - contiv0 - - --data-dir - - /var/etcd/contiv-data - - --initial-advertise-peer-urls - - http://__NETMASTER_IP__:6667 - - --listen-peer-urls - - http://0.0.0.0:6667 - - --listen-client-urls - - http://0.0.0.0:6666 - - --advertise-client-urls - - http://__NETMASTER_IP__:6666 - - --initial-cluster - - contiv0=http://__NETMASTER_IP__:6667 - - --initial-cluster-state - - new - volumeMounts: - - name: var-etcd - mountPath: /var/etcd - volumes: - - name: var-etcd - hostPath: - path: /var/etcd - ---- diff --git a/install/k8s/rbac/cleanup.yaml b/install/k8s/rbac/cleanup.yaml deleted file mode 100644 index 9b6d8c3..0000000 --- a/install/k8s/rbac/cleanup.yaml +++ /dev/null @@ -1,64 +0,0 @@ - -# This manifest runs the contiv-cleanup container -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: contiv-cleanup - namespace: kube-system - labels: - k8s-app: contiv-cleanup -spec: - selector: - matchLabels: - k8s-app: contiv-cleanup - template: - metadata: - labels: - k8s-app: contiv-cleanup - spec: - hostNetwork: true - hostPID: true - containers: - # Runs netplugin cleanup container on each Kubernetes node. - - name: contiv-cleanup - image: contiv/netplugin:__CONTIV_VERSION__ - args: - - -pkubernetes - - -r - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/openvswitch - name: etc-openvswitch - readOnly: false - - mountPath: /var/run - name: var-run - readOnly: false - - mountPath: /var/contiv - name: var-contiv - readOnly: false - - mountPath: /opt/cni/bin - name: cni-bin-dir - readOnly: false - - mountPath: /etc/cni/net.d/ - name: etc-cni-dir - readOnly: false - volumes: - # Used by contiv-cleanup - - name: etc-openvswitch - hostPath: - path: /etc/openvswitch - - name: var-run - hostPath: - path: /var/run - - name: var-contiv - hostPath: - path: /var/contiv - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: etc-cni-dir - hostPath: - path: /etc/cni/net.d/ ---- - diff --git a/install/k8s/rbac/etcd.yaml b/install/k8s/rbac/etcd.yaml deleted file mode 100644 index 7217bf4..0000000 --- a/install/k8s/rbac/etcd.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# This manifest installs the Contiv etcd on the kubeadm master. -# If using an external etcd instance, this can be deleted. This uses a DaemonSet -# to force it to run on the master even when the master isn't schedulable, and uses -# nodeSelector to ensure it only runs on the master. -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: contiv-etcd - namespace: kube-system - labels: - k8s-app: contiv-etcd -spec: - template: - metadata: - labels: - k8s-app: contiv-etcd - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # Only run this pod on the master. 
- tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - nodeSelector: - node-role.kubernetes.io/master: "" - hostNetwork: true - containers: - - name: contiv-etcd - image: quay.io/coreos/etcd:__ETCD_VERSION__ - args: - - --name - - contiv0 - - --data-dir - - /var/etcd/contiv-data - - --initial-advertise-peer-urls - - http://__NETMASTER_IP__:6667 - - --listen-peer-urls - - http://0.0.0.0:6667 - - --listen-client-urls - - http://0.0.0.0:6666 - - --advertise-client-urls - - http://__NETMASTER_IP__:6666 - - --initial-cluster - - contiv0=http://__NETMASTER_IP__:6667 - - --initial-cluster-state - - new - volumeMounts: - - name: var-etcd - mountPath: /var/etcd - volumes: - - name: var-etcd - hostPath: - path: /var/etcd - ---- diff --git a/install/k8s/uninstall.sh b/install/k8s/uninstall.sh index 5bd6748..8f44726 100755 --- a/install/k8s/uninstall.sh +++ b/install/k8s/uninstall.sh @@ -7,11 +7,6 @@ fi kubectl="kubectl --kubeconfig /etc/kubernetes/admin.conf" k8sversion=$($kubectl version --short | grep "Server Version") -if [[ "$k8sversion" == *"v1.4"* ]] || [[ "$k8sversion" == *"v1.5"* ]]; then - k8sfolder="k8s1.4" -else - k8sfolder="rbac" -fi if [ "$#" -eq 1 ] && [ "$1" = "-h" ]; then echo "Usage: ./install/k8s/uninstall.sh to uninstall contiv" echo " ./install/k8s/uninstall.sh etcd-cleanup to uninstall contiv and cleanup contiv data" @@ -28,6 +23,6 @@ if [ "$#" -eq 1 ] && [ "$1" = "etcd-cleanup" ]; then rm -rf /var/etcd/contiv-data fi -$kubectl create -f install/k8s/$k8sfolder/cleanup.yaml +$kubectl create -f install/k8s/configs/cleanup.yaml sleep 60 -$kubectl delete -f install/k8s/$k8sfolder/cleanup.yaml +$kubectl delete -f install/k8s/configs/cleanup.yaml diff --git a/scripts/build.sh b/scripts/build.sh index becb078..211808b 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -21,7 +21,7 @@ aci_gw_version=${CONTIV_ACI_GW_VERSION:-"latest"} ansible_image_version=${CONTIV_ANSIBLE_IMAGE:-contiv/install:$DEFAULT_DOWNLOAD_CONTIV_VERSION} auth_proxy_version=${CONTIV_API_PROXY_VERSION:-$DEFAULT_DOWNLOAD_CONTIV_VERSION} docker_version=${CONTIV_DOCKER_VERSION:-1.12.6} -etcd_version=${CONTIV_ETCD_VERSION:-v2.3.8} +etcd_version=${CONTIV_ETCD_VERSION:-v3.2.4} v2plugin_version=${CONTIV_V2PLUGIN_VERSION} # where everything is assembled, always start with a clean dir and clean it up @@ -71,25 +71,28 @@ mkdir -p $binary_cache # only build installer that pulls artifacts over internet if not building # a specific commit of netplugin if [ -z "${NETPLUGIN_BRANCH:-}" ]; then - # Create the minimal tar bundle - tar czf $output_file -C $output_tmp_dir contiv-${CONTIV_INSTALLER_VERSION} - echo -n "Contiv Installer version '$CONTIV_INSTALLER_VERSION' with " - echo "netplugin version '$CONTIV_NETPLUGIN_VERSION' is available " - echo "at '$output_file'" + # Create the minimal tar bundle + tar czf $output_file -C $output_tmp_dir contiv-${CONTIV_INSTALLER_VERSION} + echo -n "Contiv Installer version '$CONTIV_INSTALLER_VERSION' with " + echo "netplugin version '$CONTIV_NETPLUGIN_VERSION' is available " + echo "at '$output_file'" fi # Save the auth proxy & aci-gw images for packaging the full docker images with contiv install binaries if [[ "$(docker images -q contiv/auth_proxy:$auth_proxy_version 2>/dev/null)" == "" || "$pull_images" == "true" ]]; then docker pull contiv/auth_proxy:$auth_proxy_version fi -proxy_image=$(docker images -q contiv/auth_proxy:$auth_proxy_version) -docker save $proxy_image -o $binary_cache/auth-proxy-image.tar +docker save contiv/auth_proxy:$auth_proxy_version -o 
$binary_cache/auth-proxy-image.tar if [[ "$(docker images -q contiv/aci-gw:$aci_gw_version 2>/dev/null)" == "" || "$pull_images" == "true" ]]; then docker pull contiv/aci-gw:$aci_gw_version fi -aci_image=$(docker images -q contiv/aci-gw:$aci_gw_version) -docker save $aci_image -o $binary_cache/aci-gw-image.tar +docker save contiv/aci-gw:$aci_gw_version -o $binary_cache/aci-gw-image.tar + +if [ -f $CONTIV_ARTIFACT_STAGING/netplugin-image-${CONTIV_NETPLUGIN_VERSION}.tar ]; then + cp $CONTIV_ARTIFACT_STAGING/netplugin-image-${CONTIV_NETPLUGIN_VERSION}.tar $binary_cache/ +fi + curl --fail -sL -o $binary_cache/openvswitch-2.5.0-2.el7.x86_64.rpm http://cbs.centos.org/kojifiles/packages/openvswitch/2.5.0/2.el7/x86_64/openvswitch-2.5.0-2.el7.x86_64.rpm curl --fail -sL -o $binary_cache/ovs-common.deb http://mirrors.kernel.org/ubuntu/pool/main/o/openvswitch/openvswitch-common_2.5.2-0ubuntu0.16.04.3_amd64.deb curl --fail -sL -o $binary_cache/ovs-switch.deb http://mirrors.kernel.org/ubuntu/pool/main/o/openvswitch/openvswitch-switch_2.5.2-0ubuntu0.16.04.3_amd64.deb @@ -99,18 +102,29 @@ curl --fail -sL -o $binary_cache/ovs-switch.deb http://mirrors.kernel.org/ubuntu # but there is a symlink to point to the SHA named tarball by it's branch name plugin_tball="${CONTIV_ARTIFACT_STAGING}/$CONTIV_NETPLUGIN_TARBALL_NAME" if [[ -L "${plugin_tball}" ]]; then - # copy the link (so other processes can find the tarball) and the tarball - target_plugin_tball="$(readlink "${plugin_tball}")" - cp -a "${plugin_tball}" "${binary_cache}/" - plugin_tball="${CONTIV_ARTIFACT_STAGING}/${target_plugin_tball}" + # copy the link (so other processes can find the tarball) and the tarball + target_plugin_tball="$(readlink "${plugin_tball}")" + cp -a "${plugin_tball}" "${binary_cache}/" + plugin_tball="${CONTIV_ARTIFACT_STAGING}/${target_plugin_tball}" +fi +if [ -f "${plugin_tball}" ]; then + cp "${plugin_tball}" "${binary_cache}/" fi -cp "${plugin_tball}" "${binary_cache}/" # copy v2plugin assets if built locally on branch if [ -n "${NETPLUGIN_BRANCH:-}" ]; then - cp "${CONTIV_ARTIFACT_STAGING}"/${CONTIV_V2PLUGIN_TARBALL_NAME} \ - "${binary_cache}/" - cp "${CONTIV_ARTIFACT_STAGING}/config.json" "${binary_cache}/" + if [ -L "${CONTIV_ARTIFACT_STAGING}/$CONTIV_V2PLUGIN_TARBALL_NAME" ]; then + cp "${CONTIV_ARTIFACT_STAGING}/${CONTIV_V2PLUGIN_TARBALL_NAME}" "${binary_cache}/" + v2plugin_tball="$(readlink ${CONTIV_ARTIFACT_STAGING}/${CONTIV_V2PLUGIN_TARBALL_NAME})" + if [ -f "$v2plugin_tball" ]; then + cp -a "$v2plugin_tball" "${binary_cache}/" + fi + fi + + if [ -f "${CONTIV_ARTIFACT_STAGING}/config.json" ]; then + cp "${CONTIV_ARTIFACT_STAGING}/config.json" "${binary_cache}/" + fi + fi env_file=$output_dir/install/ansible/env.json @@ -122,8 +136,8 @@ cat $env_file # Create the full tar bundle tar czf $full_output_file -C $output_tmp_dir contiv-${CONTIV_INSTALLER_VERSION} echo -n "Contiv Installer version '$CONTIV_INSTALLER_VERSION' with " -echo "netplugin version '$CONTIV_NETPLUGIN_VERSION' is available " -echo "at '$full_output_file', it includes all contiv assets " -echo "required for installation" +echo "netplugin version '$CONTIV_NETPLUGIN_VERSION' is available " +echo "at '$full_output_file', it includes all contiv assets " +echo "required for installation" echo echo -e "\nSuccess" diff --git a/scripts/download_ansible_repo.sh b/scripts/download_ansible_repo.sh index 4ccae0a..faee55b 100755 --- a/scripts/download_ansible_repo.sh +++ b/scripts/download_ansible_repo.sh @@ -9,5 +9,5 @@ rm -rf "$ANSIBLE_REPO_DIR" mkdir -p 
"$ANSIBLE_REPO_DIR" "$CONTIV_ARTIFACT_STAGING" echo downloading ${CONTIV_ANSIBLE_OWNER}/ansible commit: $CONTIV_ANSIBLE_COMMIT -curl --fail -sL https://api.github.com/repos/${CONTIV_ANSIBLE_OWNER}/ansible/tarball/$CONTIV_ANSIBLE_COMMIT \ - | tar --strip-components 1 -C "$ANSIBLE_REPO_DIR" -z -x +curl --fail -sL https://api.github.com/repos/${CONTIV_ANSIBLE_OWNER}/ansible/tarball/$CONTIV_ANSIBLE_COMMIT | + tar --strip-components 1 -C "$ANSIBLE_REPO_DIR" -z -x diff --git a/scripts/kubeadm_test.sh b/scripts/kubeadm_test.sh index d4117f0..26122d2 100644 --- a/scripts/kubeadm_test.sh +++ b/scripts/kubeadm_test.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -euo pipefail +set -exuo pipefail # Get the master node IP from the yml file generated by vagrant contiv_master=$(grep -B 3 master cluster/.cfg_kubeadm.yaml | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}" | xargs) @@ -19,9 +19,23 @@ user=${CONTIV_SSH_USER:-"$def_user"} install_version="contiv-${BUILD_VERSION:-devbuild}" default_net_cidr="${DEFAULT_NET:-20.1.1.0/24}" default_net_gw="${DEFAULT_NET:-20.1.1.1}" +release_local_tarball="contiv-full-${BUILD_VERSION}.tgz" + +if [ -f "release/${release_local_tarball}" ]; then + # extract docker images under shared mount direcotry + tar -xzf release/$release_local_tarball ${install_version}/contiv_cache/*.tar + pushd cluster + for vm in $(vagrant status | awk '/kubeadm/{print $1}'); do + keyfile=$(mktemp) + vagrant ssh-config $vm >$keyfile + for img in $(ls -1 ../${install_version}/contiv_cache/*.tar); do + scp -F $keyfile $img $vm:/tmp/ + ssh -F $keyfile $vm -- sudo docker load -i /tmp/$(basename $img) + done + rm -f $keyfile + done + popd -# For local builds, copy the build binaries to the vagrant node, using the vagrant ssh-key -if [ -f "release/${install_version}.tgz" ]; then pushd cluster ssh_key=${CONTIV_SSH_KEY:-"$def_key"} if [ "$ssh_key" == "" ]; then @@ -32,7 +46,7 @@ if [ -f "release/${install_version}.tgz" ]; then ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" # Copy the installation folder - scp $ssh_opts -i $ssh_key release/${install_version}.tgz $user@$contiv_master:$dest_path + scp $ssh_opts -i $ssh_key release/${release_local_tarball} $user@$contiv_master:$dest_path/${install_version}.tgz curl_cmd="echo 'Devbuild'" else # github redirects you to a signed AWS URL, so we need to follow redirects with -L @@ -69,7 +83,7 @@ for i in {0..20}; do cat < exists ##### + +if [ -z "${NETPLUGIN_BRANCH:-}" ]; then + echo "Trying to use dockerhub contiv/netplugin:${CONTIV_NETPLUGIN_VERSION}" + # ensure the image exists + http_rc=$(curl -L -s -w "%{http_code}" -o /dev/null https://hub.docker.com/v2/repositories/contiv/netplugin/tags/${CONTIV_NETPLUGIN_VERSION}/) + if [ "$http_rc" = 200 ]; then + echo "Found contiv/netplugin:${CONTIV_NETPLUGIN_VERSION} on dockerhub, exit 0" + else + echo "Failed to find contiv/netplugin:${CONTIV_NETPLUGIN_VERSION} on dockerhub, return code $http_rc, exit 1" + exit 1 + fi +else + # tempdir for building and cleanup on exit + netplugin_tmp_dir="$(mktemp -d)" + trap 'rm -rf ${netplugin_tmp_dir}' EXIT + + echo Cloning ${NETPLUGIN_OWNER}/netplugin branch ${NETPLUGIN_BRANCH} + # about 3x faster to pull the HEAD of a branch with no history + git clone --branch ${NETPLUGIN_BRANCH} --depth 1 \ + https://github.com/${NETPLUGIN_OWNER}/netplugin.git \ + ${netplugin_tmp_dir}/netplugin + + # Try to build docker image locally + cd $netplugin_tmp_dir/netplugin + make host-build-docker-image + + # the new built image tagged contivbase:latest + # below codes probably should goto 
the netplugin repo; it lives here as a stopgap + docker tag contivbase:latest contiv/netplugin:${CONTIV_NETPLUGIN_VERSION} + docker save contiv/netplugin:${CONTIV_NETPLUGIN_VERSION} -o $CONTIV_ARTIFACT_STAGING/netplugin-image-${CONTIV_NETPLUGIN_VERSION}.tar +fi +#### ENSURE contiv/auth_proxy:<version> exists ##### + +auth_proxy_version=${CONTIV_API_PROXY_VERSION:-$DEFAULT_DOWNLOAD_CONTIV_VERSION} + +if [ -z "${NETPLUGIN_AUTH_PROXY_BRANCH:-}" ]; then + echo "Trying to use dockerhub contiv/auth_proxy:${auth_proxy_version}" + # ensure the image exists + http_rc=$(curl -L -s -w "%{http_code}" -o /dev/null https://hub.docker.com/v2/repositories/contiv/auth_proxy/tags/${auth_proxy_version}/) + if [ "$http_rc" = 200 ]; then + echo "Found contiv/auth_proxy:${auth_proxy_version} on dockerhub, exit 0" + else + echo "Failed to find contiv/auth_proxy:${auth_proxy_version} on dockerhub, return code $http_rc, exit 1" + exit 1 + fi +else + # tempdir for building and cleanup on exit + auth_proxy_tmp_dir="$(mktemp -d)" + trap 'rm -rf ${auth_proxy_tmp_dir}' EXIT + + echo Cloning ${NETPLUGIN_AUTH_PROXY_OWNER}/auth_proxy branch ${NETPLUGIN_AUTH_PROXY_BRANCH} + # about 3x faster to pull the HEAD of a branch with no history + git clone --branch ${NETPLUGIN_AUTH_PROXY_BRANCH} --depth 1 \ + https://github.com/${NETPLUGIN_AUTH_PROXY_OWNER}/auth_proxy.git \ + ${auth_proxy_tmp_dir}/auth_proxy + + # Try to build docker image locally + cd $auth_proxy_tmp_dir/auth_proxy + # this also checks out the contiv-ui master branch + BUILD_VERSION=master make build + # tag with the same version as netplugin + docker tag contiv/auth_proxy:master contiv/auth_proxy:${auth_proxy_version} +fi diff --git a/scripts/release.sh b/scripts/release.sh index 52625e8..9fbd60e 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -53,7 +53,7 @@ if [ ! -f ${TAR_FILE} ] || [ ! -f ${TAR_FILE2} ]; then fi set -x -( (github-release -v release $pre_release -r install -t $BUILD_VERSION -d "**Changelog**
$changelog") \ - && ( (github-release -v upload -r install -t $BUILD_VERSION -n $TAR_FILENAME -f $TAR_FILE \ - && github-release -v upload -r install -t $BUILD_VERSION -n $TAR_FILENAME2 -f $TAR_FILE2) \ - || github-release -v delete -r install -t $BUILD_VERSION)) || exit 1 +( (github-release -v release $pre_release -r install -t $BUILD_VERSION -d "**Changelog**
$changelog") && + ( (github-release -v upload -r install -t $BUILD_VERSION -n $TAR_FILENAME -f $TAR_FILE && + github-release -v upload -r install -t $BUILD_VERSION -n $TAR_FILENAME2 -f $TAR_FILE2) || + github-release -v delete -r install -t $BUILD_VERSION)) || exit 1 diff --git a/scripts/swarm_mode_test.sh b/scripts/swarm_mode_test.sh index a808d25..b09530b 100644 --- a/scripts/swarm_mode_test.sh +++ b/scripts/swarm_mode_test.sh @@ -31,13 +31,13 @@ release_name="contiv-${BUILD_VERSION:-devbuild}" release_tarball="${release_name}.tgz" release_local_tarball="contiv-full-${BUILD_VERSION}.tgz" if [ -f "${release_local_tarball}" ]; then - tar oxf "${release_local_tarball}" + tar oxf "${release_local_tarball}" else - if [ ! -f "${release_tarball}" ]; then - # For release builds, get the build from github releases - curl -L -O https://github.com/contiv/install/releases/download/${BUILD_VERSION}/${release_name}.tgz - fi - tar oxf "${release_name}.tgz" + if [ ! -f "${release_tarball}" ]; then + # For release builds, get the build from github releases + curl -L -O https://github.com/contiv/install/releases/download/${BUILD_VERSION}/${release_name}.tgz + fi + tar oxf "${release_name}.tgz" fi cd $release_name diff --git a/scripts/unpack-installer.sh b/scripts/unpack-installer.sh index 8c6a2c7..904cf78 100755 --- a/scripts/unpack-installer.sh +++ b/scripts/unpack-installer.sh @@ -28,12 +28,12 @@ rm -rf ${release_name} # this tarball has a cache of binary assets release_tarball="contiv-full-${BUILD_VERSION}.tgz" if [ ! -f "${release_tarball}" ]; then - release_tarball="${release_name}.tgz" + release_tarball="${release_name}.tgz" fi if [ ! -f "${release_tarball}" ]; then - # For release builds, get the build from github releases - echo Downloading ${release_tarball} from GitHub releases - curl --fail -L -O https://github.com/contiv/install/releases/download/${BUILD_VERSION}/${release_tarball} + # For release builds, get the build from github releases + echo Downloading ${release_tarball} from GitHub releases + curl --fail -L -O https://github.com/contiv/install/releases/download/${BUILD_VERSION}/${release_tarball} fi echo Unpacking ${release_tarball}