#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# hack script for running a kind e2e
# must be run with a kubernetes checkout in $PWD (i.e. from within the checkout)
# TODO(bentheelder): replace this with kubetest integration
# Usage: SKIP="ginkgo skip regex" FOCUS="ginkgo focus regex" kind-e2e.sh
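# Example invocation (values are illustrative, not required defaults):
#   FOCUS="\[Conformance\]" SKIP="\[Serial\]" PARALLEL=true IP_FAMILY=ipv4 ./e2e.sh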
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
# our exit handler (trap)
cleanup() {
  # always attempt to dump logs
  kind "export" logs "${ARTIFACTS}/logs" || true
  # KIND_IS_UP is set to true once we run kind create
  if [[ "${KIND_IS_UP:-}" = true ]]; then
    kind delete cluster || true
  fi
  # clean up the copied e2e.test binary
  rm -f _output/bin/e2e.test
  # remove our tempdir
  # NOTE: this needs to be last, or it will prevent kind delete
  if [[ -n "${TMP_DIR:-}" ]]; then
    rm -rf "${TMP_DIR}"
  fi
}

# install kind to a tempdir GOPATH from this script's kind checkout
install_kind() {
  # install `kind` to tempdir
  TMP_DIR=$(mktemp -d)
  # ensure bin dir
  mkdir -p "${TMP_DIR}/bin"
  # install
  local script_dir
  script_dir="$(dirname "${BASH_SOURCE[0]}")"
  make -C "${script_dir}/../.." install INSTALL_PATH="${TMP_DIR}/bin"
  # ensure it is in path
  PATH="${TMP_DIR}/bin:${PATH}"
  export PATH
}

# build kubernetes / node image, e2e binaries
build() {
  # possibly enable bazel build caching before building kubernetes
  BAZEL_REMOTE_CACHE_ENABLED=${BAZEL_REMOTE_CACHE_ENABLED:-false}
  if [[ "${BAZEL_REMOTE_CACHE_ENABLED}" == "true" ]]; then
    # run the script in the kubekins image, do not fail if it fails
    /usr/local/bin/create_bazel_cache_rcs.sh || true
  fi
  # build the node image w/ kubernetes
  # TODO(bentheelder): remove the kube-root flag after we make kind try to
  # find this in a go module compatible way
  kind build node-image --type=bazel \
    --kube-root="$(go env GOPATH)/src/k8s.io/kubernetes"
  # make sure we have e2e requirements
  #make all WHAT="cmd/kubectl test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo"
  bazel build //cmd/kubectl //test/e2e:e2e.test //vendor/github.com/onsi/ginkgo/ginkgo
  # e2e.test does not show up in a path with the platform in it and will not be
  # found by kube::util::find-binary, so we copy it to an acceptable location
  # until this is fixed upstream
  # https://github.com/kubernetes/kubernetes/issues/68306
  mkdir -p "_output/bin/"
  cp "bazel-bin/test/e2e/e2e.test" "_output/bin/"
  # try to make sure the kubectl we built is in PATH
  local maybe_kubectl
  maybe_kubectl="$(find "${PWD}/bazel-bin/" -name "kubectl" -type f)"
  if [[ -n "${maybe_kubectl}" ]]; then
    PATH="$(dirname "${maybe_kubectl}"):${PATH}"
    export PATH
  fi
  # release some memory after building
  sync || true
  echo 1 > /proc/sys/vm/drop_caches || true
}

# up a cluster with kind
create_cluster() {
  # create the config file
  cat <<EOF > "${ARTIFACTS}/kind-config.yaml"
# config for 1 control plane node and 2 workers
# necessary for conformance
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
networking:
  ipFamily: ${IP_FAMILY:-ipv4}
nodes:
# the control plane node
- role: control-plane
- role: worker
- role: worker
EOF
  # mark the cluster as up for cleanup
  # even if kind create fails, kind delete can clean up after it
  KIND_IS_UP=true
  # actually create, with:
  # - do not delete created nodes from a failed cluster create (for debugging)
  # - wait up to one minute for the nodes to be "Ready"
  # - set log level to debug
  # - use our multi-node config
  kind create cluster \
    --image=kindest/node:latest \
    --retain \
    --wait=1m \
    --loglevel=debug \
    "--config=${ARTIFACTS}/kind-config.yaml"
}

# run e2es with kubetest
run_tests() {
  # export the KUBECONFIG
  KUBECONFIG="$(kind get kubeconfig-path)"
  export KUBECONFIG
  if [[ "${IP_FAMILY:-ipv4}" == "ipv6" ]]; then
    # IPv6 clusters need some CoreDNS changes in order to work in k8s CI:
    # 1. k8s CI doesn't offer IPv6 connectivity, so CoreDNS should be configured
    #    to work in an offline environment:
    #    https://github.com/coredns/coredns/issues/2494#issuecomment-457215452
    # 2. k8s CI adds the following domains to the resolv.conf search field:
    #    c.k8s-prow-builds.internal google.internal.
    #    CoreDNS should handle those domains and answer with NXDOMAIN instead of
    #    SERVFAIL, otherwise pods stop trying to resolve the domain.
    # The difference against the default CoreDNS config in k8s 1.15 is:
    # < kubernetes cluster.local in-addr.arpa ip6.arpa {
    # ---
    # > kubernetes cluster.local internal in-addr.arpa ip6.arpa {
    # 9,10d9
    # < upstream
    # < fallthrough in-addr.arpa ip6.arpa
    # 13,15d11
    # < forward . /etc/resolv.conf
    # < loop
    # 21c17,20
    cat <<EOF | kubectl apply -f -
---
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local internal in-addr.arpa ip6.arpa {
           pods insecure
        }
        prometheus :9153
        cache 30
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
---
EOF
  fi
  # base kubetest args
  KUBETEST_ARGS="--provider=skeleton --test --check-version-skew=false"
  # get the number of worker nodes by counting nodes that do not carry the
  # master taint (node-role.kubernetes.io/master)
  # TODO(bentheelder): this is kinda gross
  NUM_NODES="$(kubectl get nodes \
    -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}' \
    | grep -cv "node-role.kubernetes.io/master" \
  )"
  # ginkgo regexes
  SKIP="${SKIP:-}"
  FOCUS="${FOCUS:-"\\[Conformance\\]"}"
  # if PARALLEL=true, skip serial tests and set --ginkgo-parallel
  PARALLEL="${PARALLEL:-false}"
  if [[ "${PARALLEL}" == "true" ]]; then
    if [[ -z "${SKIP}" ]]; then
      SKIP="\\[Serial\\]"
    else
      SKIP="\\[Serial\\]|${SKIP}"
    fi
    KUBETEST_ARGS="${KUBETEST_ARGS} --ginkgo-parallel"
  fi
  # add ginkgo args
  KUBETEST_ARGS="${KUBETEST_ARGS} --test_args=\"--ginkgo.focus=${FOCUS} --ginkgo.skip=${SKIP} --report-dir=${ARTIFACTS} --disable-log-dump=true --num-nodes=${NUM_NODES}\""
  # setting this env prevents the ginkgo e2e suite from trying to run provider setup
  export KUBERNETES_CONFORMANCE_TEST="y"
  # run kubetest; if it fails, the EXIT trap cleans up and we exit with failure
  eval "kubetest ${KUBETEST_ARGS}"
}

# setup kind, build kubernetes, create a cluster, run the e2es
main() {
  # ensure the artifacts directory exists when not running in CI
  ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
  mkdir -p "${ARTIFACTS}"
  export ARTIFACTS
  # now build and run the cluster and tests
  trap cleanup EXIT
  install_kind
  build
  create_cluster
  run_tests
}

main