deploy_resources_on_kind_cluster.sh
#!/bin/bash -e

# Clean up background jobs on exit.
set -m
function cleanup() {
  rc=$?
  jobs -p | xargs --no-run-if-empty kill
  exit $rc
}
trap 'cleanup' SIGINT SIGHUP SIGTERM EXIT
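
# wait_pod_ready waits (for up to 300s) for the pod(s) matched by its arguments
# to become Ready; on timeout it dumps some diagnostics and returns non-zero.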
function wait_pod_ready() {
  args="$@"
  # Start background process, waiting for the pod to be ready.
  (
    # Wait in a loop because the command fails fast if the pod isn't visible yet.
    while ! ${kubectl} wait pod --for=condition=Ready --timeout=30s $args; do
      echo "Waiting for pod $args to be ready..."
      ${kubectl} get po -o wide $args || true
      sleep 1
    done
    ${kubectl} wait pod --for=condition=Ready --timeout=300s $args
  ) & pid=$!

  # Start a second background process that implements the actual timeout.
  ( sleep 300; kill $pid ) 2>/dev/null & watchdog=$!

  set +e
  wait $pid 2>/dev/null
  rc=$?
  kill $watchdog 2>/dev/null
  wait $watchdog 2>/dev/null

  if [ $rc -ne 0 ]; then
    echo "Pod $args failed to become ready within 300s"
    echo "collecting diags..."
    ${kubectl} get po -A -o wide
    ${kubectl} describe po $args
    ${kubectl} logs $args
    echo "Pod $args failed to become ready within 300s; diags above ^^"
  fi
  set -e

  return $rc
}
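
# Usage: wait_pod_ready <pod name or label selector> [-n <namespace>], for example:
#   wait_pod_ready -l k8s-app=calico-node -n calico-system
#   wait_pod_ready calicoctl -n kube-system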
# test directory.
TEST_DIR=./tests/k8st
ARCH=${ARCH:-amd64}
GIT_VERSION=${GIT_VERSION:-`git describe --tags --dirty --always --abbrev=12`}
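# The helm binary and the operator chart tarball are expected to already be in ../bin.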
HELM=../bin/helm
CHART=../bin/tigera-operator-$GIT_VERSION.tgz
# kubectl binary.
: ${kubectl:=../hack/test/kind/kubectl}
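
# Give each kind node a static IPv6 address so that the dual-stack webserver
# test further down can exercise IPv6 as well as IPv4 connectivity.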
echo "Set ipv6 address on each node"
docker exec kind-control-plane ip -6 addr replace 2001:20::8/64 dev eth0
docker exec kind-worker ip -6 addr replace 2001:20::1/64 dev eth0
docker exec kind-worker2 ip -6 addr replace 2001:20::2/64 dev eth0
docker exec kind-worker3 ip -6 addr replace 2001:20::3/64 dev eth0
echo
echo "Load calico/node docker images onto each node"
$TEST_DIR/load_images_on_kind_cluster.sh
echo "Install additional permissions for BGP password"
${kubectl} apply -f $TEST_DIR/infra/additional-rbac.yaml
echo
echo "Install Calico using the helm chart"
$HELM install calico $CHART -f $TEST_DIR/infra/values.yaml -n tigera-operator --create-namespace
echo "Install calicoctl as a pod"
${kubectl} apply -f $TEST_DIR/infra/calicoctl.yaml
echo
echo "Wait for Calico to be ready..."
wait_pod_ready -l k8s-app=calico-node -n calico-system
wait_pod_ready -l k8s-app=calico-kube-controllers -n calico-system
wait_pod_ready -l k8s-app=calico-apiserver -n calico-apiserver
wait_pod_ready -l k8s-app=kube-dns -n kube-system
wait_pod_ready calicoctl -n kube-system
echo "Calico is running."
echo
echo "Install MetalLB controller for allocating LoadBalancer IPs"
${kubectl} create ns metallb-system || true
${kubectl} apply -f $TEST_DIR/infra/metallb.yaml
${kubectl} apply -f $TEST_DIR/infra/metallb-config.yaml
# Create and monitor a test webserver service for dual stack.
echo "Create test-webserver deployment..."
${kubectl} apply -f tests/k8st/infra/test-webserver.yaml
echo "Wait for client and webserver pods to be ready..."
wait_pod_ready -l pod-name=client
wait_pod_ready -l app=webserver
echo "client and webserver pods are running."
echo
# Show all the running pods for diags purposes.
${kubectl} get po --all-namespaces -o wide
${kubectl} get svc
# Run IPv4 and IPv6 connection tests.
function test_connection() {
  local svc="webserver-ipv$1"
  output=$(${kubectl} exec client -- wget $svc -T 10 -O -)
  echo $output
  if [[ $output != *test-webserver* ]]; then
    echo "connection to $svc service failed"
    exit 1
  fi
}
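
# Check that the client pod can reach the webserver service over both IPv4 and IPv6.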
test_connection 4
test_connection 6
# At the end of it all, scale down the operator so that it doesn't
# make changes to the cluster. Some of our tests modify calico/node, etc.
# We should remove this once we fix up those tests.
${kubectl} scale deployment -n tigera-operator tigera-operator --replicas=0