
Commit 4aae3dc

Update k8s installer for refactored contiv components (contiv#317)

* Update k8s installer for refactored contiv components

Dropped code for k8s 1.4 and updated the scripts and YAML files for the contiv services. Added branch-based install testing for k8s and changed the k8s gating process. Also updated etcd to v3.2.4; it still behaves as etcd2 because contiv talks to it over the v2 API, and the newer container is needed only because it includes sh. Also ran shfmt over all bash scripts and bumped the contiv version to 1.2.0.

Signed-off-by: Wei Tie <[email protected]>
1 parent 6c8a0d9 commit 4aae3dc

33 files changed: +628, -838 lines
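
Many of the hunks below are whitespace-only; they come from running shfmt over every bash script in the repo, as the commit message notes. A minimal sketch of that reformatting pass, assuming a plain invocation (the exact flags used are not recorded in the commit):

    # Reformat all tracked shell scripts in place; the flag choices are assumptions:
    # -w rewrites files, -i 4 indents with 4 spaces instead of shfmt's default tabs.
    find . -name '*.sh' -not -path './vendor/*' -print0 |
        xargs -0 shfmt -w -i 4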

Makefile (+8 -3)

@@ -5,7 +5,7 @@ export CONTIV_INSTALLER_VERSION ?= $(BUILD_VERSION)
 # downloaded and built assets intended to go in installer by build.sh
 export CONTIV_ARTIFACT_STAGING := $(PWD)/artifact_staging
 # some assets are retrieved from GitHub, this is the default version to fetch
-export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.1.7
+export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.2.0
 export CONTIV_ACI_GW_VERSION ?= latest
 export NETPLUGIN_OWNER ?= contiv
 # setting NETPLUGIN_BRANCH compiles that commit on demand,
@@ -107,10 +107,15 @@ release-test-swarm-mode: build
	make cluster-swarm-mode
	make install-test-swarm-mode
 
+# create k8s release testing image (do not contains ansible)
+k8s-build: prepare-netplugin-images assemble-build
+
+prepare-netplugin-images:
+	@bash ./scripts/prepare_netplugin_images.sh
 # Create a build and test the release installation on a vagrant cluster
 # TODO: The vagrant part of this can be optimized by taking snapshots instead
 # of creating a new set of VMs for each case
-release-test-kubeadm: build
+release-test-kubeadm: k8s-build
	# Test kubeadm (centos by default)
	make cluster-kubeadm
	make install-test-kubeadm
@@ -152,4 +157,4 @@ install-test-legacy-swarm:
 ci: release-test-kubeadm
 ci-old: release-test-swarm-mode release-test-kubeadm release-test-legacy-swarm
 
-.PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy
+.PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy k8s-build prepare-netplugin-images
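
The two new targets separate the k8s artifacts from the ansible-based installers: prepare-netplugin-images stages the netplugin container images via scripts/prepare_netplugin_images.sh, and k8s-build assembles an installer bundle without the ansible pieces, which release-test-kubeadm now depends on. A short sketch of how these targets are exercised (target names are taken from the Makefile above):

    # Build the k8s release-testing image (no ansible), then run the kubeadm gate.
    make k8s-build              # runs prepare-netplugin-images and assemble-build
    make release-test-kubeadm   # k8s-build, then cluster-kubeadm + install-test-kubeadm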

cluster/docker17/bootstrap_centos.sh (+2 -2)

@@ -9,8 +9,8 @@ fi
 
 yum install -y yum-utils
 yum-config-manager \
-    --add-repo \
-    https://download.docker.com/linux/centos/docker-ce.repo
+    --add-repo \
+    https://download.docker.com/linux/centos/docker-ce.repo
 
 yum makecache fast
 yum -y install docker-ce

cluster/docker17/centos_docker_install.sh (+5 -6)

@@ -6,8 +6,8 @@
 set -euo pipefail
 
 if [ $EUID -ne 0 ]; then
-    echo "Please run this script as root user"
-    exit 1
+    echo "Please run this script as root user"
+    exit 1
 fi
 
 # Install pre-reqs
@@ -16,22 +16,21 @@ yum install -y yum-utils device-mapper-persistent-data lvm2
 
 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
 
-
 # Install Docker
-# If you require a specific version, comment out the first line and uncomment
+# If you require a specific version, comment out the first line and uncomment
 # the other one. Fill in the version you want.
 yum -y install docker-ce
 #sudo yum install docker-ce-<VERSION>
 
 # Post-install steps
-# add admin user to docker group
+# add admin user to docker group
 usermod -aG docker $SUDO_USER
 
 # add /etc/docker/ if it doesn't exist
 mkdir -p /etc/docker
 
 # add (and create) daemon.json with entry for storage-device
-cat <<EOT >> /etc/docker/daemon.json
+cat <<EOT >>/etc/docker/daemon.json
 {
 "storage-driver": "devicemapper"
 }

cluster/docker17/master.sh (+4 -4)

@@ -1,5 +1,5 @@
 docker swarm init --advertise-addr $1
-docker swarm join-token manager | \
-    grep -A 20 "docker swarm join" > $2/manager.sh
-docker swarm join-token worker | \
-    grep -A 20 "docker swarm join" > $2/worker.sh
+docker swarm join-token manager |
+    grep -A 20 "docker swarm join" >$2/manager.sh
+docker swarm join-token worker |
+    grep -A 20 "docker swarm join" >$2/worker.sh
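
master.sh initializes the swarm and captures the manager and worker join commands into the directory passed as $2, so the remaining nodes can join by simply running the generated scripts. A sketch of that flow, with an illustrative address and shared directory:

    # On the manager node ($1 = advertise address, $2 = output directory):
    bash cluster/docker17/master.sh 192.168.2.10 /shared

    # On each additional node, run the captured join command:
    bash /shared/worker.sh     # or /shared/manager.sh for extra managers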

cluster/k8s1.6/k8smaster.sh (+3 -3)

@@ -2,6 +2,6 @@ kubeadm init --token=$1 --apiserver-advertise-address=$2 --skip-preflight-checks
 if [ "$#" -eq 4 ]; then
     cp /etc/kubernetes/admin.conf /home/$4
     chown $(id -u $4):$(id -g $4) /home/$4/admin.conf
-    echo "export KUBECONFIG=/home/$4/admin.conf" >> /home/$4/.$(basename $SHELL)rc
-    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.$(basename $SHELL)rc
-fi
+    echo "export KUBECONFIG=/home/$4/admin.conf" >>/home/$4/.$(basename $SHELL)rc
+    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>~/.$(basename $SHELL)rc
+fi

cluster/k8s1.8/k8smaster.sh (+3 -3)

@@ -2,6 +2,6 @@ kubeadm init --token=$1 --apiserver-advertise-address=$2 --skip-preflight-checks
 if [ "$#" -eq 4 ]; then
     cp /etc/kubernetes/admin.conf /home/$4
     chown $(id -u $4):$(id -g $4) /home/$4/admin.conf
-    echo "export KUBECONFIG=/home/$4/admin.conf" >> /home/$4/.$(basename $SHELL)rc
-    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.$(basename $SHELL)rc
-fi
+    echo "export KUBECONFIG=/home/$4/admin.conf" >>/home/$4/.$(basename $SHELL)rc
+    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>~/.$(basename $SHELL)rc
+fi

install/ansible/env.json (+2 -1)

@@ -6,7 +6,8 @@
     "etcd_peers_group": "netplugin-master",
     "service_vip": "__NETMASTER_IP__",
     "validate_certs": false,
-    "cluster_store": "__CLUSTER_STORE__",
+    "cluster_store_driver": "__CLUSTER_STORE_TYPE__",
+    "cluster_store_url": "__CLUSTER_STORE_URLS__",
     "auth_proxy_image": "contiv/auth_proxy:__API_PROXY_VERSION__",
     "docker_reset_container_state": __DOCKER_RESET_CONTAINER_STATE__,
     "docker_reset_image_state": __DOCKER_RESET_IMAGE_STATE__,

install/ansible/install.sh (+37 -7)

@@ -37,7 +37,7 @@ error_ret() {
     exit 1
 }
 
-while getopts ":n:a:im:d:v:ps:" opt; do
+while getopts ":n:a:im:d:v:pe:c:s:" opt; do
     case $opt in
         n)
             netmaster=$OPTARG
@@ -67,8 +67,29 @@ while getopts ":n:a:im:d:v:ps:" opt; do
         p)
             contiv_v2plugin_install=true
             ;;
+        e)
+            # etcd endpoint option
+            cluster_store_type=etcd
+            cluster_store_urls=$OPTARG
+            install_etcd=false
+            ;;
+        c)
+            # consul endpoint option
+            cluster_store_type=consul
+            cluster_store_urls=$OPTARG
+            install_etcd=false
+            ;;
         s)
-            cluster_store=$OPTARG
+            # backward compatibility
+            echo "-s option has been deprecated, use -e or -c instead"
+            local cluster_store=$OPTARG
+            if [[ "$cluster_store" =~ ^etcd://.+ ]]; then
+                cluster_store_type=etcd
+                cluster_store_urls=$(echo $cluster_store | sed s/etcd/http/)
+            elif [[ "$cluster_store" =~ ^consul://.+ ]]; then
+                cluster_store_type=consul
+                cluster_store_urls=$(echo $cluster_store | sed s/consul/http/)
+            fi
             install_etcd=false
             ;;
         :)
@@ -88,6 +109,15 @@ mkdir -p "$inventory"
 host_inventory="$inventory/contiv_hosts"
 node_info="$inventory/contiv_nodes"
 
+# TODO: use python to generate the inventory
+# This python generated inventory contains
+# 1. groups and host
+# 2. ssh info for each host
+# 3. control interface for each host
+# 4. data interface for each host
+# 5. aci info
+# 6. fwd_mode(bridge/routing), net_mode(vlan/vxlan), contiv_network_mode(standalone/aci)
+# then below sed against env_file set rest of them, they should be combined as one
 ./install/genInventoryFile.py "$contiv_config" "$host_inventory" "$node_info" $contiv_network_mode $fwd_mode
 
 if [ "$netmaster" = "" ]; then
@@ -131,13 +161,15 @@ if [ "$service_vip" == "" ]; then
     service_vip=$netmaster
 fi
 
-if [ "$cluster_store" == "" ]; then
-    cluster_store="etcd://localhost:2379"
+if [ "$cluster_store" = "" ]; then
+    cluster_store_type="etcd"
+    cluster_store_urls="http://localhost:2379"
 fi
 
 # variables already replaced by build.sh will not pattern match
 sed -i.bak 's#__NETMASTER_IP__#'"$service_vip"'#g' "$env_file"
-sed -i.bak 's#__CLUSTER_STORE__#'"$cluster_store"'#g' "$env_file"
+sed -i.bak 's#__CLUSTER_STORE_TYPE__#'"$cluster_store_type"'#g' "$env_file"
+sed -i.bak 's#__CLUSTER_STORE_URLS__#'"$cluster_store_urls"'#g' "$env_file"
 sed -i.bak 's#__DOCKER_RESET_CONTAINER_STATE__#false#g' "$env_file"
 sed -i.bak 's#__DOCKER_RESET_IMAGE_STATE__#false#g' "$env_file"
 sed -i.bak 's#__ETCD_CLEANUP_STATE__#false#g' "$env_file"
@@ -205,8 +237,6 @@ if [ "$unreachable" = "" ] && [ "$failed" = "" ]; then
     echo "Please export DOCKER_HOST=tcp://$netmaster:2375 in your shell before proceeding"
     echo "Contiv UI is available at https://$netmaster:10000"
     echo "Please use the first run wizard or configure the setup as follows:"
-    echo " Configure forwarding mode (optional, default is bridge)."
-    echo " netctl global set --fwd-mode routing"
     echo " Configure ACI mode (optional)"
     echo " netctl global set --fabric-mode aci --vlan-range <start>-<end>"
     echo " Create a default network"

install/ansible/install_swarm.sh (+51 -49)

@@ -73,53 +73,53 @@ mkdir -p "$src_conf_path"
 cluster_param=""
 while getopts ":f:n:a:e:ipm:d:v:u:c:k:s:" opt; do
     case $opt in
-        f)
-            cp "$OPTARG" "$host_contiv_config"
-            ;;
-        n)
-            netmaster=$OPTARG
-            ;;
-        a)
-            ans_opts="$OPTARG"
-            ;;
-        e)
-            ans_key=$OPTARG
-            ;;
-        u)
-            ans_user=$OPTARG
-            ;;
-        m)
-            contiv_network_mode=$OPTARG
-            ;;
-        d)
-            fwd_mode=$OPTARG
-            ;;
-        v)
-            aci_image=$OPTARG
-            ;;
-        s)
-            cluster_param="-s $OPTARG"
-            ;;
-
-        i)
-            install_scheduler="-i"
-            ;;
-        p)
-            v2plugin_param="-p"
-            ;;
-        c)
-            cp "$OPTARG" "$host_tls_cert"
-            ;;
-        k)
-            cp "$OPTARG" "$host_tls_key"
-            ;;
-        :)
-            echo "An argument required for $OPTARG was not passed"
-            usage
-            ;;
-        ?)
-            usage
-            ;;
+        f)
+            cp "$OPTARG" "$host_contiv_config"
+            ;;
+        n)
+            netmaster=$OPTARG
+            ;;
+        a)
+            ans_opts="$OPTARG"
+            ;;
+        e)
+            ans_key=$OPTARG
+            ;;
+        u)
+            ans_user=$OPTARG
+            ;;
+        m)
+            contiv_network_mode=$OPTARG
+            ;;
+        d)
+            fwd_mode=$OPTARG
+            ;;
+        v)
+            aci_image=$OPTARG
+            ;;
+        s)
+            cluster_param="-s $OPTARG"
+            ;;
+
+        i)
+            install_scheduler="-i"
+            ;;
+        p)
+            v2plugin_param="-p"
+            ;;
+        c)
+            cp "$OPTARG" "$host_tls_cert"
+            ;;
+        k)
+            cp "$OPTARG" "$host_tls_key"
+            ;;
+        :)
+            echo "An argument required for $OPTARG was not passed"
+            usage
+            ;;
+        ?)
+            usage
+            ;;
     esac
 done
 
@@ -148,7 +148,7 @@ fi
 if [ "$ans_opts" == "" ]; then
     ans_opts="--private-key $def_ans_key -u $ans_user"
 else
-    ans_opts+=" --private-key $def_ans_key -u $ans_user"
+    ans_opts+=" --private-key $def_ans_key -u $ans_user"
 fi
 
 # Generate SSL certs for auth proxy
@@ -172,4 +172,6 @@ mounts[5]="$src_conf_path:$container_conf_path:Z"
 mounts[6]="-v"
 mounts[7]="$(pwd)/contiv_cache:/var/contiv_cache:Z"
 set -x
-docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param $cluster_param $v2plugin_param
+docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh \
+    $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode \
+    -d $fwd_mode $aci_param $cluster_param $v2plugin_param
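
install_swarm.sh still accepts -s and simply forwards it (as cluster_param) to the containerized install.sh above, so the deprecation warning surfaces from there. A sketch of how the wrapped command expands, with illustrative values for the variables it assembles:

    # Hypothetical values; the real ones are assembled from the options above.
    image_name=contiv/install:1.2.0
    netmaster_param="-n 192.168.2.10"
    cluster_param="-s etcd://192.168.2.10:2379"

    docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh \
        $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode \
        -d $fwd_mode $aci_param $cluster_param $v2plugin_param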
