Skip to content
This repository was archived by the owner on Apr 25, 2023. It is now read-only.

fix: broken upgrade path from previous versions #1346

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,13 @@ spec:
{{- if .Values.featureGates }}
- name: PushReconciler
configuration: {{ .Values.featureGates.PushReconciler | default "Enabled" | quote }}
- name: RawResourceStatusCollection
configuration: {{ .Values.featureGates.RawResourceStatusCollection | default "Disabled" | quote }}
- name: SchedulerPreferences
configuration: {{ .Values.featureGates.SchedulerPreferences | default "Enabled" | quote }}
- name: CrossClusterServiceDiscovery
configuration: {{ .Values.featureGates.CrossClusterServiceDiscovery | default "Disabled" | quote }}
- name: FederatedIngress
configuration: {{ .Values.featureGates.FederatedIngress | default "Disabled" | quote }}
# NOTE: Commented feature gate to fix https://github.com/kubernetes-sigs/kubefed/issues/1333
#- name: RawResourceStatusCollection
# configuration: {{ .Values.featureGates.RawResourceStatusCollection | default "Disabled" | quote }}
{{- end }}
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
---
# ConfigMap holding the post-install/post-upgrade hook script that the Job
# below mounts at /opt/scripts and executes. The script patches the existing
# KubeFedConfig with explicit featureGates values and then restarts the
# controller-manager so the patched config takes effect (fix for the broken
# upgrade path, kubernetes-sigs/kubefed#1333).
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ .Release.Name }}-kubefed-config-hook"
namespace: "{{ .Release.Namespace }}"
annotations:
# NOTE(review): spelled "post-install, post-upgrade" (with a space) here
# but "post-install,post-upgrade" on the Job below — confirm Helm accepts
# both spellings consistently.
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
data:
setup.sh: |-
#!/bin/bash
set -euo pipefail

kubectl patch kubefedconfig -n {{ .Release.Namespace }} kubefed --type='json' -p='[{"op": "add", "path": "/spec/featureGates", "value":[{"configuration": {{ .Values.featureGates.PushReconciler | default "Enabled" | quote }},"name":"PushReconciler"},{"configuration": {{ .Values.featureGates.CrossClusterServiceDiscovery | default "Disabled" | quote }},"name":"CrossClusterServiceDiscovery"},{"configuration": {{ .Values.featureGates.RawResourceStatusCollection | default "Disabled" | quote }},"name":"RawResourceStatusCollection"},{"configuration": {{ .Values.featureGates.FederatedIngress | default "Disabled" | quote }},"name":"FederatedIngress"},{"configuration": {{ .Values.featureGates.SchedulerPreferences | default "Enabled" | quote }},"name":"SchedulerPreferences"}]}]'

echo "Kubefedconfig patched successfully!"

kubectl rollout restart deployment/kubefed-controller-manager -n {{ .Release.Namespace }}
---
# Hook Job that runs setup.sh using a kubectl image. Hook weight -4 so it is
# created after the ConfigMap and RBAC objects (weight -5). The random name
# suffix forces a fresh Job object on every install/upgrade.
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ .Release.Name }}-{{ randAlphaNum 10 | lower }}"
namespace: "{{ .Release.Namespace }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-4"
# hook-failed is included so a failed Job is also cleaned up; the other
# hook resources in this file use before-hook-creation instead.
"helm.sh/hook-delete-policy": hook-succeeded,hook-failed
spec:
template:
metadata:
name: "{{ .Release.Name }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
restartPolicy: Never
serviceAccountName: kubefed-config-hook
automountServiceAccountToken: true
containers:
- name: post-install-job
# NOTE(review): kubectl image tag is pinned to 1.17.16 — confirm it stays
# compatible with the cluster versions this chart targets.
image: "bitnami/kubectl:1.17.16"
command: ["/bin/bash"]
args: ["/opt/scripts/setup.sh"]
volumeMounts:
- name: "scripts"
mountPath: "/opt/scripts"
volumes:
- name: "scripts"
configMap:
name: "{{ .Release.Name }}-kubefed-config-hook"
---
# Role granting exactly what setup.sh needs: patch/get on kubefedconfigs
# (for the kubectl patch) and on deployments (for the rollout restart).
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kubefed-config-hook
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
rules:
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["patch","get"]
- apiGroups: ["core.kubefed.io"]
resources: ["kubefedconfigs"]
verbs: ["patch","get"]
---
# Binds the Role above to the hook's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubefed-config-hook
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubefed-config-hook
subjects:
- kind: ServiceAccount
# NOTE(review): no namespace is set on this ServiceAccount subject —
# confirm the API server resolves it to the RoleBinding's namespace.
name: kubefed-config-hook
---
# ServiceAccount the hook Job runs as (referenced by the Job's
# serviceAccountName above).
apiVersion: v1
kind: ServiceAccount
metadata:
name: kubefed-config-hook
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
21 changes: 0 additions & 21 deletions scripts/deploy-kubefed.sh
Original file line number Diff line number Diff line change
Expand Up @@ -92,27 +92,6 @@ function kubefed-admission-webhook-ready() {
[[ "${readyReplicas}" -ge "1" ]]
}

# Returns 0 iff the named container of the given deployment currently runs
# exactly ${expected_image}, as reported by the live cluster via kubectl.
# Arguments: $1 namespace, $2 deployment, $3 container name, $4 expected image.
function deployment-image-as-expected() {
local namespace="${1}"
local deployment="${2}"
local container="${3}"
local expected_image="${4}"

local deployed_image
# The jsonpath filter selects the container by name; quoting is nested so
# ${container} is injected into the single-quoted jsonpath expression.
deployed_image="$(kubectl -n "${namespace}" get deployment "${deployment}" -o jsonpath='{.spec.template.spec.containers[?(@.name=="'"${container}"'")].image}')"
[[ "${deployed_image}" == "${expected_image}" ]]
}

# Aborts the whole script (exit 1) when the command named by $1 is not
# available on PATH, pointing the user at download-binaries.sh. Uses
# `command -v`, the portable replacement for `which`.
function check-command-installed() {
local cmdName="${1}"

command -v "${cmdName}" >/dev/null 2>&1 ||
{
echo "${cmdName} command not found. Please download dependencies using ${BASH_SOURCE%/*}/download-binaries.sh and install it in your PATH." >&2
exit 1
}
}

NS="${KUBEFED_NAMESPACE:-kube-federation-system}"
IMAGE_NAME="${1:-}"
NAMESPACED="${NAMESPACED:-}"
Expand Down
47 changes: 46 additions & 1 deletion scripts/pre-commit.sh
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ E2E_TEST_CMD="${TEMP_DIR}/e2e-${PLATFORM} ${COMMON_TEST_ARGS}"
# given control plane scope.
IN_MEMORY_E2E_TEST_CMD="go test -v -timeout 900s -race ./test/e2e -args ${COMMON_TEST_ARGS} -in-memory-controllers=true -limited-scope-in-memory-controllers=false"

KUBEFED_UPGRADE_TEST_NS="upgrade-test"

function build-binaries() {
${MAKE_CMD} hyperfed
${MAKE_CMD} controller
Expand All @@ -62,6 +64,43 @@ function run-e2e-tests() {
${E2E_TEST_CMD}
}

# Exercise the helm upgrade path: install the previous released chart from
# the kubefed-charts repo, then upgrade it in place to the chart built from
# the current checkout, verifying the deployed images at each step with
# deployment-image-as-expected (defined in scripts/util.sh).
function run-e2e-upgrade-test() {
  # NOTE(review): HOST_CLUSTER is captured here but not used inside this
  # function — confirm whether it can be dropped.
  HOST_CLUSTER="$(kubectl config current-context)"

  echo "Adding a repo to install an older kubefed version"
  helm repo add kubefed-charts https://raw.githubusercontent.com/kubernetes-sigs/kubefed/master/charts
  helm repo update

  # Get the previous version prior to our latest stable version.
  # `helm search repo --versions` prints a header row first, so the third
  # line of output (head -3 | tail -1) is the second-newest release.
  KUBEFED_UPGRADE_TEST_VERSION=$(helm search repo kubefed-charts/kubefed --versions | awk '{print $2}' | head -3 | tail -1)

  echo "Installing an older kubefed version v${KUBEFED_UPGRADE_TEST_VERSION}"
  helm install kubefed kubefed-charts/kubefed --namespace "${KUBEFED_UPGRADE_TEST_NS}" --version="v${KUBEFED_UPGRADE_TEST_VERSION}" --create-namespace --wait

  deployment-image-as-expected "${KUBEFED_UPGRADE_TEST_NS}" kubefed-admission-webhook admission-webhook "quay.io/kubernetes-multicluster/kubefed:v${KUBEFED_UPGRADE_TEST_VERSION}"
  deployment-image-as-expected "${KUBEFED_UPGRADE_TEST_NS}" kubefed-controller-manager controller-manager "quay.io/kubernetes-multicluster/kubefed:v${KUBEFED_UPGRADE_TEST_VERSION}"

  echo "Upgrading kubefed to current version"
  IMAGE_NAME="local/kubefed:e2e"
  # Split "local/kubefed:e2e" into repository / image / tag with parameter
  # expansion (no external commands needed).
  local repository=${IMAGE_NAME%/*}
  local image_tag=${IMAGE_NAME##*/}
  local image=${image_tag%:*}
  local tag=${image_tag#*:}

  helm upgrade -i kubefed charts/kubefed --namespace "${KUBEFED_UPGRADE_TEST_NS}" \
    --set "controllermanager.controller.repository=${repository}" \
    --set "controllermanager.controller.image=${image}" \
    --set "controllermanager.controller.tag=${tag}" \
    --set "controllermanager.webhook.repository=${repository}" \
    --set "controllermanager.webhook.image=${image}" \
    --set "controllermanager.webhook.tag=${tag}" \
    --set controllermanager.featureGates.CrossClusterServiceDiscovery=Enabled,controllermanager.featureGates.FederatedIngress=Enabled,controllermanager.featureGates.RawResourceStatusCollection=Enabled \
    --wait

  deployment-image-as-expected "${KUBEFED_UPGRADE_TEST_NS}" kubefed-admission-webhook admission-webhook "local/kubefed:e2e"
  deployment-image-as-expected "${KUBEFED_UPGRADE_TEST_NS}" kubefed-controller-manager controller-manager "local/kubefed:e2e"
}

# Run the e2e suite with controllers running in-memory in the test binary
# (see IN_MEMORY_E2E_TEST_CMD above, which passes -in-memory-controllers=true).
# The variable is expanded unquoted on purpose so the command string splits
# into the program and its arguments.
function run-e2e-tests-with-in-memory-controllers() {
${IN_MEMORY_E2E_TEST_CMD}
}
Expand Down Expand Up @@ -138,7 +177,7 @@ echo "Downloading e2e test dependencies"

KIND_TAG="v1.19.4@sha256:796d09e217d93bed01ecf8502633e48fd806fe42f9d02fdd468b81cd4e3bd40b" ./scripts/create-clusters.sh

declare -a join_cluster_list=()
declare -a join_cluster_list=()
if [[ -z "${JOIN_CLUSTERS}" ]]; then
for i in $(seq 2 "${NUM_CLUSTERS}"); do
join_cluster_list+=("cluster${i}")
Expand Down Expand Up @@ -178,3 +217,9 @@ run-namespaced-e2e-tests

echo "Deleting namespace-scoped kubefed"
KUBEFED_NAMESPACE=foo NAMESPACED=y DELETE_CLUSTER_RESOURCE=y ./scripts/delete-kubefed.sh

echo "Running e2e upgrade test"
run-e2e-upgrade-test

# Tear down the release installed into the upgrade-test namespace by
# run-e2e-upgrade-test (DELETE_CLUSTER_RESOURCE also removes cluster-scoped
# resources).
echo "Deleting kubefed"
KUBEFED_NAMESPACE=${KUBEFED_UPGRADE_TEST_NS} DELETE_CLUSTER_RESOURCE=y ./scripts/delete-kubefed.sh
21 changes: 21 additions & 0 deletions scripts/util.sh
Original file line number Diff line number Diff line change
Expand Up @@ -98,3 +98,24 @@ function util::wait-for-condition() {
fi
}
readonly -f util::wait-for-condition

# Succeed (return 0) only when the named container of the given deployment
# currently runs exactly the expected image, as reported by kubectl.
# Arguments: $1 namespace, $2 deployment, $3 container name, $4 expected image.
function deployment-image-as-expected() {
  local ns="${1}"
  local deploy="${2}"
  local ctr="${3}"
  local want="${4}"

  # Nested quoting injects the container name into the single-quoted
  # jsonpath filter expression.
  local got
  got="$(kubectl -n "${ns}" get deployment "${deploy}" -o jsonpath='{.spec.template.spec.containers[?(@.name=="'"${ctr}"'")].image}')"
  [[ "${got}" == "${want}" ]]
}

# Abort the whole script (exit 1) when the command named by $1 is not on
# PATH, telling the user to fetch dependencies with download-binaries.sh.
# `command -v` is the portable way to probe for a command (vs `which`).
function check-command-installed() {
  local cmd="${1}"

  if ! command -v "${cmd}" >/dev/null 2>&1; then
    echo "${cmd} command not found. Please download dependencies using ${BASH_SOURCE%/*}/download-binaries.sh and install it in your PATH." >&2
    exit 1
  fi
}