diff --git a/.drone.yml b/.drone.yml index 54946c3..383256c 100644 --- a/.drone.yml +++ b/.drone.yml @@ -8,10 +8,10 @@ type: docker steps: - name: check - image: docker.io/library/golang:1.16 + image: docker.io/library/golang:1.21 pull: always commands: - - go get -u github.com/google/addlicense + - go install github.com/google/addlicense@v1.1.1 - addlicense -c "SIGHUP s.r.l" -v -l bsd --check . --- name: policeman @@ -66,7 +66,7 @@ steps: - /pluto detect tigera-on-prem.yml --ignore-deprecations --target-versions=k8s=v1.26.0 --- -name: e2e-kubernetes-1.24-calico +name: e2e-kubernetes-1.25-calico kind: pipeline type: docker @@ -88,17 +88,17 @@ depends_on: steps: - name: init - image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 pull: always volumes: - name: shared path: /shared depends_on: [clone] settings: - action: custom-cluster-124 - pipeline_id: cluster-124 + action: custom-cluster-125 + pipeline_id: cluster-125 local_kind_config_path: katalog/tests/calico/resources/kind-config - cluster_version: "1.24.0" + cluster_version: "1.25.3" instance_path: /shared aws_default_region: from_secret: aws_region @@ -120,23 +120,24 @@ steps: from_secret: dockerhub_password - name: test - image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1 + # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 + image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 pull: always volumes: - name: shared path: /shared depends_on: [init] commands: - - export KUBECONFIG=/shared/kube/kubeconfig-124 + - export KUBECONFIG=/shared/kube/kubeconfig-125 - bats -t katalog/tests/calico/calico.sh - name: destroy - image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 pull: always depends_on: [test] settings: action: destroy - pipeline_id: cluster-124 + pipeline_id: cluster-125 aws_default_region: from_secret: aws_region aws_access_key_id: @@ -163,9 +164,8 @@ steps: volumes: - name: shared temp: {} - --- -name: e2e-kubernetes-1.25-calico +name: e2e-kubernetes-1.26-calico kind: pipeline type: docker @@ -187,17 +187,17 @@ depends_on: steps: - name: init - image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 pull: always volumes: - name: shared path: /shared depends_on: [clone] settings: - action: custom-cluster-125 - pipeline_id: cluster-125 + action: custom-cluster-126 + pipeline_id: cluster-126 local_kind_config_path: katalog/tests/calico/resources/kind-config - cluster_version: "1.25.3" + cluster_version: "1.26.4" instance_path: /shared aws_default_region: from_secret: aws_region @@ -219,24 +219,23 @@ steps: from_secret: dockerhub_password - name: test - # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 - image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 + image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 pull: always volumes: - name: shared path: /shared depends_on: [init] commands: - - export KUBECONFIG=/shared/kube/kubeconfig-125 + - export KUBECONFIG=/shared/kube/kubeconfig-126 - bats -t katalog/tests/calico/calico.sh - name: destroy - image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 pull: always depends_on: [test] settings: action: destroy - pipeline_id: cluster-125 +
pipeline_id: cluster-126 aws_default_region: from_secret: aws_region aws_access_key_id: @@ -264,7 +263,7 @@ volumes: - name: shared temp: {} --- -name: e2e-kubernetes-1.26-calico +name: e2e-kubernetes-1.27-calico kind: pipeline type: docker @@ -286,17 +285,17 @@ depends_on: steps: - name: init - image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 + image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0 pull: always volumes: - name: shared path: /shared depends_on: [clone] settings: - action: custom-cluster-126 - pipeline_id: cluster-126 + action: custom-cluster-127 + pipeline_id: cluster-127 local_kind_config_path: katalog/tests/calico/resources/kind-config - cluster_version: "1.26.4" + cluster_version: "1.27.1" instance_path: /shared aws_default_region: from_secret: aws_region @@ -318,23 +317,23 @@ steps: from_secret: dockerhub_password - name: test - image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 + image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.12.0_1.9.4_1.27.1_3.5.3_4.33.3 pull: always volumes: - name: shared path: /shared depends_on: [init] commands: - - export KUBECONFIG=/shared/kube/kubeconfig-126 + - export KUBECONFIG=/shared/kube/kubeconfig-127 - bats -t katalog/tests/calico/calico.sh - name: destroy - image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 + image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0 pull: always depends_on: [test] settings: action: destroy - pipeline_id: cluster-126 + pipeline_id: cluster-127 aws_default_region: from_secret: aws_region aws_access_key_id: @@ -363,7 +362,7 @@ volumes: temp: {} --- -name: e2e-kubernetes-1.24-cilium +name: e2e-kubernetes-1.25-cilium kind: pipeline type: docker @@ -381,23 +380,21 @@ trigger: - refs/tags/** depends_on: - - e2e-kubernetes-1.24-calico - e2e-kubernetes-1.25-calico - - e2e-kubernetes-1.26-calico steps: - name: init - image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 pull: always volumes: - name: shared path: /shared depends_on: [clone] settings: - action: custom-cluster-124 - pipeline_id: cluster-124-cilium + action: custom-cluster-125 + pipeline_id: cluster-125-cilium local_kind_config_path: katalog/tests/calico/resources/kind-config - cluster_version: "1.24.0" + cluster_version: "1.25.3" instance_path: /shared aws_default_region: from_secret: aws_region @@ -419,23 +416,24 @@ steps: from_secret: dockerhub_password - name: test - image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1 + # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 + image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 pull: always volumes: - name: shared path: /shared depends_on: [init] commands: - - export KUBECONFIG=/shared/kube/kubeconfig-124 + - export KUBECONFIG=/shared/kube/kubeconfig-125 - bats -t katalog/tests/cilium/cilium.sh - name: destroy - image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 pull: always depends_on: [test] settings: action: destroy - pipeline_id: cluster-124-cilium + pipeline_id: cluster-125-cilium aws_default_region: from_secret: aws_region aws_access_key_id: @@ -464,7 +462,7 @@ volumes: temp: {} --- -name: e2e-kubernetes-1.25-cilium +name: e2e-kubernetes-1.26-cilium kind: pipeline type: docker @@ -482,23 +480,21 @@ trigger: - refs/tags/** depends_on: - - e2e-kubernetes-1.24-calico - - e2e-kubernetes-1.25-calico - e2e-kubernetes-1.26-calico steps: - name: 
init - image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 pull: always volumes: - name: shared path: /shared depends_on: [clone] settings: - action: custom-cluster-125 - pipeline_id: cluster-125-cilium + action: custom-cluster-126 + pipeline_id: cluster-126-cilium local_kind_config_path: katalog/tests/calico/resources/kind-config - cluster_version: "1.25.3" + cluster_version: "1.26.4" instance_path: /shared aws_default_region: from_secret: aws_region @@ -520,24 +516,23 @@ steps: from_secret: dockerhub_password - name: test - # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 - image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 + image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 pull: always volumes: - name: shared path: /shared depends_on: [init] commands: - - export KUBECONFIG=/shared/kube/kubeconfig-125 + - export KUBECONFIG=/shared/kube/kubeconfig-126 - bats -t katalog/tests/cilium/cilium.sh - name: destroy - image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3 + image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 pull: always depends_on: [test] settings: action: destroy - pipeline_id: cluster-125-cilium + pipeline_id: cluster-126-cilium aws_default_region: from_secret: aws_region aws_access_key_id: @@ -565,7 +560,7 @@ volumes: - name: shared temp: {} --- -name: e2e-kubernetes-1.26-cilium +name: e2e-kubernetes-1.27-cilium kind: pipeline type: docker @@ -583,23 +578,21 @@ trigger: - refs/tags/** depends_on: - - e2e-kubernetes-1.24-calico - - e2e-kubernetes-1.25-calico - - e2e-kubernetes-1.26-calico + - e2e-kubernetes-1.27-calico steps: - name: init - image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 + image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0 pull: always volumes: - name: shared path: /shared depends_on: [clone] settings: - action: custom-cluster-126 - pipeline_id: cluster-126-cilium + action: custom-cluster-127 + pipeline_id: cluster-127-cilium local_kind_config_path: katalog/tests/calico/resources/kind-config - cluster_version: "1.26.4" + cluster_version: "1.27.1" instance_path: /shared aws_default_region: from_secret: aws_region @@ -621,23 +614,23 @@ steps: from_secret: dockerhub_password - name: test - image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 + image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.12.0_1.9.4_1.27.1_3.5.3_4.33.3 pull: always volumes: - name: shared path: /shared depends_on: [init] commands: - - export KUBECONFIG=/shared/kube/kubeconfig-126 + - export KUBECONFIG=/shared/kube/kubeconfig-127 - bats -t katalog/tests/cilium/cilium.sh - name: destroy - image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4 + image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0 pull: always depends_on: [test] settings: action: destroy - pipeline_id: cluster-126-cilium + pipeline_id: cluster-127-cilium aws_default_region: from_secret: aws_region aws_access_key_id: @@ -664,19 +657,18 @@ steps: volumes: - name: shared temp: {} - --- name: release kind: pipeline type: docker depends_on: - - e2e-kubernetes-1.24-calico - e2e-kubernetes-1.25-calico - e2e-kubernetes-1.26-calico - - e2e-kubernetes-1.24-cilium + - e2e-kubernetes-1.27-calico - e2e-kubernetes-1.25-cilium - e2e-kubernetes-1.26-cilium + - e2e-kubernetes-1.27-cilium platform: os: linux diff --git a/README.md b/README.md index 0d6948d..a87dd42 100644 --- a/README.md +++ b/README.md @@ -29,9 +29,9 @@ Kubernetes Fury 
Networking provides the following packages:

| Package | Version | Description |
| -------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [calico](katalog/calico) | `3.26.1` | [Calico][calico-page] CNI Plugin. For cluster with `< 50` nodes. |
-| [cilium](katalog/cilium) | `1.13.3` | [Cilium][cilium-page] CNI Plugin. For cluster with `< 200` nodes. |
-| [tigera](katalog/tigera) | `1.30.4` | [Tigera Operator][tigera-page], a Kubernetes Operator for Calico, provides pre-configured installations for on-prem and for EKS in policy-only mode. |
+| [calico](katalog/calico) | `3.26.3` | [Calico][calico-page] CNI Plugin. For clusters with `< 50` nodes. |
+| [cilium](katalog/cilium) | `1.14.3` | [Cilium][cilium-page] CNI Plugin. For clusters with `< 200` nodes. |
+| [tigera](katalog/tigera) | `1.30.7` | [Tigera Operator][tigera-page], a Kubernetes Operator for Calico, provides pre-configured installations for on-prem and for EKS in policy-only mode. |
| [ip-masq](katalog/ip-masq) | `2.8.0` | The `ip-masq-agent` configures iptables rules to implement IP masquerading functionality |

> The resources in these packages are going to be deployed in `kube-system` namespace. Except for the operator.

@@ -42,9 +42,10 @@ Click on each package to see its full documentation.

| Kubernetes Version | Compatibility | Notes |
| ------------------ | :----------------: | --------------- |
-| `1.24.x` | :white_check_mark: | No known issues |
| `1.25.x` | :white_check_mark: | No known issues |
| `1.26.x` | :white_check_mark: | No known issues |
+| `1.27.x` | :white_check_mark: | No known issues |
+
Check the [compatibility matrix][compatibility-matrix] for additional information on previous releases of the module.

@@ -59,14 +60,14 @@ Check the [compatibility matrix][compatibility-matrix] for additional informatio

### Deployment

-> ⚠️ please notice that the Calico packages is for cluster with less the 50 nodes. If your cluster has more than 50 nodes, you'll need to switch to [Calico + Typha](https://projectcalico.docs.tigera.io/archive/v3.23/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-more-than-50-nodes) or to the [Tigera Operator](katalog/tigera/README.md).
+> ⚠️ Please note that the Calico package is for clusters with fewer than 50 nodes. If your cluster has more than 50 nodes, you'll need to switch to [Calico + Typha](https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises) or to the [Tigera Operator](katalog/tigera/README.md).

1. List the packages you want to deploy and their version in a `Furyfile.yml`

```yaml
bases:
  - name: networking
-    version: "v1.14.0"
+    version: "v1.15.0"
```

> See `furyctl` [documentation][furyctl-repo] for additional details about `Furyfile.yml` format.
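+
+2. Then vendor and deploy the package. A minimal sketch of the usual `furyctl` + `kustomize` flow follows; the `vendor/` path below reflects the default layout that `furyctl vendor` produces, so adjust it to your project:
+
+```bash
+# Download the package sources listed in Furyfile.yml (over HTTPS)
+furyctl vendor -H
+# Build and apply one of the packages, e.g. Calico (swap in cilium or tigera/on-prem as needed)
+kustomize build vendor/katalog/networking/calico | kubectl apply -f -
+```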
diff --git a/docs/COMPATIBILITY_MATRIX.md b/docs/COMPATIBILITY_MATRIX.md
index e4758fc..7184aa8 100644
--- a/docs/COMPATIBILITY_MATRIX.md
+++ b/docs/COMPATIBILITY_MATRIX.md
@@ -1,13 +1,14 @@
# Compatibility Matrix

-| Module Version / Kubernetes Version | 1.24.X | 1.25.X | 1.26.X |
-| ----------------------------------- | ------------------ | ------------------ | ------------------ |
-| v1.10.0 | :white_check_mark: | | |
-| v1.11.0 | :white_check_mark: | :white_check_mark: | |
-| v1.12.0 | :white_check_mark: | :white_check_mark: | |
-| v1.12.1 | :white_check_mark: | :white_check_mark: | |
-| v1.12.2 | :white_check_mark: | :white_check_mark: | |
-| v1.14.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| Module Version / Kubernetes Version | 1.24.X | 1.25.X | 1.26.X | 1.27.X |
+| ----------------------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
+| v1.10.0 | :white_check_mark: | | | |
+| v1.11.0 | :white_check_mark: | :white_check_mark: | | |
+| v1.12.0 | :white_check_mark: | :white_check_mark: | | |
+| v1.12.1 | :white_check_mark: | :white_check_mark: | | |
+| v1.12.2 | :white_check_mark: | :white_check_mark: | | |
+| v1.14.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: | |
+| v1.15.0 | | :white_check_mark: | :white_check_mark: | :white_check_mark: |

:white_check_mark: Compatible

@@ -41,4 +42,4 @@
| v1.8.0 | | | | | | | :x: | :x: | :x: | :x: |
| v1.8.1 | | | | | | | :x: | :x: | :x: | :x: |
| v1.8.2 | | | | | | | :white_check_mark: | :x: | :x: | :x: |
-| v1.9.0 | | | | | | | :x: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
\ No newline at end of file
+| v1.9.0 | | | | | | | :x: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
diff --git a/docs/releases/v1.15.0.md b/docs/releases/v1.15.0.md
new file mode 100644
index 0000000..1202a27
--- /dev/null
+++ b/docs/releases/v1.15.0.md
@@ -0,0 +1,64 @@
+# Networking Core Module Release 1.15.0
+
+Welcome to the latest release of the `Networking` module of [`Kubernetes Fury Distribution`](https://github.com/sighupio/fury-distribution) maintained by team SIGHUP.
+
+This minor release updates some components and adds support for Kubernetes 1.27.
+
+## Component Images 🚢
+
+| Component | Supported Version | Previous Version |
+| ----------------- | -------------------------------------------------------------------------------- | ---------------- |
+| `calico` | [`v3.26.3`](https://projectcalico.docs.tigera.io/archive/v3.26/release-notes/) | `v3.26.1` |
+| `cilium` | [`v1.14.3`](https://github.com/cilium/cilium/releases/tag/v1.14.3) | `v1.13.3` |
+| `ip-masq` | [`v2.8.0`](https://github.com/kubernetes-sigs/ip-masq-agent/releases/tag/v2.8.0) | No update |
+| `tigera-operator` | [`v1.30.7`](https://github.com/tigera/operator/releases/tag/v1.30.7) | `v1.30.4` |
+
+> Please refer to the individual release notes to get detailed information on each release.
+
+## Update Guide 🦮
+
+### Process
+
+If you are using Cilium, read the steps [below](#cilium-upgrade) before proceeding.
+
+1. Just deploy as usual:
+
+```bash
+kustomize build katalog/calico | kubectl apply -f -
+# OR
+kustomize build katalog/tigera/on-prem | kubectl apply -f -
+# OR
+kustomize build katalog/cilium | kubectl apply -f -
+```
+
+#### Cilium upgrade
+
+Cilium's suggested upgrade path expects a pre-flight check to be run before any upgrade.
+
+1. Create the resources for the check:
+
+```bash
+kubectl create -f katalog/cilium/tasks/preflight.yaml
+```
+
+2. Make sure that the number of READY pods is the same as the number of running Cilium pods:
+
+```text
+kubectl get daemonset -n kube-system | sed -n '1p;/cilium/p'
+NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
+cilium                    2         2         2       2            2                           1h20m
+cilium-pre-flight-check   2         2         2       2            2                           7m15s
+```
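+
+If you prefer a scripted check over eyeballing the output, here is a minimal sketch, assuming both DaemonSets live in `kube-system` as deployed by this package:
+
+```bash
+# Print the READY count of the cilium and cilium-pre-flight-check DaemonSets side by side
+kubectl get daemonset -n kube-system cilium cilium-pre-flight-check \
+  -o custom-columns=NAME:.metadata.name,DESIRED:.status.desiredNumberScheduled,READY:.status.numberReady
+```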
+3. Once the number of READY pods is equal, make sure the Cilium pre-flight deployment is also marked as READY 1/1.
+If it shows READY 0/1, consult the [CNP Validation](https://docs.cilium.io/en/stable/operations/upgrade/#cnp-validation) section in the official docs and resolve issues with the deployment before continuing with the upgrade.
+
+```text
+kubectl get deployment -n kube-system cilium-pre-flight-check -w
+NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
+cilium-pre-flight-check   1/1     1            0           12s
+```
+
+4. Once the READY count of the pre-flight DaemonSet matches the number of running Cilium pods and the pre-flight Deployment is marked as READY 1/1, delete the pre-flight resources and proceed with the upgrade:
+
+```bash
+kubectl delete -f katalog/cilium/tasks/preflight.yaml
+```
+
+If you are upgrading from previous versions, please refer to the [`v1.14.0` release notes](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v1.14.0).
diff --git a/katalog/calico/MAINTENANCE.md b/katalog/calico/MAINTENANCE.md
index 3d1f62f..5c7e953 100644
--- a/katalog/calico/MAINTENANCE.md
+++ b/katalog/calico/MAINTENANCE.md
@@ -20,12 +20,10 @@ Compare the `deploy.yaml` file with the downloaded `calico-${CALICO_VERSION}` fi

3. Update the `kustomization.yaml` file with the right image versions.

```bash
-export CALICO_IMAGE_TAG=v3.26.1
+export CALICO_IMAGE_TAG=v3.26.3
kustomize edit set image docker.io/calico/kube-controllers=registry.sighup.io/fury/calico/kube-controllers:${CALICO_IMAGE_TAG}
kustomize edit set image docker.io/calico/cni=registry.sighup.io/fury/calico/cni:${CALICO_IMAGE_TAG}
kustomize edit set image docker.io/calico/node=registry.sighup.io/fury/calico/node:${CALICO_IMAGE_TAG}
-# Not present anymore in 3.23:
-# kustomize edit set image docker.io/calico/pod2daemon-flexvol=registry.sighup.io/fury/calico/pod2daemon-flexvol:${CALICO_IMAGE_TAG}
```

> ⚠️ Remember to check if images have been added to or dropped from upstream.

@@ -36,12 +34,12 @@

The resources needed to provide monitoring features are not included in the default upstream manifests. There are some additional steps to perform.

-See for details. Note that we are adding an environment variable to the DaemonSet instead of modifing the `default` instance of the `felixconfigurations.crd.projectcalico.org` CRD as the docs say. Modifing the CRD is not possible using Kustomize patches.
+See for details. Note that we are adding an environment variable to the DaemonSet instead of modifying the `default` instance of the `felixconfigurations.crd.projectcalico.org` CRD as the docs say. Modifying the CRD is not possible using Kustomize patches.

1. Download the dashboard from upstream:

```bash
-export CALICO_VERSION=3.26.1
+export CALICO_VERSION=3.26.3
# ⚠️ Assuming $PWD == root of the project
# We take the `felix-dashboard.json` from the downloaded yaml, we are not deploying `typha`, so we don't need its dashboard.
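# Note: the pipeline below assumes `yq` v4 (for the jq-style '.data[...]' path syntax) and `jq` (used here to pretty-print the JSON) are installed locally.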
curl -L https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/grafana-dashboards.yaml | yq '.data["felix-dashboard.json"]' | sed 's/calico-demo-prometheus/prometheus/g' | jq > ./monitoring/dashboards/felix-dashboard.json
diff --git a/katalog/calico/README.md b/katalog/calico/README.md
index ee05d36..1e2bf3e 100644
--- a/katalog/calico/README.md
+++ b/katalog/calico/README.md
@@ -7,6 +7,7 @@ Calico supports a broad range of platforms including Kubernetes, OpenShift, Dock
> For more information about Calico refer to [calico documentation][calico-documentation]

+## Components and features
The deployment of Calico consists of a daemon set running on every node (including the control-plane) and a controller that implements:

- *policy controller* watches network policies and programs Calico policies.

@@ -20,18 +21,18 @@ The deployment of Calico consists of a daemon set running on every node (includi
## Image repository and tag

- calico images:
-  - `calico/kube-controllers:v3.25.0`.
-  - `calico/cni:v3.25.0`.
-  - `calico/node:v3.25.0`.
+  - `calico/kube-controllers:v3.26.3`.
+  - `calico/cni:v3.26.3`.
+  - `calico/node:v3.26.3`.
- calico repositories:
-  - [https://github.com/projectcalico/kube-controllers](https://github.com/projectcalico/kube-controllers).
-  - [https://github.com/projectcalico/cni-plugin](https://github.com/projectcalico/cni-plugin).
-  - [https://github.com/projectcalico/node](https://github.com/projectcalico/node).
+  - [https://github.com/projectcalico/kube-controllers](https://github.com/projectcalico/calico/tree/master/kube-controllers).
+  - [https://github.com/projectcalico/cni-plugin](https://github.com/projectcalico/calico/tree/master/cni-plugin).
+  - [https://github.com/projectcalico/node](https://github.com/projectcalico/calico/tree/master/node).

## Requirements

-- Tested with Kubernetes >= `1.23.X`.
-- Tested with Kustomize >= `v3.3.X`.
+- Tested with Kubernetes >= `1.25.X`.
+- Tested with Kustomize >= `v3.5.3`.
- Prometheus Operator, optional if you want to have metrics.

## Configuration
diff --git a/katalog/calico/deploy.yaml b/katalog/calico/deploy.yaml index 1b45e7a..9e0e4e5 100644 --- a/katalog/calico/deploy.yaml +++ b/katalog/calico/deploy.yaml @@ -4643,7 +4643,7 @@ spec: # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. - name: upgrade-ipam - image: docker.io/calico/cni:v3.26.1 + image: docker.io/calico/cni:v3.26.3 imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/calico-ipam", "-upgrade"] envFrom: @@ -4671,7 +4671,7 @@ spec: # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni - image: docker.io/calico/cni:v3.26.1 + image: docker.io/calico/cni:v3.26.3 imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/install"] envFrom: @@ -4714,7 +4714,7 @@ spec: # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. - name: "mount-bpffs" - image: docker.io/calico/node:v3.26.1 + image: docker.io/calico/node:v3.26.3 imagePullPolicy: IfNotPresent command: ["calico-node", "-init", "-best-effort"] volumeMounts: @@ -4740,7 +4740,7 @@ spec: # container programs network policy and routes on each # host.
- name: calico-node - image: docker.io/calico/node:v3.26.1 + image: docker.io/calico/node:v3.26.3 imagePullPolicy: IfNotPresent envFrom: - configMapRef: @@ -4957,7 +4957,7 @@ spec: priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers - image: docker.io/calico/kube-controllers:v3.26.1 + image: docker.io/calico/kube-controllers:v3.26.3 imagePullPolicy: IfNotPresent env: # Choose which controllers to run. diff --git a/katalog/calico/kustomization.yaml b/katalog/calico/kustomization.yaml index 65c28eb..0cf7f1d 100644 --- a/katalog/calico/kustomization.yaml +++ b/katalog/calico/kustomization.yaml @@ -10,15 +10,13 @@ namespace: kube-system images: - name: docker.io/calico/cni newName: registry.sighup.io/fury/calico/cni - newTag: v3.26.1 + newTag: v3.26.3 - name: docker.io/calico/kube-controllers newName: registry.sighup.io/fury/calico/kube-controllers - newTag: v3.26.1 + newTag: v3.26.3 - name: docker.io/calico/node newName: registry.sighup.io/fury/calico/node - newTag: v3.26.1 -- name: docker.io/calico/pod2daemon-flexvol - newName: registry.sighup.io/fury/calico/pod2daemon-flexvol + newTag: v3.26.3 # Resources needed for Monitoring resources: diff --git a/katalog/cilium/MAINTENANCE.values.yaml b/katalog/cilium/MAINTENANCE.values.yaml index e112892..dcdb8e3 100644 --- a/katalog/cilium/MAINTENANCE.values.yaml +++ b/katalog/cilium/MAINTENANCE.values.yaml @@ -7,7 +7,7 @@ image: override: ~ repository: "registry.sighup.io/fury/cilium/cilium" - tag: "v1.13.3" + tag: "v1.14.3" useDigest: false # -- Affinity for cilium-agent. @@ -19,16 +19,6 @@ affinity: matchLabels: k8s-app: cilium -# -- Agent resource limits & requests -# ref: https://kubernetes.io/docs/user-guide/compute-resources/ -resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi - hubble: # -- Enable Hubble (true by default). enabled: true @@ -86,7 +76,8 @@ hubble: image: override: ~ repository: "registry.sighup.io/fury/cilium/hubble-relay" - tag: "v1.13.3" + tag: "v1.14.3" + useDigest: false pullPolicy: "IfNotPresent" @@ -129,7 +120,8 @@ hubble: image: override: ~ repository: "registry.sighup.io/fury/cilium/hubble-ui-backend" - tag: "v0.11.0" + tag: "v0.12.1" + pullPolicy: "IfNotPresent" resources: {} @@ -145,7 +137,7 @@ hubble: image: override: ~ repository: "registry.sighup.io/fury/cilium/hubble-ui" - tag: "v0.11.0" + tag: "v0.12.1" pullPolicy: "IfNotPresent" # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. @@ -182,34 +174,6 @@ installIptablesRules: true # is running in a managed Kubernetes environment or in a chained CNI setup. installNoConntrackIptablesRules: false -ipam: - # -- Configure IP Address Management mode. - # ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/ - mode: "cluster-pool" - operator: - # -- Deprecated in favor of ipam.operator.clusterPoolIPv4PodCIDRList. - # IPv4 CIDR range to delegate to individual nodes for IPAM. - clusterPoolIPv4PodCIDR: "10.0.0.0/8" - # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. - clusterPoolIPv4PodCIDRList: [] - # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. - clusterPoolIPv4MaskSize: 24 - # -- Deprecated in favor of ipam.operator.clusterPoolIPv6PodCIDRList. - # IPv6 CIDR range to delegate to individual nodes for IPAM. - clusterPoolIPv6PodCIDR: "fd00::/104" - # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. 
- clusterPoolIPv6PodCIDRList: [] - # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. - clusterPoolIPv6MaskSize: 120 - # -- The maximum burst size when rate limiting access to external APIs. - # Also known as the token bucket capacity. - # @default -- `20` - externalAPILimitBurstSize: ~ - # -- The maximum queries per second when rate limiting access to - # external APIs. Also known as the bucket refill rate, which is used to - # refill the bucket up to the burst size capacity. - # @default -- `4.0` - externalAPILimitQPS: ~ # -- Configure the eBPF-based ip-masq-agent ipMasqAgent: @@ -239,13 +203,6 @@ k8s: {} # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR # range via the Kubernetes node resource # requireIPv6PodCIDR: false - -l2NeighDiscovery: - # -- Enable L2 neighbor discovery in the agent - enabled: true - # -- Override the agent's default neighbor resolution refresh period. - refreshPeriod: "30s" - # -- Enable Layer 7 network policy. l7Proxy: true @@ -263,49 +220,6 @@ localRedirectPolicy: false logSystemLoad: false -# -- Configure maglev consistent hashing -maglev: {} - # -- tableSize is the size (parameter M) for the backend table of one - # service entry - # tableSize: - - # -- hashSeed is the cluster-wide base64 encoded seed for the hashing -# hashSeed: - -# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. -enableIPv4Masquerade: true - -# -- Enables IPv6 BIG TCP support which increases maximum GSO/GRO limits for nodes and pods -enableIPv6BIGTCP: false - -# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. -enableIPv6Masquerade: true - - -vtep: - # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow - # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. - enabled: false - -ipv4NativeRoutingCIDR: "" - -# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing. -# When specified, Cilium assumes networking for this CIDR is preconfigured and -# hands traffic destined for that range to the Linux network stack without -# applying any SNAT. -# Generally speaking, specifying a native routing CIDR implies that Cilium can -# depend on the underlying networking stack to route packets to their -# destination. To offer a concrete example, if Cilium is configured to use -# direct routing and the Kubernetes CIDR is included in the native routing CIDR, -# the user must configure the routes to reach pods, either manually or by -# setting the auto-direct-node-routes flag. -ipv6NativeRoutingCIDR: "" - -# -- cilium-monitor sidecar. -monitor: - # -- Enable the cilium-monitor sidecar. - enabled: false - # -- Configure prometheus metrics on the configured port at /metrics prometheus: enabled: true @@ -337,20 +251,6 @@ prometheus: # ref: https://docs.cilium.io/en/stable/operations/metrics/#exported-metrics metrics: ~ - -# -- Enable use of the remote node identity. -# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity -remoteNodeIdentity: true - - -wellKnownIdentities: - # -- Enable the use of well-known identities. - enabled: false - -etcd: - # -- Enable etcd mode for the agent. - enabled: false - operator: # -- Enable the cilium-operator component (required). 
enabled: true @@ -362,19 +262,12 @@ operator: image: override: ~ repository: "registry.sighup.io/fury/cilium/operator" - tag: "v1.13.3" + tag: "v1.14.3" + useDigest: false pullPolicy: "IfNotPresent" suffix: "" - resources: {} - # limits: - # cpu: 1000m - # memory: 1Gi - # requests: - # cpu: 100m - # memory: 128Mi - # -- Enable prometheus metrics for cilium-operator on the configured port at # /metrics prometheus: @@ -394,30 +287,3 @@ preflight: # -- Enable Cilium pre-flight resources (required for upgrade) enabled: false -enableCriticalPriorityClass: true - -# disableEnvoyVersionCheck removes the check for Envoy, which can be useful -# on AArch64 as the images do not currently ship a version of Envoy. -#disableEnvoyVersionCheck: false - -# -- Configure cgroup related configuration -cgroup: - autoMount: - # -- Enable auto mount of cgroup2 filesystem. - # When `autoMount` is enabled, cgroup2 filesystem is mounted at - # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. - # If users disable `autoMount`, it's expected that users have mounted - # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the - # volume will be mounted inside the cilium agent pod at the same path. - enabled: true - # -- Init Container Cgroup Automount resource limits & requests - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) - hostRoot: /run/cilium/cgroupv2 - diff --git a/katalog/cilium/core/deploy.yaml b/katalog/cilium/core/deploy.yaml index 859265d..275e18b 100644 --- a/katalog/cilium/core/deploy.yaml +++ b/katalog/cilium/core/deploy.yaml @@ -138,13 +138,16 @@ data: # - disabled # - vxlan (default) # - geneve - tunnel: "vxlan" + # Default case + routing-mode: "tunnel" + tunnel-protocol: "vxlan" # Enables L7 proxy for L7 policy enforcement and visibility enable-l7-proxy: "true" enable-ipv4-masquerade: "true" + enable-ipv4-big-tcp: "false" enable-ipv6-big-tcp: "false" enable-ipv6-masquerade: "true" @@ -162,7 +165,11 @@ data: enable-svc-source-range-check: "true" enable-l2-neigh-discovery: "true" arping-refresh-period: "30s" - cni-uninstall: "true" + enable-k8s-networkpolicy: "true" + # Tell the agent to generate and write a CNI configuration file + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist + cni-exclusive: "true" + cni-log-file: "/var/run/cilium/cilium-cni.log" enable-endpoint-health-checking: "true" enable-health-checking: "true" enable-well-known-identities: "false" @@ -170,9 +177,12 @@ data: synchronize-k8s-nodes: "true" operator-api-serve-addr: "127.0.0.1:9234" ipam: "cluster-pool" + ipam-cilium-node-update-rate: "15s" cluster-pool-ipv4-cidr: "10.0.0.0/8" cluster-pool-ipv4-mask-size: "24" disable-cnp-status-updates: "true" + cnp-node-status-gc-interval: "0s" + egress-gateway-reconciliation-trigger-interval: "1s" enable-vtep: "false" vtep-endpoint: "" vtep-cidr: "" @@ -184,7 +194,10 @@ data: cgroup-root: "/run/cilium/cgroupv2" enable-k8s-terminating-endpoint: "true" enable-sctp: "false" + k8s-client-qps: "5" + k8s-client-burst: "10" remove-cilium-node-taints: "true" + set-cilium-node-taints: "true" set-cilium-is-up-condition: "true" unmanaged-pod-watcher-interval: "15" tofqdns-dns-reject-response-code: "refused" @@ -192,7 +205,6 @@ data: tofqdns-endpoint-max-ip-per-hostname: "50" tofqdns-idle-connection-grace-period: "0s" tofqdns-max-deferred-connection-deletes: "10000" - 
tofqdns-min-ttl: "3600" tofqdns-proxy-response-max-delay: "100ms" agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" --- @@ -259,6 +271,9 @@ rules: - ciliumnetworkpolicies - ciliumnodes - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -299,6 +314,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch --- @@ -474,14 +490,24 @@ rules: - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io - apiGroups: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -594,10 +620,6 @@ spec: port: 9964 protocol: TCP targetPort: envoy-metrics - - name: hubble-metrics - port: 9965 - protocol: TCP - targetPort: hubble-metrics --- # Source: cilium/templates/cilium-operator/service.yaml kind: Service @@ -657,7 +679,7 @@ spec: spec: containers: - name: cilium-agent - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent command: - cilium-agent @@ -714,26 +736,7 @@ spec: fieldPath: metadata.namespace - name: CILIUM_CLUSTERMESH_CONFIG value: /var/lib/cilium/clustermesh/ - - name: CILIUM_CNI_CHAINING_MODE - valueFrom: - configMapKeyRef: - name: cilium-config - key: cni-chaining-mode - optional: true - - name: CILIUM_CUSTOM_CNI_CONF - valueFrom: - configMapKeyRef: - name: cilium-config - key: custom-cni-conf - optional: true lifecycle: - postStart: - exec: - command: - - "/cni-install.sh" - - "--enable-debug=false" - - "--cni-exclusive=true" - - "--log-file=/var/run/cilium/cilium-cni.log" preStop: exec: command: @@ -805,7 +808,7 @@ spec: mountPath: /tmp initContainers: - name: config - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent command: - cilium @@ -828,7 +831,7 @@ spec: # Required to mount cgroup2 filesystem on the underlying Kubernetes node. # We use nsenter command with host's cgroup and mount namespaces enabled. - name: mount-cgroup - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent env: - name: CGROUP_ROOT @@ -865,7 +868,7 @@ spec: drop: - ALL - name: apply-sysctl-overwrites - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent env: - name: BIN_PATH @@ -903,7 +906,7 @@ spec: # from a privileged container because the mount propagation bidirectional # only works from privileged containers. 
- name: mount-bpf-fs - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' @@ -919,7 +922,7 @@ spec: mountPath: /sys/fs/bpf mountPropagation: Bidirectional - name: clean-cilium-state - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent command: - /init-container.sh @@ -964,7 +967,7 @@ spec: memory: 100Mi # wait-for-kube-proxy # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "registry.sighup.io/fury/cilium/cilium:v1.13.3" + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" imagePullPolicy: IfNotPresent command: - "/install-plugin.sh" @@ -1079,10 +1082,14 @@ spec: matchLabels: io.cilium/app: operator name: cilium-operator + # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case + # of one replica and no user configured Recreate strategy. + # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the + # podAntiAffinity which prevents deployments of multiple operator replicas on the same node. strategy: rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 + maxSurge: 25% + maxUnavailable: 50% type: RollingUpdate template: metadata: @@ -1095,7 +1102,7 @@ spec: spec: containers: - name: cilium-operator - image: "registry.sighup.io/fury/cilium/operator-generic:v1.13.3" + image: "registry.sighup.io/fury/cilium/operator-generic:v1.14.3" imagePullPolicy: IfNotPresent command: - cilium-operator-generic @@ -1133,6 +1140,16 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 timeoutSeconds: 3 + readinessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 5 volumeMounts: - name: cilium-config-path mountPath: /tmp/cilium/config-map diff --git a/katalog/cilium/hubble/deploy.yaml b/katalog/cilium/hubble/deploy.yaml index c9c00b5..c2cb2a7 100644 --- a/katalog/cilium/hubble/deploy.yaml +++ b/katalog/cilium/hubble/deploy.yaml @@ -1,7 +1,6 @@ # Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. 
- --- # Source: cilium/templates/hubble-relay/serviceaccount.yaml apiVersion: v1 @@ -17,6 +16,29 @@ metadata: name: "hubble-ui" namespace: kube-system --- +# Source: cilium/templates/hubble-relay/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + cluster-name: default + peer-service: "hubble-peer.kube-system.svc.cluster.local:443" + listen-address: :4245 + gops: true + gops-port: "9893" + metrics-listen-address: ":9966" + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt + disable-server-tls: true +--- apiVersion: cert-manager.io/v1 kind: Issuer metadata: @@ -38,28 +60,6 @@ spec: name: self-signed-cilium kind: Issuer commonName: "cilium-ca" - ---- -# Source: cilium/templates/hubble-relay/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: hubble-relay-config - namespace: kube-system -data: - config.yaml: | - cluster-name: default - peer-service: "hubble-peer.kube-system.svc.cluster.local:443" - listen-address: :4245 - metrics-listen-address: ":9966" - dial-timeout: - retry-timeout: - sort-buffer-len-max: - sort-buffer-drain-timeout: - tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt - tls-client-key-file: /var/lib/hubble-relay/tls/client.key - tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - disable-server-tls: true --- # Source: cilium/templates/hubble-ui/configmap.yaml apiVersion: v1 @@ -190,6 +190,28 @@ spec: port: 80 targetPort: 8081 --- +# Source: cilium/templates/hubble/metrics-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: hubble-metrics + namespace: kube-system + labels: + k8s-app: hubble + app.kubernetes.io/name: hubble + app.kubernetes.io/part-of: cilium + annotations: +spec: + clusterIP: None + type: ClusterIP + ports: + - name: hubble-metrics + port: 9965 + protocol: TCP + targetPort: hubble-metrics + selector: + k8s-app: cilium +--- # Source: cilium/templates/hubble/peer-service.yaml apiVersion: v1 kind: Service @@ -237,9 +259,18 @@ spec: app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium spec: + securityContext: + fsGroup: 65532 containers: - name: hubble-relay - image: "registry.sighup.io/fury/cilium/hubble-relay:v1.13.3" + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + image: "registry.sighup.io/fury/cilium/hubble-relay:v1.14.3" imagePullPolicy: IfNotPresent command: - hubble-relay @@ -295,12 +326,12 @@ spec: - secret: name: hubble-relay-client-certs items: - - key: ca.crt - path: hubble-server-ca.crt - key: tls.crt path: client.crt - key: tls.key path: client.key + - key: ca.crt + path: hubble-server-ca.crt --- # Source: cilium/templates/hubble-ui/deployment.yaml kind: Deployment @@ -329,17 +360,13 @@ spec: app.kubernetes.io/name: hubble-ui app.kubernetes.io/part-of: cilium spec: - securityContext: - fsGroup: 1001 - runAsGroup: 1001 - runAsUser: 1001 - priorityClassName: + priorityClassName: serviceAccount: "hubble-ui" serviceAccountName: "hubble-ui" automountServiceAccountToken: true containers: - name: frontend - image: "registry.sighup.io/fury/cilium/hubble-ui:v0.11.0" + image: "registry.sighup.io/fury/cilium/hubble-ui:v0.12.1" imagePullPolicy: IfNotPresent ports: - name: http @@ -352,7 +379,7 
@@ spec: mountPath: /tmp terminationMessagePolicy: FallbackToLogsOnError - name: backend - image: "registry.sighup.io/fury/cilium/hubble-ui-backend:v0.11.0" + image: "registry.sighup.io/fury/cilium/hubble-ui-backend:v0.12.1" imagePullPolicy: IfNotPresent env: - name: EVENTS_SERVER_PORT @@ -374,6 +401,8 @@ spec: - emptyDir: {} name: tmp-dir --- + + # Source: cilium/templates/cilium-secrets-namespace.yaml # Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled. --- @@ -453,17 +482,17 @@ metadata: spec: selector: matchLabels: - k8s-app: cilium + k8s-app: hubble namespaceSelector: matchNames: - - kube-system + - kube-system endpoints: - - port: hubble-metrics - interval: "10s" - honorLabels: true - path: /metrics - relabelings: - - replacement: ${1} - sourceLabels: - - __meta_kubernetes_pod_node_name - targetLabel: node + - port: hubble-metrics + interval: "10s" + honorLabels: true + path: /metrics + relabelings: + - replacement: ${1} + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node diff --git a/katalog/cilium/hubble/kustomization.yaml b/katalog/cilium/hubble/kustomization.yaml index b61247b..dc5f44d 100644 --- a/katalog/cilium/hubble/kustomization.yaml +++ b/katalog/cilium/hubble/kustomization.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. - +--- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization @@ -14,6 +14,7 @@ resources: patchesStrategicMerge: - patches/cilium.yaml + - patches/service.yaml configMapGenerator: - name: cilium-config diff --git a/katalog/cilium/hubble/patches/cilium.yaml b/katalog/cilium/hubble/patches/cilium.yaml index efc4778..a47ffdd 100644 --- a/katalog/cilium/hubble/patches/cilium.yaml +++ b/katalog/cilium/hubble/patches/cilium.yaml @@ -15,10 +15,10 @@ spec: containers: - name: cilium-agent ports: - - name: hubble-metrics - containerPort: 9965 - hostPort: 9965 - protocol: TCP + - name: hubble-metrics + containerPort: 9965 + hostPort: 9965 + protocol: TCP volumeMounts: - name: hubble-tls mountPath: /var/lib/cilium/tls/hubble @@ -26,7 +26,8 @@ spec: volumes: - name: hubble-tls projected: - # note: the leading zero means this number is in octal representation: do not remove it + # Do not remove the leading zero + # It states an octal number defaultMode: 0400 sources: - secret: diff --git a/katalog/cilium/hubble/patches/service.yaml b/katalog/cilium/hubble/patches/service.yaml new file mode 100644 index 0000000..d082d7e --- /dev/null +++ b/katalog/cilium/hubble/patches/service.yaml @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +--- +apiVersion: v1 +kind: Service +metadata: + name: cilium-agent + namespace: kube-system +spec: + ports: + - name: hubble-metrics + port: 9965 + protocol: TCP + targetPort: hubble-metrics diff --git a/katalog/cilium/tasks/preflight.yaml b/katalog/cilium/tasks/preflight.yaml new file mode 100644 index 0000000..c940110 --- /dev/null +++ b/katalog/cilium/tasks/preflight.yaml @@ -0,0 +1,314 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+--- +# Source: cilium/templates/cilium-preflight/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-pre-flight" + namespace: kube-system +--- +# Source: cilium/templates/cilium-preflight/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-pre-flight + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. + - get +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + # To synchronize garbage collection of such resources + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + verbs: + - patch +--- +# Source: cilium/templates/cilium-preflight/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-pre-flight + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-pre-flight +subjects: +- kind: ServiceAccount + name: "cilium-pre-flight" + namespace: kube-system +--- +# Source: cilium/templates/cilium-preflight/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium-pre-flight-check + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium-pre-flight-check + kubernetes.io/cluster-service: "true" + template: + metadata: + labels: + app.kubernetes.io/part-of: cilium + k8s-app: cilium-pre-flight-check + app.kubernetes.io/name: cilium-pre-flight-check + kubernetes.io/cluster-service: "true" + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + initContainers: + - name: clean-cilium-state + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" + imagePullPolicy: IfNotPresent + command: ["/bin/echo"] + args: + - "hello" + terminationMessagePolicy: FallbackToLogsOnError + containers: + - name: cilium-pre-flight-check + image: 
"registry.sighup.io/fury/cilium/cilium:v1.14.3" + imagePullPolicy: IfNotPresent + command: ["/bin/sh"] + args: + - -c + - "touch /tmp/ready; sleep 1h" + livenessProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 5 + periodSeconds: 5 + volumeMounts: + - name: cilium-run + mountPath: /var/run/cilium + terminationMessagePolicy: FallbackToLogsOnError + hostNetwork: true + # This is here to seamlessly allow migrate-identity to work with + # etcd-operator setups. The assumption is that other cases would also + # work since the cluster DNS would forward the request on. + # This differs from the cilium-agent daemonset, where this is only + # enabled when etcd.managed=true + dnsPolicy: ClusterFirstWithHostNet + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: "cilium-pre-flight" + serviceAccountName: "cilium-pre-flight" + automountServiceAccountToken: true + terminationGracePeriodSeconds: 1 + tolerations: + - effect: NoSchedule + key: node.kubernetes.io/not-ready + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - key: CriticalAddonsOnly + operator: Exists + volumes: + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate +--- +# Source: cilium/templates/cilium-preflight/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-pre-flight-check + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-pre-flight-check +spec: + selector: + matchLabels: + k8s-app: cilium-pre-flight-check-deployment + kubernetes.io/cluster-service: "true" + template: + metadata: + labels: + app.kubernetes.io/part-of: cilium + k8s-app: cilium-pre-flight-check-deployment + kubernetes.io/cluster-service: "true" + app.kubernetes.io/name: cilium-pre-flight-check + spec: + containers: + - name: cnp-validator + image: "registry.sighup.io/fury/cilium/cilium:v1.14.3" + imagePullPolicy: IfNotPresent + command: ["/bin/sh"] + args: + - -ec + - | + cilium preflight validate-cnp; + touch /tmp/ready-validate-cnp; + sleep 1h; + livenessProbe: + exec: + command: + - cat + - /tmp/ready-validate-cnp + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - cat + - /tmp/ready-validate-cnp + initialDelaySeconds: 5 + periodSeconds: 5 + env: + terminationMessagePolicy: FallbackToLogsOnError + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: "cilium-pre-flight" + serviceAccountName: "cilium-pre-flight" + terminationGracePeriodSeconds: 1 + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node.kubernetes.io/not-ready + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - key: CriticalAddonsOnly + operator: Exists +--- +# Source: 
cilium/templates/cilium-secrets-namespace.yaml +# Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled. + +# Only create the namespace if it's different from Ingress and Gateway API secret namespaces (if enabled). diff --git a/katalog/tigera/MAINTENANCE.md b/katalog/tigera/MAINTENANCE.md index 577de56..d79bcfe 100644 --- a/katalog/tigera/MAINTENANCE.md +++ b/katalog/tigera/MAINTENANCE.md @@ -11,7 +11,7 @@ To update the YAML file, run the following command: ```bash # assuming katalog/tigera is the root of the repository -export CALICO_VERSION="3.26.1" +export CALICO_VERSION="3.26.3" curl "https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/tigera-operator.yaml" --output operator/tigera-operator.yaml ``` @@ -28,7 +28,7 @@ To download the default configuration from upstream and update the file use the ```bash # assuming katalog/tigera is the root of the repository -export CALICO_VERSION="3.26.1" +export CALICO_VERSION="3.26.3" curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/custom-resources.yaml --output on-prem/custom-resources.yaml ``` @@ -50,7 +50,7 @@ To get the dashboards you can use the following commands: ```bash # ⚠️ Assuming $PWD == root of the project -export CALICO_VERSION=3.26.1 +export CALICO_VERSION="3.26.3" # we split the upstream file and store only the json files curl -L https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/grafana-dashboards.yaml | yq '.data["felix-dashboard.json"]' | sed 's/calico-demo-prometheus/prometheus/g' | jq > ./on-prem/monitoring/dashboards/felix-dashboard.json curl -L https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/grafana-dashboards.yaml | yq '.data["typha-dashboard.json"]' | sed 's/calico-demo-prometheus/prometheus/g' | jq > ./on-prem/monitoring/dashboards/typa-dashboard.json diff --git a/katalog/tigera/operator/tigera-operator.yaml b/katalog/tigera/operator/tigera-operator.yaml index 0bbea6f..8b4f010 100644 --- a/katalog/tigera/operator/tigera-operator.yaml +++ b/katalog/tigera/operator/tigera-operator.yaml @@ -21486,7 +21486,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: tigera-operator - image: quay.io/tigera/operator:v1.30.4 + image: quay.io/tigera/operator:v1.30.7 imagePullPolicy: IfNotPresent command: - operator @@ -21504,7 +21504,7 @@ spec: - name: OPERATOR_NAME value: "tigera-operator" - name: TIGERA_OPERATOR_INIT_IMAGE_VERSION - value: v1.30.4 + value: v1.30.7 envFrom: - configMapRef: name: kubernetes-services-endpoint