diff --git a/.drone.yml b/.drone.yml
index 6b55827..383256c 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -66,7 +66,7 @@ steps:
       - /pluto detect tigera-on-prem.yml --ignore-deprecations --target-versions=k8s=v1.26.0

 ---
-name: e2e-kubernetes-1.24-calico
+name: e2e-kubernetes-1.25-calico

 kind: pipeline
 type: docker
@@ -88,17 +88,17 @@ depends_on:

 steps:
   - name: init
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0
+    image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3
     pull: always
     volumes:
       - name: shared
         path: /shared
     depends_on: [clone]
     settings:
-      action: custom-cluster-124
-      pipeline_id: cluster-124
+      action: custom-cluster-125
+      pipeline_id: cluster-125
       local_kind_config_path: katalog/tests/calico/resources/kind-config
-      cluster_version: "1.24.0"
+      cluster_version: "1.25.3"
       instance_path: /shared
       aws_default_region:
         from_secret: aws_region
@@ -120,23 +120,24 @@ steps:
         from_secret: dockerhub_password

   - name: test
-    image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1
+    # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0
+    image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1
     pull: always
     volumes:
       - name: shared
         path: /shared
     depends_on: [init]
     commands:
-      - export KUBECONFIG=/shared/kube/kubeconfig-124
+      - export KUBECONFIG=/shared/kube/kubeconfig-125
       - bats -t katalog/tests/calico/calico.sh

   - name: destroy
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0
+    image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3
     pull: always
    depends_on: [test]
     settings:
       action: destroy
-      pipeline_id: cluster-124
+      pipeline_id: cluster-125
       aws_default_region:
         from_secret: aws_region
       aws_access_key_id:
@@ -163,9 +164,8 @@ steps:
 volumes:
   - name: shared
     temp: {}
-
 ---
-name: e2e-kubernetes-1.25-calico
+name: e2e-kubernetes-1.26-calico

 kind: pipeline
 type: docker
@@ -187,17 +187,17 @@ depends_on:

 steps:
   - name: init
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3
+    image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4
     pull: always
     volumes:
       - name: shared
         path: /shared
     depends_on: [clone]
     settings:
-      action: custom-cluster-125
-      pipeline_id: cluster-125
+      action: custom-cluster-126
+      pipeline_id: cluster-126
       local_kind_config_path: katalog/tests/calico/resources/kind-config
-      cluster_version: "1.25.3"
+      cluster_version: "1.26.4"
       instance_path: /shared
       aws_default_region:
         from_secret: aws_region
@@ -219,24 +219,23 @@ steps:
         from_secret: dockerhub_password

   - name: test
-    # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0
-    image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1
+    image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3
     pull: always
     volumes:
       - name: shared
         path: /shared
     depends_on: [init]
     commands:
-      - export KUBECONFIG=/shared/kube/kubeconfig-125
+      - export KUBECONFIG=/shared/kube/kubeconfig-126
       - bats -t katalog/tests/calico/calico.sh

   - name: destroy
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.25.3
+    image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4
     pull: always
     depends_on: [test]
     settings:
       action: destroy
-      pipeline_id: cluster-125
+      pipeline_id: cluster-126
       aws_default_region:
         from_secret: aws_region
       aws_access_key_id:
@@ -264,7 +263,7 @@ volumes:
   - name: shared
     temp: {}
 ---
-name: e2e-kubernetes-1.26-calico
+name: e2e-kubernetes-1.27-calico

 kind: pipeline
 type: docker
@@ -286,17 +285,17 @@ depends_on:

 steps:
   - name: init
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4
+    image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0
     pull: always
     volumes:
       - name: shared
         path: /shared
     depends_on: [clone]
     settings:
-      action: custom-cluster-126
-      pipeline_id: cluster-126
+      action: custom-cluster-127
+      pipeline_id: cluster-127
       local_kind_config_path: katalog/tests/calico/resources/kind-config
-      cluster_version: "1.26.4"
+      cluster_version: "1.27.1"
       instance_path: /shared
       aws_default_region:
         from_secret: aws_region
@@ -318,23 +317,23 @@ steps:
         from_secret: dockerhub_password

   - name: test
-    image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3
+    image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.12.0_1.9.4_1.27.1_3.5.3_4.33.3
     pull: always
     volumes:
       - name: shared
         path: /shared
     depends_on: [init]
     commands:
-      - export KUBECONFIG=/shared/kube/kubeconfig-126
+      - export KUBECONFIG=/shared/kube/kubeconfig-127
       - bats -t katalog/tests/calico/calico.sh

   - name: destroy
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.26.4
+    image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0
     pull: always
     depends_on: [test]
     settings:
       action: destroy
-      pipeline_id: cluster-126
+      pipeline_id: cluster-127
       aws_default_region:
         from_secret: aws_region
       aws_access_key_id:
@@ -381,9 +380,7 @@ trigger:
     - refs/tags/**

 depends_on:
-  - e2e-kubernetes-1.24-calico
   - e2e-kubernetes-1.25-calico
-  - e2e-kubernetes-1.26-calico

 steps:
   - name: init
@@ -483,8 +480,6 @@ trigger:
     - refs/tags/**

 depends_on:
-  - e2e-kubernetes-1.24-calico
-  - e2e-kubernetes-1.25-calico
   - e2e-kubernetes-1.26-calico

 steps:
@@ -583,13 +578,11 @@ trigger:
     - refs/tags/**

 depends_on:
-  - e2e-kubernetes-1.24-calico
-  - e2e-kubernetes-1.25-calico
-  - e2e-kubernetes-1.26-calico
+  - e2e-kubernetes-1.27-calico

 steps:
   - name: init
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.27.1
+    image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0
     pull: always
     volumes:
       - name: shared
@@ -599,7 +592,7 @@ steps:
       action: custom-cluster-127
       pipeline_id: cluster-127-cilium
       local_kind_config_path: katalog/tests/calico/resources/kind-config
-      cluster_version: "1.27.0"
+      cluster_version: "1.27.1"
       instance_path: /shared
       aws_default_region:
         from_secret: aws_region
@@ -628,11 +621,11 @@ steps:
         path: /shared
     depends_on: [init]
     commands:
-      - export KUBECONFIG=/shared/kube/kubeconfig-124
+      - export KUBECONFIG=/shared/kube/kubeconfig-127
       - bats -t katalog/tests/cilium/cilium.sh

   - name: destroy
-    image: quay.io/sighup/e2e-testing-drone-plugin:v1.27.1
+    image: quay.io/sighup/e2e-testing-drone-plugin:v2.0.0
     pull: always
     depends_on: [test]
     settings:
@@ -670,9 +663,9 @@ kind: pipeline
 type: docker

 depends_on:
-  - e2e-kubernetes-1.24-calico
   - e2e-kubernetes-1.25-calico
   - e2e-kubernetes-1.26-calico
+  - e2e-kubernetes-1.27-calico
   - e2e-kubernetes-1.25-cilium
   - e2e-kubernetes-1.26-cilium
   - e2e-kubernetes-1.27-cilium
diff --git a/README.md b/README.md
index f14ef5a..a87dd42 100644
--- a/README.md
+++ b/README.md
@@ -29,9 +29,9 @@ Kubernetes Fury Networking provides the following packages:

 | Package | Version | Description |
 | -------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [calico](katalog/calico) | `3.26.1` | [Calico][calico-page] CNI Plugin. For cluster with `< 50` nodes. |
+| [calico](katalog/calico) | `3.26.3` | [Calico][calico-page] CNI Plugin. For cluster with `< 50` nodes. |
 | [cilium](katalog/cilium) | `1.14.3` | [Cilium][cilium-page] CNI Plugin. For cluster with `< 200` nodes. |
-| [tigera](katalog/tigera) | `1.30.4` | [Tigera Operator][tigera-page], a Kubernetes Operator for Calico, provides pre-configured installations for on-prem and for EKS in policy-only mode. |
+| [tigera](katalog/tigera) | `1.30.7` | [Tigera Operator][tigera-page], a Kubernetes Operator for Calico, provides pre-configured installations for on-prem and for EKS in policy-only mode. |
 | [ip-masq](katalog/ip-masq) | `2.8.0` | The `ip-masq-agent` configures iptables rules to implement IP masquerading functionality |

 > The resources in these packages are going to be deployed in `kube-system` namespace. Except for the operator.
@@ -60,7 +60,7 @@ Check the [compatibility matrix][compatibility-matrix] for additional informatio

 ### Deployment

-> ⚠️ Please notice that the Calico packages is for cluster with less the 50 nodes. If your cluster has more than 50 nodes, you'll need to switch to [Calico + Typha](https://projectcalico.docs.tigera.io/archive/v3.23/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-more-than-50-nodes) or to the [Tigera Operator](katalog/tigera/README.md).
+> ⚠️ Please note that the Calico package is for clusters with fewer than 50 nodes. If your cluster has more than 50 nodes, you'll need to switch to [Calico + Typha](https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises) or to the [Tigera Operator](katalog/tigera/README.md).

 1. List the packages you want to deploy and their version in a `Furyfile.yml`

diff --git a/docs/COMPATIBILITY_MATRIX.md b/docs/COMPATIBILITY_MATRIX.md
index f0727db..7184aa8 100644
--- a/docs/COMPATIBILITY_MATRIX.md
+++ b/docs/COMPATIBILITY_MATRIX.md
@@ -8,7 +8,7 @@
 | v1.12.1 | :white_check_mark: | :white_check_mark: | | |
 | v1.12.2 | :white_check_mark: | :white_check_mark: | | |
 | v1.14.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: | |
-| v1.15.0 | | | :white_check_mark: | :white_check_mark: |
+| v1.15.0 | | :white_check_mark: | :white_check_mark: | :white_check_mark: |

 :white_check_mark: Compatible

diff --git a/docs/releases/v1.15.0.md b/docs/releases/v1.15.0.md
index 7136ce1..1202a27 100644
--- a/docs/releases/v1.15.0.md
+++ b/docs/releases/v1.15.0.md
@@ -8,10 +8,10 @@ This minor release updates some components and adds support to Kubernetes 1.27.

 | Component | Supported Version | Previous Version |
 | ----------------- | -------------------------------------------------------------------------------- | ---------------- |
-| `calico` | [`v3.26.1`](https://projectcalico.docs.tigera.io/archive/v3.26/release-notes/) | `v3.25.0` |
+| `calico` | [`v3.26.3`](https://projectcalico.docs.tigera.io/archive/v3.26/release-notes/) | `v3.26.1` |
 | `cilium` | [`v1.14.3`](https://github.com/cilium/cilium/releases/tag/v1.14.3) | `v1.13.1` |
 | `ip-masq` | [`v2.8.0`](https://github.com/kubernetes-sigs/ip-masq-agent/releases/tag/v2.5.0) | No update |
-| `tigera-operator` | [`v1.30.4`](https://github.com/tigera/operator/releases/tag/v1.30.4) | `v1.29.0` |
+| `tigera-operator` | [`v1.30.7`](https://github.com/tigera/operator/releases/tag/v1.30.7) | `v1.30.4` |

 > Please refer the individual release notes to get detailed information on each release.
diff --git a/katalog/calico/MAINTENANCE.md b/katalog/calico/MAINTENANCE.md
index 3d1f62f..5c7e953 100644
--- a/katalog/calico/MAINTENANCE.md
+++ b/katalog/calico/MAINTENANCE.md
@@ -20,12 +20,10 @@ Compare the `deploy.yaml` file with the downloaded `calico-${CALICO_VERSION}` fi
 3. Update the `kustomization.yaml` file with the right image versions.

 ```bash
-export CALICO_IMAGE_TAG=v3.26.1
+export CALICO_IMAGE_TAG=v3.26.3
 kustomize edit set image docker.io/calico/kube-controllers=registry.sighup.io/fury/calico/kube-controllers:${CALICO_IMAGE_TAG}
 kustomize edit set image docker.io/calico/cni=registry.sighup.io/fury/calico/cni:${CALICO_IMAGE_TAG}
 kustomize edit set image docker.io/calico/node=registry.sighup.io/fury/calico/node:${CALICO_IMAGE_TAG}
-# Not present anymore in 3.23:
-# kustomize edit set image docker.io/calico/pod2daemon-flexvol=registry.sighup.io/fury/calico/pod2daemon-flexvol:${CALICO_IMAGE_TAG}
 ```

 > ⚠️ Remember to check if images have been added to or dropped from upstream.
@@ -36,12 +34,12 @@

 The resources needed to provide monitoring features are not included in the default upstream manifests. There are some additional steps to perform.

-See for details. Note that we are adding an environment variable to the DaemonSet instead of modifing the `default` instance of the `felixconfigurations.crd.projectcalico.org` CRD as the docs say. Modifing the CRD is not possible using Kustomize patches.
+See for details. Note that we are adding an environment variable to the DaemonSet instead of modifying the `default` instance of the `felixconfigurations.crd.projectcalico.org` CRD as the docs say. Modifying the CRD is not possible using Kustomize patches.

 1. Download the dashboard from upstream:

 ```bash
-export CALICO_VERSION=3.26.1
+export CALICO_VERSION=3.26.3
 # ⚠️ Assuming $PWD == root of the project
 # We take the `felix-dashboard.json` from the downloaded yaml, we are not deploying `typha`, so we don't need its dashboard.
 curl -L https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/grafana-dashboards.yaml | yq '.data["felix-dashboard.json"]' | sed 's/calico-demo-prometheus/prometheus/g' | jq > ./monitoring/dashboards/felix-dashboard.json
diff --git a/katalog/calico/README.md b/katalog/calico/README.md
index ee05d36..1e2bf3e 100644
--- a/katalog/calico/README.md
+++ b/katalog/calico/README.md
@@ -7,6 +7,7 @@ Calico supports a broad range of platforms including Kubernetes, OpenShift, Dock

 > For more information about Calico refer to [calico documentation][calico-documentation]

+## Components and features
 The deployment of Calico consists of a daemon set running on every node (including the control-plane) and a controller that implements:

 - *policy controller* watches network policies and programs Calico policies.
@@ -20,18 +21,18 @@ The deployment of Calico consists of a daemon set running on every node (includi
 ## Image repository and tag

 - calico images:
-  - `calico/kube-controllers:v3.25.0`.
-  - `calico/cni:v3.25.0`.
-  - `calico/node:v3.25.0`.
+  - `calico/kube-controllers:v3.26.3`.
+  - `calico/cni:v3.26.3`.
+  - `calico/node:v3.26.3`.
 - calico repositories:
-  - [https://github.com/projectcalico/kube-controllers](https://github.com/projectcalico/kube-controllers).
-  - [https://github.com/projectcalico/cni-plugin](https://github.com/projectcalico/cni-plugin).
-  - [https://github.com/projectcalico/node](https://github.com/projectcalico/node).
+  - [https://github.com/projectcalico/kube-controllers](https://github.com/projectcalico/calico/tree/master/kube-controllers).
+  - [https://github.com/projectcalico/cni-plugin](https://github.com/projectcalico/calico/tree/master/cni-plugin).
+  - [https://github.com/projectcalico/node](https://github.com/projectcalico/calico/tree/master/node).

 ## Requirements

-- Tested with Kubernetes >= `1.23.X`.
-- Tested with Kustomize >= `v3.3.X`.
+- Tested with Kubernetes >= `1.25.X`.
+- Tested with Kustomize >= `v3.5.3`.
 - Prometheus Operator, optional if you want to have metrics.

 ## Configuration
diff --git a/katalog/calico/deploy.yaml b/katalog/calico/deploy.yaml
index 1b45e7a..9e0e4e5 100644
--- a/katalog/calico/deploy.yaml
+++ b/katalog/calico/deploy.yaml
@@ -4643,7 +4643,7 @@ spec:
         # It can be deleted if this is a fresh installation, or if you have already
         # upgraded to use calico-ipam.
         - name: upgrade-ipam
-          image: docker.io/calico/cni:v3.26.1
+          image: docker.io/calico/cni:v3.26.3
           imagePullPolicy: IfNotPresent
           command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
           envFrom:
@@ -4671,7 +4671,7 @@ spec:
         # This container installs the CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: docker.io/calico/cni:v3.26.1
+          image: docker.io/calico/cni:v3.26.3
           imagePullPolicy: IfNotPresent
           command: ["/opt/cni/bin/install"]
           envFrom:
@@ -4714,7 +4714,7 @@ spec:
         # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
         # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
         - name: "mount-bpffs"
-          image: docker.io/calico/node:v3.26.1
+          image: docker.io/calico/node:v3.26.3
           imagePullPolicy: IfNotPresent
           command: ["calico-node", "-init", "-best-effort"]
           volumeMounts:
@@ -4740,7 +4740,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: docker.io/calico/node:v3.26.1
+          image: docker.io/calico/node:v3.26.3
           imagePullPolicy: IfNotPresent
           envFrom:
             - configMapRef:
@@ -4957,7 +4957,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: calico-kube-controllers
-          image: docker.io/calico/kube-controllers:v3.26.1
+          image: docker.io/calico/kube-controllers:v3.26.3
           imagePullPolicy: IfNotPresent
           env:
             # Choose which controllers to run.
diff --git a/katalog/calico/kustomization.yaml b/katalog/calico/kustomization.yaml
index 65c28eb..0cf7f1d 100644
--- a/katalog/calico/kustomization.yaml
+++ b/katalog/calico/kustomization.yaml
@@ -10,15 +10,13 @@ namespace: kube-system
 images:
 - name: docker.io/calico/cni
   newName: registry.sighup.io/fury/calico/cni
-  newTag: v3.26.1
+  newTag: v3.26.3
 - name: docker.io/calico/kube-controllers
   newName: registry.sighup.io/fury/calico/kube-controllers
-  newTag: v3.26.1
+  newTag: v3.26.3
 - name: docker.io/calico/node
   newName: registry.sighup.io/fury/calico/node
-  newTag: v3.26.1
-- name: docker.io/calico/pod2daemon-flexvol
-  newName: registry.sighup.io/fury/calico/pod2daemon-flexvol
+  newTag: v3.26.3

 # Resources needed for Monitoring
 resources:
diff --git a/katalog/tigera/MAINTENANCE.md b/katalog/tigera/MAINTENANCE.md
index 577de56..d79bcfe 100644
--- a/katalog/tigera/MAINTENANCE.md
+++ b/katalog/tigera/MAINTENANCE.md
@@ -11,7 +11,7 @@ To update the YAML file, run the following command:

 ```bash
 # assuming katalog/tigera is the root of the repository
-export CALICO_VERSION="3.26.1"
+export CALICO_VERSION="3.26.3"
 curl "https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/tigera-operator.yaml" --output operator/tigera-operator.yaml
 ```

@@ -28,7 +28,7 @@ To download the default configuration from upstream and update the file use the

 ```bash
 # assuming katalog/tigera is the root of the repository
-export CALICO_VERSION="3.26.1"
+export CALICO_VERSION="3.26.3"
 curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/custom-resources.yaml --output on-prem/custom-resources.yaml
 ```

@@ -50,7 +50,7 @@ To get the dashboards you can use the following commands:

 ```bash
 # ⚠️ Assuming $PWD == root of the project
-export CALICO_VERSION=3.26.1
+export CALICO_VERSION="3.26.3"
 # we split the upstream file and store only the json files
 curl -L https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/grafana-dashboards.yaml | yq '.data["felix-dashboard.json"]' | sed 's/calico-demo-prometheus/prometheus/g' | jq > ./on-prem/monitoring/dashboards/felix-dashboard.json
 curl -L https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/grafana-dashboards.yaml | yq '.data["typha-dashboard.json"]' | sed 's/calico-demo-prometheus/prometheus/g' | jq > ./on-prem/monitoring/dashboards/typa-dashboard.json
diff --git a/katalog/tigera/operator/tigera-operator.yaml b/katalog/tigera/operator/tigera-operator.yaml
index 0bbea6f..8b4f010 100644
--- a/katalog/tigera/operator/tigera-operator.yaml
+++ b/katalog/tigera/operator/tigera-operator.yaml
@@ -21486,7 +21486,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: tigera-operator
-          image: quay.io/tigera/operator:v1.30.4
+          image: quay.io/tigera/operator:v1.30.7
           imagePullPolicy: IfNotPresent
           command:
             - operator
@@ -21504,7 +21504,7 @@ spec:
             - name: OPERATOR_NAME
               value: "tigera-operator"
             - name: TIGERA_OPERATOR_INIT_IMAGE_VERSION
-              value: v1.30.4
+              value: v1.30.7
           envFrom:
             - configMapRef:
                 name: kubernetes-services-endpoint