diff --git a/CHANGELOG.yml b/CHANGELOG.yml
index 888c804553..e125edead2 100644
--- a/CHANGELOG.yml
+++ b/CHANGELOG.yml
@@ -27,6 +27,14 @@ items:
- version: 2.22.3
date: (TBD)
notes:
+ - type: bugfix
+ title: The Telepresence Helm chart could not be used as a dependency in another chart.
+ body: >-
+        The JSON schema validation introduced in Telepresence 2.22.0 was faulty: it rejected the `global`
+        object. Helm uses this object to pass arbitrary values from a parent chart to its dependencies, so
+        a chart that is intended to be used as a dependency must allow it.
+ docs: https://github.com/telepresenceio/telepresence/issues/3833
- type: bugfix
title: Recreating namespaces was not possible when using a dynamically namespaced Traffic Manager
body: >-
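
To make the scenario concrete, here is a minimal sketch of the kind of parent chart the fix is about: a hypothetical Chart.yaml that pulls in telepresence-oss as a dependency. The chart name, version, and repository URL are illustrative placeholders, not values taken from this repository.

```yaml
# Hypothetical parent chart that uses telepresence-oss as a subchart.
apiVersion: v2
name: my-platform                 # illustrative name
description: Parent chart that deploys Telepresence alongside other components
version: 1.0.0
dependencies:
  - name: telepresence-oss
    version: 2.22.3
    repository: https://charts.example.com   # placeholder chart repository
    condition: telepresence-oss.enabled
```
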
diff --git a/charts/telepresence-oss/values.schema.yaml b/charts/telepresence-oss/values.schema.yaml
index b07b91d9c8..0857898898 100644
--- a/charts/telepresence-oss/values.schema.yaml
+++ b/charts/telepresence-oss/values.schema.yaml
@@ -251,6 +251,10 @@ properties:
items:
$ref: "#/$defs/subject"
+ global:
+ type: object
+ additionalProperties: true
+
grpc:
type: object
additionalProperties: false
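
The `global` property added above is intentionally open (`additionalProperties: true`): Helm copies a parent chart's `global` values into every subchart, so whatever a parent puts there reaches telepresence-oss and previously failed its schema validation. A hypothetical parent values.yaml that exercises this could look as follows; the keys under `global` are arbitrary, and `clientRbac` mirrors the integration test later in this change.

```yaml
# Hypothetical values.yaml of a parent chart. Helm propagates "global" to all
# subcharts, so the telepresence-oss schema must tolerate arbitrary keys in it.
global:
  imageRegistry: registry.example.com   # illustrative key
  labels:
    team: platform
telepresence-oss:
  clientRbac:
    create: true
```
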
diff --git a/docs/release-notes.md b/docs/release-notes.md
index 5bc750b760..a27f82e1b0 100644
--- a/docs/release-notes.md
+++ b/docs/release-notes.md
@@ -2,6 +2,12 @@
[comment]: # (Code generated by relnotesgen. DO NOT EDIT.)
# Telepresence Release Notes
## Version 2.22.3
+## 
+[The Telepresence Helm chart could not be used as a dependency in another chart.](https://github.com/telepresenceio/telepresence/issues/3833)
+
+
+The JSON schema validation introduced in Telepresence 2.22.0 was faulty: it rejected the `global` object. Helm uses this object to pass arbitrary values from a parent chart to its dependencies, so a chart that is intended to be used as a dependency must allow it.
+
+
## 
[Recreating namespaces was not possible when using a dynamically namespaced Traffic Manager](https://github.com/telepresenceio/telepresence/issues/3831)
diff --git a/docs/release-notes.mdx b/docs/release-notes.mdx
index 9c7e521ad9..0665f99bb4 100644
--- a/docs/release-notes.mdx
+++ b/docs/release-notes.mdx
@@ -8,6 +8,12 @@ import { Note, Title, Body } from '@site/src/components/ReleaseNotes'
# Telepresence Release Notes
## Version 2.22.3
+
+The Telepresence Helm chart could not be used as a dependency in another chart.
+
+The JSON schema validation introduced in Telepresence 2.22.0 was faulty: it rejected the `global` object. Helm uses this object to pass arbitrary values from a parent chart to its dependencies, so a chart that is intended to be used as a dependency must allow it.
+
+
Recreating namespaces was not possible when using a dynamically namespaced Traffic Manager
diff --git a/integration_test/install_test.go b/integration_test/install_test.go
index f308d2645a..6d566aeba8 100644
--- a/integration_test/install_test.go
+++ b/integration_test/install_test.go
@@ -1,10 +1,14 @@
package integration_test
import (
+ "archive/tar"
+ "compress/gzip"
"context"
"fmt"
+ "io"
"os"
"path/filepath"
+ "runtime"
"strings"
"time"
@@ -330,3 +334,112 @@ func ensureTrafficManager(ctx context.Context, kc *k8s.Cluster) error {
k8s.GetManagerNamespace(ctx),
&helm.Request{Type: helm.Install})
}
+
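+// unTgz extracts the gzip-compressed tar archive srcTgz into the directory
+// dstPath, creating directories and regular files as it goes.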
+func unTgz(ctx context.Context, srcTgz, dstPath string) error {
+ rd, err := os.Open(srcTgz)
+ if err != nil {
+ return err
+ }
+ defer rd.Close()
+
+ err = dos.MkdirAll(ctx, dstPath, 0o755)
+ if err != nil {
+ return err
+ }
+
+ zrd, err := gzip.NewReader(rd)
+ if err != nil {
+ return err
+ }
+ src := tar.NewReader(zrd)
+ for {
+ header, err := src.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ dst := dstPath + "/" + header.Name
+ mode := os.FileMode(header.Mode)
+ switch header.Typeflag {
+ case tar.TypeDir:
+ err = dos.MkdirAll(ctx, dst, mode)
+ if err != nil {
+ return err
+ }
+ case tar.TypeReg:
+ err = dos.MkdirAll(ctx, filepath.Dir(dst), 0o755)
+ if err != nil {
+ return err
+ }
+ w, err := dos.OpenFile(ctx, dst, os.O_CREATE|os.O_WRONLY, mode)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(w, src)
+ _ = w.Close()
+ if err != nil {
+ return err
+ }
+ default:
+			return fmt.Errorf("unable to untar entry of type %c in file %s", header.Typeflag, header.Name)
+ }
+ }
+ return nil
+}
+
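+// Test_HelmSubChart verifies that the telepresence-oss chart can be used as a
+// dependency (subchart) of a parent chart, and that a "global" section in the
+// parent's values passes the chart's JSON schema validation.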
+func (is *installSuite) Test_HelmSubChart() {
+ if runtime.GOOS == "windows" || !(is.ManagerVersion().EQ(version.Structured) && is.ClientVersion().EQ(version.Structured)) {
+ is.T().Skip("Not part of compatibility tests. Need forward slashes in path, and PackageHelmChart assumes current version.")
+ }
+ ctx := is.Context()
+ require := is.Require()
+
+ t := is.T()
+ subChart, err := is.PackageHelmChart(ctx)
+ require.NoError(err)
+
+ base := t.TempDir()
+ require.NoError(unTgz(ctx, subChart, filepath.Join(base, "charts")))
+
+ chart := fmt.Sprintf(`apiVersion: v2
+dependencies:
+ - name: telepresence-oss
+ registry: ../charts/telepresence-oss
+ version: %s
+ condition: enabled
+description: Helm chart to deploy telepresence
+name: parent
+version: 1.0.0`, is.ClientVersion())
+
+ vals := is.GetSetArgsForHelm(ctx, map[string]any{
+ "global": map[string]any{
+ "some-string": "value",
+ "some-obj": map[string]any{
+ "foo": "bar",
+ },
+ "some-bool": true,
+ },
+ "telepresence-oss": map[string]any{
+ "clientRbac": map[string]any{
+ "create": true,
+ "subjects": []rbac.Subject{
+ {
+ Kind: "ServiceAccount",
+ Name: itest.TestUser,
+ Namespace: is.ManagerNamespace(),
+ },
+ },
+ },
+ },
+ }, false)
+ require.NoError(dos.WriteFile(ctx, filepath.Join(base, "Chart.yaml"), []byte(chart), 0o644))
+
+ vals = append([]string{"template", "parent", base, "-n", is.ManagerNamespace()}, vals...)
+ so, err := itest.Output(ctx, "helm", vals...)
+ require.NoError(err)
+ require.Contains(so, "# Source: parent/charts/telepresence-oss/templates/clientRbac/connect.yaml")
+ require.Contains(so, "name: "+itest.TestUser)
+}
diff --git a/integration_test/itest/cluster.go b/integration_test/itest/cluster.go
index 302c1dad57..cfd256cff6 100644
--- a/integration_test/itest/cluster.go
+++ b/integration_test/itest/cluster.go
@@ -195,6 +195,21 @@ func (s *cluster) Initialize(ctx context.Context) context.Context {
s.agentVersion = s.managerVersion
}
+	// We cannot use t.TempDir() here, because it will not mount correctly in
+	// rancher-desktop and docker-desktop (unless they are configured to allow
+	// mounts directly from /tmp). Instead, we use a temp dir under the build
+	// output directory, which is believed to be both mountable and writable.
+	// The location can be overridden with the TELEPRESENCE_TEMP_DIR environment variable.
+ tempDir := dos.Getenv(ctx, "TELEPRESENCE_TEMP_DIR")
+ if tempDir == "" {
+ tempDir = filepath.Join(BuildOutput(ctx), "tmp")
+ }
+ _ = dos.RemoveAll(ctx, tempDir)
+ require.NoError(t, dos.MkdirAll(ctx, tempDir, 0o777))
+ ctx = withTempDirBase(ctx, &tempDirBase{tempDir: tempDir})
+ t.Cleanup(func() {
+ _ = os.RemoveAll(tempDir)
+ })
+
registry := dos.Getenv(ctx, "TELEPRESENCE_REGISTRY")
if registry == "" {
registry = "ghcr.io/telepresenceio"
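
For illustration only: an environment that cannot mount the default location can point the suite elsewhere through that variable. The snippet below is a hypothetical GitHub Actions step, not part of this change, and the test command is an assumption.

```yaml
# Hypothetical CI step: direct the integration tests at a mountable, writable temp dir.
- name: Run integration tests
  env:
    TELEPRESENCE_TEMP_DIR: ${{ github.workspace }}/build-output/tmp
  run: go test ./integration_test/...
```
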
@@ -290,11 +305,8 @@ func (s *cluster) Initialize(ctx context.Context) context.Context {
s.ensureQuit(ctx)
s.ensureNoManager(ctx)
- _ = Run(ctx, "kubectl", "delete", "ns", "-l", AssignPurposeLabel)
_ = Run(ctx, "kubectl", "delete", "-f", filepath.Join("testdata", "k8s", "client_rbac.yaml"))
- _ = Run(ctx, "kubectl", "delete", "ns", "-l", AssignPurposeLabel)
- _ = Run(ctx, "kubectl", "delete", "pv", "-l", AssignPurposeLabel)
- _ = Run(ctx, "kubectl", "delete", "storageclass", "-l", AssignPurposeLabel)
+ _ = Run(ctx, "kubectl", "delete", "all", "-l", AssignPurposeLabel)
return ctx
}
@@ -354,10 +366,7 @@ func (s *cluster) tearDown(ctx context.Context) {
if s.kubeConfig != "" {
ctx = WithWorkingDir(ctx, GetOSSRoot(ctx))
_ = Run(ctx, "kubectl", "delete", "-f", filepath.Join("testdata", "k8s", "client_rbac.yaml"))
- _ = Run(ctx, "kubectl", "delete", "--wait=false", "ns", "-l", AssignPurposeLabel)
- _ = Run(ctx, "kubectl", "delete", "--wait=false", "pv", "-l", AssignPurposeLabel)
- _ = Run(ctx, "kubectl", "delete", "--wait=false", "storageclass", "-l", AssignPurposeLabel)
- _ = Run(ctx, "kubectl", "delete", "--wait=false", "mutatingwebhookconfigurations", "-l", AssignPurposeLabel)
+ _ = Run(ctx, "kubectl", "delete", "--wait=false", "all", "-l", AssignPurposeLabel)
}
}
@@ -439,7 +448,7 @@ func (s *cluster) withBasicConfig(c context.Context, t *testing.T) context.Conte
require.NoError(t, err)
configYamlStr := string(configYaml)
- configDir := t.TempDir()
+ configDir := TempDir(c)
c = filelocation.WithAppUserConfigDir(c, configDir)
c, err = SetConfig(c, configDir, configYamlStr)
require.NoError(t, err)
@@ -1071,7 +1080,7 @@ func WithConfig(c context.Context, modifierFunc func(config client.Config)) cont
configYaml, err := configCopy.(client.Config).MarshalYAML()
require.NoError(t, err)
configYamlStr := string(configYaml)
- configDir, err := os.MkdirTemp(t.TempDir(), "config")
+ configDir, err := os.MkdirTemp(TempDir(c), "config")
require.NoError(t, err)
c, err = SetConfig(c, configDir, configYamlStr)
require.NoError(t, err)
diff --git a/integration_test/itest/namespace.go b/integration_test/itest/namespace.go
index dfb01c0715..3f265bede9 100644
--- a/integration_test/itest/namespace.go
+++ b/integration_test/itest/namespace.go
@@ -103,7 +103,14 @@ func (s *nsPair) setup(ctx context.Context) bool {
return false
}
err := Kubectl(ctx, s.Namespace, "apply", "-f", filepath.Join(GetOSSRoot(ctx), "testdata", "k8s", "client_sa.yaml"))
- assert.NoError(t, err, "failed to create connect ServiceAccount")
+ if assert.NoError(t, err, "failed to create connect ServiceAccount") {
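+		// Apply the namespace-templated RBAC from testdata/k8s/client_rancher.goyaml,
+		// which lets the test ServiceAccount read cluster nodes.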
+ db, err := ReadTemplate(ctx, filepath.Join("testdata", "k8s", "client_rancher.goyaml"), map[string]string{
+ "ManagerNamespace": s.Namespace,
+ })
+ if assert.NoError(t, err) {
+ assert.NoError(t, Kubectl(dos.WithStdin(ctx, bytes.NewReader(db)), s.Namespace, "apply", "-f", "-"))
+ }
+ }
return !t.Failed()
}
diff --git a/integration_test/itest/tempdir.go b/integration_test/itest/tempdir.go
new file mode 100644
index 0000000000..079581d940
--- /dev/null
+++ b/integration_test/itest/tempdir.go
@@ -0,0 +1,37 @@
+package itest
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sync/atomic"
+
+ "github.com/stretchr/testify/require"
+)
+
+type tempDirBase struct {
+ tempDir string
+ tempDirSeq uint64
+}
+
+type tempDirBaseKey struct{}
+
+func withTempDirBase(ctx context.Context, td *tempDirBase) context.Context {
+ return context.WithValue(ctx, tempDirBaseKey{}, td)
+}
+
+// TempDir returns a temporary directory for the test to use. Each call
+// returns a unique directory; if the directory cannot be created, TempDir
+// terminates the test by calling Fatal.
+//
+// When a tempDirBase has been stored in the context (see withTempDirBase),
+// the directory is created under that base and is removed together with it
+// when the base is cleaned up; otherwise TempDir falls back to t.TempDir().
+func TempDir(ctx context.Context) string {
+ t := getT(ctx)
+ if td, ok := ctx.Value(tempDirBaseKey{}).(*tempDirBase); ok {
+ seq := atomic.AddUint64(&td.tempDirSeq, 1)
+ dir := fmt.Sprintf("%s%c%03d", td.tempDir, os.PathSeparator, seq)
+ require.NoError(t, os.Mkdir(dir, 0o777))
+ return dir
+ }
+ return t.TempDir()
+}
diff --git a/integration_test/testdata/k8s/client_rancher.goyaml b/integration_test/testdata/k8s/client_rancher.goyaml
new file mode 100644
index 0000000000..6d54d82b38
--- /dev/null
+++ b/integration_test/testdata/k8s/client_rancher.goyaml
@@ -0,0 +1,27 @@
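+# Grants the telepresence-test-developer ServiceAccount read access to cluster
+# nodes. Applied by nsPair.setup with the namespace templated in.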
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rancher-inspect
+ labels:
+ purpose: tp-cli-testing
+rules:
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rancher-inspect
+ labels:
+ purpose: tp-cli-testing
+subjects:
+ - kind: ServiceAccount
+ name: telepresence-test-developer
+ namespace: {{ .ManagerNamespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ name: rancher-inspect
+ kind: ClusterRole