
Commit f5d237a

Merge branch 'release/v2.22' into release/v2

2 parents: cc4366c + c83926d

20 files changed: +432 -63 lines

CHANGELOG.yml

+37

@@ -36,6 +36,43 @@ items:
           Similar to an `ingest`, a `wiretap` will always enforce read-only status on all volume mounts, and since that
           makes the `wiretap` completely read-only, there's no limit to how many simultaneous wiretaps that can be
           served. In fact, a `wiretap` and an `intercept` on the same port can run simultaneously.
+  - version: 2.22.3
+    date: 2025-04-08
+    notes:
+      - type: change
+        title: The Windows install script will now install Telepresence to "%ProgramFiles%\telepresence"
+        body: |-
+          Telepresence is now installed into "%ProgramFiles%\telepresence" instead of "C:\telepresence".
+          The directory and the Path entry for `C:\telepresence` are no longer used and should be removed.
+      - type: bugfix
+        title: The Windows install script didn't handle upgrades properly
+        body: |-
+          The following changes were made:
+
+          - The script now requires administrator privileges
+          - The Path environment is only updated when there's a need for it
+        docs: https://github.com/telepresenceio/telepresence/issues/3827
+      - type: bugfix
+        title: The Telepresence Helm chart could not be used as a dependency in another chart.
+        body: >-
+          The JSON schema validation implemented in Telepresence 2.22.0 had a defect: it rejected the `global` object.
+          This object, a Helm-managed construct, facilitates the propagation of arbitrary configurations from a parent
+          chart to its dependencies. Consequently, charts intended for dependency use must permit the presence of the
+          `global` object.
+        docs: https://github.com/telepresenceio/telepresence/issues/3833
+      - type: bugfix
+        title: Recreating namespaces was not possible when using a dynamically namespaced Traffic Manager
+        body: >-
+          A shared informer was sometimes reused when namespaces were removed and then later added again, leading
+          to errors like "handler ... was not added to shared informer because it has stopped already".
+        docs: https://github.com/telepresenceio/telepresence/issues/3831
+      - type: bugfix
+        title: Single label name DNS lookups didn't work unless at least one traffic-agent was installed
+        body: >-
+          A problem with incorrect handling of single label names in the traffic-manager's DNS resolver was fixed. The
+          problem would cause lookups like `curl echo` to fail, even though telepresence was connected to a namespace
+          containing an "echo" service, unless at least one of the workloads in the connected namespace had a
+          traffic-agent.
   - version: 2.22.2
     date: 2025-03-28
     notes:

charts/telepresence-oss/values.schema.yaml

+4

@@ -251,6 +251,10 @@ properties:
       items:
         $ref: "#/$defs/subject"

+  global:
+    type: object
+    additionalProperties: true
+
   grpc:
     type: object
     additionalProperties: false
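
This is the crux of the Helm-dependency fix: the chart's values schema disallows unknown top-level properties, so the `global` section that Helm injects for every subchart failed validation until it was declared explicitly with `additionalProperties: true`. The snippet below is a hypothetical reproduction of that behaviour using the github.com/santhosh-tekuri/jsonschema/v5 validator (Helm's own validation stack is different), and the `logLevel` and `imageRegistry` property names are invented for the example.

```go
// Hypothetical illustration, not part of the telepresence code base: a schema with
// "additionalProperties": false at the top level rejects Helm's injected "global"
// section unless "global" is declared, which is exactly what the chart fix adds.
package main

import (
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	schema := `{
	  "type": "object",
	  "additionalProperties": false,
	  "properties": {
	    "logLevel": {"type": "string"},
	    "global":   {"type": "object", "additionalProperties": true}
	  }
	}`
	sch, err := jsonschema.CompileString("values.schema.json", schema)
	if err != nil {
		panic(err)
	}

	// Roughly what a parent chart passes down to the subchart: its own values plus "global".
	values := map[string]any{
		"logLevel": "debug",
		"global":   map[string]any{"imageRegistry": "registry.example.com"},
	}
	if err := sch.Validate(values); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	// Without the "global" entry in the schema, this document would be rejected.
	fmt.Println("accepted")
}
```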

cmd/traffic/cmd/manager/mutator/watcher.go

+6 -3

@@ -442,11 +442,11 @@ func (c *configWatcher) namespacesChangeWatcher(ctx context.Context) error {
 	dlog.Debugf(ctx, "Adding watchers for namespace %s", ns)
 	iwc, err := c.startInformers(ctx, ns)
 	if err != nil {
-		dlog.Errorf(ctx, "Failed to create watchers namespace %s: %v", ns, err)
+		dlog.Errorf(ctx, "Failed to create watchers for namespace %s: %v", ns, err)
 		return nil, true
 	}
 	if err = c.startWatchers(ctx, iwc); err != nil {
-		dlog.Errorf(ctx, "Failed to start watchers namespace %s: %v", ns, err)
+		dlog.Errorf(ctx, "Failed to start watchers for namespace %s: %v", ns, err)
 		return nil, true
 	}
 	return iwc, false
@@ -475,7 +475,10 @@ func (c *configWatcher) DeleteMapsAndRolloutAll(ctx context.Context) {
 }

 func (c *configWatcher) deleteMapsAndRolloutNS(ctx context.Context, ns string, iwc *informersWithCancel) {
-	defer c.informers.Delete(ns)
+	defer func() {
+		c.informers.Delete(ns)
+		informer.DropFactory(ctx, ns)
+	}()

 	dlog.Debugf(ctx, "Cancelling watchers for namespace %s", ns)
 	for i := 0; i < watcherMax; i++ {
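
The important addition here is `informer.DropFactory(ctx, ns)`: when a namespace's watchers are cancelled, the cached shared-informer factory for that namespace must be forgotten as well, otherwise a namespace that is deleted and later recreated reuses informers that have already been stopped, which is what produced the "handler ... was not added to shared informer because it has stopped already" errors. The sketch below illustrates that lifecycle with plain client-go; it is a minimal illustration rather than the telepresence implementation, the `nsWatchers`, `addNamespace`, and `dropNamespace` names are invented for the example, and the error-on-stopped-informer behaviour assumes client-go >= 0.26.

```go
// Minimal sketch (not the telepresence code) of a per-namespace factory cache that
// must be purged when the namespace goes away, so a recreated namespace gets a
// fresh factory instead of one whose informers were already stopped.
package nsinformers

import (
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

type nsWatchers struct {
	client    kubernetes.Interface
	mu        sync.Mutex
	factories map[string]informers.SharedInformerFactory // keyed by namespace
	stops     map[string]chan struct{}
}

// addNamespace creates (or reuses) a factory for ns and registers a handler.
// With client-go >= 0.26, AddEventHandler returns an error such as
// "handler ... was not added to shared informer because it has stopped already"
// when the cached informer was already shut down, which is the symptom of the bug.
func (w *nsWatchers) addNamespace(ns string) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	f, ok := w.factories[ns]
	if !ok {
		f = informers.NewSharedInformerFactoryWithOptions(w.client, time.Hour, informers.WithNamespace(ns))
		w.factories[ns] = f
		w.stops[ns] = make(chan struct{})
	}
	inf := f.Core().V1().Services().Informer()
	if _, err := inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj any) { _ = obj.(*v1.Service) }, // react to services in ns
	}); err != nil {
		return err
	}
	f.Start(w.stops[ns])
	return nil
}

// dropNamespace stops the informers and, crucially, forgets the factory so a
// recreated namespace gets a fresh one (the role informer.DropFactory plays in the fix).
func (w *nsWatchers) dropNamespace(ns string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if stop, ok := w.stops[ns]; ok {
		close(stop)
	}
	delete(w.factories, ns)
	delete(w.stops, ns)
}
```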

cmd/traffic/cmd/manager/service.go

+47 -41

@@ -841,7 +841,7 @@ func (s *service) LookupDNS(ctx context.Context, request *rpc.DNSRequest) (respo
 		default:
 			result = rrs.String()
 		}
-		dlog.Debugf(ctx, "LookupDNS: %s %s -> %s", request.Name, qtn, result)
+		dlog.Debugf(ctx, "%s %s -> %s", request.Name, qtn, result)
 	}()
 }

@@ -873,58 +873,64 @@
 		dlog.Errorf(ctx, "AgentsLookupDNS %s %s: %v", request.Name, qtn, err)
 	} else if rCode != state.RcodeNoAgents {
 		if len(rrs) == 0 {
-			dlog.Tracef(ctx, "LookupDNS on agents: %s %s -> %s", request.Name, qtn, dns2.RcodeToString[rCode])
+			dlog.Tracef(ctx, "agents: %s %s -> %s", request.Name, qtn, dns2.RcodeToString[rCode])
 		} else {
-			dlog.Tracef(ctx, "LookupDNS on agents: %s %s -> %s", request.Name, qtn, rrs)
+			dlog.Tracef(ctx, "agents: %s %s -> %s", request.Name, qtn, rrs)
 		}
 	}
 }

 if rCode == state.RcodeNoAgents {
-	client := s.state.GetClient(sessionID)
-	name := request.Name
-	restoreName := false
-	nDots := 0
-	if client != nil {
-		for _, c := range name {
-			if c == '.' {
-				nDots++
-			}
-		}
-		if nDots == 1 && client.Namespace != tmNamespace {
-			noSearchDomain = client.Namespace + "."
-			name += noSearchDomain
-			restoreName = true
+	rrs, rCode = s.lookupFromManager(ctx, sessionID, qType, request.Name, noSearchDomain)
+}
+return dnsproxy.ToRPC(rrs, rCode)
+}
+
+func (s *service) lookupFromManager(ctx context.Context, sessionID tunnel.SessionID, qType uint16, qName, noSearchDomain string) (dnsproxy.RRs, int) {
+	name := qName
+	client := s.state.GetClient(sessionID)
+	tmNamespace := managerutil.GetEnv(ctx).ManagerNamespace
+	restoreName := false
+	nDots := 0
+	if client != nil {
+		for _, c := range name {
+			if c == '.' {
+				nDots++
 			}
 		}
-		dlog.Tracef(ctx, "LookupDNS on traffic-manager: %s", name)
-		rrs, rCode, err = dnsproxy.Lookup(ctx, qType, name, noSearchDomain)
-		if err != nil {
-			// Could still be x.y.<client namespace>, but let's avoid x.<cluster domain>.<client namespace> and x.<client-namespace>.<client namespace>
-			if client != nil && nDots > 1 && client.Namespace != tmNamespace && !strings.HasSuffix(name, s.dotClusterDomain) && !hasDomainSuffix(name, client.Namespace) {
-				name += client.Namespace + "."
-				restoreName = true
-				dlog.Debugf(ctx, "LookupDNS on traffic-manager: %s", name)
-				rrs, rCode, err = dnsproxy.Lookup(ctx, qType, name, noSearchDomain)
-			}
-			if err != nil {
-				dlog.Tracef(ctx, "LookupDNS on traffic-manager: %s %s -> %s %s", request.Name, qtn, dns2.RcodeToString[rCode], err)
-				return nil, err
-			}
+		if nDots == 1 && client.Namespace != tmNamespace {
+			name += client.Namespace + "."
+			restoreName = true
 		}
-		if len(rrs) == 0 {
-			dlog.Tracef(ctx, "LookupDNS on traffic-manager: %s %s -> %s", request.Name, qtn, dns2.RcodeToString[rCode])
-		} else {
-			if restoreName {
-				dlog.Tracef(ctx, "LookupDNS on traffic-manager: restore %s to %s", name, request.Name)
-				for _, rr := range rrs {
-					rr.Header().Name = request.Name
-				}
+	}
+	dlog.Tracef(ctx, "traffic-manager: %s", name)
+	qtn := dns2.TypeToString[qType]
+	rrs, rCode, err := dnsproxy.Lookup(ctx, qType, name, noSearchDomain)
+	if err == nil && rCode == dns2.RcodeNameError {
+		// Could still be x.y.<client namespace>, but let's avoid x.<cluster domain>.<client namespace> and x.<client-namespace>.<client namespace>
+		if client != nil && nDots > 1 && client.Namespace != tmNamespace && !strings.HasSuffix(name, s.dotClusterDomain) && !hasDomainSuffix(name, client.Namespace) {
+			name += client.Namespace + "."
+			restoreName = true
+			dlog.Debugf(ctx, "traffic-manager: %s", name)
+			rrs, rCode, err = dnsproxy.Lookup(ctx, qType, name, noSearchDomain)
+		}
+	}
+	if err != nil {
+		dlog.Errorf(ctx, "traffic-manager: %s %s -> %s %s", qName, qtn, dns2.RcodeToString[rCode], err)
+		return nil, dns2.RcodeServerFailure
+	}
+	if len(rrs) == 0 {
+		dlog.Tracef(ctx, "traffic-manager: %s %s -> %s", qName, qtn, dns2.RcodeToString[rCode])
+	} else {
+		if restoreName {
+			dlog.Tracef(ctx, "traffic-manager: restore %s to %s", name, qName)
+			for _, rr := range rrs {
+				rr.Header().Name = qName
 			}
-			dlog.Tracef(ctx, "LookupDNS on traffic-manager: %s %s -> %s", request.Name, qtn, rrs)
 		}
+		dlog.Tracef(ctx, "traffic-manager: %s %s -> %s", qName, qtn, rrs)
 	}
-	return dnsproxy.ToRPC(rrs, rCode)
+	return rrs, rCode
 }

 func (s *service) AgentLookupDNSResponse(ctx context.Context, response *rpc.DNSAgentResponse) (*empty.Empty, error) {
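
In essence, the new `lookupFromManager` helper qualifies a single-label name with the connected client's namespace before the traffic-manager resolves it, retries with the namespace suffix when the first answer is NXDOMAIN, and rewrites the answer records back to the name the client asked for. The snippet below sketches just the qualification and name-restoration part, using github.com/miekg/dns directly; `resolveInNamespace` and the resolver address are invented for the illustration, and the real code goes through telepresence's own `dnsproxy.Lookup` rather than a raw DNS exchange.

```go
// Minimal sketch of the single-label qualification idea, not the telepresence API.
package dnssketch

import (
	"strings"

	"github.com/miekg/dns"
)

// resolveInNamespace resolves qName for a client connected to namespace ns.
// A single-label name such as "echo" is qualified to "echo.<ns>." before the
// lookup; when an answer comes back, the record names are restored so the
// caller still sees the name it asked for.
func resolveInNamespace(qName, ns string, qType uint16, resolverAddr string) ([]dns.RR, int, error) {
	name := qName
	restore := false
	if strings.Count(strings.TrimSuffix(name, "."), ".") == 0 {
		name = name + "." + ns // e.g. "echo" becomes "echo.blue"
		restore = true
	}

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(name), qType)
	c := new(dns.Client)
	r, _, err := c.Exchange(m, resolverAddr) // resolverAddr like "10.96.0.10:53"
	if err != nil {
		return nil, dns.RcodeServerFailure, err
	}
	if restore {
		// Hand back records under the original single-label name.
		for _, rr := range r.Answer {
			rr.Header().Name = dns.Fqdn(qName)
		}
	}
	return r.Answer, r.Rcode, nil
}
```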

docs/release-notes.md

+35

@@ -9,6 +9,41 @@ The new `telepresence wiretap` command introduces a read-only form of an `interc
 Similar to an `ingest`, a `wiretap` will always enforce read-only status on all volume mounts, and since that makes the `wiretap` completely read-only, there's no limit to how many simultaneous wiretaps that can be served. In fact, a `wiretap` and an `intercept` on the same port can run simultaneously.
 </div>

+## Version 2.22.3 <span style="font-size: 16px;">(April 8)</span>
+## <div style="display:flex;"><img src="images/change.png" alt="change" style="width:30px;height:fit-content;"/><div style="display:flex;margin-left:7px;">The Windows install script will now install Telepresence to "%ProgramFiles%\telepresence"</div></div>
+<div style="margin-left: 15px">
+
+Telepresence is now installed into "%ProgramFiles%\telepresence" instead of "C:\telepresence".
+The directory and the Path entry for `C:\telepresence` are no longer used and should be removed.
+</div>
+
+## <div style="display:flex;"><img src="images/bugfix.png" alt="bugfix" style="width:30px;height:fit-content;"/><div style="display:flex;margin-left:7px;">[The Windows install script didn't handle upgrades properly](https://github.com/telepresenceio/telepresence/issues/3827)</div></div>
+<div style="margin-left: 15px">
+
+The following changes were made:
+
+- The script now requires administrator privileges
+- The Path environment is only updated when there's a need for it
+</div>
+
+## <div style="display:flex;"><img src="images/bugfix.png" alt="bugfix" style="width:30px;height:fit-content;"/><div style="display:flex;margin-left:7px;">[The Telepresence Helm chart could not be used as a dependency in another chart.](https://github.com/telepresenceio/telepresence/issues/3833)</div></div>
+<div style="margin-left: 15px">
+
+The JSON schema validation implemented in Telepresence 2.22.0 had a defect: it rejected the `global` object. This object, a Helm-managed construct, facilitates the propagation of arbitrary configurations from a parent chart to its dependencies. Consequently, charts intended for dependency use must permit the presence of the `global` object.
+</div>
+
+## <div style="display:flex;"><img src="images/bugfix.png" alt="bugfix" style="width:30px;height:fit-content;"/><div style="display:flex;margin-left:7px;">[Recreating namespaces was not possible when using a dynamically namespaced Traffic Manager](https://github.com/telepresenceio/telepresence/issues/3831)</div></div>
+<div style="margin-left: 15px">
+
+A shared informer was sometimes reused when namespaces were removed and then later added again, leading to errors like "handler ... was not added to shared informer because it has stopped already".
+</div>
+
+## <div style="display:flex;"><img src="images/bugfix.png" alt="bugfix" style="width:30px;height:fit-content;"/><div style="display:flex;margin-left:7px;">Single label name DNS lookups didn't work unless at least one traffic-agent was installed</div></div>
+<div style="margin-left: 15px">
+
+A problem with incorrect handling of single label names in the traffic-manager's DNS resolver was fixed. The problem would cause lookups like `curl echo` to fail, even though telepresence was connected to a namespace containing an "echo" service, unless at least one of the workloads in the connected namespace had a traffic-agent.
+</div>
+
 ## Version 2.22.2 <span style="font-size: 16px;">(March 28)</span>
 ## <div style="display:flex;"><img src="images/bugfix.png" alt="bugfix" style="width:30px;height:fit-content;"/><div style="display:flex;margin-left:7px;">[Panic when using telepresence replace in a IPv6-only cluster](https://github.com/telepresenceio/telepresence/issues/3828)</div></div>
 <div style="margin-left: 15px">

docs/release-notes.mdx

+35

@@ -15,6 +15,41 @@ The new `telepresence wiretap` command introduces a read-only form of an `interc
 Similar to an `ingest`, a `wiretap` will always enforce read-only status on all volume mounts, and since that makes the `wiretap` completely read-only, there's no limit to how many simultaneous wiretaps that can be served. In fact, a `wiretap` and an `intercept` on the same port can run simultaneously.
 </Body>
 </Note>
+## Version 2.22.3 <span style={{fontSize:'16px'}}>(April 8)</span>
+<Note>
+<Title type="change">The Windows install script will now install Telepresence to "%ProgramFiles%\telepresence"</Title>
+<Body>
+Telepresence is now installed into "%ProgramFiles%\telepresence" instead of "C:\telepresence".
+The directory and the Path entry for `C:\telepresence` are no longer used and should be removed.
+</Body>
+</Note>
+<Note>
+<Title type="bugfix" docs="https://github.com/telepresenceio/telepresence/issues/3827">The Windows install script didn't handle upgrades properly</Title>
+<Body>
+The following changes were made:
+
+- The script now requires administrator privileges
+- The Path environment is only updated when there's a need for it
+</Body>
+</Note>
+<Note>
+<Title type="bugfix" docs="https://github.com/telepresenceio/telepresence/issues/3833">The Telepresence Helm chart could not be used as a dependency in another chart.</Title>
+<Body>
+The JSON schema validation implemented in Telepresence 2.22.0 had a defect: it rejected the `global` object. This object, a Helm-managed construct, facilitates the propagation of arbitrary configurations from a parent chart to its dependencies. Consequently, charts intended for dependency use must permit the presence of the `global` object.
+</Body>
+</Note>
+<Note>
+<Title type="bugfix" docs="https://github.com/telepresenceio/telepresence/issues/3831">Recreating namespaces was not possible when using a dynamically namespaced Traffic Manager</Title>
+<Body>
+A shared informer was sometimes reused when namespaces were removed and then later added again, leading to errors like "handler ... was not added to shared informer because it has stopped already".
+</Body>
+</Note>
+<Note>
+<Title type="bugfix">Single label name DNS lookups didn't work unless at least one traffic-agent was installed</Title>
+<Body>
+A problem with incorrect handling of single label names in the traffic-manager's DNS resolver was fixed. The problem would cause lookups like `curl echo` to fail, even though telepresence was connected to a namespace containing an "echo" service, unless at least one of the workloads in the connected namespace had a traffic-agent.
+</Body>
+</Note>
 ## Version 2.22.2 <span style={{fontSize:'16px'}}>(March 28)</span>
 <Note>
 <Title type="bugfix" docs="https://github.com/telepresenceio/telepresence/issues/3828">Panic when using telepresence replace in a IPv6-only cluster</Title>

go.mod

+1 -1

@@ -39,7 +39,7 @@ require (
 	github.com/stretchr/testify v1.10.0
 	github.com/telepresenceio/go-fuseftp v0.6.6
 	github.com/telepresenceio/go-fuseftp/rpc v0.6.6
-	github.com/telepresenceio/telepresence/rpc/v2 v2.22.2
+	github.com/telepresenceio/telepresence/rpc/v2 v2.22.3
 	github.com/vishvananda/netlink v1.3.0
 	golang.org/x/net v0.37.0
 	golang.org/x/sys v0.31.0

integration_test/docker_daemon_test.go

+13

@@ -118,6 +118,19 @@ func (s *dockerDaemonSuite) Test_DockerDaemon_daemonHostNotConflict() {
 	s.TelepresenceConnect(ctx)
 }

+func (s *dockerDaemonSuite) Test_DockerDaemon_singleNameLookup() {
+	ctx := s.Context()
+	const svc = "echo-easy"
+	s.ApplyApp(ctx, svc, "deploy/"+svc)
+	defer s.DeleteSvcAndWorkload(ctx, "deploy", svc)
+	out := s.TelepresenceConnect(ctx, "--docker", "--", itest.GetExecutable(ctx), "curl", "--silent", "--max-time", "1", svc)
+	s.Contains(out, "Request served by "+svc)
+	so, err := itest.TelepresenceStatus(ctx)
+	s.NoError(err)
+	s.Nil(so.ContainerizedDaemon)
+	s.False(so.UserDaemon.Running)
+}
+
 func (s *dockerDaemonSuite) Test_DockerDaemon_cacheFiles() {
 	ctx := s.Context()
 	rq := s.Require()
