Skip to content

Commit 4d2bee9

Browse files
authored
Merge pull request #3825 from telepresenceio/thallgren/wiretap
Add a telepresence wiretap command.
2 parents 7e54d74 + be34639 commit 4d2bee9

34 files changed

+1990
-1276
lines changed

CHANGELOG.yml

+12
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,18 @@
2424
#
2525
# For older changes, see CHANGELOG.OLD.md
2626
items:
27+
- version: 2.23.0
28+
date: (TBD)
29+
notes:
30+
- type: feature
31+
title: New telepresence wiretap command
32+
body: >-
33+
The new `telepresence wiretap` command introduces a read-only form of an `intercept` where the original
34+
container will run unaffected while a copy of the wiretapped traffic is sent to the client.
35+
36+
Similar to an `ingest`, a `wiretap` will always enforce read-only status on all volume mounts, and since that
37+
makes the `wiretap` completely read-only, there's no limit to how many simultaneous wiretaps can be
38+
served. In fact, a `wiretap` and an `intercept` on the same port can run simultaneously.
2739
- version: 2.22.1
2840
date: (TBD)
2941
notes:

cmd/traffic/cmd/agent/fwdstate.go

+96-90
Original file line numberDiff line numberDiff line change
@@ -4,21 +4,23 @@ import (
44
"context"
55
"fmt"
66
"net/http"
7+
"slices"
78
"time"
89

910
"github.com/datawire/dlib/dlog"
1011
"github.com/telepresenceio/telepresence/rpc/v2/manager"
1112
"github.com/telepresenceio/telepresence/v2/pkg/forwarder"
13+
"github.com/telepresenceio/telepresence/v2/pkg/iputil"
1214
"github.com/telepresenceio/telepresence/v2/pkg/restapi"
1315
"github.com/telepresenceio/telepresence/v2/pkg/tunnel"
1416
)
1517

1618
type fwdState struct {
1719
*state
18-
intercept InterceptTarget
19-
container string
20-
forwarder forwarder.Interceptor
21-
chosenIntercept *manager.InterceptInfo
20+
intercept InterceptTarget
21+
container string
22+
forwarder forwarder.Interceptor
23+
chosenInterceptId string
2224
}
2325

2426
// NewInterceptState creates an InterceptState that performs intercepts by using an Interceptor which indiscriminately
@@ -73,118 +75,122 @@ func (pm *ProviderMux) CreateClientStream(ctx context.Context, tag tunnel.Tag, s
7375
}
7476

7577
func (fs *fwdState) HandleIntercepts(ctx context.Context, cepts []*manager.InterceptInfo) []*manager.ReviewInterceptRequest {
76-
var myChoice, activeIntercept *manager.InterceptInfo
77-
if fs.chosenIntercept != nil {
78-
chosenID := fs.chosenIntercept.Id
79-
for _, is := range cepts {
80-
if chosenID == is.Id {
81-
fs.chosenIntercept = is
82-
myChoice = is
78+
var active []*manager.InterceptInfo
79+
var waiting []*manager.InterceptInfo
80+
for _, is := range cepts {
81+
switch is.Disposition {
82+
case manager.InterceptDispositionType_ACTIVE:
83+
active = append(active, is)
84+
case manager.InterceptDispositionType_WAITING:
85+
waiting = append(waiting, is)
86+
}
87+
}
88+
89+
var activeIntercept *manager.InterceptInfo
90+
if fs.chosenInterceptId != "" {
91+
for _, is := range active {
92+
if fs.chosenInterceptId == is.Id {
93+
if !is.Spec.Wiretap {
94+
activeIntercept = is
95+
}
8396
break
8497
}
8598
}
99+
}
100+
101+
if activeIntercept == nil {
102+
fs.chosenInterceptId = ""
86103

87-
if myChoice == nil {
88-
// Chosen intercept is not present in the snapshot
89-
fs.chosenIntercept = nil
90-
} else if myChoice.Disposition == manager.InterceptDispositionType_ACTIVE {
91-
// The chosen intercept still exists and is active
92-
activeIntercept = myChoice
93-
}
94-
} else {
95104
// Attach to already ACTIVE intercept if there is one.
96-
for _, cept := range cepts {
97-
if cept.Disposition == manager.InterceptDispositionType_ACTIVE {
98-
myChoice = cept
99-
fs.chosenIntercept = cept
100-
activeIntercept = cept
105+
for _, is := range active {
106+
if !is.Spec.Wiretap {
107+
fs.chosenInterceptId = is.Id
108+
activeIntercept = is
101109
break
102110
}
103111
}
104112
}
105113

114+
fwd := fs.forwarder
106115
if fs.sessionInfo != nil {
107116
// Update forwarding.
108-
fs.forwarder.SetStreamProvider(
117+
fwd.SetStreamProvider(
109118
&ProviderMux{
110119
AgentProvider: fs,
111120
ManagerProvider: &tunnel.TrafficManagerStreamProvider{Manager: fs.ManagerClient(), AgentSessionID: tunnel.SessionID(fs.sessionInfo.SessionId)},
112121
})
113122
}
114-
fs.forwarder.SetIntercepting(activeIntercept)
123+
fwd.SetIntercepting(activeIntercept)
115124

116-
// Review waiting intercepts
117-
reviews := make([]*manager.ReviewInterceptRequest, 0, len(cepts))
118-
for _, cept := range cepts {
119-
container := cept.Spec.ContainerName
120-
if container == "" {
121-
container = fs.container
125+
// Remove inactive wiretaps.
126+
for _, id := range fwd.WiretapIDs() {
127+
if !slices.ContainsFunc(active, func(ii *manager.InterceptInfo) bool { return ii.Id == id && ii.Spec.Wiretap }) {
128+
dlog.Debugf(ctx, "removing wiretap id %s", id)
129+
fwd.RemoveWiretap(id)
122130
}
123-
cs := fs.containerStates[container]
124-
if cs == nil {
125-
reviews = append(reviews, &manager.ReviewInterceptRequest{
126-
Id: cept.Id,
127-
Disposition: manager.InterceptDispositionType_AGENT_ERROR,
128-
Message: fmt.Sprintf("No match for container %q", container),
129-
MechanismArgsDesc: "all TCP connections",
130-
})
131-
continue
131+
}
132+
133+
// Add active wiretaps.
134+
for _, ii := range active {
135+
if ii.Spec.Wiretap {
136+
if !fwd.HasWiretap(ii.Id) {
137+
dlog.Debugf(ctx, "adding wiretap id %s to %s", ii.Id, iputil.JoinHostPort(ii.Spec.TargetHost, uint16(ii.Spec.TargetPort)))
138+
fwd.AddWiretap(ii)
139+
}
132140
}
133-
if cept.Disposition == manager.InterceptDispositionType_WAITING {
141+
}
142+
143+
// Review waiting intercepts
144+
reviews := make([]*manager.ReviewInterceptRequest, 0, len(waiting))
145+
for _, ii := range waiting {
146+
switch {
147+
case activeIntercept == nil || ii.Spec.Wiretap:
134148
// This intercept is ready to be active
135-
switch {
136-
case cept == myChoice:
137-
// We've already chosen this one, but it's not active yet in this
138-
// snapshot. Let's go ahead and tell the manager to mark it ACTIVE.
139-
dlog.Infof(ctx, "Setting intercept %q as ACTIVE (again?)", cept.Id)
140-
reviews = append(reviews, &manager.ReviewInterceptRequest{
141-
Id: cept.Id,
142-
Disposition: manager.InterceptDispositionType_ACTIVE,
143-
PodIp: fs.PodIP(),
144-
FtpPort: int32(fs.FtpPort()),
145-
SftpPort: int32(fs.SftpPort()),
146-
MountPoint: cs.MountPoint(),
147-
Mounts: cs.Mounts().ToRPC(),
148-
MechanismArgsDesc: "all TCP connections",
149-
Environment: cs.Env(),
150-
})
151-
case fs.chosenIntercept == nil:
152-
// We don't have an intercept in play, so choose this one. All
153-
// agents will get intercepts in the same order every time, so
154-
// this will yield a consistent result. Note that the intercept
155-
// will not become active at this time. That will happen later,
156-
// once the manager assigns a port.
157-
dlog.Infof(ctx, "Setting intercept %q as ACTIVE", cept.Id)
158-
fs.chosenIntercept = cept
159-
myChoice = cept
160-
reviews = append(reviews, &manager.ReviewInterceptRequest{
161-
Id: cept.Id,
162-
Disposition: manager.InterceptDispositionType_ACTIVE,
163-
PodIp: fs.PodIP(),
164-
FtpPort: int32(fs.FtpPort()),
165-
SftpPort: int32(fs.SftpPort()),
166-
MountPoint: cs.MountPoint(),
167-
Mounts: cs.Mounts().ToRPC(),
168-
MechanismArgsDesc: "all TCP connections",
169-
Environment: cs.Env(),
170-
})
171-
default:
172-
// We already have an intercept in play, so reject this one.
173-
chosenID := fs.chosenIntercept.Id
174-
dlog.Infof(ctx, "Setting intercept %q as AGENT_ERROR; as it conflicts with %q as the current chosen-to-be-ACTIVE intercept", cept.Id, chosenID)
175-
var msg string
176-
if fs.chosenIntercept.Disposition == manager.InterceptDispositionType_ACTIVE {
177-
msg = fmt.Sprintf("Conflicts with the currently-served intercept %q", chosenID)
178-
} else {
179-
msg = fmt.Sprintf("Conflicts with the currently-waiting-to-be-served intercept %q", chosenID)
180-
}
149+
container := ii.Spec.ContainerName
150+
if container == "" {
151+
container = fs.container
152+
}
153+
cs := fs.containerStates[container]
154+
if cs == nil {
181155
reviews = append(reviews, &manager.ReviewInterceptRequest{
182-
Id: cept.Id,
156+
Id: ii.Id,
183157
Disposition: manager.InterceptDispositionType_AGENT_ERROR,
184-
Message: msg,
158+
Message: fmt.Sprintf("No match for container %q", container),
185159
MechanismArgsDesc: "all TCP connections",
186160
})
161+
continue
162+
}
163+
if !ii.Spec.Wiretap {
164+
// We can only have one active intercept that isn't a wiretap
165+
activeIntercept = ii
187166
}
167+
reviews = append(reviews, &manager.ReviewInterceptRequest{
168+
Id: ii.Id,
169+
Disposition: manager.InterceptDispositionType_ACTIVE,
170+
PodIp: fs.PodIP(),
171+
FtpPort: int32(fs.FtpPort()),
172+
SftpPort: int32(fs.SftpPort()),
173+
MountPoint: cs.MountPoint(),
174+
Mounts: cs.Mounts().ToRPC(),
175+
MechanismArgsDesc: "all TCP connections",
176+
Environment: cs.Env(),
177+
})
178+
default:
179+
// We already have an intercept in play, so reject this one.
180+
chosenID := activeIntercept.Id
181+
dlog.Infof(ctx, "Setting intercept %q as AGENT_ERROR; as it conflicts with %q as the current chosen-to-be-ACTIVE intercept", ii.Id, chosenID)
182+
var msg string
183+
if activeIntercept.Disposition == manager.InterceptDispositionType_ACTIVE {
184+
msg = fmt.Sprintf("Conflicts with the currently-served intercept %q", chosenID)
185+
} else {
186+
msg = fmt.Sprintf("Conflicts with the currently-waiting-to-be-served intercept %q", chosenID)
187+
}
188+
reviews = append(reviews, &manager.ReviewInterceptRequest{
189+
Id: ii.Id,
190+
Disposition: manager.InterceptDispositionType_AGENT_ERROR,
191+
Message: msg,
192+
MechanismArgsDesc: "all TCP connections",
193+
})
188194
}
189195
}
190196
return reviews

cmd/traffic/cmd/manager/state/intercept.go

+5-1
Original file line numberDiff line numberDiff line change
@@ -208,10 +208,14 @@ func (s *state) preparePorts(ac *agentconfig.Sidecar, cn *agentconfig.Container,
208208
return info.Disposition == rpc.InterceptDispositionType_ACTIVE && info.Spec.Agent == ac.AgentName && info.Spec.Namespace == ac.Namespace
209209
})
210210

211-
if spec.Mechanism != "http" {
211+
if !(spec.Wiretap || spec.Mechanism == "http") {
212212
// Intercept is global, so it will conflict with any other intercept using the same port and protocol.
213213
for _, otherIc := range otherIcs {
214214
oSpec := otherIc.Spec // Validate that there's no port conflict
215+
if oSpec.Wiretap {
216+
// wiretaps will not cause conflicts
217+
continue
218+
}
215219
for cp := range uniqueContainerPorts {
216220
if cp.Port == uint16(oSpec.ContainerPort) && string(cp.Proto) == oSpec.Protocol {
217221
name := oSpec.Name

docs/compare/mirrord.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -51,9 +51,9 @@ This comparison chart applies to the Open Source editions of both products.
5151
| Doesn't require injection of a sidecar |[^3] ||
5252
| Supports connecting to clusters over a corporate VPN |||
5353
| Can intercept traffic |||
54+
| Can mirror traffic |||
5455
| Can ingest a container |||
5556
| Can replace a container |||
56-
| Can mirror traffic |||
5757
| Can act as a cluster VPN only |||
5858
| Will work with statically linked binaries |||
5959
| Runs natively on windows |||

docs/howtos/engage.md

+54-1
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,24 @@ Telepresence offers three powerful ways to develop your services locally:
3838
- You want your local service to only receive specific ingress traffic, while other traffic must be untouched.
3939
- You want your remote container to continue processing other requests or background tasks.
4040

41+
### Wiretap
42+
* **How it Works:**
43+
- Adds a wiretap on a specific service port (or ports) and sends the data to the local workstation.
44+
- Makes the remote environment of the targeted container available to the local workstation.
45+
- Provides read-only access to the volumes mounted by the targeted container.
46+
* **Impact:**
47+
- A Traffic Agent is injected into the pods of the targeted workload.
48+
- All containers keep on running.
49+
- All traffic will still reach the remote service.
50+
- Wiretapped traffic is rerouted to the local workstation.
51+
* **Use-cases:**
52+
- You need a solution where several developers can engage with the same service simultaneously.
53+
- Your main focus is the service API rather than the cluster's pods and containers.
54+
- You want your local service to only receive specific ingress traffic.
55+
- You don't care about the responses sent by your local service.
56+
- You don't want breakpoints in your local service to affect the remote service.
57+
- You want to keep the impact that your local development has on the cluster to a minimum.
58+
4159
### Ingest
4260
* **How it Works:**
4361
- Makes the remote environment of the ingested container available to the local workstation.
@@ -46,7 +64,7 @@ Telepresence offers three powerful ways to develop your services locally:
4664
- A Traffic Agent is injected into the pods of the targeted workload.
4765
- No traffic is rerouted and all containers keep on running.
4866
* **Use-cases:**
49-
- You want to keep the impact of your local development to a minimum.
67+
- You want to keep the impact that your local development has on the cluster to a minimum.
5068
- You don't need traffic routed from the cluster, and read-only access to the container's volumes is ok.
5169

5270
## Prerequisites
@@ -199,6 +217,41 @@ You can now:
199217
- Query services only exposed in your cluster's network.
200218
- Set breakpoints in your IDE to investigate bugs.
201219

220+
## Wiretap your application
221+
222+
You can use the `telepresence wiretap` command when you want to wiretap the traffic for a specific service and send a
223+
copy of it to your workstation. The `wiretap` is less intrusive than the `intercept`, because it does not interfere
224+
with the traffic at all.
225+
226+
1. Connect to your cluster with `telepresence connect`.
227+
228+
2. Put a wiretap on all traffic going to the application's http port in your cluster and send it to port 8080 on your workstation.
229+
```console
230+
$ telepresence wiretap example-app --port 8080:http --env-file ~/example-app-intercept.env --mount /tmp/example-app-mounts
231+
Using Deployment example-app
232+
wiretapped
233+
Wiretap name : example-app
234+
State : ACTIVE
235+
Workload kind : Deployment
236+
Destination : 127.0.0.1:8080
237+
Intercepting : all TCP connections
238+
```
239+
240+
* For `--port`: specify the port the local instance of your application is running on, and optionally the remote port
241+
that you want to wiretap. Telepresence will select the remote port automatically when there's only one service
242+
port available to access the workload. You must specify the port to wiretap when the workload exposes multiple
243+
ports. You can do this by specifying the port you want to wiretap after a colon in the `--port` argument (like in
244+
the example), and/or by specifying the service you want to wiretap using the `--service` flag.
245+
246+
* For `--env-file`: specify a file path for Telepresence to write the environment variables that are set for the targeted
247+
container.
248+
249+
3. Start your local application using the environment variables retrieved and the volumes that were mounted in the previous step.
250+
251+
You can now:
252+
- Query services only exposed in your cluster's network.
253+
- Set breakpoints in your IDE to investigate bugs.
254+
202255
### Running everything using Docker
203256

204257
This approach eliminates the need for root access and confines the Telepresence network interface and remote mounts

docs/reference/architecture.md

+3-1
Original file line numberDiff line numberDiff line change
@@ -36,11 +36,13 @@ in the telepresence client binary (`telepresence helm install`) or by using a He
3636

3737
## Traffic Agent
3838

39-
The Traffic Agent is a sidecar container that facilitates engagements. When a `replace`, `ingest` or `intercept` is first
39+
The Traffic Agent is a sidecar container that facilitates engagements. When a `replace`, `ingest`, `intercept`, or `wiretap` is first
4040
started, the Traffic Agent container is injected into the workload's pod(s). You can see the Traffic Agent's status by
4141
running `telepresence list` or `kubectl describe pod <pod-name>`.
4242

4343
Depending on whether a `replace` or an `intercept` is active, the Traffic Agent will either route the incoming request
4444
to your workstation, or it will pass it along to the container in the pod usually handling requests.
4545

46+
When a `wiretap` is active, the Traffic Agent will send a copy of the incoming requests to your workstation.
47+
4648
Please see [Traffic Agent Sidecar](engagements/sidecar.md) for details.

0 commit comments

Comments
 (0)