Commit 11bb23c

Implement k8s sync pruning with SDK (#5634)
* Get live resources
* Find resources which should be removed
* Prune resources
* Implement test 'TestDeploymentService_executeK8sSyncStage_withPrune' with SDK
* Implement test 'TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped' with SDK

Signed-off-by: Yoshiki Fujikane <[email protected]>
1 parent 2f9451d commit 11bb23c

File tree

2 files changed, +444 -0 lines changed


pkg/app/pipedv1/plugin/kubernetes/deployment/plugin.go

Lines changed: 50 additions & 0 deletions
@@ -18,6 +18,7 @@ import (
 	"cmp"
 	"context"
 	"errors"
+	"time"
 
 	kubeconfig "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config"
 	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/provider"

@@ -178,6 +179,55 @@ func (p *Plugin) executeK8sSyncStage(ctx context.Context, input *sdk.ExecuteStag
 		return sdk.StageStatusSuccess
 	}
 
+	// Wait for all applied manifests to be stable.
+	// In theory, we don't need to wait for them to be stable before going to the next step
+	// but waiting for a while reduces the number of Kubernetes changes in a short time.
+	lp.Info("Waiting for the applied manifests to be stable")
+	select {
+	case <-time.After(15 * time.Second):
+		break
+	case <-ctx.Done():
+		break
+	}
+
+	lp.Info("Start finding all running resources but no longer defined in Git")
+
+	namespacedLiveResources, clusterScopedLiveResources, err := provider.GetLiveResources(ctx, kubectl, deployTargetConfig.KubeConfigPath, input.Request.Deployment.ApplicationID)
+	if err != nil {
+		lp.Errorf("Failed while getting live resources (%v)", err)
+		return sdk.StageStatusFailure
+	}
+
+	if len(namespacedLiveResources)+len(clusterScopedLiveResources) == 0 {
+		lp.Info("There is no data about live resource so no resource will be removed")
+		return sdk.StageStatusSuccess
+	}
+
+	lp.Successf("Successfully loaded %d live resources", len(namespacedLiveResources)+len(clusterScopedLiveResources))
+
+	removeKeys := provider.FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveResources)
+	if len(removeKeys) == 0 {
+		lp.Info("There are no live resources should be removed")
+		return sdk.StageStatusSuccess
+	}
+
+	lp.Infof("Start pruning %d resources", len(removeKeys))
+	var deletedCount int
+	for _, key := range removeKeys {
+		if err := kubectl.Delete(ctx, deployTargetConfig.KubeConfigPath, key.Namespace(), key); err != nil {
+			if errors.Is(err, provider.ErrNotFound) {
+				lp.Infof("Specified resource does not exist, so skip deleting the resource: %s (%v)", key.ReadableString(), err)
+				continue
+			}
+			lp.Errorf("Failed while deleting resource %s (%v)", key.ReadableString(), err)
+			continue // continue to delete other resources
+		}
+		deletedCount++
+		lp.Successf("- deleted resource: %s", key.ReadableString())
+	}
+
+	lp.Successf("Successfully deleted %d resources", deletedCount)
+
 	return sdk.StageStatusSuccess
 }
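The stage relies on two provider helpers whose implementations are not part of this diff: provider.GetLiveResources and provider.FindRemoveResources. As a rough idea of the first one, live resources can be listed by filtering on a label that carries the application ID. The label key, the fixed kind list, and the plain kubectl invocation below are assumptions for illustration, not the plugin's actual provider code.

// Illustrative sketch only: list resources labeled with the application ID.
// The "pipecd.dev/application" label key and the hard-coded kinds are assumptions,
// not necessarily what provider.GetLiveResources does internally.
package example

import (
	"context"
	"fmt"
	"os/exec"
)

// listLiveResourcesJSON shells out to kubectl and returns the raw JSON list of
// resources that carry the application label.
func listLiveResourcesJSON(ctx context.Context, kubeconfigPath, appID string) ([]byte, error) {
	selector := fmt.Sprintf("pipecd.dev/application=%s", appID)
	cmd := exec.CommandContext(ctx,
		"kubectl", "--kubeconfig", kubeconfigPath,
		"get", "deployments,services,configmaps",
		"--all-namespaces",
		"-l", selector,
		"-o", "json",
	)
	out, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("failed to list live resources: %w", err)
	}
	return out, nil
}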

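provider.FindRemoveResources is the piece that decides what to prune: conceptually, any live resource whose key does not appear among the manifests rendered from Git becomes a deletion candidate. Below is a minimal, self-contained sketch of that set difference; the resourceKey type and findRemoveKeys helper are illustrative only, since the real provider package uses its own key and manifest types.

// A sketch of the key-matching idea behind pruning: live keys minus Git keys.
package main

import "fmt"

// resourceKey identifies a Kubernetes resource; cluster-scoped resources
// would have an empty Namespace.
type resourceKey struct {
	Kind      string
	Namespace string
	Name      string
}

// findRemoveKeys returns the keys of live resources that are no longer
// declared in the Git manifests.
func findRemoveKeys(gitKeys, liveKeys []resourceKey) []resourceKey {
	declared := make(map[resourceKey]struct{}, len(gitKeys))
	for _, k := range gitKeys {
		declared[k] = struct{}{}
	}
	var remove []resourceKey
	for _, k := range liveKeys {
		if _, ok := declared[k]; !ok {
			remove = append(remove, k)
		}
	}
	return remove
}

func main() {
	git := []resourceKey{{"Deployment", "app", "web"}}
	live := []resourceKey{
		{"Deployment", "app", "web"},
		{"ConfigMap", "app", "old-config"}, // removed from Git, so it should be pruned
	}
	fmt.Println(findRemoveKeys(git, live)) // [{ConfigMap app old-config}]
}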
0 commit comments
