Skip to content
This repository was archived by the owner on Jun 26, 2023. It is now read-only.

Commit 083c78c

Browse files
authored
Merge pull request #1251 from phoenixking25/second-ns
[MTB] refactored code and added other ns flag
2 parents 66244f1 + c3a27b5 commit 083c78c

File tree

35 files changed

+173
-39
lines changed

35 files changed

+173
-39
lines changed

benchmarks/kubectl-mtb/internal/kubectl-mtb/run.go

+72-37
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ func reportSuiteDidEnd(suiteSummary *reporter.SuiteSummary, reportersArray []rep
102102
}
103103

104104
func removeBenchmarksWithIDs(ids []string) {
105-
temp := []*benchmark.Benchmark{}
105+
var temp []*benchmark.Benchmark
106106
for _, benchmark := range benchmarks {
107107
found := false
108108
for _, id := range ids {
@@ -143,22 +143,78 @@ func validateFlags(cmd *cobra.Command) error {
143143
return nil
144144
}
145145

146-
func runTests(cmd *cobra.Command, args []string) error {
147-
148-
benchmarkRunOptions.Label, _ = cmd.Flags().GetString("labels")
149-
// Get log level
146+
func setupLogger(cmd *cobra.Command) {
150147
debug, _ := cmd.Flags().GetBool("debug")
151148
if debug {
152149
benchmarkRunOptions.Logger = log.GetLogger(true)
153150
} else {
154151
// default mode production
155152
benchmarkRunOptions.Logger = log.GetLogger(false)
156153
}
154+
}
157155

156+
func setupReporters(cmd *cobra.Command) ([]reporter.Reporter, error) {
158157
// Get reporters from the user
159158
reporterFlag, _ := cmd.Flags().GetString("out")
160159
reporters := strings.Split(reporterFlag, ",")
161-
reportersArray, err := reporter.GetReporters(reporters)
160+
return reporter.GetReporters(reporters)
161+
}
162+
163+
func executePreRun(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) {
164+
err := b.PreRun(benchmarkRunOptions)
165+
if err != nil {
166+
benchmarkRunOptions.Logger.Debug(err.Error())
167+
suiteSummary.NumberOfFailedValidations++
168+
ts.Validation = false
169+
ts.ValidationError = err
170+
b.Status = "Error"
171+
}
172+
}
173+
174+
func executeRun(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) {
175+
if ts.Validation {
176+
err := b.Run(benchmarkRunOptions)
177+
if err != nil {
178+
benchmarkRunOptions.Logger.Debug(err.Error())
179+
suiteSummary.NumberOfFailedTests++
180+
ts.Test = false
181+
ts.TestError = err
182+
b.Status = "Fail"
183+
} else {
184+
suiteSummary.NumberOfPassedTests++
185+
b.Status = "Pass"
186+
}
187+
}
188+
}
189+
190+
func executePostRun(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) {
191+
if ts.Test {
192+
if b.PostRun != nil {
193+
err := b.PostRun(benchmarkRunOptions)
194+
if err != nil {
195+
fmt.Print(err.Error())
196+
}
197+
}
198+
}
199+
}
200+
201+
func shouldSkipTest(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) bool {
202+
if b.NamespaceRequired > 1 {
203+
if benchmarkRunOptions.OtherNamespace != "" && benchmarkRunOptions.OtherTenant != "" {
204+
return false
205+
}
206+
return true
207+
}
208+
return false
209+
}
210+
211+
func runTests(cmd *cobra.Command, args []string) error {
212+
213+
benchmarkRunOptions.Label, _ = cmd.Flags().GetString("labels")
214+
// Get log level
215+
setupLogger(cmd)
216+
217+
reportersArray, err := setupReporters(cmd)
162218
if err != nil {
163219
return err
164220
}
@@ -192,40 +248,17 @@ func runTests(cmd *cobra.Command, args []string) error {
192248

193249
startTest := time.Now()
194250

195-
//Run Prerun
196-
err = b.PreRun(benchmarkRunOptions)
197-
if err != nil {
198-
benchmarkRunOptions.Logger.Debug(err.Error())
199-
suiteSummary.NumberOfFailedValidations++
200-
ts.Validation = false
201-
ts.ValidationError = err
202-
b.Status = "Error"
251+
if shouldSkipTest(b, suiteSummary, ts) {
252+
continue
203253
}
204254

205-
// Check PreRun status
206-
if ts.Validation {
207-
err = b.Run(benchmarkRunOptions)
208-
if err != nil {
209-
benchmarkRunOptions.Logger.Debug(err.Error())
210-
suiteSummary.NumberOfFailedTests++
211-
ts.Test = false
212-
ts.TestError = err
213-
b.Status = "Fail"
214-
} else {
215-
suiteSummary.NumberOfPassedTests++
216-
b.Status = "Pass"
217-
}
218-
}
255+
// Lifecycles
256+
executePreRun(b, suiteSummary, ts)
257+
258+
executeRun(b, suiteSummary, ts)
259+
260+
executePostRun(b, suiteSummary, ts)
219261

220-
// Check Run status
221-
if ts.Test {
222-
if b.PostRun != nil {
223-
err = b.PostRun(benchmarkRunOptions)
224-
if err != nil {
225-
fmt.Print(err.Error())
226-
}
227-
}
228-
}
229262
elapsed := time.Since(startTest)
230263
ts.RunTime = elapsed
231264
reportTestWillRun(ts, reportersArray)
@@ -245,6 +278,8 @@ func newRunCmd() *cobra.Command {
245278
runCmd.Flags().String("as", "", "(required) user name to impersonate")
246279
runCmd.Flags().StringP("out", "o", "default", "(optional) output reporters (default, policyreport)")
247280
runCmd.Flags().StringP("skip", "s", "", "(optional) benchmark IDs to skip")
281+
runCmd.Flags().String("other-namespace", "", "(optional) other tenant namespace")
282+
runCmd.Flags().String("other-tenant-admin","", "(optional) other tenant admin")
248283
runCmd.Flags().StringP("labels", "l", "", "(optional) labels")
249284

250285
return runCmd

benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go

+1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ type Benchmark struct {
1919
Status string `yaml:"status"`
2020
Rationale string `yaml:"rationale"`
2121
Audit string `yaml:"audit"`
22+
NamespaceRequired int `yaml:"namespaceRequired"`
2223
PreRun func(types.RunOptions) error
2324
Run func(types.RunOptions) error
2425
PostRun func(types.RunOptions) error

benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -33,3 +33,8 @@ kubectl --kubeconfig tenant-a auth can-i verb resource
3333
Each command must return 'no'
3434

3535

36+
37+
**namespaceRequired:**
38+
39+
1
40+

benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ category: Control Plane Isolation
55
description: Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such as Node, ClusterRole, ClusterRoleBinding, etc.
66
remediation:
77
profileLevel: 1
8+
namespaceRequired: 1
89
rationale: Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources
910
audit: |
1011
Run the following commands to retrieve the list of non-namespaced resources

benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a pod or container that adds new `capabilities` in its `securityContext`.
2828

2929
Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -6,4 +6,5 @@ description: Linux
66
remediation: Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
rationale: Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors.
88
profileLevel: 1
9+
namespaceRequired: 1
910
audit: Create a pod or container that adds new `capabilities` in its `securityContext`. The pod creation must fail.

benchmarks/kubectl-mtb/test/benchmarks/block_multitenant_resources/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -30,3 +30,8 @@ For each returned by the first command verify that the resource cannot be modifi
3030

3131
Each command must return 403 FORBIDDEN
3232

33+
34+
**namespaceRequired:**
35+
36+
1
37+

benchmarks/kubectl-mtb/test/benchmarks/block_multitenant_resources/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ category: Tenant Isolation
55
description: Each tenant namespace may contain resources setup by the cluster administrator for multi-tenancy, such as role bindings, and network policies. Tenants should not be allowed to modify the namespaced resources created by the cluster administrator for multi-tenancy. However, for some resources such as network policies, tenants can configure additional instances of the resource for their workloads.
66
remediation:
77
profileLevel: 1
8+
namespaceRequired: 1
89
rationale: Tenants can escalate privileges and impact other tenants if they are able to delete or modify required multi-tenancy resources such as namespace resource quotas or default network policy.
910
audit: |
1011
The resources managed by the cluster administrator and that cannot be modified by tenant administrator can be identified by a label configured in the benchmarks configuration YAML file. If no label is provided, then this test looks for any existing network policy and role binding (resource quotas are handled by a separate test) and tries to modify and delete them. Run the following commands to retrieve the list of resources managed by the cluster administrator

benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -32,3 +32,8 @@ kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota
3232
```
3333
Each command must return 'no'"
3434

35+
36+
**namespaceRequired:**
37+
38+
1
39+

benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ category: Tenant Isolation
55
description: Tenants should not be able to modify the resource quotas defined in their namespaces
66
remediation:
77
profileLevel: 1
8+
namespaceRequired: 1
89
rationale: Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants.
910
audit: |
1011
Run the following commands to check for permissions to manage quotas in the tenant namespace:

benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its
2828

2929
Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ category: Control Plane Isolation
55
description: The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
66
remediation: Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
profileLevel: 1
8+
namespaceRequired: 1
89
audit: Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its `securityContext`. The pod creation must fail.
910
rationale: The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.

benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a pod or container that sets `privileged` to `true` in its `securityConte
2828

2929
Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/config.yaml

+2-1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ category: Control Plane Isolation
55
rationale: By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers.
66
remediation: Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
profileLevel: 1
8-
description: Linux
8+
description: Linux
9+
namespaceRequired: 1
910
audit: Create a pod or container that sets `privileged` to `true` in its `securityContext`. The pod creation must fail.

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a pod or container that sets new `hostIPC` to `true`. The pod creation mu
2828

2929
Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ category: Host Isolation
55
description: Tenants should not be allowed to share the host's inter-process communication (IPC) namespace.
66
remediation: Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
profileLevel: 1
8+
namespaceRequired: 1
89
audit: Create a pod or container that sets new `hostIPC` to `true`. The pod creation must fail.
910
rationale: The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace allowing potential access to host processes or processes belonging to other tenants.

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -26,3 +26,8 @@ Create a pod defining a container using a host port. The pod creation must fail.
2626

2727
Create a pod defining a container using a host network. The pod creation must fail."
2828

29+
30+
**namespaceRequired:**
31+
32+
1
33+

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ category: Host Isolation
55
description: Tenants should not be allowed to use host networking and host ports for their workloads.
66
remediation:
77
profileLevel: 1
8+
namespaceRequired: 1
89
rationale: Using `hostPort` and `hostNetwork` allows tenants' workloads to share the host networking stack, allowing potential snooping of network traffic across application pods
910
audit: |
1011
Create a pod defining a container using a host port. The pod creation must fail.

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_path/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a pod defining a volume of type hostpath. The pod creation must fail.
2828

2929
Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_path/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ category: Host Protection
55
description: Tenants should not be able to mount host volumes and directories
66
remediation: Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
profileLevel: 1
8+
namespaceRequired: 1
89
rationale: The use of host volumes and directories can be used to access shared data or escalate privileges and also creates a tight coupling between a tenant workload and a host.
910
audit: Create a pod defining a volume of type hostpath. The pod creation must fail.

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a pod or container that sets new `hostPID` to `true`. The pod creation mu
2828

2929
Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ category: Host Isolation
55
description: Tenants should not be allowed to share the host process ID (PID) namespace.
66
remediation: Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
profileLevel: 1
8+
namespaceRequired: 1
89
audit: Create a pod or container that sets new `hostPID` to `true`. The pod creation must fail.
910
rationale: The `hostPID` setting allows pods to share the host process ID namespace allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace.

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ Create a deployment and an associated service exposing a NodePort. The service c
2828

2929
Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
3030

31+
32+
**namespaceRequired:**
33+
34+
1
35+

benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ category: Host Isolation
55
description: Tenants should not be able to create services of type NodePort.
66
remediation: Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
77
profileLevel: 1
8+
namespaceRequired: 1
89
audit: Create a deployment and an associated service exposing a NodePort. The service creation must fail.
910
rationale: NodePorts configure host ports that cannot be secured using Kubernetes network policies and require upstream firewalls. Also, multiple tenants cannot use the same host port numbers.

benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -34,3 +34,8 @@ kubectl --kubeconfig=tenant-a -n a1 describe resourcequota
3434

3535
Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants.
3636

37+
38+
**namespaceRequired:**
39+
40+
1
41+

benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ category: Fairness
55
description: Namespace resource quotas should be used to allocate, track and limit the number of objects, of a particular type, that can be created within a namespace.
66
remediation: Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
77
profileLevel: 1
8+
namespaceRequired: 1
89
Rationale: Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants.
910
Audit: |
1011
Run the following command to show configured quotas. Make sure that a quota is configured for API objects(PersistentVolumeClaim, LoadBalancer, NodePort ,Pods etc).

benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -32,3 +32,8 @@ kubectl --kubeconfig=tenant-a -n a1 describe quota
3232

3333
Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
3434

35+
36+
**namespaceRequired:**
37+
38+
1
39+

benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ category: Fairness
55
description: Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources.
66
remediation: Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
77
profileLevel: 1
8+
namespaceRequired: 1
89
audit: |
910
Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources.
1011
```shell

benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md

+5
Original file line numberDiff line numberDiff line change
@@ -29,3 +29,8 @@ kubectl --kubeconfig=tenant-a -n a1 auth can-i verb networkpolicy
2929
Each command must return 'yes'
3030

3131

32+
33+
**namespaceRequired:**
34+
35+
1
36+

0 commit comments

Comments
 (0)