Commit 1d31e47

Merge pull request kubernetes-retired#565 from k82cn/up_sched_conf
Removed reclaim & preempt by default.
2 parents: 0f5a458 + 022827b

File tree: 3 files changed, +25 −26 lines

pkg/scheduler/util.go (+1 −1)
@@ -28,7 +28,7 @@ import (
 )

 var defaultSchedulerConf = `
-actions: "reclaim, allocate, backfill, preempt"
+actions: "allocate, backfill"
 tiers:
 - plugins:
   - name: priority
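
With this change the compiled-in default configuration runs only the allocate and backfill actions; reclaim and preempt remain available in the scheduler but now have to be requested explicitly through a user-supplied configuration. As a sketch only (not part of this commit), a configuration equivalent to the old default could be declared the same way defaultSchedulerConf is; the tiers section is shown only as far as the diff context above reveals it, and how such a configuration is handed to kube-batch (for example via a configuration file mounted from a ConfigMap) depends on the deployment.

// Illustrative sketch, not from this commit: the previous default action list,
// re-enabling reclaim and preempt, in the same YAML shape as defaultSchedulerConf.
// Only the plugin tier visible in the diff context above is shown here.
var schedulerConfWithPreemption = `
actions: "reclaim, allocate, backfill, preempt"
tiers:
- plugins:
  - name: priority
`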

test/e2e/util.go (+24 −25)
@@ -41,16 +41,15 @@ import (
     "k8s.io/client-go/tools/clientcmd"

     kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
-    "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
-    arbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
+    kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
+    kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
 )

 var oneMinute = 1 * time.Minute

 var halfCPU = v1.ResourceList{"cpu": resource.MustParse("500m")}
 var oneCPU = v1.ResourceList{"cpu": resource.MustParse("1000m")}
 var twoCPU = v1.ResourceList{"cpu": resource.MustParse("2000m")}
-var threeCPU = v1.ResourceList{"cpu": resource.MustParse("3000m")}

 const (
     workerPriority = "worker-pri"
@@ -66,7 +65,7 @@ func homeDir() string {

 type context struct {
     kubeclient *kubernetes.Clientset
-    karclient  *versioned.Clientset
+    kbclient   *kbver.Clientset

     namespace string
     queues    []string

@@ -86,7 +85,7 @@ func initTestContext() *context {
     config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(home, ".kube", "config"))
     Expect(err).NotTo(HaveOccurred())

-    cxt.karclient = versioned.NewForConfigOrDie(config)
+    cxt.kbclient = kbver.NewForConfigOrDie(config)
     cxt.kubeclient = kubernetes.NewForConfigOrDie(config)

     cxt.enableNamespaceAsQueue = enableNamespaceAsQueue

@@ -138,7 +137,7 @@ func queueNotExist(ctx *context) wait.ConditionFunc {
         if ctx.enableNamespaceAsQueue {
             _, err = ctx.kubeclient.CoreV1().Namespaces().Get(q, metav1.GetOptions{})
         } else {
-            _, err = ctx.karclient.Scheduling().Queues().Get(q, metav1.GetOptions{})
+            _, err = ctx.kbclient.Scheduling().Queues().Get(q, metav1.GetOptions{})
         }

         if !(err != nil && errors.IsNotFound(err)) {

@@ -190,7 +189,7 @@ func createQueues(cxt *context) {
                 },
             })
         } else {
-            _, err = cxt.karclient.Scheduling().Queues().Create(&kbv1.Queue{
+            _, err = cxt.kbclient.Scheduling().Queues().Create(&kbv1.Queue{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: q,
                 },

@@ -204,7 +203,7 @@ func createQueues(cxt *context) {
     }

     if !cxt.enableNamespaceAsQueue {
-        _, err := cxt.karclient.Scheduling().Queues().Create(&kbv1.Queue{
+        _, err := cxt.kbclient.Scheduling().Queues().Create(&kbv1.Queue{
             ObjectMeta: metav1.ObjectMeta{
                 Name: cxt.namespace,
             },

@@ -228,7 +227,7 @@ func deleteQueues(cxt *context) {
                 PropagationPolicy: &foreground,
             })
         } else {
-            err = cxt.karclient.Scheduling().Queues().Delete(q, &metav1.DeleteOptions{
+            err = cxt.kbclient.Scheduling().Queues().Delete(q, &metav1.DeleteOptions{
                 PropagationPolicy: &foreground,
             })
         }

@@ -237,7 +236,7 @@ func deleteQueues(cxt *context) {
     }

     if !cxt.enableNamespaceAsQueue {
-        err := cxt.karclient.Scheduling().Queues().Delete(cxt.namespace, &metav1.DeleteOptions{
+        err := cxt.kbclient.Scheduling().Queues().Delete(cxt.namespace, &metav1.DeleteOptions{
             PropagationPolicy: &foreground,
         })

@@ -334,15 +333,15 @@ func createJobEx(context *context, job *jobSpec) ([]*batchv1.Job, *kbv1.PodGroup
         pg.Spec.MinMember = *job.minMember
     }

-    podgroup, err := context.karclient.Scheduling().PodGroups(pg.Namespace).Create(pg)
+    podgroup, err := context.kbclient.Scheduling().PodGroups(pg.Namespace).Create(pg)
     Expect(err).NotTo(HaveOccurred())

     return jobs, podgroup
 }

 func taskPhase(ctx *context, pg *kbv1.PodGroup, phase []v1.PodPhase, taskNum int) wait.ConditionFunc {
     return func() (bool, error) {
-        pg, err := ctx.karclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+        pg, err := ctx.kbclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
         Expect(err).NotTo(HaveOccurred())

         pods, err := ctx.kubeclient.CoreV1().Pods(pg.Namespace).List(metav1.ListOptions{})

@@ -368,7 +367,7 @@ func taskPhase(ctx *context, pg *kbv1.PodGroup, phase []v1.PodPhase, taskNum int

 func taskPhaseEx(ctx *context, pg *kbv1.PodGroup, phase []v1.PodPhase, taskNum map[string]int) wait.ConditionFunc {
     return func() (bool, error) {
-        pg, err := ctx.karclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+        pg, err := ctx.kbclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
         Expect(err).NotTo(HaveOccurred())

         pods, err := ctx.kubeclient.CoreV1().Pods(pg.Namespace).List(metav1.ListOptions{})

@@ -419,7 +418,7 @@ func podGroupUnschedulable(ctx *context, pg *kbv1.PodGroup, time time.Time) wait

 func podGroupEvicted(ctx *context, pg *kbv1.PodGroup, time time.Time) wait.ConditionFunc {
     return func() (bool, error) {
-        pg, err := ctx.karclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+        pg, err := ctx.kbclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
         Expect(err).NotTo(HaveOccurred())

         events, err := ctx.kubeclient.CoreV1().Events(pg.Namespace).List(metav1.ListOptions{})

@@ -571,7 +570,7 @@ func clusterSize(ctx *context, req v1.ResourceList) int32 {
     pods, err := ctx.kubeclient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
     Expect(err).NotTo(HaveOccurred())

-    used := map[string]*arbapi.Resource{}
+    used := map[string]*kbapi.Resource{}

     for _, pod := range pods.Items {
         nodeName := pod.Spec.NodeName

@@ -584,11 +583,11 @@ func clusterSize(ctx *context, req v1.ResourceList) int32 {
         }

         if _, found := used[nodeName]; !found {
-            used[nodeName] = arbapi.EmptyResource()
+            used[nodeName] = kbapi.EmptyResource()
         }

         for _, c := range pod.Spec.Containers {
-            req := arbapi.NewResource(c.Resources.Requests)
+            req := kbapi.NewResource(c.Resources.Requests)
             used[nodeName].Add(req)
         }
     }

@@ -601,8 +600,8 @@ func clusterSize(ctx *context, req v1.ResourceList) int32 {
             continue
         }

-        alloc := arbapi.NewResource(node.Status.Allocatable)
-        slot := arbapi.NewResource(req)
+        alloc := kbapi.NewResource(node.Status.Allocatable)
+        slot := kbapi.NewResource(req)

         // Removed used resources.
         if res, found := used[node.Name]; found {

@@ -640,7 +639,7 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {
     pods, err := ctx.kubeclient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
     Expect(err).NotTo(HaveOccurred())

-    used := map[string]*arbapi.Resource{}
+    used := map[string]*kbapi.Resource{}

     for _, pod := range pods.Items {
         nodeName := pod.Spec.NodeName

@@ -653,11 +652,11 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {
         }

         if _, found := used[nodeName]; !found {
-            used[nodeName] = arbapi.EmptyResource()
+            used[nodeName] = kbapi.EmptyResource()
         }

         for _, c := range pod.Spec.Containers {
-            req := arbapi.NewResource(c.Resources.Requests)
+            req := kbapi.NewResource(c.Resources.Requests)
             used[nodeName].Add(req)
         }
     }

@@ -669,8 +668,8 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {

         res := int32(0)

-        alloc := arbapi.NewResource(node.Status.Allocatable)
-        slot := arbapi.NewResource(req)
+        alloc := kbapi.NewResource(node.Status.Allocatable)
+        slot := kbapi.NewResource(req)

         // Removed used resources.
         if res, found := used[node.Name]; found {

@@ -691,7 +690,7 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {
 }

 func getPodOfPodGroup(ctx *context, pg *kbv1.PodGroup) []*v1.Pod {
-    pg, err := ctx.karclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+    pg, err := ctx.kbclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
     Expect(err).NotTo(HaveOccurred())

     pods, err := ctx.kubeclient.CoreV1().Pods(pg.Namespace).List(metav1.ListOptions{})
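
The rest of the diff is a mechanical rename: the generated kube-batch clientset import changes from versioned to kbver, the context field from karclient to kbclient, and the scheduler API import from arbapi to kbapi. As a minimal, self-contained sketch of how the renamed client is constructed and used, assuming the pre-context client-go signatures these helpers already rely on; the hard-coded "default" namespace and the List call are illustrative, not taken from the commit.

package main

import (
    "fmt"
    "os"
    "path/filepath"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"

    kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
)

func main() {
    // Build client configuration from the local kubeconfig, as initTestContext does.
    kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    if err != nil {
        panic(err)
    }

    // kbver is the renamed import of the generated kube-batch clientset;
    // the e2e context stores the result in its kbclient field.
    kbclient := kbver.NewForConfigOrDie(config)

    // Query PodGroups through the scheduling group client, mirroring
    // ctx.kbclient.Scheduling().PodGroups(...) in the tests above.
    pgs, err := kbclient.Scheduling().PodGroups("default").List(metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for i := range pgs.Items {
        fmt.Println(pgs.Items[i].Name)
    }
}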
