This repository was archived by the owner on May 25, 2023. It is now read-only.

Commit 0d486c6

Merge pull request #560 from k82cn/kb_521
Update PodGroup status.
2 parents: 38ad28b + 60255c7

15 files changed: +338 -381 lines

pkg/apis/scheduling/v1alpha1/types.go (+29 -30)
@@ -17,18 +17,10 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// Event represent the phase of PodGroup, e.g. pod-failed.
-type Event string
-
-const (
-	EvictEvent            Event = "Evict"
-	UnschedulableEvent    Event = "Unschedulable"
-	FailedSchedulingEvent Event = "FailedScheduling"
-)
-
 // PodGroupPhase is the phase of a pod group at the current time.
 type PodGroupPhase string
 
@@ -41,35 +33,39 @@ const (
 	// PodRunning means `spec.minMember` pods of PodGroups has been in running phase.
 	PodGroupRunning PodGroupPhase = "Running"
 
-	// PodGroupRecovering means part of `spec.minMember` pods have exception, e.g. killed; scheduler will
-	// wait for related controller to recover it.
-	PodGroupRecovering PodGroupPhase = "Recovering"
-
-	// PodGroupUnschedulable means part of `spec.minMember` pods are running but the other part can not
+	// PodGroupUnknown means part of `spec.minMember` pods are running but the other part can not
 	// be scheduled, e.g. not enough resource; scheduler will wait for related controller to recover it.
-	PodGroupUnschedulable PodGroupPhase = "Unschedulable"
+	PodGroupUnknown PodGroupPhase = "Unknown"
 )
 
-// PodGroupState contains details for the current state of this pod group.
-type PodGroupState struct {
-	// Current phase of PodGroup.
-	Phase PodGroupPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase"`
+type PodGroupConditionType string
 
-	// Last time we probed to this Phase.
-	// +optional
-	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,2,opt,name=lastProbeTime"`
+const (
+	PodGroupUnschedulableType PodGroupConditionType = "Unschedulable"
+)
+
+// PodGroupCondition contains details for the current state of this pod group.
+type PodGroupCondition struct {
+	// Type is the type of the condition
+	Type PodGroupConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+	// Status is the status of the condition.
+	Status v1.ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+
+	// The ID of condition transition.
+	TransitionID string `json:"transitionID,omitempty" protobuf:"bytes,3,opt,name=transitionID"`
 
 	// Last time the phase transitioned from another to current phase.
 	// +optional
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
 
 	// Unique, one-word, CamelCase reason for the phase's last transition.
 	// +optional
-	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
 
 	// Human-readable message indicating details about last transition.
 	// +optional
-	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
 }
 
 const (
@@ -122,21 +118,24 @@ type PodGroupSpec struct {
 
 // PodGroupStatus represents the current state of a pod group.
 type PodGroupStatus struct {
-	// The state of PodGroup.
+	// Current phase of PodGroup.
+	Phase PodGroupPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase"`
+
+	// The conditions of PodGroup.
 	// +optional
-	State PodGroupState `json:"state,omitempty" protobuf:"bytes,1,opt,name=state,casttype=State"`
+	Conditions []PodGroupCondition `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
 
 	// The number of actively running pods.
 	// +optional
-	Running int32 `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
+	Running int32 `json:"running,omitempty" protobuf:"bytes,3,opt,name=running"`
 
 	// The number of pods which reached phase Succeeded.
 	// +optional
-	Succeeded int32 `json:"succeeded,omitempty" protobuf:"bytes,3,opt,name=succeeded"`
+	Succeeded int32 `json:"succeeded,omitempty" protobuf:"bytes,4,opt,name=succeeded"`
 
 	// The number of pods which reached phase Failed.
 	// +optional
-	Failed int32 `json:"failed,omitempty" protobuf:"bytes,4,opt,name=failed"`
+	Failed int32 `json:"failed,omitempty" protobuf:"bytes,5,opt,name=failed"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
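Taken together, this file replaces the single embedded PodGroupState with a top-level Phase plus a list of conditions. As a rough illustration of the new shape, here is a minimal sketch of a helper that flags a pod group as unschedulable; the helper name and argument values are hypothetical, not part of this commit:

package sketch

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
)

// markUnschedulable (hypothetical) sets the new top-level Phase and appends
// a condition using the fields introduced above: Type, Status, and the new
// TransitionID.
func markUnschedulable(pg *kbv1.PodGroup, transitionID, reason, message string) {
	pg.Status.Phase = kbv1.PodGroupUnknown
	pg.Status.Conditions = append(pg.Status.Conditions, kbv1.PodGroupCondition{
		Type:               kbv1.PodGroupUnschedulableType,
		Status:             v1.ConditionTrue,
		TransitionID:       transitionID,
		LastTransitionTime: metav1.Now(),
		Reason:             reason,
		Message:            message,
	})
}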

pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go (+24 -19)

(Generated file; diff not rendered.)

pkg/scheduler/actions/allocate/allocate_test.go (+17 -9)
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/record"
 
 	kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
 	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
@@ -113,12 +114,17 @@ func (fb *fakeBinder) Bind(p *v1.Pod, hostname string) error {
 	return nil
 }
 
-type fakeTaskStatusUpdater struct {
+type fakeStatusUpdater struct {
 }
 
-func (ftsu *fakeTaskStatusUpdater) Update(pod *v1.Pod, podCondition *v1.PodCondition) error {
+func (ftsu *fakeStatusUpdater) UpdatePod(pod *v1.Pod, podCondition *v1.PodCondition) (*v1.Pod, error) {
 	// do nothing here
-	return nil
+	return nil, nil
+}
+
+func (ftsu *fakeStatusUpdater) UpdatePodGroup(pg *kbv1.PodGroup) (*kbv1.PodGroup, error) {
+	// do nothing here
+	return nil, nil
 }
 
 type fakeVolumeBinder struct {
@@ -239,12 +245,14 @@ func TestAllocate(t *testing.T) {
 			c: make(chan string),
 		}
 		schedulerCache := &cache.SchedulerCache{
-			Nodes:             make(map[string]*api.NodeInfo),
-			Jobs:              make(map[api.JobID]*api.JobInfo),
-			Queues:            make(map[api.QueueID]*api.QueueInfo),
-			Binder:            binder,
-			TaskStatusUpdater: &fakeTaskStatusUpdater{},
-			VolumeBinder:      &fakeVolumeBinder{},
+			Nodes:         make(map[string]*api.NodeInfo),
+			Jobs:          make(map[api.JobID]*api.JobInfo),
+			Queues:        make(map[api.QueueID]*api.QueueInfo),
+			Binder:        binder,
+			StatusUpdater: &fakeStatusUpdater{},
+			VolumeBinder:  &fakeVolumeBinder{},
+
+			Recorder: record.NewFakeRecorder(100),
 		}
 		for _, node := range test.nodes {
 			schedulerCache.AddNode(node)
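The renamed fake implies the cache now funnels both pod and pod-group updates through one contract. Below is a sketch of the interface the fake appears to satisfy; the actual definition lives in the scheduler cache package and may differ in name or method set:

package sketch

import (
	"k8s.io/api/core/v1"

	kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
)

// StatusUpdater, as inferred from fakeStatusUpdater above: both methods
// return the updated object so callers can keep their view in sync.
type StatusUpdater interface {
	UpdatePod(pod *v1.Pod, podCondition *v1.PodCondition) (*v1.Pod, error)
	UpdatePodGroup(pg *kbv1.PodGroup) (*kbv1.PodGroup, error)
}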

pkg/scheduler/api/helpers.go (-1)
@@ -99,6 +99,5 @@ func MergeErrors(errs ...error) error {
 // JobTerminated checkes whether job was terminated.
 func JobTerminated(job *JobInfo) bool {
 	return job.PodGroup == nil &&
-		job.PDB == nil &&
 		len(job.Tasks) == 0
 }

pkg/scheduler/api/job_info.go (+4 -13)
@@ -139,9 +139,6 @@ type JobInfo struct {
 
 	CreationTimestamp metav1.Time
 	PodGroup          *arbcorev1.PodGroup
-
-	// TODO(k82cn): keep backward compatbility, removed it when v1alpha1 finalized.
-	PDB *policyv1.PodDisruptionBudget
 }
 
 func NewJobInfo(uid JobID) *JobInfo {
@@ -196,11 +193,6 @@ func (ji *JobInfo) SetPDB(pdb *policyv1.PodDisruptionBudget) {
 	}
 
 	ji.CreationTimestamp = pdb.GetCreationTimestamp()
-	ji.PDB = pdb
-}
-
-func (ji *JobInfo) UnsetPDB() {
-	ji.PDB = nil
 }
 
 func (ji *JobInfo) GetTasks(statuses ...TaskStatus) []*TaskInfo {
@@ -292,7 +284,6 @@ func (ji *JobInfo) Clone() *JobInfo {
 		TotalRequest:  ji.TotalRequest.Clone(),
 		NodesFitDelta: make(NodeResourceMap),
 
-		PDB:      ji.PDB,
 		PodGroup: ji.PodGroup,
 
 		TaskStatusIndex: map[TaskStatus]tasksMap{},
@@ -326,14 +317,14 @@ func (ji JobInfo) String() string {
 
 // Error returns detailed information on why a job's task failed to fit on
 // each available node
-func (f *JobInfo) FitError() string {
-	if len(f.NodesFitDelta) == 0 {
+func (ji *JobInfo) FitError() string {
+	if len(ji.NodesFitDelta) == 0 {
 		reasonMsg := fmt.Sprintf("0 nodes are available")
 		return reasonMsg
 	}
 
 	reasons := make(map[string]int)
-	for _, v := range f.NodesFitDelta {
+	for _, v := range ji.NodesFitDelta {
 		if v.Get(v1.ResourceCPU) < 0 {
 			reasons["cpu"]++
 		}
@@ -353,6 +344,6 @@ func (f *JobInfo) FitError() string {
 		sort.Strings(reasonStrings)
 		return reasonStrings
 	}
-	reasonMsg := fmt.Sprintf("0/%v nodes are available, %v.", len(f.NodesFitDelta), strings.Join(sortReasonsHistogram(), ", "))
+	reasonMsg := fmt.Sprintf("0/%v nodes are available, %v.", len(ji.NodesFitDelta), strings.Join(sortReasonsHistogram(), ", "))
 	return reasonMsg
 }
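Beyond the receiver rename from f to ji, FitError's summary string is a natural Message for the Unschedulable condition added in this commit. A hypothetical sketch of that glue, reusing the StatusUpdater interface inferred earlier; none of these helper names come from this change, and it assumes JobInfo.PodGroup is the v1alpha1 PodGroup type:

package sketch

import (
	"k8s.io/api/core/v1"

	kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)

// syncUnschedulable (hypothetical) copies a job's fit error into a
// PodGroupCondition and persists it through the status updater.
func syncUnschedulable(updater StatusUpdater, job *api.JobInfo) error {
	pg := job.PodGroup
	pg.Status.Conditions = append(pg.Status.Conditions, kbv1.PodGroupCondition{
		Type:    kbv1.PodGroupUnschedulableType,
		Status:  v1.ConditionTrue,
		Reason:  "NotEnoughResources", // illustrative reason string
		Message: job.FitError(),
	})
	_, err := updater.UpdatePodGroup(pg)
	return err
}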

pkg/scheduler/api/types.go (+9)
@@ -88,6 +88,15 @@ type CompareFn func(interface{}, interface{}) int
 // ValidateFn is the func declaration used to check object's status.
 type ValidateFn func(interface{}) bool
 
+//
+type ValidateResult struct {
+	Pass    bool
+	Reason  string
+	Message string
+}
+
+type ValidateExFn func(interface{}) *ValidateResult
+
 // PredicateFn is the func declaration used to predicate node for task.
 type PredicateFn func(*TaskInfo, *NodeInfo) error
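ValidateResult upgrades plugin validation from a bare bool to a verdict carrying a Reason and Message, which lines up with the condition fields added in types.go. A minimal sketch of a ValidateExFn, assuming a gang-style check against the pod group's Spec.MinMember field; the logic and names are illustrative, not from this commit:

package sketch

import (
	"fmt"

	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)

// validateGang is a hypothetical ValidateExFn: it passes a job only when
// enough tasks exist to satisfy its pod group's minMember.
var validateGang api.ValidateExFn = func(obj interface{}) *api.ValidateResult {
	job, ok := obj.(*api.JobInfo)
	if !ok {
		return &api.ValidateResult{Pass: false, Reason: "InvalidObject", Message: "expected *api.JobInfo"}
	}
	if int32(len(job.Tasks)) < job.PodGroup.Spec.MinMember { // assumes Spec.MinMember is int32
		return &api.ValidateResult{
			Pass:    false,
			Reason:  "NotEnoughTasks",
			Message: fmt.Sprintf("%d tasks found, minMember is %d", len(job.Tasks), job.PodGroup.Spec.MinMember),
		}
	}
	return &api.ValidateResult{Pass: true}
}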
