Use NodeWrapper in test files (#3701)
kaisoz authored Dec 2, 2024
1 parent 08e1552 commit 41e4979
Showing 11 changed files with 216 additions and 461 deletions.
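The refactor replaces hand-written corev1.Node literals with the fluent NodeWrapper builder from sigs.k8s.io/kueue/pkg/util/testingjobs/node. The wrapper's implementation is not part of the hunks shown here, so the following is only a minimal sketch reconstructed from the methods the new call sites use (MakeNode, Label, StatusAllocatable, Ready, Obj); the actual helper may differ in detail.

// Sketch of the NodeWrapper builder, inferred from its call sites in this
// commit; the real implementation lives in pkg/util/testingjobs/node.
package node

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NodeWrapper wraps a corev1.Node so tests can build nodes fluently.
type NodeWrapper struct{ corev1.Node }

// MakeNode creates a wrapper for a node with the given name.
func MakeNode(name string) *NodeWrapper {
	return &NodeWrapper{corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: map[string]string{},
		},
	}}
}

// Label sets a single label on the wrapped node.
func (n *NodeWrapper) Label(k, v string) *NodeWrapper {
	n.ObjectMeta.Labels[k] = v
	return n
}

// StatusAllocatable sets the allocatable resources in the node status.
func (n *NodeWrapper) StatusAllocatable(rl corev1.ResourceList) *NodeWrapper {
	n.Status.Allocatable = rl
	return n
}

// Ready appends the NodeReady=True condition that the old literals carried.
func (n *NodeWrapper) Ready() *NodeWrapper {
	n.Status.Conditions = append(n.Status.Conditions, corev1.NodeCondition{
		Type:   corev1.NodeReady,
		Status: corev1.ConditionTrue,
	})
	return n
}

// Obj returns a pointer to the built node; call sites dereference it to
// store plain values in []corev1.Node slices.
func (n *NodeWrapper) Obj() *corev1.Node {
	return &n.Node
}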
119 changes: 36 additions & 83 deletions pkg/scheduler/scheduler_test.go
@@ -56,6 +56,7 @@ import (
"sigs.k8s.io/kueue/pkg/util/slices"
utiltas "sigs.k8s.io/kueue/pkg/util/tas"
utiltesting "sigs.k8s.io/kueue/pkg/util/testing"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
testingpod "sigs.k8s.io/kueue/pkg/util/testingjobs/pod"
"sigs.k8s.io/kueue/pkg/workload"
)
@@ -3804,27 +3805,15 @@ func TestScheduleForTAS(t *testing.T) {
tasRackLabel = "cloud.provider.com/rack"
)
defaultSingleNode := []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "x1",
Labels: map[string]string{
"tas-node": "true",
corev1.LabelHostname: "x1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("x1").
Label("tas-node", "true").
Label(corev1.LabelHostname, "x1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
defaultSingleLevelTopology := kueuealpha.Topology{
ObjectMeta: metav1.ObjectMeta{
@@ -4159,26 +4148,14 @@
},
"workload requests topology level which is only present in second flavor": {
nodes: []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "x1",
Labels: map[string]string{
"tas-node": "true",
"cloud.com/custom-level": "x1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("x1").
Label("tas-node", "true").
Label("cloud.com/custom-level", "x1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
}).
Ready().
Obj(),
},
topologies: []kueuealpha.Topology{defaultSingleLevelTopology,
{
@@ -4513,48 +4490,24 @@
},
"workload with multiple PodSets requesting the same TAS flavor; multiple levels": {
nodes: []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "x1",
Labels: map[string]string{
"tas-node": "true",
tasRackLabel: "r1",
corev1.LabelHostname: "x1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("3"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "y1",
Labels: map[string]string{
"tas-node": "true",
tasRackLabel: "r1",
corev1.LabelHostname: "y1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("3"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("x1").
Label("tas-node", "true").
Label(tasRackLabel, "r1").
Label(corev1.LabelHostname, "x1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("3"),
}).
Ready().
Obj(),
*testingnode.MakeNode("y1").
Label("tas-node", "true").
Label(tasRackLabel, "r1").
Label(corev1.LabelHostname, "y1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("3"),
}).
Ready().
Obj(),
},
topologies: []kueuealpha.Topology{defaultTwoLevelTopology},
resourceFlavors: []kueue.ResourceFlavor{defaultTASTwoLevelFlavor},
31 changes: 10 additions & 21 deletions test/integration/controller/jobs/job/job_controller_test.go
@@ -42,6 +42,7 @@ import (
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/util/testing"
testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
"sigs.k8s.io/kueue/pkg/workload"
"sigs.k8s.io/kueue/test/integration/framework"
"sigs.k8s.io/kueue/test/util"
@@ -2193,27 +2194,15 @@ var _ = ginkgo.Describe("Job controller when TopologyAwareScheduling enabled", g
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

nodes = []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "b1",
Labels: map[string]string{
nodeGroupLabel: "tas",
tasBlockLabel: "b1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("b1").
Label("node-group", "tas").
Label(tasBlockLabel, "b1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
for _, node := range nodes {
gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
33 changes: 11 additions & 22 deletions test/integration/controller/jobs/jobset/jobset_controller_test.go
@@ -41,6 +41,7 @@ import (
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/util/testing"
testingjobset "sigs.k8s.io/kueue/pkg/util/testingjobs/jobset"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
"sigs.k8s.io/kueue/pkg/workload"
"sigs.k8s.io/kueue/test/util"
)
@@ -1150,28 +1151,16 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled"
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

nodes = []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "b1r1",
Labels: map[string]string{
nodeGroupLabel: "tas",
tasBlockLabel: "b1",
tasRackLabel: "r1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("b1r1").
Label(nodeGroupLabel, "tas").
Label(tasBlockLabel, "b1").
Label(tasRackLabel, "r1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
for _, node := range nodes {
gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
33 changes: 11 additions & 22 deletions test/integration/controller/jobs/mpijob/mpijob_controller_test.go
@@ -43,6 +43,7 @@ import (
"sigs.k8s.io/kueue/pkg/util/testing"
testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
testingmpijob "sigs.k8s.io/kueue/pkg/util/testingjobs/mpijob"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
"sigs.k8s.io/kueue/pkg/workload"
"sigs.k8s.io/kueue/test/util"
)
@@ -934,28 +935,16 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled"
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

nodes = []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "b1r1",
Labels: map[string]string{
nodeGroupLabel: "tas",
tasBlockLabel: "b1",
tasRackLabel: "r1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("b1r1").
Label(nodeGroupLabel, "tas").
Label(tasBlockLabel, "b1").
Label(tasRackLabel, "r1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
for _, node := range nodes {
gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
33 changes: 11 additions & 22 deletions test/integration/controller/jobs/mxjob/mxjob_controller_test.go
@@ -38,6 +38,7 @@ import (
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/util/testing"
testingmxjob "sigs.k8s.io/kueue/pkg/util/testingjobs/mxjob"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
"sigs.k8s.io/kueue/test/integration/framework"
"sigs.k8s.io/kueue/test/util"
@@ -339,28 +340,16 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled",
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

nodes = []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "b1r1",
Labels: map[string]string{
nodeGroupLabel: "tas",
tasBlockLabel: "b1",
tasRackLabel: "r1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("b1r1").
Label(nodeGroupLabel, "tas").
Label(tasBlockLabel, "b1").
Label(tasRackLabel, "r1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
for _, node := range nodes {
gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go
@@ -37,6 +37,7 @@ import (
"sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/kubeflowjob"
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/util/testing"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
testingpaddlejob "sigs.k8s.io/kueue/pkg/util/testingjobs/paddlejob"
kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
"sigs.k8s.io/kueue/test/integration/framework"
@@ -328,28 +329,16 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

nodes = []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "b1r1",
Labels: map[string]string{
nodeGroupLabel: "tas",
tasBlockLabel: "b1",
tasRackLabel: "r1",
},
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
*testingnode.MakeNode("b1r1").
Label(nodeGroupLabel, "tas").
Label(tasBlockLabel, "b1").
Label(tasRackLabel, "r1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
for _, node := range nodes {
gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
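One detail of the new pattern worth noting: Obj() appears to return a *corev1.Node, which is why every call site dereferences it with a leading * to store plain values in the []corev1.Node slice, and why the create loops then take the loop variable's address again. A condensed sketch of the shared setup, assuming nodeGroupLabel, tasBlockLabel, and tasRackLabel are the constants defined in each test file:

// Condensed setup sketch; nodeGroupLabel, tasBlockLabel, and tasRackLabel
// are assumed to be constants defined elsewhere in the test file.
nodes := []corev1.Node{
	*testingnode.MakeNode("b1r1").
		Label(nodeGroupLabel, "tas").
		Label(tasBlockLabel, "b1").
		Label(tasRackLabel, "r1").
		StatusAllocatable(corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("1"),
			corev1.ResourceMemory: resource.MustParse("1Gi"),
		}).
		Ready().
		Obj(),
}
for _, node := range nodes {
	// Taking &node is safe here: Create finishes before the next iteration,
	// so the loop variable's address is not retained past its current value.
	gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
}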