diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index d8236e64e2..67a37ce0ca 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -56,6 +56,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/util/slices"
 	utiltas "sigs.k8s.io/kueue/pkg/util/tas"
 	utiltesting "sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	testingpod "sigs.k8s.io/kueue/pkg/util/testingjobs/pod"
 	"sigs.k8s.io/kueue/pkg/workload"
 )
@@ -3804,27 +3805,15 @@ func TestScheduleForTAS(t *testing.T) {
 		tasRackLabel = "cloud.provider.com/rack"
 	)
 	defaultSingleNode := []corev1.Node{
-		{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "x1",
-				Labels: map[string]string{
-					"tas-node": "true",
-					corev1.LabelHostname: "x1",
-				},
-			},
-			Status: corev1.NodeStatus{
-				Allocatable: corev1.ResourceList{
-					corev1.ResourceCPU: resource.MustParse("1"),
-					corev1.ResourceMemory: resource.MustParse("1Gi"),
-				},
-				Conditions: []corev1.NodeCondition{
-					{
-						Type: corev1.NodeReady,
-						Status: corev1.ConditionTrue,
-					},
-				},
-			},
-		},
+		*testingnode.MakeNode("x1").
+			Label("tas-node", "true").
+			Label(corev1.LabelHostname, "x1").
+			StatusAllocatable(corev1.ResourceList{
+				corev1.ResourceCPU: resource.MustParse("1"),
+				corev1.ResourceMemory: resource.MustParse("1Gi"),
+			}).
+			Ready().
+			Obj(),
 	}
 	defaultSingleLevelTopology := kueuealpha.Topology{
 		ObjectMeta: metav1.ObjectMeta{
@@ -4159,26 +4148,14 @@ func TestScheduleForTAS(t *testing.T) {
 		},
 		"workload requests topology level which is only present in second flavor": {
 			nodes: []corev1.Node{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "x1",
-						Labels: map[string]string{
-							"tas-node": "true",
-							"cloud.com/custom-level": "x1",
-						},
-					},
-					Status: corev1.NodeStatus{
-						Allocatable: corev1.ResourceList{
-							corev1.ResourceCPU: resource.MustParse("1"),
-						},
-						Conditions: []corev1.NodeCondition{
-							{
-								Type: corev1.NodeReady,
-								Status: corev1.ConditionTrue,
-							},
-						},
-					},
-				},
+				*testingnode.MakeNode("x1").
+					Label("tas-node", "true").
+					Label("cloud.com/custom-level", "x1").
+					StatusAllocatable(corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("1"),
+					}).
+					Ready().
+					Obj(),
 			},
 			topologies: []kueuealpha.Topology{defaultSingleLevelTopology,
 				{
@@ -4513,48 +4490,24 @@ func TestScheduleForTAS(t *testing.T) {
 		},
 		"workload with multiple PodSets requesting the same TAS flavor; multiple levels": {
 			nodes: []corev1.Node{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "x1",
-						Labels: map[string]string{
-							"tas-node": "true",
-							tasRackLabel: "r1",
-							corev1.LabelHostname: "x1",
-						},
-					},
-					Status: corev1.NodeStatus{
-						Allocatable: corev1.ResourceList{
-							corev1.ResourceCPU: resource.MustParse("3"),
-						},
-						Conditions: []corev1.NodeCondition{
-							{
-								Type: corev1.NodeReady,
-								Status: corev1.ConditionTrue,
-							},
-						},
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "y1",
-						Labels: map[string]string{
-							"tas-node": "true",
-							tasRackLabel: "r1",
-							corev1.LabelHostname: "y1",
-						},
-					},
-					Status: corev1.NodeStatus{
-						Allocatable: corev1.ResourceList{
-							corev1.ResourceCPU: resource.MustParse("3"),
-						},
-						Conditions: []corev1.NodeCondition{
-							{
-								Type: corev1.NodeReady,
-								Status: corev1.ConditionTrue,
-							},
-						},
-					},
-				},
+				*testingnode.MakeNode("x1").
+					Label("tas-node", "true").
+					Label(tasRackLabel, "r1").
+					Label(corev1.LabelHostname, "x1").
+					StatusAllocatable(corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("3"),
+					}).
+					Ready().
+					Obj(),
+				*testingnode.MakeNode("y1").
+					Label("tas-node", "true").
+					Label(tasRackLabel, "r1").
+					Label(corev1.LabelHostname, "y1").
+					StatusAllocatable(corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("3"),
+					}).
+					Ready().
+					Obj(),
 			},
 			topologies: []kueuealpha.Topology{defaultTwoLevelTopology},
 			resourceFlavors: []kueue.ResourceFlavor{defaultTASTwoLevelFlavor},
diff --git a/test/integration/controller/jobs/job/job_controller_test.go b/test/integration/controller/jobs/job/job_controller_test.go
index ccf7f437b5..c70fad4a6c 100644
--- a/test/integration/controller/jobs/job/job_controller_test.go
+++ b/test/integration/controller/jobs/job/job_controller_test.go
@@ -42,6 +42,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
 	testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	"sigs.k8s.io/kueue/pkg/workload"
 	"sigs.k8s.io/kueue/test/integration/framework"
 	"sigs.k8s.io/kueue/test/util"
@@ -2193,27 +2194,15 @@ var _ = ginkgo.Describe("Job controller when TopologyAwareScheduling enabled", g
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1").
+				Label("node-group", "tas").
+				Label(tasBlockLabel, "b1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/jobset/jobset_controller_test.go b/test/integration/controller/jobs/jobset/jobset_controller_test.go
index aa44ca5055..47ccca086c 100644
--- a/test/integration/controller/jobs/jobset/jobset_controller_test.go
+++ b/test/integration/controller/jobs/jobset/jobset_controller_test.go
@@ -41,6 +41,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
 	testingjobset "sigs.k8s.io/kueue/pkg/util/testingjobs/jobset"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	"sigs.k8s.io/kueue/pkg/workload"
 	"sigs.k8s.io/kueue/test/util"
 )
@@ -1150,28 +1151,16 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled"
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/mpijob/mpijob_controller_test.go b/test/integration/controller/jobs/mpijob/mpijob_controller_test.go
index d1514399f7..4d09aed023 100644
--- a/test/integration/controller/jobs/mpijob/mpijob_controller_test.go
+++ b/test/integration/controller/jobs/mpijob/mpijob_controller_test.go
@@ -43,6 +43,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/util/testing"
 	testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
 	testingmpijob "sigs.k8s.io/kueue/pkg/util/testingjobs/mpijob"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	"sigs.k8s.io/kueue/pkg/workload"
 	"sigs.k8s.io/kueue/test/util"
 )
@@ -934,28 +935,16 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled"
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/mxjob/mxjob_controller_test.go b/test/integration/controller/jobs/mxjob/mxjob_controller_test.go
index 67c0a354b5..72a0f1171c 100644
--- a/test/integration/controller/jobs/mxjob/mxjob_controller_test.go
+++ b/test/integration/controller/jobs/mxjob/mxjob_controller_test.go
@@ -38,6 +38,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
 	testingmxjob "sigs.k8s.io/kueue/pkg/util/testingjobs/mxjob"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
 	"sigs.k8s.io/kueue/test/integration/framework"
 	"sigs.k8s.io/kueue/test/util"
@@ -339,28 +340,16 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled",
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go b/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go
index 2bae1da906..fb57c28045 100644
--- a/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go
+++ b/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go
@@ -37,6 +37,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/kubeflowjob"
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	testingpaddlejob "sigs.k8s.io/kueue/pkg/util/testingjobs/paddlejob"
 	kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
 	"sigs.k8s.io/kueue/test/integration/framework"
@@ -328,28 +329,16 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/pod/pod_controller_test.go b/test/integration/controller/jobs/pod/pod_controller_test.go
index e4bef6e498..bf75057132 100644
--- a/test/integration/controller/jobs/pod/pod_controller_test.go
+++ b/test/integration/controller/jobs/pod/pod_controller_test.go
@@ -42,6 +42,7 @@ import (
 	podcontroller "sigs.k8s.io/kueue/pkg/controller/jobs/pod"
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	testingpod "sigs.k8s.io/kueue/pkg/util/testingjobs/pod"
 	"sigs.k8s.io/kueue/pkg/workload"
 	"sigs.k8s.io/kueue/test/util"
@@ -1902,27 +1903,15 @@ var _ = ginkgo.Describe("Pod controller when TopologyAwareScheduling enabled", g
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go b/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go
index 62e8e1be29..e04f0e2f57 100644
--- a/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go
+++ b/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go
@@ -39,6 +39,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/kubeflowjob"
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	testingpytorchjob "sigs.k8s.io/kueue/pkg/util/testingjobs/pytorchjob"
 	"sigs.k8s.io/kueue/pkg/workload"
 	kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
@@ -631,28 +632,16 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/tfjob/tfjob_controller_test.go b/test/integration/controller/jobs/tfjob/tfjob_controller_test.go
index 5af3c0ea68..630097c415 100644
--- a/test/integration/controller/jobs/tfjob/tfjob_controller_test.go
+++ b/test/integration/controller/jobs/tfjob/tfjob_controller_test.go
@@ -37,6 +37,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/kubeflowjob"
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	testingtfjob "sigs.k8s.io/kueue/pkg/util/testingjobs/tfjob"
 	kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
 	"sigs.k8s.io/kueue/test/integration/framework"
@@ -342,28 +343,16 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled",
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go b/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go
index e2c13f231f..2c2394cd4a 100644
--- a/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go
+++ b/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go
@@ -37,6 +37,7 @@ import (
 	"sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/kubeflowjob"
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	testingxgboostjob "sigs.k8s.io/kueue/pkg/util/testingjobs/xgboostjob"
 	kftesting "sigs.k8s.io/kueue/test/integration/controller/jobs/kubeflow"
 	"sigs.k8s.io/kueue/test/integration/framework"
@@ -325,28 +326,16 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab
 		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1r1",
-					Labels: map[string]string{
-						nodeGroupLabel: "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1r1").
+				Label(nodeGroupLabel, "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
diff --git a/test/integration/tas/tas_test.go b/test/integration/tas/tas_test.go
index 5441ea2e2b..0f1faff91d 100644
--- a/test/integration/tas/tas_test.go
+++ b/test/integration/tas/tas_test.go
@@ -32,6 +32,7 @@ import (
 	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
 	"sigs.k8s.io/kueue/pkg/features"
 	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
 	"sigs.k8s.io/kueue/test/util"
 )
 
@@ -219,94 +220,46 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 
 	ginkgo.BeforeEach(func() {
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1-r1",
-					Labels: map[string]string{
-						"node-group": "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b1-r2",
-					Labels: map[string]string{
-						"node-group": "tas",
-						tasBlockLabel: "b1",
-						tasRackLabel: "r2",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b2-r1",
-					Labels: map[string]string{
-						"node-group": "tas",
-						tasBlockLabel: "b2",
-						tasRackLabel: "r1",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "b2-r2",
-					Labels: map[string]string{
-						"node-group": "tas",
-						tasBlockLabel: "b2",
-						tasRackLabel: "r2",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("1"),
-						corev1.ResourceMemory: resource.MustParse("1Gi"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("b1-r1").
+				Label("node-group", "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
+			*testingnode.MakeNode("b1-r2").
+				Label("node-group", "tas").
+				Label(tasBlockLabel, "b1").
+				Label(tasRackLabel, "r2").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
+			*testingnode.MakeNode("b2-r1").
+				Label("node-group", "tas").
+				Label(tasBlockLabel, "b2").
+				Label(tasRackLabel, "r1").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
+			*testingnode.MakeNode("b2-r2").
+				Label("node-group", "tas").
+				Label(tasBlockLabel, "b2").
+				Label(tasRackLabel, "r2").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1"),
+					corev1.ResourceMemory: resource.MustParse("1Gi"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
@@ -633,28 +586,16 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 
 			ginkgo.By("Create nodes to allow scheduling", func() {
 				nodes = []corev1.Node{
-					{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: "b1-r1",
-							Labels: map[string]string{
-								"node-group": "tas",
-								tasBlockLabel: "b1",
-								tasRackLabel: "r1",
-							},
-						},
-						Status: corev1.NodeStatus{
-							Allocatable: corev1.ResourceList{
-								corev1.ResourceCPU: resource.MustParse("1"),
-								corev1.ResourceMemory: resource.MustParse("1Gi"),
-							},
-							Conditions: []corev1.NodeCondition{
-								{
-									Type: corev1.NodeReady,
-									Status: corev1.ConditionTrue,
-								},
-							},
-						},
-					},
+					*testingnode.MakeNode("b1-r1").
+						Label("node-group", "tas").
+						Label(tasBlockLabel, "b1").
+						Label(tasRackLabel, "r1").
+						StatusAllocatable(corev1.ResourceList{
+							corev1.ResourceCPU: resource.MustParse("1"),
+							corev1.ResourceMemory: resource.MustParse("1Gi"),
+						}).
+						Ready().
+						Obj(),
 				}
 				for _, node := range nodes {
 					gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
@@ -724,38 +665,22 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 
 			ginkgo.By("creating a tainted node which will prevent admitting the workload", func() {
 				nodes = []corev1.Node{
-					{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: "b1-r1-x1",
-							Labels: map[string]string{
-								"node-group": "tas",
-								tasBlockLabel: "b1",
-								tasRackLabel: "r1",
-								corev1.LabelHostname: "b1-r1-x1",
-							},
-						},
-						Spec: corev1.NodeSpec{
-							Taints: []corev1.Taint{
-								{
-									Key: "maintenance",
-									Value: "true",
-									Effect: corev1.TaintEffectNoSchedule,
-								},
-							},
-						},
-						Status: corev1.NodeStatus{
-							Allocatable: corev1.ResourceList{
-								corev1.ResourceCPU: resource.MustParse("1"),
-								corev1.ResourceMemory: resource.MustParse("1Gi"),
-							},
-							Conditions: []corev1.NodeCondition{
-								{
-									Type: corev1.NodeReady,
-									Status: corev1.ConditionTrue,
-								},
-							},
-						},
-					},
+					*testingnode.MakeNode("b1-r1-x1").
+						Label("node-group", "tas").
+						Label(tasBlockLabel, "b1").
+						Label(tasRackLabel, "r1").
+						Label(corev1.LabelHostname, "b1-r1-x1").
+						StatusAllocatable(corev1.ResourceList{
+							corev1.ResourceCPU: resource.MustParse("1"),
+							corev1.ResourceMemory: resource.MustParse("1Gi"),
+						}).
+						Taints(corev1.Taint{
+							Key: "maintenance",
+							Value: "true",
+							Effect: corev1.TaintEffectNoSchedule,
+						}).
+						Ready().
+						Obj(),
 				}
 				for _, node := range nodes {
 					gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
@@ -811,46 +736,22 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 
 	ginkgo.BeforeEach(func() {
 		nodes = []corev1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "cpu-node",
-					Labels: map[string]string{
-						"node.kubernetes.io/instance-type": "cpu-node",
-						tasRackLabel: "cpu-rack",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						corev1.ResourceCPU: resource.MustParse("5"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "gpu-node",
-					Labels: map[string]string{
-						"node.kubernetes.io/instance-type": "gpu-node",
-						tasRackLabel: "gpu-rack",
-					},
-				},
-				Status: corev1.NodeStatus{
-					Allocatable: corev1.ResourceList{
-						gpuResName: resource.MustParse("4"),
-					},
-					Conditions: []corev1.NodeCondition{
-						{
-							Type: corev1.NodeReady,
-							Status: corev1.ConditionTrue,
-						},
-					},
-				},
-			},
+			*testingnode.MakeNode("cpu-node").
+				Label("node.kubernetes.io/instance-type", "cpu-node").
+				Label(tasRackLabel, "cpu-rack").
+				StatusAllocatable(corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("5"),
+				}).
+				Ready().
+				Obj(),
+			*testingnode.MakeNode("gpu-node").
+				Label("node.kubernetes.io/instance-type", "gpu-node").
+				Label(tasRackLabel, "gpu-rack").
+				StatusAllocatable(corev1.ResourceList{
+					gpuResName: resource.MustParse("4"),
+				}).
+				Ready().
+				Obj(),
 		}
 		for _, node := range nodes {
 			gomega.Expect(k8sClient.Create(ctx, &node)).Should(gomega.Succeed())
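
Reviewer note (not part of the diff): the node builder in pkg/util/testingjobs/node is only consumed by this changeset, not defined in it. The sketch below is an assumed, minimal shape of that wrapper, inferred solely from the call sites above (MakeNode, Label, StatusAllocatable, Taints, Ready, Obj); the internal field layout and method bodies are guesses, not the actual package contents.

// Sketch only: an assumed shape for the node test wrapper used in these tests,
// reconstructed from the call sites in this diff. The real implementation in
// pkg/util/testingjobs/node may differ.
package node

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NodeWrapper wraps a corev1.Node so tests can build fixtures fluently.
type NodeWrapper struct {
	corev1.Node
}

// MakeNode creates a wrapper for a Node with the given name.
func MakeNode(name string) *NodeWrapper {
	return &NodeWrapper{
		corev1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name:   name,
				Labels: map[string]string{},
			},
		},
	}
}

// Obj returns the wrapped Node.
func (n *NodeWrapper) Obj() *corev1.Node {
	return &n.Node
}

// Label sets a single label on the node.
func (n *NodeWrapper) Label(k, v string) *NodeWrapper {
	n.ObjectMeta.Labels[k] = v
	return n
}

// StatusAllocatable sets the allocatable resources in the node status.
func (n *NodeWrapper) StatusAllocatable(resources corev1.ResourceList) *NodeWrapper {
	n.Status.Allocatable = resources
	return n
}

// Taints appends taints to the node spec.
func (n *NodeWrapper) Taints(taints ...corev1.Taint) *NodeWrapper {
	n.Spec.Taints = append(n.Spec.Taints, taints...)
	return n
}

// Ready marks the node as ready by appending a NodeReady=True condition.
func (n *NodeWrapper) Ready() *NodeWrapper {
	n.Status.Conditions = append(n.Status.Conditions, corev1.NodeCondition{
		Type:   corev1.NodeReady,
		Status: corev1.ConditionTrue,
	})
	return n
}

With a wrapper of roughly this shape, each node fixture collapses from a ~20-line struct literal (name, labels, allocatable resources, and a NodeReady condition) into a single fluent chain, which is the point of this refactor.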