Skip to content

Commit

Permalink
ci: Complete ci run
Browse files Browse the repository at this point in the history
Signed-off-by: LY-today <[email protected]>
  • Loading branch information
LY-today committed Dec 20, 2024
1 parent 06a124a commit f632ecf
Show file tree
Hide file tree
Showing 4 changed files with 228 additions and 36 deletions.
Original file line number Diff line number Diff line change
@@ -1,8 +1,25 @@
/*
Copyright 2022 The Koordinator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package noderesourcesfitplus

import (
"context"
"fmt"

"github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config"

Check failure on line 23 in pkg/scheduler/plugins/noderesourcefitplus/node_resources_fit_plus.go

View workflow job for this annotation

GitHub Actions / golangci-lint

File is not `goimports`-ed with -local github.com/koordinator-sh/koordinator (goimports)
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
Expand Down Expand Up @@ -48,7 +65,7 @@ func (s *Plugin) Name() string {
func (s *Plugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {

nodeInfo, err := s.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
if err != nil {
if err != nil || nodeInfo == nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,25 @@
/*
Copyright 2022 The Koordinator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package noderesourcesfitplus

import (
"context"
"testing"

koordinatorclientset "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned"
koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake"
koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions"
Expand All @@ -22,7 +40,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
"testing"
)

var _ framework.SharedLister = &testSharedLister{}
Expand Down Expand Up @@ -113,14 +130,16 @@ func TestPlugin_Score(t *testing.T) {
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
Capacity: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
},
},
Expand All @@ -130,16 +149,18 @@ func TestPlugin_Score(t *testing.T) {
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
Capacity: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
},
},
Expand Down Expand Up @@ -219,14 +240,18 @@ func TestPlugin_Score(t *testing.T) {
Name: "test-container",
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
"nvidia.com/gpu": resource.MustParse("2"),
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
"nvidia.com/gpu": resource.MustParse("2"),
"xxx.com/xx": resource.MustParse("4"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
"nvidia.com/gpu": resource.MustParse("2"),
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
"nvidia.com/gpu": resource.MustParse("2"),
"xxx.com/xx": resource.MustParse("4"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
},
},
Expand All @@ -238,6 +263,36 @@ func TestPlugin_Score(t *testing.T) {
if scoreNode1 <= scoreNode2 {
t.Fatal("scoreNode1 must <= scoreNode2")
}

scoreNode3, status := p.(*Plugin).Score(context.TODO(), cycleState, pod, "testNode3")
if scoreNode3 != 0 || status.IsSuccess() {
t.Fatal("getting node Snapshot err")
}

pod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "test-pod-1",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-container",
Resources: corev1.ResourceRequirements{},
},
},
},
}
scoreNode1, _ = p.(*Plugin).Score(context.TODO(), cycleState, pod, "testNode1")
if scoreNode1 != framework.MaxNodeScore {
t.Fatal("weightSum == 0")
}

scoreExtensions := p.(*Plugin).ScoreExtensions()
if scoreExtensions != nil {
t.Fatal("ScoreExtensions err")
}

}

func (f *testSharedLister) StorageInfos() framework.StorageInfoLister {
Expand Down Expand Up @@ -267,3 +322,38 @@ func (f *testSharedLister) HavePodsWithRequiredAntiAffinityList() ([]*framework.
// Get returns the cached NodeInfo for nodeName, or nil when the node is
// unknown to this test lister. The error result is always nil; missing
// nodes are signalled by the nil NodeInfo instead.
func (f *testSharedLister) Get(nodeName string) (*framework.NodeInfo, error) {
	info := f.nodeInfoMap[nodeName]
	return info, nil
}

// TestNew verifies that the plugin factory rejects arguments of the wrong
// type: it feeds LoadAwareSchedulingArgs (instead of the args type this
// plugin expects) through the framework-extender proxy and requires
// construction to fail.
func TestNew(t *testing.T) {
	var v1beta3args v1beta3.LoadAwareSchedulingArgs
	v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args)
	var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs
	err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil)
	assert.NoError(t, err)

	koordClientSet := koordfake.NewSimpleClientset()
	koordSharedInformerFactory := koordinatorinformers.NewSharedInformerFactory(koordClientSet, 0)
	// The factory error was previously discarded with `_`; a nil factory
	// here would make the proxy construction below fail for the wrong reason.
	extenderFactory, err := frameworkext.NewFrameworkExtenderFactory(
		frameworkext.WithKoordinatorClientSet(koordClientSet),
		frameworkext.WithKoordinatorSharedInformerFactory(koordSharedInformerFactory),
	)
	assert.NoError(t, err)
	proxyNew := frameworkext.PluginFactoryProxy(extenderFactory, New)

	cs := kubefake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(cs, 0)
	snapshot := newTestSharedLister(nil, nil)
	registeredPlugins := []schedulertesting.RegisterPluginFunc{
		schedulertesting.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
		schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
	}
	fh, err := schedulertesting.NewFramework(context.TODO(), registeredPlugins, "koord-scheduler",
		frameworkruntime.WithClientSet(cs),
		frameworkruntime.WithInformerFactory(informerFactory),
		frameworkruntime.WithSnapshotSharedLister(snapshot),
	)
	// Use assert.NoError consistently instead of assert.Nil for errors.
	assert.NoError(t, err)

	// Wrong args type must surface a construction error.
	_, err = proxyNew(&loadAwareSchedulingArgs, fh)
	if err == nil {
		t.Fatal("want args to be of type NodeResourcesArgs")
	}
}
Original file line number Diff line number Diff line change
@@ -1,8 +1,25 @@
/*
Copyright 2022 The Koordinator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scarceresourceavoidance

import (
"context"
"fmt"

"github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config"

Check failure on line 23 in pkg/scheduler/plugins/scarceresourceavoidance/scarce_resource_avoidance.go

View workflow job for this annotation

GitHub Actions / golangci-lint

File is not `goimports`-ed with -local github.com/koordinator-sh/koordinator (goimports)
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
Expand Down Expand Up @@ -44,7 +61,7 @@ func (s *Plugin) Name() string {

func (s *Plugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
nodeInfo, err := s.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
if err != nil {
if err != nil || nodeInfo == nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,25 @@
/*
Copyright 2022 The Koordinator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scarceresourceavoidance

import (
"context"
"testing"

koordinatorclientset "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned"

Check failure on line 23 in pkg/scheduler/plugins/scarceresourceavoidance/scarce_resource_avoidance_test.go

View workflow job for this annotation

GitHub Actions / golangci-lint

File is not `goimports`-ed with -local github.com/koordinator-sh/koordinator (goimports)
koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake"
koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions"
Expand All @@ -21,7 +39,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
"testing"
)

var _ framework.SharedLister = &testSharedLister{}
Expand Down Expand Up @@ -108,16 +125,18 @@ func TestPlugin_Score(t *testing.T) {
},
Status: corev1.NodeStatus{
Allocatable: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
Capacity: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceCPU: resource.MustParse("96"),
corev1.ResourceMemory: resource.MustParse("512Gi"),
"nvidia.com/gpu": resource.MustParse("8"),
"xx.xx/xx": resource.MustParse("8"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
},
},
Expand Down Expand Up @@ -184,12 +203,16 @@ func TestPlugin_Score(t *testing.T) {
Name: "test-container",
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
"xxx.com/xx": resource.MustParse("4"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("32Gi"),
"xxx.com/xx": resource.MustParse("4"),
corev1.ResourceEphemeralStorage: resource.MustParse("4"),
},
},
},
Expand All @@ -201,6 +224,16 @@ func TestPlugin_Score(t *testing.T) {
if scoreNode1 >= scoreNode2 {
t.Fatal("scoreNode1 must >= scoreNode2")
}

scoreNode3, status := p.(*Plugin).Score(context.TODO(), cycleState, pod, "testNode3")
if scoreNode3 != 0 || status.IsSuccess() {
t.Fatal("getting node Snapshot err")
}

scoreExtensions := p.(*Plugin).ScoreExtensions()
if scoreExtensions != nil {
t.Fatal("ScoreExtensions err")
}
}

func (f *testSharedLister) StorageInfos() framework.StorageInfoLister {
Expand Down Expand Up @@ -230,3 +263,38 @@ func (f *testSharedLister) HavePodsWithRequiredAntiAffinityList() ([]*framework.
// Get looks up the NodeInfo recorded for nodeName in this test lister.
// A node that was never registered yields a nil NodeInfo; the error
// return is unconditionally nil.
func (f *testSharedLister) Get(nodeName string) (*framework.NodeInfo, error) {
	return f.nodeInfoMap[nodeName], nil // nil NodeInfo signals "not found"
}

// TestNew verifies that the plugin factory rejects arguments of the wrong
// type: it feeds LoadAwareSchedulingArgs (instead of the args type this
// plugin expects) through the framework-extender proxy and requires
// construction to fail.
func TestNew(t *testing.T) {
	var v1beta3args v1beta3.LoadAwareSchedulingArgs
	v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args)
	var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs
	err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil)
	assert.NoError(t, err)

	koordClientSet := koordfake.NewSimpleClientset()
	koordSharedInformerFactory := koordinatorinformers.NewSharedInformerFactory(koordClientSet, 0)
	// The factory error was previously discarded with `_`; a nil factory
	// here would make the proxy construction below fail for the wrong reason.
	extenderFactory, err := frameworkext.NewFrameworkExtenderFactory(
		frameworkext.WithKoordinatorClientSet(koordClientSet),
		frameworkext.WithKoordinatorSharedInformerFactory(koordSharedInformerFactory),
	)
	assert.NoError(t, err)
	proxyNew := frameworkext.PluginFactoryProxy(extenderFactory, New)

	cs := kubefake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(cs, 0)
	snapshot := newTestSharedLister(nil, nil)
	registeredPlugins := []schedulertesting.RegisterPluginFunc{
		schedulertesting.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
		schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
	}
	fh, err := schedulertesting.NewFramework(context.TODO(), registeredPlugins, "koord-scheduler",
		frameworkruntime.WithClientSet(cs),
		frameworkruntime.WithInformerFactory(informerFactory),
		frameworkruntime.WithSnapshotSharedLister(snapshot),
	)
	// Use assert.NoError consistently instead of assert.Nil for errors.
	assert.NoError(t, err)

	// Wrong args type must surface a construction error.
	_, err = proxyNew(&loadAwareSchedulingArgs, fh)
	if err == nil {
		t.Fatal("want args to be of type NodeResourcesArgs")
	}
}

0 comments on commit f632ecf

Please sign in to comment.