Skip to content

Commit

Permalink
Use 'vmv1' for neonvm API import everywhere (#1157)
Browse files Browse the repository at this point in the history
Within neonvm-controller, we're consistently using `vmv1` to refer to
the `.../apis/neonvm/v1` import, but elsewhere we're using `vmapi`
instead.

`vmv1` is closer to the typical kubernetes imports like `corev1` and
`metav1`, even if it doesn't exactly match the import path.
  • Loading branch information
sharnoff authored Nov 25, 2024
1 parent 15b68b9 commit 3aebca4
Show file tree
Hide file tree
Showing 18 changed files with 137 additions and 137 deletions.
4 changes: 2 additions & 2 deletions autoscaler-agent/cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ import (
scheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmclient "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned"
"github.com/neondatabase/autoscaling/pkg/agent"
"github.com/neondatabase/autoscaling/pkg/util"
Expand Down Expand Up @@ -46,7 +46,7 @@ func main() {
if err != nil {
logger.Panic("Failed to make K8S client", zap.Error(err))
}
if err = vmapi.AddToScheme(scheme.Scheme); err != nil {
if err = vmv1.AddToScheme(scheme.Scheme); err != nil {
logger.Panic("Failed to add NeonVM scheme", zap.Error(err))
}

Expand Down
8 changes: 4 additions & 4 deletions pkg/agent/billing/billing.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import (

"k8s.io/apimachinery/pkg/types"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
"github.com/neondatabase/autoscaling/pkg/api"
"github.com/neondatabase/autoscaling/pkg/billing"
"github.com/neondatabase/autoscaling/pkg/reporting"
Expand Down Expand Up @@ -51,7 +51,7 @@ func (m *metricsTimeSlice) Duration() time.Duration { return m.endTime.Sub(m.sta

type vmMetricsInstant struct {
// cpu stores the cpu allocation at a particular instant.
cpu vmapi.MilliCPU
cpu vmv1.MilliCPU
}

// vmMetricsSeconds is like vmMetrics, but the values cover the allocation over time
Expand Down Expand Up @@ -141,11 +141,11 @@ func (s *metricsState) collect(logger *zap.Logger, store VMStoreForNode, metrics

old := s.present
s.present = make(map[metricsKey]vmMetricsInstant)
var vmsOnThisNode []*vmapi.VirtualMachine
var vmsOnThisNode []*vmv1.VirtualMachine
if store.Failing() {
logger.Error("VM store is currently stopped. No events will be recorded")
} else {
vmsOnThisNode = store.ListIndexed(func(i *VMNodeIndex) []*vmapi.VirtualMachine {
vmsOnThisNode = store.ListIndexed(func(i *VMNodeIndex) []*vmv1.VirtualMachine {
return i.List()
})
}
Expand Down
18 changes: 9 additions & 9 deletions pkg/agent/billing/indexedstore.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,11 @@ package billing
import (
"k8s.io/apimachinery/pkg/types"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
"github.com/neondatabase/autoscaling/pkg/util/watch"
)

type VMStoreForNode = watch.IndexedStore[vmapi.VirtualMachine, *VMNodeIndex]
type VMStoreForNode = watch.IndexedStore[vmv1.VirtualMachine, *VMNodeIndex]

// VMNodeIndex is a watch.Index that stores all of the VMs for a particular node
//
Expand All @@ -21,35 +21,35 @@ type VMStoreForNode = watch.IndexedStore[vmapi.VirtualMachine, *VMNodeIndex]
// This comment was particularly instructive:
// https://github.com/kubernetes/kubernetes/issues/53459#issuecomment-1146200268
type VMNodeIndex struct {
forNode map[types.UID]*vmapi.VirtualMachine
forNode map[types.UID]*vmv1.VirtualMachine
node string
}

func NewVMNodeIndex(node string) *VMNodeIndex {
return &VMNodeIndex{
forNode: make(map[types.UID]*vmapi.VirtualMachine),
forNode: make(map[types.UID]*vmv1.VirtualMachine),
node: node,
}
}

func (i *VMNodeIndex) Add(vm *vmapi.VirtualMachine) {
func (i *VMNodeIndex) Add(vm *vmv1.VirtualMachine) {
if vm.Status.Node == i.node {
i.forNode[vm.UID] = vm
}
}

func (i *VMNodeIndex) Update(oldVM, newVM *vmapi.VirtualMachine) {
func (i *VMNodeIndex) Update(oldVM, newVM *vmv1.VirtualMachine) {
i.Delete(oldVM)
i.Add(newVM)
}

func (i *VMNodeIndex) Delete(vm *vmapi.VirtualMachine) {
func (i *VMNodeIndex) Delete(vm *vmv1.VirtualMachine) {
// note: delete is a no-op if the key isn't present.
delete(i.forNode, vm.UID)
}

func (i *VMNodeIndex) List() []*vmapi.VirtualMachine {
items := make([]*vmapi.VirtualMachine, 0, len(i.forNode))
func (i *VMNodeIndex) List() []*vmv1.VirtualMachine {
items := make([]*vmv1.VirtualMachine, 0, len(i.forNode))
for _, vm := range i.forNode {
items = append(items, vm)
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/agent/billing/prommetrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ import (

"github.com/prometheus/client_golang/prometheus"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
"github.com/neondatabase/autoscaling/pkg/reporting"
)

Expand Down Expand Up @@ -74,7 +74,7 @@ type (
autoscalingEnabledFlag bool
)

func (b batchMetrics) inc(isEndpoint isEndpointFlag, autoscalingEnabled autoscalingEnabledFlag, phase vmapi.VmPhase) {
func (b batchMetrics) inc(isEndpoint isEndpointFlag, autoscalingEnabled autoscalingEnabledFlag, phase vmv1.VmPhase) {
key := batchMetricsLabels{
isEndpoint: strconv.FormatBool(bool(isEndpoint)),
autoscalingEnabled: strconv.FormatBool(bool(autoscalingEnabled)),
Expand Down
10 changes: 5 additions & 5 deletions pkg/agent/core/testhelpers/construct.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (

"go.uber.org/zap"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
"github.com/neondatabase/autoscaling/pkg/agent/core"
"github.com/neondatabase/autoscaling/pkg/api"
)
Expand Down Expand Up @@ -70,9 +70,9 @@ func CreateVmInfo(config InitialVmInfoConfig, opts ...VmInfoOpt) api.VmInfo {
Name: "test",
Namespace: "test",
Cpu: api.VmCpuInfo{
Min: vmapi.MilliCPU(config.MinCU) * config.ComputeUnit.VCPU,
Use: vmapi.MilliCPU(config.MinCU) * config.ComputeUnit.VCPU,
Max: vmapi.MilliCPU(config.MaxCU) * config.ComputeUnit.VCPU,
Min: vmv1.MilliCPU(config.MinCU) * config.ComputeUnit.VCPU,
Use: vmv1.MilliCPU(config.MinCU) * config.ComputeUnit.VCPU,
Max: vmv1.MilliCPU(config.MaxCU) * config.ComputeUnit.VCPU,
},
Mem: api.VmMemInfo{
SlotSize: config.MemorySlotSize,
Expand Down Expand Up @@ -162,7 +162,7 @@ func WithCurrentCU(cu uint16) VmInfoOpt {
})
}

func WithCurrentRevision(rev vmapi.RevisionWithTime) VmInfoOpt {
func WithCurrentRevision(rev vmv1.RevisionWithTime) VmInfoOpt {
return vmInfoModifier(func(c InitialVmInfoConfig, vm *api.VmInfo) {
vm.CurrentRevision = &rev
})
Expand Down
6 changes: 3 additions & 3 deletions pkg/agent/globalstate.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ import (

"k8s.io/client-go/kubernetes"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmclient "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned"
"github.com/neondatabase/autoscaling/pkg/agent/schedwatch"
"github.com/neondatabase/autoscaling/pkg/api"
Expand Down Expand Up @@ -64,9 +64,9 @@ func (r MainRunner) newAgentState(
return state, promReg
}

func vmIsOurResponsibility(vm *vmapi.VirtualMachine, config *Config, nodeName string) bool {
func vmIsOurResponsibility(vm *vmv1.VirtualMachine, config *Config, nodeName string) bool {
return vm.Status.Node == nodeName &&
(vm.Status.Phase.IsAlive() && vm.Status.Phase != vmapi.VmMigrating) &&
(vm.Status.Phase.IsAlive() && vm.Status.Phase != vmv1.VmMigrating) &&
vm.Status.PodIP != "" &&
api.HasAutoscalingEnabled(vm) &&
vm.Spec.SchedulerName == config.Scheduler.SchedulerName
Expand Down
40 changes: 20 additions & 20 deletions pkg/agent/watch.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmclient "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned"
"github.com/neondatabase/autoscaling/pkg/api"
"github.com/neondatabase/autoscaling/pkg/util"
Expand Down Expand Up @@ -67,7 +67,7 @@ func startVMWatcher(
perVMMetrics PerVMMetrics,
nodeName string,
submitEvent func(vmEvent),
) (*watch.Store[vmapi.VirtualMachine], error) {
) (*watch.Store[vmv1.VirtualMachine], error) {
logger := parentLogger.Named("vm-watch")

return watch.Watch(
Expand All @@ -84,13 +84,13 @@ func startVMWatcher(
RetryRelistAfter: util.NewTimeRange(time.Millisecond, 500, 1000),
RetryWatchAfter: util.NewTimeRange(time.Millisecond, 500, 1000),
},
watch.Accessors[*vmapi.VirtualMachineList, vmapi.VirtualMachine]{
Items: func(list *vmapi.VirtualMachineList) []vmapi.VirtualMachine { return list.Items },
watch.Accessors[*vmv1.VirtualMachineList, vmv1.VirtualMachine]{
Items: func(list *vmv1.VirtualMachineList) []vmv1.VirtualMachine { return list.Items },
},
watch.InitModeDefer,
metav1.ListOptions{},
watch.HandlerFuncs[*vmapi.VirtualMachine]{
AddFunc: func(vm *vmapi.VirtualMachine, preexisting bool) {
watch.HandlerFuncs[*vmv1.VirtualMachine]{
AddFunc: func(vm *vmv1.VirtualMachine, preexisting bool) {
setVMMetrics(&perVMMetrics, vm, nodeName)

if vmIsOurResponsibility(vm, config, nodeName) {
Expand All @@ -105,7 +105,7 @@ func startVMWatcher(
submitEvent(event)
}
},
UpdateFunc: func(oldVM, newVM *vmapi.VirtualMachine) {
UpdateFunc: func(oldVM, newVM *vmv1.VirtualMachine) {
updateVMMetrics(&perVMMetrics, oldVM, newVM, nodeName)

oldIsOurs := vmIsOurResponsibility(oldVM, config, nodeName)
Expand All @@ -114,7 +114,7 @@ func startVMWatcher(
return
}

var vmForEvent *vmapi.VirtualMachine
var vmForEvent *vmv1.VirtualMachine
var eventKind vmEventKind

if !oldIsOurs && newIsOurs {
Expand All @@ -139,7 +139,7 @@ func startVMWatcher(

submitEvent(event)
},
DeleteFunc: func(vm *vmapi.VirtualMachine, maybeStale bool) {
DeleteFunc: func(vm *vmv1.VirtualMachine, maybeStale bool) {
deleteVMMetrics(&perVMMetrics, vm, nodeName)

if vmIsOurResponsibility(vm, config, nodeName) {
Expand All @@ -158,7 +158,7 @@ func startVMWatcher(
)
}

func makeVMEvent(logger *zap.Logger, vm *vmapi.VirtualMachine, kind vmEventKind) (vmEvent, error) {
func makeVMEvent(logger *zap.Logger, vm *vmv1.VirtualMachine, kind vmEventKind) (vmEvent, error) {
info, err := api.ExtractVmInfo(logger, vm)
if err != nil {
return vmEvent{}, fmt.Errorf("Error extracting VM info: %w", err)
Expand All @@ -184,7 +184,7 @@ func makeVMEvent(logger *zap.Logger, vm *vmapi.VirtualMachine, kind vmEventKind)
// We're not reusing api.ExtractVmInfo even though it also looks at the bounds
// annotation, because its data is less precise - CPU and memory values might
// come from the VM spec without us knowing.
func extractAutoscalingBounds(vm *vmapi.VirtualMachine) *api.ScalingBounds {
func extractAutoscalingBounds(vm *vmv1.VirtualMachine) *api.ScalingBounds {
boundsJSON, ok := vm.Annotations[api.AnnotationAutoscalingBounds]
if !ok {
return nil
Expand All @@ -201,7 +201,7 @@ type pair[T1 any, T2 any] struct {
second T2
}

func makeVMMetric(vm *vmapi.VirtualMachine, valType vmResourceValueType, val float64) vmMetric {
func makeVMMetric(vm *vmv1.VirtualMachine, valType vmResourceValueType, val float64) vmMetric {
endpointID := vm.Labels[endpointLabel]
projectID := vm.Labels[projectLabel]
labels := makePerVMMetricsLabels(vm.Namespace, vm.Name, endpointID, projectID, valType)
Expand All @@ -211,11 +211,11 @@ func makeVMMetric(vm *vmapi.VirtualMachine, valType vmResourceValueType, val flo
}
}

func makeVMCPUMetrics(vm *vmapi.VirtualMachine) []vmMetric {
func makeVMCPUMetrics(vm *vmv1.VirtualMachine) []vmMetric {
var metrics []vmMetric

// metrics from spec
specPairs := []pair[vmResourceValueType, vmapi.MilliCPU]{
specPairs := []pair[vmResourceValueType, vmv1.MilliCPU]{
{vmResourceValueSpecMin, vm.Spec.Guest.CPUs.Min},
{vmResourceValueSpecMax, vm.Spec.Guest.CPUs.Max},
{vmResourceValueSpecUse, vm.Spec.Guest.CPUs.Use},
Expand All @@ -239,15 +239,15 @@ func makeVMCPUMetrics(vm *vmapi.VirtualMachine) []vmMetric {
}
for _, p := range boundPairs {
// avoid using resource.Quantity.AsApproximateFloat64() since it's quite inaccurate
m := makeVMMetric(vm, p.first, vmapi.MilliCPUFromResourceQuantity(p.second).AsFloat64())
m := makeVMMetric(vm, p.first, vmv1.MilliCPUFromResourceQuantity(p.second).AsFloat64())
metrics = append(metrics, m)
}
}

return metrics
}

func makeVMMemMetrics(vm *vmapi.VirtualMachine) []vmMetric {
func makeVMMemMetrics(vm *vmv1.VirtualMachine) []vmMetric {
var metrics []vmMetric

memorySlotsToBytes := func(m int32) int64 {
Expand Down Expand Up @@ -288,7 +288,7 @@ func makeVMMemMetrics(vm *vmapi.VirtualMachine) []vmMetric {

// makeVMRestartMetrics makes metrics related to VM restarts. Currently, it
// only includes one metric, which is restartCount.
func makeVMRestartMetrics(vm *vmapi.VirtualMachine) []vmMetric {
func makeVMRestartMetrics(vm *vmv1.VirtualMachine) []vmMetric {
endpointID := vm.Labels[endpointLabel]
projectID := vm.Labels[projectLabel]
labels := makePerVMMetricsLabels(vm.Namespace, vm.Name, endpointID, projectID, "")
Expand All @@ -300,7 +300,7 @@ func makeVMRestartMetrics(vm *vmapi.VirtualMachine) []vmMetric {
}
}

func setVMMetrics(perVMMetrics *PerVMMetrics, vm *vmapi.VirtualMachine, nodeName string) {
func setVMMetrics(perVMMetrics *PerVMMetrics, vm *vmv1.VirtualMachine, nodeName string) {
if vm.Status.Node != nodeName {
return
}
Expand All @@ -321,7 +321,7 @@ func setVMMetrics(perVMMetrics *PerVMMetrics, vm *vmapi.VirtualMachine, nodeName
}
}

func updateVMMetrics(perVMMetrics *PerVMMetrics, oldVM, newVM *vmapi.VirtualMachine, nodeName string) {
func updateVMMetrics(perVMMetrics *PerVMMetrics, oldVM, newVM *vmv1.VirtualMachine, nodeName string) {
if newVM.Status.Node != nodeName || oldVM.Status.Node != nodeName {
// this case we don't need an in-place metric update. Either we just have
// to add the new metrics, or delete the old ones, or nothing!
Expand Down Expand Up @@ -359,7 +359,7 @@ func updateVMMetrics(perVMMetrics *PerVMMetrics, oldVM, newVM *vmapi.VirtualMach
updateMetrics(perVMMetrics.restartCount, oldRestartCountMetrics, newRestartCountMetrics)
}

func deleteVMMetrics(perVMMetrics *PerVMMetrics, vm *vmapi.VirtualMachine, nodeName string) {
func deleteVMMetrics(perVMMetrics *PerVMMetrics, vm *vmv1.VirtualMachine, nodeName string) {
if vm.Status.Node != nodeName {
return
}
Expand Down
10 changes: 5 additions & 5 deletions pkg/api/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import (

"k8s.io/apimachinery/pkg/api/resource"

vmapi "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1"
"github.com/neondatabase/autoscaling/pkg/util"
)

Expand Down Expand Up @@ -295,7 +295,7 @@ func (b Bytes) Format(state fmt.State, verb rune) {
//
// In all cases, each resource type is considered separately from the others.
type Resources struct {
VCPU vmapi.MilliCPU `json:"vCPUs"`
VCPU vmv1.MilliCPU `json:"vCPUs"`
// Mem gives the number of bytes of memory requested
Mem Bytes `json:"mem"`
}
Expand Down Expand Up @@ -376,7 +376,7 @@ func (r Resources) SaturatingSub(other Resources) Resources {
// Mul returns the result of multiplying each resource by factor
func (r Resources) Mul(factor uint16) Resources {
return Resources{
VCPU: vmapi.MilliCPU(factor) * r.VCPU,
VCPU: vmv1.MilliCPU(factor) * r.VCPU,
Mem: Bytes(factor) * r.Mem,
}
}
Expand Down Expand Up @@ -461,13 +461,13 @@ func (m MoreResources) And(cmp MoreResources) MoreResources {
// VCPUChange is used to notify runner that it had some changes in its CPUs
// runner uses this info to adjust qemu cgroup
type VCPUChange struct {
VCPUs vmapi.MilliCPU
VCPUs vmv1.MilliCPU
}

// VCPUCgroup is used in runner to reply to controller
// it represents the vCPU usage as controlled by cgroup
type VCPUCgroup struct {
VCPUs vmapi.MilliCPU
VCPUs vmv1.MilliCPU
}

// this a similar version type for controller <-> runner communications
Expand Down
Loading

0 comments on commit 3aebca4

Please sign in to comment.