diff --git a/agent/discovery/sd/discoverer/config.go b/agent/discovery/sd/discoverer/config.go new file mode 100644 index 000000000..56be70303 --- /dev/null +++ b/agent/discovery/sd/discoverer/config.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discoverer + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/discoverer/kubernetes" +) + +type Config struct { + K8S []kubernetes.Config `yaml:"k8s"` +} + +func validateConfig(cfg Config) error { + if len(cfg.K8S) == 0 { + return errors.New("empty config") + } + return nil +} diff --git a/agent/discovery/sd/discoverer/kubernetes/clientset.go b/agent/discovery/sd/discoverer/kubernetes/clientset.go new file mode 100644 index 000000000..37fbb1831 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/clientset.go @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "errors" + "os" + "path/filepath" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +const envFakeClient = "KUBERNETES_FAKE_CLIENTSET" + +func newClientset() (kubernetes.Interface, error) { + switch { + case os.Getenv(envFakeClient) != "": + return fake.NewSimpleClientset(), nil + case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "": + return newClientsetInCluster() + default: + return newClientsetOutOfCluster() + } +} + +func newClientsetInCluster() (*kubernetes.Clientset, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + config.UserAgent = "Netdata/auto-discovery" + + return kubernetes.NewForConfig(config) +} + +func newClientsetOutOfCluster() (*kubernetes.Clientset, error) { + home := homeDir() + if home == "" { + return nil, errors.New("couldn't find home directory") + } + + path := filepath.Join(home, ".kube", "config") + config, err := clientcmd.BuildConfigFromFlags("", path) + if err != nil { + return nil, err + } + + config.UserAgent = "Netdata/auto-discovery" + + return kubernetes.NewForConfig(config) +} + +func homeDir() string { + if h := os.Getenv("HOME"); h != "" { + return h + } + return os.Getenv("USERPROFILE") // windows +} diff --git a/agent/discovery/sd/discoverer/kubernetes/config.go b/agent/discovery/sd/discoverer/kubernetes/config.go new file mode 100644 index 000000000..a88e45155 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/config.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import "fmt" + +type Config struct { + APIServer string `yaml:"api_server"` + Tags string `yaml:"tags"` + Namespaces []string `yaml:"namespaces"` + Role string `yaml:"role"` + LocalMode bool `yaml:"local_mode"` + Selector struct { + Label string `yaml:"label"` + Field string `yaml:"field"` + } `yaml:"selector"` +} + +func validateConfig(cfg Config) error { + if !isRoleValid(cfg.Role) { + return fmt.Errorf("invalid role '%s', valid roles: '%s', '%s'", cfg.Role, RolePod, RoleService) + } + if cfg.Tags == "" { + return fmt.Errorf("no tags set for '%s' role", cfg.Role) + } + return nil +} + +func isRoleValid(role string) bool { + return role == RolePod || role == RoleService +} diff --git a/agent/discovery/sd/discoverer/kubernetes/kubernetes.go b/agent/discovery/sd/discoverer/kubernetes/kubernetes.go new file mode 100644 index 000000000..ab6bff281 --- /dev/null +++ 
b/agent/discovery/sd/discoverer/kubernetes/kubernetes.go @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "fmt" + "os" + "strings" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + "github.com/ilyam8/hashstructure" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +type Role string + +const ( + RolePod = "pod" + RoleService = "service" +) + +const ( + envNodeName = "MY_NODE_NAME" +) + +type ( + Discovery struct { + tags model.Tags + namespaces []string + role string + selectorLabel string + selectorField string + client kubernetes.Interface + discoverers []discoverer + started chan struct{} + log *logger.Logger + } + discoverer interface { + Discover(ctx context.Context, ch chan<- []model.TargetGroup) + } +) + +func NewDiscovery(cfg Config) (*Discovery, error) { + if err := validateConfig(cfg); err != nil { + return nil, fmt.Errorf("k8s discovery config validation: %v", err) + } + + d, err := initDiscovery(cfg) + if err != nil { + return nil, fmt.Errorf("k8s discovery initialization ('%s'): %v", cfg.Role, err) + } + return d, nil +} + +func initDiscovery(cfg Config) (*Discovery, error) { + tags, err := model.ParseTags(cfg.Tags) + if err != nil { + return nil, fmt.Errorf("parse config->tags: %v", err) + } + client, err := newClientset() + if err != nil { + return nil, fmt.Errorf("create clientset: %v", err) + } + namespaces := cfg.Namespaces + if len(namespaces) == 0 { + namespaces = []string{apiv1.NamespaceAll} + } + if cfg.LocalMode && cfg.Role == RolePod { + if name := os.Getenv(envNodeName); name != "" { + cfg.Selector.Field = joinSelectors(cfg.Selector.Field, "spec.nodeName="+name) + } else { + return nil, fmt.Errorf("local_mode is enabled, but env '%s' not set", envNodeName) + } + } + + d := &Discovery{ + tags: tags, + namespaces: namespaces, + role: cfg.Role, + selectorLabel: cfg.Selector.Label, + selectorField: cfg.Selector.Field, + client: client, + discoverers: make([]discoverer, 0, len(namespaces)), + started: make(chan struct{}), + log: logger.New("k8s discovery manager", ""), + } + return d, nil +} + +func (d *Discovery) String() string { + return "k8s discovery manager" +} + +const resyncPeriod = 10 * time.Minute + +func (d *Discovery) Discover(ctx context.Context, in chan<- []model.TargetGroup) { + for _, namespace := range d.namespaces { + var dd discoverer + switch d.role { + case RolePod: + dd = d.setupPodDiscoverer(ctx, namespace) + case RoleService: + dd = d.setupServiceDiscoverer(ctx, namespace) + default: + panic(fmt.Sprintf("unknown k8 discovery role: '%s'", d.role)) + } + d.discoverers = append(d.discoverers, dd) + } + if len(d.discoverers) == 0 { + panic("k8s cant run discovery: zero discoverers") + } + + d.log.Infof("registered: %v", d.discoverers) + + var wg sync.WaitGroup + updates := make(chan []model.TargetGroup) + + for _, dd := range d.discoverers { + wg.Add(1) + go func(dd discoverer) { defer wg.Done(); dd.Discover(ctx, updates) }(dd) + } + + wg.Add(1) + go func() { defer wg.Done(); d.run(ctx, updates, in) }() + + close(d.started) + + wg.Wait() + <-ctx.Done() +} + +func (d *Discovery) run(ctx context.Context, updates chan []model.TargetGroup, in chan<- []model.TargetGroup) { + for { + select { + case <-ctx.Done(): + return 
+ case groups := <-updates: + for _, group := range groups { + for _, t := range group.Targets() { + t.Tags().Merge(d.tags) + } + } + select { + case <-ctx.Done(): + return + case in <- groups: + } + } + } +} + +func (d *Discovery) setupPodDiscoverer(ctx context.Context, namespace string) *Pod { + pod := d.client.CoreV1().Pods(namespace) + podLW := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectorField + options.LabelSelector = d.selectorLabel + return pod.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectorField + options.LabelSelector = d.selectorLabel + return pod.Watch(ctx, options) + }, + } + + cmap := d.client.CoreV1().ConfigMaps(namespace) + cmapLW := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return cmap.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return cmap.Watch(ctx, options) + }, + } + + secret := d.client.CoreV1().Secrets(namespace) + secretLW := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return secret.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return secret.Watch(ctx, options) + }, + } + + return NewPod( + cache.NewSharedInformer(podLW, &apiv1.Pod{}, resyncPeriod), + cache.NewSharedInformer(cmapLW, &apiv1.ConfigMap{}, resyncPeriod), + cache.NewSharedInformer(secretLW, &apiv1.Secret{}, resyncPeriod), + ) +} + +func (d *Discovery) setupServiceDiscoverer(ctx context.Context, namespace string) *Service { + svc := d.client.CoreV1().Services(namespace) + clw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectorField + options.LabelSelector = d.selectorLabel + return svc.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectorField + options.LabelSelector = d.selectorLabel + return svc.Watch(ctx, options) + }, + } + inf := cache.NewSharedInformer(clw, &apiv1.Service{}, resyncPeriod) + return NewService(inf) +} + +func enqueue(queue *workqueue.Type, obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + queue.Add(key) +} + +//func send(ctx context.Context, in chan<- []model.TargetGroup, groups []model.TargetGroup) { +// +//} + +func send(ctx context.Context, in chan<- []model.TargetGroup, group model.TargetGroup) { + if group == nil { + return + } + select { + case <-ctx.Done(): + case in <- []model.TargetGroup{group}: + } +} + +func calcHash(obj interface{}) (uint64, error) { + return hashstructure.Hash(obj, nil) +} + +func joinSelectors(srs ...string) string { + var i int + for _, v := range srs { + if v != "" { + srs[i] = v + i++ + } + } + return strings.Join(srs[:i], ",") +} diff --git a/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go b/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go new file mode 100644 index 000000000..6a3030262 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "fmt" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" +) + +func TestMain(m *testing.M) { + _ = os.Setenv(envNodeName, "m01") + _ = os.Setenv(envFakeClient, "true") + code := m.Run() + _ = os.Unsetenv(envNodeName) + _ = os.Unsetenv(envFakeClient) + os.Exit(code) +} + +func TestNewDiscovery(t *testing.T) { + tests := map[string]struct { + cfg Config + wantErr bool + }{ + "role pod and local mode": {cfg: Config{Role: RolePod, Tags: "k8s", LocalMode: true}}, + "role service and local mode": {cfg: Config{Role: RoleService, Tags: "k8s", LocalMode: true}}, + "empty config": {wantErr: true}, + "invalid role": {cfg: Config{Role: "invalid"}, wantErr: true}, + "lack of tags": {cfg: Config{Role: RolePod}, wantErr: true}, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + discovery, err := NewDiscovery(test.cfg) + + if test.wantErr { + assert.Error(t, err) + assert.Nil(t, discovery) + } else { + assert.NoError(t, err) + assert.NotNil(t, discovery) + if test.cfg.LocalMode && test.cfg.Role == RolePod { + assert.Contains(t, discovery.selectorField, "spec.nodeName=m01") + } + if test.cfg.LocalMode && test.cfg.Role != RolePod { + assert.Empty(t, discovery.selectorField) + } + } + }) + } +} + +func TestDiscovery_Discover(t *testing.T) { + const prod = "prod" + const dev = "dev" + prodNamespace := newNamespace(prod) + devNamespace := newNamespace(dev) + + tests := map[string]func() discoverySim{ + "multiple namespaces pod discovery": func() discoverySim { + httpdProd, nginxProd := newHTTPDPod(), newNGINXPod() + httpdProd.Namespace = prod + nginxProd.Namespace = prod + + httpdDev, nginxDev := newHTTPDPod(), newNGINXPod() + httpdDev.Namespace = dev + nginxDev.Namespace = dev + + discovery, _ := prepareDiscovery( + RolePod, + []string{prod, dev}, + prodNamespace, devNamespace, httpdProd, nginxProd, httpdDev, nginxDev) + + sim := discoverySim{ + discovery: discovery, + sortBeforeVerify: true, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpdDev), + preparePodGroup(nginxDev), + preparePodGroup(httpdProd), + preparePodGroup(nginxProd), + }, + } + return sim + }, + "multiple namespaces ClusterIP service discovery": func() discoverySim { + httpdProd, nginxProd := newHTTPDClusterIPService(), newNGINXClusterIPService() + httpdProd.Namespace = prod + nginxProd.Namespace = prod + + httpdDev, nginxDev := newHTTPDClusterIPService(), newNGINXClusterIPService() + httpdDev.Namespace = dev + nginxDev.Namespace = dev + + discovery, _ := prepareDiscovery( + RoleService, + []string{prod, dev}, + prodNamespace, devNamespace, httpdProd, nginxProd, httpdDev, nginxDev) + + sim := discoverySim{ + discovery: discovery, + sortBeforeVerify: true, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpdDev), + prepareSvcGroup(nginxDev), + prepareSvcGroup(httpdProd), + prepareSvcGroup(nginxProd), + }, + } + return sim + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { sim().run(t) }) + } +} + +var discoveryTags model.Tags = map[string]struct{}{"k8s": {}} + +func prepareAllNsDiscovery(role string, objects ...runtime.Object) (*Discovery, kubernetes.Interface) { + return prepareDiscovery(role, []string{apiv1.NamespaceAll}, objects...) +} + +func prepareDiscovery(role string, namespaces []string, objects ...runtime.Object) (*Discovery, kubernetes.Interface) { + clientset := fake.NewSimpleClientset(objects...) 
+ discovery := &Discovery{ + tags: discoveryTags, + namespaces: namespaces, + role: role, + selectorLabel: "", + selectorField: "", + client: clientset, + discoverers: nil, + started: make(chan struct{}), + } + return discovery, clientset +} + +func newNamespace(name string) *apiv1.Namespace { + return &apiv1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} +} + +func mustCalcHash(target interface{}) uint64 { + hash, err := calcHash(target) + if err != nil { + panic(fmt.Sprintf("hash calculation: %v", err)) + } + return hash +} diff --git a/agent/discovery/sd/discoverer/kubernetes/pod.go b/agent/discovery/sd/discoverer/kubernetes/pod.go new file mode 100644 index 000000000..779d59993 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/pod.go @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +type podGroup struct { + targets []model.Target + source string +} + +func (pg podGroup) Source() string { return pg.source } +func (pg podGroup) Targets() []model.Target { return pg.targets } + +type PodTarget struct { + model.Base `hash:"ignore"` + hash uint64 + tuid string + Address string + + Namespace string + Name string + Annotations map[string]interface{} + Labels map[string]interface{} + NodeName string + PodIP string + + ControllerName string + ControllerKind string + + ContName string + Image string + Env map[string]interface{} + Port string + PortName string + PortProtocol string +} + +func (pt PodTarget) Hash() uint64 { return pt.hash } +func (pt PodTarget) TUID() string { return pt.tuid } + +type Pod struct { + podInformer cache.SharedInformer + cmapInformer cache.SharedInformer + secretInformer cache.SharedInformer + queue *workqueue.Type + log *logger.Logger +} + +func NewPod(pod, cmap, secret cache.SharedInformer) *Pod { + if pod == nil || cmap == nil || secret == nil { + panic("nil cmap or secret informer") + } + + queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "pod"}) + + _, _ = pod.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) }, + DeleteFunc: func(obj interface{}) { enqueue(queue, obj) }, + }) + + return &Pod{ + podInformer: pod, + cmapInformer: cmap, + secretInformer: secret, + queue: queue, + log: logger.New("k8s pod discovery", ""), + } +} + +func (p *Pod) String() string { + return fmt.Sprintf("k8s %s discovery", RolePod) +} + +func (p *Pod) Discover(ctx context.Context, in chan<- []model.TargetGroup) { + p.log.Info("instance is started") + defer p.log.Info("instance is stopped") + defer p.queue.ShutDown() + + go p.podInformer.Run(ctx.Done()) + go p.cmapInformer.Run(ctx.Done()) + go p.secretInformer.Run(ctx.Done()) + + if !cache.WaitForCacheSync(ctx.Done(), + p.podInformer.HasSynced, p.cmapInformer.HasSynced, p.secretInformer.HasSynced) { + p.log.Error("failed to sync caches") + return + } + + go p.run(ctx, in) + <-ctx.Done() +} + +func (p *Pod) run(ctx context.Context, in chan<- []model.TargetGroup) { + for { + item, shutdown := p.queue.Get() + if shutdown { + return + } + + func() { + defer p.queue.Done(item) + + key := item.(string) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return + } + + 
item, exists, err := p.podInformer.GetStore().GetByKey(key) + if err != nil { + return + } + + if !exists { + group := &podGroup{source: podSourceFromNsName(namespace, name)} + send(ctx, in, group) + return + } + + pod, err := toPod(item) + if err != nil { + return + } + + group := p.buildGroup(pod) + send(ctx, in, group) + }() + } +} + +func (p *Pod) buildGroup(pod *apiv1.Pod) model.TargetGroup { + if pod.Status.PodIP == "" || len(pod.Spec.Containers) == 0 { + return &podGroup{ + source: podSource(pod), + } + } + return &podGroup{ + source: podSource(pod), + targets: p.buildTargets(pod), + } +} + +func (p *Pod) buildTargets(pod *apiv1.Pod) (targets []model.Target) { + var name, kind string + for _, ref := range pod.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + name = ref.Name + kind = ref.Kind + break + } + } + + for _, container := range pod.Spec.Containers { + env := p.collectEnv(pod.Namespace, container) + + if len(container.Ports) == 0 { + target := &PodTarget{ + tuid: podTUID(pod, container), + Address: pod.Status.PodIP, + Namespace: pod.Namespace, + Name: pod.Name, + Annotations: toMapInterface(pod.Annotations), + Labels: toMapInterface(pod.Labels), + NodeName: pod.Spec.NodeName, + PodIP: pod.Status.PodIP, + ControllerName: name, + ControllerKind: kind, + ContName: container.Name, + Image: container.Image, + Env: toMapInterface(env), + } + hash, err := calcHash(target) + if err != nil { + continue + } + target.hash = hash + + targets = append(targets, target) + } else { + for _, port := range container.Ports { + portNum := strconv.FormatUint(uint64(port.ContainerPort), 10) + target := &PodTarget{ + tuid: podTUIDWithPort(pod, container, port), + Address: net.JoinHostPort(pod.Status.PodIP, portNum), + Namespace: pod.Namespace, + Name: pod.Name, + Annotations: toMapInterface(pod.Annotations), + Labels: toMapInterface(pod.Labels), + NodeName: pod.Spec.NodeName, + PodIP: pod.Status.PodIP, + ControllerName: name, + ControllerKind: kind, + ContName: container.Name, + Image: container.Image, + Env: toMapInterface(env), + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + } + hash, err := calcHash(target) + if err != nil { + continue + } + target.hash = hash + + targets = append(targets, target) + } + } + } + return targets +} + +func (p *Pod) collectEnv(ns string, container apiv1.Container) map[string]string { + vars := make(map[string]string) + + // When a key exists in multiple sources, + // the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. 
+ // + // Order (https://github.com/kubernetes/kubectl/blob/master/pkg/describe/describe.go) + // - envFrom: configMapRef, secretRef + // - env: value || valueFrom: fieldRef, resourceFieldRef, secretRef, configMap + + for _, src := range container.EnvFrom { + switch { + case src.ConfigMapRef != nil: + p.envFromConfigMap(vars, ns, src) + case src.SecretRef != nil: + p.envFromSecret(vars, ns, src) + } + } + + for _, env := range container.Env { + if env.Name == "" || isVar(env.Name) { + continue + } + switch { + case env.Value != "": + vars[env.Name] = env.Value + case env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil: + p.valueFromSecret(vars, ns, env) + case env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil: + p.valueFromConfigMap(vars, ns, env) + } + } + if len(vars) == 0 { + return nil + } + return vars +} + +func (p *Pod) valueFromConfigMap(vars map[string]string, ns string, env apiv1.EnvVar) { + if env.ValueFrom.ConfigMapKeyRef.Name == "" || env.ValueFrom.ConfigMapKeyRef.Key == "" { + return + } + + sr := env.ValueFrom.ConfigMapKeyRef + key := ns + "/" + sr.Name + item, exist, err := p.cmapInformer.GetStore().GetByKey(key) + if err != nil || !exist { + return + } + cmap, err := toConfigMap(item) + if err != nil { + return + } + if v, ok := cmap.Data[sr.Key]; ok { + vars[env.Name] = v + } +} + +func (p *Pod) valueFromSecret(vars map[string]string, ns string, env apiv1.EnvVar) { + if env.ValueFrom.SecretKeyRef.Name == "" || env.ValueFrom.SecretKeyRef.Key == "" { + return + } + + secretKey := env.ValueFrom.SecretKeyRef + key := ns + "/" + secretKey.Name + + item, exist, err := p.secretInformer.GetStore().GetByKey(key) + if err != nil || !exist { + return + } + + secret, err := toSecret(item) + if err != nil { + return + } + + if v, ok := secret.Data[secretKey.Key]; ok { + vars[env.Name] = string(v) + } +} + +func (p *Pod) envFromConfigMap(vars map[string]string, ns string, src apiv1.EnvFromSource) { + if src.ConfigMapRef.Name == "" { + return + } + + key := ns + "/" + src.ConfigMapRef.Name + item, exist, err := p.cmapInformer.GetStore().GetByKey(key) + if err != nil || !exist { + return + } + + cmap, err := toConfigMap(item) + if err != nil { + return + } + + for k, v := range cmap.Data { + vars[src.Prefix+k] = v + } +} + +func (p *Pod) envFromSecret(vars map[string]string, ns string, src apiv1.EnvFromSource) { + if src.SecretRef.Name == "" { + return + } + + key := ns + "/" + src.SecretRef.Name + item, exist, err := p.secretInformer.GetStore().GetByKey(key) + if err != nil || !exist { + return + } + + secret, err := toSecret(item) + if err != nil { + return + } + + for k, v := range secret.Data { + vars[src.Prefix+k] = string(v) + } +} + +func podTUID(pod *apiv1.Pod, container apiv1.Container) string { + return fmt.Sprintf("%s_%s_%s", + pod.Namespace, + pod.Name, + container.Name, + ) +} + +func podTUIDWithPort(pod *apiv1.Pod, container apiv1.Container, port apiv1.ContainerPort) string { + return fmt.Sprintf("%s_%s_%s_%s_%s", + pod.Namespace, + pod.Name, + container.Name, + strings.ToLower(string(port.Protocol)), + strconv.FormatUint(uint64(port.ContainerPort), 10), + ) +} + +func podSourceFromNsName(namespace, name string) string { + return "k8s/pod/" + namespace + "/" + name +} + +func podSource(pod *apiv1.Pod) string { + return podSourceFromNsName(pod.Namespace, pod.Name) +} + +func toPod(item interface{}) (*apiv1.Pod, error) { + pod, ok := item.(*apiv1.Pod) + if !ok { + return nil, fmt.Errorf("received unexpected object type: %T", item) + } + return pod, nil +} + 
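
The precedence rule documented in `collectEnv` above (envFrom sources are loaded first, explicit `env` entries second, and a duplicate key defined via `env` wins) is easiest to see with a concrete container spec. The sketch below is an editor's illustration, not part of this diff; the ConfigMap name `common-cfg` and its data are hypothetical, and it assumes the ConfigMap is in the same namespace and already present in the cmap informer cache.

```go
// Hypothetical input: a ConfigMap "common-cfg" with data {"LOG_LEVEL": "info", "REGION": "eu"}.
container := apiv1.Container{
	Name: "app",
	EnvFrom: []apiv1.EnvFromSource{
		// Loaded first: seeds vars with LOG_LEVEL=info and REGION=eu.
		{ConfigMapRef: &apiv1.ConfigMapEnvSource{
			LocalObjectReference: apiv1.LocalObjectReference{Name: "common-cfg"},
		}},
	},
	Env: []apiv1.EnvVar{
		// Processed second: the duplicate key overwrites the envFrom value.
		{Name: "LOG_LEVEL", Value: "debug"},
	},
}

// p.collectEnv("default", container) would then return:
//   map[string]string{"LOG_LEVEL": "debug", "REGION": "eu"}
```
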
+func toConfigMap(item interface{}) (*apiv1.ConfigMap, error) { + cmap, ok := item.(*apiv1.ConfigMap) + if !ok { + return nil, fmt.Errorf("received unexpected object type: %T", item) + } + return cmap, nil +} + +func toSecret(item interface{}) (*apiv1.Secret, error) { + secret, ok := item.(*apiv1.Secret) + if !ok { + return nil, fmt.Errorf("received unexpected object type: %T", item) + } + return secret, nil +} + +func isVar(name string) bool { + // Variable references $(VAR_NAME) are expanded using the previous defined + // environment variables in the container and any service environment + // variables. + return strings.IndexByte(name, '$') != -1 +} + +func toMapInterface(src map[string]string) map[string]interface{} { + if src == nil { + return nil + } + m := make(map[string]interface{}, len(src)) + for k, v := range src { + m[k] = v + } + return m +} diff --git a/agent/discovery/sd/discoverer/kubernetes/pod_test.go b/agent/discovery/sd/discoverer/kubernetes/pod_test.go new file mode 100644 index 000000000..155a78bd5 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/pod_test.go @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "net" + "strconv" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" +) + +func TestPodGroup_Source(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedSource []string + }{ + "pods with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + preparePodGroup(nginx), + }, + } + return sim + }, + expectedSource: []string{ + "k8s/pod/default/httpd-dd95c4d68-5bkwl", + "k8s/pod/default/nginx-7cfd77469b-q6kxj", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual []string + for _, group := range sim.run(t) { + actual = append(actual, group.Source()) + } + + assert.Equal(t, test.expectedSource, actual) + }) + } +} + +func TestPodGroup_Targets(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedNumTargets int + }{ + "pods with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + preparePodGroup(nginx), + }, + } + return sim + }, + expectedNumTargets: 4, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual int + for _, group := range sim.run(t) { + actual += len(group.Targets()) + } + + assert.Equal(t, test.expectedNumTargets, actual) + }) + } +} + +func TestPodTarget_Hash(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedHash []uint64 + }{ + "pods with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + 
preparePodGroup(nginx), + }, + } + return sim + }, + expectedHash: []uint64{ + 12703169414253998055, + 13351713096133918928, + 8241692333761256175, + 11562466355572729519, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual []uint64 + for _, group := range sim.run(t) { + for _, tg := range group.Targets() { + actual = append(actual, tg.Hash()) + } + } + + assert.Equal(t, test.expectedHash, actual) + }) + } +} + +func TestPodTarget_TUID(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedTUID []string + }{ + "pods with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + preparePodGroup(nginx), + }, + } + return sim + }, + expectedTUID: []string{ + "default_httpd-dd95c4d68-5bkwl_httpd_tcp_80", + "default_httpd-dd95c4d68-5bkwl_httpd_tcp_443", + "default_nginx-7cfd77469b-q6kxj_nginx_tcp_80", + "default_nginx-7cfd77469b-q6kxj_nginx_tcp_443", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual []string + for _, group := range sim.run(t) { + for _, tg := range group.Targets() { + actual = append(actual, tg.TUID()) + } + } + + assert.Equal(t, test.expectedTUID, actual) + }) + } +} + +func TestNewPod(t *testing.T) { + tests := map[string]struct { + podInf cache.SharedInformer + cmapInf cache.SharedInformer + secretInf cache.SharedInformer + wantPanic bool + }{ + "valid informers": { + podInf: cache.NewSharedInformer(nil, &apiv1.Pod{}, resyncPeriod), + cmapInf: cache.NewSharedInformer(nil, &apiv1.ConfigMap{}, resyncPeriod), + secretInf: cache.NewSharedInformer(nil, &apiv1.Secret{}, resyncPeriod), + }, + "nil informers": {wantPanic: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.wantPanic { + assert.Panics(t, func() { NewPod(nil, nil, nil) }) + } else { + assert.IsType(t, &Pod{}, NewPod(test.podInf, test.cmapInf, test.secretInf)) + } + }) + } +} + +func TestPod_String(t *testing.T) { + var p Pod + assert.NotEmpty(t, p.String()) +} + +func TestPod_Discover(t *testing.T) { + tests := map[string]func() discoverySim{ + "ADD: pods exist before run": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + preparePodGroup(nginx), + }, + } + return sim + }, + "ADD: pods exist before run and add after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, clientset := prepareAllNsDiscovery(RolePod, httpd) + podClient := clientset.CoreV1().Pods("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + _, _ = podClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + preparePodGroup(nginx), + }, + } + return sim + }, + "DELETE: remove pods after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, clientset := prepareAllNsDiscovery(RolePod, httpd, nginx) + podClient := clientset.CoreV1().Pods("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + 
time.Sleep(time.Millisecond * 50) + _ = podClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _ = podClient.Delete(ctx, nginx.Name, metav1.DeleteOptions{}) + }, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + preparePodGroup(nginx), + prepareEmptyPodGroup(httpd), + prepareEmptyPodGroup(nginx), + }, + } + return sim + }, + "DELETE,ADD: remove and add pods after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, clientset := prepareAllNsDiscovery(RolePod, httpd) + podClient := clientset.CoreV1().Pods("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = podClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _, _ = podClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + expectedGroups: []model.TargetGroup{ + preparePodGroup(httpd), + prepareEmptyPodGroup(httpd), + preparePodGroup(nginx), + }, + } + return sim + }, + "ADD: pods with empty PodIP": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + httpd.Status.PodIP = "" + nginx.Status.PodIP = "" + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareEmptyPodGroup(httpd), + prepareEmptyPodGroup(nginx), + }, + } + return sim + }, + "UPDATE: set pods PodIP after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + httpd.Status.PodIP = "" + nginx.Status.PodIP = "" + discovery, clientset := prepareAllNsDiscovery(RolePod, httpd, nginx) + podClient := clientset.CoreV1().Pods("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _, _ = podClient.Update(ctx, newHTTPDPod(), metav1.UpdateOptions{}) + _, _ = podClient.Update(ctx, newNGINXPod(), metav1.UpdateOptions{}) + }, + expectedGroups: []model.TargetGroup{ + prepareEmptyPodGroup(httpd), + prepareEmptyPodGroup(nginx), + preparePodGroup(newHTTPDPod()), + preparePodGroup(newNGINXPod()), + }, + } + return sim + }, + "ADD: pods without containers": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + httpd.Spec.Containers = httpd.Spec.Containers[:0] + nginx.Spec.Containers = httpd.Spec.Containers[:0] + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareEmptyPodGroup(httpd), + prepareEmptyPodGroup(nginx), + }, + } + return sim + }, + "Env: from value": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *apiv1.Container) { + c.Env = []apiv1.EnvVar{ + {Name: "key1", Value: "value1"}, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1"} + + discovery, _ := prepareAllNsDiscovery(RolePod, httpd) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroupWithEnv(httpd, data), + }, + } + return sim + }, + "Env: from Secret": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *apiv1.Container) { + c.Env = []apiv1.EnvVar{ + { + Name: "key1", + ValueFrom: &apiv1.EnvVarSource{SecretKeyRef: &apiv1.SecretKeySelector{ + LocalObjectReference: apiv1.LocalObjectReference{Name: "my-secret"}, + Key: "key1", + }}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1"} + secret := prepareSecret("my-secret", data) + + discovery, _ := 
prepareAllNsDiscovery(RolePod, httpd, secret) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroupWithEnv(httpd, data), + }, + } + return sim + }, + "Env: from ConfigMap": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *apiv1.Container) { + c.Env = []apiv1.EnvVar{ + { + Name: "key1", + ValueFrom: &apiv1.EnvVarSource{ConfigMapKeyRef: &apiv1.ConfigMapKeySelector{ + LocalObjectReference: apiv1.LocalObjectReference{Name: "my-cmap"}, + Key: "key1", + }}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1"} + cmap := prepareConfigMap("my-cmap", data) + + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, cmap) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroupWithEnv(httpd, data), + }, + } + return sim + }, + "EnvFrom: from ConfigMap": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *apiv1.Container) { + c.EnvFrom = []apiv1.EnvFromSource{ + { + ConfigMapRef: &apiv1.ConfigMapEnvSource{ + LocalObjectReference: apiv1.LocalObjectReference{Name: "my-cmap"}}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1", "key2": "value2"} + cmap := prepareConfigMap("my-cmap", data) + + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, cmap) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroupWithEnv(httpd, data), + }, + } + return sim + }, + "EnvFrom: from Secret": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *apiv1.Container) { + c.EnvFrom = []apiv1.EnvFromSource{ + { + SecretRef: &apiv1.SecretEnvSource{ + LocalObjectReference: apiv1.LocalObjectReference{Name: "my-secret"}}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1", "key2": "value2"} + secret := prepareSecret("my-secret", data) + + discovery, _ := prepareAllNsDiscovery(RolePod, httpd, secret) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + preparePodGroupWithEnv(httpd, data), + }, + } + return sim + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { sim().run(t) }) + } +} + +func mangleContainers(containers []apiv1.Container, m func(container *apiv1.Container)) { + for i := range containers { + m(&containers[i]) + } +} + +var controllerTrue = true + +func newHTTPDPod() *apiv1.Pod { + return &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "httpd-dd95c4d68-5bkwl", + Namespace: "default", + UID: "1cebb6eb-0c1e-495b-8131-8fa3e6668dc8", + Annotations: map[string]string{"phase": "prod"}, + Labels: map[string]string{"app": "httpd", "tier": "frontend"}, + OwnerReferences: []metav1.OwnerReference{ + {Name: "netdata-test", Kind: "DaemonSet", Controller: &controllerTrue}, + }, + }, + Spec: apiv1.PodSpec{ + NodeName: "m01", + Containers: []apiv1.Container{ + { + Name: "httpd", + Image: "httpd", + Ports: []apiv1.ContainerPort{ + {Name: "http", Protocol: apiv1.ProtocolTCP, ContainerPort: 80}, + {Name: "https", Protocol: apiv1.ProtocolTCP, ContainerPort: 443}, + }, + }, + }, + }, + Status: apiv1.PodStatus{ + PodIP: "172.17.0.1", + }, + } +} + +func newNGINXPod() *apiv1.Pod { + return &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-7cfd77469b-q6kxj", + Namespace: "default", + UID: "09e883f2-d740-4c5f-970d-02cf02876522", + Annotations: map[string]string{"phase": "prod"}, + Labels: 
map[string]string{"app": "nginx", "tier": "frontend"}, + OwnerReferences: []metav1.OwnerReference{ + {Name: "netdata-test", Kind: "DaemonSet", Controller: &controllerTrue}, + }, + }, + Spec: apiv1.PodSpec{ + NodeName: "m01", + Containers: []apiv1.Container{ + { + Name: "nginx", + Image: "nginx", + Ports: []apiv1.ContainerPort{ + {Name: "http", Protocol: apiv1.ProtocolTCP, ContainerPort: 80}, + {Name: "https", Protocol: apiv1.ProtocolTCP, ContainerPort: 443}, + }, + }, + }, + }, + Status: apiv1.PodStatus{ + PodIP: "172.17.0.2", + }, + } +} + +func prepareConfigMap(name string, data map[string]string) *apiv1.ConfigMap { + return &apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "default", + UID: types.UID("a03b8dc6-dc40-46dc-b571-5030e69d8167" + name), + }, + Data: data, + } +} + +func prepareSecret(name string, data map[string]string) *apiv1.Secret { + secretData := make(map[string][]byte, len(data)) + for k, v := range data { + secretData[k] = []byte(v) + } + return &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "default", + UID: types.UID("a03b8dc6-dc40-46dc-b571-5030e69d8161" + name), + }, + Data: secretData, + } +} + +func prepareEmptyPodGroup(pod *apiv1.Pod) *podGroup { + return &podGroup{source: podSource(pod)} +} + +func preparePodGroup(pod *apiv1.Pod) *podGroup { + group := prepareEmptyPodGroup(pod) + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + portNum := strconv.FormatUint(uint64(port.ContainerPort), 10) + target := &PodTarget{ + tuid: podTUIDWithPort(pod, container, port), + Address: net.JoinHostPort(pod.Status.PodIP, portNum), + Namespace: pod.Namespace, + Name: pod.Name, + Annotations: toMapInterface(pod.Annotations), + Labels: toMapInterface(pod.Labels), + NodeName: pod.Spec.NodeName, + PodIP: pod.Status.PodIP, + ControllerName: "netdata-test", + ControllerKind: "DaemonSet", + ContName: container.Name, + Image: container.Image, + Env: nil, + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + } + target.hash = mustCalcHash(target) + target.Tags().Merge(discoveryTags) + group.targets = append(group.targets, target) + } + } + return group +} + +func preparePodGroupWithEnv(pod *apiv1.Pod, env map[string]string) *podGroup { + group := preparePodGroup(pod) + for _, target := range group.Targets() { + target.(*PodTarget).Env = toMapInterface(env) + target.(*PodTarget).hash = mustCalcHash(target) + } + return group +} diff --git a/agent/discovery/sd/discoverer/kubernetes/service.go b/agent/discovery/sd/discoverer/kubernetes/service.go new file mode 100644 index 000000000..b9f592c92 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/service.go @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +type serviceGroup struct { + targets []model.Target + source string +} + +func (sg serviceGroup) Source() string { return sg.source } +func (sg serviceGroup) Targets() []model.Target { return sg.targets } + +type ServiceTarget struct { + model.Base `hash:"ignore"` + hash uint64 + tuid string + Address string + + Namespace string + Name string + Annotations map[string]interface{} + Labels map[string]interface{} + + Port string + PortName string + 
PortProtocol string + ClusterIP string + ExternalName string + Type string +} + +func (st ServiceTarget) Hash() uint64 { return st.hash } +func (st ServiceTarget) TUID() string { return st.tuid } + +type Service struct { + informer cache.SharedInformer + queue *workqueue.Type + log *logger.Logger +} + +func NewService(inf cache.SharedInformer) *Service { + if inf == nil { + panic("nil service informer") + } + + queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "service"}) + _, _ = inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) }, + DeleteFunc: func(obj interface{}) { enqueue(queue, obj) }, + }) + + return &Service{ + informer: inf, + queue: queue, + log: logger.New("k8s service discovery", ""), + } +} + +func (s *Service) String() string { + return fmt.Sprintf("k8s %s discovery", RoleService) +} + +func (s *Service) Discover(ctx context.Context, ch chan<- []model.TargetGroup) { + s.log.Info("instance is started") + defer s.log.Info("instance is stopped") + defer s.queue.ShutDown() + + go s.informer.Run(ctx.Done()) + + if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { + s.log.Error("failed to sync caches") + return + } + + go s.run(ctx, ch) + <-ctx.Done() +} + +func (s *Service) run(ctx context.Context, ch chan<- []model.TargetGroup) { + for { + item, shutdown := s.queue.Get() + if shutdown { + return + } + + func() { + defer s.queue.Done(item) + + key := item.(string) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return + } + + item, exists, err := s.informer.GetStore().GetByKey(key) + if err != nil { + return + } + + if !exists { + group := &serviceGroup{source: serviceSourceFromNsName(namespace, name)} + send(ctx, ch, group) + return + } + + svc, err := toService(item) + if err != nil { + return + } + + group := s.buildGroup(svc) + send(ctx, ch, group) + }() + } +} + +func (s *Service) buildGroup(svc *apiv1.Service) model.TargetGroup { + // TODO: headless service? 
+ if svc.Spec.ClusterIP == "" || len(svc.Spec.Ports) == 0 { + return &serviceGroup{ + source: serviceSource(svc), + } + } + return &serviceGroup{ + source: serviceSource(svc), + targets: s.buildTargets(svc), + } +} + +func (s *Service) buildTargets(svc *apiv1.Service) (targets []model.Target) { + for _, port := range svc.Spec.Ports { + portNum := strconv.FormatInt(int64(port.Port), 10) + target := &ServiceTarget{ + tuid: serviceTUID(svc, port), + Address: net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", portNum), + Namespace: svc.Namespace, + Name: svc.Name, + Annotations: toMapInterface(svc.Annotations), + Labels: toMapInterface(svc.Labels), + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + ClusterIP: svc.Spec.ClusterIP, + ExternalName: svc.Spec.ExternalName, + Type: string(svc.Spec.Type), + } + hash, err := calcHash(target) + if err != nil { + continue + } + target.hash = hash + + targets = append(targets, target) + } + return targets +} + +func serviceTUID(svc *apiv1.Service, port apiv1.ServicePort) string { + return fmt.Sprintf("%s_%s_%s_%s", + svc.Namespace, + svc.Name, + strings.ToLower(string(port.Protocol)), + strconv.FormatInt(int64(port.Port), 10), + ) +} + +func serviceSourceFromNsName(namespace, name string) string { + return "k8s/service/" + namespace + "/" + name +} + +func serviceSource(svc *apiv1.Service) string { + return serviceSourceFromNsName(svc.Namespace, svc.Name) +} + +func toService(o interface{}) (*apiv1.Service, error) { + svc, ok := o.(*apiv1.Service) + if !ok { + return nil, fmt.Errorf("received unexpected object type: %T", o) + } + return svc, nil +} diff --git a/agent/discovery/sd/discoverer/kubernetes/service_test.go b/agent/discovery/sd/discoverer/kubernetes/service_test.go new file mode 100644 index 000000000..b07602e88 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/service_test.go @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "net" + "strconv" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" +) + +func TestServiceGroup_Source(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedSource []string + }{ + "ClusterIP svc with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + expectedSource: []string{ + "k8s/service/default/httpd-cluster-ip-service", + "k8s/service/default/nginx-cluster-ip-service", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual []string + for _, group := range sim.run(t) { + actual = append(actual, group.Source()) + } + + assert.Equal(t, test.expectedSource, actual) + }) + } +} + +func TestServiceGroup_Targets(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedNumTargets int + }{ + "ClusterIP svc with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + 
discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + expectedNumTargets: 4, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual int + for _, group := range sim.run(t) { + actual += len(group.Targets()) + } + + assert.Equal(t, test.expectedNumTargets, actual) + }) + } +} + +func TestServiceTarget_Hash(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedHash []uint64 + }{ + "ClusterIP svc with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + expectedHash: []uint64{ + 17611803477081780974, + 6019985892433421258, + 4151907287549842238, + 5757608926096186119, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual []uint64 + for _, group := range sim.run(t) { + for _, tg := range group.Targets() { + actual = append(actual, tg.Hash()) + } + } + + assert.Equal(t, test.expectedHash, actual) + }) + } +} + +func TestServiceTarget_TUID(t *testing.T) { + tests := map[string]struct { + sim func() discoverySim + expectedTUID []string + }{ + "ClusterIP svc with multiple ports": { + sim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + expectedTUID: []string{ + "default_httpd-cluster-ip-service_tcp_80", + "default_httpd-cluster-ip-service_tcp_443", + "default_nginx-cluster-ip-service_tcp_80", + "default_nginx-cluster-ip-service_tcp_443", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.sim() + var actual []string + for _, group := range sim.run(t) { + for _, tg := range group.Targets() { + actual = append(actual, tg.TUID()) + } + } + + assert.Equal(t, test.expectedTUID, actual) + }) + } +} + +func TestNewService(t *testing.T) { + tests := map[string]struct { + informer cache.SharedInformer + wantPanic bool + }{ + "valid informer": {informer: cache.NewSharedInformer(nil, &apiv1.Service{}, resyncPeriod)}, + "nil informer": {wantPanic: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.wantPanic { + assert.Panics(t, func() { NewService(nil) }) + } else { + assert.IsType(t, &Service{}, NewService(test.informer)) + } + }) + } +} + +func TestService_String(t *testing.T) { + var s Service + assert.NotEmpty(t, s.String()) +} + +func TestService_Discover(t *testing.T) { + tests := map[string]func() discoverySim{ + "ADD: ClusterIP svc exist before run": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + "ADD: ClusterIP svc exist before run and add after sync": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), 
newNGINXClusterIPService() + discovery, clientset := prepareAllNsDiscovery(RoleService, httpd) + svcClient := clientset.CoreV1().Services("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + _, _ = svcClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + "DELETE: ClusterIP svc remove after sync": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, clientset := prepareAllNsDiscovery(RoleService, httpd, nginx) + svcClient := clientset.CoreV1().Services("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = svcClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _ = svcClient.Delete(ctx, nginx.Name, metav1.DeleteOptions{}) + }, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareSvcGroup(nginx), + prepareEmptySvcGroup(httpd), + prepareEmptySvcGroup(nginx), + }, + } + return sim + }, + "ADD,DELETE: ClusterIP svc remove and add after sync": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + discovery, clientset := prepareAllNsDiscovery(RoleService, httpd) + svcClient := clientset.CoreV1().Services("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = svcClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _, _ = svcClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + expectedGroups: []model.TargetGroup{ + prepareSvcGroup(httpd), + prepareEmptySvcGroup(httpd), + prepareSvcGroup(nginx), + }, + } + return sim + }, + "ADD: Headless svc exist before run": func() discoverySim { + httpd, nginx := newHTTPDHeadlessService(), newNGINXHeadlessService() + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareEmptySvcGroup(httpd), + prepareEmptySvcGroup(nginx), + }, + } + return sim + }, + "UPDATE: Headless => ClusterIP svc after sync": func() discoverySim { + httpd, nginx := newHTTPDHeadlessService(), newNGINXHeadlessService() + httpdUpd, nginxUpd := *httpd, *nginx + httpdUpd.Spec.ClusterIP = "10.100.0.1" + nginxUpd.Spec.ClusterIP = "10.100.0.2" + discovery, clientset := prepareAllNsDiscovery(RoleService, httpd, nginx) + svcClient := clientset.CoreV1().Services("default") + + sim := discoverySim{ + discovery: discovery, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _, _ = svcClient.Update(ctx, &httpdUpd, metav1.UpdateOptions{}) + _, _ = svcClient.Update(ctx, &nginxUpd, metav1.UpdateOptions{}) + }, + expectedGroups: []model.TargetGroup{ + prepareEmptySvcGroup(httpd), + prepareEmptySvcGroup(nginx), + prepareSvcGroup(&httpdUpd), + prepareSvcGroup(&nginxUpd), + }, + } + return sim + }, + "ADD: ClusterIP svc with zero exposed ports": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + httpd.Spec.Ports = httpd.Spec.Ports[:0] + nginx.Spec.Ports = httpd.Spec.Ports[:0] + discovery, _ := prepareAllNsDiscovery(RoleService, httpd, nginx) + + sim := discoverySim{ + discovery: discovery, + expectedGroups: []model.TargetGroup{ + prepareEmptySvcGroup(httpd), + prepareEmptySvcGroup(nginx), + }, + } + return sim + }, + } + + for name, sim := range tests { 
+ t.Run(name, func(t *testing.T) { sim().run(t) }) + } + +} + +func newHTTPDClusterIPService() *apiv1.Service { + return &apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "httpd-cluster-ip-service", + Namespace: "default", + Annotations: map[string]string{"phase": "prod"}, + Labels: map[string]string{"app": "httpd", "tier": "frontend"}, + }, + Spec: apiv1.ServiceSpec{ + Ports: []apiv1.ServicePort{ + {Name: "http", Protocol: apiv1.ProtocolTCP, Port: 80}, + {Name: "https", Protocol: apiv1.ProtocolTCP, Port: 443}, + }, + Type: apiv1.ServiceTypeClusterIP, + ClusterIP: "10.100.0.1", + Selector: map[string]string{"app": "httpd", "tier": "frontend"}, + }, + } +} + +func newNGINXClusterIPService() *apiv1.Service { + return &apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-cluster-ip-service", + Namespace: "default", + Annotations: map[string]string{"phase": "prod"}, + Labels: map[string]string{"app": "nginx", "tier": "frontend"}, + }, + Spec: apiv1.ServiceSpec{ + Ports: []apiv1.ServicePort{ + {Name: "http", Protocol: apiv1.ProtocolTCP, Port: 80}, + {Name: "https", Protocol: apiv1.ProtocolTCP, Port: 443}, + }, + Type: apiv1.ServiceTypeClusterIP, + ClusterIP: "10.100.0.2", + Selector: map[string]string{"app": "nginx", "tier": "frontend"}, + }, + } +} + +func newHTTPDHeadlessService() *apiv1.Service { + svc := newHTTPDClusterIPService() + svc.Name = "httpd-headless-service" + svc.Spec.ClusterIP = "" + return svc +} + +func newNGINXHeadlessService() *apiv1.Service { + svc := newNGINXClusterIPService() + svc.Name = "nginx-headless-service" + svc.Spec.ClusterIP = "" + return svc +} + +func prepareEmptySvcGroup(svc *apiv1.Service) *serviceGroup { + return &serviceGroup{source: serviceSource(svc)} +} + +func prepareSvcGroup(svc *apiv1.Service) *serviceGroup { + group := prepareEmptySvcGroup(svc) + for _, port := range svc.Spec.Ports { + portNum := strconv.FormatInt(int64(port.Port), 10) + target := &ServiceTarget{ + tuid: serviceTUID(svc, port), + Address: net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", portNum), + Namespace: svc.Namespace, + Name: svc.Name, + Annotations: toMapInterface(svc.Annotations), + Labels: toMapInterface(svc.Labels), + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + ClusterIP: svc.Spec.ClusterIP, + ExternalName: svc.Spec.ExternalName, + Type: string(svc.Spec.Type), + } + target.hash = mustCalcHash(target) + target.Tags().Merge(discoveryTags) + group.targets = append(group.targets, target) + } + return group +} diff --git a/agent/discovery/sd/discoverer/kubernetes/sim_test.go b/agent/discovery/sd/discoverer/kubernetes/sim_test.go new file mode 100644 index 000000000..01e5167b1 --- /dev/null +++ b/agent/discovery/sd/discoverer/kubernetes/sim_test.go @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "sort" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/tools/cache" +) + +const ( + startWaitTimeout = time.Second * 3 + finishWaitTimeout = time.Second * 5 +) + +type discoverySim struct { + discovery *Discovery + runAfterSync func(ctx context.Context) + sortBeforeVerify bool + expectedGroups []model.TargetGroup +} + +func (sim discoverySim) run(t *testing.T) []model.TargetGroup { + t.Helper() + require.NotNil(t, sim.discovery) + require.NotEmpty(t, sim.expectedGroups) + + in, out := make(chan []model.TargetGroup), make(chan 
[]model.TargetGroup) + go sim.collectGroups(t, in, out) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + go sim.discovery.Discover(ctx, in) + + select { + case <-sim.discovery.started: + case <-time.After(startWaitTimeout): + t.Fatalf("discovery %s failed to start in %s", sim.discovery.discoverers, startWaitTimeout) + } + + synced := cache.WaitForCacheSync(ctx.Done(), sim.discovery.hasSynced) + require.Truef(t, synced, "discovery %s failed to sync", sim.discovery.discoverers) + + if sim.runAfterSync != nil { + sim.runAfterSync(ctx) + } + + groups := <-out + + if sim.sortBeforeVerify { + sortGroups(groups) + } + + sim.verifyResult(t, groups) + return groups +} + +func (sim discoverySim) collectGroups(t *testing.T, in, out chan []model.TargetGroup) { + var groups []model.TargetGroup +loop: + for { + select { + case inGroups := <-in: + if groups = append(groups, inGroups...); len(groups) >= len(sim.expectedGroups) { + break loop + } + case <-time.After(finishWaitTimeout): + t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped", + sim.discovery.discoverers, finishWaitTimeout, len(groups), len(sim.expectedGroups)) + break loop + } + } + out <- groups +} + +func (sim discoverySim) verifyResult(t *testing.T, result []model.TargetGroup) { + var expected, actual interface{} + + if len(sim.expectedGroups) == len(result) { + expected = sim.expectedGroups + actual = result + } else { + want := make(map[string]model.TargetGroup) + for _, group := range sim.expectedGroups { + want[group.Source()] = group + } + got := make(map[string]model.TargetGroup) + for _, group := range result { + got[group.Source()] = group + } + expected, actual = want, got + } + + assert.Equal(t, expected, actual) +} + +type hasSynced interface { + hasSynced() bool +} + +var ( + _ hasSynced = &Discovery{} + _ hasSynced = &Pod{} + _ hasSynced = &Service{} +) + +func (d *Discovery) hasSynced() bool { + for _, dd := range d.discoverers { + v, ok := dd.(hasSynced) + if !ok || !v.hasSynced() { + return false + } + } + return true +} + +func (p *Pod) hasSynced() bool { + return p.podInformer.HasSynced() && p.cmapInformer.HasSynced() && p.secretInformer.HasSynced() +} + +func (s *Service) hasSynced() bool { + return s.informer.HasSynced() +} + +func sortGroups(groups []model.TargetGroup) { + if len(groups) == 0 { + return + } + sort.Slice(groups, func(i, j int) bool { return groups[i].Source() < groups[j].Source() }) +} diff --git a/agent/discovery/sd/discoverer/manager.go b/agent/discovery/sd/discoverer/manager.go new file mode 100644 index 000000000..c3296e0dc --- /dev/null +++ b/agent/discovery/sd/discoverer/manager.go @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discoverer + +import ( + "context" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/discoverer/kubernetes" + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/netdata/go.d.plugin/logger" +) + +type Discoverer interface { + Discover(ctx context.Context, in chan<- []model.TargetGroup) +} + +func New(cfg Config) (*Manager, error) { + if err := validateConfig(cfg); err != nil { + return nil, err + } + + mgr := &Manager{ + log: logger.New("discovery manager", ""), + send: make(chan struct{}, 1), + sendEvery: 5 * time.Second, + discoverers: make([]Discoverer, 0), + mux: &sync.Mutex{}, + groups: make(map[string]model.TargetGroup), + } + + if err := mgr.registerDiscoverers(cfg); err != nil { + return nil, err + } + + 
mgr.log.Infof("registered: %v", mgr.discoverers) + + return mgr, nil +} + +type Manager struct { + log *logger.Logger + discoverers []Discoverer + send chan struct{} + sendEvery time.Duration + mux *sync.Mutex + groups map[string]model.TargetGroup +} + +func (m *Manager) registerDiscoverers(conf Config) error { + for _, cfg := range conf.K8S { + d, err := kubernetes.NewDiscovery(cfg) + if err != nil { + return err + } + m.discoverers = append(m.discoverers, d) + } + return nil +} + +func (m *Manager) Discover(ctx context.Context, in chan<- []model.TargetGroup) { + m.log.Info("instance is started") + defer m.log.Info("instance is stopped") + + var wg sync.WaitGroup + + for _, d := range m.discoverers { + wg.Add(1) + go func(d Discoverer) { defer wg.Done(); m.runDiscoverer(ctx, d) }(d) + } + + wg.Add(1) + go func() { defer wg.Done(); m.run(ctx, in) }() + + wg.Wait() + <-ctx.Done() +} + +func (m *Manager) runDiscoverer(ctx context.Context, d Discoverer) { + updates := make(chan []model.TargetGroup) + go d.Discover(ctx, updates) + + for { + select { + case <-ctx.Done(): + return + case groups, ok := <-updates: + if !ok { + return + } + func() { + m.mux.Lock() + defer m.mux.Unlock() + + m.groupsUpdate(groups) + m.triggerSend() + }() + } + } +} + +func (m *Manager) run(ctx context.Context, in chan<- []model.TargetGroup) { + tk := time.NewTicker(m.sendEvery) + defer tk.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-tk.C: + select { + case <-m.send: + m.trySend(in) + default: + } + } + } +} + +func (m *Manager) trySend(in chan<- []model.TargetGroup) { + m.mux.Lock() + defer m.mux.Unlock() + + select { + case in <- m.groupsAsList(): + m.groupsReset() + default: + m.triggerSend() + } +} + +func (m *Manager) triggerSend() { + select { + case m.send <- struct{}{}: + default: + } +} + +func (m *Manager) groupsUpdate(groups []model.TargetGroup) { + for _, group := range groups { + if group != nil { + m.groups[group.Source()] = group + } + } +} + +func (m *Manager) groupsReset() { + for key := range m.groups { + delete(m.groups, key) + } +} + +func (m *Manager) groupsAsList() []model.TargetGroup { + groups := make([]model.TargetGroup, 0, len(m.groups)) + for _, group := range m.groups { + groups = append(groups, group) + } + return groups +} diff --git a/agent/discovery/sd/discoverer/manager_test.go b/agent/discovery/sd/discoverer/manager_test.go new file mode 100644 index 000000000..d334e91a4 --- /dev/null +++ b/agent/discovery/sd/discoverer/manager_test.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discoverer + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" +) + +func TestNew(t *testing.T) { + +} + +func TestManager_Discover(t *testing.T) { + tests := map[string]func() discoverySim{ + "2 discoverers unique groups with delayed collect": func() discoverySim { + const numGroups, numTargets = 2, 2 + d1 := prepareMockDiscoverer("test1", numGroups, numTargets) + d2 := prepareMockDiscoverer("test2", numGroups, numTargets) + mgr := prepareManager(d1, d2) + expected := combineGroups(d1.groups, d2.groups) + + sim := discoverySim{ + mgr: mgr, + collectDelay: mgr.sendEvery + time.Second, + expectedGroups: expected, + } + return sim + }, + "2 discoverers unique groups": func() discoverySim { + const numGroups, numTargets = 2, 2 + d1 := prepareMockDiscoverer("test1", numGroups, numTargets) + d2 := prepareMockDiscoverer("test2", numGroups, numTargets) + mgr := prepareManager(d1, d2) + 
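+ // the manager is expected to forward the groups produced by both discoverers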
expected := combineGroups(d1.groups, d2.groups) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: expected, + } + return sim + }, + "2 discoverers same groups": func() discoverySim { + const numGroups, numTargets = 2, 2 + d1 := prepareMockDiscoverer("test1", numGroups, numTargets) + mgr := prepareManager(d1, d1) + expected := combineGroups(d1.groups) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: expected, + } + return sim + }, + "2 discoverers empty groups": func() discoverySim { + const numGroups, numTargets = 1, 0 + d1 := prepareMockDiscoverer("test1", numGroups, numTargets) + d2 := prepareMockDiscoverer("test2", numGroups, numTargets) + mgr := prepareManager(d1, d2) + expected := combineGroups(d1.groups, d2.groups) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: expected, + } + return sim + }, + "2 discoverers nil groups": func() discoverySim { + const numGroups, numTargets = 0, 0 + d1 := prepareMockDiscoverer("test1", numGroups, numTargets) + d2 := prepareMockDiscoverer("test2", numGroups, numTargets) + mgr := prepareManager(d1, d2) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: nil, + } + return sim + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { sim().run(t) }) + } +} + +func prepareMockDiscoverer(source string, groups, targets int) mockDiscoverer { + d := mockDiscoverer{} + + for i := 0; i < groups; i++ { + group := mockGroup{ + source: fmt.Sprintf("%s_group_%d", source, i+1), + } + for j := 0; j < targets; j++ { + group.targets = append(group.targets, + mockTarget{Name: fmt.Sprintf("%s_group_%d_target_%d", source, i+1, j+1)}) + } + d.groups = append(d.groups, group) + } + return d +} + +func prepareManager(discoverers ...Discoverer) *Manager { + mgr := &Manager{ + send: make(chan struct{}, 1), + sendEvery: 2 * time.Second, + discoverers: discoverers, + mux: &sync.Mutex{}, + groups: make(map[string]model.TargetGroup), + } + return mgr +} + +type mockDiscoverer struct { + groups []model.TargetGroup +} + +func (md mockDiscoverer) Discover(ctx context.Context, out chan<- []model.TargetGroup) { + for { + select { + case <-ctx.Done(): + return + case out <- md.groups: + return + } + } +} + +type mockGroup struct { + targets []model.Target + source string +} + +func (mg mockGroup) Targets() []model.Target { return mg.targets } +func (mg mockGroup) Source() string { return mg.source } + +type mockTarget struct { + Name string +} + +func (mt mockTarget) Tags() model.Tags { return model.Tags{} } +func (mt mockTarget) TUID() string { return "" } +func (mt mockTarget) Hash() uint64 { return 0 } + +func combineGroups(groups ...[]model.TargetGroup) (combined []model.TargetGroup) { + for _, set := range groups { + combined = append(combined, set...) 
+ } + return combined +} diff --git a/agent/discovery/sd/discoverer/sim_test.go b/agent/discovery/sd/discoverer/sim_test.go new file mode 100644 index 000000000..cf7de3531 --- /dev/null +++ b/agent/discovery/sd/discoverer/sim_test.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discoverer + +import ( + "context" + "sort" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type discoverySim struct { + mgr *Manager + collectDelay time.Duration + expectedGroups []model.TargetGroup +} + +func (sim discoverySim) run(t *testing.T) { + t.Helper() + require.NotNil(t, sim.mgr) + + in, out := make(chan []model.TargetGroup), make(chan []model.TargetGroup) + go sim.collectGroups(t, in, out) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go sim.mgr.Discover(ctx, in) + + actualGroups := <-out + + sortGroups(sim.expectedGroups) + sortGroups(actualGroups) + + assert.Equal(t, sim.expectedGroups, actualGroups) +} + +func (sim discoverySim) collectGroups(t *testing.T, in, out chan []model.TargetGroup) { + time.Sleep(sim.collectDelay) + + timeout := sim.mgr.sendEvery + time.Second*2 + var groups []model.TargetGroup +loop: + for { + select { + case inGroups := <-in: + if groups = append(groups, inGroups...); len(groups) >= len(sim.expectedGroups) { + break loop + } + case <-time.After(timeout): + t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped", + sim.mgr.discoverers, timeout, len(groups), len(sim.expectedGroups)) + break loop + } + } + out <- groups +} + +func sortGroups(groups []model.TargetGroup) { + if len(groups) == 0 { + return + } + sort.Slice(groups, func(i, j int) bool { return groups[i].Source() < groups[j].Source() }) +} diff --git a/agent/discovery/sd/discovery.go b/agent/discovery/sd/discovery.go new file mode 100644 index 000000000..99de90bea --- /dev/null +++ b/agent/discovery/sd/discovery.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package sd diff --git a/agent/discovery/sd/model/tags.go b/agent/discovery/sd/model/tags.go new file mode 100644 index 000000000..e36f0b8f5 --- /dev/null +++ b/agent/discovery/sd/model/tags.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model + +import ( + "fmt" + "sort" + "strings" +) + +type Base struct { + tags Tags +} + +func (b *Base) Tags() Tags { + if b.tags == nil { + b.tags = NewTags() + } + return b.tags +} + +type Tags map[string]struct{} + +func NewTags() Tags { + return Tags{} +} + +func (t Tags) Merge(tags Tags) { + for tag := range tags { + if strings.HasPrefix(tag, "-") { + delete(t, tag[1:]) + } else { + t[tag] = struct{}{} + } + } +} + +func (t Tags) String() string { + ts := make([]string, 0, len(t)) + for key := range t { + ts = append(ts, key) + } + sort.Strings(ts) + return fmt.Sprintf("{%s}", strings.Join(ts, ", ")) +} + +func ParseTags(line string) (Tags, error) { + words := strings.Fields(line) + if len(words) == 0 { + return NewTags(), nil + } + + tags := NewTags() + for _, tag := range words { + if !isTagWordValid(tag) { + return nil, fmt.Errorf("tags '%s' contains tag '%s' with forbidden symbol", line, tag) + } + tags[tag] = struct{}{} + } + return tags, nil +} + +func isTagWordValid(word string) bool { + // valid: + // ^[a-zA-Z][a-zA-Z0-9=_.]*$ + word = strings.TrimPrefix(word, "-") + if len(word) == 0 { + return false + } + for i, b := range word { + switch { + 
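+ // only bytes matched by the cases below are allowed; any other byte falls through to default and the word is rejected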
default: + return false + case b >= 'a' && b <= 'z': + case b >= 'A' && b <= 'Z': + case b >= '0' && b <= '9' && i > 0: + case (b == '=' || b == '_' || b == '.') && i > 0: + } + } + return true +} diff --git a/agent/discovery/sd/model/tags_test.go b/agent/discovery/sd/model/tags_test.go new file mode 100644 index 000000000..4f07bcbf6 --- /dev/null +++ b/agent/discovery/sd/model/tags_test.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model diff --git a/agent/discovery/sd/model/target.go b/agent/discovery/sd/model/target.go new file mode 100644 index 000000000..24f37f60f --- /dev/null +++ b/agent/discovery/sd/model/target.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model + +type Target interface { + Hash() uint64 + Tags() Tags + TUID() string +} + +type TargetGroup interface { + Targets() []Target + Source() string +} + +type Config struct { + Tags Tags + Conf string + Stale bool +} diff --git a/agent/discovery/sd/pipeline/config.go b/agent/discovery/sd/pipeline/config.go new file mode 100644 index 000000000..d59a44d40 --- /dev/null +++ b/agent/discovery/sd/pipeline/config.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "errors" + "fmt" +) + +type Config struct { +} + +type ( + TagConfig []TagRuleConfig // mandatory, at least 1 + TagRuleConfig struct { + Name string `yaml:"name"` + Selector string `yaml:"selector"` // mandatory + Tags string `yaml:"tags"` // mandatory + Match []TagRuleMatchConfig `yaml:"match"` // mandatory, at least 1 + } + TagRuleMatchConfig struct { + Selector string `yaml:"selector"` // optional + Tags string `yaml:"tags"` // mandatory + Expr string `yaml:"expr"` // mandatory + } +) + +type ( + BuildConfig []BuildRuleConfig // mandatory, at least 1 + BuildRuleConfig struct { + Name string `yaml:"name"` // optional + Selector string `yaml:"selector"` // mandatory + Tags string `yaml:"tags"` // mandatory + Apply []BuildRuleApplyConfig `yaml:"apply"` // mandatory, at least 1 + } + BuildRuleApplyConfig struct { + Selector string `yaml:"selector"` // mandatory + Template string `yaml:"template"` // mandatory + } +) + +func validateTagConfig(cfg TagConfig) error { + if len(cfg) == 0 { + return errors.New("empty config, need at least 1 rule") + } + for i, rule := range cfg { + if rule.Selector == "" { + return fmt.Errorf("'rule->selector' not set (rule %s[%d])", rule.Name, i+1) + } + if rule.Tags == "" { + return fmt.Errorf("'rule->tags' not set (rule %s[%d])", rule.Name, i+1) + } + if len(rule.Match) == 0 { + return fmt.Errorf("'rule->match' not set, need at least 1 rule match (rule %s[%d])", rule.Name, i+1) + } + + for j, match := range rule.Match { + if match.Tags == "" { + return fmt.Errorf("'rule->match->tags' not set (rule %s[%d]/match [%d])", rule.Name, i+1, j+1) + } + if match.Expr == "" { + return fmt.Errorf("'rule->match->expr' not set (rule %s[%d]/match [%d])", rule.Name, i+1, j+1) + } + } + } + return nil +} + +func validateBuildConfig(cfg BuildConfig) error { + if len(cfg) == 0 { + return errors.New("empty config, need at least 1 rule") + } + for i, ruleCfg := range cfg { + if ruleCfg.Selector == "" { + return fmt.Errorf("'rule->selector' not set (rule %s[%d])", ruleCfg.Name, i+1) + } + + if ruleCfg.Tags == "" { + return fmt.Errorf("'rule->tags' not set (rule %s[%d])", ruleCfg.Name, i+1) + } + if len(ruleCfg.Apply) == 0 { + return fmt.Errorf("'rule->apply' not set (rule %s[%d])", ruleCfg.Name, i+1) + } + + for j, applyCfg := range ruleCfg.Apply { + if 
applyCfg.Selector == "" { + return fmt.Errorf("'rule->apply->selector' not set (rule %s[%d]/apply [%d])", ruleCfg.Name, i+1, j+1) + } + if applyCfg.Template == "" { + return fmt.Errorf("'rule->apply->template' not set (rule %s[%d]/apply [%d])", ruleCfg.Name, i+1, j+1) + } + } + } + return nil +} diff --git a/agent/discovery/sd/pipeline/funcmap.go b/agent/discovery/sd/pipeline/funcmap.go new file mode 100644 index 000000000..0ac331b05 --- /dev/null +++ b/agent/discovery/sd/pipeline/funcmap.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "regexp" + "sync" + "text/template" + + "github.com/Masterminds/sprig/v3" + "github.com/gobwas/glob" +) + +var funcMap = func() template.FuncMap { + custom := map[string]interface{}{ + "glob": globAny, + "re": regexpAny, + } + + fm := sprig.HermeticTxtFuncMap() + for name, fn := range custom { + fm[name] = fn + } + + return fm +}() + +func globAny(value, pattern string, rest ...string) bool { + switch len(rest) { + case 0: + return globOnce(value, pattern) + default: + return globOnce(value, pattern) || globAny(value, rest[0], rest[1:]...) + } +} + +func regexpAny(value, pattern string, rest ...string) bool { + switch len(rest) { + case 0: + return regexpOnce(value, pattern) + default: + return regexpOnce(value, pattern) || regexpAny(value, rest[0], rest[1:]...) + } +} + +func globOnce(value, pattern string) bool { + g, _ := globStore(pattern) + return g != nil && g.Match(value) +} + +func regexpOnce(value, pattern string) bool { + r, _ := regexpStore(pattern) + return r != nil && r.MatchString(value) +} + +// TODO: cleanup? +var globStore = func() func(pattern string) (glob.Glob, error) { + var l sync.RWMutex + store := make(map[string]struct { + g glob.Glob + err error + }) + + return func(pattern string) (glob.Glob, error) { + if pattern == "" { + return nil, nil + } + l.Lock() + defer l.Unlock() + entry, ok := store[pattern] + if !ok { + entry.g, entry.err = glob.Compile(pattern, '/') + store[pattern] = entry + } + return entry.g, entry.err + } +}() + +// TODO: cleanup? 
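+// regexpStore lazily compiles regular expressions and caches them by pattern, mirroring globStore above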
+var regexpStore = func() func(pattern string) (*regexp.Regexp, error) { + var l sync.RWMutex + store := make(map[string]struct { + r *regexp.Regexp + err error + }) + + return func(pattern string) (*regexp.Regexp, error) { + if pattern == "" { + return nil, nil + } + l.Lock() + defer l.Unlock() + entry, ok := store[pattern] + if !ok { + entry.r, entry.err = regexp.Compile(pattern) + store[pattern] = entry + } + return entry.r, entry.err + } +}() diff --git a/agent/discovery/sd/pipeline/funcmap_test.go b/agent/discovery/sd/pipeline/funcmap_test.go new file mode 100644 index 000000000..6e93a6995 --- /dev/null +++ b/agent/discovery/sd/pipeline/funcmap_test.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_globAny(t *testing.T) { + tests := map[string]struct { + patterns []string + value string + wantFalse bool + }{ + "one param, matches": { + patterns: []string{"*"}, + value: "value", + }, + "one param, matches with *": { + patterns: []string{"**/value"}, + value: "/one/two/three/value", + }, + "one param, not matches": { + patterns: []string{"Value"}, + value: "value", + wantFalse: true, + }, + "several params, last one matches": { + patterns: []string{"not", "matches", "*"}, + value: "value", + }, + "several params, no matches": { + patterns: []string{"not", "matches", "really"}, + value: "value", + wantFalse: true, + }, + } + + for name, test := range tests { + name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value) + + if test.wantFalse { + assert.Falsef(t, globAny(test.value, test.patterns[0], test.patterns[1:]...), name) + } else { + assert.Truef(t, globAny(test.value, test.patterns[0], test.patterns[1:]...), name) + } + } +} + +func Test_regexpAny(t *testing.T) { + tests := map[string]struct { + patterns []string + value string + wantFalse bool + }{ + "one param, matches": { + patterns: []string{"^value$"}, + value: "value", + }, + "one param, not matches": { + patterns: []string{"^Value$"}, + value: "value", + wantFalse: true, + }, + "several params, last one matches": { + patterns: []string{"not", "matches", "va[lue]{3}"}, + value: "value", + }, + "several params, no matches": { + patterns: []string{"not", "matches", "val[^l]ue"}, + value: "value", + wantFalse: true, + }, + } + + for name, test := range tests { + name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value) + + if test.wantFalse { + assert.Falsef(t, regexpAny(test.value, test.patterns[0], test.patterns[1:]...), name) + } else { + assert.Truef(t, regexpAny(test.value, test.patterns[0], test.patterns[1:]...), name) + } + } +} diff --git a/agent/discovery/sd/pipeline/pipeline.go b/agent/discovery/sd/pipeline/pipeline.go new file mode 100644 index 000000000..c9f8b0416 --- /dev/null +++ b/agent/discovery/sd/pipeline/pipeline.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline diff --git a/agent/discovery/sd/pipeline/selector.go b/agent/discovery/sd/pipeline/selector.go new file mode 100644 index 000000000..ad23e7f39 --- /dev/null +++ b/agent/discovery/sd/pipeline/selector.go @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" +) + +type Selector interface { + Matches(model.Tags) bool +} + +type ( + exactSelector string + trueSelector struct{} + negSelector 
struct{ Selector } + orSelector struct{ lhs, rhs Selector } + andSelector struct{ lhs, rhs Selector } +) + +func (s exactSelector) Matches(tags model.Tags) bool { _, ok := tags[string(s)]; return ok } +func (s trueSelector) Matches(model.Tags) bool { return true } +func (s negSelector) Matches(tags model.Tags) bool { return !s.Selector.Matches(tags) } +func (s orSelector) Matches(tags model.Tags) bool { return s.lhs.Matches(tags) || s.rhs.Matches(tags) } +func (s andSelector) Matches(tags model.Tags) bool { return s.lhs.Matches(tags) && s.rhs.Matches(tags) } + +func (s exactSelector) String() string { return "{" + string(s) + "}" } +func (s negSelector) String() string { return "{!" + stringify(s.Selector) + "}" } +func (s trueSelector) String() string { return "{*}" } +func (s orSelector) String() string { return "{" + stringify(s.lhs) + "|" + stringify(s.rhs) + "}" } +func (s andSelector) String() string { return "{" + stringify(s.lhs) + ", " + stringify(s.rhs) + "}" } +func stringify(sr Selector) string { return strings.Trim(fmt.Sprintf("%s", sr), "{}") } + +func ParseSelector(line string) (sr Selector, err error) { + words := strings.Fields(line) + if len(words) == 0 { + return trueSelector{}, nil + } + + var srs []Selector + for _, word := range words { + if idx := strings.IndexByte(word, '|'); idx > 0 { + sr, err = parseOrSelectorWord(word) + } else { + sr, err = parseSingleSelectorWord(word) + } + if err != nil { + return nil, fmt.Errorf("selector '%s' contains selector '%s' with forbidden symbol", line, word) + } + srs = append(srs, sr) + } + + switch len(srs) { + case 0: + return trueSelector{}, nil + case 1: + return srs[0], nil + default: + return newAndSelector(srs[0], srs[1], srs[2:]...), nil + } +} + +func MustParseSelector(line string) Selector { + sr, err := ParseSelector(line) + if err != nil { + panic(fmt.Sprintf("selector '%s' parse error: %v", line, err)) + } + return sr +} + +func parseOrSelectorWord(orWord string) (sr Selector, err error) { + var srs []Selector + for _, word := range strings.Split(orWord, "|") { + if sr, err = parseSingleSelectorWord(word); err != nil { + return nil, err + } + srs = append(srs, sr) + } + switch len(srs) { + case 0: + return trueSelector{}, nil + case 1: + return srs[0], nil + default: + return newOrSelector(srs[0], srs[1], srs[2:]...), nil + } +} + +func parseSingleSelectorWord(word string) (Selector, error) { + if len(word) == 0 { + return nil, errors.New("empty word") + } + neg := word[0] == '!' + if neg { + word = word[1:] + } + if len(word) == 0 { + return nil, errors.New("empty word") + } + if word != "*" && !isSelectorWordValid(word) { + return nil, errors.New("forbidden symbol") + } + + var sr Selector + switch word { + case "*": + sr = trueSelector{} + default: + sr = exactSelector(word) + } + if neg { + return negSelector{sr}, nil + } + return sr, nil +} + +func newAndSelector(lhs, rhs Selector, others ...Selector) Selector { + m := andSelector{lhs: lhs, rhs: rhs} + switch len(others) { + case 0: + return m + default: + return newAndSelector(m, others[0], others[1:]...) + } +} + +func newOrSelector(lhs, rhs Selector, others ...Selector) Selector { + m := orSelector{lhs: lhs, rhs: rhs} + switch len(others) { + case 0: + return m + default: + return newOrSelector(m, others[0], others[1:]...) 
+ } +} + +func isSelectorWordValid(word string) bool { + // valid: + // * + // ^[a-zA-Z][a-zA-Z0-9=_.]*$ + if len(word) == 0 { + return false + } + if word == "*" { + return true + } + for i, b := range word { + switch { + default: + return false + case b >= 'a' && b <= 'z': + case b >= 'A' && b <= 'Z': + case b >= '0' && b <= '9' && i > 0: + case (b == '=' || b == '_' || b == '.') && i > 0: + } + } + return true +} diff --git a/agent/discovery/sd/pipeline/selector_test.go b/agent/discovery/sd/pipeline/selector_test.go new file mode 100644 index 000000000..b642ddccf --- /dev/null +++ b/agent/discovery/sd/pipeline/selector_test.go @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "regexp" + "testing" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" +) + +var reSrString = regexp.MustCompile(`^{[^{}]+}$`) + +func TestExactSelector_String(t *testing.T) { + sr := exactSelector("selector") + + assert.True(t, reSrString.MatchString(sr.String())) +} + +func TestNegSelector_String(t *testing.T) { + srs := []Selector{ + exactSelector("selector"), + negSelector{exactSelector("selector")}, + orSelector{ + lhs: exactSelector("selector"), + rhs: exactSelector("selector")}, + orSelector{ + lhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + }, + andSelector{ + lhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + }, + } + + for i, sr := range srs { + neg := negSelector{sr} + assert.True(t, reSrString.MatchString(neg.String()), "selector num %d", i+1) + } +} + +func TestOrSelector_String(t *testing.T) { + sr := orSelector{ + lhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + } + + assert.True(t, reSrString.MatchString(sr.String())) +} + +func TestAndSelector_String(t *testing.T) { + sr := andSelector{ + lhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + } + + assert.True(t, reSrString.MatchString(sr.String())) +} + +func TestExactSelector_Match(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []exactSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []exactSelector{ + "a", + "b", + }, + } + notMatchTests := struct { + tags model.Tags + srs []exactSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []exactSelector{ + "c", + "d", + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.Matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.Matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestNegSelector_Match(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []negSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []negSelector{ + {exactSelector("c")}, + {exactSelector("d")}, + }, + } + notMatchTests := struct { + tags model.Tags + srs []negSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []negSelector{ + {exactSelector("a")}, + {exactSelector("b")}, + }, + } + + for i, sr := range 
matchTests.srs { + assert.Truef(t, sr.Matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.Matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestOrSelector_Match(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []orSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []orSelector{ + { + lhs: orSelector{lhs: exactSelector("c"), rhs: exactSelector("d")}, + rhs: orSelector{lhs: exactSelector("e"), rhs: exactSelector("b")}, + }, + }, + } + notMatchTests := struct { + tags model.Tags + srs []orSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []orSelector{ + { + lhs: orSelector{lhs: exactSelector("c"), rhs: exactSelector("d")}, + rhs: orSelector{lhs: exactSelector("e"), rhs: exactSelector("f")}, + }, + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.Matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.Matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestAndSelector_Match(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []andSelector + }{ + tags: model.Tags{"a": {}, "b": {}, "c": {}, "d": {}}, + srs: []andSelector{ + { + lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}, + rhs: andSelector{lhs: exactSelector("c"), rhs: exactSelector("d")}, + }, + }, + } + notMatchTests := struct { + tags model.Tags + srs []andSelector + }{ + tags: model.Tags{"a": {}, "b": {}, "c": {}, "d": {}}, + srs: []andSelector{ + { + lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}, + rhs: andSelector{lhs: exactSelector("c"), rhs: exactSelector("z")}, + }, + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.Matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.Matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestParseSelector(t *testing.T) { + tests := map[string]struct { + wantSelector Selector + wantErr bool + }{ + "": {wantSelector: trueSelector{}}, + "a": {wantSelector: exactSelector("a")}, + "Z": {wantSelector: exactSelector("Z")}, + "a_b": {wantSelector: exactSelector("a_b")}, + "a=b": {wantSelector: exactSelector("a=b")}, + "!a": {wantSelector: negSelector{exactSelector("a")}}, + "a b": {wantSelector: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}}, + "a|b": {wantSelector: orSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}}, + "*": {wantSelector: trueSelector{}}, + "!*": {wantSelector: negSelector{trueSelector{}}}, + "a b !c d|e f": { + wantSelector: andSelector{ + lhs: andSelector{ + lhs: andSelector{ + lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}, + rhs: negSelector{exactSelector("c")}, + }, + rhs: orSelector{ + lhs: exactSelector("d"), + rhs: exactSelector("e"), + }, + }, + rhs: exactSelector("f"), + }, + }, + "!": {wantErr: true}, + "a !": {wantErr: true}, + "a!b": {wantErr: true}, + "0a": {wantErr: true}, + "a b c*": {wantErr: true}, + "__": {wantErr: true}, + "a|b|c*": {wantErr: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sr, err := ParseSelector(name) + + if test.wantErr { + assert.Nil(t, sr) + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.wantSelector, sr) + } + }) + } +} + +func TestMustParseSelector(t *testing.T) { + tests := []string{ + "!", + "a !", + "a!b", + "0a", + "a b c*", + 
"__", + "a|b|c*", + } + + for _, test := range tests { + f := func() { + MustParseSelector(test) + } + assert.Panicsf(t, f, test) + } +} diff --git a/go.mod b/go.mod index c6c0c3eb5..a4ad3ea86 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.20 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 + github.com/Masterminds/sprig/v3 v3.2.3 github.com/Wing924/ltsv v0.3.1 github.com/apparentlymart/go-cidr v1.1.0 github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de @@ -16,6 +17,7 @@ require ( github.com/fsnotify/fsnotify v1.6.0 github.com/go-redis/redis/v8 v8.11.5 github.com/go-sql-driver/mysql v1.7.1 + github.com/gobwas/glob v0.2.3 github.com/godbus/dbus/v5 v5.1.0 github.com/gofrs/flock v0.8.1 github.com/golang/mock v1.6.0 @@ -52,6 +54,8 @@ require ( cloud.google.com/go/compute v1.15.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Microsoft/go-winio v0.5.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect @@ -96,6 +100,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect + github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -118,6 +123,8 @@ require ( github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect @@ -133,8 +140,10 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/soheilhy/cmux v0.1.5 // indirect + github.com/spf13/cast v1.3.1 // indirect github.com/spf13/cobra v1.4.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect diff --git a/go.sum b/go.sum index 82884eceb..55ca56f7c 100644 --- a/go.sum +++ b/go.sum @@ -71,15 +71,19 @@ github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= 
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -329,6 +333,8 @@ github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrt github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -515,6 +521,8 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -525,6 +533,7 @@ github.com/ilyam8/hashstructure v1.1.0/go.mod h1:LoLuwBSNpZOi3eTMfAqe2i4oW9QkI08 github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -707,6 +716,7 @@ github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -717,6 +727,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -905,6 +916,8 @@ github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY52 github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1117,6 +1130,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= @@ -1216,6 +1230,7 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= @@ -1332,6 +1347,7 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= @@ -1339,6 +1355,7 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= @@ -1352,6 +1369,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=