diff --git a/go.mod b/go.mod index 888eb885..2b9b794e 100644 --- a/go.mod +++ b/go.mod @@ -6,12 +6,12 @@ go 1.23.0 replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.6 require ( - github.com/digitalocean/godo v1.121.0 + github.com/digitalocean/godo v1.130.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.33.1 - golang.org/x/oauth2 v0.22.0 + golang.org/x/oauth2 v0.23.0 k8s.io/api v0.31.0 k8s.io/apimachinery v0.31.0 k8s.io/client-go v0.31.0 @@ -60,7 +60,7 @@ require ( go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.6.0 // indirect diff --git a/go.sum b/go.sum index dff2adda..592cd0d7 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.121.0 h1:ilXiHuEnhbJs2fmFEPX0r/QQ6KfiOIMAhJN3f8NiCfI= -github.com/digitalocean/godo v1.121.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.130.0 h1:DbJg0wvBxTkYjY5Q9S1mwzAZLd5Wht3r57yFH4yeMCk= +github.com/digitalocean/godo v1.130.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= @@ -163,8 +163,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -178,8 +178,8 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys 
v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 7203f1ed..68e7d48f 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,56 @@ # Change Log +## [v1.130.0] - 2024-11-14 + +- #755 - @vsharma6855 - Add Missing Database Configs for Postgresql and MYSQL +- #754 - @blesswinsamuel - APPS-9858 Add method to obtain websocket URL to get console access into components + +## [v1.129.0] - 2024-11-06 + +- #752 - @andrewsomething - Support maps in Stringify +- #749 - @loosla - [droplets]: add droplet backup policies +- #730 - @rak16 - DOCR-1201: Add new RegistriesService to support methods for multiple-registry open beta +- #748 - @andrewsomething - Support Droplet GPU information + +## [v1.128.0] - 2024-10-24 + +- #746 - @blesswinsamuel - Add archive field to AppSpec to archive/restore apps +- #745 - @asaha2 - Add load balancer monitoring endpoints +- #744 - @asaha2 - Adjust delete dangerous +- #743 - @asaha2 - Introduce droplet autoscale godo methods +- #740 - @blesswinsamuel - Add maintenance field to AppSpec to enable/disable maintenance mode +- #739 - @markusthoemmes - Add protocol to AppSpec and pending to detect responses + +## [v1.127.0] - 2024-10-18 + +- #737 - @loosla - [databases]: change Opensearch ism_history_max_docs type to int64 to … +- #735 - @loosla - [databases]: add a missing field to Opensearch advanced configuration +- #729 - @loosla - [databases]: add support for Opensearch advanced configuration + +## [v1.126.0] - 2024-09-25 + +- #732 - @gottwald - DOKS: add custom CIDR fields +- #727 - @loosla - [databases]: add support for Kafka advanced configuration + +## [v1.125.0] - 2024-09-17 + +- #726 - @loosla - [databases]: add support for MongoDB advanced configuration +- #724 - @andrewsomething - Bump go version to 1.22 +- #723 - @jauderho - Update Go dependencies and remove replace statements + +## [v1.124.0] - 2024-09-10 + +- #721 - @vsharma6855 - [DBAAS] | Add API endpoint for applying cluster patches + +## [v1.123.0] - 2024-09-06 + +- #719 - @andrewsomething - apps: mark ListTiers and GetTier as deprecated + +## [v1.122.0] - 2024-09-04 + +- #717 - @danaelhe - DB: Fix Logsink Attribute Types +- #716 - @bhardwajRahul - Databases: Add support for OpenSearch ACL + ## [v1.121.0] - 2024-08-20 - #715 - @danaelhe - Databases: Bring back Logsink Support diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index 22970540..0232ae94 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -462,6 +462,15 @@ type AppLogDestinationSpecPapertrail struct { Endpoint string `json:"endpoint"` } +// AppMaintenanceSpec struct for AppMaintenanceSpec +type AppMaintenanceSpec struct { + // Indicates whether maintenance mode should be enabled for the app. + Enabled bool `json:"enabled,omitempty"` + // Indicates whether the app should be archived. Setting this to true implies that enabled is set to true. 
+ // Note that this feature is currently in closed beta. + Archive bool `json:"archive,omitempty"` +} + // AppRouteSpec struct for AppRouteSpec type AppRouteSpec struct { // (Deprecated) An HTTP path prefix. Paths must start with / and must be unique across all components within an app. @@ -495,7 +504,8 @@ type AppServiceSpec struct { InstanceCount int64 `json:"instance_count,omitempty"` Autoscaling *AppAutoscalingSpec `json:"autoscaling,omitempty"` // The internal port on which this service's run command will listen. Default: 8080 If there is not an environment variable with the name `PORT`, one will be automatically added with its value set to the value of this field. - HTTPPort int64 `json:"http_port,omitempty"` + HTTPPort int64 `json:"http_port,omitempty"` + Protocol ServingProtocol `json:"protocol,omitempty"` // (Deprecated) A list of HTTP routes that should be routed to this component. Routes []*AppRouteSpec `json:"routes,omitempty"` HealthCheck *AppServiceSpecHealthCheck `json:"health_check,omitempty"` @@ -559,10 +569,11 @@ type AppSpec struct { // A list of environment variables made available to all components in the app. Envs []*AppVariableDefinition `json:"envs,omitempty"` // A list of alerts which apply to the app. - Alerts []*AppAlertSpec `json:"alerts,omitempty"` - Ingress *AppIngressSpec `json:"ingress,omitempty"` - Egress *AppEgressSpec `json:"egress,omitempty"` - Features []string `json:"features,omitempty"` + Alerts []*AppAlertSpec `json:"alerts,omitempty"` + Ingress *AppIngressSpec `json:"ingress,omitempty"` + Egress *AppEgressSpec `json:"egress,omitempty"` + Features []string `json:"features,omitempty"` + Maintenance *AppMaintenanceSpec `json:"maintenance,omitempty"` } // AppStaticSiteSpec struct for AppStaticSiteSpec @@ -917,6 +928,8 @@ type DetectResponse struct { TemplateFound bool `json:"template_found,omitempty"` TemplateValid bool `json:"template_valid,omitempty"` TemplateError string `json:"template_error,omitempty"` + // Whether or not the underlying detection is still pending. If true, the request can be retried as-is until this field is false and the response contains the detection result. + Pending bool `json:"pending,omitempty"` } // DetectResponseComponent struct for DetectResponseComponent @@ -1252,6 +1265,15 @@ type ResetDatabasePasswordResponse struct { Deployment *Deployment `json:"deployment,omitempty"` } +// ServingProtocol - HTTP: The app is serving the HTTP protocol. Default. - HTTP2: The app is serving the HTTP/2 protocol. Currently, this needs to be implemented in the service by serving HTTP/2 with prior knowledge. +type ServingProtocol string + +// List of ServingProtocol +const ( + SERVINGPROTOCOL_HTTP ServingProtocol = "HTTP" + SERVINGPROTOCOL_HTTP2 ServingProtocol = "HTTP2" +) + // AppStringMatch struct for AppStringMatch type AppStringMatch struct { // Exact string match. Only 1 of `exact`, `prefix`, or `regex` must be set. 
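
// A minimal usage sketch for the new ServingProtocol and AppMaintenanceSpec fields
// introduced above. Assumes an existing *godo.Client (client), a context.Context
// (ctx), and an app ID (appID); the service name "web" is a placeholder and the
// update goes through the pre-existing Apps.Update call.
func exampleMaintenanceAndHTTP2(ctx context.Context, client *godo.Client, appID string) error {
	spec := &godo.AppSpec{
		Name: "example-app",
		Services: []*godo.AppServiceSpec{{
			Name:     "web",
			Protocol: godo.SERVINGPROTOCOL_HTTP2, // serve HTTP/2 with prior knowledge
		}},
		// Put the app into maintenance mode; Archive additionally archives it (closed beta).
		Maintenance: &godo.AppMaintenanceSpec{Enabled: true},
	}
	_, _, err := client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: spec})
	return err
}
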
diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go index ebf341c0..24fe738e 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -40,6 +40,7 @@ type AppsService interface { CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error) + GetExec(ctx context.Context, appID, deploymentID, component string) (*AppExec, *Response, error) ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) @@ -77,6 +78,11 @@ type AppLogs struct { HistoricURLs []string `json:"historic_urls"` } +// AppExec represents the websocket URL used for sending/receiving console input and output. +type AppExec struct { + URL string `json:"url"` +} + // AppUpdateRequest represents a request to update an app. type AppUpdateRequest struct { Spec *AppSpec `json:"spec"` @@ -368,6 +374,27 @@ func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, compon return logs, resp, nil } +// GetExec retrieves the websocket URL used for sending/receiving console input and output. +func (s *AppsServiceOp) GetExec(ctx context.Context, appID, deploymentID, component string) (*AppExec, *Response, error) { + var url string + if deploymentID == "" { + url = fmt.Sprintf("%s/%s/components/%s/exec", appsBasePath, appID, component) + } else { + url = fmt.Sprintf("%s/%s/deployments/%s/components/%s/exec", appsBasePath, appID, deploymentID, component) + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, nil, err + } + logs := new(AppExec) + resp, err := s.client.Do(ctx, req, logs) + if err != nil { + return nil, resp, err + } + return logs, resp, nil +} + // ListRegions lists all regions supported by App Platform. func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) { path := fmt.Sprintf("%s/regions", appsBasePath) @@ -384,6 +411,9 @@ func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Respons } // ListTiers lists available app tiers. +// +// Deprecated: The '/v2/apps/tiers' endpoint has been deprecated as app tiers +// are no longer tied to instance sizes. The concept of tiers is being retired. func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, error) { path := fmt.Sprintf("%s/tiers", appsBasePath) req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) @@ -399,6 +429,9 @@ func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, e } // GetTier retrieves information about a specific app tier. +// +// Deprecated: The '/v2/apps/tiers/{slug}' endpoints have been deprecated as app +// tiers are no longer tied to instance sizes. The concept of tiers is being retired. 
func (s *AppsServiceOp) GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) { path := fmt.Sprintf("%s/tiers/%s", appsBasePath, slug) req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go index 05c5ac4b..02e7c4b1 100644 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ b/vendor/github.com/digitalocean/godo/apps_accessors.go @@ -1421,6 +1421,22 @@ func (a *AppLogDestinationSpecPapertrail) GetEndpoint() string { return a.Endpoint } +// GetArchive returns the Archive field. +func (a *AppMaintenanceSpec) GetArchive() bool { + if a == nil { + return false + } + return a.Archive +} + +// GetEnabled returns the Enabled field. +func (a *AppMaintenanceSpec) GetEnabled() bool { + if a == nil { + return false + } + return a.Enabled +} + // GetAppID returns the AppID field. func (a *AppProposeRequest) GetAppID() string { if a == nil { @@ -1757,6 +1773,14 @@ func (a *AppServiceSpec) GetName() string { return a.Name } +// GetProtocol returns the Protocol field. +func (a *AppServiceSpec) GetProtocol() ServingProtocol { + if a == nil { + return "" + } + return a.Protocol +} + // GetRoutes returns the Routes field. func (a *AppServiceSpec) GetRoutes() []*AppRouteSpec { if a == nil { @@ -1941,6 +1965,14 @@ func (a *AppSpec) GetJobs() []*AppJobSpec { return a.Jobs } +// GetMaintenance returns the Maintenance field. +func (a *AppSpec) GetMaintenance() *AppMaintenanceSpec { + if a == nil { + return nil + } + return a.Maintenance +} + // GetName returns the Name field. func (a *AppSpec) GetName() string { if a == nil { @@ -3093,6 +3125,14 @@ func (d *DetectResponse) GetComponents() []*DetectResponseComponent { return d.Components } +// GetPending returns the Pending field. +func (d *DetectResponse) GetPending() bool { + if d == nil { + return false + } + return d.Pending +} + // GetTemplate returns the Template field. 
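
// Sketch of the new Apps.GetExec helper defined above: it returns the websocket URL
// used for console access to a component. Assumes client/ctx as before; appID and
// the component name "web" are placeholders, and an empty deploymentID targets the
// app's active deployment per the URL construction in GetExec.
func exampleConsoleURL(ctx context.Context, client *godo.Client, appID string) (string, error) {
	exec, _, err := client.Apps.GetExec(ctx, appID, "", "web")
	if err != nil {
		return "", err
	}
	return exec.URL, nil
}
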
func (d *DetectResponse) GetTemplate() *DeployTemplate { if d == nil { diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 8c406551..e2a7d567 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -3,6 +3,7 @@ package godo import ( "context" "fmt" + "math/big" "net/http" "strings" "time" @@ -16,6 +17,7 @@ const ( databaseResizePath = databaseBasePath + "/%s/resize" databaseMigratePath = databaseBasePath + "/%s/migrate" databaseMaintenancePath = databaseBasePath + "/%s/maintenance" + databaseUpdateInstallationPath = databaseBasePath + "/%s/install_update" databaseBackupsPath = databaseBasePath + "/%s/backups" databaseUsersPath = databaseBasePath + "/%s/users" databaseUserPath = databaseBasePath + "/%s/users/%s" @@ -120,6 +122,7 @@ type DatabasesService interface { Resize(context.Context, string, *DatabaseResizeRequest) (*Response, error) Migrate(context.Context, string, *DatabaseMigrateRequest) (*Response, error) UpdateMaintenance(context.Context, string, *DatabaseUpdateMaintenanceRequest) (*Response, error) + InstallUpdate(context.Context, string) (*Response, error) ListBackups(context.Context, string, *ListOptions) ([]DatabaseBackup, *Response, error) GetUser(context.Context, string, string) (*DatabaseUser, *Response, error) ListUsers(context.Context, string, *ListOptions) ([]DatabaseUser, *Response, error) @@ -150,9 +153,15 @@ type DatabasesService interface { GetPostgreSQLConfig(context.Context, string) (*PostgreSQLConfig, *Response, error) GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error) GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error) + GetMongoDBConfig(context.Context, string) (*MongoDBConfig, *Response, error) + GetOpensearchConfig(context.Context, string) (*OpensearchConfig, *Response, error) + GetKafkaConfig(context.Context, string) (*KafkaConfig, *Response, error) UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error) UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error) UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) + UpdateMongoDBConfig(context.Context, string, *MongoDBConfig) (*Response, error) + UpdateOpensearchConfig(context.Context, string, *OpensearchConfig) (*Response, error) + UpdateKafkaConfig(context.Context, string, *KafkaConfig) (*Response, error) ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) ListTopics(context.Context, string, *ListOptions) ([]DatabaseTopic, *Response, error) @@ -252,9 +261,16 @@ type KafkaACL struct { Topic string `json:"topic,omitempty"` } -// DatabaseUserSettings contains Kafka-specific user settings +// OpenSearchACL contains OpenSearch specific user access control information +type OpenSearchACL struct { + Permission string `json:"permission,omitempty"` + Index string `json:"index,omitempty"` +} + +// DatabaseUserSettings contains user settings type DatabaseUserSettings struct { - ACL []*KafkaACL `json:"acl,omitempty"` + ACL []*KafkaACL `json:"acl,omitempty"` + OpenSearchACL []*OpenSearchACL `json:"opensearch_acl,omitempty"` } // DatabaseMySQLUserSettings contains MySQL-specific user settings @@ -505,19 +521,19 @@ type DatabaseUpdateLogsinkRequest struct { // DatabaseLogsinkConfig represents one of the configurable options (rsyslog_logsink, elasticsearch_logsink, or 
opensearch_logsink) for a logsink. type DatabaseLogsinkConfig struct { - URL string `json:"url,omitempty"` - IndexPrefix string `json:"index_prefix,omitempty"` - IndexDaysMax string `json:"index_days_max,omitempty"` - Timeout string `json:"timeout,omitempty"` - Server string `json:"server,omitempty"` - Port int `json:"port,omitempty"` - TLS bool `json:"tls,omitempty"` - Format string `json:"format,omitempty"` - Logline string `json:"logline,omitempty"` - SD string `json:"sd,omitempty"` - CA string `json:"ca,omitempty"` - Key string `json:"key,omitempty"` - Cert string `json:"cert,omitempty"` + URL string `json:"url,omitempty"` + IndexPrefix string `json:"index_prefix,omitempty"` + IndexDaysMax int `json:"index_days_max,omitempty"` + Timeout float32 `json:"timeout,omitempty"` + Server string `json:"server,omitempty"` + Port int `json:"port,omitempty"` + TLS bool `json:"tls,omitempty"` + Format string `json:"format,omitempty"` + Logline string `json:"logline,omitempty"` + SD string `json:"sd,omitempty"` + CA string `json:"ca,omitempty"` + Key string `json:"key,omitempty"` + Cert string `json:"cert,omitempty"` } // PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters. @@ -573,6 +589,9 @@ type PostgreSQLConfig struct { BackupMinute *int `json:"backup_minute,omitempty"` WorkMem *int `json:"work_mem,omitempty"` TimeScaleDB *PostgreSQLTimeScaleDBConfig `json:"timescaledb,omitempty"` + SynchronousReplication *string `json:"synchronous_replication,omitempty"` + StatMonitorEnable *bool `json:"stat_monitor_enable,omitempty"` + MaxFailoverReplicationTimeLag *int64 `json:"max_failover_replication_time_lag,omitempty"` } // PostgreSQLBouncerConfig configuration @@ -637,6 +656,85 @@ type MySQLConfig struct { BackupHour *int `json:"backup_hour,omitempty"` BackupMinute *int `json:"backup_minute,omitempty"` BinlogRetentionPeriod *int `json:"binlog_retention_period,omitempty"` + InnodbChangeBufferMaxSize *int `json:"innodb_change_buffer_max_size,omitempty"` + InnodbFlushNeighbors *int `json:"innodb_flush_neighbors,omitempty"` + InnodbReadIoThreads *int `json:"innodb_read_io_threads,omitempty"` + InnodbThreadConcurrency *int `json:"innodb_thread_concurrency,omitempty"` + InnodbWriteIoThreads *int `json:"innodb_write_io_threads,omitempty"` + NetBufferLength *int `json:"net_buffer_length,omitempty"` + LogOutput *string `json:"log_output,omitempty"` +} + +// MongoDBConfig holds advanced configurations for MongoDB database clusters. +type MongoDBConfig struct { + DefaultReadConcern *string `json:"default_read_concern,omitempty"` + DefaultWriteConcern *string `json:"default_write_concern,omitempty"` + TransactionLifetimeLimitSeconds *int `json:"transaction_lifetime_limit_seconds,omitempty"` + SlowOpThresholdMs *int `json:"slow_op_threshold_ms,omitempty"` + Verbosity *int `json:"verbosity,omitempty"` +} + +// KafkaConfig holds advanced configurations for Kafka database clusters. 
+type KafkaConfig struct { + GroupInitialRebalanceDelayMs *int `json:"group_initial_rebalance_delay_ms,omitempty"` + GroupMinSessionTimeoutMs *int `json:"group_min_session_timeout_ms,omitempty"` + GroupMaxSessionTimeoutMs *int `json:"group_max_session_timeout_ms,omitempty"` + MessageMaxBytes *int `json:"message_max_bytes,omitempty"` + LogCleanerDeleteRetentionMs *int64 `json:"log_cleaner_delete_retention_ms,omitempty"` + LogCleanerMinCompactionLagMs *uint64 `json:"log_cleaner_min_compaction_lag_ms,omitempty"` + LogFlushIntervalMs *uint64 `json:"log_flush_interval_ms,omitempty"` + LogIndexIntervalBytes *int `json:"log_index_interval_bytes,omitempty"` + LogMessageDownconversionEnable *bool `json:"log_message_downconversion_enable,omitempty"` + LogMessageTimestampDifferenceMaxMs *uint64 `json:"log_message_timestamp_difference_max_ms,omitempty"` + LogPreallocate *bool `json:"log_preallocate,omitempty"` + LogRetentionBytes *big.Int `json:"log_retention_bytes,omitempty"` + LogRetentionHours *int `json:"log_retention_hours,omitempty"` + LogRetentionMs *big.Int `json:"log_retention_ms,omitempty"` + LogRollJitterMs *uint64 `json:"log_roll_jitter_ms,omitempty"` + LogSegmentDeleteDelayMs *int `json:"log_segment_delete_delay_ms,omitempty"` + AutoCreateTopicsEnable *bool `json:"auto_create_topics_enable,omitempty"` +} + +// OpensearchConfig holds advanced configurations for Opensearch database clusters. +type OpensearchConfig struct { + HttpMaxContentLengthBytes *int `json:"http_max_content_length_bytes,omitempty"` + HttpMaxHeaderSizeBytes *int `json:"http_max_header_size_bytes,omitempty"` + HttpMaxInitialLineLengthBytes *int `json:"http_max_initial_line_length_bytes,omitempty"` + IndicesQueryBoolMaxClauseCount *int `json:"indices_query_bool_max_clause_count,omitempty"` + IndicesFielddataCacheSizePercentage *int `json:"indices_fielddata_cache_size_percentage,omitempty"` + IndicesMemoryIndexBufferSizePercentage *int `json:"indices_memory_index_buffer_size_percentage,omitempty"` + IndicesMemoryMinIndexBufferSizeMb *int `json:"indices_memory_min_index_buffer_size_mb,omitempty"` + IndicesMemoryMaxIndexBufferSizeMb *int `json:"indices_memory_max_index_buffer_size_mb,omitempty"` + IndicesQueriesCacheSizePercentage *int `json:"indices_queries_cache_size_percentage,omitempty"` + IndicesRecoveryMaxMbPerSec *int `json:"indices_recovery_max_mb_per_sec,omitempty"` + IndicesRecoveryMaxConcurrentFileChunks *int `json:"indices_recovery_max_concurrent_file_chunks,omitempty"` + ThreadPoolSearchSize *int `json:"thread_pool_search_size,omitempty"` + ThreadPoolSearchThrottledSize *int `json:"thread_pool_search_throttled_size,omitempty"` + ThreadPoolGetSize *int `json:"thread_pool_get_size,omitempty"` + ThreadPoolAnalyzeSize *int `json:"thread_pool_analyze_size,omitempty"` + ThreadPoolWriteSize *int `json:"thread_pool_write_size,omitempty"` + ThreadPoolForceMergeSize *int `json:"thread_pool_force_merge_size,omitempty"` + ThreadPoolSearchQueueSize *int `json:"thread_pool_search_queue_size,omitempty"` + ThreadPoolSearchThrottledQueueSize *int `json:"thread_pool_search_throttled_queue_size,omitempty"` + ThreadPoolGetQueueSize *int `json:"thread_pool_get_queue_size,omitempty"` + ThreadPoolAnalyzeQueueSize *int `json:"thread_pool_analyze_queue_size,omitempty"` + ThreadPoolWriteQueueSize *int `json:"thread_pool_write_queue_size,omitempty"` + IsmEnabled *bool `json:"ism_enabled,omitempty"` + IsmHistoryEnabled *bool `json:"ism_history_enabled,omitempty"` + IsmHistoryMaxAgeHours *int `json:"ism_history_max_age_hours,omitempty"` + 
IsmHistoryMaxDocs *int64 `json:"ism_history_max_docs,omitempty"` + IsmHistoryRolloverCheckPeriodHours *int `json:"ism_history_rollover_check_period_hours,omitempty"` + IsmHistoryRolloverRetentionPeriodDays *int `json:"ism_history_rollover_retention_period_days,omitempty"` + SearchMaxBuckets *int `json:"search_max_buckets,omitempty"` + ActionAutoCreateIndexEnabled *bool `json:"action_auto_create_index_enabled,omitempty"` + EnableSecurityAudit *bool `json:"enable_security_audit,omitempty"` + ActionDestructiveRequiresName *bool `json:"action_destructive_requires_name,omitempty"` + ClusterMaxShardsPerNode *int `json:"cluster_max_shards_per_node,omitempty"` + OverrideMainResponseVersion *bool `json:"override_main_response_version,omitempty"` + ScriptMaxCompilationsRate *string `json:"script_max_compilations_rate,omitempty"` + ClusterRoutingAllocationNodeConcurrentRecoveries *int `json:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + ReindexRemoteWhitelist []string `json:"reindex_remote_whitelist,omitempty"` + PluginsAlertingFilterByBackendRolesEnabled *bool `json:"plugins_alerting_filter_by_backend_roles_enabled,omitempty"` } type databaseUserRoot struct { @@ -679,6 +777,18 @@ type databaseMySQLConfigRoot struct { Config *MySQLConfig `json:"config"` } +type databaseMongoDBConfigRoot struct { + Config *MongoDBConfig `json:"config"` +} + +type databaseOpensearchConfigRoot struct { + Config *OpensearchConfig `json:"config"` +} + +type databaseKafkaConfigRoot struct { + Config *KafkaConfig `json:"config"` +} + type databaseBackupsRoot struct { Backups []DatabaseBackup `json:"backups"` } @@ -933,6 +1043,20 @@ func (svc *DatabasesServiceOp) UpdateMaintenance(ctx context.Context, databaseID return resp, nil } +// InstallUpdate starts installation of updates +func (svc *DatabasesServiceOp) InstallUpdate(ctx context.Context, databaseID string) (*Response, error) { + path := fmt.Sprintf(databaseUpdateInstallationPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListBackups returns a list of the current backups of a database func (svc *DatabasesServiceOp) ListBackups(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseBackup, *Response, error) { path := fmt.Sprintf(databaseBackupsPath, databaseID) @@ -1476,6 +1600,102 @@ func (svc *DatabasesServiceOp) UpdateMySQLConfig(ctx context.Context, databaseID return resp, nil } +// GetMongoDBConfig retrieves the config for a MongoDB database cluster. +func (svc *DatabasesServiceOp) GetMongoDBConfig(ctx context.Context, databaseID string) (*MongoDBConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseMongoDBConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateMongoDBConfig updates the config for a MongoDB database cluster. 
+func (svc *DatabasesServiceOp) UpdateMongoDBConfig(ctx context.Context, databaseID string, config *MongoDBConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseMongoDBConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// GetKafkaConfig retrieves the config for a Kafka database cluster. +func (svc *DatabasesServiceOp) GetKafkaConfig(ctx context.Context, databaseID string) (*KafkaConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseKafkaConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateKafkaConfig updates the config for a Kafka database cluster. +func (svc *DatabasesServiceOp) UpdateKafkaConfig(ctx context.Context, databaseID string, config *KafkaConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseKafkaConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// GetOpensearchConfig retrieves the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) GetOpensearchConfig(ctx context.Context, databaseID string) (*OpensearchConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseOpensearchConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateOpensearchConfig updates the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) UpdateOpensearchConfig(ctx context.Context, databaseID string, config *OpensearchConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseOpensearchConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListOptions gets the database options available. 
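
// Sketch of the new advanced-configuration helpers above, using Kafka as the
// example; the MongoDB and Opensearch variants follow the same Get/Update pattern.
// Assumes client/ctx as before; clusterID is a placeholder database cluster UUID.
func exampleTuneKafkaCluster(ctx context.Context, client *godo.Client, clusterID string) error {
	// Read the current advanced configuration.
	current, _, err := client.Databases.GetKafkaConfig(ctx, clusterID)
	if err != nil {
		return err
	}
	_ = current
	// Patch a single setting; zero-value fields are dropped via omitempty, so only
	// the provided pointer fields are sent.
	autoCreate := true
	if _, err := client.Databases.UpdateKafkaConfig(ctx, clusterID, &godo.KafkaConfig{
		AutoCreateTopicsEnable: &autoCreate,
	}); err != nil {
		return err
	}
	// Start installation of any pending updates for the cluster.
	_, err = client.Databases.InstallUpdate(ctx, clusterID)
	return err
}
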
func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) { root := new(databaseOptionsRoot) diff --git a/vendor/github.com/digitalocean/godo/droplet_actions.go b/vendor/github.com/digitalocean/godo/droplet_actions.go index 2e09d0c5..ed0f583c 100644 --- a/vendor/github.com/digitalocean/godo/droplet_actions.go +++ b/vendor/github.com/digitalocean/godo/droplet_actions.go @@ -30,6 +30,8 @@ type DropletActionsService interface { SnapshotByTag(context.Context, string, string) ([]Action, *Response, error) EnableBackups(context.Context, int) (*Action, *Response, error) EnableBackupsByTag(context.Context, string) ([]Action, *Response, error) + EnableBackupsWithPolicy(context.Context, int, *DropletBackupPolicyRequest) (*Action, *Response, error) + ChangeBackupPolicy(context.Context, int, *DropletBackupPolicyRequest) (*Action, *Response, error) DisableBackups(context.Context, int) (*Action, *Response, error) DisableBackupsByTag(context.Context, string) ([]Action, *Response, error) PasswordReset(context.Context, int) (*Action, *Response, error) @@ -169,6 +171,42 @@ func (s *DropletActionsServiceOp) EnableBackupsByTag(ctx context.Context, tag st return s.doActionByTag(ctx, tag, request) } +// EnableBackupsWithPolicy enables droplet's backup with a backup policy applied. +func (s *DropletActionsServiceOp) EnableBackupsWithPolicy(ctx context.Context, id int, policy *DropletBackupPolicyRequest) (*Action, *Response, error) { + if policy == nil { + return nil, nil, NewArgError("policy", "policy can't be nil") + } + + policyMap := map[string]interface{}{ + "plan": policy.Plan, + "weekday": policy.Weekday, + } + if policy.Hour != nil { + policyMap["hour"] = policy.Hour + } + + request := &ActionRequest{"type": "enable_backups", "backup_policy": policyMap} + return s.doAction(ctx, id, request) +} + +// ChangeBackupPolicy updates a backup policy when backups are enabled. +func (s *DropletActionsServiceOp) ChangeBackupPolicy(ctx context.Context, id int, policy *DropletBackupPolicyRequest) (*Action, *Response, error) { + if policy == nil { + return nil, nil, NewArgError("policy", "policy can't be nil") + } + + policyMap := map[string]interface{}{ + "plan": policy.Plan, + "weekday": policy.Weekday, + } + if policy.Hour != nil { + policyMap["hour"] = policy.Hour + } + + request := &ActionRequest{"type": "change_backup_policy", "backup_policy": policyMap} + return s.doAction(ctx, id, request) +} + // DisableBackups disables backups for a Droplet. 
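
// Sketch of the new backup-policy Droplet actions above. Assumes client/ctx as
// before; dropletID is a placeholder, and the "weekly"/"SUN"/hour values are
// assumed policy inputs rather than values taken from this diff.
func exampleWeeklyBackups(ctx context.Context, client *godo.Client, dropletID int) error {
	hour := 4
	policy := &godo.DropletBackupPolicyRequest{
		Plan:    "weekly",
		Weekday: "SUN",
		Hour:    &hour, // optional; only included in the request when non-nil
	}
	if _, _, err := client.DropletActions.EnableBackupsWithPolicy(ctx, dropletID, policy); err != nil {
		return err
	}
	// Later, adjust the schedule without disabling backups.
	_, _, err := client.DropletActions.ChangeBackupPolicy(ctx, dropletID, policy)
	return err
}
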
func (s *DropletActionsServiceOp) DisableBackups(ctx context.Context, id int) (*Action, *Response, error) { request := &ActionRequest{"type": "disable_backups"} diff --git a/vendor/github.com/digitalocean/godo/droplet_autoscale.go b/vendor/github.com/digitalocean/godo/droplet_autoscale.go new file mode 100644 index 00000000..4ef9dff8 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/droplet_autoscale.go @@ -0,0 +1,258 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const ( + dropletAutoscaleBasePath = "/v2/droplets/autoscale" +) + +// DropletAutoscaleService defines an interface for managing droplet autoscale pools through DigitalOcean API +type DropletAutoscaleService interface { + Create(context.Context, *DropletAutoscalePoolRequest) (*DropletAutoscalePool, *Response, error) + Get(context.Context, string) (*DropletAutoscalePool, *Response, error) + List(context.Context, *ListOptions) ([]*DropletAutoscalePool, *Response, error) + ListMembers(context.Context, string, *ListOptions) ([]*DropletAutoscaleResource, *Response, error) + ListHistory(context.Context, string, *ListOptions) ([]*DropletAutoscaleHistoryEvent, *Response, error) + Update(context.Context, string, *DropletAutoscalePoolRequest) (*DropletAutoscalePool, *Response, error) + Delete(context.Context, string) (*Response, error) + DeleteDangerous(context.Context, string) (*Response, error) +} + +// DropletAutoscalePool represents a DigitalOcean droplet autoscale pool +type DropletAutoscalePool struct { + ID string `json:"id"` + Name string `json:"name"` + Config *DropletAutoscaleConfiguration `json:"config"` + DropletTemplate *DropletAutoscaleResourceTemplate `json:"droplet_template"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + CurrentUtilization *DropletAutoscaleResourceUtilization `json:"current_utilization,omitempty"` + Status string `json:"status"` +} + +// DropletAutoscaleConfiguration represents a DigitalOcean droplet autoscale pool configuration +type DropletAutoscaleConfiguration struct { + MinInstances uint64 `json:"min_instances,omitempty"` + MaxInstances uint64 `json:"max_instances,omitempty"` + TargetCPUUtilization float64 `json:"target_cpu_utilization,omitempty"` + TargetMemoryUtilization float64 `json:"target_memory_utilization,omitempty"` + CooldownMinutes uint32 `json:"cooldown_minutes,omitempty"` + TargetNumberInstances uint64 `json:"target_number_instances,omitempty"` +} + +// DropletAutoscaleResourceTemplate represents a DigitalOcean droplet autoscale pool resource template +type DropletAutoscaleResourceTemplate struct { + Size string `json:"size"` + Region string `json:"region"` + Image string `json:"image"` + Tags []string `json:"tags"` + SSHKeys []string `json:"ssh_keys"` + VpcUUID string `json:"vpc_uuid"` + WithDropletAgent bool `json:"with_droplet_agent"` + ProjectID string `json:"project_id"` + IPV6 bool `json:"ipv6"` + UserData string `json:"user_data"` +} + +// DropletAutoscaleResourceUtilization represents a DigitalOcean droplet autoscale pool resource utilization +type DropletAutoscaleResourceUtilization struct { + Memory float64 `json:"memory,omitempty"` + CPU float64 `json:"cpu,omitempty"` +} + +// DropletAutoscaleResource represents a DigitalOcean droplet autoscale pool resource +type DropletAutoscaleResource struct { + DropletID uint64 `json:"droplet_id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + HealthStatus string `json:"health_status"` + UnhealthyReason string 
`json:"unhealthy_reason,omitempty"` + Status string `json:"status"` + CurrentUtilization *DropletAutoscaleResourceUtilization `json:"current_utilization,omitempty"` +} + +// DropletAutoscaleHistoryEvent represents a DigitalOcean droplet autoscale pool history event +type DropletAutoscaleHistoryEvent struct { + HistoryEventID string `json:"history_event_id"` + CurrentInstanceCount uint64 `json:"current_instance_count"` + DesiredInstanceCount uint64 `json:"desired_instance_count"` + Reason string `json:"reason"` + Status string `json:"status"` + ErrorReason string `json:"error_reason,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// DropletAutoscalePoolRequest represents a DigitalOcean droplet autoscale pool create/update request +type DropletAutoscalePoolRequest struct { + Name string `json:"name"` + Config *DropletAutoscaleConfiguration `json:"config"` + DropletTemplate *DropletAutoscaleResourceTemplate `json:"droplet_template"` +} + +type dropletAutoscalePoolRoot struct { + AutoscalePool *DropletAutoscalePool `json:"autoscale_pool"` +} + +type dropletAutoscalePoolsRoot struct { + AutoscalePools []*DropletAutoscalePool `json:"autoscale_pools"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type dropletAutoscaleMembersRoot struct { + Droplets []*DropletAutoscaleResource `json:"droplets"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type dropletAutoscaleHistoryEventsRoot struct { + History []*DropletAutoscaleHistoryEvent `json:"history"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +// DropletAutoscaleServiceOp handles communication with droplet autoscale-related methods of the DigitalOcean API +type DropletAutoscaleServiceOp struct { + client *Client +} + +var _ DropletAutoscaleService = &DropletAutoscaleServiceOp{} + +// Create a new droplet autoscale pool +func (d *DropletAutoscaleServiceOp) Create(ctx context.Context, createReq *DropletAutoscalePoolRequest) (*DropletAutoscalePool, *Response, error) { + req, err := d.client.NewRequest(ctx, http.MethodPost, dropletAutoscaleBasePath, createReq) + if err != nil { + return nil, nil, err + } + root := new(dropletAutoscalePoolRoot) + resp, err := d.client.Do(ctx, req, root) + if err != nil { + return nil, nil, err + } + return root.AutoscalePool, resp, nil +} + +// Get an existing droplet autoscale pool +func (d *DropletAutoscaleServiceOp) Get(ctx context.Context, id string) (*DropletAutoscalePool, *Response, error) { + req, err := d.client.NewRequest(ctx, http.MethodGet, fmt.Sprintf("%s/%s", dropletAutoscaleBasePath, id), nil) + if err != nil { + return nil, nil, err + } + root := new(dropletAutoscalePoolRoot) + resp, err := d.client.Do(ctx, req, root) + if err != nil { + return nil, nil, err + } + return root.AutoscalePool, resp, err +} + +// List all existing droplet autoscale pools +func (d *DropletAutoscaleServiceOp) List(ctx context.Context, opts *ListOptions) ([]*DropletAutoscalePool, *Response, error) { + path, err := addOptions(dropletAutoscaleBasePath, opts) + if err != nil { + return nil, nil, err + } + req, err := d.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(dropletAutoscalePoolsRoot) + resp, err := d.client.Do(ctx, req, root) + if err != nil { + return nil, nil, err + } + if root.Links != nil { + resp.Links = root.Links + } + if root.Meta != nil { + resp.Meta = root.Meta + } + return root.AutoscalePools, resp, err +} + +// ListMembers all members for an existing 
droplet autoscale pool +func (d *DropletAutoscaleServiceOp) ListMembers(ctx context.Context, id string, opts *ListOptions) ([]*DropletAutoscaleResource, *Response, error) { + path, err := addOptions(fmt.Sprintf("%s/%s/members", dropletAutoscaleBasePath, id), opts) + if err != nil { + return nil, nil, err + } + req, err := d.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(dropletAutoscaleMembersRoot) + resp, err := d.client.Do(ctx, req, root) + if err != nil { + return nil, nil, err + } + if root.Links != nil { + resp.Links = root.Links + } + if root.Meta != nil { + resp.Meta = root.Meta + } + return root.Droplets, resp, err +} + +// ListHistory all history events for an existing droplet autoscale pool +func (d *DropletAutoscaleServiceOp) ListHistory(ctx context.Context, id string, opts *ListOptions) ([]*DropletAutoscaleHistoryEvent, *Response, error) { + path, err := addOptions(fmt.Sprintf("%s/%s/history", dropletAutoscaleBasePath, id), opts) + if err != nil { + return nil, nil, err + } + req, err := d.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(dropletAutoscaleHistoryEventsRoot) + resp, err := d.client.Do(ctx, req, root) + if err != nil { + return nil, nil, err + } + if root.Links != nil { + resp.Links = root.Links + } + if root.Meta != nil { + resp.Meta = root.Meta + } + return root.History, resp, err +} + +// Update an existing autoscale pool +func (d *DropletAutoscaleServiceOp) Update(ctx context.Context, id string, updateReq *DropletAutoscalePoolRequest) (*DropletAutoscalePool, *Response, error) { + req, err := d.client.NewRequest(ctx, http.MethodPut, fmt.Sprintf("%s/%s", dropletAutoscaleBasePath, id), updateReq) + if err != nil { + return nil, nil, err + } + root := new(dropletAutoscalePoolRoot) + resp, err := d.client.Do(ctx, req, root) + if err != nil { + return nil, nil, err + } + return root.AutoscalePool, resp, nil +} + +// Delete an existing autoscale pool +func (d *DropletAutoscaleServiceOp) Delete(ctx context.Context, id string) (*Response, error) { + req, err := d.client.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("%s/%s", dropletAutoscaleBasePath, id), nil) + if err != nil { + return nil, err + } + return d.client.Do(ctx, req, nil) +} + +// DeleteDangerous deletes an existing autoscale pool with all underlying resources +func (d *DropletAutoscaleServiceOp) DeleteDangerous(ctx context.Context, id string) (*Response, error) { + req, err := d.client.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("%s/%s/dangerous", dropletAutoscaleBasePath, id), nil) + req.Header.Set("X-Dangerous", "true") + if err != nil { + return nil, err + } + return d.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/digitalocean/godo/droplets.go b/vendor/github.com/digitalocean/godo/droplets.go index 5f198636..2ddd7d6b 100644 --- a/vendor/github.com/digitalocean/godo/droplets.go +++ b/vendor/github.com/digitalocean/godo/droplets.go @@ -17,6 +17,7 @@ var errNoNetworks = errors.New("no networks have been defined") // See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Droplets type DropletsService interface { List(context.Context, *ListOptions) ([]Droplet, *Response, error) + ListWithGPUs(context.Context, *ListOptions) ([]Droplet, *Response, error) ListByName(context.Context, string, *ListOptions) ([]Droplet, *Response, error) ListByTag(context.Context, string, *ListOptions) ([]Droplet, *Response, error) Get(context.Context, int) (*Droplet, *Response, 
error) @@ -29,6 +30,9 @@ type DropletsService interface { Backups(context.Context, int, *ListOptions) ([]Image, *Response, error) Actions(context.Context, int, *ListOptions) ([]Action, *Response, error) Neighbors(context.Context, int) ([]Droplet, *Response, error) + GetBackupPolicy(context.Context, int) (*DropletBackupPolicy, *Response, error) + ListBackupPolicies(context.Context, *ListOptions) (map[int]*DropletBackupPolicy, *Response, error) + ListSupportedBackupPolicies(context.Context) ([]*SupportedBackupPolicy, *Response, error) } // DropletsServiceOp handles communication with the Droplet related methods of the @@ -217,37 +221,46 @@ func (d DropletCreateSSHKey) MarshalJSON() ([]byte, error) { // DropletCreateRequest represents a request to create a Droplet. type DropletCreateRequest struct { - Name string `json:"name"` - Region string `json:"region"` - Size string `json:"size"` - Image DropletCreateImage `json:"image"` - SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` - Backups bool `json:"backups"` - IPv6 bool `json:"ipv6"` - PrivateNetworking bool `json:"private_networking"` - Monitoring bool `json:"monitoring"` - UserData string `json:"user_data,omitempty"` - Volumes []DropletCreateVolume `json:"volumes,omitempty"` - Tags []string `json:"tags"` - VPCUUID string `json:"vpc_uuid,omitempty"` - WithDropletAgent *bool `json:"with_droplet_agent,omitempty"` + Name string `json:"name"` + Region string `json:"region"` + Size string `json:"size"` + Image DropletCreateImage `json:"image"` + SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + PrivateNetworking bool `json:"private_networking"` + Monitoring bool `json:"monitoring"` + UserData string `json:"user_data,omitempty"` + Volumes []DropletCreateVolume `json:"volumes,omitempty"` + Tags []string `json:"tags"` + VPCUUID string `json:"vpc_uuid,omitempty"` + WithDropletAgent *bool `json:"with_droplet_agent,omitempty"` + BackupPolicy *DropletBackupPolicyRequest `json:"backup_policy,omitempty"` } // DropletMultiCreateRequest is a request to create multiple Droplets. type DropletMultiCreateRequest struct { - Names []string `json:"names"` - Region string `json:"region"` - Size string `json:"size"` - Image DropletCreateImage `json:"image"` - SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` - Backups bool `json:"backups"` - IPv6 bool `json:"ipv6"` - PrivateNetworking bool `json:"private_networking"` - Monitoring bool `json:"monitoring"` - UserData string `json:"user_data,omitempty"` - Tags []string `json:"tags"` - VPCUUID string `json:"vpc_uuid,omitempty"` - WithDropletAgent *bool `json:"with_droplet_agent,omitempty"` + Names []string `json:"names"` + Region string `json:"region"` + Size string `json:"size"` + Image DropletCreateImage `json:"image"` + SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + PrivateNetworking bool `json:"private_networking"` + Monitoring bool `json:"monitoring"` + UserData string `json:"user_data,omitempty"` + Tags []string `json:"tags"` + VPCUUID string `json:"vpc_uuid,omitempty"` + WithDropletAgent *bool `json:"with_droplet_agent,omitempty"` + BackupPolicy *DropletBackupPolicyRequest `json:"backup_policy,omitempty"` +} + +// DropletBackupPolicyRequest defines the backup policy when creating a Droplet. 
+type DropletBackupPolicyRequest struct { + Plan string `json:"plan,omitempty"` + Weekday string `json:"weekday,omitempty"` + Hour *int `json:"hour,omitempty"` } func (d DropletCreateRequest) String() string { @@ -321,6 +334,17 @@ func (s *DropletsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Dropl return s.list(ctx, path) } +// ListWithGPUs lists all Droplets with GPUs. +func (s *DropletsServiceOp) ListWithGPUs(ctx context.Context, opt *ListOptions) ([]Droplet, *Response, error) { + path := fmt.Sprintf("%s?type=gpus", dropletBasePath) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(ctx, path) +} + // ListByName lists all Droplets filtered by name returning only exact matches. // It is case-insensitive func (s *DropletsServiceOp) ListByName(ctx context.Context, name string, opt *ListOptions) ([]Droplet, *Response, error) { @@ -606,3 +630,109 @@ func (s *DropletsServiceOp) dropletActionStatus(ctx context.Context, uri string) return action.Status, nil } + +// DropletBackupPolicy defines the information about a droplet's backup policy. +type DropletBackupPolicy struct { + DropletID int `json:"droplet_id,omitempty"` + BackupEnabled bool `json:"backup_enabled,omitempty"` + BackupPolicy *DropletBackupPolicyConfig `json:"backup_policy,omitempty"` + NextBackupWindow *BackupWindow `json:"next_backup_window,omitempty"` +} + +// DropletBackupPolicyConfig defines the backup policy for a Droplet. +type DropletBackupPolicyConfig struct { + Plan string `json:"plan,omitempty"` + Weekday string `json:"weekday,omitempty"` + Hour int `json:"hour,omitempty"` + WindowLengthHours int `json:"window_length_hours,omitempty"` + RetentionPeriodDays int `json:"retention_period_days,omitempty"` +} + +// dropletBackupPolicyRoot represents a DropletBackupPolicy root +type dropletBackupPolicyRoot struct { + DropletBackupPolicy *DropletBackupPolicy `json:"policy,omitempty"` +} + +type dropletBackupPoliciesRoot struct { + DropletBackupPolicies map[int]*DropletBackupPolicy `json:"policies,omitempty"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta"` +} + +// Get individual droplet backup policy. +func (s *DropletsServiceOp) GetBackupPolicy(ctx context.Context, dropletID int) (*DropletBackupPolicy, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/backups/policy", dropletBasePath, dropletID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletBackupPolicyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.DropletBackupPolicy, resp, err +} + +// List all droplet backup policies. 
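
// Sketch of the new DropletsService helpers above: fetching a Droplet's backup
// policy and listing GPU Droplets. Assumes client/ctx as before; dropletID and the
// page size are placeholders.
func exampleDropletQueries(ctx context.Context, client *godo.Client, dropletID int) error {
	policy, _, err := client.Droplets.GetBackupPolicy(ctx, dropletID)
	if err != nil {
		return err
	}
	_ = policy.BackupEnabled
	gpuDroplets, _, err := client.Droplets.ListWithGPUs(ctx, &godo.ListOptions{PerPage: 50})
	if err != nil {
		return err
	}
	_ = gpuDroplets
	return nil
}
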
+func (s *DropletsServiceOp) ListBackupPolicies(ctx context.Context, opt *ListOptions) (map[int]*DropletBackupPolicy, *Response, error) { + path := fmt.Sprintf("%s/backups/policies", dropletBasePath) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletBackupPoliciesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.DropletBackupPolicies, resp, nil +} + +type SupportedBackupPolicy struct { + Name string `json:"name,omitempty"` + PossibleWindowStarts []int `json:"possible_window_starts,omitempty"` + WindowLengthHours int `json:"window_length_hours,omitempty"` + RetentionPeriodDays int `json:"retention_period_days,omitempty"` + PossibleDays []string `json:"possible_days,omitempty"` +} + +type dropletSupportedBackupPoliciesRoot struct { + SupportedBackupPolicies []*SupportedBackupPolicy `json:"supported_policies,omitempty"` +} + +// List supported droplet backup policies. +func (s *DropletsServiceOp) ListSupportedBackupPolicies(ctx context.Context) ([]*SupportedBackupPolicy, *Response, error) { + path := fmt.Sprintf("%s/backups/supported_policies", dropletBasePath) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletSupportedBackupPoliciesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.SupportedBackupPolicies, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 9362e118..4b9ad6b6 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.121.0" + libraryVersion = "1.130.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -65,6 +65,7 @@ type Client struct { Domains DomainsService Droplets DropletsService DropletActions DropletActionsService + DropletAutoscale DropletAutoscaleService Firewalls FirewallsService FloatingIPs FloatingIPsService FloatingIPActions FloatingIPActionsService @@ -80,6 +81,7 @@ type Client struct { Projects ProjectsService Regions RegionsService Registry RegistryService + Registries RegistriesService ReservedIPs ReservedIPsService ReservedIPActions ReservedIPActionsService Sizes SizesService @@ -275,6 +277,7 @@ func NewClient(httpClient *http.Client) *Client { c.Domains = &DomainsServiceOp{client: c} c.Droplets = &DropletsServiceOp{client: c} c.DropletActions = &DropletActionsServiceOp{client: c} + c.DropletAutoscale = &DropletAutoscaleServiceOp{client: c} c.Firewalls = &FirewallsServiceOp{client: c} c.FloatingIPs = &FloatingIPsServiceOp{client: c} c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c} @@ -290,6 +293,7 @@ func NewClient(httpClient *http.Client) *Client { c.Projects = &ProjectsServiceOp{client: c} c.Regions = &RegionsServiceOp{client: c} c.Registry = &RegistryServiceOp{client: c} + c.Registries = &RegistriesServiceOp{client: c} c.ReservedIPs = &ReservedIPsServiceOp{client: c} c.ReservedIPActions = &ReservedIPActionsServiceOp{client: c} c.Sizes = &SizesServiceOp{client: c} diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go 
b/vendor/github.com/digitalocean/godo/kubernetes.go index 8ef9d241..9b3bcfa1 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -65,11 +65,13 @@ type KubernetesServiceOp struct { // KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster. type KubernetesClusterCreateRequest struct { - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - VersionSlug string `json:"version,omitempty"` - Tags []string `json:"tags,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` + Name string `json:"name,omitempty"` + RegionSlug string `json:"region,omitempty"` + VersionSlug string `json:"version,omitempty"` + Tags []string `json:"tags,omitempty"` + VPCUUID string `json:"vpc_uuid,omitempty"` + ClusterSubnet string `json:"cluster_subnet,omitempty"` + ServiceSubnet string `json:"service_subnet,omitempty"` // Create cluster with highly available control plane HA bool `json:"ha"` diff --git a/vendor/github.com/digitalocean/godo/monitoring.go b/vendor/github.com/digitalocean/godo/monitoring.go index 937bb8d9..00feb256 100644 --- a/vendor/github.com/digitalocean/godo/monitoring.go +++ b/vendor/github.com/digitalocean/godo/monitoring.go @@ -10,9 +10,10 @@ import ( ) const ( - monitoringBasePath = "v2/monitoring" - alertPolicyBasePath = monitoringBasePath + "/alerts" - dropletMetricsBasePath = monitoringBasePath + "/metrics/droplet" + monitoringBasePath = "v2/monitoring" + alertPolicyBasePath = monitoringBasePath + "/alerts" + dropletMetricsBasePath = monitoringBasePath + "/metrics/droplet" + loadBalancerMetricsBasePath = monitoringBasePath + "/metrics/load_balancer" DropletCPUUtilizationPercent = "v1/insights/droplet/cpu" DropletMemoryUtilizationPercent = "v1/insights/droplet/memory_utilization_percent" @@ -67,6 +68,34 @@ type MonitoringService interface { GetDropletCachedMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) GetDropletFreeMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) GetDropletTotalMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) + + GetLoadBalancerFrontendHttpRequestsPerSecond(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendConnectionsCurrent(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendConnectionsLimit(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendCpuUtilization(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendNetworkThroughputHttp(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendNetworkThroughputUdp(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendNetworkThroughputTcp(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendNlbTcpNetworkThroughput(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendNlbUdpNetworkThroughput(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendFirewallDroppedBytes(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, 
*Response, error) + GetLoadBalancerFrontendFirewallDroppedPackets(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendHttpResponses(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendTlsConnectionsCurrent(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendTlsConnectionsLimit(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerFrontendTlsConnectionsExceedingRateLimit(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpSessionDurationAvg(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpSessionDuration50P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpSessionDuration95P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpResponseTimeAvg(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpResponseTime50P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpResponseTime95P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpResponseTime99P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsQueueSize(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHttpResponses(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsConnections(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsHealthChecks(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + GetLoadBalancerDropletsDowntime(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) } // MonitoringServiceOp handles communication with monitoring related methods of the @@ -163,6 +192,13 @@ type DropletBandwidthMetricsRequest struct { Direction string } +// LoadBalancerMetricsRequest holds the information needed to retrieve Load Balancer various metrics. +type LoadBalancerMetricsRequest struct { + LoadBalancerID string + Start time.Time + End time.Time +} + // MetricsResponse holds a Metrics query response. type MetricsResponse struct { Status string `json:"status"` @@ -372,3 +408,157 @@ func (s *MonitoringServiceOp) getDropletMetrics(ctx context.Context, path string return root, resp, err } + +// GetLoadBalancerFrontendHttpRequestsPerSecond retrieves frontend HTTP requests per second for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendHttpRequestsPerSecond(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_http_requests_per_second", args) +} + +// GetLoadBalancerFrontendConnectionsCurrent retrieves frontend total current active connections for a given load balancer. 
+func (s *MonitoringServiceOp) GetLoadBalancerFrontendConnectionsCurrent(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_connections_current", args) +} + +// GetLoadBalancerFrontendConnectionsLimit retrieves frontend max connections limit for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendConnectionsLimit(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_connections_limit", args) +} + +// GetLoadBalancerFrontendCpuUtilization retrieves frontend average percentage cpu utilization for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendCpuUtilization(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_cpu_utilization", args) +} + +// GetLoadBalancerFrontendNetworkThroughputHttp retrieves frontend HTTP throughput for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendNetworkThroughputHttp(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_network_throughput_http", args) +} + +// GetLoadBalancerFrontendNetworkThroughputUdp retrieves frontend UDP throughput for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendNetworkThroughputUdp(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_network_throughput_udp", args) +} + +// GetLoadBalancerFrontendNetworkThroughputTcp retrieves frontend TCP throughput for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendNetworkThroughputTcp(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_network_throughput_tcp", args) +} + +// GetLoadBalancerFrontendNlbTcpNetworkThroughput retrieves frontend TCP throughput for a given network load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendNlbTcpNetworkThroughput(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_nlb_tcp_network_throughput", args) +} + +// GetLoadBalancerFrontendNlbUdpNetworkThroughput retrieves frontend UDP throughput for a given network load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendNlbUdpNetworkThroughput(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_nlb_udp_network_throughput", args) +} + +// GetLoadBalancerFrontendFirewallDroppedBytes retrieves firewall dropped bytes for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendFirewallDroppedBytes(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_firewall_dropped_bytes", args) +} + +// GetLoadBalancerFrontendFirewallDroppedPackets retrieves firewall dropped packets for a given load balancer. 
+func (s *MonitoringServiceOp) GetLoadBalancerFrontendFirewallDroppedPackets(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_firewall_dropped_packets", args) +} + +// GetLoadBalancerFrontendHttpResponses retrieves frontend HTTP rate of response code for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendHttpResponses(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_http_responses", args) +} + +// GetLoadBalancerFrontendTlsConnectionsCurrent retrieves frontend current TLS connections rate for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendTlsConnectionsCurrent(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_tls_connections_current", args) +} + +// GetLoadBalancerFrontendTlsConnectionsLimit retrieves frontend max TLS connections limit for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendTlsConnectionsLimit(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_tls_connections_limit", args) +} + +// GetLoadBalancerFrontendTlsConnectionsExceedingRateLimit retrieves frontend closed TLS connections for exceeded rate limit for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerFrontendTlsConnectionsExceedingRateLimit(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/frontend_tls_connections_exceeding_rate_limit", args) +} + +// GetLoadBalancerDropletsHttpSessionDurationAvg retrieves droplet average HTTP session duration for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpSessionDurationAvg(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_session_duration_avg", args) +} + +// GetLoadBalancerDropletsHttpSessionDuration50P retrieves droplet 50th percentile HTTP session duration for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpSessionDuration50P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_session_duration_50p", args) +} + +// GetLoadBalancerDropletsHttpSessionDuration95P retrieves droplet 95th percentile HTTP session duration for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpSessionDuration95P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_session_duration_95p", args) +} + +// GetLoadBalancerDropletsHttpResponseTimeAvg retrieves droplet average HTTP response time for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpResponseTimeAvg(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_response_time_avg", args) +} + +// GetLoadBalancerDropletsHttpResponseTime50P retrieves droplet 50th percentile HTTP response time for a given load balancer. 
+func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpResponseTime50P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_response_time_50p", args) +} + +// GetLoadBalancerDropletsHttpResponseTime95P retrieves droplet 95th percentile HTTP response time for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpResponseTime95P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_response_time_95p", args) +} + +// GetLoadBalancerDropletsHttpResponseTime99P retrieves droplet 99th percentile HTTP response time for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpResponseTime99P(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_response_time_99p", args) +} + +// GetLoadBalancerDropletsQueueSize retrieves droplet queue size for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsQueueSize(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_queue_size", args) +} + +// GetLoadBalancerDropletsHttpResponses retrieves droplet HTTP rate of response code for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHttpResponses(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_http_responses", args) +} + +// GetLoadBalancerDropletsConnections retrieves droplet active connections for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsConnections(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_connections", args) +} + +// GetLoadBalancerDropletsHealthChecks retrieves droplet health check status for a given load balancer. +func (s *MonitoringServiceOp) GetLoadBalancerDropletsHealthChecks(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_health_checks", args) +} + +// GetLoadBalancerDropletsDowntime retrieves droplet downtime status for a given load balancer. 
+func (s *MonitoringServiceOp) GetLoadBalancerDropletsDowntime(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + return s.getLoadBalancerMetrics(ctx, "/droplets_downtime", args) +} + +func (s *MonitoringServiceOp) getLoadBalancerMetrics(ctx context.Context, path string, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) { + fullPath := loadBalancerMetricsBasePath + path + req, err := s.client.NewRequest(ctx, http.MethodGet, fullPath, nil) + if err != nil { + return nil, nil, err + } + + q := req.URL.Query() + q.Add("lb_id", args.LoadBalancerID) + q.Add("start", fmt.Sprintf("%d", args.Start.Unix())) + q.Add("end", fmt.Sprintf("%d", args.End.Unix())) + req.URL.RawQuery = q.Encode() + + root := new(MetricsResponse) + resp, err := s.client.Do(ctx, req, root) + + return root, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/registry.go b/vendor/github.com/digitalocean/godo/registry.go index b0c24328..e6482268 100644 --- a/vendor/github.com/digitalocean/godo/registry.go +++ b/vendor/github.com/digitalocean/godo/registry.go @@ -14,6 +14,9 @@ const ( registryPath = "/v2/registry" // RegistryServer is the hostname of the DigitalOcean registry service RegistryServer = "registry.digitalocean.com" + + // Multi-registry Open Beta API constants + registriesPath = "/v2/registries" ) // RegistryService is an interface for interfacing with the Registry endpoints @@ -240,6 +243,19 @@ type RegistryValidateNameRequest struct { Name string `json:"name"` } +// Multi-registry Open Beta API structs + +type registriesRoot struct { + Registries []*Registry `json:"registries,omitempty"` + TotalStorageUsageBytes uint64 `json:"total_storage_usage_bytes,omitempty"` +} + +// RegistriesCreateRequest represents a request to create a secondary registry. +type RegistriesCreateRequest struct { + Name string `json:"name,omitempty"` + Region string `json:"region,omitempty"` +} + // Get retrieves the details of a Registry. func (svc *RegistryServiceOp) Get(ctx context.Context) (*Registry, *Response, error) { req, err := svc.client.NewRequest(ctx, http.MethodGet, registryPath, nil) @@ -610,3 +626,107 @@ func (svc *RegistryServiceOp) ValidateName(ctx context.Context, request *Registr } return resp, nil } + +// RegistriesService is an interface for interfacing with the new multiple-registry beta endpoints +// of the DigitalOcean API. +// +// We are creating a separate Service in alignment with the new /v2/registries endpoints. +type RegistriesService interface { + Get(context.Context, string) (*Registry, *Response, error) + List(context.Context) ([]*Registry, *Response, error) + Create(context.Context, *RegistriesCreateRequest) (*Registry, *Response, error) + Delete(context.Context, string) (*Response, error) + DockerCredentials(context.Context, string, *RegistryDockerCredentialsRequest) (*DockerCredentials, *Response, error) +} + +var _ RegistriesService = &RegistriesServiceOp{} + +// RegistriesServiceOp handles communication with the multiple-registry beta methods. +type RegistriesServiceOp struct { + client *Client +} + +// Get returns the details of a named Registry. 
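The monitoring.go hunk above is the consumer-facing piece of the new load balancer metrics support. The following is an illustrative sketch only, not part of the diff, assuming the vendored godo v1.130.0 API; the token and load balancer ID are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder token and load balancer ID.
	client := godo.NewFromToken("dop_v1_example")
	ctx := context.Background()

	req := &godo.LoadBalancerMetricsRequest{
		LoadBalancerID: "4de7ac8b-495b-4884-9a69-1050c6793cd6", // placeholder ID
		Start:          time.Now().Add(-1 * time.Hour),
		End:            time.Now(),
	}

	// Frontend HTTP requests per second over the last hour.
	metrics, _, err := client.Monitoring.GetLoadBalancerFrontendHttpRequestsPerSecond(ctx, req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("query status:", metrics.Status)
}
```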
+func (svc *RegistriesServiceOp) Get(ctx context.Context, registry string) (*Registry, *Response, error) { + path := fmt.Sprintf("%s/%s", registriesPath, registry) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(registryRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Registry, resp, nil +} + +// List returns a list of the named Registries. +func (svc *RegistriesServiceOp) List(ctx context.Context) ([]*Registry, *Response, error) { + req, err := svc.client.NewRequest(ctx, http.MethodGet, registriesPath, nil) + if err != nil { + return nil, nil, err + } + root := new(registriesRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Registries, resp, nil +} + +// Create creates a named Registry. +func (svc *RegistriesServiceOp) Create(ctx context.Context, create *RegistriesCreateRequest) (*Registry, *Response, error) { + req, err := svc.client.NewRequest(ctx, http.MethodPost, registriesPath, create) + if err != nil { + return nil, nil, err + } + root := new(registryRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Registry, resp, nil +} + +// Delete deletes a named Registry. There is no way to recover a Registry once it has +// been destroyed. +func (svc *RegistriesServiceOp) Delete(ctx context.Context, registry string) (*Response, error) { + path := fmt.Sprintf("%s/%s", registriesPath, registry) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// DockerCredentials retrieves a Docker config file containing named Registry's credentials. 
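For reference, a sketch of how the new multi-registry beta endpoints might be exercised. It is illustrative only: it assumes the godo client exposes the service as client.Registries (that wiring is not part of this hunk), and the token, registry name, region, and output path are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder token; assumes client.Registries exposes RegistriesService.
	client := godo.NewFromToken("dop_v1_example")
	ctx := context.Background()

	// Create a secondary registry in a chosen region.
	created, _, err := client.Registries.Create(ctx, &godo.RegistriesCreateRequest{
		Name:   "example-secondary", // placeholder name
		Region: "nyc3",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", created.Name)

	// List every registry on the account.
	registries, _, err := client.Registries.List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range registries {
		fmt.Println("registry:", r.Name)
	}

	// Fetch read/write Docker credentials for one registry and write them out.
	dc, _, err := client.Registries.DockerCredentials(ctx, "example-secondary",
		&godo.RegistryDockerCredentialsRequest{ReadWrite: true})
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("docker-config.json", dc.DockerConfigJSON, 0o600); err != nil {
		log.Fatal(err)
	}
}
```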
+func (svc *RegistriesServiceOp) DockerCredentials(ctx context.Context, registry string, request *RegistryDockerCredentialsRequest) (*DockerCredentials, *Response, error) { + path := fmt.Sprintf("%s/%s/%s", registriesPath, registry, "docker-credentials") + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + q := req.URL.Query() + q.Add("read_write", strconv.FormatBool(request.ReadWrite)) + if request.ExpirySeconds != nil { + q.Add("expiry_seconds", strconv.Itoa(*request.ExpirySeconds)) + } + req.URL.RawQuery = q.Encode() + + var buf bytes.Buffer + resp, err := svc.client.Do(ctx, req, &buf) + if err != nil { + return nil, resp, err + } + + dc := &DockerCredentials{ + DockerConfigJSON: buf.Bytes(), + } + return dc, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/sizes.go b/vendor/github.com/digitalocean/godo/sizes.go index a3cb7452..72d5321c 100644 --- a/vendor/github.com/digitalocean/godo/sizes.go +++ b/vendor/github.com/digitalocean/godo/sizes.go @@ -22,16 +22,44 @@ var _ SizesService = &SizesServiceOp{} // Size represents a DigitalOcean Size type Size struct { - Slug string `json:"slug,omitempty"` - Memory int `json:"memory,omitempty"` - Vcpus int `json:"vcpus,omitempty"` - Disk int `json:"disk,omitempty"` - PriceMonthly float64 `json:"price_monthly,omitempty"` - PriceHourly float64 `json:"price_hourly,omitempty"` - Regions []string `json:"regions,omitempty"` - Available bool `json:"available,omitempty"` - Transfer float64 `json:"transfer,omitempty"` - Description string `json:"description,omitempty"` + Slug string `json:"slug,omitempty"` + Memory int `json:"memory,omitempty"` + Vcpus int `json:"vcpus,omitempty"` + Disk int `json:"disk,omitempty"` + PriceMonthly float64 `json:"price_monthly,omitempty"` + PriceHourly float64 `json:"price_hourly,omitempty"` + Regions []string `json:"regions,omitempty"` + Available bool `json:"available,omitempty"` + Transfer float64 `json:"transfer,omitempty"` + Description string `json:"description,omitempty"` + GPUInfo *GPUInfo `json:"gpu_info,omitempty"` + DiskInfo []DiskInfo `json:"disk_info,omitempty"` +} + +// DiskInfo containing information about the disks available to Droplets created +// with this size. +type DiskInfo struct { + Type string `json:"type,omitempty"` + Size *DiskSize `json:"size,omitempty"` +} + +// DiskSize provides information about the size of a disk. +type DiskSize struct { + Amount int `json:"amount,omitempty"` + Unit string `json:"unit,omitempty"` +} + +// GPUInfo provides information about the GPU available to Droplets created with this size. +type GPUInfo struct { + Count int `json:"count,omitempty"` + VRAM *VRAM `json:"vram,omitempty"` + Model string `json:"model,omitempty"` +} + +// VRAM provides information about the amount of VRAM available to the GPU. 
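The sizes.go change above surfaces GPU and disk metadata on godo.Size. A short sketch, not part of the diff, that lists GPU-capable sizes; the token is a placeholder, and the VRAM type it reads is defined in the lines that follow.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder token.
	client := godo.NewFromToken("dop_v1_example")

	sizes, _, err := client.Sizes.List(context.Background(), &godo.ListOptions{PerPage: 200})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range sizes {
		if s.GPUInfo == nil {
			continue // not a GPU size
		}
		fmt.Printf("%s: %d x %s", s.Slug, s.GPUInfo.Count, s.GPUInfo.Model)
		if s.GPUInfo.VRAM != nil {
			fmt.Printf(" (%d %s VRAM)", s.GPUInfo.VRAM.Amount, s.GPUInfo.VRAM.Unit)
		}
		fmt.Println()
	}
}
```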
+type VRAM struct { + Amount int `json:"amount,omitempty"` + Unit string `json:"unit,omitempty"` } func (s Size) String() string { diff --git a/vendor/github.com/digitalocean/godo/strings.go b/vendor/github.com/digitalocean/godo/strings.go index f92893ed..5a258131 100644 --- a/vendor/github.com/digitalocean/godo/strings.go +++ b/vendor/github.com/digitalocean/godo/strings.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "reflect" + "sort" "strings" ) @@ -46,6 +47,8 @@ func stringifyValue(w io.Writer, val reflect.Value) { return case reflect.Struct: stringifyStruct(w, v) + case reflect.Map: + stringifyMap(w, v) default: if v.CanInterface() { fmt.Fprint(w, v.Interface()) @@ -66,6 +69,27 @@ func stringifySlice(w io.Writer, v reflect.Value) { _, _ = w.Write([]byte{']'}) } +func stringifyMap(w io.Writer, v reflect.Value) { + _, _ = w.Write([]byte("map[")) + + // Sort the keys so that the output is stable + keys := v.MapKeys() + sort.Slice(keys, func(i, j int) bool { + return fmt.Sprintf("%v", keys[i]) < fmt.Sprintf("%v", keys[j]) + }) + + for i, key := range keys { + stringifyValue(w, key) + _, _ = w.Write([]byte{':'}) + stringifyValue(w, v.MapIndex(key)) + if i < len(keys)-1 { + _, _ = w.Write([]byte(", ")) + } + } + + _, _ = w.Write([]byte("]")) +} + func stringifyStruct(w io.Writer, v reflect.Value) { if v.Type().Name() != "" { _, _ = w.Write([]byte(v.Type().String())) diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321..109997d7 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09e..e14b766a 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200a..099867de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
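The Connectx wrapper documented above (its implementation follows just below) could be called roughly as in this darwin-only sketch. Illustrative only: the destination address is a placeholder and no payload is enqueued.

```go
// Illustrative sketch of the new unix.Connectx wrapper (darwin only).
//go:build darwin

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Placeholder destination address and port.
	dst := &unix.SockaddrInet4{Port: 80, Addr: [4]byte{93, 184, 216, 34}}

	// No source interface or address, default association, no data to enqueue.
	n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bytes enqueued:", n)
}
```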
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f..a6a2d2fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac17..d73c4652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a..4a55a400 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533e..24b346e1 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646b..ebd21310 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f6..824b9c2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), 
uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75..4f178a22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..d003c3d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..0d45a941 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 
+ POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b9..9f2550dc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..ad05b51a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd1..5cee9a31 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP 
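The syscall_windows.go hunk in this range adds console code page accessors (the matching SetConsoleCP/SetConsoleOutputCP declarations continue just below). An illustrative, windows-only sketch that switches the output code page to UTF-8 and restores it on exit:

```go
// Illustrative sketch of the new console code page helpers (windows only).
//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	old, err := windows.GetConsoleOutputCP()
	if err != nil {
		log.Fatal(err)
	}
	defer windows.SetConsoleOutputCP(old) // restore the original code page on exit

	// 65001 is the UTF-8 code page.
	if err := windows.SetConsoleOutputCP(65001); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("console output code page switched to UTF-8 (was %d)\n", old)
}
```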
+//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d5..7b97a154 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3..4c2e1bdc 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, 
uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 2eb31792..cc889c85 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,8 +7,8 @@ github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.121.0 -## explicit; go 1.20 +# github.com/digitalocean/godo v1.130.0 +## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics # github.com/emicklei/go-restful/v3 v3.12.1 @@ -214,11 +214,11 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.24.0 +# golang.org/x/sys v0.25.0 ## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix
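Finally, circling back to the kubernetes.go hunk near the top of this vendor update: the new ClusterSubnet and ServiceSubnet fields allow custom CIDRs to be supplied at cluster create time. A sketch, not part of the diff, with placeholder token, version slug, and CIDR ranges.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder token.
	client := godo.NewFromToken("dop_v1_example")

	create := &godo.KubernetesClusterCreateRequest{
		Name:          "example-doks",
		RegionSlug:    "nyc1",
		VersionSlug:   "1.31.1-do.0",     // placeholder version slug
		ClusterSubnet: "192.168.0.0/20",  // new custom pod CIDR field
		ServiceSubnet: "192.168.16.0/22", // new custom service CIDR field
		NodePools: []*godo.KubernetesNodePoolCreateRequest{
			{Name: "default", Size: "s-2vcpu-4gb", Count: 2},
		},
	}

	cluster, _, err := client.Kubernetes.Create(context.Background(), create)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created cluster:", cluster.ID)
}
```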