From 9a582c75dbd3b66e4d6706c7e835e290d6363f5c Mon Sep 17 00:00:00 2001 From: Pallab Pain Date: Sun, 28 Jan 2024 17:42:56 +0530 Subject: [PATCH] chore: updates code formatting --- CHANGELOG.md | 22 +- cmd/gh-action-integration-generator/main.go | 5 +- cmd/headscale/cli/debug.go | 4 +- cmd/headscale/cli/nodes.go | 7 +- cmd/headscale/cli/preauthkeys.go | 6 +- cmd/headscale/cli/routes.go | 22 +- cmd/headscale/cli/utils.go | 6 +- hscontrol/app.go | 22 +- hscontrol/auth.go | 12 +- hscontrol/auth_noise.go | 4 +- hscontrol/db/addresses.go | 4 +- hscontrol/db/db.go | 440 ++++++++++---------- hscontrol/db/node.go | 5 +- hscontrol/db/node_test.go | 44 +- hscontrol/db/preauth_keys.go | 5 +- hscontrol/db/routes.go | 11 +- hscontrol/derp/server/derp_server.go | 5 +- hscontrol/mapper/mapper.go | 40 +- hscontrol/mapper/mapper_test.go | 16 +- hscontrol/mapper/tail_test.go | 8 +- hscontrol/notifier/notifier.go | 31 +- hscontrol/oidc.go | 12 +- hscontrol/platform_config.go | 10 +- hscontrol/policy/acls.go | 8 +- hscontrol/policy/acls_test.go | 80 +++- hscontrol/poll.go | 30 +- hscontrol/tailsql.go | 17 +- hscontrol/types/common.go | 8 +- hscontrol/types/node.go | 23 +- hscontrol/types/node_test.go | 13 +- hscontrol/types/routes.go | 3 +- integration/acl_test.go | 24 +- integration/auth_oidc_test.go | 31 +- integration/auth_web_flow_test.go | 17 +- integration/cli_test.go | 105 +++-- integration/dockertestutil/network.go | 14 +- integration/embedded_derp_test.go | 18 +- integration/general_test.go | 246 +++++++---- integration/hsic/hsic.go | 38 +- integration/route_test.go | 115 +++-- integration/scenario.go | 18 +- integration/scenario_test.go | 6 +- integration/ssh_test.go | 12 +- integration/tsic/tsic.go | 16 +- integration/utils.go | 7 +- proto/headscale/v1/headscale.proto | 170 ++++---- proto/headscale/v1/node.proto | 14 +- proto/headscale/v1/preauthkey.proto | 6 +- proto/headscale/v1/routes.proto | 12 +- 49 files changed, 1185 insertions(+), 607 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fccae5c535c..20ce044090a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,17 +34,17 @@ after improving the test harness as part of adopting [#1460](https://github.com/ ### Changes -* Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644) -* Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484) -* SSH support [#1487](https://github.com/juanfont/headscale/pull/1487) -* State management has been improved [#1492](https://github.com/juanfont/headscale/pull/1492) -* Use error group handling to ensure tests actually pass [#1535](https://github.com/juanfont/headscale/pull/1535) based on [#1460](https://github.com/juanfont/headscale/pull/1460) -* Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480) -* Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524) -* Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563) -* Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259) -* Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. 
[#1565](https://github.com/juanfont/headscale/pull/1565) -* Add support for deleting api keys [#1702](https://github.com/juanfont/headscale/pull/1702) +- Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644) +- Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484) +- SSH support [#1487](https://github.com/juanfont/headscale/pull/1487) +- State management has been improved [#1492](https://github.com/juanfont/headscale/pull/1492) +- Use error group handling to ensure tests actually pass [#1535](https://github.com/juanfont/headscale/pull/1535) based on [#1460](https://github.com/juanfont/headscale/pull/1460) +- Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480) +- Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524) +- Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563) +- Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259) +- Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565) +- Add support for deleting api keys [#1702](https://github.com/juanfont/headscale/pull/1702) ## 0.22.3 (2023-05-12) diff --git a/cmd/gh-action-integration-generator/main.go b/cmd/gh-action-integration-generator/main.go index d5798a95e78..f4707da48c6 100644 --- a/cmd/gh-action-integration-generator/main.go +++ b/cmd/gh-action-integration-generator/main.go @@ -163,7 +163,10 @@ func main() { log.Fatalf("failed to render template: %s", err) } - testPath := path.Join(githubWorkflowPath, fmt.Sprintf(jobFileNameTemplate, test)) + testPath := path.Join( + githubWorkflowPath, + fmt.Sprintf(jobFileNameTemplate, test), + ) err := os.WriteFile(testPath, content.Bytes(), workflowFilePerm) if err != nil { diff --git a/cmd/headscale/cli/debug.go b/cmd/headscale/cli/debug.go index 054fc07fc39..04f20092a9b 100644 --- a/cmd/headscale/cli/debug.go +++ b/cmd/headscale/cli/debug.go @@ -11,7 +11,9 @@ import ( ) const ( - errPreAuthKeyMalformed = Error("key is malformed. expected 64 hex characters with `nodekey` prefix") + errPreAuthKeyMalformed = Error( + "key is malformed. 
expected 64 hex characters with `nodekey` prefix", + ) ) // Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors diff --git a/cmd/headscale/cli/nodes.go b/cmd/headscale/cli/nodes.go index ac996245655..e78142ee840 100644 --- a/cmd/headscale/cli/nodes.go +++ b/cmd/headscale/cli/nodes.go @@ -153,7 +153,12 @@ var registerNodeCmd = &cobra.Command{ SuccessOutput( response.GetNode(), - fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output) + fmt.Sprintf( + "Node %s registered", + response.GetNode().GetGivenName(), + ), + output, + ) }, } diff --git a/cmd/headscale/cli/preauthkeys.go b/cmd/headscale/cli/preauthkeys.go index c8dd2adcb2f..793b8f64167 100644 --- a/cmd/headscale/cli/preauthkeys.go +++ b/cmd/headscale/cli/preauthkeys.go @@ -214,7 +214,11 @@ var createPreAuthKeyCmd = &cobra.Command{ return } - SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output) + SuccessOutput( + response.GetPreAuthKey(), + response.GetPreAuthKey().GetKey(), + output, + ) }, } diff --git a/cmd/headscale/cli/routes.go b/cmd/headscale/cli/routes.go index 86ef295c094..ffa9dfc0fc1 100644 --- a/cmd/headscale/cli/routes.go +++ b/cmd/headscale/cli/routes.go @@ -164,7 +164,11 @@ var enableRouteCmd = &cobra.Command{ if err != nil { ErrorOutput( err, - fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()), + fmt.Sprintf( + "Cannot enable route %d: %s", + routeID, + status.Convert(err).Message(), + ), output, ) @@ -207,7 +211,11 @@ var disableRouteCmd = &cobra.Command{ if err != nil { ErrorOutput( err, - fmt.Sprintf("Cannot disable route %d: %s", routeID, status.Convert(err).Message()), + fmt.Sprintf( + "Cannot disable route %d: %s", + routeID, + status.Convert(err).Message(), + ), output, ) @@ -250,7 +258,11 @@ var deleteRouteCmd = &cobra.Command{ if err != nil { ErrorOutput( err, - fmt.Sprintf("Cannot delete route %d: %s", routeID, status.Convert(err).Message()), + fmt.Sprintf( + "Cannot delete route %d: %s", + routeID, + status.Convert(err).Message(), + ), output, ) @@ -267,7 +279,9 @@ var deleteRouteCmd = &cobra.Command{ // routesToPtables converts the list of routes to a nice table. func routesToPtables(routes []*v1.Route) pterm.TableData { - tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}} + tableData := pterm.TableData{ + {"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}, + } for _, route := range routes { var isPrimaryStr string diff --git a/cmd/headscale/cli/utils.go b/cmd/headscale/cli/utils.go index a193d17dfb4..34bd037926a 100644 --- a/cmd/headscale/cli/utils.go +++ b/cmd/headscale/cli/utils.go @@ -89,7 +89,11 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc. // Try to give the user better feedback if we cannot write to the headscale // socket. - socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint + socket, err := os.OpenFile( + cfg.UnixSocket, + os.O_WRONLY, + SocketWritePermissions, + ) //nolint if err != nil { if os.IsPermission(err) { log.Fatal(). 
diff --git a/hscontrol/app.go b/hscontrol/app.go index 75dfddee1f2..4aead8a46ce 100644 --- a/hscontrol/app.go +++ b/hscontrol/app.go @@ -113,7 +113,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath) if err != nil { - return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err) + return nil, fmt.Errorf( + "failed to read or create Noise protocol private key: %w", + err, + ) } var dbString string @@ -200,7 +203,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { if cfg.DERP.ServerEnabled { derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath) if err != nil { - return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err) + return nil, fmt.Errorf( + "failed to read or create DERP server private key: %w", + err, + ) } if derpServerKey.Equal(*noisePrivateKey) { @@ -268,7 +274,8 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) { case <-ticker.C: log.Info().Msg("Fetching DERPMap updates") h.DERPMap = derp.GetDERPMap(h.cfg.DERP) - if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion { + if h.cfg.DERP.ServerEnabled && + h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion { region, _ := h.DERPServer.GenerateRegion() h.DERPMap.Regions[region.RegionID] = ®ion } @@ -471,7 +478,10 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { if h.cfg.DERP.ServerEnabled { router.HandleFunc("/derp", h.DERPServer.DERPHandler) router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler) - router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.DERPMap)) + router.HandleFunc( + "/bootstrap-dns", + derpServer.DERPBootstrapDNSHandler(h.DERPMap), + ) } apiRouter := router.PathPrefix("/api").Subrouter() @@ -711,7 +721,9 @@ func (h *Headscale) Serve() error { var tailsqlContext context.Context if tailsqlEnabled { if h.cfg.DBtype != db.Sqlite { - log.Fatal().Str("type", h.cfg.DBtype).Msgf("tailsql only support %q", db.Sqlite) + log.Fatal(). + Str("type", h.cfg.DBtype). + Msgf("tailsql only support %q", db.Sqlite) } if tailsqlTSKey == "" { log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set") diff --git a/hscontrol/auth.go b/hscontrol/auth.go index 4fe5a16b5cf..8a58e8bac07 100644 --- a/hscontrol/auth.go +++ b/hscontrol/auth.go @@ -66,7 +66,11 @@ func (h *Headscale) handleRegister( logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey) now := time.Now().UTC() logTrace("handleRegister called, looking up machine in DB") - node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey) + node, err := h.db.GetNodeByAnyKey( + machineKey, + registerRequest.NodeKey, + registerRequest.OldNodeKey, + ) logTrace("handleRegister database lookup has returned") if errors.Is(err, gorm.ErrRecordNotFound) { // If the node has AuthKey set, handle registration via PreAuthKeys @@ -302,7 +306,11 @@ func (h *Headscale) handleAuthKey( // The error is not important, because if it does not // exist, then this is a new node and we will move // on to registration. - node, _ := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey) + node, _ := h.db.GetNodeByAnyKey( + machineKey, + registerRequest.NodeKey, + registerRequest.OldNodeKey, + ) if node != nil { log.Trace(). Caller(). 
diff --git a/hscontrol/auth_noise.go b/hscontrol/auth_noise.go index 323a49b0936..37b9bf239ad 100644 --- a/hscontrol/auth_noise.go +++ b/hscontrol/auth_noise.go @@ -14,7 +14,9 @@ func (ns *noiseServer) NoiseRegistrationHandler( writer http.ResponseWriter, req *http.Request, ) { - log.Trace().Caller().Msgf("Noise registration handler for client %s", req.RemoteAddr) + log.Trace(). + Caller(). + Msgf("Noise registration handler for client %s", req.RemoteAddr) if req.Method != http.MethodPost { http.Error(writer, "Wrong method", http.StatusMethodNotAllowed) diff --git a/hscontrol/db/addresses.go b/hscontrol/db/addresses.go index beccf84379c..8db7759bf5e 100644 --- a/hscontrol/db/addresses.go +++ b/hscontrol/db/addresses.go @@ -38,7 +38,9 @@ func (hsdb *HSDatabase) getAvailableIP(ipPrefix netip.Prefix) (*netip.Addr, erro return nil, err } - ipPrefixNetworkAddress, ipPrefixBroadcastAddress := util.GetIPPrefixEndpoints(ipPrefix) + ipPrefixNetworkAddress, ipPrefixBroadcastAddress := util.GetIPPrefixEndpoints( + ipPrefix, + ) // Get the first IP in our prefix ip := ipPrefixNetworkAddress.Next() diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go index 030a6f0babd..ef3626345fa 100644 --- a/hscontrol/db/db.go +++ b/hscontrol/db/db.go @@ -61,256 +61,273 @@ func NewHeadscaleDatabase( return nil, err } - migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{ - // New migrations should be added as transactions at the end of this list. - // The initial commit here is quite messy, completely out of order and - // has no versioning and is the tech debt of not having versioned migrations - // prior to this point. This first migration is all DB changes to bring a DB - // up to 0.23.0. - { - ID: "202312101416", - Migrate: func(tx *gorm.DB) error { - if dbType == Postgres { - tx.Exec(`create extension if not exists "uuid-ossp";`) - } - - _ = tx.Migrator().RenameTable("namespaces", "users") - - // the big rename from Machine to Node - _ = tx.Migrator().RenameTable("machines", "nodes") - _ = tx.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id") - - err = tx.AutoMigrate(types.User{}) - if err != nil { - return err - } - - _ = tx.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id") - _ = tx.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id") - - _ = tx.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses") - _ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname") - - // GivenName is used as the primary source of DNS names, make sure - // the field is populated and normalized if it was not when the - // node was registered. - _ = tx.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name") - - // If the Node table has a column for registered, - // find all occourences of "false" and drop them. Then - // remove the column. - if tx.Migrator().HasColumn(&types.Node{}, "registered") { - log.Info(). - Msg(`Database has legacy "registered" column in node, removing...`) - - nodes := types.Nodes{} - if err := tx.Not("registered").Find(&nodes).Error; err != nil { - log.Error().Err(err).Msg("Error accessing db") + migrations := gormigrate.New( + dbConn, + gormigrate.DefaultOptions, + []*gormigrate.Migration{ + // New migrations should be added as transactions at the end of this list. + // The initial commit here is quite messy, completely out of order and + // has no versioning and is the tech debt of not having versioned migrations + // prior to this point. 
This first migration is all DB changes to bring a DB + // up to 0.23.0. + { + ID: "202312101416", + Migrate: func(tx *gorm.DB) error { + if dbType == Postgres { + tx.Exec(`create extension if not exists "uuid-ossp";`) } - for _, node := range nodes { + _ = tx.Migrator().RenameTable("namespaces", "users") + + // the big rename from Machine to Node + _ = tx.Migrator().RenameTable("machines", "nodes") + _ = tx.Migrator(). + RenameColumn(&types.Route{}, "machine_id", "node_id") + + err = tx.AutoMigrate(types.User{}) + if err != nil { + return err + } + + _ = tx.Migrator(). + RenameColumn(&types.Node{}, "namespace_id", "user_id") + _ = tx.Migrator(). + RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id") + + _ = tx.Migrator(). + RenameColumn(&types.Node{}, "ip_address", "ip_addresses") + _ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname") + + // GivenName is used as the primary source of DNS names, make sure + // the field is populated and normalized if it was not when the + // node was registered. + _ = tx.Migrator(). + RenameColumn(&types.Node{}, "nickname", "given_name") + + // If the Node table has a column for registered, + // find all occourences of "false" and drop them. Then + // remove the column. + if tx.Migrator().HasColumn(&types.Node{}, "registered") { log.Info(). - Str("node", node.Hostname). - Str("machine_key", node.MachineKey.ShortString()). - Msg("Deleting unregistered node") - if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil { - log.Error(). - Err(err). + Msg(`Database has legacy "registered" column in node, removing...`) + + nodes := types.Nodes{} + if err := tx.Not("registered").Find(&nodes).Error; err != nil { + log.Error().Err(err).Msg("Error accessing db") + } + + for _, node := range nodes { + log.Info(). Str("node", node.Hostname). Str("machine_key", node.MachineKey.ShortString()). - Msg("Error deleting unregistered node") + Msg("Deleting unregistered node") + if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil { + log.Error(). + Err(err). + Str("node", node.Hostname). + Str("machine_key", node.MachineKey.ShortString()). 
+ Msg("Error deleting unregistered node") + } + } + + err := tx.Migrator().DropColumn(&types.Node{}, "registered") + if err != nil { + log.Error().Err(err).Msg("Error dropping registered column") } } - err := tx.Migrator().DropColumn(&types.Node{}, "registered") + err = tx.AutoMigrate(&types.Route{}) if err != nil { - log.Error().Err(err).Msg("Error dropping registered column") - } - } - - err = tx.AutoMigrate(&types.Route{}) - if err != nil { - return err - } - - err = tx.AutoMigrate(&types.Node{}) - if err != nil { - return err - } - - // Ensure all keys have correct prefixes - // https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35 - type result struct { - ID uint64 - MachineKey string - NodeKey string - DiscoKey string - } - var results []result - err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes").Find(&results).Error - if err != nil { - return err - } - - for _, node := range results { - mKey := node.MachineKey - if !strings.HasPrefix(node.MachineKey, "mkey:") { - mKey = "mkey:" + node.MachineKey - } - nKey := node.NodeKey - if !strings.HasPrefix(node.NodeKey, "nodekey:") { - nKey = "nodekey:" + node.NodeKey + return err } - dKey := node.DiscoKey - if !strings.HasPrefix(node.DiscoKey, "discokey:") { - dKey = "discokey:" + node.DiscoKey + err = tx.AutoMigrate(&types.Node{}) + if err != nil { + return err } - err := tx.Exec( - "UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id", - sql.Named("mKey", mKey), - sql.Named("nKey", nKey), - sql.Named("dKey", dKey), - sql.Named("id", node.ID), - ).Error + // Ensure all keys have correct prefixes + // https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35 + type result struct { + ID uint64 + MachineKey string + NodeKey string + DiscoKey string + } + var results []result + err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes"). + Find(&results). + Error if err != nil { return err } - } - if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") { - log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...") + for _, node := range results { + mKey := node.MachineKey + if !strings.HasPrefix(node.MachineKey, "mkey:") { + mKey = "mkey:" + node.MachineKey + } + nKey := node.NodeKey + if !strings.HasPrefix(node.NodeKey, "nodekey:") { + nKey = "nodekey:" + node.NodeKey + } - type NodeAux struct { - ID uint64 - EnabledRoutes types.IPPrefixes - } + dKey := node.DiscoKey + if !strings.HasPrefix(node.DiscoKey, "discokey:") { + dKey = "discokey:" + node.DiscoKey + } - nodesAux := []NodeAux{} - err := tx.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error - if err != nil { - log.Fatal().Err(err).Msg("Error accessing db") + err := tx.Exec( + "UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id", + sql.Named("mKey", mKey), + sql.Named("nKey", nKey), + sql.Named("dKey", dKey), + sql.Named("id", node.ID), + ).Error + if err != nil { + return err + } } - for _, node := range nodesAux { - for _, prefix := range node.EnabledRoutes { - if err != nil { - log.Error(). - Err(err). - Str("enabled_route", prefix.String()). - Msg("Error parsing enabled_route") - - continue - } - err = tx.Preload("Node"). - Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)). - First(&types.Route{}). - Error - if err == nil { - log.Info(). - Str("enabled_route", prefix.String()). 
- Msg("Route already migrated to new table, skipping") + if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") { + log.Info(). + Msgf("Database has legacy enabled_routes column in node, migrating...") - continue - } + type NodeAux struct { + ID uint64 + EnabledRoutes types.IPPrefixes + } - route := types.Route{ - NodeID: node.ID, - Advertised: true, - Enabled: true, - Prefix: types.IPPrefix(prefix), - } - if err := tx.Create(&route).Error; err != nil { - log.Error().Err(err).Msg("Error creating route") - } else { - log.Info(). - Uint64("node_id", route.NodeID). - Str("prefix", prefix.String()). - Msg("Route migrated") + nodesAux := []NodeAux{} + err := tx.Table("nodes"). + Select("id, enabled_routes"). + Scan(&nodesAux). + Error + if err != nil { + log.Fatal().Err(err).Msg("Error accessing db") + } + for _, node := range nodesAux { + for _, prefix := range node.EnabledRoutes { + if err != nil { + log.Error(). + Err(err). + Str("enabled_route", prefix.String()). + Msg("Error parsing enabled_route") + + continue + } + + err = tx.Preload("Node"). + Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)). + First(&types.Route{}). + Error + if err == nil { + log.Info(). + Str("enabled_route", prefix.String()). + Msg("Route already migrated to new table, skipping") + + continue + } + + route := types.Route{ + NodeID: node.ID, + Advertised: true, + Enabled: true, + Prefix: types.IPPrefix(prefix), + } + if err := tx.Create(&route).Error; err != nil { + log.Error().Err(err).Msg("Error creating route") + } else { + log.Info(). + Uint64("node_id", route.NodeID). + Str("prefix", prefix.String()). + Msg("Route migrated") + } } } - } - err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes") - if err != nil { - log.Error().Err(err).Msg("Error dropping enabled_routes column") - } - } - - if tx.Migrator().HasColumn(&types.Node{}, "given_name") { - nodes := types.Nodes{} - if err := tx.Find(&nodes).Error; err != nil { - log.Error().Err(err).Msg("Error accessing db") + err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes") + if err != nil { + log.Error(). + Err(err). + Msg("Error dropping enabled_routes column") + } } - for item, node := range nodes { - if node.GivenName == "" { - normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper( - node.Hostname, - ) - if err != nil { - log.Error(). - Caller(). - Str("hostname", node.Hostname). - Err(err). - Msg("Failed to normalize node hostname in DB migration") - } + if tx.Migrator().HasColumn(&types.Node{}, "given_name") { + nodes := types.Nodes{} + if err := tx.Find(&nodes).Error; err != nil { + log.Error().Err(err).Msg("Error accessing db") + } - err = tx.Model(nodes[item]).Updates(types.Node{ - GivenName: normalizedHostname, - }).Error - if err != nil { - log.Error(). - Caller(). - Str("hostname", node.Hostname). - Err(err). - Msg("Failed to save normalized node name in DB migration") + for item, node := range nodes { + if node.GivenName == "" { + normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper( + node.Hostname, + ) + if err != nil { + log.Error(). + Caller(). + Str("hostname", node.Hostname). + Err(err). + Msg("Failed to normalize node hostname in DB migration") + } + + err = tx.Model(nodes[item]).Updates(types.Node{ + GivenName: normalizedHostname, + }).Error + if err != nil { + log.Error(). + Caller(). + Str("hostname", node.Hostname). + Err(err). 
+ Msg("Failed to save normalized node name in DB migration") + } } } } - } - err = tx.AutoMigrate(&KV{}) - if err != nil { - return err - } + err = tx.AutoMigrate(&KV{}) + if err != nil { + return err + } - err = tx.AutoMigrate(&types.PreAuthKey{}) - if err != nil { - return err - } + err = tx.AutoMigrate(&types.PreAuthKey{}) + if err != nil { + return err + } - err = tx.AutoMigrate(&types.PreAuthKeyACLTag{}) - if err != nil { - return err - } + err = tx.AutoMigrate(&types.PreAuthKeyACLTag{}) + if err != nil { + return err + } - _ = tx.Migrator().DropTable("shared_machines") + _ = tx.Migrator().DropTable("shared_machines") - err = tx.AutoMigrate(&types.APIKey{}) - if err != nil { - return err - } + err = tx.AutoMigrate(&types.APIKey{}) + if err != nil { + return err + } - return nil + return nil + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, }, - Rollback: func(tx *gorm.DB) error { - return nil + { + // drop key-value table, it is not used, and has not contained + // useful data for a long time or ever. + ID: "202312101430", + Migrate: func(tx *gorm.DB) error { + return tx.Migrator().DropTable("kvs") + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, }, }, - { - // drop key-value table, it is not used, and has not contained - // useful data for a long time or ever. - ID: "202312101430", - Migrate: func(tx *gorm.DB) error { - return tx.Migrator().DropTable("kvs") - }, - Rollback: func(tx *gorm.DB) error { - return nil - }, - }, - }) + ) if err = migrations.Migrate(); err != nil { log.Fatal().Err(err).Msgf("Migration failed: %v", err) @@ -328,7 +345,10 @@ func NewHeadscaleDatabase( } func openDB(dbType, connectionAddr string, debug bool) (*gorm.DB, error) { - log.Debug().Str("type", dbType).Str("connection", connectionAddr).Msg("opening database") + log.Debug(). + Str("type", dbType). + Str("connection", connectionAddr). + Msg("opening database") var dbLogger logger.Interface if debug { diff --git a/hscontrol/db/node.go b/hscontrol/db/node.go index e2a82cc3ec1..bd7d74032a3 100644 --- a/hscontrol/db/node.go +++ b/hscontrol/db/node.go @@ -486,7 +486,10 @@ func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) { // adding it to the registrationCache if len(node.IPAddresses) > 0 { if err := hsdb.db.Save(&node).Error; err != nil { - return nil, fmt.Errorf("failed register existing node in the database: %w", err) + return nil, fmt.Errorf( + "failed register existing node in the database: %w", + err, + ) } log.Trace(). 
diff --git a/hscontrol/db/node_test.go b/hscontrol/db/node_test.go index 140c264ba3e..048e7aa1039 100644 --- a/hscontrol/db/node_test.go +++ b/hscontrol/db/node_test.go @@ -126,7 +126,11 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) { } db.db.Save(&node) - _, err = db.GetNodeByAnyKey(machineKey.Public(), nodeKey.Public(), oldNodeKey.Public()) + _, err = db.GetNodeByAnyKey( + machineKey.Public(), + nodeKey.Public(), + oldNodeKey.Public(), + ) c.Assert(err, check.IsNil) } @@ -266,10 +270,18 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) { testPeers, err := db.ListPeers(testNode) c.Assert(err, check.IsNil) - adminRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, adminNode, adminPeers) + adminRules, _, err := policy.GenerateFilterAndSSHRules( + aclPolicy, + adminNode, + adminPeers, + ) c.Assert(err, check.IsNil) - testRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, testNode, testPeers) + testRules, _, err := policy.GenerateFilterAndSSHRules( + aclPolicy, + testNode, + testPeers, + ) c.Assert(err, check.IsNil) peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules) @@ -387,7 +399,12 @@ func (s *Suite) TestGenerateGivenName(c *check.C) { givenName, err = db.GenerateGivenName(machineKey2.Public(), "hostname-1") comment = check.Commentf("Same user, unique nodes, same hostname, conflict") c.Assert(err, check.IsNil, comment) - c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment) + c.Assert( + givenName, + check.Matches, + fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), + comment, + ) } func (s *Suite) TestSetTags(c *check.C) { @@ -461,7 +478,9 @@ func TestHeadscale_generateGivenName(t *testing.T) { suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine", randomSuffix: false, }, - want: regexp.MustCompile("^testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine$"), + want: regexp.MustCompile( + "^testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine$", + ), wantErr: false, }, { @@ -470,7 +489,9 @@ func TestHeadscale_generateGivenName(t *testing.T) { suppliedName: "nodeeeeeee12345678901234567890123456789012345678901234567890123", randomSuffix: false, }, - want: regexp.MustCompile("^nodeeeeeee12345678901234567890123456789012345678901234567890123$"), + want: regexp.MustCompile( + "^nodeeeeeee12345678901234567890123456789012345678901234567890123$", + ), wantErr: false, }, { @@ -497,7 +518,9 @@ func TestHeadscale_generateGivenName(t *testing.T) { suppliedName: "test", randomSuffix: true, }, - want: regexp.MustCompile(fmt.Sprintf("^test-[a-z0-9]{%d}$", NodeGivenNameHashLength)), + want: regexp.MustCompile( + fmt.Sprintf("^test-[a-z0-9]{%d}$", NodeGivenNameHashLength), + ), wantErr: false, }, { @@ -506,7 +529,12 @@ func TestHeadscale_generateGivenName(t *testing.T) { suppliedName: "nodeeee12345678901234567890123456789012345678901234567890123", randomSuffix: true, }, - want: regexp.MustCompile(fmt.Sprintf("^nodeeee1234567890123456789012345678901234567890123456-[a-z0-9]{%d}$", NodeGivenNameHashLength)), + want: regexp.MustCompile( + fmt.Sprintf( + "^nodeeee1234567890123456789012345678901234567890123456-[a-z0-9]{%d}$", + NodeGivenNameHashLength, + ), + ), wantErr: false, }, } diff --git a/hscontrol/db/preauth_keys.go b/hscontrol/db/preauth_keys.go index e743988fd87..45f8044e545 100644 --- a/hscontrol/db/preauth_keys.go +++ b/hscontrol/db/preauth_keys.go @@ -117,7 +117,10 @@ func (hsdb *HSDatabase) listPreAuthKeys(userName string) ([]types.PreAuthKey, er } // 
GetPreAuthKey returns a PreAuthKey for a given key. -func (hsdb *HSDatabase) GetPreAuthKey(user string, key string) (*types.PreAuthKey, error) { +func (hsdb *HSDatabase) GetPreAuthKey( + user string, + key string, +) (*types.PreAuthKey, error) { hsdb.mu.RLock() defer hsdb.mu.RUnlock() diff --git a/hscontrol/db/routes.go b/hscontrol/db/routes.go index dcf00bcb66f..86d2ec45a38 100644 --- a/hscontrol/db/routes.go +++ b/hscontrol/db/routes.go @@ -62,14 +62,18 @@ func (hsdb *HSDatabase) getRoutesByPrefix(pref netip.Prefix) (types.Routes, erro return routes, nil } -func (hsdb *HSDatabase) GetNodeAdvertisedRoutes(node *types.Node) (types.Routes, error) { +func (hsdb *HSDatabase) GetNodeAdvertisedRoutes( + node *types.Node, +) (types.Routes, error) { hsdb.mu.RLock() defer hsdb.mu.RUnlock() return hsdb.getNodeAdvertisedRoutes(node) } -func (hsdb *HSDatabase) getNodeAdvertisedRoutes(node *types.Node) (types.Routes, error) { +func (hsdb *HSDatabase) getNodeAdvertisedRoutes( + node *types.Node, +) (types.Routes, error) { var routes types.Routes err := hsdb.db. Preload("Node"). @@ -639,7 +643,8 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes( aclPolicy *policy.ACLPolicy, node *types.Node, ) error { - if len(aclPolicy.AutoApprovers.ExitNode) == 0 && len(aclPolicy.AutoApprovers.Routes) == 0 { + if len(aclPolicy.AutoApprovers.ExitNode) == 0 && + len(aclPolicy.AutoApprovers.Routes) == 0 { // No autoapprovers configured return nil } diff --git a/hscontrol/derp/server/derp_server.go b/hscontrol/derp/server/derp_server.go index ad325c7a1c2..ce63ae9fd0e 100644 --- a/hscontrol/derp/server/derp_server.go +++ b/hscontrol/derp/server/derp_server.go @@ -40,7 +40,10 @@ func NewDERPServer( cfg *types.DERPConfig, ) (*DERPServer, error) { log.Trace().Caller().Msg("Creating new embedded DERP server") - server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains + server := derp.NewServer( + derpKey, + util.TSLogfWrapper(), + ) // nolint // zerolinter complains return &DERPServer{ serverURL: serverURL, diff --git a/hscontrol/mapper/mapper.go b/hscontrol/mapper/mapper.go index 9998f128a91..95890cac673 100644 --- a/hscontrol/mapper/mapper.go +++ b/hscontrol/mapper/mapper.go @@ -107,7 +107,12 @@ func NewMapper( } func (m *Mapper) String() string { - return fmt.Sprintf("Mapper: { seq: %d, uid: %s, created: %s }", m.seq, m.uid, m.created) + return fmt.Sprintf( + "Mapper: { seq: %d, uid: %s, created: %s }", + m.seq, + m.uid, + m.created, + ) } func generateUserProfiles( @@ -363,7 +368,12 @@ func (m *Mapper) PeerChangedResponse( return nil, err } - return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...) + return m.marshalMapResponse( + mapRequest, + &resp, + node, + mapRequest.Compress, + messages...) 
} // PeerChangedPatchResponse creates a patch MapResponse with @@ -499,7 +509,13 @@ func (m *Mapper) marshalMapResponse( mapResponsePath := path.Join( mPath, - fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType), + fmt.Sprintf( + "%d-%s-%d-%s.json", + now, + m.uid, + atomic.LoadUint64(&m.seq), + responseType, + ), ) log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath) @@ -573,7 +589,14 @@ func (m *Mapper) baseWithConfigMapResponse( ) (*tailcfg.MapResponse, error) { resp := m.baseMapResponse() - tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort) + tailnode, err := tailNode( + node, + capVer, + pol, + m.dnsCfg, + m.baseDomain, + m.randomClientPort, + ) if err != nil { return nil, err } @@ -646,7 +669,14 @@ func appendPeerChanges( peers, ) - tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort) + tailPeers, err := tailNodes( + changed, + capVer, + pol, + dnsCfg, + baseDomain, + randomClientPort, + ) if err != nil { return err } diff --git a/hscontrol/mapper/mapper_test.go b/hscontrol/mapper/mapper_test.go index bcc17dd4f18..5db2306c9cb 100644 --- a/hscontrol/mapper/mapper_test.go +++ b/hscontrol/mapper/mapper_test.go @@ -367,9 +367,11 @@ func Test_fullMapResponse(t *testing.T) { Domain: "", CollectServices: "false", PacketFilter: []tailcfg.FilterRule{}, - UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}}, - SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, - ControlTime: &time.Time{}, + UserProfiles: []tailcfg.UserProfile{ + {LoginName: "mini", DisplayName: "mini"}, + }, + SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, + ControlTime: &time.Time{}, Debug: &tailcfg.Debug{ DisableLogTail: true, }, @@ -399,9 +401,11 @@ func Test_fullMapResponse(t *testing.T) { Domain: "", CollectServices: "false", PacketFilter: []tailcfg.FilterRule{}, - UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}}, - SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, - ControlTime: &time.Time{}, + UserProfiles: []tailcfg.UserProfile{ + {LoginName: "mini", DisplayName: "mini"}, + }, + SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, + ControlTime: &time.Time{}, Debug: &tailcfg.Debug{ DisableLogTail: true, }, diff --git a/hscontrol/mapper/tail_test.go b/hscontrol/mapper/tail_test.go index f6e370c4a57..b2ec3c89acb 100644 --- a/hscontrol/mapper/tail_test.go +++ b/hscontrol/mapper/tail_test.go @@ -112,13 +112,17 @@ func TestTailNode(t *testing.T) { IsPrimary: false, }, { - Prefix: types.IPPrefix(netip.MustParsePrefix("192.168.0.0/24")), + Prefix: types.IPPrefix( + netip.MustParsePrefix("192.168.0.0/24"), + ), Advertised: true, Enabled: true, IsPrimary: true, }, { - Prefix: types.IPPrefix(netip.MustParsePrefix("172.0.0.0/10")), + Prefix: types.IPPrefix( + netip.MustParsePrefix("172.0.0.0/10"), + ), Advertised: true, Enabled: false, IsPrimary: true, diff --git a/hscontrol/notifier/notifier.go b/hscontrol/notifier/notifier.go index 77e8b199bf3..c223b8c0aa5 100644 --- a/hscontrol/notifier/notifier.go +++ b/hscontrol/notifier/notifier.go @@ -21,8 +21,14 @@ func NewNotifier() *Notifier { } func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) { - log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node") - defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to add node") + log.Trace(). + Caller(). 
+ Str("key", machineKey.ShortString()). + Msg("acquiring lock to add node") + defer log.Trace(). + Caller(). + Str("key", machineKey.ShortString()). + Msg("releasing lock to add node") n.l.Lock() defer n.l.Unlock() @@ -40,8 +46,14 @@ func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpd } func (n *Notifier) RemoveNode(machineKey key.MachinePublic) { - log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node") - defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to remove node") + log.Trace(). + Caller(). + Str("key", machineKey.ShortString()). + Msg("acquiring lock to remove node") + defer log.Trace(). + Caller(). + Str("key", machineKey.ShortString()). + Msg("releasing lock to remove node") n.l.Lock() defer n.l.Unlock() @@ -90,12 +102,19 @@ func (n *Notifier) NotifyWithIgnore(update types.StateUpdate, ignore ...string) continue } - log.Trace().Caller().Str("machine", key).Strs("ignoring", ignore).Msg("sending update") + log.Trace(). + Caller(). + Str("machine", key). + Strs("ignoring", ignore). + Msg("sending update") c <- update } } -func (n *Notifier) NotifyByMachineKey(update types.StateUpdate, mKey key.MachinePublic) { +func (n *Notifier) NotifyByMachineKey( + update types.StateUpdate, + mKey key.MachinePublic, +) { log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify") defer log.Trace(). Caller(). diff --git a/hscontrol/oidc.go b/hscontrol/oidc.go index 568519fd51f..9e8601ef67a 100644 --- a/hscontrol/oidc.go +++ b/hscontrol/oidc.go @@ -29,12 +29,16 @@ const ( var ( errEmptyOIDCCallbackParams = errors.New("empty OIDC callback params") - errNoOIDCIDToken = errors.New("could not extract ID Token for OIDC callback") - errOIDCAllowedDomains = errors.New( + errNoOIDCIDToken = errors.New( + "could not extract ID Token for OIDC callback", + ) + errOIDCAllowedDomains = errors.New( "authenticated principal does not match any allowed domain", ) - errOIDCAllowedGroups = errors.New("authenticated principal is not in any allowed group") - errOIDCAllowedUsers = errors.New( + errOIDCAllowedGroups = errors.New( + "authenticated principal is not in any allowed group", + ) + errOIDCAllowedUsers = errors.New( "authenticated principal does not match any allowed user", ) errOIDCInvalidNodeState = errors.New( diff --git a/hscontrol/platform_config.go b/hscontrol/platform_config.go index 0404f5465ee..8dfaf8a6f08 100644 --- a/hscontrol/platform_config.go +++ b/hscontrol/platform_config.go @@ -255,7 +255,9 @@ func (h *Headscale) ApplePlatformConfig( writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusBadRequest) _, err := writer.Write( - []byte("Invalid platform. Only ios, macos-app-store and macos-standalone are supported"), + []byte( + "Invalid platform. Only ios, macos-app-store and macos-standalone are supported", + ), ) if err != nil { log.Error(). 
@@ -390,7 +392,8 @@ var macosAppStoreTemplate = template.Must(template.New("macosTemplate").Parse(` `)) -var macosStandaloneTemplate = template.Must(template.New("macosStandaloneTemplate").Parse(` +var macosStandaloneTemplate = template.Must( + template.New("macosStandaloneTemplate").Parse(` PayloadType io.tailscale.ipn.macsys @@ -405,4 +408,5 @@ var macosStandaloneTemplate = template.Must(template.New("macosStandaloneTemplat ControlURL {{.URL}} -`)) +`), +) diff --git a/hscontrol/policy/acls.go b/hscontrol/policy/acls.go index 1dd664c8b80..fd791c6565b 100644 --- a/hscontrol/policy/acls.go +++ b/hscontrol/policy/acls.go @@ -232,7 +232,10 @@ func (pol *ACLPolicy) generateFilterRules( // ReduceFilterRules takes a node and a set of rules and removes all rules and destinations // that are not relevant to that particular node. -func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.FilterRule { +func ReduceFilterRules( + node *types.Node, + rules []tailcfg.FilterRule, +) []tailcfg.FilterRule { ret := []tailcfg.FilterRule{} for _, rule := range rules { @@ -431,7 +434,8 @@ func parseDestination(dest string) (string, string, error) { filteredMaybeIPv6Str = networkParts[0] } - if maybeIPv6, err := netip.ParseAddr(filteredMaybeIPv6Str); err != nil && !maybeIPv6.Is6() { + if maybeIPv6, err := netip.ParseAddr(filteredMaybeIPv6Str); err != nil && + !maybeIPv6.Is6() { log.Trace().Err(err).Msg("trying to parse as IPv6") return "", "", fmt.Errorf( diff --git a/hscontrol/policy/acls_test.go b/hscontrol/policy/acls_test.go index 4a74bdaf1a3..8b63d395f88 100644 --- a/hscontrol/policy/acls_test.go +++ b/hscontrol/policy/acls_test.go @@ -96,8 +96,14 @@ func TestParsing(t *testing.T) { { SrcIPs: []string{"100.100.101.0/24", "192.168.1.0/24"}, DstPorts: []tailcfg.NetPortRange{ - {IP: "0.0.0.0/0", Ports: tailcfg.PortRange{First: 22, Last: 22}}, - {IP: "0.0.0.0/0", Ports: tailcfg.PortRange{First: 3389, Last: 3389}}, + { + IP: "0.0.0.0/0", + Ports: tailcfg.PortRange{First: 22, Last: 22}, + }, + { + IP: "0.0.0.0/0", + Ports: tailcfg.PortRange{First: 3389, Last: 3389}, + }, {IP: "::/0", Ports: tailcfg.PortRange{First: 22, Last: 22}}, {IP: "::/0", Ports: tailcfg.PortRange{First: 3389, Last: 3389}}, {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, @@ -160,7 +166,10 @@ func TestParsing(t *testing.T) { { SrcIPs: []string{"0.0.0.0/0", "::/0"}, DstPorts: []tailcfg.NetPortRange{ - {IP: "100.100.100.100/32", Ports: tailcfg.PortRange{First: 53, Last: 53}}, + { + IP: "100.100.100.100/32", + Ports: tailcfg.PortRange{First: 53, Last: 53}, + }, }, IPProto: []int{protocolUDP}, }, @@ -996,7 +1005,9 @@ func Test_expandAlias(t *testing.T) { alias: "*", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, + IPAddresses: types.NodeAddresses{ + netip.MustParseAddr("100.64.0.1"), + }, }, &types.Node{ IPAddresses: types.NodeAddresses{ @@ -1151,7 +1162,9 @@ func Test_expandAlias(t *testing.T) { &types.Node{ IPAddresses: types.NodeAddresses{ netip.MustParseAddr("10.0.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), + netip.MustParseAddr( + "fd7a:115c:a1e0:ab12:4843:2222:6273:2222", + ), }, User: types.User{Name: "mickael"}, }, @@ -1173,7 +1186,9 @@ func Test_expandAlias(t *testing.T) { &types.Node{ IPAddresses: types.NodeAddresses{ netip.MustParseAddr("10.0.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), + netip.MustParseAddr( + "fd7a:115c:a1e0:ab12:4843:2222:6273:2222", + ), }, User: types.User{Name: 
"mickael"}, }, @@ -1811,7 +1826,9 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { &types.Node{ IPAddresses: types.NodeAddresses{ netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), + netip.MustParseAddr( + "fd7a:115c:a1e0:ab12:4843:2222:6273:2222", + ), }, User: types.User{Name: "mickael"}, }, @@ -1851,14 +1868,21 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { tt.args.peers, ) if (err != nil) != tt.wantErr { - t.Errorf("ACLgenerateFilterRules() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf( + "ACLgenerateFilterRules() error = %v, wantErr %v", + err, + tt.wantErr, + ) return } if diff := cmp.Diff(tt.want, got); diff != "" { log.Trace().Interface("got", got).Msg("result") - t.Errorf("ACLgenerateFilterRules() unexpected result (-want +got):\n%s", diff) + t.Errorf( + "ACLgenerateFilterRules() unexpected result (-want +got):\n%s", + diff, + ) } }) } @@ -1989,7 +2013,10 @@ func TestReduceFilterRules(t *testing.T) { if diff := cmp.Diff(tt.want, got); diff != "" { log.Trace().Interface("got", got).Msg("result") - t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff) + t.Errorf( + "TestReduceFilterRules() unexpected result (-want +got):\n%s", + diff, + ) } }) } @@ -2888,7 +2915,10 @@ func TestSSHRules(t *testing.T) { SSHUsers: map[string]string{ "autogroup:nonroot": "=", }, - Action: &tailcfg.SSHAction{Accept: true, AllowLocalPortForwarding: true}, + Action: &tailcfg.SSHAction{ + Accept: true, + AllowLocalPortForwarding: true, + }, }, { SSHUsers: map[string]string{ @@ -2899,7 +2929,10 @@ func TestSSHRules(t *testing.T) { Any: true, }, }, - Action: &tailcfg.SSHAction{Accept: true, AllowLocalPortForwarding: true}, + Action: &tailcfg.SSHAction{ + Accept: true, + AllowLocalPortForwarding: true, + }, }, { Principals: []*tailcfg.SSHPrincipal{ @@ -2910,7 +2943,10 @@ func TestSSHRules(t *testing.T) { SSHUsers: map[string]string{ "autogroup:nonroot": "=", }, - Action: &tailcfg.SSHAction{Accept: true, AllowLocalPortForwarding: true}, + Action: &tailcfg.SSHAction{ + Accept: true, + AllowLocalPortForwarding: true, + }, }, { SSHUsers: map[string]string{ @@ -2921,7 +2957,10 @@ func TestSSHRules(t *testing.T) { Any: true, }, }, - Action: &tailcfg.SSHAction{Accept: true, AllowLocalPortForwarding: true}, + Action: &tailcfg.SSHAction{ + Accept: true, + AllowLocalPortForwarding: true, + }, }, }, }, @@ -2937,9 +2976,11 @@ func TestSSHRules(t *testing.T) { }, peers: types.Nodes{ &types.Node{ - Hostname: "testnodes2", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.99.42")}, - UserID: 0, + Hostname: "testnodes2", + IPAddresses: types.NodeAddresses{ + netip.MustParseAddr("100.64.99.42"), + }, + UserID: 0, User: types.User{ Name: "user1", }, @@ -3101,7 +3142,10 @@ func TestValidExpandTagOwnersInSources(t *testing.T) { } if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("TestValidExpandTagOwnersInSources() unexpected result (-want +got):\n%s", diff) + t.Errorf( + "TestValidExpandTagOwnersInSources() unexpected result (-want +got):\n%s", + diff, + ) } } diff --git a/hscontrol/poll.go b/hscontrol/poll.go index c867f2618a6..2cdfa4710a6 100644 --- a/hscontrol/poll.go +++ b/hscontrol/poll.go @@ -423,10 +423,21 @@ func (h *Headscale) handlePoll( } } - data, err = mapp.PeerChangedResponse(mapRequest, node, update.ChangeNodes, h.ACLPolicy, update.Message) + data, err = mapp.PeerChangedResponse( + mapRequest, + node, + update.ChangeNodes, + h.ACLPolicy, + update.Message, + ) case types.StatePeerChangedPatch: 
logInfo("Sending PeerChangedPatch MapResponse") - data, err = mapp.PeerChangedPatchResponse(mapRequest, node, update.ChangePatches, h.ACLPolicy) + data, err = mapp.PeerChangedPatchResponse( + mapRequest, + node, + update.ChangePatches, + h.ACLPolicy, + ) case types.StatePeerRemoved: logInfo("Sending PeerRemoved MapResponse") data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed) @@ -530,7 +541,10 @@ func (h *Headscale) updateNodeOnlineStatus(online bool, node *types.Node) { } } -func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) { +func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate]( + channel C, + node, name string, +) { log.Trace(). Str("handler", "PollNetMap"). Str("node", node). @@ -575,8 +589,14 @@ func (h *Headscale) handleLiteRequest( } } -func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.PeerChange) { - trace := log.Trace().Str("node_id", change.NodeID.String()).Str("hostname", hostname) +func logTracePeerChange( + hostname string, + hostinfoChange bool, + change *tailcfg.PeerChange, +) { + trace := log.Trace(). + Str("node_id", change.NodeID.String()). + Str("hostname", hostname) if change.Key != nil { trace = trace.Str("node_key", change.Key.ShortString()) diff --git a/hscontrol/tailsql.go b/hscontrol/tailsql.go index 973915ddfa3..052bc25d5f8 100644 --- a/hscontrol/tailsql.go +++ b/hscontrol/tailsql.go @@ -12,7 +12,11 @@ import ( "tailscale.com/types/logger" ) -func runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath string) error { +func runTailSQLService( + ctx context.Context, + logf logger.Logf, + stateDir, dbPath string, +) error { opts := tailsql.Options{ Hostname: "tailsql-headscale", StateDir: stateDir, @@ -73,10 +77,13 @@ func runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath s fmt.Errorf("no cert domains available for HTTPS") } base := "https://" + certDomains[0] - go http.Serve(lst, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - target := base + r.RequestURI - http.Redirect(w, r, target, http.StatusPermanentRedirect) - })) + go http.Serve( + lst, + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + target := base + r.RequestURI + http.Redirect(w, r, target, http.StatusPermanentRedirect) + }), + ) // log.Printf("Redirecting HTTP to HTTPS at %q", base) // For the real service, start a separate listener. 
diff --git a/hscontrol/types/common.go b/hscontrol/types/common.go index e38d8e39525..e9861a1dd44 100644 --- a/hscontrol/types/common.go +++ b/hscontrol/types/common.go @@ -142,7 +142,9 @@ func (su *StateUpdate) Valid() bool { } case StatePeerChangedPatch: if su.ChangePatches == nil { - panic("Mandatory field ChangePatches is not set on StatePeerChangedPatch update") + panic( + "Mandatory field ChangePatches is not set on StatePeerChangedPatch update", + ) } case StatePeerRemoved: if su.Removed == nil { @@ -150,7 +152,9 @@ func (su *StateUpdate) Valid() bool { } case StateSelfUpdate: if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 { - panic("Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node") + panic( + "Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node", + ) } case StateDERPUpdated: if su.DERPMap == nil { diff --git a/hscontrol/types/node.go b/hscontrol/types/node.go index 4434264690d..778b50fa665 100644 --- a/hscontrol/types/node.go +++ b/hscontrol/types/node.go @@ -22,9 +22,11 @@ import ( var ( ErrNodeAddressesInvalid = errors.New("failed to parse node addresses") - ErrHostnameTooLong = errors.New("hostname too long, cannot except 255 ASCII chars") - ErrNodeHasNoGivenName = errors.New("node has no given name") - ErrNodeUserHasNoName = errors.New("node user has no name") + ErrHostnameTooLong = errors.New( + "hostname too long, cannot except 255 ASCII chars", + ) + ErrNodeHasNoGivenName = errors.New("node has no given name") + ErrNodeUserHasNoName = errors.New("node user has no name") ) // Node is a Headscale client. @@ -343,15 +345,24 @@ func (node *Node) Proto() *v1.Node { return nodeProto } -func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (string, error) { +func (node *Node) GetFQDN( + dnsConfig *tailcfg.DNSConfig, + baseDomain string, +) (string, error) { var hostname string if dnsConfig != nil && dnsConfig.Proxied { // MagicDNS if node.GivenName == "" { - return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeHasNoGivenName) + return "", fmt.Errorf( + "failed to create valid FQDN: %w", + ErrNodeHasNoGivenName, + ) } if node.User.Name == "" { - return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeUserHasNoName) + return "", fmt.Errorf( + "failed to create valid FQDN: %w", + ErrNodeUserHasNoName, + ) } hostname = fmt.Sprintf( diff --git a/hscontrol/types/node_test.go b/hscontrol/types/node_test.go index 7e6c9840ebb..0ead321d067 100644 --- a/hscontrol/types/node_test.go +++ b/hscontrol/types/node_test.go @@ -134,11 +134,20 @@ func TestNodeAddressesOrder(t *testing.T) { } if len(strSlice) != len(expected) { - t.Fatalf("unexpected slice length: got %v, want %v", len(strSlice), len(expected)) + t.Fatalf( + "unexpected slice length: got %v, want %v", + len(strSlice), + len(expected), + ) } for i, addr := range strSlice { if addr != expected[i] { - t.Errorf("unexpected address at index %v: got %v, want %v", i, addr, expected[i]) + t.Errorf( + "unexpected address at index %v: got %v, want %v", + i, + addr, + expected[i], + ) } } } diff --git a/hscontrol/types/routes.go b/hscontrol/types/routes.go index 697cbc3608a..3e463f3dfef 100644 --- a/hscontrol/types/routes.go +++ b/hscontrol/types/routes.go @@ -35,7 +35,8 @@ func (r *Route) String() string { } func (r *Route) IsExitRoute() bool { - return netip.Prefix(r.Prefix) == ExitRouteV4 || netip.Prefix(r.Prefix) == ExitRouteV6 + return netip.Prefix(r.Prefix) == ExitRouteV4 || + netip.Prefix(r.Prefix) == ExitRouteV6 } func (r 
*Route) IsAnnouncable() bool { diff --git a/integration/acl_test.go b/integration/acl_test.go index 9a415ab2b35..5862b56d662 100644 --- a/integration/acl_test.go +++ b/integration/acl_test.go @@ -222,19 +222,25 @@ func TestACLHostsInNetMapTable(t *testing.T) { policy: policy.ACLPolicy{ ACLs: []policy.ACL{ { - Action: "accept", - Sources: []string{"user1"}, - Destinations: append([]string{"user1:*"}, veryLargeDestination...), + Action: "accept", + Sources: []string{"user1"}, + Destinations: append( + []string{"user1:*"}, + veryLargeDestination...), }, { - Action: "accept", - Sources: []string{"user2"}, - Destinations: append([]string{"user2:*"}, veryLargeDestination...), + Action: "accept", + Sources: []string{"user2"}, + Destinations: append( + []string{"user2:*"}, + veryLargeDestination...), }, { - Action: "accept", - Sources: []string{"user1"}, - Destinations: append([]string{"user2:*"}, veryLargeDestination...), + Action: "accept", + Sources: []string{"user1"}, + Destinations: append( + []string{"user2:*"}, + veryLargeDestination...), }, }, }, want: map[string]int{ diff --git a/integration/auth_oidc_test.go b/integration/auth_oidc_test.go index 7a0ed9c74a7..f00aea99165 100644 --- a/integration/auth_oidc_test.go +++ b/integration/auth_oidc_test.go @@ -62,7 +62,10 @@ func TestOIDCAuthenticationPingAll(t *testing.T) { "HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID, "CREDENTIALS_DIRECTORY_TEST": "/tmp", "HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret", - "HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain), + "HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf( + "%t", + oidcConfig.StripEmaildomain, + ), } err = scenario.CreateHeadscaleEnv( @@ -70,7 +73,10 @@ func TestOIDCAuthenticationPingAll(t *testing.T) { hsic.WithTestName("oidcauthping"), hsic.WithConfigEnv(oidcMap), hsic.WithHostnameAsServerURL(), - hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(oidcConfig.ClientSecret)), + hsic.WithFileInContainer( + "/tmp/hs_client_oidc_secret", + []byte(oidcConfig.ClientSecret), + ), ) assertNoErrHeadscaleEnv(t, err) @@ -116,10 +122,13 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) { assertNoErrf(t, "failed to run mock OIDC server: %s", err) oidcMap := map[string]string{ - "HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer, - "HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID, - "HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret, - "HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain), + "HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer, + "HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID, + "HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret, + "HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf( + "%t", + oidcConfig.StripEmaildomain, + ), "HEADSCALE_OIDC_USE_EXPIRY_FROM_TOKEN": "1", } @@ -145,7 +154,11 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) { }) success := pingAllHelper(t, allClients, allAddrs) - t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps)) + t.Logf( + "%d successful pings out of %d (before expiry)", + success, + len(allClients)*len(allIps), + ) // This is not great, but this sadly is a time dependent test, so the // safe thing to do is wait out the whole TTL time before checking if @@ -191,7 +204,9 @@ func (s *AuthOIDCScenario) CreateHeadscaleEnv( return nil } -func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*types.OIDCConfig, error) { +func (s *AuthOIDCScenario) runMockOIDC( + accessTTL 
time.Duration, +) (*types.OIDCConfig, error) { port, err := dockertestutil.RandomFreeHostPort() if err != nil { log.Fatalf("could not find an open port: %s", err) diff --git a/integration/auth_web_flow_test.go b/integration/auth_web_flow_test.go index 90ce571bbe3..0b0a51fb9cd 100644 --- a/integration/auth_web_flow_test.go +++ b/integration/auth_web_flow_test.go @@ -240,7 +240,11 @@ func (s *AuthWebFlowScenario) runTailscaleUp( err := client.WaitForRunning() if err != nil { - log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err) + log.Printf( + "error waiting for client %s to be ready: %s", + client.Hostname(), + err, + ) } } @@ -251,7 +255,11 @@ func (s *AuthWebFlowScenario) runTailscaleUp( for _, client := range user.Clients { err := client.WaitForRunning() if err != nil { - return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err) + return fmt.Errorf( + "%s failed to up tailscale node: %w", + client.Hostname(), + err, + ) } } @@ -261,7 +269,10 @@ func (s *AuthWebFlowScenario) runTailscaleUp( return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable) } -func (s *AuthWebFlowScenario) runHeadscaleRegister(userStr string, loginURL *url.URL) error { +func (s *AuthWebFlowScenario) runHeadscaleRegister( + userStr string, + loginURL *url.URL, +) error { headscale, err := s.Headscale() if err != nil { return err diff --git a/integration/cli_test.go b/integration/cli_test.go index d2d741e0405..43527e6c14b 100644 --- a/integration/cli_test.go +++ b/integration/cli_test.go @@ -14,7 +14,11 @@ import ( "github.com/stretchr/testify/assert" ) -func executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error { +func executeAndUnmarshal[T any]( + headscale ControlServer, + command []string, + result T, +) error { str, err := headscale.Execute(command) if err != nil { return err @@ -95,7 +99,10 @@ func TestUserCommand(t *testing.T) { ) assertNoErr(t, err) - result = []string{listAfterRenameUsers[0].GetName(), listAfterRenameUsers[1].GetName()} + result = []string{ + listAfterRenameUsers[0].GetName(), + listAfterRenameUsers[1].GetName(), + } sort.Strings(result) assert.Equal( @@ -120,7 +127,11 @@ func TestPreAuthKeyCommand(t *testing.T) { user: 0, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clipak"), + ) assertNoErr(t, err) headscale, err := scenario.Headscale() @@ -195,15 +206,21 @@ func TestPreAuthKeyCommand(t *testing.T) { assert.True( t, - listedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)), + listedPreAuthKeys[1].GetExpiration(). + AsTime(). + Before(time.Now().Add(time.Hour*26)), ) assert.True( t, - listedPreAuthKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)), + listedPreAuthKeys[2].GetExpiration(). + AsTime(). + Before(time.Now().Add(time.Hour*26)), ) assert.True( t, - listedPreAuthKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)), + listedPreAuthKeys[3].GetExpiration(). + AsTime(). 
+ Before(time.Now().Add(time.Hour*26)), ) for index := range listedPreAuthKeys { @@ -211,7 +228,11 @@ func TestPreAuthKeyCommand(t *testing.T) { continue } - assert.Equal(t, listedPreAuthKeys[index].GetAclTags(), []string{"tag:test1", "tag:test2"}) + assert.Equal( + t, + listedPreAuthKeys[index].GetAclTags(), + []string{"tag:test1", "tag:test2"}, + ) } // Test key expiry @@ -243,9 +264,18 @@ func TestPreAuthKeyCommand(t *testing.T) { ) assertNoErr(t, err) - assert.True(t, listedPreAuthKeysAfterExpire[1].GetExpiration().AsTime().Before(time.Now())) - assert.True(t, listedPreAuthKeysAfterExpire[2].GetExpiration().AsTime().After(time.Now())) - assert.True(t, listedPreAuthKeysAfterExpire[3].GetExpiration().AsTime().After(time.Now())) + assert.True( + t, + listedPreAuthKeysAfterExpire[1].GetExpiration().AsTime().Before(time.Now()), + ) + assert.True( + t, + listedPreAuthKeysAfterExpire[2].GetExpiration().AsTime().After(time.Now()), + ) + assert.True( + t, + listedPreAuthKeysAfterExpire[3].GetExpiration().AsTime().After(time.Now()), + ) } func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { @@ -262,7 +292,11 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { user: 0, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clipaknaexp"), + ) assertNoErr(t, err) headscale, err := scenario.Headscale() @@ -307,7 +341,9 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { assert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now())) assert.True( t, - listedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Minute*70)), + listedPreAuthKeys[1].GetExpiration(). + AsTime(). + Before(time.Now().Add(time.Minute*70)), ) } @@ -325,7 +361,11 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { user: 0, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clipakresueeph"), + ) assertNoErr(t, err) headscale, err := scenario.Headscale() @@ -521,7 +561,9 @@ func TestApiKeyCommand(t *testing.T) { // Expired assert.True( t, - listedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()), + listedAfterExpireAPIKeys[index].GetExpiration(). + AsTime(). 
+ Before(time.Now()), ) } else { // Not expired @@ -678,7 +720,11 @@ func TestNodeAdvertiseTagNoACLCommand(t *testing.T) { "user1": 1, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:test"})}, hsic.WithTestName("cliadvtags")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{tsic.WithTags([]string{"tag:test"})}, + hsic.WithTestName("cliadvtags"), + ) assertNoErr(t, err) headscale, err := scenario.Headscale() @@ -728,20 +774,25 @@ func TestNodeAdvertiseTagWithACLCommand(t *testing.T) { "user1": 1, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:exists"})}, hsic.WithTestName("cliadvtags"), hsic.WithACLPolicy( - &policy.ACLPolicy{ - ACLs: []policy.ACL{ - { - Action: "accept", - Sources: []string{"*"}, - Destinations: []string{"*:*"}, + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{tsic.WithTags([]string{"tag:exists"})}, + hsic.WithTestName("cliadvtags"), + hsic.WithACLPolicy( + &policy.ACLPolicy{ + ACLs: []policy.ACL{ + { + Action: "accept", + Sources: []string{"*"}, + Destinations: []string{"*:*"}, + }, + }, + TagOwners: map[string][]string{ + "tag:exists": {"user1"}, }, }, - TagOwners: map[string][]string{ - "tag:exists": {"user1"}, - }, - }, - )) + ), + ) assertNoErr(t, err) headscale, err := scenario.Headscale() diff --git a/integration/dockertestutil/network.go b/integration/dockertestutil/network.go index 89fdc8ec350..a9e12ede2d0 100644 --- a/integration/dockertestutil/network.go +++ b/integration/dockertestutil/network.go @@ -10,7 +10,10 @@ import ( var ErrContainerNotFound = errors.New("container not found") -func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (*dockertest.Network, error) { +func GetFirstOrCreateNetwork( + pool *dockertest.Pool, + name string, +) (*dockertest.Network, error) { networks, err := pool.NetworksByName(name) if err != nil || len(networks) == 0 { if _, err := pool.CreateNetwork(name); err == nil { @@ -43,9 +46,12 @@ func AddContainerToNetwork( return err } - err = pool.Client.ConnectNetwork(network.Network.ID, docker.NetworkConnectionOptions{ - Container: containers[0].ID, - }) + err = pool.Client.ConnectNetwork( + network.Network.ID, + docker.NetworkConnectionOptions{ + Container: containers[0].ID, + }, + ) if err != nil { return err } diff --git a/integration/embedded_derp_test.go b/integration/embedded_derp_test.go index 3a407496487..28070aebb22 100644 --- a/integration/embedded_derp_test.go +++ b/integration/embedded_derp_test.go @@ -90,7 +90,11 @@ func (s *EmbeddedDERPServerScenario) CreateHeadscaleEnv( return err } - headscaleURL.Host = fmt.Sprintf("%s:%s", hsServer.GetHostname(), headscaleURL.Port()) + headscaleURL.Host = fmt.Sprintf( + "%s:%s", + hsServer.GetHostname(), + headscaleURL.Port(), + ) err = hsServer.WaitForRunning() if err != nil { @@ -156,14 +160,22 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser( networkName, ) if err != nil { - return fmt.Errorf("failed to create or get %s network: %w", networkName, err) + return fmt.Errorf( + "failed to create or get %s network: %w", + networkName, + err, + ) } s.tsicNetworks[networkName] = network err = hsServer.ConnectToNetwork(network) if err != nil { - return fmt.Errorf("failed to connect headscale to %s network: %w", networkName, err) + return fmt.Errorf( + "failed to connect headscale to %s network: %w", + networkName, + err, + ) } version := requestedVersion diff --git a/integration/general_test.go b/integration/general_test.go index 
15c3a72c366..aa6d787b5c5 100644 --- a/integration/general_test.go +++ b/integration/general_test.go @@ -31,7 +31,11 @@ func TestPingAllByIP(t *testing.T) { "user2": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("pingallbyip"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -64,7 +68,11 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { "user2": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("pingallbyip"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -180,7 +188,11 @@ func TestEphemeral(t *testing.T) { t.Fatalf("failed to create user %s: %s", userName, err) } - err = scenario.CreateTailscaleNodesInUser(userName, "all", clientCount, []tsic.Option{}...) + err = scenario.CreateTailscaleNodesInUser( + userName, + "all", + clientCount, + []tsic.Option{}...) if err != nil { t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err) } @@ -254,7 +266,11 @@ func TestPingAllByHostname(t *testing.T) { "user4": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("pingallbyname"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -300,7 +316,11 @@ func TestTaildrop(t *testing.T) { "taildrop": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("taildrop"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -318,10 +338,19 @@ func TestTaildrop(t *testing.T) { command := []string{"apk", "add", "curl"} _, _, err := client.Execute(command) if err != nil { - t.Fatalf("failed to install curl on %s, err: %s", client.Hostname(), err) + t.Fatalf( + "failed to install curl on %s, err: %s", + client.Hostname(), + err, + ) } } - curlCommand := []string{"curl", "--unix-socket", "/var/run/tailscale/tailscaled.sock", "http://local-tailscaled.sock/localapi/v0/file-targets"} + curlCommand := []string{ + "curl", + "--unix-socket", + "/var/run/tailscale/tailscaled.sock", + "http://local-tailscaled.sock/localapi/v0/file-targets", + } err = retry(10, 1*time.Second, func() error { result, _, err := client.Execute(curlCommand) if err != nil { @@ -338,21 +367,38 @@ func TestTaildrop(t *testing.T) { for _, ft := range fts { ftStr += fmt.Sprintf("\t%s\n", ft.Node.Name) } - return fmt.Errorf("client %s does not have all its peers as FileTargets, got %d, want: %d\n%s", client.Hostname(), len(fts), len(allClients)-1, ftStr) + return fmt.Errorf( + "client %s does not have all its peers as FileTargets, got %d, want: %d\n%s", + client.Hostname(), + len(fts), + len(allClients)-1, + ftStr, + ) } return err }) if err != nil { - t.Errorf("failed to query localapi for filetarget on %s, err: %s", client.Hostname(), err) + t.Errorf( + "failed to query localapi for filetarget on %s, err: %s", + client.Hostname(), + err, + ) } } for _, client := range allClients { - command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())} + command := 
[]string{ + "touch", + fmt.Sprintf("/tmp/file_from_%s", client.Hostname()), + } if _, _, err := client.Execute(command); err != nil { - t.Fatalf("failed to create taildrop file on %s, err: %s", client.Hostname(), err) + t.Fatalf( + "failed to create taildrop file on %s, err: %s", + client.Hostname(), + err, + ) } for _, peer := range allClients { @@ -363,32 +409,35 @@ func TestTaildrop(t *testing.T) { // It is safe to ignore this error as we handled it when caching it peerFQDN, _ := peer.FQDN() - t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) { - command := []string{ - "tailscale", "file", "cp", - fmt.Sprintf("/tmp/file_from_%s", client.Hostname()), - fmt.Sprintf("%s:", peerFQDN), - } - - err := retry(10, 1*time.Second, func() error { - t.Logf( - "Sending file from %s to %s\n", - client.Hostname(), - peer.Hostname(), - ) - _, _, err := client.Execute(command) - - return err - }) - if err != nil { - t.Fatalf( - "failed to send taildrop file on %s with command %q, err: %s", - client.Hostname(), - strings.Join(command, " "), - err, - ) - } - }) + t.Run( + fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), + func(t *testing.T) { + command := []string{ + "tailscale", "file", "cp", + fmt.Sprintf("/tmp/file_from_%s", client.Hostname()), + fmt.Sprintf("%s:", peerFQDN), + } + + err := retry(10, 1*time.Second, func() error { + t.Logf( + "Sending file from %s to %s\n", + client.Hostname(), + peer.Hostname(), + ) + _, _, err := client.Execute(command) + + return err + }) + if err != nil { + t.Fatalf( + "failed to send taildrop file on %s with command %q, err: %s", + client.Hostname(), + strings.Join(command, " "), + err, + ) + } + }, + ) } } @@ -399,7 +448,11 @@ func TestTaildrop(t *testing.T) { "/tmp/", } if _, _, err := client.Execute(command); err != nil { - t.Fatalf("failed to get taildrop file on %s, err: %s", client.Hostname(), err) + t.Fatalf( + "failed to get taildrop file on %s, err: %s", + client.Hostname(), + err, + ) } for _, peer := range allClients { @@ -407,29 +460,32 @@ func TestTaildrop(t *testing.T) { continue } - t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) { - command := []string{ - "ls", - fmt.Sprintf("/tmp/file_from_%s", peer.Hostname()), - } - log.Printf( - "Checking file in %s from %s\n", - client.Hostname(), - peer.Hostname(), - ) - - result, _, err := client.Execute(command) - assertNoErrf(t, "failed to execute command to ls taildrop: %s", err) - - log.Printf("Result for %s: %s\n", peer.Hostname(), result) - if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result { - t.Fatalf( - "taildrop result is not correct %s, wanted %s", - result, - fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()), + t.Run( + fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), + func(t *testing.T) { + command := []string{ + "ls", + fmt.Sprintf("/tmp/file_from_%s", peer.Hostname()), + } + log.Printf( + "Checking file in %s from %s\n", + client.Hostname(), + peer.Hostname(), ) - } - }) + + result, _, err := client.Execute(command) + assertNoErrf(t, "failed to execute command to ls taildrop: %s", err) + + log.Printf("Result for %s: %s\n", peer.Hostname(), result) + if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result { + t.Fatalf( + "taildrop result is not correct %s, wanted %s", + result, + fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()), + ) + } + }, + ) } } } @@ -447,7 +503,11 @@ func TestResolveMagicDNS(t *testing.T) { "magicdns2": len(MustTestVersions), } - err = 
scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("magicdns"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -512,7 +572,11 @@ func TestExpireNode(t *testing.T) { "user1": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("expirenode"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -529,7 +593,11 @@ func TestExpireNode(t *testing.T) { }) success := pingAllHelper(t, allClients, allAddrs) - t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps)) + t.Logf( + "before expire: %d successful pings out of %d", + success, + len(allClients)*len(allIps), + ) for _, client := range allClients { status, err := client.Status() @@ -557,7 +625,11 @@ func TestExpireNode(t *testing.T) { err = expiredNodeKey.UnmarshalText([]byte(node.GetNodeKey())) assertNoErr(t, err) - t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String()) + t.Logf( + "Node %s with node_key %s has been expired", + node.GetName(), + expiredNodeKey.String(), + ) time.Sleep(2 * time.Minute) @@ -576,16 +648,40 @@ func TestExpireNode(t *testing.T) { assertNotNil(t, peerStatus.Expired) assert.NotNil(t, peerStatus.KeyExpiry) - t.Logf("node %q should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry) + t.Logf( + "node %q should have a key expire before %s, was %s", + peerStatus.HostName, + now.String(), + peerStatus.KeyExpiry, + ) if peerStatus.KeyExpiry != nil { - assert.Truef(t, peerStatus.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry) + assert.Truef( + t, + peerStatus.KeyExpiry.Before(now), + "node %q should have a key expire before %s, was %s", + peerStatus.HostName, + now.String(), + peerStatus.KeyExpiry, + ) } - assert.Truef(t, peerStatus.Expired, "node %q should be expired, expired is %v", peerStatus.HostName, peerStatus.Expired) + assert.Truef( + t, + peerStatus.Expired, + "node %q should be expired, expired is %v", + peerStatus.HostName, + peerStatus.Expired, + ) - _, stderr, _ := client.Execute([]string{"tailscale", "ping", node.GetName()}) + _, stderr, _ := client.Execute( + []string{"tailscale", "ping", node.GetName()}, + ) if !strings.Contains(stderr, "node key has expired") { - t.Errorf("expected to be unable to ping expired host %q from %q", node.GetName(), client.Hostname()) + t.Errorf( + "expected to be unable to ping expired host %q from %q", + node.GetName(), + client.Hostname(), + ) } } else { t.Errorf("failed to find node %q with nodekey (%s) in mapresponse, should be present even if it is expired", node.GetName(), expiredNodeKey) @@ -614,7 +710,11 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { "user1": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("onlinelastseen")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("onlinelastseen"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -631,7 +731,11 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { }) success := pingAllHelper(t, allClients, allAddrs) - t.Logf("before expire: %d successful 
pings out of %d", success, len(allClients)*len(allIps)) + t.Logf( + "before expire: %d successful pings out of %d", + success, + len(allClients)*len(allIps), + ) for _, client := range allClients { status, err := client.Status() diff --git a/integration/hsic/hsic.go b/integration/hsic/hsic.go index 5019895a3c1..854d21cf9d6 100644 --- a/integration/hsic/hsic.go +++ b/integration/hsic/hsic.go @@ -227,8 +227,12 @@ func New( // Cmd: []string{"headscale", "serve"}, // TODO(kradalby): Get rid of this hack, we currently need to give us some // to inject the headscale configuration further down. - Entrypoint: []string{"/bin/bash", "-c", "/bin/sleep 3 ; headscale serve ; /bin/sleep 30"}, - Env: env, + Entrypoint: []string{ + "/bin/bash", + "-c", + "/bin/sleep 3 ; headscale serve ; /bin/sleep 30", + }, + Env: env, } if len(hsic.hostPortBindings) > 0 { @@ -285,7 +289,10 @@ func New( if hsic.hasTLS() { err = hsic.WriteFile(tlsCertPath, hsic.tlsCert) if err != nil { - return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err) + return nil, fmt.Errorf( + "failed to write TLS certificate to container: %w", + err, + ) } err = hsic.WriteFile(tlsKeyPath, hsic.tlsKey) @@ -491,8 +498,10 @@ func (t *HeadscaleInContainer) WaitForRunning() error { client := &http.Client{} if t.hasTLS() { - insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint - insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint + insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint + insecureTransport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } //nolint client = &http.Client{Transport: insecureTransport} } @@ -578,7 +587,15 @@ func (t *HeadscaleInContainer) CreateAuthKey( func (t *HeadscaleInContainer) ListNodesInUser( user string, ) ([]*v1.Node, error) { - command := []string{"headscale", "--user", user, "nodes", "list", "--output", "json"} + command := []string{ + "headscale", + "--user", + user, + "nodes", + "list", + "--output", + "json", + } result, _, err := dockertestutil.ExecuteCommand( t.container, @@ -662,9 +679,12 @@ func createCertificate(hostname string) ([]byte, []byte, error) { NotBefore: time.Now(), NotAfter: time.Now().Add(60 * time.Minute), SubjectKeyId: []byte{1, 2, 3, 4, 6}, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, - KeyUsage: x509.KeyUsageDigitalSignature, - DNSNames: []string{hostname}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature, + DNSNames: []string{hostname}, } certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) diff --git a/integration/route_test.go b/integration/route_test.go index 741ba24ec7c..0b8fcdf4742 100644 --- a/integration/route_test.go +++ b/integration/route_test.go @@ -36,7 +36,11 @@ func TestEnablingRoutes(t *testing.T) { user: 3, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clienableroute"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -227,7 +231,12 @@ func TestEnablingRoutes(t *testing.T) { for _, peerKey := range status.Peers() { peerStatus := status.Peer[peerKey] - if string(peerStatus.ID) == fmt.Sprintf("%d", routeToBeDisabled.GetNode().GetId()) { + if string( + peerStatus.ID, + ) == fmt.Sprintf( + "%d", + routeToBeDisabled.GetNode().GetId(), + ) { 
assert.Nilf( t, peerStatus.PrimaryRoutes, @@ -254,7 +263,11 @@ func TestHASubnetRouterFailover(t *testing.T) { user: 3, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute")) + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clienableroute"), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -800,25 +813,30 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { user: 1, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy( - &policy.ACLPolicy{ - ACLs: []policy.ACL{ - { - Action: "accept", - Sources: []string{"*"}, - Destinations: []string{"*:*"}, + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, + hsic.WithTestName("clienableroute"), + hsic.WithACLPolicy( + &policy.ACLPolicy{ + ACLs: []policy.ACL{ + { + Action: "accept", + Sources: []string{"*"}, + Destinations: []string{"*:*"}, + }, }, - }, - TagOwners: map[string][]string{ - "tag:approve": {user}, - }, - AutoApprovers: policy.AutoApprovers{ - Routes: map[string][]string{ - expectedRoutes: {"tag:approve"}, + TagOwners: map[string][]string{ + "tag:approve": {user}, + }, + AutoApprovers: policy.AutoApprovers{ + Routes: map[string][]string{ + expectedRoutes: {"tag:approve"}, + }, }, }, - }, - )) + ), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -944,30 +962,35 @@ func TestSubnetRouteACL(t *testing.T) { user: 2, } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy( - &policy.ACLPolicy{ - Groups: policy.Groups{ - "group:admins": {user}, - }, - ACLs: []policy.ACL{ - { - Action: "accept", - Sources: []string{"group:admins"}, - Destinations: []string{"group:admins:*"}, + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clienableroute"), + hsic.WithACLPolicy( + &policy.ACLPolicy{ + Groups: policy.Groups{ + "group:admins": {user}, }, - { - Action: "accept", - Sources: []string{"group:admins"}, - Destinations: []string{"10.33.0.0/16:*"}, + ACLs: []policy.ACL{ + { + Action: "accept", + Sources: []string{"group:admins"}, + Destinations: []string{"group:admins:*"}, + }, + { + Action: "accept", + Sources: []string{"group:admins"}, + Destinations: []string{"10.33.0.0/16:*"}, + }, + // { + // Action: "accept", + // Sources: []string{"group:admins"}, + // Destinations: []string{"0.0.0.0/0:*"}, + // }, }, - // { - // Action: "accept", - // Sources: []string{"group:admins"}, - // Destinations: []string{"0.0.0.0/0:*"}, - // }, }, - }, - )) + ), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -1140,7 +1163,11 @@ func TestSubnetRouteACL(t *testing.T) { } if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.PrefixComparer); diff != "" { - t.Errorf("Client (%s) filter, unexpected result (-want +got):\n%s", client.Hostname(), diff) + t.Errorf( + "Client (%s) filter, unexpected result (-want +got):\n%s", + client.Hostname(), + diff, + ) } subnetNm, err := subRouter1.Netmap() @@ -1190,6 +1217,10 @@ func TestSubnetRouteACL(t *testing.T) { } if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.PrefixComparer); diff != "" { - t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff) + t.Errorf( + "Subnet (%s) filter, unexpected result (-want 
+got):\n%s", + subRouter1.Hostname(), + diff, + ) } } diff --git a/integration/scenario.go b/integration/scenario.go index c11af72368f..a6adbf3b742 100644 --- a/integration/scenario.go +++ b/integration/scenario.go @@ -312,7 +312,11 @@ func (s *Scenario) CreateTailscaleNodesInUser( headscale, err := s.Headscale() if err != nil { - return fmt.Errorf("failed to create tailscale node (version: %s): %w", version, err) + return fmt.Errorf( + "failed to create tailscale node (version: %s): %w", + version, + err, + ) } cert := headscale.GetCert() @@ -358,7 +362,11 @@ func (s *Scenario) CreateTailscaleNodesInUser( return err } - log.Printf("testing versions %v, MustTestVersions %v", lo.Uniq(versions), MustTestVersions) + log.Printf( + "testing versions %v, MustTestVersions %v", + lo.Uniq(versions), + MustTestVersions, + ) return nil } @@ -386,7 +394,11 @@ func (s *Scenario) RunTailscaleUp( for _, client := range user.Clients { err := client.WaitForRunning() if err != nil { - return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err) + return fmt.Errorf( + "%s failed to up tailscale node: %w", + client.Hostname(), + err, + ) } } diff --git a/integration/scenario_test.go b/integration/scenario_test.go index cc9810a46c9..7d35a67a57b 100644 --- a/integration/scenario_test.go +++ b/integration/scenario_test.go @@ -180,7 +180,11 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) { } if len(ips) != count*2 { - t.Fatalf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2) + t.Fatalf( + "got the wrong amount of tailscale ips, %d != %d", + len(ips), + count*2, + ) } }) } diff --git a/integration/ssh_test.go b/integration/ssh_test.go index 587190e4a8d..721467fab9d 100644 --- a/integration/ssh_test.go +++ b/integration/ssh_test.go @@ -382,7 +382,11 @@ func TestSSHUserOnlyIsolation(t *testing.T) { } } -func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) { +func doSSH( + t *testing.T, + client TailscaleClient, + peer TailscaleClient, +) (string, string, error) { t.Helper() peerFQDN, _ := peer.FQDN() @@ -410,7 +414,11 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien assertContains(t, peer.ID(), strings.ReplaceAll(result, "\n", "")) } -func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) { +func assertSSHPermissionDenied( + t *testing.T, + client TailscaleClient, + peer TailscaleClient, +) { t.Helper() result, stderr, _ := doSSH(t, client, peer) diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go index c30118dd5dc..73fdf95b5a9 100644 --- a/integration/tsic/tsic.go +++ b/integration/tsic/tsic.go @@ -273,7 +273,10 @@ func New( if tsic.hasTLS() { err = tsic.WriteFile(headscaleCertPath, tsic.headscaleCert) if err != nil { - return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err) + return nil, fmt.Errorf( + "failed to write TLS certificate to container: %w", + err, + ) } } @@ -480,7 +483,11 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) { result, _, err := t.Execute(command) if err != nil { - return []netip.Addr{}, fmt.Errorf("%s failed to join tailscale client: %w", t.hostname, err) + return []netip.Addr{}, fmt.Errorf( + "%s failed to join tailscale client: %w", + t.hostname, + err, + ) } for _, address := range strings.Split(result, "\n") { @@ -532,7 +539,10 @@ func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { result, stderr, err := t.Execute(command) if err != nil { fmt.Printf("stderr: 
%s\n", stderr) - return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err) + return nil, fmt.Errorf( + "failed to execute tailscale debug netmap command: %w", + err, + ) } var nm netmap.NetworkMap diff --git a/integration/utils.go b/integration/utils.go index e17e18a274d..5100ba44a87 100644 --- a/integration/utils.go +++ b/integration/utils.go @@ -75,7 +75,12 @@ func assertContains(t *testing.T, str, subStr string) { } } -func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string, opts ...tsic.PingOption) int { +func pingAllHelper( + t *testing.T, + clients []TailscaleClient, + addrs []string, + opts ...tsic.PingOption, +) int { t.Helper() success := 0 diff --git a/proto/headscale/v1/headscale.proto b/proto/headscale/v1/headscale.proto index 48bd5efce1f..f8cc596f0ce 100644 --- a/proto/headscale/v1/headscale.proto +++ b/proto/headscale/v1/headscale.proto @@ -13,175 +13,175 @@ import "headscale/v1/apikey.proto"; service HeadscaleService { // --- User start --- - rpc GetUser(GetUserRequest) returns(GetUserResponse) { - option(google.api.http) = { - get : "/api/v1/user/{name}" + rpc GetUser(GetUserRequest) returns (GetUserResponse) { + option (google.api.http) = { + get: "/api/v1/user/{name}" }; } - rpc CreateUser(CreateUserRequest) returns(CreateUserResponse) { - option(google.api.http) = { - post : "/api/v1/user" - body : "*" + rpc CreateUser(CreateUserRequest) returns (CreateUserResponse) { + option (google.api.http) = { + post: "/api/v1/user" + body: "*" }; } - rpc RenameUser(RenameUserRequest) returns(RenameUserResponse) { - option(google.api.http) = { - post : "/api/v1/user/{old_name}/rename/{new_name}" + rpc RenameUser(RenameUserRequest) returns (RenameUserResponse) { + option (google.api.http) = { + post: "/api/v1/user/{old_name}/rename/{new_name}" }; } - rpc DeleteUser(DeleteUserRequest) returns(DeleteUserResponse) { - option(google.api.http) = { - delete : "/api/v1/user/{name}" + rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse) { + option (google.api.http) = { + delete: "/api/v1/user/{name}" }; } - rpc ListUsers(ListUsersRequest) returns(ListUsersResponse) { - option(google.api.http) = { - get : "/api/v1/user" + rpc ListUsers(ListUsersRequest) returns (ListUsersResponse) { + option (google.api.http) = { + get: "/api/v1/user" }; } // --- User end --- // --- PreAuthKeys start --- - rpc CreatePreAuthKey(CreatePreAuthKeyRequest) returns(CreatePreAuthKeyResponse) { - option(google.api.http) = { - post : "/api/v1/preauthkey" - body : "*" + rpc CreatePreAuthKey(CreatePreAuthKeyRequest) returns (CreatePreAuthKeyResponse) { + option (google.api.http) = { + post: "/api/v1/preauthkey" + body: "*" }; } - rpc ExpirePreAuthKey(ExpirePreAuthKeyRequest) returns(ExpirePreAuthKeyResponse) { - option(google.api.http) = { - post : "/api/v1/preauthkey/expire" - body : "*" + rpc ExpirePreAuthKey(ExpirePreAuthKeyRequest) returns (ExpirePreAuthKeyResponse) { + option (google.api.http) = { + post: "/api/v1/preauthkey/expire" + body: "*" }; } - rpc ListPreAuthKeys(ListPreAuthKeysRequest) returns(ListPreAuthKeysResponse) { - option(google.api.http) = { - get : "/api/v1/preauthkey" + rpc ListPreAuthKeys(ListPreAuthKeysRequest) returns (ListPreAuthKeysResponse) { + option (google.api.http) = { + get: "/api/v1/preauthkey" }; } // --- PreAuthKeys end --- // --- Node start --- - rpc DebugCreateNode(DebugCreateNodeRequest) returns(DebugCreateNodeResponse) { - option(google.api.http) = { - post : "/api/v1/debug/node" - body : "*" + rpc 
DebugCreateNode(DebugCreateNodeRequest) returns (DebugCreateNodeResponse) { + option (google.api.http) = { + post: "/api/v1/debug/node" + body: "*" }; } - rpc GetNode(GetNodeRequest) returns(GetNodeResponse) { - option(google.api.http) = { - get : "/api/v1/node/{node_id}" + rpc GetNode(GetNodeRequest) returns (GetNodeResponse) { + option (google.api.http) = { + get: "/api/v1/node/{node_id}" }; } - rpc SetTags(SetTagsRequest) returns(SetTagsResponse) { - option(google.api.http) = { - post : "/api/v1/node/{node_id}/tags" - body : "*" + rpc SetTags(SetTagsRequest) returns (SetTagsResponse) { + option (google.api.http) = { + post: "/api/v1/node/{node_id}/tags" + body: "*" }; } - rpc RegisterNode(RegisterNodeRequest) returns(RegisterNodeResponse) { - option(google.api.http) = { - post : "/api/v1/node/register" + rpc RegisterNode(RegisterNodeRequest) returns (RegisterNodeResponse) { + option (google.api.http) = { + post: "/api/v1/node/register" }; } - rpc DeleteNode(DeleteNodeRequest) returns(DeleteNodeResponse) { - option(google.api.http) = { - delete : "/api/v1/node/{node_id}" + rpc DeleteNode(DeleteNodeRequest) returns (DeleteNodeResponse) { + option (google.api.http) = { + delete: "/api/v1/node/{node_id}" }; } - rpc ExpireNode(ExpireNodeRequest) returns(ExpireNodeResponse) { - option(google.api.http) = { - post : "/api/v1/node/{node_id}/expire" + rpc ExpireNode(ExpireNodeRequest) returns (ExpireNodeResponse) { + option (google.api.http) = { + post: "/api/v1/node/{node_id}/expire" }; } - rpc RenameNode(RenameNodeRequest) returns(RenameNodeResponse) { - option(google.api.http) = { - post : "/api/v1/node/{node_id}/rename/{new_name}" + rpc RenameNode(RenameNodeRequest) returns (RenameNodeResponse) { + option (google.api.http) = { + post: "/api/v1/node/{node_id}/rename/{new_name}" }; } - rpc ListNodes(ListNodesRequest) returns(ListNodesResponse) { - option(google.api.http) = { - get : "/api/v1/node" + rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) { + option (google.api.http) = { + get: "/api/v1/node" }; } - rpc MoveNode(MoveNodeRequest) returns(MoveNodeResponse) { - option(google.api.http) = { - post : "/api/v1/node/{node_id}/user" + rpc MoveNode(MoveNodeRequest) returns (MoveNodeResponse) { + option (google.api.http) = { + post: "/api/v1/node/{node_id}/user" }; } // --- Node end --- // --- Route start --- - rpc GetRoutes(GetRoutesRequest) returns(GetRoutesResponse) { - option(google.api.http) = { - get : "/api/v1/routes" + rpc GetRoutes(GetRoutesRequest) returns (GetRoutesResponse) { + option (google.api.http) = { + get: "/api/v1/routes" }; } - rpc EnableRoute(EnableRouteRequest) returns(EnableRouteResponse) { - option(google.api.http) = { - post : "/api/v1/routes/{route_id}/enable" + rpc EnableRoute(EnableRouteRequest) returns (EnableRouteResponse) { + option (google.api.http) = { + post: "/api/v1/routes/{route_id}/enable" }; } - rpc DisableRoute(DisableRouteRequest) returns(DisableRouteResponse) { - option(google.api.http) = { - post : "/api/v1/routes/{route_id}/disable" + rpc DisableRoute(DisableRouteRequest) returns (DisableRouteResponse) { + option (google.api.http) = { + post: "/api/v1/routes/{route_id}/disable" }; } - rpc GetNodeRoutes(GetNodeRoutesRequest) returns(GetNodeRoutesResponse) { - option(google.api.http) = { - get : "/api/v1/node/{node_id}/routes" + rpc GetNodeRoutes(GetNodeRoutesRequest) returns (GetNodeRoutesResponse) { + option (google.api.http) = { + get: "/api/v1/node/{node_id}/routes" }; } - rpc DeleteRoute(DeleteRouteRequest) returns(DeleteRouteResponse) { 
-    option(google.api.http) = {
-      delete : "/api/v1/routes/{route_id}"
+  rpc DeleteRoute(DeleteRouteRequest) returns (DeleteRouteResponse) {
+    option (google.api.http) = {
+      delete: "/api/v1/routes/{route_id}"
     };
   }
   // --- Route end ---

   // --- ApiKeys start ---
-  rpc CreateApiKey(CreateApiKeyRequest) returns(CreateApiKeyResponse) {
-    option(google.api.http) = {
-      post : "/api/v1/apikey"
-      body : "*"
+  rpc CreateApiKey(CreateApiKeyRequest) returns (CreateApiKeyResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/apikey"
+      body: "*"
     };
   }

-  rpc ExpireApiKey(ExpireApiKeyRequest) returns(ExpireApiKeyResponse) {
-    option(google.api.http) = {
-      post : "/api/v1/apikey/expire"
-      body : "*"
+  rpc ExpireApiKey(ExpireApiKeyRequest) returns (ExpireApiKeyResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/apikey/expire"
+      body: "*"
     };
   }

-  rpc ListApiKeys(ListApiKeysRequest) returns(ListApiKeysResponse) {
-    option(google.api.http) = {
-      get : "/api/v1/apikey"
+  rpc ListApiKeys(ListApiKeysRequest) returns (ListApiKeysResponse) {
+    option (google.api.http) = {
+      get: "/api/v1/apikey"
     };
   }

-  rpc DeleteApiKey(DeleteApiKeyRequest) returns(DeleteApiKeyResponse) {
-    option(google.api.http) = {
-      delete : "/api/v1/apikey/{prefix}"
+  rpc DeleteApiKey(DeleteApiKeyRequest) returns (DeleteApiKeyResponse) {
+    option (google.api.http) = {
+      delete: "/api/v1/apikey/{prefix}"
     };
   }
   // --- ApiKeys end ---
diff --git a/proto/headscale/v1/node.proto b/proto/headscale/v1/node.proto
index 476aa59a440..cfefab11312 100644
--- a/proto/headscale/v1/node.proto
+++ b/proto/headscale/v1/node.proto
@@ -20,7 +20,7 @@ message Node {
   string disco_key = 4;
   repeated string ip_addresses = 5;
   string name = 6;
-  User user = 7;
+  User user = 7;

   google.protobuf.Timestamp last_seen = 8;
   google.protobuf.Timestamp last_successful_update = 9;
@@ -49,7 +49,7 @@ message Node {

 message RegisterNodeRequest {
   string user = 1;
-  string key = 2;
+  string key = 2;
 }

 message RegisterNodeResponse {
@@ -66,7 +66,7 @@ message GetNodeResponse {

 message SetTagsRequest {
   uint64 node_id = 1;
-  repeated string tags = 2;
+  repeated string tags = 2;
 }

 message SetTagsResponse {
@@ -89,8 +89,8 @@ message ExpireNodeResponse {
 }

 message RenameNodeRequest {
-  uint64 node_id = 1;
-  string new_name = 2;
+  uint64 node_id = 1;
+  string new_name = 2;
 }

 message RenameNodeResponse {
@@ -107,7 +107,7 @@ message ListNodesResponse {

 message MoveNodeRequest {
   uint64 node_id = 1;
-  string user = 2;
+  string user = 2;
 }

 message MoveNodeResponse {
@@ -115,7 +115,7 @@ message MoveNodeResponse {
 }

 message DebugCreateNodeRequest {
-  string user = 1;
+  string user = 1;
   string key = 2;
   string name = 3;
   repeated string routes = 4;
diff --git a/proto/headscale/v1/preauthkey.proto b/proto/headscale/v1/preauthkey.proto
index 7d0de2946a8..1ab3a72771e 100644
--- a/proto/headscale/v1/preauthkey.proto
+++ b/proto/headscale/v1/preauthkey.proto
@@ -5,7 +5,7 @@ option go_package = "github.com/juanfont/headscale/gen/go/v1";
 import "google/protobuf/timestamp.proto";

 message PreAuthKey {
-  string user = 1;
+  string user = 1;
   string id = 2;
   string key = 3;
   bool reusable = 4;
@@ -17,7 +17,7 @@
 }

 message CreatePreAuthKeyRequest {
-  string user = 1;
+  string user = 1;
   bool reusable = 2;
   bool ephemeral = 3;
   google.protobuf.Timestamp expiration = 4;
@@ -30,7 +30,7 @@ message CreatePreAuthKeyResponse {

 message ExpirePreAuthKeyRequest {
   string user = 1;
-  string key = 2;
+  string key = 2;
 }

 message ExpirePreAuthKeyResponse {
diff --git a/proto/headscale/v1/routes.proto b/proto/headscale/v1/routes.proto
index ea90025960d..10b6e0aad86 100644
--- a/proto/headscale/v1/routes.proto
+++ b/proto/headscale/v1/routes.proto
@@ -6,12 +6,12 @@ import "google/protobuf/timestamp.proto";
 import "headscale/v1/node.proto";

 message Route {
-  uint64 id = 1;
-  Node node = 2;
-  string prefix = 3;
-  bool advertised = 4;
-  bool enabled = 5;
-  bool is_primary = 6;
+  uint64 id = 1;
+  Node node = 2;
+  string prefix = 3;
+  bool advertised = 4;
+  bool enabled = 5;
+  bool is_primary = 6;

   google.protobuf.Timestamp created_at = 7;
   google.protobuf.Timestamp updated_at = 8;