cluster.go

package main

import (
    "context"
    "fmt"
    "time"

    "go.uber.org/zap"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilrand "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/utils/pointer"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

    kubermaticv1 "k8c.io/kubermatic/v2/pkg/apis/kubermatic/v1"
    kubermaticv1helper "k8c.io/kubermatic/v2/pkg/apis/kubermatic/v1/helper"
    "k8c.io/kubermatic/v2/pkg/resources/reconciling"
    "k8c.io/kubermatic/v2/pkg/semver"
    kubermaticversions "k8c.io/kubermatic/v2/pkg/version/kubermatic"
)

// CreateCluster creates a GCP user cluster in the given project and waits
// until it is initialized.
func CreateCluster(ctx context.Context, client ctrlruntimeclient.Client, projectName, serviceAccount, network, subnetwork string,
    k8sVersion semver.Semver) (*kubermaticv1.Cluster, error) {
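    // Build the desired Cluster object; the name is randomized so repeated
    // runs do not collide, and the project label ties the cluster to the
    // given KKP project.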
    cluster := &kubermaticv1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
            Name: utilrand.String(10),
            Labels: map[string]string{
                kubermaticv1.ProjectIDLabelKey: projectName,
            },
        },
        Spec: kubermaticv1.ClusterSpec{
            Version:           k8sVersion,
            HumanReadableName: "sample-cluster",
            Cloud: kubermaticv1.CloudSpec{
                GCP: &kubermaticv1.GCPCloudSpec{
                    ServiceAccount: serviceAccount,
                    Network:        network,
                    Subnetwork:     subnetwork,
                },
                DatacenterName: "gcp-westeurope-2",
            },
            AuditLogging: &kubermaticv1.AuditLoggingSettings{
                Enabled:      false,
                PolicyPreset: "",
            },
            ClusterNetwork: kubermaticv1.ClusterNetworkingConfig{
                KonnectivityEnabled: pointer.Bool(true),
                IPFamily:            kubermaticv1.IPFamilyIPv4,
                ProxyMode:           "ipvs",
                Pods: kubermaticv1.NetworkRanges{
                    CIDRBlocks: []string{"172.25.0.0/16"},
                },
                Services: kubermaticv1.NetworkRanges{
                    CIDRBlocks: []string{"10.240.16.0/20"},
                },
                NodeCIDRMaskSizeIPv4:     pointer.Int32(24),
                NodeLocalDNSCacheEnabled: pointer.Bool(true),
            },
            CNIPlugin: &kubermaticv1.CNIPluginSettings{
                Type:    kubermaticv1.CNIPluginTypeCanal,
                Version: "v3.23",
            },
            OPAIntegration: &kubermaticv1.OPAIntegrationSettings{},
            MLA:            &kubermaticv1.MLASettings{},
            KubernetesDashboard: &kubermaticv1.KubernetesDashboard{
                Enabled: true,
            },
            EnableUserSSHKeyAgent:        pointer.Bool(true),
            EnableOperatingSystemManager: pointer.Bool(true),
            ContainerRuntime:             "containerd",
            Pause:                        false,
        },
    }
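    // Create the Cluster object via the controller-runtime client.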
    err := client.Create(ctx, cluster)
    if err != nil {
        return nil, err
    }
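    // The controller-runtime client reads from a cache, so the object just
    // created may not be visible right away; poll until it shows up before
    // touching it again.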
    waiter := reconciling.WaitUntilObjectExistsInCacheConditionFunc(ctx, client, zap.NewNop().Sugar(), ctrlruntimeclient.ObjectKeyFromObject(cluster), cluster)
    if err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
        return waiter()
    }); err != nil {
        return nil, fmt.Errorf("failed waiting for the new cluster to appear in the cache: %w", err)
    }
    // In the future this will no longer be required; until then we sadly have
    // to ensure manually that the owner email is set correctly.
    err = kubermaticv1helper.UpdateClusterStatus(ctx, client, cluster, func(c *kubermaticv1.Cluster) {
        c.Status.UserEmail = "[email protected]"
    })
    if err != nil {
        return nil, err
    }
    // Wait for the cluster to become ready for interaction.
    versions := kubermaticversions.NewDefaultVersions()
    if err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
        if err := client.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(cluster), cluster); err != nil {
            return false, fmt.Errorf("failed to get cluster: %w", err)
        }
        // Done once the cluster reports itself as fully initialized.
        return kubermaticv1helper.IsClusterInitialized(cluster, versions), nil
    }); err != nil {
        return nil, err
    }
    return cluster, nil
}
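
// exampleCreateCluster is a minimal usage sketch: the project ID, service
// account, network/subnetwork paths, and Kubernetes version below are
// placeholder assumptions, and any controller-runtime client connected to
// the seed cluster can be passed in the same way.
func exampleCreateCluster(ctx context.Context, client ctrlruntimeclient.Client) error {
    // NewSemverOrDie panics on an invalid version string, which is acceptable
    // for a hardcoded example value.
    version := *semver.NewSemverOrDie("1.24.8")

    cluster, err := CreateCluster(ctx, client,
        "my-project-id",          // hypothetical KKP project ID
        "base64-encoded-sa-json", // hypothetical GCP service account key
        "global/networks/default",                                      // hypothetical network
        "projects/my-project/regions/europe-west2/subnetworks/default", // hypothetical subnetwork
        version,
    )
    if err != nil {
        return fmt.Errorf("failed to create cluster: %w", err)
    }

    fmt.Printf("cluster %s is ready\n", cluster.Name)
    return nil
}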