Add lifecycle hook handlers to test extension
Signed-off-by: killianmuldoon <[email protected]>
Co-authored-by: ykakarap <[email protected]>
killianmuldoon and ykakarap committed Jun 21, 2022
1 parent 8d7f010 commit b24f302
Showing 8 changed files with 315 additions and 4 deletions.
56 changes: 52 additions & 4 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -26,9 +26,11 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"

runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
"sigs.k8s.io/cluster-api/test/framework"
@@ -117,22 +119,26 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
})

It("Should create and upgrade a workload cluster", func() {
clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
By("Deploy Test Extension")
testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
Expect(err).ToNot(HaveOccurred(), "Failed to read the extension config deployment manifest file")
Expect(err).ToNot(HaveOccurred(), "Failed to read the extension deployment manifest file")

// Set the SERVICE_NAMESPACE, which is used in the cert-manager Certificate CR.
// We have to dynamically set the namespace here, because it depends on the test run and thus
// cannot be set when rendering the test extension YAML with kustomize.
testExtensionDeployment := strings.ReplaceAll(string(testExtensionDeploymentTemplate), "${SERVICE_NAMESPACE}", namespace.Name)
Expect(testExtensionDeployment).ToNot(BeEmpty(), "Test Extension deployment manifest file should not be empty")

Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(testExtensionDeployment), "--namespace", namespace.Name)).To(Succeed())

By("Deploy Test Extension ExtensionConfig")
By("Deploy Test Extension ExtensionConfig and ConfigMap")
ext = extensionConfig(specName, namespace)
err = input.BootstrapClusterProxy.GetClient().Create(ctx, ext)
Expect(err).ToNot(HaveOccurred(), "Failed to create the extension config")
responses := responsesConfigMap(clusterName, namespace)
err = input.BootstrapClusterProxy.GetClient().Create(ctx, responses)
Expect(err).ToNot(HaveOccurred(), "Failed to create the responses configmap")

By("Creating a workload cluster")

@@ -145,7 +151,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: pointer.StringDeref(input.Flavor, "upgrades"),
Namespace: namespace.Name,
-ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+ClusterName: clusterName,
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
WorkerMachineCount: pointer.Int64Ptr(workerMachineCount),
@@ -194,6 +200,17 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
})

By("Checking all lifecycle hooks have been called")
// Assert that each hook in this map has a "<hook>-called" entry recorded in the response configmap, i.e. its handler was called at least once.
err = checkLifecycleHooks(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
"BeforeClusterCreate": "",
"BeforeClusterUpgrade": "",
"AfterControlPlaneInitialized": "",
"AfterControlPlaneUpgrade": "",
"AfterClusterUpgrade": "",
})
Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")

By("PASSED!")
})

@@ -241,3 +258,34 @@ func extensionConfig(specName string, namespace *corev1.Namespace) *runtimev1.Ex
},
}
}

// responsesConfigMap generates a ConfigMap with preloaded responses for the test extension.
func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.ConfigMap {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-hookresponses", name),
Namespace: namespace.Name,
},
// Every response contains only Status: Success. The test checks whether each handler has been called at least once.
Data: map[string]string{
"BeforeClusterCreate-response": `{"Status": "Success"}`,
"BeforeClusterUpgrade-response": `{"Status": "Success"}`,
"AfterControlPlaneInitialized-response": `{"Status": "Success"}`,
"AfterControlPlaneUpgrade-response": `{"Status": "Success"}`,
"AfterClusterUpgrade-response": `{"Status": "Success"}`,
},
}
}
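
Because the handlers echo whatever response is preloaded here, a test could equally seed a blocking answer for one of the blocking hooks. A minimal sketch, assuming the RetryAfterSeconds field defined on blocking-hook responses in runtimehooksv1 (not exercised by this test):

// Hypothetical variant: preload a response that blocks BeforeClusterCreate.
// Status stays "Success"; a non-zero RetryAfterSeconds is what asks the
// topology controller to hold the operation and retry the hook later.
func blockingResponsesConfigMap(name string, namespace *corev1.Namespace) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-hookresponses", name),
			Namespace: namespace.Name,
		},
		Data: map[string]string{
			"BeforeClusterCreate-response": `{"Status": "Success", "RetryAfterSeconds": 60}`,
		},
	}
}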

func checkLifecycleHooks(ctx context.Context, c client.Client, namespace string, clusterName string, hooks map[string]string) error {
configMap := &corev1.ConfigMap{}
configMapName := clusterName + "-hookresponses"
if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to get the hook response configmap %s/%s", namespace, configMapName)
}
for hook := range hooks {
if _, ok := configMap.Data[hook+"-called"]; !ok {
return errors.Errorf("hook %s call not recorded in configMap %s/%s", hook, namespace, configMapName)
}
}
return nil
}
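
For reference, once the test has passed, the hook-responses ConfigMap holds both the preloaded *-response entries and the *-called markers patched in by the handlers (see handlers.go below); checkLifecycleHooks only inspects the latter. A sketch of the resulting Data:

// Approximate contents of the <clusterName>-hookresponses ConfigMap after a
// successful run (illustrative, not part of this diff).
var wantData = map[string]string{
	"BeforeClusterCreate-response":  `{"Status": "Success"}`,
	"BeforeClusterCreate-called":    "true",
	"BeforeClusterUpgrade-response": `{"Status": "Success"}`,
	"BeforeClusterUpgrade-called":   "true",
	// ...and the same pair for AfterControlPlaneInitialized,
	// AfterControlPlaneUpgrade, and AfterClusterUpgrade.
}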
1 change: 1 addition & 0 deletions test/extension/config/default/extension.yaml
@@ -19,6 +19,7 @@ spec:
image: controller:latest
name: extension
terminationGracePeriodSeconds: 10
serviceAccountName: test-extension
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
3 changes: 3 additions & 0 deletions test/extension/config/default/kustomization.yaml
@@ -3,6 +3,9 @@ commonLabels:
resources:
- extension.yaml
- service.yaml
- role.yaml
- rolebinding.yaml
- service_account.yaml

bases:
- ../certmanager
16 changes: 16 additions & 0 deletions test/extension/config/default/role.yaml
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: test-extension
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- patch
- update
- create
12 changes: 12 additions & 0 deletions test/extension/config/default/rolebinding.yaml
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: test-extension
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: test-extension
subjects:
- kind: ServiceAccount
name: test-extension
namespace: ${SERVICE_NAMESPACE}
4 changes: 4 additions & 0 deletions test/extension/config/default/service_account.yaml
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: test-extension
159 changes: 159 additions & 0 deletions test/extension/handlers/lifecycle/handlers.go
@@ -0,0 +1,159 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package lifecycle contains the handlers for the lifecycle hooks.
package lifecycle

import (
"context"
"fmt"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"

runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
)

// Handler is the handler for the lifecycle hooks.
type Handler struct {
Client client.Client
}

// DoBeforeClusterCreate implements the BeforeClusterCreate hook.
func (h *Handler) DoBeforeClusterCreate(ctx context.Context, request *runtimehooksv1.BeforeClusterCreateRequest, response *runtimehooksv1.BeforeClusterCreateResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterCreate is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
log.Info("BeforeClusterCreate has been recorded in configmap", "cm", cluster.Name+"-hookresponses")

err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response)
if err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

// DoBeforeClusterUpgrade implements the BeforeClusterUpgrade hook.
func (h *Handler) DoBeforeClusterUpgrade(ctx context.Context, request *runtimehooksv1.BeforeClusterUpgradeRequest, response *runtimehooksv1.BeforeClusterUpgradeResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterUpgrade is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response)
if err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

// DoAfterControlPlaneInitialized implements the AfterControlPlaneInitialized hook.
func (h *Handler) DoAfterControlPlaneInitialized(ctx context.Context, request *runtimehooksv1.AfterControlPlaneInitializedRequest, response *runtimehooksv1.AfterControlPlaneInitializedResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("AfterControlPlaneInitialized is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response)
if err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

// DoAfterControlPlaneUpgrade implements the AfterControlPlaneUpgrade hook.
func (h *Handler) DoAfterControlPlaneUpgrade(ctx context.Context, request *runtimehooksv1.AfterControlPlaneUpgradeRequest, response *runtimehooksv1.AfterControlPlaneUpgradeResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("AfterControlPlaneUpgrade is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response)
if err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

// DoAfterClusterUpgrade implements the AfterClusterUpgrade hook.
func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehooksv1.AfterClusterUpgradeRequest, response *runtimehooksv1.AfterClusterUpgradeResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("AfterClusterUpgrade is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response)
if err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
configMapName := name + "-hookresponses"
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
}
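// Note: sigs.k8s.io/yaml converts the stored document to JSON and decodes it
// with encoding/json, whose case-insensitive field matching is why keys like
// "Status" populate response fields whose json tags are lower-case ("status").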
if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-response"]), response); err != nil {
return errors.Wrapf(err, "failed to read %q response information from ConfigMap", hook)
}
return nil
}

func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
configMapName := name + "-hookresponses"
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
}

patch := client.RawPatch(types.MergePatchType,
[]byte(fmt.Sprintf(`{"data":{"%s-called":"true"}}`, hookName)))
if err := h.Client.Patch(ctx, configMap, patch); err != nil {
return errors.Wrapf(err, "failed to update the ConfigMap %s/%s", namespace, configMapName)
}
return nil
}
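
For context, handlers like these have to be registered with the test extension's runtime webhook server. A minimal sketch of that wiring; the server package path, the Options/ExtensionHandler shapes, and the port/cert-dir values are assumptions here, not taken from this commit:

package main

import (
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	"sigs.k8s.io/cluster-api/exp/runtime/server"
	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
	"sigs.k8s.io/cluster-api/test/extension/handlers/lifecycle"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// Catalog of runtime hook types the server can (de)serialize.
	catalog := runtimecatalog.New()
	if err := runtimehooksv1.AddToCatalog(catalog); err != nil {
		panic(err)
	}

	// Client the handlers use to read and patch the hook-responses ConfigMap.
	cl, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}
	h := lifecycle.Handler{Client: cl}

	srv, err := server.New(server.Options{
		Catalog: catalog,
		Port:    9443,                                    // assumed
		CertDir: "/tmp/k8s-webhook-server/serving-certs", // assumed
	})
	if err != nil {
		panic(err)
	}

	// One registration per hook; Name ends up in the handler's URL path.
	if err := srv.AddExtensionHandler(server.ExtensionHandler{
		Hook:        runtimehooksv1.BeforeClusterCreate,
		Name:        "before-cluster-create",
		HandlerFunc: h.DoBeforeClusterCreate,
	}); err != nil {
		panic(err)
	}
	// ...and likewise for the other four lifecycle hooks.

	if err := srv.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}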