Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

✨ Implements BeforeClusterDelete hook #6644

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/hooks"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/predicates"
)
Expand Down Expand Up @@ -91,6 +93,12 @@ func (r *ClusterResourceSetBindingReconciler) Reconcile(ctx context.Context, req
}
// If the owner cluster is in deletion process, delete its ClusterResourceSetBinding
if !cluster.DeletionTimestamp.IsZero() {
if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) {
if cluster.Spec.Topology != nil && !hooks.IsOkToDelete(cluster) {
// If the Cluster is not yet ready to be deleted then do not delete the ClusterResourceSetBinding.
return ctrl.Result{}, nil
}
}
log.Info("deleting ClusterResourceSetBinding because the owner Cluster is currently being deleted")
return ctrl.Result{}, r.Client.Delete(ctx, binding)
}
Expand Down
4 changes: 4 additions & 0 deletions exp/runtime/api/v1alpha1/extensionconfig_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,4 +210,8 @@ const (
// The annotation will be used to track the intent to call a hook as soon as an operation completes;
// the intent will be removed as soon as the hook call completes successfully.
PendingHooksAnnotation string = "runtime.cluster.x-k8s.io/pending-hooks"

// OkToDeleteAnnotation is the annotation used to indicate if a cluster is ready to be fully deleted.
// This annotation is added to the cluster after the BeforeClusterDelete hook has passed.
OkToDeleteAnnotation string = "runtime.cluster.x-k8s.io/ok-to-delete"
)
9 changes: 9 additions & 0 deletions internal/controllers/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ import (
"sigs.k8s.io/cluster-api/controllers/external"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/hooks"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/collections"
Expand Down Expand Up @@ -215,6 +216,14 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster)
func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)

// If the RuntimeSDK and ClusterTopology flags are enabled, for clusters with managed topologies
// only proceed with delete if the cluster is marked as `ok-to-delete`
if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) {
if cluster.Spec.Topology != nil && !hooks.IsOkToDelete(cluster) {
return ctrl.Result{}, nil
}
}

descendants, err := r.listDescendants(ctx, cluster)
if err != nil {
log.Error(err, "Failed to list descendants")
Expand Down
51 changes: 51 additions & 0 deletions internal/controllers/cluster/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,19 @@ import (

. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/test/builder"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
Expand Down Expand Up @@ -350,6 +354,53 @@ func TestClusterReconciler(t *testing.T) {
})
}

func TestClusterReconciler_reconcileDelete(t *testing.T) {
	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()

	fakeInfraCluster := builder.InfrastructureCluster("test-ns", "test-cluster").Build()

	// newCluster builds a topology Cluster referencing fakeInfraCluster,
	// optionally carrying the ok-to-delete annotation.
	newCluster := func(okToDelete bool) *clusterv1.Cluster {
		c := builder.Cluster("test-ns", "test-cluster").
			WithTopology(&clusterv1.Topology{}).
			WithInfrastructureCluster(fakeInfraCluster).
			Build()
		if okToDelete {
			if c.Annotations == nil {
				c.Annotations = map[string]string{}
			}
			c.Annotations[runtimev1.OkToDeleteAnnotation] = ""
		}
		return c
	}

	testCases := []struct {
		name       string
		cluster    *clusterv1.Cluster
		wantDelete bool
	}{
		{
			name:       "should proceed with delete if the cluster has the ok-to-delete annotation",
			cluster:    newCluster(true),
			wantDelete: true,
		},
		{
			name:       "should not proceed with delete if the cluster does not have the ok-to-delete annotation",
			cluster:    newCluster(false),
			wantDelete: false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)
			fakeClient := fake.NewClientBuilder().WithObjects(fakeInfraCluster, tc.cluster).Build()
			r := &Reconciler{
				Client:    fakeClient,
				APIReader: fakeClient,
			}

			// The return values of reconcileDelete are intentionally ignored;
			// the assertion below only checks whether the InfrastructureCluster
			// was deleted as a consequence of the call.
			_, _ = r.reconcileDelete(ctx, tc.cluster)
			infraCluster := builder.InfrastructureCluster("", "").Build()
			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(fakeInfraCluster), infraCluster)
			g.Expect(apierrors.IsNotFound(err)).To(Equal(tc.wantDelete))
		})
	}
}

func TestClusterReconcilerNodeRef(t *testing.T) {
t.Run("machine to cluster", func(t *testing.T) {
cluster := &clusterv1.Cluster{
Expand Down
30 changes: 27 additions & 3 deletions internal/controllers/topology/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ import (
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/internal/hooks"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
Expand Down Expand Up @@ -162,9 +163,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
// In case the object is deleted, the managed topology stops to reconcile;
// (the other controllers will take care of deletion).
if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
// TODO: When external patching is supported, we should handle the deletion
// of those external CRDs we created.
return ctrl.Result{}, nil
return r.reconcileDelete(ctx, cluster)
}

patchHelper, err := patch.NewHelper(cluster, r.Client)
Expand Down Expand Up @@ -336,6 +335,31 @@ func (r *Reconciler) machineDeploymentToCluster(o client.Object) []ctrl.Request
}}
}

// reconcileDelete handles deletion of a Cluster with a managed topology.
// If the RuntimeSDK feature gate is enabled it calls the BeforeClusterDelete
// hook and, once all extensions return a non-blocking response, marks the
// Cluster with the ok-to-delete annotation so the other controllers can
// proceed with the actual deletion.
func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
	// Without the RuntimeSDK feature gate there are no hooks to call.
	if !feature.Gates.Enabled(feature.RuntimeSDK) {
		return ctrl.Result{}, nil
	}

	// If the Cluster is already marked ok-to-delete the hook has previously
	// returned a non-blocking response; nothing left to do here.
	if hooks.IsOkToDelete(cluster) {
		return ctrl.Result{}, nil
	}

	hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{Cluster: *cluster}
	hookResponse := &runtimehooksv1.BeforeClusterDeleteResponse{}
	if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterDelete, cluster, hookRequest, hookResponse); err != nil {
		return ctrl.Result{}, err
	}

	// A blocking response asks us to come back later and retry the hook.
	if hookResponse.RetryAfterSeconds != 0 {
		return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
	}

	// All extensions returned a non-blocking response: record the intent so
	// deletion can proceed on subsequent reconciles.
	if err := hooks.MarkAsOkToDelete(ctx, r.Client, cluster); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to mark %s/%s cluster as ok to delete", cluster.Namespace, cluster.Name)
	}

	return ctrl.Result{}, nil
}

// serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
Expand Down
155 changes: 155 additions & 0 deletions internal/controllers/topology/cluster/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,19 +23,23 @@ import (

. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/component-base/featuregate/testing"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/contract"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
"sigs.k8s.io/cluster-api/internal/hooks"
fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
"sigs.k8s.io/cluster-api/internal/test/builder"
"sigs.k8s.io/cluster-api/util/conditions"
Expand Down Expand Up @@ -389,6 +393,157 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {
}, timeout).Should(Succeed())
}

func TestClusterReconciler_reconcileDelete(t *testing.T) {
	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()

	catalog := runtimecatalog.New()
	_ = runtimehooksv1.AddToCatalog(catalog)

	gvh, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterDelete)
	if err != nil {
		panic(err)
	}

	// newDeleteResponse builds a BeforeClusterDeleteResponse with the given
	// status and retry interval.
	newDeleteResponse := func(status runtimehooksv1.ResponseStatus, retryAfterSeconds int32) *runtimehooksv1.BeforeClusterDeleteResponse {
		return &runtimehooksv1.BeforeClusterDeleteResponse{
			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
				RetryAfterSeconds: retryAfterSeconds,
				CommonResponse: runtimehooksv1.CommonResponse{
					Status: status,
				},
			},
		}
	}

	blockingResponse := newDeleteResponse(runtimehooksv1.ResponseStatusSuccess, int32(10))
	nonBlockingResponse := newDeleteResponse(runtimehooksv1.ResponseStatusSuccess, int32(0))
	failureResponse := newDeleteResponse(runtimehooksv1.ResponseStatusFailure, int32(0))

	// newTestCluster builds a minimal topology Cluster, optionally carrying
	// the given annotations.
	newTestCluster := func(annotations map[string]string) *clusterv1.Cluster {
		return &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:        "test-cluster",
				Namespace:   "test-ns",
				Annotations: annotations,
			},
			Spec: clusterv1.ClusterSpec{
				Topology: &clusterv1.Topology{},
			},
		}
	}

	testCases := []struct {
		name               string
		cluster            *clusterv1.Cluster
		hookResponse       *runtimehooksv1.BeforeClusterDeleteResponse
		wantHookToBeCalled bool
		wantResult         ctrl.Result
		wantOkToDelete     bool
		wantErr            bool
	}{
		{
			name:               "should apply the ok-to-delete annotation if the BeforeClusterDelete hook returns a non-blocking response",
			cluster:            newTestCluster(nil),
			hookResponse:       nonBlockingResponse,
			wantResult:         ctrl.Result{},
			wantHookToBeCalled: true,
			wantOkToDelete:     true,
			wantErr:            false,
		},
		{
			name:               "should requeue if the BeforeClusterDelete hook returns a blocking response",
			cluster:            newTestCluster(nil),
			hookResponse:       blockingResponse,
			wantResult:         ctrl.Result{RequeueAfter: time.Duration(10) * time.Second},
			wantHookToBeCalled: true,
			wantOkToDelete:     false,
			wantErr:            false,
		},
		{
			name:               "should fail if the BeforeClusterDelete hook returns a failure response",
			cluster:            newTestCluster(nil),
			hookResponse:       failureResponse,
			wantResult:         ctrl.Result{},
			wantHookToBeCalled: true,
			wantOkToDelete:     false,
			wantErr:            true,
		},
		{
			name: "should succeed if the ok-to-delete annotation is already present",
			cluster: newTestCluster(map[string]string{
				// If the annotation is already set the hook should not be
				// called during cluster delete.
				runtimev1.OkToDeleteAnnotation: "",
			}),
			// A blocking response should not matter here as the hook should
			// never be called; using one to enforce the point.
			hookResponse:       blockingResponse,
			wantResult:         ctrl.Result{},
			wantHookToBeCalled: false,
			wantOkToDelete:     true,
			wantErr:            false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)

			fakeClient := fake.NewClientBuilder().WithObjects(tc.cluster).Build()
			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
				WithCatalog(catalog).
				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
					gvh: tc.hookResponse,
				}).
				Build()

			r := &Reconciler{
				Client:        fakeClient,
				APIReader:     fakeClient,
				RuntimeClient: fakeRuntimeClient,
			}

			res, err := r.reconcileDelete(ctx, tc.cluster)
			if tc.wantErr {
				g.Expect(err).NotTo(BeNil())
				return
			}
			g.Expect(err).To(BeNil())
			g.Expect(res).To(Equal(tc.wantResult))
			g.Expect(hooks.IsOkToDelete(tc.cluster)).To(Equal(tc.wantOkToDelete))
			g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.BeforeClusterDelete) == 1).To(Equal(tc.wantHookToBeCalled))
		})
	}
}

// TestClusterReconciler_deleteClusterClass tests the correct deletion behaviour for a ClusterClass with references in existing Clusters.
// In this case deletion of the ClusterClass should be blocked by the webhook.
func TestClusterReconciler_deleteClusterClass(t *testing.T) {
Expand Down
Loading