Replication: Add e2e tests for workloads-related rbac objects
Signed-off-by: David Festal <[email protected]>
davidfestal committed Mar 30, 2023
1 parent 533310e commit cf8a625
Showing 1 changed file with 200 additions and 3 deletions.
203 changes: 200 additions & 3 deletions test/e2e/reconciler/cache/replication_test.go
@@ -30,6 +30,7 @@ import (
"github.com/kcp-dev/logicalcluster/v3"
"github.com/stretchr/testify/require"

rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -64,6 +65,10 @@ var scenarios = []testScenario{
{"TestReplicateAPIResourceSchemaNegative", replicateAPIResourceSchemaNegativeScenario},
{"TestReplicateWorkspaceType", replicateWorkspaceTypeScenario},
{"TestReplicateWorkspaceTypeNegative", replicateWorkspaceTypeNegativeScenario},
{"TestReplicateWorkloadsClusterRole", replicateWorkloadsClusterRoleScenario},
{"TestReplicateWorkloadsClusterRoleNegative", replicateWorkloadsClusterRoleNegativeScenario},
{"TestReplicateWorkloadsClusterRoleBinding", replicateWorkloadsClusterRoleBindingScenario},
{"TestReplicateWorkloadsClusterRoleBindingNegative", replicateWorkloadsClusterRoleBindingNegativeScenario},
}
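
For context, a table like this is normally consumed by a table-driven runner elsewhere in this file. A rough sketch of what that looks like — the `testScenario` field names (`name`, `work`) are assumptions inferred from the composite literals above, not confirmed by this diff:

```go
// Sketch of a table-driven runner for the scenarios above; the actual harness
// lives elsewhere in replication_test.go and may differ. The field names
// "name" and "work" are assumed.
for _, scenario := range scenarios {
	scenario := scenario // capture the range variable for the parallel subtest
	t.Run(scenario.name, func(t *testing.T) {
		t.Parallel()
		scenario.work(ctx, t, server, kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient)
	})
}
```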

// disruptiveScenarios contains a list of scenarios that will be run in a private environment
@@ -330,7 +335,9 @@ func replicateResource(ctx context.Context, t *testing.T,
kind string, /*kind for the given resource*/
gvr schema.GroupVersionResource, /*gvr for the given resource*/
res runtime.Object, /*a strongly typed resource object that will be created*/
- resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+ resWithModifiedSpec runtime.Object, /*a strongly typed resource object with only its spec modified; used for an update*/
+ prepares ...func(*replicateResourceScenario), /*optional hooks that prepare the scenario (e.g. create prerequisite objects) before replication is verified*/
+ ) {
t.Helper()

orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -343,6 +350,10 @@
resourceName := resMeta.GetName()
scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}

for _, prepare := range prepares {
prepare(scenario)
}

t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
scenario.CreateSourceResource(ctx, t, res)
t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -383,7 +394,9 @@ func replicateResourceNegative(ctx context.Context, t *testing.T,
kind string, /*kind for the given resource*/
gvr schema.GroupVersionResource, /*gvr for the given resource*/
res runtime.Object, /*a strongly typed resource object that will be created*/
- resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+ resWithModifiedSpec runtime.Object, /*a strongly typed resource object with only its spec modified; used for an update*/
+ prepares ...func(*replicateResourceScenario), /*optional hooks that prepare the scenario (e.g. create prerequisite objects) before replication is verified*/
+ ) {
t.Helper()

orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -396,6 +409,10 @@
resourceName := resMeta.GetName()
scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}

for _, prepare := range prepares {
prepare(scenario)
}

t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
scenario.CreateSourceResource(ctx, t, res)
t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -486,6 +503,14 @@ type replicateResourceScenario struct {
cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface
}

func (b *replicateResourceScenario) CreateAdditionalResource(ctx context.Context, t *testing.T, res runtime.Object, kind string, gvr schema.GroupVersionResource) {
t.Helper()
resUnstructured, err := toUnstructured(res, kind, gvr)
require.NoError(t, err)
_, err = b.kcpShardClusterDynamicClient.Resource(gvr).Cluster(b.cluster.Path()).Create(ctx, resUnstructured, metav1.CreateOptions{})
require.NoError(t, err)
}

func (b *replicateResourceScenario) CreateSourceResource(ctx context.Context, t *testing.T, res runtime.Object) {
t.Helper()
resUnstructured, err := toUnstructured(res, b.kind, b.gvr)
@@ -678,14 +703,22 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context.
}
unstructured.RemoveNestedField(originalResource.Object, "metadata", "resourceVersion")
unstructured.RemoveNestedField(cachedResource.Object, "metadata", "resourceVersion")

// TODO(davidfestal): find out why the generation is not the same, especially for rbacv1 objects. Is it a
// characteristic of all internal KCP resources (which are not backed by CRDs)?
if b.gvr.Group == rbacv1.SchemeGroupVersion.Group {
unstructured.RemoveNestedField(originalResource.Object, "metadata", "generation")
unstructured.RemoveNestedField(cachedResource.Object, "metadata", "generation")
}

unstructured.RemoveNestedField(cachedResource.Object, "metadata", "annotations", genericapirequest.AnnotationKey)
if cachedStatus, ok := cachedResource.Object["status"]; ok && (cachedStatus == nil || len(cachedStatus.(map[string]interface{})) == 0) {
// TODO: worth investigating:
// for some reason cached resources have an empty status set whereas the original resources don't
unstructured.RemoveNestedField(cachedResource.Object, "status")
}
if diff := cmp.Diff(cachedResource.Object, originalResource.Object); len(diff) > 0 {
return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original", b.gvr, cluster, cachedResourceMeta.GetName())
return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original: %s", b.gvr, cluster, cachedResourceMeta.GetName(), diff)
}
return true, ""
}, wait.ForeverTestTimeout, 100*time.Millisecond)
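
The empty-status special case above treats a `status` key that is present but nil, or present as an empty map, as if no status were set. Read in isolation, the predicate amounts to the following — a hypothetical helper for illustration only, not part of the test code:

```go
// isEmptyStatus mirrors the condition above: a "status" field that is present
// but nil, or an empty map, counts as empty. It deliberately returns false for
// non-map status values instead of type-asserting unconditionally.
func isEmptyStatus(obj map[string]interface{}) bool {
	status, ok := obj["status"]
	if !ok {
		return false
	}
	if status == nil {
		return true
	}
	m, isMap := status.(map[string]interface{})
	return isMap && len(m) == 0
}
```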
@@ -732,3 +765,167 @@ func createCacheClientConfigForEnvironment(ctx context.Context, t *testing.T, kc
require.NoError(t, err)
return cacheServerRestConfig
}

// replicateWorkloadsClusterRoleScenario tests if a ClusterRole related to the workloads API is propagated to the cache server.
// The test exercises creation, modification and removal of the ClusterRole object.
func replicateWorkloadsClusterRoleScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
t.Helper()
replicateResource(ctx,
t,
server,
kcpShardClusterDynamicClient,
cacheKcpClusterDynamicClient,
"",
"ClusterRole",
rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
&rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: withPseudoRandomSuffix("syncer"),
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"sync"},
APIGroups: []string{"workload.kcp.io"},
Resources: []string{"synctargets"},
ResourceNames: []string{"asynctarget"},
},
},
},
nil,
)
}

// replicateWorkloadsClusterRoleNegativeScenario checks if a modified or even deleted cached ClusterRole (related to the workloads API) will be reconciled back to match the original object.
func replicateWorkloadsClusterRoleNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
t.Helper()
replicateResourceNegative(
ctx,
t,
server,
kcpShardClusterDynamicClient,
cacheKcpClusterDynamicClient,
"",
"ClusterRole",
rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
&rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: withPseudoRandomSuffix("syncer"),
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"sync"},
APIGroups: []string{"workload.kcp.io"},
Resources: []string{"synctargets"},
ResourceNames: []string{"asynctarget"},
},
},
},
nil,
)
}

// replicateWorkloadsClusterRoleBindingScenario tests if a ClusterRoleBinding related to the workloads API is propagated to the cache server.
// The test exercises creation, modification and removal of the ClusterRoleBinding object.
func replicateWorkloadsClusterRoleBindingScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
t.Helper()

clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: withPseudoRandomSuffix("syncer"),
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"sync"},
APIGroups: []string{"workload.kcp.io"},
Resources: []string{"synctargets"},
ResourceNames: []string{"asynctarget"},
},
},
}

replicateResource(ctx,
t,
server,
kcpShardClusterDynamicClient,
cacheKcpClusterDynamicClient,
"",
"ClusterRoleBinding",
rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
&rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: withPseudoRandomSuffix("syncer"),
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.SchemeGroupVersion.Group,
Kind: "ClusterRole",
Name: clusterRole.Name,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "kcp-syncer-0000",
Namespace: "kcp-syncer-namespace",
},
},
},
nil,
func(scenario *replicateResourceScenario) {
t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
},
)
}

// replicateWorkloadsClusterRoleBindingNegativeScenario checks if a modified or even deleted cached ClusterRoleBinding (related to the workloads API) will be reconciled back to match the original object.
func replicateWorkloadsClusterRoleBindingNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
t.Helper()

clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: withPseudoRandomSuffix("syncer"),
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"sync"},
APIGroups: []string{"workload.kcp.io"},
Resources: []string{"synctargets"},
ResourceNames: []string{"asynctarget"},
},
},
}

replicateResourceNegative(
ctx,
t,
server,
kcpShardClusterDynamicClient,
cacheKcpClusterDynamicClient,
"",
"ClusterRoleBinding",
rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
&rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: withPseudoRandomSuffix("syncer"),
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.SchemeGroupVersion.Group,
Kind: "ClusterRole",
Name: clusterRole.Name,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "kcp-syncer-0000",
Namespace: "kcp-syncer-namespace",
},
},
},
nil,
func(scenario *replicateResourceScenario) {
t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
},
)
}
