Feat: Github action e2e Testing and bash script
Adds a new e2e CI job based on a bash script. The script runs a series of
checks to verify that the operator works as intended. It is not a deep
test suite; it simulates how a typical user would use kotary, and is
mainly meant to confirm that the operator behaves correctly.
ARPIN committed Jan 15, 2024
1 parent 6281b31 commit ffe1829
Showing 13 changed files with 151 additions and 134 deletions.
81 changes: 15 additions & 66 deletions .github/workflows/e2eCI.yaml
@@ -11,6 +11,7 @@ jobs:
kubernetes:
- 1.21
- 1.23
- 1.24
- 1.26
steps:
- name: checkout
@@ -20,6 +21,11 @@ jobs:
uses: actions/setup-go@v4
with:
go-version: '1.20'

- name: Docker Build image
run: |
docker build -t "ci/kotary:${{ matrix.kubernetes }}" .
docker image ls | grep ci
- name: Create cluster KinD
uses: helm/[email protected]
@@ -30,78 +36,21 @@
run: |
kubectl cluster-info --context kind-chart-testing
echo " current-context:" $(kubectl config current-context)
kubectl get all --all-namespaces
- name: Load docker image into kind cluster
run: kind load docker-image "ci/kotary:${{ matrix.kubernetes }}" --name chart-testing

- name: Set GOROOT
run: echo "export GOROOT=/opt/hostedtoolcache/go/1.20/x64" >> $GITHUB_ENV

- name: Deploy CRD
run: kubectl apply -f artifacts/crd.yml

- name: Deploy kotary operator
run: kubectl apply -f artifacts/deployment.yml

- name: Create kotary ConfigMap
run: kubectl -n kube-system create -f e2e/KotaryService/ConfigMap.yaml

- name: Golden test 0 - Create NS 'kotarytest' and add a ResourceQuota
run: |
kubectl create ns kotarytest
while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null ; do sleep 5; echo "Waiting for Kotary pod to be Running...."; done
kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest
if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
kubectl get resourcequota -n kotarytest
- name: Golden test 1 - adding pods and verifying resources used
run: |
kubectl apply -f e2e/KindConfig/pod1.yml -n kotarytest
echo "<<<<<<<<< Ressource quota should be cpu: 100/660m memory: 0.25/1Gi >>>>>>>>>"
kubectl get resourcequota -n kotarytest
kubectl apply -f e2e/KindConfig/pod2.yml -n kotarytest
echo "<<<<<<<<< Ressource quota should be cpu: 200/660m memory: 0.5/1Gi >>>>>>>>>"
kubectl get resourcequota -n kotarytest
kubectl apply -f e2e/KindConfig/pod3.yml -n kotarytest
echo "<<<<<<<<< Ressource quota should be cpu: 350/660m memory: 0.75/1Gi >>>>>>>>>"
kubectl get resourcequota -n kotarytest
kubectl apply -f e2e/KindConfig/pod4.yml -n kotarytest
echo "<<<<<<<<< Ressource quota should be cpu: 500/660m memory: 1/1Gi >>>>>>>>>"
kubectl get resourcequota -n kotarytest
- name: Golden test 2 - trying to add a pod but no resource left in NS. (Should return an error)
run: if kubectl apply -f e2e/KindConfig/pod5.yml -n kotarytest ; then exit 1 ; fi

- name: Golden test 3 - Upscale
- name: Edit kotary deployment
run: |
kubectl apply -f e2e/KotaryService/QuotaClaimUp.yaml -n kotarytest
if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
kubectl get resourcequota -n kotarytest
version=${{ matrix.kubernetes }}
sed -i -E -e "s/cagip\/kotary:v[0-9.]+/ci\/kotary:$version/g" -e "s/Always/Never/g" artifacts/deployment.yml
cat artifacts/deployment.yml
- name: Golden test 4 - Upscale (REJECTED)
run: |
kubectl apply -f e2e/KotaryService/QuotaClaimToBig.yaml -n kotarytest
if ! kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
kubectl get resourcequota -n kotarytest
- name: Golden test 5 - Downscale
run: |
kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest
if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
kubectl get resourcequota -n kotarytest
- name: Golden test 6 - Downscale (PENDING)
run: |
kubectl apply -f e2e/KotaryService/QuotaClaimPending.yaml -n kotarytest
kubectl get quotaclaim -n kotarytest
kubectl get resourcequota -n kotarytest
if ! kubectl get quotaclaim -n kotarytest | grep PENDING ; then exit 1 ; fi
kubectl get resourcequota -n kotarytest
kubectl delete pod -n kotarytest podtest-4
echo "<<<<<<<<< Deleted a pod, the pending claim should now be accepted >>>>>>>>>"
if kubectl get quotaclaim -n kotarytest | grep PENDING ; then exit 1 ; fi
kubectl get resourcequota -n kotarytest
kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest
- name: Golden test 7 - Check ResourceQuota is computed correctly
run: |
kubectl apply -f e2e/KindConfig/badpod.yml
echo "<<<<<<<<< Ressource quota should be cpu: 350/660m memory: 0.75/1Gi >>>>>>>>>"
kubectl get resourcequota -n kotarytest
- name: run tests
run: ./e2e/e2e.sh
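For reference, the same flow can be reproduced outside CI. A minimal sketch, assuming Docker, kind, and kubectl are installed; the image tag and cluster name mirror the workflow above, and the kind-cluster-<version>.yaml naming is an assumption for versions other than the 1.24 config added in this commit:

```bash
#!/usr/bin/env bash
set -eo pipefail

K8S_VERSION="1.26" # any version from the matrix above

# Build the operator image with the same tag the workflow uses.
docker build -t "ci/kotary:${K8S_VERSION}" .

# Create a kind cluster and preload the image, since kind nodes
# cannot pull images that only exist in the local Docker daemon.
kind create cluster --name chart-testing --config "e2e/KindConfig/kind-cluster-${K8S_VERSION}.yaml"
kind load docker-image "ci/kotary:${K8S_VERSION}" --name chart-testing

# Point the deployment at the CI image and disable pulling (the image is preloaded).
sed -i -E -e "s/cagip\/kotary:v[0-9.]+/ci\/kotary:${K8S_VERSION}/g" -e "s/Always/Never/g" artifacts/deployment.yml

# Run the e2e suite.
./e2e/e2e.sh
```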
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -1,4 +1,4 @@
name: testci
name: test

on:
push:
2 changes: 1 addition & 1 deletion e2e/KindConfig/badpod.yml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: badpod-test
name: badpod-test
spec:
containers:
- name: hw-container1
11 changes: 11 additions & 0 deletions e2e/KindConfig/kind-cluster-1.24.yaml
@@ -0,0 +1,11 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: "kindest/node:v1.25.11"
- role: worker
image: "kindest/node:v1.25.11"
- role: worker
image: "kindest/node:v1.25.11"
- role: worker
image: "kindest/node:v1.25.11"
2 changes: 1 addition & 1 deletion e2e/KindConfig/pod1.yml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: podtest-1
name: podtest-1
spec:
containers:
- name: hw-container1
2 changes: 1 addition & 1 deletion e2e/KindConfig/pod2.yml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: podtest-2
name: podtest-2
spec:
containers:
- name: hw-container1
2 changes: 1 addition & 1 deletion e2e/KindConfig/pod3.yml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: podtest-3
name: podtest-3
spec:
containers:
- name: hw-container1
2 changes: 1 addition & 1 deletion e2e/KindConfig/pod4.yml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: podtest-4
name: podtest-4
spec:
containers:
- name: hw-container1
2 changes: 1 addition & 1 deletion e2e/KindConfig/pod5.yml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: podtest-5
name: podtest-5
spec:
containers:
- name: hw-container1
154 changes: 105 additions & 49 deletions e2e/e2e.sh
@@ -1,92 +1,148 @@
#!/bin/bash

#
# A FEW NOTES ABOUT THIS SCRIPT
#
# This script is used to test the kotary operator: it replicates what really
# simple, basic usage of kotary should look like.
# It is not an exhaustive test suite for the operator; it is closer to
# an end-to-end test, because each test depends on the previous one and the script fails on the first error encountered.
#
# This script should be used to verify that every basic feature of kotary works correctly.
# It is not meant to be used to debug a particular part of the operator.
#
# HOW THIS SCRIPT WORKS:
#
# First it deploys the CRDs necessary for the operator, then deploys the operator in your current cluster.
# (If you are using kind, don't forget to load your image into the cluster.)
# Then it runs a series of tests described in the code below.
# If any error or unwanted behavior occurs, the script stops and runs the CleanUp function
# to remove what was created during the test.
# Note that you can uncomment some lines in the CleanUp function depending on your needs.
# If everything goes as intended, the script exits with code 0 and cleans up the environment.
#
# /!\ This script is in no way perfect; feel free to add new tests at the end of the script if you
# believe it needs more coverage.
#
# Prerequisites for running this script:
# - kubectl installed
# - a connection to a dev cluster
# - jq installed
#
# @author: Léo ARPIN (ty for reading :D )
#

set -eo pipefail

# Bash colors for a more enjoyable script
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;36m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color

#YOU NEED TO BE CONNECTED TO A K8S CLUSTER TO RUN THIS#

NS=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 12) #generate random namespace
ROOT=$(git rev-parse --show-toplevel) #get root of git repo

NS=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 12) #generate random namespace to avoid conflicts
ROOT=$(git rev-parse --show-toplevel) #get rootpath of git repo
QUOTACLAIM='resourcequotaclaims.cagip.github.com'

# Clean up function. If you want to totally remove kotary after the test, uncomment the lines
CleanUp () {
echo -e "\\n${BLUE}Starting CleanUp ${NC}\\n"
kubectl delete ns $NS
#kubectl delete configmap -n kube-system kotary-config
#kubectl delete deployment -n kube-system kotary
#kubectl delete crd resourcequotaclaims.cagip.github.com
rm -f temp.json #-f: do not fail if the file was never created
}

trap CleanUp EXIT #EXIT also fires on 'set -e' failures; an additional ERR trap would run CleanUp twice
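With `set -e` in effect, any failing command terminates the script, and bash still runs the EXIT trap on the way out, so the random namespace does not leak. A stripped-down sketch of the pattern (the namespace name is illustrative):

```bash
#!/bin/bash
set -eo pipefail

cleanup () {
  echo "cleaning up..."
  # --ignore-not-found makes the cleanup safe even if creation never happened
  kubectl delete ns demo-ns --ignore-not-found
}
trap cleanup EXIT # fires on normal exit and on 'set -e' failures alike

kubectl create ns demo-ns
false # simulated failure: the script stops here, yet cleanup still runs
```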

echo -e "${BLUE}====== Starting SetUp ======${NC} \\n"

if ! kubectl apply -f artifacts/crd.yml ;
#apply CRDs; if an error occurs it probably means the user is not connected to a cluster
if ! kubectl apply -f $ROOT/artifacts/crd.yml ;
then echo -e "\\n${RED}CONNECT TO A CLUSTER BEFORE RUNNING THIS EXECUTABLE${NC}\\n" && exit 1 ; fi

kubectl apply -f artifacts/deployment.yml
kubectl -n kube-system create -f $ROOT/e2e/KotaryService/ConfigMap.yaml
#deploy the operator
kubectl apply -f $ROOT/artifacts/deployment.yml
kubectl apply -f $ROOT/e2e/KotaryService/ConfigMap.yaml -n kube-system

kubectl create ns $NS
while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null ; do echo -e "${BLUE}.... Waiting for Kotary pod to be Running ....${NC}" ; sleep 2; done

#This is the test part
echo -e "\\n${BLUE}====== Starting Tests ======${NC}\\n"
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS
if kubectl get quotaclaim -n $NS | grep REJECTED ;
then echo -e "\\n${RED}FAILED! Error during Claim test: the Claim is REJECTED. It should be accepted ${NC}" && CleanUp && exit 1 ; fi
kubectl get resourcequota -n $NS

echo -e "\\n ${PURPLE}-- Applying pods in NS --${NC}" && sleep 1
#Apply an rqc and verify that the claim is accepted (an accepted claim is deleted from the queue, so it returns nothing)
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
phase=$(jq -r ' .items[].status.phase' temp.json) #get the status of the claim; if the claim has been accepted, $phase will be empty
if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
then echo -e "\\n${RED}FAILED! Error during Claim test: the Claim is $phase. It should be accepted ${NC}" && exit 1 ; fi
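The accepted/rejected/pending checks below all repeat this fetch-and-compare step, so it could be factored into a small helper. A hypothetical refactor, not part of this commit (the function name is illustrative):

```bash
# Print the phase of the quota claim in a namespace.
# An accepted claim is removed from the queue, so the output is empty.
get_claim_phase () {
  kubectl get "$QUOTACLAIM" -n "$1" -o json | jq -r '.items[].status.phase // empty'
}

# Usage: fail unless the claim was accepted (empty phase = accepted).
phase=$(get_claim_phase "$NS")
if [ -n "$phase" ]; then
  echo "claim is $phase, expected it to be accepted" && exit 1
fi
```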

#apply pods in the NS in order to fill both cpu and memory resources
#adding up the specs of all the pods gives cpu: 500m/660m, memory: 1Gi/1Gi
#if the result is different it should be considered an error
echo -e "\\n ${PURPLE}-- Applying pods in NS --${NC}" && sleep 3
kubectl apply -f $ROOT/e2e/KindConfig/pod1.yml -n $NS
kubectl apply -f $ROOT/e2e/KindConfig/pod2.yml -n $NS
kubectl apply -f $ROOT/e2e/KindConfig/pod3.yml -n $NS
kubectl apply -f $ROOT/e2e/KindConfig/pod4.yml -n $NS
echo -e "\\n ${PURPLE}Should be 'cpu: 500m/660m, memory: 1000Mi/1Gi'${NC}"
kubectl get resourcequota -n $NS
if ! kubectl get resourcequota -n $NS | grep "cpu: 500m/660m, memory: 1000Mi/1Gi";
then echo -e "\\n${RED}FAILED! Error: the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Trying to add a pod over max ressources (must be forbidden) --${NC}" && sleep 1
if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ;
then echo -e "\\n${RED}FAILLED! error durring Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && CleanUp && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"


echo -e "\\n ${PURPLE}-- Scale UP --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS
if kubectl get quotaclaim -n $NS | grep REJECTED ;
then echo -e "\\n${RED}FAILED! Error during Scale UP: the Claim has been rejected${NC}\\n" && kubectl get quotaclaim -n $NS && CleanUp && exit 1 ; fi
# Verify that trying to add a pod with resources exceeding what is left to use results in an error
echo -e "\\n ${PURPLE}-- Trying to add a pod over max ressources (must be forbidden) --${NC}" && sleep 3
if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ; # if the command does NOT result in an error then the test fails
then echo -e "\\n${RED}FAILLED! error durring Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Scale UP(to big) --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS
if ! kubectl get quotaclaim -n $NS | grep REJECTED ;
then echo -e "\\n${RED}FAILLED! error durring Scale UP(to big): the Claim has not been rejected${NC}" && kubectl get quotaclaim -n $NS && CleanUp && exit 1 ; fi
# Apply a new quotaclaim to scale up the resources
# and verify that the claim is accepted (nothing should appear in the 'status' field)
echo -e "\\n ${PURPLE}-- Scale UP --${NC}"
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS && sleep 3 #apply the new rqc
kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
phase=$(jq -r ' .items[].status.phase' temp.json) #get the status of the claim; if the claim has been accepted, $phase will be empty
if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
then echo -e "\\n${RED}FAILED! Error during Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"


echo -e "\\n ${PURPLE}-- Scale Down (under what is curently used --> PENDING) --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS
if ! kubectl get quotaclaim -n $NS | grep PENDING ;
then echo -e "\\n${RED}FAILLED! error durring pending test: the Claim is not set to PENDING${NC}" && kubectl get resourcequota -n $NS && CleanUp && exit 1 ; fi
# Apply a new quotaclaim to scale up the resources, but this claim is too big:
# Kotary should see that what is requested is way too much and should reject the claim.
# Assert that the rqc is rejected.
echo -e "\\n ${PURPLE}-- Scale UP (too big) --${NC}"
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS && sleep 3
kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
phase=$(jq -r ' .items[].status.phase' temp.json)
if [ "$phase" != "REJECTED" ]; #The claim MUST be rejected, else it is an error
then echo -e "\\n${RED}FAILED! Error during Scale UP (too big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n" && sleep 3

# Apply a new quotaclaim to scale down the resources:
# Kotary should see that what is requested is lower than what is currently used.
# Assert that the rqc is set to PENDING.
echo -e "\\n ${PURPLE}-- Scale Down (under what is currently used --> PENDING) --${NC}"
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS && sleep 3
kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
phase=$(jq -r ' .items[].status.phase' temp.json)
if [ "$phase" != "PENDING" ]; #The claim MUST be pending, else it is an error
then echo -e "\\n${RED}FAILED! Error during pending test: the Claim is not set to PENDING${NC}" && kubectl get resourcequota -n $NS && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 1
kubectl delete pod -n $NS podtest-4 && sleep 1
# Reduce the current usage of cpu and memory by deleting a pod
echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 3
kubectl delete pod -n $NS podtest-4 && sleep 3

if kubectl get quotaclaim -n $NS | grep PENDING ;
then echo -e "\\n${RED}FAILED! Error during pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get quotaclaim -n $NS && CleanUp && exit 1; fi
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS
# Assert that, after deletion of the pod, the 'pending' claim is now accepted
kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
phase=$(jq -r ' .items[].status.phase' temp.json)
if [ "$phase" != "" ]; #The status must be empty because the claim should now be accepted. (remember: empty=accepted)
then echo -e "\\n${RED}FAILED! Error during pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Adding a pod with bad image --> should not impact the ressources used --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KindConfig/badpod.yml -n $NS
if kubectl get resourcequota -n $NS | grep "350m/660m" && grep "750Mi/1Gi" ;
then echo -e "\\n${RED}FAILLED! error durring resource test: Not RUNNING pod is not ignored when calculating the resourcequota${NC}" && CleanUp && exit 1; fi
echo -e "${GREEN} -- OK --${NC}\\n"


echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"

CleanUp

echo -e "\\n${BLUE}Done!${NC}"
echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"
8 changes: 4 additions & 4 deletions go.mod
@@ -52,11 +52,11 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.5.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
