diff --git a/.gitignore b/.gitignore index 383e4cd2..73bacf00 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,17 @@ build/ sel-screenshots/* geckodriver.log .vscode/* +*.tar.gz + +# vim +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# airgapped +images.txt +charms.txt +retagged-images.txt diff --git a/gcloud-publish-file.sh b/gcloud-publish-file.sh new file mode 100755 index 00000000..f0911907 --- /dev/null +++ b/gcloud-publish-file.sh @@ -0,0 +1,41 @@ +set -eu + +# This script is responsible for the following: +# 1. Upload a file $ARTIFACT to a $GS_URL +# 2. Creates a service account key from $GCLOUD_SA +# 3. Creates a signed URL for the uploaded file with the SA key created +# from the previous step +# +# The script expects that the user has +# 1. logged in to the gcloud CLI +# 2. selected in gcloud the project they want to use +# 3. created a service account key, for pushing to bucket +# +# Some helper commands for the above are the following: +# +# gcloud auth login --no-launch-browser +# gcloud projects list +# gcloud config set project PROJECT_ID +# gcloud iam service-accounts keys create \ +# --iam-account=ckf-artifacts-storage-sa@thermal-creek-391110.iam.gserviceaccount.com +# signing-sa-key.json \ +# +# For more information you can take a look at the following links +# https://cloud.google.com/iam/docs/keys-create-delete#iam-service-account-keys-create-gcloud +# https://cloud.google.com/storage/docs/access-control/signing-urls-with-helpers + +echo $FILE +echo $GS_URL +echo $GCLOUD_SA_KEY + +FILE_URL=$GS_URL/$(basename $FILE) + +echo "Copying \"$FILE\" to \"$GS_URL\"" +gcloud storage cp -r $FILE $FILE_URL +echo "Successfully uploaded!" 
+ +echo "Creating signed url" +gcloud storage sign-url \ + --private-key-file=$GCLOUD_SA_KEY \ + --duration=7d \ + $FILE_URL diff --git a/scripts/airgapped/README.md b/scripts/airgapped/README.md new file mode 100644 index 00000000..97b49871 --- /dev/null +++ b/scripts/airgapped/README.md @@ -0,0 +1,85 @@ +# Airgap Utility Scripts + +This directory contains bash and python scripts that are useful for performing +an airgapped installation. These scripts could either be used independently +to create airgap artifacts or via our testing scripts. + +We'll document some use-case scenarios here for the different scripts. + +## Prerequisites + +To use the scripts in this directory you'll need to install a couple of Python +and Ubuntu packages on the host machine, driving the test (not the LXC machine +that will contain the airgapped environment). +``` +pip3 install -r requirements.txt +sudo apt install pigz +sudo snap install docker +sudo snap install yq +sudo snap install jq +``` + +## Get list of all images from a bundle definition + +Use the following script to get the list of all OCI images used by a bundle. +This script makes the following assumptions: +1. Every charm in the bundle has a `_github_repo_name` metadata field, + containing the repository name of the charm (the org is assumed to be + canonical) +2. Every charm in the bundle has a `_github_repo_branch` metadata field, + containing the branch of the source code +3. There is a script called `tools/get-images.sh` in each repo that gathers + the images for that repo + +```bash +./scripts/airgapped/get-all-images.sh releases/1.7/stable/kubeflow/bundle.yaml > images.txt +``` + +## Pull images to docker cache + +We have a couple of scripts that are using `docker` commands to pull images, +retag them and compress them in a final `tar.gz` file. Those scripts require +that the images are already in docker's cache. This script pulls a list of images +provided by a txt file. 
+ +```bash +python3 scripts/airgapped/save-images-to-cache.py images.txt +``` + +## Retag images to cache + +In airgap environments users push their images in their own registries. So we'll +need to rename prefixes like `docker.io` to the server that users would use. + +Note that this script will produce by default a `retagged-images.txt` file, +containing the names of all re-tagged images. + +```bash +python3 scripts/airgapped/retag-images-to-cache.py images.txt +``` + +Or if you'd like to use a different prefix, i.e. `registry.example.com` +```bash +python3 scripts/airgapped/retag-images-to-cache.py --new-registry=registry.example.com images.txt +``` + +## Save images to tar + +Users will need to inject the OCI images in their registry in an airgap +environment. For this we'll be preparing a `tar.gz` file with all OCI images. + +```bash +python3 scripts/airgapped/save-images-to-tar.py retagged-images.txt +``` + +## Save charms to tar + +Users in an airgap env will need to deploy charms from local files. To assist this +we'll use this script to create a `tar.gz` containing all the charms referenced +in a bundle. + +```bash +BUNDLE_PATH=releases/1.7/stable/kubeflow/bundle.yaml + +python3 scripts/airgapped/save-charms-to-tar.py $BUNDLE_PATH +``` diff --git a/scripts/airgapped/__init__.py b/scripts/airgapped/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/airgapped/get-all-images.sh b/scripts/airgapped/get-all-images.sh new file mode 100755 index 00000000..2d0b6ab8 --- /dev/null +++ b/scripts/airgapped/get-all-images.sh @@ -0,0 +1,50 @@ +#!/usr/bin/bash +# +# This script parses given bundle file for github repositories and branches. 
Then checks out each +# charm's repository one by one using specified branch and collects images referred by that charm +# using that repository's image collection script +# +BUNDLE_FILE=$1 +IMAGES=() +# retrieve all repositories and branches for CKF +REPOS_BRANCHES=($(yq -r '.applications[] | select(._github_repo_name) | [(._github_repo_name, ._github_repo_branch)] | join(":")' $BUNDLE_FILE | sort --unique)) + +# TODO: We need to not hardcode this and be able to deduce all images from the bundle +# https://github.com/canonical/bundle-kubeflow/issues/789 +RESOURCE_DISPATCHER_BRANCH=track/1.0 +RESOURCE_DISPATCHER_REPO=https://github.com/canonical/resource-dispatcher + +for REPO_BRANCH in "${REPOS_BRANCHES[@]}"; do + IFS=: read -r REPO BRANCH <<< "$REPO_BRANCH" + git clone --branch $BRANCH https://github.com/canonical/$REPO + cd $REPO + IMAGES+=($(bash ./tools/get-images.sh)) + cd - > /dev/null + rm -rf $REPO +done + +# retrieve all repositories and branches for dependencies +DEP_REPOS_BRANCHES=($(yq -r '.applications[] | select(._github_dependency_repo_name) | [(._github_dependency_repo_name, ._github_dependency_repo_branch)] | join(":")' $BUNDLE_FILE | sort --unique)) + +for REPO_BRANCH in "${DEP_REPOS_BRANCHES[@]}"; do + IFS=: read -r REPO BRANCH <<< "$REPO_BRANCH" + git clone --branch $BRANCH https://github.com/canonical/$REPO + cd $REPO + # for dependencies only retrieve workload containers from metadata.yaml + IMAGES+=($(find -type f -name metadata.yaml -exec yq '.resources | to_entries | map(select(.value.upstream-source != null)) | .[] | .value | ."upstream-source"' {} \;)) + cd - > /dev/null + rm -rf $REPO +done + +# manually retrieve resource-dispatcher +git clone --branch $RESOURCE_DISPATCHER_BRANCH $RESOURCE_DISPATCHER_REPO +cd resource-dispatcher +IMAGES+=($(bash ./tools/get-images.sh)) +cd .. 
+rm -rf resource-dispatcher + +# ensure we only show unique images +IMAGES=($(echo "${IMAGES[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) + +# print full list of images +printf "%s\n" "${IMAGES[@]}" diff --git a/scripts/airgapped/images/overview.png b/scripts/airgapped/images/overview.png new file mode 100644 index 00000000..390161a5 Binary files /dev/null and b/scripts/airgapped/images/overview.png differ diff --git a/scripts/airgapped/prerequisites.sh b/scripts/airgapped/prerequisites.sh new file mode 100755 index 00000000..73ffc71a --- /dev/null +++ b/scripts/airgapped/prerequisites.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +set -xe + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +echo "Installing dependencies..." +pip3 install -r $SCRIPT_DIR/requirements.txt +sudo apt update + +echo "Installing Docker" +sudo snap install docker +sudo groupadd docker +sudo usermod -aG docker $USER +sudo snap disable docker +sudo snap enable docker + +echo "Installing parsers" +sudo snap install yq +sudo snap install jq + +echo "Installing pigz for compression" +sudo apt install pigz diff --git a/scripts/airgapped/push-images-to-registry.py b/scripts/airgapped/push-images-to-registry.py new file mode 100644 index 00000000..3ed0b59b --- /dev/null +++ b/scripts/airgapped/push-images-to-registry.py @@ -0,0 +1,27 @@ +import argparse +import logging + +import docker + +from utils import get_images_list_from_file + +docker_client = docker.client.from_env() + +log = logging.getLogger(__name__) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Push images from list.") + parser.add_argument("images") + args = parser.parse_args() + + images_ls = get_images_list_from_file(args.images) + images_len = len(images_ls) + new_images_ls = [] + for idx, image_nm in enumerate(images_ls): + log.info("%s/%s", idx + 1, images_len) + + logging.info("Pushing image: %s", image_nm) + docker_client.images.push(image_nm) + + 
log.info("Successfully pushed all images!") diff --git a/scripts/airgapped/requirements.txt b/scripts/airgapped/requirements.txt new file mode 100644 index 00000000..90fc9ca3 --- /dev/null +++ b/scripts/airgapped/requirements.txt @@ -0,0 +1,2 @@ +docker +PyYAML diff --git a/scripts/airgapped/retag-images-to-cache.py b/scripts/airgapped/retag-images-to-cache.py new file mode 100644 index 00000000..5e2c917b --- /dev/null +++ b/scripts/airgapped/retag-images-to-cache.py @@ -0,0 +1,83 @@ +import argparse +import logging + +import docker + +from utils import (delete_file_if_exists, get_images_list_from_file, + get_or_pull_image) + +cli = docker.client.from_env() + +log = logging.getLogger(__name__) + +SHA_TOKEN = "@sha256" + + +def retag_image_with_sha(image): + """Retag the image by using the sha value.""" + log.info("Retagging image digest: %s", image) + repo_digest = image.attrs["RepoDigests"][0] + [repository_name, sha_value] = repo_digest.split("@sha256:") + + tagged_image = "%s:%s" % (repository_name, sha_value) + log.info("Retagging to: %s", tagged_image) + image.tag(tagged_image) + + log.info("Tagged image successfully: %s", tagged_image) + return cli.images.get(tagged_image) + + +def get_retagged_image_name(image_nm: str, new_registry: str) -> str: + """Given an image name replace the repo and use sha as tag.""" + if SHA_TOKEN in image_nm: + log.info("Provided image has sha. Using it's value as tag.") + image_nm = image_nm.replace(SHA_TOKEN, "") + + if len(image_nm.split("/")) == 1: + # docker.io/library image, i.e. ubuntu:22.04 + return "%s/%s" % (new_registry, image_nm) + + if len(image_nm.split("/")) == 2: + # classic docker.io image, i.e. argoproj/workflow-controller + return "%s/%s" % (new_registry, image_nm) + + # There are more than 2 / in the image name. 
Replace first part + # Example image: quay.io/metallb/speaker:v0.13.3 + _, image_nm = image_nm.split("/", 1) + return "%s/%s" % (new_registry, image_nm) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Retag list of images") + parser.add_argument("images") + parser.add_argument("--new-registry", default="172.17.0.2:5000") + parser.add_argument("--retagged-images", default="retagged-images.txt") + # The reason we are using this IP as new registry is because this will end + # up being the IP of the Registry we'll run as a Container. We'll need to + # do docker push <...> so we'll have to use the IP directly, or mess with + # the environment's /etc/hosts file + + args = parser.parse_args() + + images_ls = get_images_list_from_file(args.images) + images_len = len(images_ls) + new_images_ls = [] + for idx, image_nm in enumerate(images_ls): + log.info("%s/%s", idx + 1, images_len) + + retagged_image_nm = get_retagged_image_name( + image_nm, args.new_registry + ) + + img = get_or_pull_image(image_nm) + log.info("%s: Retagging to %s", image_nm, retagged_image_nm) + img.tag(retagged_image_nm) + + new_images_ls.append(retagged_image_nm) + + log.info("Saving the produced list of images.") + delete_file_if_exists(args.retagged_images) + with open(args.retagged_images, "w+") as f: + f.write("\n".join(new_images_ls)) + + log.info("Successfully saved list of images in '%s'", args.retagged_images) diff --git a/scripts/airgapped/save-charms-to-tar.py b/scripts/airgapped/save-charms-to-tar.py new file mode 100644 index 00000000..6c0e2137 --- /dev/null +++ b/scripts/airgapped/save-charms-to-tar.py @@ -0,0 +1,57 @@ +import argparse +import logging +import subprocess + +import os +import yaml + +import utils as airgap_utils + +log = logging.getLogger(__name__) + + +def download_bundle_charms(bundle: dict, no_zip: bool, + skip_resource_dispatcher: bool, + output_tar: str) -> None: + """Given a bundle dict download all the charms using juju download.""" + + 
log.info("Downloading all charms...") + applications = bundle.get("applications") + for app in applications.values(): + subprocess.run(["juju", "download", "--channel", app["channel"], + app["charm"]]) + + # FIXME: https://github.com/canonical/bundle-kubeflow/issues/789 + if not skip_resource_dispatcher: + log.info("Fetching charm of resource-dispatcher.") + subprocess.run(["juju", "download", "--channel", "1.0/stable", + "resource-dispatcher"]) + + if not no_zip: + # python3 download_bundle_charms.py $BUNDLE_PATH --zip_all + log.info("Creating the tar with all the charms...") + cmd = "tar -cv --use-compress-program=pigz -f %s *.charm" % output_tar + subprocess.run(cmd, shell=True) + log.info("Created %s file will all charms.", output_tar) + + log.info("Removing downloaded charms...") + airgap_utils.delete_files_with_extension(os.getcwd(), ".charm") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bundle Charms Downloader") + parser.add_argument("--no-zip", action="store_true") + parser.add_argument("--skip-resource-dispatcher", action="store_true") + parser.add_argument("--output-tar", default="charms.tar.gz") + parser.add_argument("bundle") + args = parser.parse_args() + log.info(args.no_zip) + + bundle_dict = {} + with open(args.bundle, 'r') as file: + bundle_dict = yaml.safe_load(file) + + airgap_utils.delete_file_if_exists(args.output_tar) + download_bundle_charms(bundle_dict, args.no_zip, + args.skip_resource_dispatcher, + args.output_tar) diff --git a/scripts/airgapped/save-images-to-cache.py b/scripts/airgapped/save-images-to-cache.py new file mode 100644 index 00000000..198ba20a --- /dev/null +++ b/scripts/airgapped/save-images-to-cache.py @@ -0,0 +1,22 @@ +import argparse +import logging + +import docker + +from utils import get_images_list_from_file, get_or_pull_image + +cli = docker.client.from_env() + +log = logging.getLogger(__name__) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Pull 
locally list of images") + parser.add_argument("images") + args = parser.parse_args() + + images_ls = get_images_list_from_file(args.images) + images_len = len(images_ls) + for idx, image in enumerate(images_ls): + log.info("%s/%s", idx + 1, images_len) + get_or_pull_image(image) diff --git a/scripts/airgapped/save-images-to-tar.py b/scripts/airgapped/save-images-to-tar.py new file mode 100644 index 00000000..266add61 --- /dev/null +++ b/scripts/airgapped/save-images-to-tar.py @@ -0,0 +1,65 @@ +import argparse +import logging +import os +import subprocess + +import docker + +from utils import (delete_file_if_exists, get_images_list_from_file, + get_or_pull_image) + +cli = docker.client.from_env() + +log = logging.getLogger(__name__) + + +def save_image(image_nm) -> str: + """Given an Image object, save it as tar.""" + get_or_pull_image(image_nm) + file_name = "%s.tar" % image_nm + file_name = file_name.replace("/", "-").replace(":", "-") + if os.path.isfile(file_name): + log.info("Tar '%s' already exists. Skipping...", file_name) + return file_name + + log.info("%s: Saving image to tar '%s'.", image_nm, file_name) + for i in range(10): + # We've seen that sometimes we get socket timeouts. Try 10 times + try: + with open(file_name, "w+b") as f: + subprocess.run(["docker", "save", image_nm], stdout=f) + + logging.info("%s: Saved image to tar '%s'", image_nm, file_name) + return file_name + except Exception as e: + log.error("Failed to create tar file. 
Deleting tar '%s", file_name) + log.error(e) + log.info("Retrying %s/10 to store image to tar '%s'", + i + 1, file_name) + + log.error("Tried 10 times to create tar '%s' and failed: %s", file_name) + delete_file_if_exists(file_name) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Create tar.gz from images") + parser.add_argument("images") + args = parser.parse_args() + + images_ls = get_images_list_from_file(args.images) + images_len = len(images_ls) + tar_files = [] + for idx, image_nm in enumerate(images_ls): + log.info("%s/%s", idx + 1, images_len) + tar_file = save_image(image_nm) + tar_files.append(tar_file) + + log.info("Creating final tar.gz file. Will take a while...") + subprocess.run(["tar", "-cv", "--use-compress-program=pigz", + "-f", "images.tar.gz", *tar_files]) + log.info("Created the tar.gz file!") + + log.info("Deleting intermediate .tar files.") + for file in tar_files: + delete_file_if_exists(file) + log.info("Deleted all .tar files.") diff --git a/scripts/airgapped/utils.py b/scripts/airgapped/utils.py new file mode 100644 index 00000000..7e6db2b3 --- /dev/null +++ b/scripts/airgapped/utils.py @@ -0,0 +1,55 @@ +import logging +import os +import pathlib + +import docker + +cli = docker.client.from_env() + +LOG_FORMAT = "%(levelname)s \t| %(message)s" +logging.basicConfig(format=LOG_FORMAT, level=logging.INFO) + +log = logging.getLogger(__name__) + + +def delete_files_with_extension(dir_path, extension): + """Delete all files in dir_path that have a specific file extension.""" + dir_files = os.listdir(dir_path) + for file in dir_files: + if file.endswith(extension): + os.remove(os.path.join(dir_path, file)) + + +def delete_file_if_exists(file_name): + """Delete the file name if it exists.""" + pathlib.Path(file_name).unlink(missing_ok=True) + + +def get_images_list_from_file(file_name: str) -> list[str]: + """Given a file name with \n separated names return the list of names.""" + try: + with open(file_name, 'r') as 
file: + images = file.read().splitlines() + return images + except FileNotFoundError: + log.warn(f"File '{file_name}' not found.") + return [] + except Exception as e: + log.error("An error occurred:", e) + return [] + + +def get_or_pull_image(image: str): + """First try to get the image from local cache, and then pull.""" + try: + log.info("%s: Trying to get image from cache", image) + img = cli.images.get(image) + + log.info("%s: Found image in cache", image) + return img + except docker.errors.ImageNotFound: + log.info("%s: Couldn't find image in cache. Pulling it", image) + img = cli.images.pull(image) + + log.info("%s: Pulled image", image) + return img diff --git a/tests/airgapped/README.md b/tests/airgapped/README.md new file mode 100644 index 00000000..d6ec4091 --- /dev/null +++ b/tests/airgapped/README.md @@ -0,0 +1,111 @@ +# Testing Airgapped Installation +> :warning: **Those scripts require at least 500Gb**: Since they download all images of Kubeflow, both in host and inside the LXC container +> +> :warning: **Those scripts require Python 3.10** + +For running the tests we expect an environment that can: +1. Spin up LXC containers +2. Have Docker, to pull images + +We need docker to pull all the images necessary and push them to the airgapped +lxc container. + +## Setup + +We've prepared some scripts for setting up the environment +```bash +./tests/airgapped/setup/setup.sh +``` + +We've observed that in almost all cases we needed to reboot to be able to +run docker as sudo and the lxc container to, initially, have network access. + +## Running the tests + +You can run the script that will spin up an airgapped microk8s cluster with: + +```bash +./tests/airgapped/airgap.sh \ + --node-name airgapped-microk8s \ + --microk8s-channel 1.24/stable \ + --bundle-path releases/latest/edge/bundle.yaml +``` + +### Size considerations + +As stated in the beginning these scripts require a lot of storage, if run with +the full set of images of Kubeflow. 
To better expose this, we'll take for +granted that the total of all OCI images of Kubeflow is 125Gb. Then the amount +of storage needed is: +- 125Gb, for host to pull all images locally +- 125Gb, for the compressed `images.tar.gz` (the size almost always will be + smaller, but here I'll use the worst case scenario +- 125Gb, to copy this tarbal inside the airgapped LXC machine +- 125Gb, to copy the contents of the tarball into the container registry inside + the airgapped LXC machine + +So in the worst case, we need to have at least 500Gb to be able to run those +scripts and use all images of Kubeflow. + +### Running with a subset of images + +By default, if no `images.tar.gz` file is found, in working directory from where +the script was executed from, then the script will try to download +all the CKF images. These are 125Gb, which will make it difficult for running a +lot of tests locally. + +Devs are urged to instead define their own `images.txt` file with the images +they'd like to be loaded during tests. Note that in the instructions below I +used `1.7/stable` until https://github.com/canonical/bundle-kubeflow/issues/679 +is resolved, and we'll be able to use other bundle files. + +```bash +./scripts/airgapped/get-all-images.sh releases/1.7/stable/kubeflow/bundle.yaml > images-all.txt +``` + +This will generate an `images-all.txt`, with all images of CKF 1.7. You can +create a copy of that file `images.txt` and keep which images you want from +the initial file, or change the rest. Then you can continue with the following +commands to generate the `images.tar.gz` + +```bash +python3 scripts/airgapped/save-images-to-cache.py images.txt +python3 scripts/airgapped/retag-images-to-cache.py images.txt +python3 scripts/airgapped/save-images-to-tar.py retagged-images.txt +``` + + +### Common bugs + +#### No internet in the LXC container when starting + +This is most probably happening because LXC and Docker do not play nice together. + +To mitigate try to: +1. 
`./tests/airgapped/setup/lxd-docker-networking.sh` +2. If the problem persists, reboot the machine + + +#### This "instances" entry already exists +``` +Creating airgapped-microk8s +Error: Failed instance creation: Failed creating instance record: Add instance info to the database: This "instances" entry already exists +Error: Instance is not running +``` + +The reason for this is that the previous LXC Container still exists. To verify run `lxc ls`, where you should see `airgapped-microk8s` + +**Solution** +``` +lxc delete airgapped-microk8s --force +``` + +#### Charms are not the ones I expected + +Keep in mind that the script will NOT re-fetch the charms if it finds a +`charms.tar.gz` file in the repo. This means that follow-up runs will use +that cached file. + +If you want to use Charms from a different bundle, then make sure to remove +`charms.tar.gz` + diff --git a/tests/airgapped/airgap.sh b/tests/airgapped/airgap.sh new file mode 100755 index 00000000..f926032e --- /dev/null +++ b/tests/airgapped/airgap.sh @@ -0,0 +1,156 @@ +#!/usr/bin/env bash + +# This is the driver script for: +# 1. Pulling all the images/charms in host env +# 2. Create an airgapped lxd VM and install MicroK8s in it +# 3. Setup a registry mirror in that VM +# +# The script is also made to use some "caching". Specifically, if a charms.tar.gz +# or an images.tar.gz file is present then it won't regenerate them, to save time. + +source tests/airgapped/utils.sh +source tests/airgapped/ckf.sh +source tests/airgapped/registry.sh + + +function wait_all_pods() { + container="$1" + + echo "Waiting for all pods to start" + lxc exec "$container" -- bash -c " + microk8s kubectl wait --for=condition=Ready pods --all --all-namespaces + " + + echo "All pods are ready" +} + +function airgap_wait_for_pods() { + container="$1" + + lxc exec "$container" -- bash -c " + while ! 
microk8s kubectl wait -n kube-system ds/calico-node --for=jsonpath='{.status.numberReady}'=1; do + echo waiting for calico + sleep 3 + done + + while ! microk8s kubectl wait -n kube-system deploy/hostpath-provisioner --for=jsonpath='{.status.readyReplicas}'=1; do + echo waiting for hostpath provisioner + sleep 3 + done + + while ! microk8s kubectl wait -n kube-system deploy/coredns --for=jsonpath='{.status.readyReplicas}'=1; do + echo waiting for coredns + sleep 3 + done + " +} + +function make_machine_airgapped() { + local NAME=$1 + + lxc exec "$NAME" -- bash -c " + ip route delete default + ip route add default dev eth0 + " + if lxc exec "$NAME" -- bash -c "ping -c1 1.1.1.1"; then + echo "machine for airgap test has internet access when it should not" + exit 1 + fi +} + + + + +function setup_microk8s() { + local NAME=$1 + local DISTRO=$2 + local MICROK8S_CHANNEL=$3 + + create_machine "$NAME" "$DISTRO" + + lxc exec "$NAME" -- snap install microk8s --classic --channel="$MICROK8S_CHANNEL" + + lxc exec "$NAME" -- bash -c " + microk8s enable ingress dns hostpath-storage metallb:10.64.140.43-10.64.140.49 + microk8s enable dns:\$(resolvectl status | grep 'Current DNS Server' | awk '{print \$NF}') + " + + echo "Wait for MicroK8s to come up" + airgap_wait_for_pods "$NAME" +} + +function post_airgap_tests() { + local AIRGAPPED_NAME=$2 + lxc rm "$AIRGAPPED_NAME" --force +} + +TEMP=$(getopt -o "lh" \ + --long help,lib-mode,registry-name:,node-name:,distro:,microk8s-channel:,juju-channel:,bundle-path:,proxy: \ + -n "$(basename "$0")" -- "$@") + +if [ $? != 0 ] ; then echo "Terminating..." 
>&2 ; exit 1 ; fi + +eval set -- "$TEMP" + +AIRGAPPED_NAME="${AIRGAPPED_NAME:-"airgapped-microk8s"}" +DISTRO="${DISTRO:-"ubuntu:22.04"}" +MICROK8S_CHANNEL="${MICROK8S_CHANNEL:-}" +JUJU_CHANNEL="${JUJU_CHANNEL:-"2.9/stable"}" +BUNDLE_PATH="${BUNDLE_PATH:-"releases/latest/edge/bundle.yaml"}" +LIBRARY_MODE=false + + +while true; do + case "$1" in + -l | --lib-mode ) LIBRARY_MODE=true; shift ;; + --node-name ) AIRGAPPED_NAME="$2"; shift 2 ;; + --distro ) DISTRO="$2"; shift 2 ;; + --microk8s-channel ) MICROK8S_CHANNEL="$2"; shift 2 ;; + --juju-channel) JUJU_CHANNEL="$2"; shift 2 ;; + --bundle-path) BUNDLE_PATH="$2"; shift 2 ;; + -h | --help ) + prog=$(basename -s.wrapper "$0") + echo "Usage: $prog [options...]" + echo " --node-name Name to be used for LXD containers" + echo " Can also be set by using AIRGAPPED_NAME environment variable" + echo " --distro Distro image to be used for LXD containers Eg. ubuntu:20.04" + echo " Can also be set by using DISTRO environment variable" + echo " --microk8s-channel Channel to be tested Eg. latest/edge" + echo " Can also be set by using MICROK8S_CHANNEL environment variable" + echo " --bundle-path Bundle yaml to be tested" + echo " Can also be set by using BUNDLE_PATH environment variable" + echo " -l, --lib-mode Make the script act like a library Eg. 
true / false" + echo + exit ;; + -- ) shift; break ;; + * ) break ;; + esac +done + +if [ "$LIBRARY_MODE" == "false" ]; +then + echo "1/X -- (us) Create images tar.gz" + create_images_tar "$BUNDLE_PATH" + echo "2/X -- (us) Create charms tar.gz" + create_charms_tar "$BUNDLE_PATH" + echo "3/X -- (client) Setup K8s cluster (MicroK8s)" + setup_microk8s "$AIRGAPPED_NAME" "$DISTRO" "$MICROK8S_CHANNEL" + echo "4/X -- (client) Setup Docker registry, reachable from K8s cluster" + setup_docker_registry "$AIRGAPPED_NAME" + echo "5/X -- (field) Copy our tars to client env" + copy_tars_to_airgapped_env "$AIRGAPPED_NAME" + echo "6/X -- (client) Injest images from tar to their registry" + push_images_tar_to_registry "$AIRGAPPED_NAME" + echo "7/X -- (client) Configure access to mirror registry" + configure_to_use_registry_mirror "$AIRGAPPED_NAME" + echo "8/X -- Install Juju CLI and init Kubeflow model" + install_juju "$AIRGAPPED_NAME" "$JUJU_CHANNEL" + echo "9/X -- Make MicroK8s airgapped" + wait_all_pods "$AIRGAPPED_NAME" + make_machine_airgapped "$AIRGAPPED_NAME" + echo "10/X -- Initialize Kubeflow model in JuJu" + init_juju_model "$AIRGAPPED_NAME" "$JUJU_CHANNEL" + #echo "Cleaning up" + + lxc exec "$AIRGAPPED_NAME" -- bash -c "bash" +fi diff --git a/tests/airgapped/ckf.sh b/tests/airgapped/ckf.sh new file mode 100644 index 00000000..818669a8 --- /dev/null +++ b/tests/airgapped/ckf.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash + +# This file includes helper functions for +# 1. Fetching CKF artifacts (images,tars) +# 2. Pushing the artifacts to the airgapped VM +# 3. Initialising juju and preparing the model for CKF + +function create_images_tar() { + local BUNDLE_PATH=$1 + + if [ -f "images.tar.gz" ]; then + echo "images.tar.gz exists. Will not recreate it." 
+ return 0 + fi + + pip3 install -r scripts/airgapped/requirements.txt + + echo "Generating list of images of Charmed Kubeflow" + bash scripts/airgapped/get-all-images.sh "$BUNDLE_PATH" > images.txt + + echo "Using produced list to load it into our machine's docker cache" + python3 scripts/airgapped/save-images-to-cache.py images.txt + + echo "Retagging in our machine's docker cache all the images of the list" + python3 scripts/airgapped/retag-images-to-cache.py images.txt + + echo "Creating images.tar.gz file with all images defined in the retagged list" + python3 scripts/airgapped/save-images-to-tar.py retagged-images.txt +} + +function create_charms_tar() { + local BUNDLE_PATH=$1 + + if [ -f "charms.tar.gz" ]; then + echo "charms.tar.gz exists. Will not recreate it." + return 0 + fi + + python3 scripts/airgapped/save-charms-to-tar.py \ + $BUNDLE_PATH \ + --zip-all \ + --delete-afterwards +} + +function copy_tars_to_airgapped_env() { + local NAME="$1" + + echo "Pushing images.tar.gz..." + lxc file push images.tar.gz "$NAME"/root/ + + echo "Pushing charms.tar.gz..." + lxc file push charms.tar.gz "$NAME"/root/ + lxc exec "$NAME" -- bash -c " + mkdir charms + tar -xzvf charms.tar.gz --directory charms + rm charms.tar.gz + " + + echo "Pushing retagged images list..." + lxc file push retagged-images.txt "$NAME"/root/ + + echo "Pushed all artifacts successfully." 
+} + +function install_juju() { + container="$1" + juju_channel="$2" + + lxc exec "$container" -- bash -c " + snap install juju --channel ${juju_channel} --classic + juju bootstrap microk8s + " + +} + +function init_juju_model() { + container="$1" + + lxc exec "$container" -- bash -c " + juju add-model kubeflow + " +} + +function fetch_ckf_charms() { + local NAME=$1 + local BUNDLE_PATH=$2 + + lxc exec "$NAME" -- bash -c " + python3 scripts/airgapped/download_bundle_charms.py \ + releases/1.7/stable/kubeflow/bundle.yaml \ + --zip-all \ + --delete-afterwards + + mkdir charms + mv charms.tar.gz charms/ + cd charms + tar -xzvf charms.tar.gz + " +} diff --git a/tests/airgapped/lxc/install-deps/ubuntu_20.04 b/tests/airgapped/lxc/install-deps/ubuntu_20.04 new file mode 100644 index 00000000..f4e01e39 --- /dev/null +++ b/tests/airgapped/lxc/install-deps/ubuntu_20.04 @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +export $(grep -v '^#' /etc/environment | xargs) +export DEBIAN_FRONTEND=noninteractive + +apt-get update +apt-get install python3-pip docker.io -y +pip3 install pytest requests pyyaml sh +# Attempting to address https://forum.snapcraft.io/t/lxd-refresh-cause-container-socket-error/8698 +# if core is to be installed by microk8s it fails +snap install core18 | true diff --git a/tests/airgapped/lxc/install-deps/ubuntu_22.04 b/tests/airgapped/lxc/install-deps/ubuntu_22.04 new file mode 100644 index 00000000..32fac7f1 --- /dev/null +++ b/tests/airgapped/lxc/install-deps/ubuntu_22.04 @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +export $(grep -v '^#' /etc/environment | xargs) +export DEBIAN_FRONTEND=noninteractive + +apt-get update +apt-get install python3-pip docker.io -y +pip3 install pytest requests pyyaml sh +# Attempting to address https://forum.snapcraft.io/t/lxd-refresh-cause-container-socket-error/8698 +# if core is to be installed by microk8s it fails +snap install core20 | true diff --git a/tests/airgapped/lxc/microk8s.profile b/tests/airgapped/lxc/microk8s.profile new 
file mode 100644
index 00000000..8bb7af53
--- /dev/null
+++ b/tests/airgapped/lxc/microk8s.profile
@@ -0,0 +1,29 @@
+name: microk8s
+config:
+  boot.autostart: "true" # start the container automatically on host boot
+  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
+  raw.lxc: |
+    lxc.apparmor.profile=unconfined
+    lxc.mount.auto=proc:rw sys:rw cgroup:rw
+    lxc.cgroup.devices.allow=a
+    lxc.cap.drop=
+  security.nesting: "true" # allow nested containers (docker, microk8s) inside the LXC container
+  security.privileged: "true" # NOTE(review): privileged container; presumably required for microk8s-in-LXD — confirm
+description: ""
+devices: # NOTE(review): host passthroughs below appear to work around AppArmor/sysfs restrictions for microk8s — confirm
+  aadisable:
+    path: /sys/module/nf_conntrack/parameters/hashsize
+    source: /sys/module/nf_conntrack/parameters/hashsize
+    type: disk
+  aadisable2:
+    path: /dev/kmsg
+    source: /dev/kmsg
+    type: unix-char
+  aadisable3:
+    path: /sys/fs/bpf
+    source: /sys/fs/bpf
+    type: disk
+  aadisable4:
+    path: /proc/sys/net/netfilter/nf_conntrack_max
+    source: /proc/sys/net/netfilter/nf_conntrack_max
+    type: disk
diff --git a/tests/airgapped/lxd.profile b/tests/airgapped/lxd.profile
new file mode 100644
index 00000000..3ac5b035
--- /dev/null
+++ b/tests/airgapped/lxd.profile
@@ -0,0 +1,30 @@
+config: {}
+networks:
+- config:
+    ipv4.address: auto
+    ipv6.address: auto
+  description: ""
+  name: lxdbr0
+  type: ""
+  project: default
+storage_pools:
+- config:
+    size: 500GiB # large pool: holds the airgap image tarballs and the registry data
+  description: ""
+  name: default
+  driver: zfs
+profiles:
+- config: {}
+  description: ""
+  devices:
+    eth0:
+      name: eth0
+      network: lxdbr0
+      type: nic
+    root:
+      path: /
+      pool: default
+      type: disk
+  name: default
+projects: []
+cluster: null
diff --git a/tests/airgapped/registry.sh b/tests/airgapped/registry.sh
new file mode 100644
index 00000000..ded30cdd
--- /dev/null
+++ b/tests/airgapped/registry.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+# This file includes helper functions for:
+# 1. setting up a Docker Registry
+# 2. configuring microk8s to use this registry (with http)
+#
+# These scripts are meant to be run with access to the internet.
+
+function setup_docker_registry() { # run a registry:2.8.1 container in LXC container $1 and allow its docker daemon to use it over http
+    local NAME=$1 # LXC container name
+
+    lxc exec "$NAME" -- bash -c "
+        docker run -d \
+            -p 5000:5000 \
+            --restart=always \
+            --name registry \
+            -v /mnt/registry:/var/lib/registry \
+            registry:2.8.1
+
+        echo '{
+    \"insecure-registries\" : [\"172.17.0.2:5000\"]
+}
+        ' > /etc/docker/daemon.json
+
+        systemctl restart docker
+    " # NOTE(review): 172.17.0.2 assumes the registry is the first container on docker0 — confirm
+}
+
+
+function push_juju_images_to_registry() { # NOTE(review): despite the name, this only *pulls* charm-base images via microk8s ctr — confirm intent
+    local NAME=$1 # LXC container name
+
+    lxc exec "$NAME" -- bash -c "
+        microk8s ctr images pull docker.io/jujusolutions/charm-base:ubuntu-20.04
+        microk8s ctr images pull docker.io/jujusolutions/charm-base:ubuntu-22.04
+    "
+}
+
+
+function push_images_tar_to_registry() { # unpack images.tar.gz in container $1, docker-load each image, push to the local registry
+    local NAME=$1 # LXC container name
+
+    lxc exec "$NAME" -- bash -c "
+        echo \"Extracting images from tar\"
+        mkdir images
+        tar -xzvf images.tar.gz --directory images
+        rm images.tar.gz
+
+        echo \"Loading images into intermediate Docker client\"
+        for img in images/*.tar; do docker load < \$img && rm \$img; done
+        rmdir images
+
+        echo \"Pushing images from local docker to Registry\"
+        python3 scripts/airgapped/push-images-to-registry.py retagged-images.txt
+
+    "
+
+    echo "Pushing base charm images to registry"
+    push_juju_images_to_registry "$NAME"
+}
+
+
+function configure_to_use_registry_mirror() { # point microk8s containerd at the http registry as a pull/resolve mirror
+    local NAME=$1 # LXC container name
+
+    lxc exec "$NAME" -- bash -c '
+        mkdir -p /var/snap/microk8s/current/args/certs.d/172.17.0.2:5000/
+
+        echo "
+[host.\"http://172.17.0.2:5000\"]
+    capabilities = [\"pull\", \"resolve\"]
+        " > /var/snap/microk8s/current/args/certs.d/172.17.0.2:5000/hosts.toml
+    ' # outer single quotes: inner bash expands the \" escapes so hosts.toml gets plain double quotes
+}
diff --git a/tests/airgapped/setup/lxd-docker-networking.sh b/tests/airgapped/setup/lxd-docker-networking.sh
new file mode 100755
index 00000000..ab2cd2df
--- /dev/null
+++ b/tests/airgapped/setup/lxd-docker-networking.sh
@@ -0,0 +1,7 @@
+# https://documentation.ubuntu.com/lxd/en/latest/howto/network_bridge_firewalld/#prevent-connectivity-issues-with-lxd-and-docker
+
+echo "Configuring LXD and Docker networking"
+
+sudo iptables -I DOCKER-USER -i lxdbr0 -j ACCEPT
+sudo iptables -I DOCKER-USER -o lxdbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+
diff --git a/tests/airgapped/setup/prerequisites.sh b/tests/airgapped/setup/prerequisites.sh
new file mode 100755
index 00000000..0acc0a60
--- /dev/null
+++ b/tests/airgapped/setup/prerequisites.sh
@@ -0,0 +1,8 @@
+JUJU_CHANNEL="${JUJU_CHANNEL:-2.9/stable}" # overridable via environment
+
+echo "Installing pip"
+sudo apt update
+sudo apt install -y python3-pip # -y: script runs unattended
+
+echo "Installing Juju"
+sudo snap install juju --channel="$JUJU_CHANNEL" --classic
diff --git a/tests/airgapped/setup/setup.sh b/tests/airgapped/setup/setup.sh
new file mode 100755
index 00000000..327a6df9
--- /dev/null
+++ b/tests/airgapped/setup/setup.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+set -xe
+
+lxd init --preseed < tests/airgapped/lxd.profile
+pip3 install -r scripts/airgapped/requirements.txt
+
+./scripts/airgapped/prerequisites.sh
+./tests/airgapped/setup/prerequisites.sh
+./tests/airgapped/setup/lxd-docker-networking.sh
diff --git a/tests/airgapped/utils.sh b/tests/airgapped/utils.sh
new file mode 100644
index 00000000..3d876ba8
--- /dev/null
+++ b/tests/airgapped/utils.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+function create_machine() { # launch LXC container $1 from image $2 with the microk8s profile and provision test deps
+    local NAME=$1
+    local DISTRO=$2
+    if ! lxc profile show microk8s
+    then
+        lxc profile copy default microk8s
+    fi
+    lxc profile edit microk8s < tests/airgapped/lxc/microk8s.profile
+
+    lxc launch -p default -p microk8s "$DISTRO" "$NAME"
+
+    # Allow for the machine to boot and get an IP
+    sleep 20
+    tar cf - ./tests | lxc exec "$NAME" -- tar xvf - -C /root
+    tar cf - ./scripts | lxc exec "$NAME" -- tar xvf - -C /root
+    tar cf - ./releases | lxc exec "$NAME" -- tar xvf - -C /root
+    DISTRO_DEPS_TMP="${DISTRO//:/_}" # e.g. ubuntu:22.04 -> ubuntu_22.04 (install-deps filename)
+    DISTRO_DEPS="${DISTRO_DEPS_TMP////-}"
+    lxc exec "$NAME" -- /bin/bash "/root/tests/airgapped/lxc/install-deps/$DISTRO_DEPS"
+    lxc exec "$NAME" -- pip3 install -r scripts/airgapped/requirements.txt
+    lxc exec "$NAME" -- reboot
+    sleep 20
+
+    trap 'lxc delete '"${NAME}"' --force || true' EXIT # best-effort cleanup; NAME expanded at trap-set time
+    if [ "$#" -ne 1 ] # NOTE(review): extra args trigger a second reboot — confirm intent
+    then
+        lxc exec "$NAME" -- reboot
+        sleep 20
+    fi
+
+    # inotify increase
+    lxc exec "$NAME" -- sysctl fs.inotify.max_user_instances=1280
+    lxc exec "$NAME" -- sysctl fs.inotify.max_user_watches=655360
+}
+
+function setup_tests() { # install host-side test deps and export config for the test run
+    DISTRO="${1-$DISTRO}"
+    FROM_CHANNEL="${2-$FROM_CHANNEL}"
+    TO_CHANNEL="${3-$TO_CHANNEL}"
+
+    export DEBIAN_FRONTEND=noninteractive
+    apt-get install python3-pip -y
+    pip3 install -U pytest requests pyyaml sh
+    apt-get install jq -y
+    snap install kubectl --classic
+    export ARCH=$(uname -m)
+    export LXC_PROFILE="tests/airgapped/lxc/microk8s.profile"
+    export BACKEND="lxc"
+    export CHANNEL_TO_TEST=${TO_CHANNEL}
+}