From 19013d6cbe52f1d1d34f08cc609cb52602090708 Mon Sep 17 00:00:00 2001
From: Giuseppe Baccini
Date: Wed, 1 Jun 2022 11:59:02 +0200
Subject: [PATCH 1/4] Environment enhancements

- Added k3s-ansible submodule
- Removed support for vanilla-k8s and NGINX
- Modified environment to work with k3s-ansible
- Added code to handle more than 1 admin; this does not seem to work with
  k3s-ansible yet. We will investigate further.
- Added the kubectl group in the Vagrantfile
- Added support for the OpenSuse/Leap Vagrant box, which is now the default
- General improvements to Ansible playbooks
- Refactored Vagrantfile
- Adjusted bootstrap.yaml for Ubuntu boxes
- Updated the README files
- Added a local insecure registry, started on the admin-1 node on port 5000
- The built s3gw-ui image is now pushed to the local registry
- Image name and pull policy are now parametric for s3gw-ui-deployment

Signed-off-by: Giuseppe Baccini
---
 .gitmodules                                   |   3 +
 build-ui/README.md                            |   2 +-
 env/.gitignore                                |   2 +-
 env/{README.k3s.md => README.bm.md}           |  25 +-
 env/README.md                                 |  19 +-
 env/{README.k8s.md => README.vm.md}           |  60 ++--
 env/Vagrantfile                               | 328 +++++++-----------
 env/ansible.cfg                               |  12 +
 env/generate-spec.sh                          |  42 +--
 env/ingress-nginx/longhorn-ingress.yaml       |  24 --
 env/ingress-nginx/nginx-nodeport.yaml         |  22 --
 env/ingress-nginx/s3gw-ingress.yaml           |  45 ---
 env/ingress-nginx/s3gw-ui-ingress.yaml        |  24 --
 env/playbooks/bootstrap.yaml                  | 177 +++++-----
 env/playbooks/ingress-nginx-deploy.yaml       |  12 -
 env/playbooks/ingress-traefik-deploy.yaml     |  30 +-
 env/playbooks/k3s-ansible                     |   1 +
 ...k3s-install.yaml => k3s-post-install.yaml} |  34 +-
 env/playbooks/k8s-install.yaml                | 128 -------
 env/playbooks/load-scen.yaml                  |  15 +-
 env/playbooks/longhorn-deploy.yaml            |   2 +-
 env/playbooks/nginx-deploy.yaml               |  19 -
 env/playbooks/s3gw-deploy.yaml                |  46 ++-
 env/playbooks/s3gw-ui-deploy.yaml             |  84 ++---
 env/s3gw-dev.yaml                             |   2 +-
 env/s3gw-ui/s3gw-ui-deployment.yaml           |   4 +-
 env/s3gw.yaml                                 |   2 +-
 env/{setup-k8s.sh => setup-vm.sh}             |  31 +-
 env/{setup-k3s.sh => setup.sh}                |  38 +-
 29 files changed, 444 insertions(+), 789 deletions(-)
 create mode 100644 .gitmodules
 rename env/{README.k3s.md => README.bm.md} (79%)
 rename env/{README.k8s.md => README.vm.md} (76%)
 create mode 100644 env/ansible.cfg
 delete mode 100644 env/ingress-nginx/longhorn-ingress.yaml
 delete mode 100644 env/ingress-nginx/nginx-nodeport.yaml
 delete mode 100644 env/ingress-nginx/s3gw-ingress.yaml
 delete mode 100644 env/ingress-nginx/s3gw-ui-ingress.yaml
 delete mode 100644 env/playbooks/ingress-nginx-deploy.yaml
 create mode 160000 env/playbooks/k3s-ansible
 rename env/playbooks/{k3s-install.yaml => k3s-post-install.yaml} (55%)
 delete mode 100644 env/playbooks/k8s-install.yaml
 delete mode 100644 env/playbooks/nginx-deploy.yaml
 rename env/{setup-k8s.sh => setup-vm.sh} (75%)
 rename env/{setup-k3s.sh => setup.sh} (84%)

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..1806f4d
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "k3s-ansible"]
+	path = env/playbooks/k3s-ansible
+	url = https://github.com/k3s-io/k3s-ansible.git
diff --git a/build-ui/README.md b/build-ui/README.md
index 2c578ec..644ebdf 100644
--- a/build-ui/README.md
+++ b/build-ui/README.md
@@ -26,7 +26,7 @@ Make sure you've installed the following applications:

 * Podman or Docker

-The build script expect the following directory hierarchy.
+The build script expects the following directory hierarchy.
```text | diff --git a/env/.gitignore b/env/.gitignore index 81531e7..4f4d199 100644 --- a/env/.gitignore +++ b/env/.gitignore @@ -1,4 +1,4 @@ -/s3gw.ctr.tar +/*.ctr.tar s3gw/*.tmp.yaml playbooks/join-command playbooks/admin.conf diff --git a/env/README.k3s.md b/env/README.bm.md similarity index 79% rename from env/README.k3s.md rename to env/README.bm.md index ae30de2..4bccc79 100644 --- a/env/README.k3s.md +++ b/env/README.bm.md @@ -1,9 +1,9 @@ -# K3s +# K3s on Bare Metal + +This README will guide you through the setup of a K3s cluster on bare metal. +If you are looking for K3s cluster running on virtual machines, +refer to our [K3s on virtual machines](./README.vm.md). -This README will guide you through the setup of a K3s cluster on your system. -If you are looking for a vanilla K8s cluster running on virtual machines, -refer to our [K8s section](./README.k8s.md). -To install K3s on a virtual machine, see [here](#Install-K3s-on-a-virtual-machine). # Setup ## Note Before @@ -33,7 +33,7 @@ system. Additionally, it will deploy Longhorn and the s3gw in the cluster. ``` $ cd ~/git/s3gw-core/env -$ ./setup-k3s.sh +$ ./setup.sh ``` # Access the Longhorn UI @@ -66,19 +66,12 @@ Backup Target Credential Secret: `s3gw-secret` # Install K3s on a virtual machine -## Requirements - -Make sure you have installed the following applications on your system: - -* Vagrant -* libvirt -* Ansible - In order to install k3s on a virtual machine rather than on bare metal, execute: ``` $ cd ~/git/s3gw-core/env -$ ./setup-k3s.sh --vm +$ ./setup.sh --vm ``` -Refer to [K8s section](./README.k8s.md) for more configuration options. +Refer to [K3s on virtual machines](./README.vm.md) for requirements and for +more configuration options. diff --git a/env/README.md b/env/README.md index 7ad2723..8ee8d49 100644 --- a/env/README.md +++ b/env/README.md @@ -1,18 +1,13 @@ -# K3s & K8s environment running s3gw with Longhorn +# K3s environment running s3gw with Longhorn -This is the entrypoint to setup a Kubernetes cluster on your system. -You can either choose to install a lightweight **K3s** cluster or a **vanilla K8s** -cluster running the latest stable Kubernetes version available. -Regardless of the choice, you will get a provisioned cluster set up to work with -`s3gw` and Longhorn. -K3s version can install directly on bare metal or on virtual machine. -K8s version will install on an arbitrary number of virtual machines depending on the -size of the cluster. +This is the entrypoint to setup a Kubernetes cluster running s3gw with Longhorn. +You can choose to install a **K3s** cluster directly on your machine +or on top of virtual machines. -Refer to the appropriate section to proceed with the setup of the environment: +Refer to the appropriate section to proceed with the setup: -* [K3s Setup](./README.k3s.md) -* [K8s Setup](./README.k8s.md) +* [K3s on bare metal](./README.bm.md) +* [K3s on virtual machines](./README.vm.md) ## Ingresses diff --git a/env/README.k8s.md b/env/README.vm.md similarity index 76% rename from env/README.k8s.md rename to env/README.vm.md index f2e0151..6b227d9 100644 --- a/env/README.k8s.md +++ b/env/README.vm.md @@ -1,15 +1,15 @@ -# K8s +# K3s on Virtual Machines -Follow this guide if you wish to run an `s3gw` image on the latest stable Kubernetes release. -You will be able to quickly build a cluster installed on a set of virtual machines. +Follow this guide if you wish to run a K3s cluster installed on virtual machines. You will have a certain degree of choice in terms of customization options. 
If you are looking for a more lightweight environment running directly on bare metal, -refer to our [K3s section](./README.k3s.md). +refer to our [K3s on bare metal](./README.bm.md). ## Table of Contents * [Description](#description) * [Requirements](#requirements) +* [Supported Vagrant boxes](#supported-vagrant-boxes) * [Building the environment](#building-the-environment) * [Destroying the environment](#destroying-the-environment) * [Accessing the environment](#accessing-the-environment) @@ -20,14 +20,14 @@ refer to our [K3s section](./README.k3s.md). ## Description The entire environment build process is automated by a set of Ansible playbooks. -The cluster is created with exactly one `admin` node and +The cluster is created with one `admin` node and an arbitrary number of `worker` nodes. A single virtual machine acting as an `admin` node is also possible; in this case, it will be able to schedule pods as a `worker` node. -Name topology for nodes is the following: +Name topology of nodes is the following: ```text -admin +admin-1 worker-1 worker-2 ... @@ -41,13 +41,32 @@ Make sure you have installed the following applications on your system: * libvirt * Ansible +Make sure you have installed the following Ansible modules: + +* kubernetes.core +* community.docker.docker_image + +You can install them with: + +```bash +$ ansible-galaxy collection install kubernetes.core +... +$ ansible-galaxy collection install community.docker +... +``` + +## Supported Vagrant boxes + +* opensuse/Leap-15.3.x86_64 +* generic/ubuntu[1604-2004] + ## Building the environment -You can build the environment with the `setup-k8s.sh` script. +You can build the environment with the `setup-vm.sh` script. The simplest form you can use is: ```bash -$ ./setup-k8s.sh build +$ ./setup-vm.sh build Building environment ... ``` @@ -56,11 +75,10 @@ and one node `worker`. 
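Since this patch pulls `k3s-ansible` in as a git submodule (see the `.gitmodules` diff above)
and the Vagrant provisioner runs `playbooks/k3s-ansible/site.yml`, the submodule presumably
has to be checked out before the first build. A hedged sketch of the full sequence, assuming
the repository lives under `~/git/s3gw-core`:

```bash
# Fetch the k3s-ansible submodule referenced by .gitmodules,
# then build the environment as described above.
cd ~/git/s3gw-core
git submodule update --init --recursive
cd env
./setup-vm.sh build
```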
You can customize the build with the following environment variables: ```text -IMAGE_NAME : The Vagrant box image used in the cluster +BOX_NAME : The Vagrant box image used in the cluster (default: opensuse/Leap-15.3.x86_64) VM_NET : The virtual machine subnet used in the cluster VM_NET_LAST_OCTET_START : Vagrant will increment this value when creating vm(s) and assigning an ip -CIDR_NET : The CIDR subnet used by the Calico network plugin -WORKER_COUNT : The number of Kubernetes workers in the cluster +WORKER_COUNT : The number of Kubernetes node in the cluster ADMIN_MEM : The RAM amount used by the admin node (Vagrant format) ADMIN_CPU : The CPU amount used by the admin node (Vagrant format) ADMIN_DISK : yes/no, when yes a disk will be allocated for the admin node - this will be effective only for mono clusters @@ -71,27 +89,25 @@ WORKER_DISK : yes/no, when yes a disk will be allocated for the WORKER_DISK_SIZE : The disk size allocated for a worker node (Vagrant format) CONTAINER_ENGINE : The host's local container engine used to build the s3gw container (podman/docker) STOP_AFTER_BOOTSTRAP : yes/no, when yes stop the provisioning just after the bootstrapping phase -START_LOCAL_REGISTRY : yes/no, when yes start a local insecure image registry at admin.local:5000 -S3GW_IMAGE : The s3gw's container image used when deploying the application on k8s -K8S_DISTRO : The Kubernetes distribution to install; specify k3s or k8s (k8s default) -INGRESS : The ingress implementation to be used; NGINX or Traefik (NGINX default) +S3GW_IMAGE : The s3gw's container image used when deploying the application on k3s PROV_USER : The provisioning user used by Ansible (vagrant default) S3GW_UI_REPO : A GitHub repository to be used when building the s3gw-ui's image S3GW_UI_VERSION : A S3GW_UI_REPO's branch to be used SCENARIO : An optional scenario to be loaded in the cluster +K3S_VERSION : The K3s version to be used (default: v1.23.6+k3s1) ``` So, you could start a more specialized build with: ```bash -$ IMAGE_NAME=generic/ubuntu1804 WORKER_COUNT=4 ./setup-k8s.sh build +$ BOX_NAME=generic/ubuntu1804 WORKER_COUNT=4 ./setup-vm.sh build Building environment ... ``` You create a mono virtual machine cluster with the lone `admin` node with: ```bash -$ WORKER_COUNT=0 ./setup-k8s.sh build +$ WORKER_COUNT=0 ./setup-vm.sh build Building environment ... ``` @@ -102,7 +118,7 @@ In this case, the node will be able to schedule pods as a `worker` node. You can destroy a previously built environment with: ```bash -$ ./setup-k8s.sh destroy +$ ./setup-vm.sh destroy Destroying environment ... ``` @@ -115,7 +131,7 @@ to be released by Vagrant. You can start a previously built environment with: ```bash -$ ./setup-k8s.sh start +$ ./setup-vm.sh start Starting environment ... ``` @@ -131,14 +147,14 @@ You can connect through `ssh` to all nodes in the cluster. To connect to the `admin` node run: ```bash -$ ./setup-k8s.sh ssh admin +$ ./setup-vm.sh ssh admin Connecting to admin ... ``` To connect to a `worker` node run: ```bash -$ ./setup-k8s.sh ssh worker-2 +$ ./setup-vm.sh ssh worker-2 Connecting to worker-2 ... 
``` diff --git a/env/Vagrantfile b/env/Vagrantfile index 01a1cc3..d500a1f 100644 --- a/env/Vagrantfile +++ b/env/Vagrantfile @@ -1,245 +1,175 @@ -IMAGE_NAME = ENV["IMAGE_NAME"] || "generic/ubuntu1604" -VM_NET = (ENV["VM_NET"] || "10.46.201.0").split(".0")[0] -VM_NET_LAST_OCTET_START = Integer(ENV["VM_NET_LAST_OCTET_START"] || "101") -CIDR_NET = ENV["CIDR_NET"] || "172.22.0.0" -WORKER_COUNT = Integer(ENV["WORKER_COUNT"] || "1") -ADMIN_MEM = Integer(ENV["ADMIN_MEM"] || "4096") -ADMIN_CPU = Integer(ENV["ADMIN_CPU"] || "2") -ADMIN_DISK = ENV["ADMIN_DISK"] || "no" -ADMIN_DISK_SIZE = ENV["ADMIN_DISK_SIZE"] || "8G" -WORKER_MEM = Integer(ENV["WORKER_MEM"] || "4096") -WORKER_CPU = Integer(ENV["WORKER_CPU"] || "2") -WORKER_DISK = ENV["WORKER_DISK"] || "no" -WORKER_DISK_SIZE = ENV["WORKER_DISK_SIZE"] || "8G" -STOP_AFTER_BOOTSTRAP = ENV["STOP_AFTER_BOOTSTRAP"] || "no" -START_LOCAL_REGISTRY = ENV["START_LOCAL_REGISTRY"] || "no" -S3GW_IMAGE = ENV["S3GW_IMAGE"] || "ghcr.io/aquarist-labs/s3gw:latest" -S3GW_IMAGE_PULL_POLICY = ENV["S3GW_IMAGE_PULL_POLICY"] || "Always" -K8S_DISTRO = ENV["K8S_DISTRO"] || "k8s" -INGRESS = ENV["INGRESS"] || "nginx" -PROV_USER = ENV["PROV_USER"] || "vagrant" -S3GW_UI_REPO = ENV["S3GW_UI_REPO"] || "" -S3GW_UI_VERSION = ENV["S3GW_UI_VERSION"] || "" -SCENARIO = ENV["SCENARIO"] || "" +BOX_NAME = ENV["BOX_NAME"] || "opensuse/Leap-15.3.x86_64" +VM_NET = (ENV["VM_NET"] || "10.46.201.0").split(".0")[0] +VM_NET_LAST_OCTET_START = Integer(ENV["VM_NET_LAST_OCTET_START"] || "101") + +#k3s-ansible seems to work with only 1 admin; this should be investigated. +#For the time being, we assume this value hardcoded to 1. +ADMIN_COUNT = Integer(ENV["ADMIN_COUNT"] || "1") + +WORKER_COUNT = Integer(ENV["WORKER_COUNT"] || "1") +ADMIN_MEM = Integer(ENV["ADMIN_MEM"] || "4096") +ADMIN_CPU = Integer(ENV["ADMIN_CPU"] || "2") +ADMIN_DISK = ((ENV["ADMIN_DISK"] || "no") == "yes") +ADMIN_DISK_SIZE = ENV["ADMIN_DISK_SIZE"] || "8G" +WORKER_MEM = Integer(ENV["WORKER_MEM"] || "4096") +WORKER_CPU = Integer(ENV["WORKER_CPU"] || "2") +WORKER_DISK = ((ENV["WORKER_DISK"] || "no") == "yes") +WORKER_DISK_SIZE = ENV["WORKER_DISK_SIZE"] || "8G" +STOP_AFTER_BOOTSTRAP = ((ENV["STOP_AFTER_BOOTSTRAP"] || "no") == "yes") +S3GW_IMAGE = ENV["S3GW_IMAGE"] || "ghcr.io/aquarist-labs/s3gw:latest" +S3GW_IMAGE_PULL_POLICY = ENV["S3GW_IMAGE_PULL_POLICY"] || "Always" +PROV_USER = ENV["PROV_USER"] || "vagrant" +S3GW_UI_IMAGE = "admin-1.local/s3gw-ui:latest" +S3GW_UI_IMAGE_PULL_POLICY = "Always" +S3GW_UI_REPO = ENV["S3GW_UI_REPO"] || "" +S3GW_UI_VERSION = ENV["S3GW_UI_VERSION"] || "" +SCENARIO = ENV["SCENARIO"] || "" +K3S_VERSION = ENV["K3S_VERSION"] || "v1.23.6+k3s1" ansible_groups = { - "admins" => [ - "admin" + "apt" => [], + "zypper" => [], + "master" => [ + "admin-[1:#{ADMIN_COUNT}]" ], - "workers" => [ + "node" => [ "worker-[1:#{WORKER_COUNT}]" ], - "solo_admins" => [ - ], - "registries" => [ - ], - "traefik" => [ + "k3s_cluster" => [ + "admin-[1:#{ADMIN_COUNT}]", + "worker-[1:#{WORKER_COUNT}]" ], - "nginx" => [ + "kubectl" => [ + "admin-1" ] } extra_vars = { user: PROV_USER, worker_count: WORKER_COUNT, - cidr_net: CIDR_NET, s3gw_image: S3GW_IMAGE, s3gw_image_pull_policy: S3GW_IMAGE_PULL_POLICY, + s3gw_ui_image: S3GW_UI_IMAGE, + s3gw_ui_image_pull_policy: S3GW_UI_IMAGE_PULL_POLICY, s3gw_ui_repo: S3GW_UI_REPO, s3gw_ui_version: S3GW_UI_VERSION, - scenario: SCENARIO + scenario: SCENARIO, + k3s_version: K3S_VERSION, + systemd_dir: "/etc/systemd/system", + master_ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START}" } -Vagrant.configure("2") do 
|config| - - if START_LOCAL_REGISTRY == "yes" - ansible_groups["registries"] << "admin" +def ansible_provision (context, ansible_groups, extra_vars) + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/bootstrap.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars end - - if INGRESS == "traefik" - ansible_groups["traefik"] << "admin" + if(!STOP_AFTER_BOOTSTRAP) + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/k3s-ansible/site.yml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/k3s-post-install.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/longhorn-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/s3gw-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/s3gw-ui-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/ingress-traefik-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + if SCENARIO != "" + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/load-scen.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + end end +end + +Vagrant.configure("2") do |config| - if INGRESS == "nginx" - ansible_groups["nginx"] << "admin" + if BOX_NAME.include? "generic/ubuntu" + ansible_groups["apt"] << "admin-[1:#{ADMIN_COUNT}]" + ansible_groups["apt"] << "worker-[1:#{WORKER_COUNT}]" + elsif BOX_NAME.include? 
"opensuse/" + ansible_groups["zypper"] << "admin-[1:#{ADMIN_COUNT}]" + ansible_groups["zypper"] << "worker-[1:#{WORKER_COUNT}]" end - config.vm.provider "libvirt" do |lv| - lv.connect_via_ssh = false - lv.qemu_use_session = false - lv.nic_model_type = "e1000" - lv.cpu_mode = 'host-passthrough' - end + config.vm.provider "libvirt" do |lv| + lv.connect_via_ssh = false + lv.qemu_use_session = false + lv.nic_model_type = "e1000" + lv.cpu_mode = 'host-passthrough' + end - #admin node - config.vm.define "admin" do |admin| - admin.vm.provider "libvirt" do |lv| - lv.memory = ADMIN_MEM - lv.cpus = ADMIN_CPU - if WORKER_COUNT == 0 && ADMIN_DISK == "yes" - lv.storage :file, size: ADMIN_DISK_SIZE, type: 'qcow2', serial: "6646200" - end - end + (1..ADMIN_COUNT).each do |i| + config.vm.define "admin-#{i}" do |admin| + admin.vm.provider "libvirt" do |lv| + lv.memory = ADMIN_MEM + lv.cpus = ADMIN_CPU + if WORKER_COUNT == 0 && ADMIN_DISK + lv.storage :file, size: ADMIN_DISK_SIZE, type: 'qcow2', serial: "664620#{i}" + end + end - admin.vm.box = IMAGE_NAME - admin.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START}" - admin.vm.hostname = "admin" + admin.vm.box = BOX_NAME + admin.vm.hostname = "admin-#{i}" + admin.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i-1}" + end - # [WORKER_COUNT == 0] - # if WORKER_COUNT == 0 - ansible_groups["solo_admins"] << "admin" - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/bootstrap.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - if(STOP_AFTER_BOOTSTRAP == "no") - - if K8S_DISTRO == "k8s" - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/k8s-install.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - else - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/k3s-install.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - end - - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/longhorn-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/s3gw-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/s3gw-ui-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/nginx-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/ingress-nginx-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/ingress-traefik-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - if SCENARIO != "" - admin.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/load-scen.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - end - end + ansible_provision config, ansible_groups, extra_vars end end - # 
k8s-vanilla provisioning [WORKER_COUNT > 0] - # - if K8S_DISTRO == "k8s" && WORKER_COUNT > 0 - #worker nodes + if WORKER_COUNT > 0 (1..WORKER_COUNT).each do |i| config.vm.define "worker-#{i}" do |worker| worker.vm.provider "libvirt" do |lv| lv.memory = WORKER_MEM lv.cpus = WORKER_CPU - if WORKER_DISK == "yes" + if WORKER_DISK lv.storage :file, size: WORKER_DISK_SIZE, type: 'qcow2', serial: "674620#{i}" end end - worker.vm.box = IMAGE_NAME + worker.vm.box = BOX_NAME worker.vm.hostname = "worker-#{i}" - worker.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i}" + worker.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i+(ADMIN_COUNT-1)}" # Only execute once the Ansible provisioner, - # when all the workers are up and ready. + # when all nodes are up and ready. if i == WORKER_COUNT - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/bootstrap.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - if(STOP_AFTER_BOOTSTRAP == "no") - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/k8s-install.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/longhorn-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/s3gw-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/s3gw-ui-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/nginx-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/ingress-nginx-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/ingress-traefik-deploy.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - if SCENARIO != "" - worker.vm.provision :ansible do |ansible| - ansible.limit = "all" - ansible.playbook = "playbooks/load-scen.yaml" - ansible.groups = ansible_groups - ansible.extra_vars = extra_vars - end - end - end + ansible_provision worker, ansible_groups, extra_vars end end end diff --git a/env/ansible.cfg b/env/ansible.cfg new file mode 100644 index 0000000..bcc3533 --- /dev/null +++ b/env/ansible.cfg @@ -0,0 +1,12 @@ +# config file for ansible -- https://ansible.com/ +# =============================================== + +# nearly all parameters can be overridden in ansible-playbook +# or with command line flags. 
ansible will read ANSIBLE_CONFIG, +# ansible.cfg in the current working directory, .ansible.cfg in +# the home directory or /etc/ansible/ansible.cfg, whichever it +# finds first + +[defaults] + +interpreter_python = /usr/bin/python3 diff --git a/env/generate-spec.sh b/env/generate-spec.sh index 78d9f36..2c25131 100755 --- a/env/generate-spec.sh +++ b/env/generate-spec.sh @@ -19,8 +19,8 @@ is_dev_env=false s3gw_image="ghcr.io/aquarist-labs/s3gw:latest" s3gw_image_pull_policy="Always" - -ingress="traefik" +s3gw_ui_image="localhost/s3gw-ui:latest" +s3gw_ui_image_pull_policy="Never" while [[ $# -gt 0 ]]; do case $1 in @@ -32,12 +32,6 @@ while [[ $# -gt 0 ]]; do s3gw_image="localhost/s3gw:latest" s3gw_image_pull_policy="Never" ;; - --traefik) - ingress="traefik" - ;; - --nginx) - ingress="nginx" - ;; esac shift 1 done @@ -47,6 +41,11 @@ s3gw_image=$(printf '%s\n' "$s3gw_image" | sed -e 's/[]\/$*.^[]/\\&/g') sed "s/##S3GW_IMAGE##/"${s3gw_image}"/" s3gw/s3gw-deployment.yaml > s3gw/s3gw-deployment.tmp.yaml sed -i "s/##S3GW_IMAGE_PULL_POLICY##/"${s3gw_image_pull_policy}"/" s3gw/s3gw-deployment.tmp.yaml +s3gw_ui_image=$(printf '%s\n' "$s3gw_ui_image" | sed -e 's/[]\/$*.^[]/\\&/g') + +sed "s/##S3GW_UI_IMAGE##/"${s3gw_ui_image}"/" s3gw-ui/s3gw-ui-deployment.yaml > s3gw-ui/s3gw-ui-deployment.tmp.yaml +sed -i "s/##S3GW_UI_IMAGE_PULL_POLICY##/"${s3gw_ui_image_pull_policy}"/" s3gw-ui/s3gw-ui-deployment.tmp.yaml + rgw_default_user_access_key_base64=$(cat s3gw/s3gw-secret.yaml | grep RGW_DEFAULT_USER_ACCESS_KEY | cut -d':' -f 2 | sed -e 's/[[:space:],"]//g') rgw_default_user_access_key_base64=$(echo -n $rgw_default_user_access_key_base64 | base64) rgw_default_user_access_key_base64=$(printf '%s\n' "$rgw_default_user_access_key_base64" | sed -e 's/[]\/$*.^[]/\\&/g') @@ -72,17 +71,10 @@ specs=( "s3gw/s3gw-secret" "s3gw/s3gw-ingress-secret" "s3gw/s3gw-service" - "s3gw-ui/s3gw-ui-deployment" + "s3gw-ui/s3gw-ui-deployment.tmp" "s3gw-ui/s3gw-ui-service" ) -nginx_specs=( - "ingress-nginx/nginx-nodeport" - "ingress-nginx/longhorn-ingress" - "ingress-nginx/s3gw-ingress" - "ingress-nginx/s3gw-ui-ingress" -) - traefik_specs=( "ingress-traefik/traefik-nodeport" "ingress-traefik/longhorn-ingress" @@ -121,18 +113,10 @@ for spec in ${specs[@]}; do cat ${spec}.yaml >> ${tgtfile} done -if [ $ingress = "nginx" ]; then - for spec in ${nginx_specs[@]}; do - echo Inflating nginx-spec ${spec}.yaml - echo "---" >> ${tgtfile} - cat ${spec}.yaml >> ${tgtfile} - done -elif [ $ingress = "traefik" ]; then - for spec in ${traefik_specs[@]}; do - echo Inflating traefik-spec ${spec}.yaml - echo "---" >> ${tgtfile} - cat ${spec}.yaml >> ${tgtfile} - done -fi +for spec in ${traefik_specs[@]}; do + echo Inflating traefik-spec ${spec}.yaml + echo "---" >> ${tgtfile} + cat ${spec}.yaml >> ${tgtfile} +done find . 
-name "*.tmp.yaml" -type f -delete diff --git a/env/ingress-nginx/longhorn-ingress.yaml b/env/ingress-nginx/longhorn-ingress.yaml deleted file mode 100644 index 8c0012a..0000000 --- a/env/ingress-nginx/longhorn-ingress.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: longhorn-ingress - namespace: longhorn-system - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - ingressClassName: nginx - tls: - - hosts: - - longhorn.local - secretName: longhorn-ingress-secret - rules: - - host: longhorn.local - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: longhorn-frontend - port: - number: 80 diff --git a/env/ingress-nginx/nginx-nodeport.yaml b/env/ingress-nginx/nginx-nodeport.yaml deleted file mode 100644 index ba13809..0000000 --- a/env/ingress-nginx/nginx-nodeport.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx-nodeport - namespace: ingress-nginx -spec: - type: NodePort - ports: - - port: 80 - nodePort: 30080 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - nodePort: 30443 - targetPort: 443 - protocol: TCP - name: https - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/name: ingress-nginx diff --git a/env/ingress-nginx/s3gw-ingress.yaml b/env/ingress-nginx/s3gw-ingress.yaml deleted file mode 100644 index 1420a22..0000000 --- a/env/ingress-nginx/s3gw-ingress.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: s3gw-ingress - namespace: s3gw-system - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - ingressClassName: nginx - tls: - - hosts: - - s3gw.local - secretName: s3gw-ingress-secret - rules: - - host: s3gw.local - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: s3gw-service - port: - number: 80 ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: s3gw-no-tls-ingress - namespace: s3gw-system - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - ingressClassName: nginx - rules: - - host: s3gw-no-tls.local - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: s3gw-service - port: - number: 80 diff --git a/env/ingress-nginx/s3gw-ui-ingress.yaml b/env/ingress-nginx/s3gw-ui-ingress.yaml deleted file mode 100644 index 9cd85e4..0000000 --- a/env/ingress-nginx/s3gw-ui-ingress.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: s3gw-ui-ingress - namespace: s3gw-system - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - ingressClassName: nginx - tls: - - hosts: - - s3gw-ui.local - secretName: s3gw-ingress-secret - rules: - - host: s3gw-ui.local - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: s3gw-ui-service - port: - number: 80 diff --git a/env/playbooks/bootstrap.yaml b/env/playbooks/bootstrap.yaml index e563981..dd61ff6 100644 --- a/env/playbooks/bootstrap.yaml +++ b/env/playbooks/bootstrap.yaml @@ -1,9 +1,9 @@ -- name: Install Packages - hosts: all +- name: Install Packages - [APT] + hosts: apt become: true tasks: - - name: Install utility packages + - name: Install packages apt: name: "{{ packages }}" state: present @@ -16,11 +16,6 @@ - gnupg-agent - software-properties-common -- name: Container Runtime - hosts: all - become: true - tasks: - - name: Add signing key for Docker's repository apt_key: url: 
https://download.docker.com/linux/ubuntu/gpg @@ -31,85 +26,78 @@ repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable state: present - - name: Install container runtime + - name: Install Docker apt: name: "{{ packages }}" state: present update_cache: yes vars: packages: - - containerd.io - docker-ce - docker-ce-cli - - name: Patching docker/daemon.json - copy: - dest: "/etc/docker/daemon.json" - content: | - { - "exec-opts": ["native.cgroupdriver=systemd"], - "insecure-registries" : ["admin:5000"] - } - - - name: Patching /etc/modules-load.d/containerd.conf - copy: - dest: "/etc/modules-load.d/containerd.conf" - content: | - overlay - br_netfilter - - - name: Add the overlay module - modprobe: - name: overlay - state: present +- name: Install Packages - [ZYPPER] + hosts: zypper + become: true + tasks: - - name: Add the br_netfilter module - modprobe: - name: br_netfilter + - name: Install packages + zypper: + name: "{{ packages }}" state: present + vars: + packages: + - curl + - wget + - git + - docker + - python3-docker - - name: Patching /etc/sysctl.d/99-kubernetes-cri.conf - copy: - dest: "/etc/sysctl.d/99-kubernetes-cri.conf" - content: | - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - net.bridge.bridge-nf-call-ip6tables = 1 +- name: Install pip + hosts: master + become: true + tasks: - - name: Apply sysctl params without reboot - command: sysctl --system + - name: Install pip + command: "{{ item }}" + with_items: + - wget "https://bootstrap.pypa.io/pip/3.5/get-pip.py" + - python3 get-pip.py + - rm -rf get-pip.py - - name: Patching /etc/containerd/config.toml - copy: - dest: "/etc/containerd/config.toml" - content: | - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true +- name: Install pip s3cmd [ALL] + hosts: master + become: true + tasks: + - name: Install pip s3cmd + command: pip install s3cmd - version = 2 +- name: Install pip kubernetes [ALL] + hosts: master + tasks: + - name: Install pip kubernetes + command: pip install kubernetes - [plugins."io.containerd.grpc.v1.cri".registry] - config_path = "/etc/containerd/certs.d" +- name: Install pip docker [APT] + hosts: apt:!node + become: true + tasks: + - name: Install pip docker + command: pip install docker - - name: Create /etc/containerd/certs.d/admin.local directory - file: - path: /etc/containerd/certs.d/admin.local - state: directory +- name: Patch Runtime + hosts: all + become: true + tasks: - - name: Patching /etc/containerd/certs.d/admin.local/hosts.toml + - name: Patching docker/daemon.json copy: - dest: "/etc/containerd/certs.d/admin.local/hosts.toml" + dest: "/etc/docker/daemon.json" content: | - [host."http://admin.local:5000"] - capabilities = ["pull", "resolve", "push"] - skip_verify = true - - - name: Restart containerd - service: - name: containerd - state: restarted - daemon_reload: yes + { + "exec-opts": ["native.cgroupdriver=systemd"], + "insecure-registries" : ["admin-1:5000"] + } - name: Add user to Docker group user: @@ -134,36 +122,48 @@ - name: Disable swap command: swapoff -a -- name: Start Local Registry [optional] - hosts: registries - become: yes +- name: Start local registry + hosts: master + become: true tasks: - name: Start a local registry command: docker run -d -p 5000:5000 --restart=always --name registry registry:2 - - name: Copy host's s3gw container image to local dir - copy: src=../s3gw.tar dest=/home/{{ user }} mode=0777 +- name: Set up nodes 
to use local registry + hosts: all + become: true + tasks: - - name: Import s3gw container into local registry - command: "{{ item }}" - with_items: - - docker load --input s3gw.tar - - docker tag localhost/s3gw admin:5000/s3gw - - docker push admin:5000/s3gw - - docker image rm localhost/s3gw - - docker image rm admin:5000/s3gw - - rm -rf s3gw.tar + - name: Ensuring /etc/rancher/k3s + ansible.builtin.file: + path: /etc/rancher/k3s + state: directory + mode: '0755' + + - name: Creating /etc/rancher/k3s/registries.yaml + copy: + content: "" + dest: /etc/rancher/k3s/registries.yaml + force: no + + - name: Updating /etc/rancher/k3s/registries.yaml + blockinfile: + path: /etc/rancher/k3s/registries.yaml + block: | + mirrors: + admin-1.local: + endpoint: + - "http://admin-1:5000" - name: Local DNS hosts: all + become: true gather_facts: yes tasks: - name: Update /etc/hosts file with node name tags: etchostsupdate - become: yes - become_user: root lineinfile: path: "/etc/hosts" regexp: ".*\t{{ hostvars[item]['ansible_fqdn']}}\t{{ hostvars[item]['ansible_hostname']}}" @@ -171,16 +171,15 @@ state: present backup: yes register: etchostsupdate - when: ansible_hostname != "{{ item }}" or ansible_hostname == "{{ item }}" with_items: "{{groups['all']}}" - name: Patch /etc/hosts - become: true - shell: | - sed -i s/"127.0.2.1 {{ ansible_facts['fqdn']}} {{ ansible_facts['hostname']}}"/""/ /etc/hosts + lineinfile: + path: /etc/hosts + regexp: '^127\.0\.2\.1.*' + state: absent - - name: Add *local to /etc/hosts - become: true + - name: Add local names to /etc/hosts lineinfile: path: /etc/hosts line: "127.0.0.1 longhorn.local s3gw.local s3gw-no-tls.local s3gw-ui.local" diff --git a/env/playbooks/ingress-nginx-deploy.yaml b/env/playbooks/ingress-nginx-deploy.yaml deleted file mode 100644 index a1151c8..0000000 --- a/env/playbooks/ingress-nginx-deploy.yaml +++ /dev/null @@ -1,12 +0,0 @@ -- name: Longhorn/s3gw NGINX ingress deploy - hosts: nginx - tasks: - - - name: Apply ingresses configuration - command: "{{ item }}" - with_items: - - kubectl apply -f /home/{{ user }}/longhorn/longhorn-ingress-secret.yaml - - kubectl apply -f /home/{{ user }}/ingress-nginx/longhorn-ingress.yaml - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-ingress-secret.yaml - - kubectl apply -f /home/{{ user }}/ingress-nginx/s3gw-ingress.yaml - - kubectl apply -f /home/{{ user }}/ingress-nginx/s3gw-ui-ingress.yaml diff --git a/env/playbooks/ingress-traefik-deploy.yaml b/env/playbooks/ingress-traefik-deploy.yaml index b5c2a6c..6338ffd 100644 --- a/env/playbooks/ingress-traefik-deploy.yaml +++ b/env/playbooks/ingress-traefik-deploy.yaml @@ -1,5 +1,5 @@ - name: Longhorn/s3gw Traefik ingress deploy - hosts: traefik + hosts: kubectl tasks: - name: Wait Traefik controller to become ready, this could take a while ... 
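Stepping back to the registry wiring above: bootstrap.yaml starts a plain `registry:2`
container on admin-1 and points every node at it through `/etc/rancher/k3s/registries.yaml`.
A quick, hedged way to verify the registry from any node is the Docker Registry HTTP API v2
(this assumes the `admin-1` host name resolves, which the Local DNS tasks take care of):

```bash
# List the repositories the local insecure registry currently holds.
curl http://admin-1:5000/v2/_catalog
# After s3gw-ui has been built and pushed, its tags should show up too.
curl http://admin-1:5000/v2/s3gw-ui/tags/list
```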
@@ -9,15 +9,25 @@ retries: 20 delay: 5 - - name: Copy traefik ingresses cfg to local dir + - name: Copy Traefik ingresses cfg to local dir copy: src=../ingress-traefik dest=/home/{{ user }} mode=0777 - - name: Create a Service for the Ingress Controller Pods - command: kubectl apply -f /home/{{ user }}/ingress-traefik/traefik-nodeport.yaml + - name: Apply traefik-nodeport.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/traefik-nodeport.yaml - - name: Apply ingresses configuration - command: "{{ item }}" - with_items: - - kubectl apply -f /home/{{ user }}/ingress-traefik/longhorn-ingress.yaml - - kubectl apply -f /home/{{ user }}/ingress-traefik/s3gw-ingress.yaml - - kubectl apply -f /home/{{ user }}/ingress-traefik/s3gw-ui-ingress.yaml + - name: Apply longhorn-ingress.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/longhorn-ingress.yaml + + - name: Apply s3gw-ingress.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/s3gw-ingress.yaml + + - name: Apply s3gw-ui-ingress.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/s3gw-ui-ingress.yaml diff --git a/env/playbooks/k3s-ansible b/env/playbooks/k3s-ansible new file mode 160000 index 0000000..8e70812 --- /dev/null +++ b/env/playbooks/k3s-ansible @@ -0,0 +1 @@ +Subproject commit 8e7081243b4ffa83beffe53b58458824a00d1a38 diff --git a/env/playbooks/k3s-install.yaml b/env/playbooks/k3s-post-install.yaml similarity index 55% rename from env/playbooks/k3s-install.yaml rename to env/playbooks/k3s-post-install.yaml index 41e0c40..d8aaab8 100644 --- a/env/playbooks/k3s-install.yaml +++ b/env/playbooks/k3s-post-install.yaml @@ -1,10 +1,20 @@ -- name: Install K3s - hosts: all +- name: K3s extras + hosts: master become: true tasks: - - name: K3s download and install - shell: | - curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 + + - name: add .kube/config + copy: + src: /etc/rancher/k3s/k3s.yaml + dest: /home/{{ user }}/.kube/config + owner: "{{ user }}" + mode: '0600' + remote_src: yes + + - name: export KUBECONFIG + lineinfile: + path: /home/{{ user }}/.bashrc + line: "export KUBECONFIG=~/.kube/config" - name: Setting kubectl alias and enabling kubectl bash completion command: "{{ item }}" @@ -16,11 +26,17 @@ - /bin/bash -c "sudo echo 'alias k=kubectl' >> /home/{{ user }}/.bashrc" - /bin/bash -c "sudo echo 'complete -F __start_kubectl k' >> /home/{{ user }}/.bashrc" -- name: Probe admin - hosts: admins + - name: Patch k3s.yaml for this session + file: + path: /etc/rancher/k3s/k3s.yaml + mode: '0644' + +- name: Probe cluster + hosts: kubectl tasks: - - name: Wait for admin to become ready - command: kubectl get nodes admin + + - name: Wait for admin-1 to become ready + command: kubectl get nodes admin-1 register: result until: result.stdout.find("NotReady") == -1 retries: 25 diff --git a/env/playbooks/k8s-install.yaml b/env/playbooks/k8s-install.yaml deleted file mode 100644 index 027d87b..0000000 --- a/env/playbooks/k8s-install.yaml +++ /dev/null @@ -1,128 +0,0 @@ -- name: Install k8s Packages - hosts: all - become: true - tasks: - - - name: Add an apt signing key for Kubernetes - apt_key: - url: https://packages.cloud.google.com/apt/doc/apt-key.gpg - state: present - - - name: Adding apt repository for Kubernetes - apt_repository: - repo: deb https://apt.kubernetes.io/ kubernetes-xenial main - state: present - filename: kubernetes.list - - - name: Install Kubernetes binaries - apt: - name: "{{ 
packages }}" - state: present - update_cache: yes - vars: - packages: - - kubelet - - kubeadm - - kubectl - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ ansible_facts['eth1']['ipv4']['address'] }} - create: yes - - - name: Restart kubelet - service: - name: kubelet - daemon_reload: yes - state: restarted - - - name: Setting kubectl alias and enabling kubectl bash completion - command: "{{ item }}" - with_items: - - /bin/bash -c "sudo echo 'source <(kubectl completion bash)' >> /home/{{ user }}/.bashrc" - - /bin/bash -c "sudo touch /etc/bash_completion.d/kubectl" - - /bin/bash -c "sudo chmod 777 /etc/bash_completion.d/kubectl" - - /bin/bash -c "kubectl completion bash > /etc/bash_completion.d/kubectl" - - /bin/bash -c "sudo echo 'alias k=kubectl' >> /home/{{ user }}/.bashrc" - - /bin/bash -c "sudo echo 'complete -F __start_kubectl k' >> /home/{{ user }}/.bashrc" - -- name: Kubeadm - hosts: admins - become: true - tasks: - - name: Initialize Kubernetes cluster using kubeadm - command: kubeadm init --apiserver-advertise-address="{{ ansible_facts['eth1']['ipv4']['address'] }}" --apiserver-cert-extra-sans="{{ ansible_facts['eth1']['ipv4']['address'] }}" --node-name admin --pod-network-cidr={{ cidr_net }}/16 --ignore-preflight-errors=Swap - - - name: Setup kubeconfig for user - command: "{{ item }}" - with_items: - - mkdir -p /home/{{ user }}/.kube - - cp -i /etc/kubernetes/admin.conf /home/{{ user }}/.kube/config - - chown {{ user }}:docker /home/{{ user }}/.kube/config - - - name: Copy content of /etc/kubernetes/admin.conf - command: "cat /etc/kubernetes/admin.conf" - register: admin_conf - - - name: Copy kubeconfig to local file - local_action: copy content="{{ admin_conf.stdout }}" dest=./admin.conf - - - name: Install Calico pod network - become: false - command: kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml - - - name: Patching Calico custom-resources.yaml - command: "{{ item }}" - with_items: - - mkdir -p /home/{{ user }}/calico - - wget https://docs.projectcalico.org/manifests/custom-resources.yaml -P /home/{{ user }}/calico - - sed -i s/192.168.0.0/{{ cidr_net }}/g /home/{{ user }}/calico/custom-resources.yaml - - - name: Install Calico pod network - become: false - command: kubectl create -f /home/{{ user }}/calico/custom-resources.yaml - - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Copy join command to local file - local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command" - -- name: Join - hosts: workers - become: true - tasks: - - name: Copy join command inside vm - copy: src=join-command dest=/tmp/join-command.sh mode=0777 - - - name: Join node to cluster - command: sh /tmp/join-command.sh - - - name: Setup kubeconfig for user - command: "{{ item }}" - with_items: - - mkdir -p /home/{{ user }}/.kube - - - name: Copy kubeconfig to local file - copy: src=./admin.conf dest=/home/{{ user }}/.kube/config mode=0777 - -- name: Probe admin - hosts: admins - tasks: - - name: Wait for admin to become ready - command: kubectl get nodes admin - register: result - until: result.stdout.find("NotReady") == -1 - retries: 25 - delay: 5 - -- name: Untaint admin - hosts: solo_admins - tasks: - - name: Untaint admin for pod scheduling - command: "{{ item }}" - with_items: - - kubectl taint node --all node-role.kubernetes.io/master- - - kubectl taint node --all node-role.kubernetes.io/control-plane- 
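With the vanilla-k8s playbook removed, cluster readiness is now probed through the kubeconfig
that k3s-post-install.yaml copies for the provisioning user. A minimal manual check from the
admin-1 node, assuming that playbook has already run:

```bash
# k3s-post-install.yaml copies /etc/rancher/k3s/k3s.yaml to ~/.kube/config
# and appends this export to ~/.bashrc; a fresh shell has it set already.
export KUBECONFIG=~/.kube/config
kubectl get nodes      # admin-1 and the workers should report Ready
kubectl get pods -A    # longhorn-system and s3gw-system pods appear after the deploy playbooks
```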
diff --git a/env/playbooks/load-scen.yaml b/env/playbooks/load-scen.yaml index e4d22c7..9f328ba 100644 --- a/env/playbooks/load-scen.yaml +++ b/env/playbooks/load-scen.yaml @@ -1,18 +1,5 @@ -- name: Install pip & s3cmd - hosts: admins - become: true - tasks: - - - name: Install pip & s3cmd - command: "{{ item }}" - with_items: - - wget "https://bootstrap.pypa.io/pip/3.5/get-pip.py" - - python3 get-pip.py - - pip install s3cmd - - rm -rf get-pip.py - - name: Load scenario - hosts: admins + hosts: kubectl tasks: - name: Create scenarios dir diff --git a/env/playbooks/longhorn-deploy.yaml b/env/playbooks/longhorn-deploy.yaml index 81a5e67..5bd1da9 100644 --- a/env/playbooks/longhorn-deploy.yaml +++ b/env/playbooks/longhorn-deploy.yaml @@ -1,5 +1,5 @@ - name: Deploy Longhorn - hosts: admins + hosts: kubectl tasks: - name: Installing iscsi... diff --git a/env/playbooks/nginx-deploy.yaml b/env/playbooks/nginx-deploy.yaml deleted file mode 100644 index e540956..0000000 --- a/env/playbooks/nginx-deploy.yaml +++ /dev/null @@ -1,19 +0,0 @@ -- name: Deploy NGINX controller - hosts: nginx - tasks: - - - name: Install NGINX controller - command: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.0/deploy/static/provider/cloud/deploy.yaml - - - name: Copy nginx ingresses cfg to local dir - copy: src=../ingress-nginx dest=/home/{{ user }} mode=0777 - - - name: Create a Service for the Ingress Controller Pods - command: kubectl apply -f /home/{{ user }}/ingress-nginx/nginx-nodeport.yaml - - - name: Wait NGINX controller to become ready, this could take a while ... - command: kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=30s - register: result - until: result.rc == 0 - retries: 20 - delay: 2 diff --git a/env/playbooks/s3gw-deploy.yaml b/env/playbooks/s3gw-deploy.yaml index 5b977e3..8ee7c4a 100644 --- a/env/playbooks/s3gw-deploy.yaml +++ b/env/playbooks/s3gw-deploy.yaml @@ -1,5 +1,5 @@ - name: s3gw deploy - hosts: admins + hosts: kubectl tasks: - name: Copy Longhorn cfg to local dir @@ -20,16 +20,40 @@ regexp: '##S3GW_IMAGE_PULL_POLICY##' replace: "{{ s3gw_image_pull_policy }}" - - name: Deploy s3gw - command: "{{ item }}" - with_items: - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-namespace.yaml - - kubectl apply -f /home/{{ user }}/longhorn/longhorn-storageclass.yaml - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-pvc.yaml - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-config.yaml - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-secret.yaml - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-deployment.yaml - - kubectl apply -f /home/{{ user }}/s3gw/s3gw-service.yaml + - name: Apply s3gw-namespace.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-namespace.yaml + + - name: Apply longhorn-storageclass.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/longhorn/longhorn-storageclass.yaml + + - name: Apply s3gw-pvc.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-pvc.yaml + + - name: Apply s3gw-config.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-config.yaml + + - name: Apply s3gw-secret.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-secret.yaml + + - name: Apply s3gw-deployment.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-deployment.yaml + + - name: Apply s3gw-service.yaml + kubernetes.core.k8s: + state: present + src: 
/home/{{ user }}/s3gw/s3gw-service.yaml - name: Wait s3gw application to become ready, this could take a while ... command: kubectl wait --namespace s3gw-system --for=condition=ready pod --selector=app.aquarist-labs.io/name=s3gw --timeout=30s diff --git a/env/playbooks/s3gw-ui-deploy.yaml b/env/playbooks/s3gw-ui-deploy.yaml index 6783cce..a56316c 100644 --- a/env/playbooks/s3gw-ui-deploy.yaml +++ b/env/playbooks/s3gw-ui-deploy.yaml @@ -1,11 +1,23 @@ - name: s3gw-ui build image - hosts: admins + hosts: kubectl become: true tasks: - name: Copy s3gw-ui directory to local directory copy: src=../s3gw-ui dest=/home/{{ user }} mode=0777 + - name: Set S3GW_UI_IMAGE in s3gw-ui-deployment.yaml + replace: + path: /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml + regexp: '##S3GW_UI_IMAGE##' + replace: "{{ s3gw_ui_image }}" + + - name: Set S3GW_UI_IMAGE_PULL_POLICY in s3gw-ui-deployment.yaml + replace: + path: /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml + regexp: '##S3GW_UI_IMAGE_PULL_POLICY##' + replace: "{{ s3gw_ui_image_pull_policy }}" + - name: Copy Dockerfile.s3gw-ui to local directory copy: src=../../build-ui/Dockerfile.s3gw-ui dest=/home/{{ user }}/s3gw-ui mode=0777 @@ -16,53 +28,33 @@ version: "{{ s3gw_ui_version }}" - name: Build s3gw-ui image - command: docker build -t localhost/s3gw-ui -f /home/{{ user }}/s3gw-ui/Dockerfile.s3gw-ui /home/{{ user }}/s3gw-ui/s3gw-ui - - - name: Export s3gw-ui image - command: docker save --output /home/{{ user }}/s3gw-ui/s3gw-ui.tar localhost/s3gw-ui:latest - - # Docker image manipulation should be done using community.docker.docker_image module. - # This is currently failing on OpenSuse Leap 15.3 due to wrong python version being used. - # This needs some adjustments/testing before promotion. - # - # -- Use: ansible-galaxy collection install community.docker - - #- name: Build s3gw-ui image - # community.docker.docker_image: - # build: - # path: /home/{{ user }}/s3gw-ui/s3gw-ui - # dockerfile: /home/{{ user }}/s3gw-ui/Dockerfile.s3gw-ui - # name: localhost/s3gw-ui - # tag: latest - # source: build - # - #- name: Export s3gw-ui image - # community.docker.docker_image: - # name: localhost/s3gw-ui - # tag: latest - # archive_path: /home/{{ user }}/s3gw-ui/s3gw-ui.tar - # source: local + community.docker.docker_image: + build: + path: /home/{{ user }}/s3gw-ui/s3gw-ui + dockerfile: /home/{{ user }}/s3gw-ui/Dockerfile.s3gw-ui + name: s3gw-ui + tag: latest + source: build + + - name: Tag and push s3gw-ui to local registry + community.docker.docker_image: + name: s3gw-ui + repository: admin-1:5000/s3gw-ui + tag: latest + push: yes + source: local -- name: s3gw-ui import image into Kubernetes - hosts: admins - become: true +- name: s3gw UI deploy + hosts: kubectl tasks: - - name: Import s3gw-ui image into Kubernetes - command: k3s ctr images import /home/{{ user }}/s3gw-ui/s3gw-ui.tar - - # kubectl commands should be done using kubernetes.core.k8s module. - # This is currently failing on OpenSuse Leap 15.3 due to wrong python version being used. - # This needs some adjustments before try to use it. 
- # - # -- Use: ansible-galaxy collection install kubernetes.core + - name: Apply s3gw-ui-deployment.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml -- name: s3gw UI deploy - hosts: admins - tasks: + - name: Apply s3gw-ui-service.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw-ui/s3gw-ui-service.yaml - - name: Deploy s3gw-ui - command: "{{ item }}" - with_items: - - kubectl apply -f /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml - - kubectl apply -f /home/{{ user }}/s3gw-ui/s3gw-ui-service.yaml diff --git a/env/s3gw-dev.yaml b/env/s3gw-dev.yaml index 53cb16d..a64c8b4 100644 --- a/env/s3gw-dev.yaml +++ b/env/s3gw-dev.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# This file was auto-generated by generate-spec.sh on 2022/43/08 14:06:58 CEST +# This file was auto-generated by generate-spec.sh on 2022/33/10 14:06:36 CEST # apiVersion: v1 diff --git a/env/s3gw-ui/s3gw-ui-deployment.yaml b/env/s3gw-ui/s3gw-ui-deployment.yaml index 71a9102..9ed2305 100644 --- a/env/s3gw-ui/s3gw-ui-deployment.yaml +++ b/env/s3gw-ui/s3gw-ui-deployment.yaml @@ -18,8 +18,8 @@ spec: spec: containers: - name: s3gw-ui - image: localhost/s3gw-ui:latest - imagePullPolicy: Never + image: ##S3GW_UI_IMAGE## + imagePullPolicy: ##S3GW_UI_IMAGE_PULL_POLICY## envFrom: - configMapRef: name: s3gw-config diff --git a/env/s3gw.yaml b/env/s3gw.yaml index ce21c93..a9ed4e1 100644 --- a/env/s3gw.yaml +++ b/env/s3gw.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# This file was auto-generated by generate-spec.sh on 2022/44/08 14:06:00 CEST +# This file was auto-generated by generate-spec.sh on 2022/32/10 14:06:13 CEST # apiVersion: v1 diff --git a/env/setup-k8s.sh b/env/setup-vm.sh similarity index 75% rename from env/setup-k8s.sh rename to env/setup-vm.sh index fff33c8..7be2f05 100755 --- a/env/setup-k8s.sh +++ b/env/setup-vm.sh @@ -2,10 +2,10 @@ set -e -export IMAGE_NAME=${IMAGE_NAME:-"generic/ubuntu1604"} +export BOX_NAME=${BOX_NAME:-"opensuse/Leap-15.3.x86_64"} export VM_NET=${VM_NET:-"10.46.201.0"} export VM_NET_LAST_OCTET_START=${CLUSTER_NET_LAST_OCTET_START:-"101"} -export CIDR_NET=${CIDR_NET:-"172.22.0.0"} +export ADMIN_COUNT=${ADMIN_COUNT:-"1"} export WORKER_COUNT=${WORKER_COUNT:-"1"} export ADMIN_MEM=${ADMIN_MEM:-"4096"} export ADMIN_CPU=${ADMIN_CPU:-"2"} @@ -17,11 +17,8 @@ export WORKER_DISK=${WORKER_DISK:-"no"} export WORKER_DISK_SIZE=${WORKER_DISK_SIZE:-"8G"} export CONTAINER_ENGINE=${CONTAINER_ENGINE:-"podman"} export STOP_AFTER_BOOTSTRAP=${STOP_AFTER_BOOTSTRAP:-"no"} -export START_LOCAL_REGISTRY=${START_LOCAL_REGISTRY:-"no"} export S3GW_IMAGE=${S3GW_IMAGE:-"ghcr.io/aquarist-labs/s3gw:latest"} export S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY:-"Always"} -export K8S_DISTRO=${K8S_DISTRO:-"k8s"} -export INGRESS=${INGRESS:-"nginx"} export PROV_USER=${PROV_USER:-"vagrant"} #these defaults will change @@ -29,6 +26,7 @@ export S3GW_UI_REPO=${S3GW_UI_REPO:-"https://github.com/giubacc/aws-s3-explorer. export S3GW_UI_VERSION=${S3GW_UI_VERSION:-"s3gw-demo"} export SCENARIO=${SCENARIO:-"default"} +export K3S_VERSION=${K3S_VERSION:-"v1.23.6+k3s1"} start_env() { echo "Starting environment ..." 
@@ -37,10 +35,10 @@ start_env() { } build_env() { - echo "IMAGE_NAME=${IMAGE_NAME}" + echo "BOX_NAME=${BOX_NAME}" echo "VM_NET=${VM_NET}" echo "VM_NET_LAST_OCTET_START=${VM_NET_LAST_OCTET_START}" - echo "CIDR_NET=${CIDR_NET}" + echo "ADMIN_COUNT=${ADMIN_COUNT}" echo "WORKER_COUNT=${WORKER_COUNT}" echo "ADMIN_MEM=${ADMIN_MEM}" echo "ADMIN_CPU=${ADMIN_CPU}" @@ -52,29 +50,16 @@ build_env() { echo "WORKER_DISK_SIZE=${WORKER_DISK_SIZE}" echo "CONTAINER_ENGINE=${CONTAINER_ENGINE}" echo "STOP_AFTER_BOOTSTRAP=${STOP_AFTER_BOOTSTRAP}" - echo "START_LOCAL_REGISTRY=${START_LOCAL_REGISTRY}" echo "S3GW_IMAGE=${S3GW_IMAGE}" echo "S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY}" - echo "K8S_DISTRO=${K8S_DISTRO}" - echo "INGRESS=${INGRESS}" echo "PROV_USER=${PROV_USER}" echo "S3GW_UI_REPO=${S3GW_UI_REPO}" echo "S3GW_UI_VERSION=${S3GW_UI_VERSION}" echo "SCENARIO=${SCENARIO}" - - if [ $START_LOCAL_REGISTRY = "yes" ]; then - echo "Saving s3gw container image locally ..." - rm -rf ./s3gw.tar - if [ $CONTAINER_ENGINE = "podman" ]; then - podman save -o ./s3gw.tar s3gw:latest - elif [ $CONTAINER_ENGINE = "docker" ]; then - docker save --output ./s3gw.tar localhost/s3gw:latest - fi - echo "Saved" - fi + echo "K3S_VERSION=${K3S_VERSION}" echo "Building environment ..." - vagrant up + vagrant up --provision echo "Built" echo "Cleaning ..." @@ -82,7 +67,7 @@ build_env() { echo "Cleaned" echo echo "Connect to admin node with:" - echo "vagrant ssh admin" + echo "vagrant ssh admin-1" } destroy_env() { diff --git a/env/setup-k3s.sh b/env/setup.sh similarity index 84% rename from env/setup-k3s.sh rename to env/setup.sh index ca9558c..3ef8709 100755 --- a/env/setup-k3s.sh +++ b/env/setup.sh @@ -27,8 +27,6 @@ use_local_image_s3exp=1 has_image_s3exp=true s3gw_image_s3exp="localhost/s3gw-ui:latest" -ingress="traefik" - function error() { echo "[ERROR] ${@}" >&2 } @@ -80,10 +78,8 @@ function show_ingresses() { function install_on_vm() { echo "Proceeding to install on a virtual machine..." WORKER_COUNT=0 - K8S_DISTRO=k3s S3GW_IMAGE=$s3gw_image - INGRESS=$ingress - source ./setup-k8s.sh build + source ./setup-vm.sh build } while [[ $# -gt 0 ]]; do @@ -113,12 +109,6 @@ while [[ $# -gt 0 ]]; do install_on_vm exit 0 ;; - --traefik) - ingress="traefik" - ;; - --nginx) - ingress="nginx" - ;; esac shift done @@ -229,23 +219,15 @@ else ) fi -if [ $ingress = "nginx" ]; then - echo Installing nginx-controller ... - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.0/deploy/static/provider/cloud/deploy.yaml - echo Waiting for nginx-controller to become ready, this could take a while ... - kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=600s -fi -if [ $ingress = "traefik" ]; then - # Workaround a K8s behaviour that CustomResourceDefinition must be - # established before they can be used by a resource. - # https://github.com/kubernetes/kubectl/issues/1117 - # k3s kubectl wait --for=condition=established --timeout=60s crd middlewares.traefik.containo.us - echo -n "Waiting for CRD to be established..." - while [[ $(kubectl get crd middlewares.traefik.containo.us -o 'jsonpath={..status.conditions[?(@.type=="Established")].status}' 2>/dev/null) != "True" ]]; do - echo -n "." && sleep 1; - done - echo -fi +# Workaround a K8s behaviour that CustomResourceDefinition must be +# established before they can be used by a resource. 
+# https://github.com/kubernetes/kubectl/issues/1117
+# k3s kubectl wait --for=condition=established --timeout=60s crd middlewares.traefik.containo.us
+echo -n "Waiting for CRD to be established..."
+while [[ $(kubectl get crd middlewares.traefik.containo.us -o 'jsonpath={..status.conditions[?(@.type=="Established")].status}' 2>/dev/null) != "True" ]]; do
+  echo -n "." && sleep 1;
+done
+echo
 
 s3gw_yaml="s3gw.yaml"
 $dev_env && s3gw_yaml="s3gw-dev.yaml"
 

From cdf5b3befaf4dd931baabe95f69b5f121f9995fa Mon Sep 17 00:00:00 2001
From: Giuseppe Baccini
Date: Tue, 14 Jun 2022 12:23:48 +0200
Subject: [PATCH 2/4] Environment enhancements

- Set S3GW_UI_REPO = https://github.com/aquarist-labs/aws-s3-explorer.git

Signed-off-by: Giuseppe Baccini
---
 env/setup-vm.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/env/setup-vm.sh b/env/setup-vm.sh
index 7be2f05..5ab3ba9 100755
--- a/env/setup-vm.sh
+++ b/env/setup-vm.sh
@@ -22,7 +22,7 @@ export S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY:-"Always"}
 export PROV_USER=${PROV_USER:-"vagrant"}
 
 #these defaults will change
-export S3GW_UI_REPO=${S3GW_UI_REPO:-"https://github.com/giubacc/aws-s3-explorer.git"}
+export S3GW_UI_REPO=${S3GW_UI_REPO:-"https://github.com/aquarist-labs/aws-s3-explorer.git"}
 export S3GW_UI_VERSION=${S3GW_UI_VERSION:-"s3gw-demo"}
 export SCENARIO=${SCENARIO:-"default"}
 

From c236ba65e9a7b07bd2109aa271e27399cb8b702f Mon Sep 17 00:00:00 2001
From: Giuseppe Baccini
Date: Tue, 14 Jun 2022 16:36:30 +0200
Subject: [PATCH 3/4] Environment enhancements

- Added an s3gw-ui-no-tls.local ingress because the browser complains
  about mixed http/https sources in the GUI application.

Signed-off-by: Giuseppe Baccini
---
 env/README.md                            |  7 ++++---
 env/ingress-traefik/s3gw-ui-ingress.yaml | 20 ++++++++++++++++++++
 env/s3gw-dev.yaml                        | 22 +++++++++++++++++++++-
 env/s3gw.yaml                            | 22 +++++++++++++++++++++-
 4 files changed, 66 insertions(+), 5 deletions(-)

diff --git a/env/README.md b/env/README.md
index 8ee8d49..f1cb20b 100644
--- a/env/README.md
+++ b/env/README.md
@@ -16,7 +16,7 @@ allocated on a separate virtual host:
 
 * **Longhorn dashboard**, on: `longhorn.local`
 * **s3gw**, on: `s3gw.local` and `s3gw-no-tls.local`
-* **s3gw s3 explorer**, on: `s3gw-ui.local`
+* **s3gw s3 explorer**, on: `s3gw-ui.local` and `s3gw-ui-no-tls.local`
 
 Host names are exposed with a node port service listening on ports
 30443 (https) and 30080 (http).
@@ -27,7 +27,7 @@ When you are running the cluster on a virtual machine, you can patch host's
 `/etc/hosts` file as follow:
 
 ```text
-10.46.201.101 longhorn.local s3gw.local s3gw-no-tls.local s3gw-ui.local
+10.46.201.101 longhorn.local s3gw.local s3gw-no-tls.local s3gw-ui.local s3gw-ui-no-tls.local
 ```
 
 This makes host names resolving with the admin node.
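A quick, illustrative sanity check for the `/etc/hosts` patching described above; the ports are the 30080/30443 node ports from the README, and `-k` assumes the https endpoints serve self-signed certificates:

```bash
# Every ingress host should resolve to the admin node
# (10.46.201.101 in the virtual-machine case).
for host in longhorn.local s3gw.local s3gw-no-tls.local \
            s3gw-ui.local s3gw-ui-no-tls.local; do
  getent hosts "${host}"
done

# The no-TLS endpoint answers on 30080, the TLS one on 30443.
curl -s -o /dev/null -w "s3gw-ui-no-tls: %{http_code}\n" http://s3gw-ui-no-tls.local:30080
curl -sk -o /dev/null -w "s3gw-ui: %{http_code}\n" https://s3gw-ui.local:30443
```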
@@ -35,7 +35,7 @@ Otherwise, when you are running the cluster on bare metal, you can patch host's `/etc/hosts` file as follow: ```text -127.0.0.1 longhorn.local s3gw.local s3gw-no-tls.local s3gw-ui.local +127.0.0.1 longhorn.local s3gw.local s3gw-no-tls.local s3gw-ui.local s3gw-ui-no-tls.local ``` Services can now be accessed at: @@ -45,4 +45,5 @@ https://longhorn.local:30443 https://s3gw.local:30443 http://s3gw-no-tls.local:30080 https://s3gw-ui.local:30443 +http://s3gw-ui-no-tls.local:30080 ``` diff --git a/env/ingress-traefik/s3gw-ui-ingress.yaml b/env/ingress-traefik/s3gw-ui-ingress.yaml index bfe2f15..a81d6f5 100644 --- a/env/ingress-traefik/s3gw-ui-ingress.yaml +++ b/env/ingress-traefik/s3gw-ui-ingress.yaml @@ -21,3 +21,23 @@ spec: name: s3gw-ui-service port: number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw-ui-no-tls.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 diff --git a/env/s3gw-dev.yaml b/env/s3gw-dev.yaml index a64c8b4..2e6b552 100644 --- a/env/s3gw-dev.yaml +++ b/env/s3gw-dev.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# This file was auto-generated by generate-spec.sh on 2022/33/10 14:06:36 CEST +# This file was auto-generated by generate-spec.sh on 2022/51/14 15:06:07 CEST # apiVersion: v1 @@ -330,3 +330,23 @@ spec: name: s3gw-ui-service port: number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw-ui-no-tls.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 diff --git a/env/s3gw.yaml b/env/s3gw.yaml index a9ed4e1..60cb1e9 100644 --- a/env/s3gw.yaml +++ b/env/s3gw.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 #
-# This file was auto-generated by generate-spec.sh on 2022/32/10 14:06:13 CEST
+# This file was auto-generated by generate-spec.sh on 2022/50/14 15:06:51 CEST
 #
 
 apiVersion: v1
@@ -330,3 +330,23 @@ spec:
             name: s3gw-ui-service
             port:
               number: 80
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: s3gw-ui-no-tls-ingress
+  namespace: s3gw-system
+  annotations:
+    traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd
+spec:
+  rules:
+  - host: s3gw-ui-no-tls.local
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: s3gw-ui-service
+            port:
+              number: 80

From 696d38e509c340f668eb9e5ce83dc124fa7fd837 Mon Sep 17 00:00:00 2001
From: Giuseppe Baccini
Date: Wed, 15 Jun 2022 16:11:05 +0200
Subject: [PATCH 4/4] Environment enhancements

- Added partial support for the virtualbox provider

Signed-off-by: Giuseppe Baccini
---
 env/Vagrantfile | 101 +++++++++++++++++++++++++++++++-----------------
 env/setup-vm.sh |   8 +++-
 2 files changed, 72 insertions(+), 37 deletions(-)

diff --git a/env/Vagrantfile b/env/Vagrantfile
index d500a1f..0d9e161 100644
--- a/env/Vagrantfile
+++ b/env/Vagrantfile
@@ -1,6 +1,8 @@
 BOX_NAME = ENV["BOX_NAME"] || "opensuse/Leap-15.3.x86_64"
+VM_PROVIDER = ENV["VM_PROVIDER"] || "libvirt"
 VM_NET = (ENV["VM_NET"] || "10.46.201.0").split(".0")[0]
 VM_NET_LAST_OCTET_START = Integer(ENV["VM_NET_LAST_OCTET_START"] || "101")
+VM_BRIDGE_INET = ENV["VM_BRIDGE_INET"] || "eth0"
 
 #k3s-ansible seems to work with only 1 admin; this should be investigated.
 #For the time being, we assume this value hardcoded to 1.
@@ -16,6 +18,7 @@ WORKER_CPU = Integer(ENV["WORKER_CPU"] || "2")
 WORKER_DISK = ((ENV["WORKER_DISK"] || "no") == "yes")
 WORKER_DISK_SIZE = ENV["WORKER_DISK_SIZE"] || "8G"
 STOP_AFTER_BOOTSTRAP = ((ENV["STOP_AFTER_BOOTSTRAP"] || "no") == "yes")
+STOP_AFTER_K3S_INSTALL = ((ENV["STOP_AFTER_K3S_INSTALL"] || "no") == "yes")
 S3GW_IMAGE = ENV["S3GW_IMAGE"] || "ghcr.io/aquarist-labs/s3gw:latest"
 S3GW_IMAGE_PULL_POLICY = ENV["S3GW_IMAGE_PULL_POLICY"] || "Always"
 PROV_USER = ENV["PROV_USER"] || "vagrant"
@@ -56,7 +59,13 @@ extra_vars = {
   scenario: SCENARIO,
   k3s_version: K3S_VERSION,
   systemd_dir: "/etc/systemd/system",
-  master_ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START}"
+  master_ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START}",
+
+  # --node-ip is needed when using virtualbox; otherwise k3s starts on the NAT interface.
+  # This alone is not sufficient when WORKER_COUNT > 0, because the workers need the directive too.
+  # It currently seems that this problem cannot be overcome, so with virtualbox you can only
+  # have a working cluster with WORKER_COUNT == 0.
+  extra_server_args: "--node-ip #{VM_NET}.#{VM_NET_LAST_OCTET_START}"
 }
 
 def ansible_provision (context, ansible_groups, extra_vars)
@@ -79,37 +88,39 @@ def ansible_provision (context, ansible_groups, extra_vars)
     ansible.groups = ansible_groups
     ansible.extra_vars = extra_vars
   end
-  context.vm.provision "ansible" do |ansible|
-    ansible.limit = "all"
-    ansible.playbook = "playbooks/longhorn-deploy.yaml"
-    ansible.groups = ansible_groups
-    ansible.extra_vars = extra_vars
-  end
-  context.vm.provision "ansible" do |ansible|
-    ansible.limit = "all"
-    ansible.playbook = "playbooks/s3gw-deploy.yaml"
-    ansible.groups = ansible_groups
-    ansible.extra_vars = extra_vars
-  end
-  context.vm.provision "ansible" do |ansible|
-    ansible.limit = "all"
-    ansible.playbook = "playbooks/s3gw-ui-deploy.yaml"
-    ansible.groups = ansible_groups
-    ansible.extra_vars = extra_vars
-  end
-  context.vm.provision "ansible" do |ansible|
-    ansible.limit = "all"
-    ansible.playbook = "playbooks/ingress-traefik-deploy.yaml"
-    ansible.groups = ansible_groups
-    ansible.extra_vars = extra_vars
-  end
-  if SCENARIO != ""
+  if(!STOP_AFTER_K3S_INSTALL)
+    context.vm.provision "ansible" do |ansible|
+      ansible.limit = "all"
+      ansible.playbook = "playbooks/longhorn-deploy.yaml"
+      ansible.groups = ansible_groups
+      ansible.extra_vars = extra_vars
+    end
+    context.vm.provision "ansible" do |ansible|
+      ansible.limit = "all"
+      ansible.playbook = "playbooks/s3gw-deploy.yaml"
+      ansible.groups = ansible_groups
+      ansible.extra_vars = extra_vars
+    end
+    context.vm.provision "ansible" do |ansible|
+      ansible.limit = "all"
+      ansible.playbook = "playbooks/s3gw-ui-deploy.yaml"
+      ansible.groups = ansible_groups
+      ansible.extra_vars = extra_vars
+    end
     context.vm.provision "ansible" do |ansible|
       ansible.limit = "all"
-      ansible.playbook = "playbooks/load-scen.yaml"
+      ansible.playbook = "playbooks/ingress-traefik-deploy.yaml"
       ansible.groups = ansible_groups
       ansible.extra_vars = extra_vars
     end
+    if SCENARIO != ""
+      context.vm.provision "ansible" do |ansible|
+        ansible.limit = "all"
+        ansible.playbook = "playbooks/load-scen.yaml"
+        ansible.groups = ansible_groups
+        ansible.extra_vars = extra_vars
+      end
+    end
   end
 end
@@ -124,16 +135,25 @@ Vagrant.configure("2") do |config|
     ansible_groups["zypper"] << "worker-[1:#{WORKER_COUNT}]"
   end
 
-  config.vm.provider "libvirt" do |lv|
-    lv.connect_via_ssh = false
-    lv.qemu_use_session = false
-    lv.nic_model_type = "e1000"
-    lv.cpu_mode = 'host-passthrough'
+  if VM_PROVIDER == "libvirt"
+    config.vm.provider "libvirt" do |lv|
+      lv.connect_via_ssh = false
+      lv.qemu_use_session = false
+      lv.nic_model_type = "e1000"
+      lv.cpu_mode = 'host-passthrough'
+    end
+
+    # This allows a working cluster with WORKER_COUNT > 0,
+    # since it removes the --node-ip directive.
+ extra_vars[:extra_server_args] = "" + + elsif VM_PROVIDER == "virtualbox" + config.vm.synced_folder "~", "/shared" end (1..ADMIN_COUNT).each do |i| config.vm.define "admin-#{i}" do |admin| - admin.vm.provider "libvirt" do |lv| + admin.vm.provider VM_PROVIDER do |lv| lv.memory = ADMIN_MEM lv.cpus = ADMIN_CPU if WORKER_COUNT == 0 && ADMIN_DISK @@ -143,7 +163,12 @@ Vagrant.configure("2") do |config| admin.vm.box = BOX_NAME admin.vm.hostname = "admin-#{i}" - admin.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i-1}" + + if VM_PROVIDER == "libvirt" + admin.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i-1}" + elsif VM_PROVIDER == "virtualbox" + admin.vm.network "public_network", bridge: VM_BRIDGE_INET, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i-1}" + end end if WORKER_COUNT == 0 @@ -154,7 +179,7 @@ Vagrant.configure("2") do |config| if WORKER_COUNT > 0 (1..WORKER_COUNT).each do |i| config.vm.define "worker-#{i}" do |worker| - worker.vm.provider "libvirt" do |lv| + worker.vm.provider VM_PROVIDER do |lv| lv.memory = WORKER_MEM lv.cpus = WORKER_CPU if WORKER_DISK @@ -164,7 +189,11 @@ Vagrant.configure("2") do |config| worker.vm.box = BOX_NAME worker.vm.hostname = "worker-#{i}" - worker.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i+(ADMIN_COUNT-1)}" + if VM_PROVIDER == "libvirt" + worker.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i+(ADMIN_COUNT-1)}" + elsif VM_PROVIDER == "virtualbox" + worker.vm.network "public_network", bridge: VM_BRIDGE_INET, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i+(ADMIN_COUNT-1)}" + end # Only execute once the Ansible provisioner, # when all nodes are up and ready. 
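Taken together, the provider switch can be exercised roughly as follows; this sketch assumes setup-vm.sh dispatches on its first argument, as the `source ./setup-vm.sh build` call in setup.sh suggests:

```bash
# virtualbox: bridged networking over VM_BRIDGE_INET; because of the
# --node-ip limitation described in the Vagrantfile comments above,
# keep WORKER_COUNT at 0.
VM_PROVIDER=virtualbox VM_BRIDGE_INET=eth0 WORKER_COUNT=0 ./setup-vm.sh build

# libvirt (the default): private network, workers are supported.
VM_PROVIDER=libvirt WORKER_COUNT=2 ./setup-vm.sh build
```

Node addresses follow from the Vagrantfile: admin-i gets `VM_NET.(VM_NET_LAST_OCTET_START + i - 1)` and worker-i gets `VM_NET.(VM_NET_LAST_OCTET_START + ADMIN_COUNT + i - 1)`.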
diff --git a/env/setup-vm.sh b/env/setup-vm.sh index 5ab3ba9..a592ce9 100755 --- a/env/setup-vm.sh +++ b/env/setup-vm.sh @@ -3,8 +3,10 @@ set -e export BOX_NAME=${BOX_NAME:-"opensuse/Leap-15.3.x86_64"} +export VM_PROVIDER=${VM_PROVIDER:-"libvirt"} export VM_NET=${VM_NET:-"10.46.201.0"} export VM_NET_LAST_OCTET_START=${CLUSTER_NET_LAST_OCTET_START:-"101"} +export VM_BRIDGE_INET=${VM_BRIDGE_INET:-"eth0"} export ADMIN_COUNT=${ADMIN_COUNT:-"1"} export WORKER_COUNT=${WORKER_COUNT:-"1"} export ADMIN_MEM=${ADMIN_MEM:-"4096"} @@ -17,13 +19,14 @@ export WORKER_DISK=${WORKER_DISK:-"no"} export WORKER_DISK_SIZE=${WORKER_DISK_SIZE:-"8G"} export CONTAINER_ENGINE=${CONTAINER_ENGINE:-"podman"} export STOP_AFTER_BOOTSTRAP=${STOP_AFTER_BOOTSTRAP:-"no"} +export STOP_AFTER_K3S_INSTALL=${STOP_AFTER_K3S_INSTALL:-"no"} export S3GW_IMAGE=${S3GW_IMAGE:-"ghcr.io/aquarist-labs/s3gw:latest"} export S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY:-"Always"} export PROV_USER=${PROV_USER:-"vagrant"} #these defaults will change export S3GW_UI_REPO=${S3GW_UI_REPO:-"https://github.com/aquarist-labs/aws-s3-explorer.git"} -export S3GW_UI_VERSION=${S3GW_UI_VERSION:-"s3gw-demo"} +export S3GW_UI_VERSION=${S3GW_UI_VERSION:-"s3gw-ui-testing"} export SCENARIO=${SCENARIO:-"default"} export K3S_VERSION=${K3S_VERSION:-"v1.23.6+k3s1"} @@ -36,8 +39,10 @@ start_env() { build_env() { echo "BOX_NAME=${BOX_NAME}" + echo "VM_PROVIDER=${VM_PROVIDER}" echo "VM_NET=${VM_NET}" echo "VM_NET_LAST_OCTET_START=${VM_NET_LAST_OCTET_START}" + echo "VM_BRIDGE_INET=${VM_BRIDGE_INET}" echo "ADMIN_COUNT=${ADMIN_COUNT}" echo "WORKER_COUNT=${WORKER_COUNT}" echo "ADMIN_MEM=${ADMIN_MEM}" @@ -50,6 +55,7 @@ build_env() { echo "WORKER_DISK_SIZE=${WORKER_DISK_SIZE}" echo "CONTAINER_ENGINE=${CONTAINER_ENGINE}" echo "STOP_AFTER_BOOTSTRAP=${STOP_AFTER_BOOTSTRAP}" + echo "STOP_AFTER_K3S_INSTALL=${STOP_AFTER_K3S_INSTALL}" echo "S3GW_IMAGE=${S3GW_IMAGE}" echo "S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY}" echo "PROV_USER=${PROV_USER}"
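With `STOP_AFTER_K3S_INSTALL` exposed, a staged bring-up becomes possible. A sketch, assuming the Vagrantfile re-reads the environment when `vagrant provision` is re-run later:

```bash
# Stage 1: bring the VMs up and install only K3s
# (no Longhorn, s3gw or UI playbooks yet).
STOP_AFTER_K3S_INSTALL=yes ./setup-vm.sh build

# Inspect the bare cluster from the admin node.
vagrant ssh admin-1 -c "kubectl get nodes -o wide"

# Stage 2: re-run provisioning with the flag off to deploy the rest.
STOP_AFTER_K3S_INSTALL=no vagrant provision
```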