Microceph (canonical#311)
tomponline authored Oct 15, 2024
2 parents 41f7aa0 + 5485dcf commit e49fb3b
Showing 9 changed files with 51 additions and 75 deletions.
61 changes: 30 additions & 31 deletions .github/workflows/tests.yml
@@ -108,12 +108,12 @@ jobs:
           - snapd
           - storage-buckets
           - storage-disks-vm
-          - "storage-vm btrfs"
-          - "storage-vm ceph"
-          - "storage-vm dir"
-          - "storage-vm lvm"
-          - "storage-vm lvm-thin"
-          - "storage-vm zfs"
+          - storage-vm btrfs
+          - storage-vm ceph
+          - storage-vm dir
+          - storage-vm lvm
+          - storage-vm lvm-thin
+          - storage-vm zfs
           - storage-volumes-vm
           - tpm-vm
           - vm
@@ -122,7 +122,7 @@ jobs:
       include:
         - test: qemu-external-vm
           track: "latest/edge"
-          os: "24.04"
+          os: 24.04
       exclude:
         # not compatible with 4.0/*
         - test: container-copy
@@ -140,51 +140,46 @@ jobs:
           track: "4.0/edge"
         - test: network-ovn
           track: "4.0/edge"
+        # https://github.com/canonical/pylxd/issues/590
         - test: pylxd
           track: "4.0/edge"
         - test: storage-buckets
           track: "4.0/edge"
         - test: storage-disks-vm
           track: "4.0/edge"
-        - test: "storage-vm dir"
+        - test: storage-vm btrfs
           track: "4.0/edge"
-        - test: "storage-vm btrfs"
+        - test: storage-vm ceph
           track: "4.0/edge"
-        - test: "storage-vm ceph"
+        - test: storage-vm dir
           track: "4.0/edge"
-        - test: "storage-vm lvm"
+        - test: storage-vm lvm
           track: "4.0/edge"
-        - test: "storage-vm lvm-thin"
+        - test: storage-vm lvm-thin
           track: "4.0/edge"
-        - test: "storage-vm zfs"
+        - test: storage-vm zfs
           track: "4.0/edge"
         - test: storage-volumes-vm
           track: "4.0/edge"
         - test: tpm-vm
           track: "4.0/edge"
         - test: vm-migration
           track: "4.0/edge"
         # not compatible with 5.0/*
-        - test: efi-vars-editor-vm # not compatible with 5.0/*
+        - test: efi-vars-editor-vm
           track: "5.0/edge"
         - test: vm-migration
           track: "5.0/edge"
-        # waiting for integration with microceph
-        - test: "storage-vm ceph"
         # skip track/os combinations that are too far apart
         - track: "4.0/edge"
-          os: "24.04"
+          os: 24.04
         - track: "5.0/edge"
-          os: "24.04"
+          os: 24.04
         - track: "5.0/edge"
-          os: "20.04"
+          os: 20.04
         - track: "5.21/edge"
-          os: "20.04"
+          os: 20.04
         - track: "latest/edge"
-          os: "20.04"
+          os: 20.04
         - track: "latest/edge"
-          os: "22.04"
-        - test: "vm-migration"
-          track: "4.0/edge"
-        - test: "vm-migration"
-          track: "5.0/edge"
+          os: 22.04

     steps:
       - name: Performance tuning
@@ -199,8 +194,8 @@ jobs:
           # disable dpkg from calling sync()
           echo "force-unsafe-io" | sudo tee /etc/dpkg/dpkg.cfg.d/force-unsafe-io
-      - name: Reclaim some space (storage tests only)
-        if: ${{ startsWith(matrix.test, 'storage') || matrix.test == 'vm-nesting' || matrix.test == 'conversion' }}
+      - name: Reclaim some space
+        if: ${{ matrix.test == 'conversion' || startsWith(matrix.test, 'storage-') || matrix.test == 'vm-nesting' }}
         run: |
           set -eux
           df -h
@@ -266,6 +261,10 @@ jobs:
           path: /home/runner/work/cache
           key: cache-${{ steps.get-date.outputs.date }}

+      - name: Setup MicroCeph
+        if: ${{ matrix.test == 'conversion' || matrix.test == 'storage-buckets' || matrix.test == 'storage-vm ceph' || matrix.test == 'storage-volumes-vm' }}
+        uses: canonical/lxd/.github/actions/setup-microceph@main
+
       - name: ${{ matrix.test }} (${{ matrix.track }})
         run: |
           set -eux
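The new Setup MicroCeph step delegates to a composite action whose contents are not part of this diff. As orientation only — every command below is an assumption about what such an action does, not the action's actual contents — a minimal single-node MicroCeph bootstrap with loop-file OSDs looks roughly like:

    sudo snap install microceph
    sudo microceph cluster bootstrap
    sudo microceph disk add loop,4G,3   # three 4GiB loop-file OSDs, so no spare disk is needed
    sudo microceph enable rgw           # RADOS Gateway, used by the bucket tests
    sudo microceph.ceph status          # confirm the cluster reports healthy

Gating the step on the ceph-related tests keeps the other matrix jobs from paying the setup cost.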
4 changes: 2 additions & 2 deletions tests/conversion
@@ -155,7 +155,7 @@ conversion() {
 lxdMigrateCmd="lxd-migrate --conversion=${conversionOptions}"
 
 # Create storage pool.
-if [ "$poolType" = "dir" ]; then
+if [ "$poolType" = "ceph" ] || [ "$poolType" = "dir" ]; then
     lxc storage create "${poolName}" "${poolType}"
 else
     lxc storage create "${poolName}" "${poolType}" size=11GiB
@@ -255,7 +255,7 @@ IMAGE_PATH="${tmpdir}/backup/virtual-machine.img"
 # Test VM migration using conversion mode. If the server does not support
 # the conversion API extension, lxd-migrate must fall back to migration
 # mode and successfully transfer the VM disk.
-for driver in btrfs lvm zfs dir; do
+for driver in btrfs ceph dir lvm zfs; do
     conversion_vm alpine-raw "${driver}" "${IMAGE_PATH}" "no"
 done
 
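Only the drivers backed by a fixed-size store take an explicit size=: dir pools live directly on the host filesystem and ceph pools allocate from the existing cluster. A standalone sketch of the same branch (pool names here are hypothetical):

    for poolType in btrfs ceph dir lvm zfs; do
        poolName="conv-${poolType}"   # hypothetical name, for illustration
        if [ "$poolType" = "ceph" ] || [ "$poolType" = "dir" ]; then
            lxc storage create "${poolName}" "${poolType}"
        else
            lxc storage create "${poolName}" "${poolType}" size=11GiB
        fi
    done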
12 changes: 3 additions & 9 deletions tests/cpu-vm
@@ -37,18 +37,12 @@ lxc storage create "${poolName}" "${poolDriver}"
 # still work
 lxc profile set default limits.kernel.nofile 50
 
-# 4.0 does not reject `limits.kernel.*` keys on VM instances
-if ! echo "${LXD_SNAP_CHANNEL}" | grep -qE "^4\.0/"; then
-    ! lxc init v0 --vm --empty -c limits.kernel.cpu=46 -s "${poolName}" || false
-fi
+! lxc init v0 --vm --empty -c limits.kernel.cpu=46 -s "${poolName}" || false
 
 lxc init v0 --vm --empty -s "${poolName}"
 
-# 4.0 does not reject `limits.kernel.*` keys on VM instances
-if ! echo "${LXD_SNAP_CHANNEL}" | grep -qE "^4\.0/"; then
-    # limits.kernel.* only applies to containers (shouldn't work)
-    ! lxc config set v0 limits.kernel.as=1GiB || false
-fi
+# limits.kernel.* only applies to containers (shouldn't work)
+! lxc config set v0 limits.kernel.as=1GiB || false
 
 lxc delete v0
 
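Both assertions rely on the `! cmd || false` idiom: under `set -e` a failing negated command does not abort the script, so `|| false` is appended to turn an unexpected success into a hard failure. A minimal illustration:

    #!/bin/sh
    set -eu
    # If the config set is rejected (expected), "!" flips it to success and the script continues.
    # If it unexpectedly succeeds, "!" flips it to failure; since "set -e" ignores negated
    # commands, the trailing "|| false" is what actually aborts the run.
    ! lxc config set v0 limits.kernel.as=1GiB || false
    echo "limits.kernel.as was rejected, as expected"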
2 changes: 1 addition & 1 deletion tests/devlxd-vm
@@ -115,7 +115,7 @@ if hasNeededAPIExtension devlxd_images_vm; then
     monitorPID="${!}"
 fi
 
-lxc exec v1 -- /snap/bin/lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1v1 --vm
+lxc exec v1 -- /snap/bin/lxc launch "${IMAGE}" v1v1 --vm
 sleep 30
 lxc exec v1 -- /snap/bin/lxc info v1v1 | grep -F RUNNING
 
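The fix replaces a second, inline copy of the default image with the ${IMAGE} variable computed once at the top of the script, so the nested launch can no longer drift from the outer one. The `${VAR:-default}` form it relies on is standard POSIX parameter expansion:

    unset TEST_IMG
    echo "${TEST_IMG:-ubuntu-minimal-daily:24.04}"   # -> ubuntu-minimal-daily:24.04 (unset or empty falls back)
    TEST_IMG="ubuntu-daily:22.04"
    echo "${TEST_IMG:-ubuntu-minimal-daily:24.04}"   # -> ubuntu-daily:22.04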
2 changes: 2 additions & 0 deletions tests/efi-vars-editor-vm
@@ -33,6 +33,8 @@ lxc storage create "${poolName}" "${poolDriver}"

 echo "==> Create VM and boot"
 lxc launch "${IMAGE}" v1 --vm -s "${poolName}"
+
+# Waiting for the instance to be ready ensures LXD is done initializing the NVRAM
 waitInstanceReady v1
 lxc info v1

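waitInstanceReady comes from the suite's shared helpers and is not part of this diff. A plausible sketch of such a helper — the real implementation may well differ — polls the instance status until the guest agent reports READY:

    waitInstanceReady() {
        instance="$1"
        for _ in $(seq 90); do
            # "-c s" prints only the status column; READY appears once the agent signals ready.
            if [ "$(lxc list -f csv -c s "${instance}")" = "READY" ]; then
                return 0
            fi
            sleep 1
        done
        echo "Timed out waiting for ${instance} to become ready" >&2
        return 1
    }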
26 changes: 6 additions & 20 deletions tests/storage-buckets
@@ -27,21 +27,7 @@ elif [ "${arch}" = "riscv64" ] ; then
     exit 0
 fi
 
-poolDriverList="${1:-dir btrfs lvm lvm-thin zfs ceph}"
-
-if echo "${poolDriverList}" | grep -qwF "ceph"; then
-    echo "::warning::Skipping test on ceph until we can integrate with microceph"
-    # shellcheck disable=SC2001
-    poolDriverList="$(echo "${poolDriverList}" | sed 's/ \?\bceph\b//')"
-fi
-
-if echo "${poolDriverList}" | grep -qwF "ceph"; then
-    # XXX: LXD will be reloaded further down
-    snap set lxd ceph.external=true
-
-    # Install dependencies
-    install_deps ceph-common
-fi
+poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
 
 # Clean up the build dir in case it hung around from a failed test.
 rm -rf /opt/minio
@@ -71,15 +57,15 @@ for poolDriver in $poolDriverList
 do
     echo "==> Create storage pool using driver ${poolDriver}"
     if [ "${poolDriver}" = "dir" ]; then
-        lxc storage create "${poolName}" "${poolDriver}" volume.size=5GB
+        lxc storage create "${poolName}" "${poolDriver}" volume.size=5GiB
     elif [ "${poolDriver}" = "ceph" ]; then
-        lxc storage create "${poolName}" cephobject cephobject.radosgw.endpoint="${LXD_CEPH_CEPHOBJECT_RADOSGW}"
+        lxc storage create "${poolName}" cephobject cephobject.radosgw.endpoint="${LXD_CEPH_CEPHOBJECT_RADOSGW:-http://127.0.0.1}"
     elif [ "${poolDriver}" = "lvm" ]; then
-        lxc storage create "${poolName}" "${poolDriver}" size=40GiB lvm.use_thinpool=false volume.size=5GB
+        lxc storage create "${poolName}" "${poolDriver}" size=40GiB lvm.use_thinpool=false volume.size=5GiB
     elif [ "${poolDriver}" = "lvm-thin" ]; then
-        lxc storage create "${poolName}" lvm size=20GiB volume.size=5GB
+        lxc storage create "${poolName}" lvm size=20GiB volume.size=5GiB
     else
-        lxc storage create "${poolName}" "${poolDriver}" size=20GB volume.size=5GB
+        lxc storage create "${poolName}" "${poolDriver}" size=20GiB volume.size=5GiB
     fi
 
     if [ "${poolDriver}" != "ceph" ]; then
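Once a pool exists, the test exercises the storage bucket API on top of it. Representative commands (bucket and key names here are hypothetical, and the listen address is an arbitrary example; local drivers need core.storage_buckets_address set, while cephobject pools serve buckets through the radosgw endpoint configured above):

    lxc config set core.storage_buckets_address "127.0.0.1:9000"   # local pools only
    lxc storage bucket create "${poolName}" demo-bucket
    lxc storage bucket key create "${poolName}" demo-bucket demo-key
    lxc storage bucket key list "${poolName}" demo-bucket
    lxc storage bucket delete "${poolName}" demo-bucket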
3 changes: 1 addition & 2 deletions tests/storage-vm
@@ -11,8 +11,7 @@ install_lxd

 IMAGE="${TEST_IMG:-ubuntu-minimal-daily:24.04}"
 
-# XXX: skip ceph for now
-poolDriverList="${1:-dir btrfs lvm lvm-thin zfs}"
+poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
 
 # Configure LXD
 lxc network create lxdbr0
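Because the driver list is taken from `${1:-...}`, the first positional argument overrides the default, which is handy for exercising a single driver locally:

    ./tests/storage-vm ceph            # run only the ceph driver
    ./tests/storage-vm "btrfs zfs"     # run a subset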
3 changes: 1 addition & 2 deletions tests/storage-volumes-vm
@@ -9,8 +9,7 @@ install_lxd

 IMAGE="${TEST_IMG:-ubuntu-minimal-daily:24.04}"
 
-# XXX: skip ceph for now
-poolDriverList="${1:-dir btrfs lvm lvm-thin zfs}"
+poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
 
 # Configure LXD
 lxc project switch default
13 changes: 5 additions & 8 deletions tests/vm-migration
@@ -96,17 +96,14 @@ fi

 # Create a preseed file for member2 to join member1.
 member2Address="$(lxc query /1.0/instances/member2?recursion=2 | jq -r ".state.network.enp5s0.addresses[0].address")"
-preseed="$(
-cat <<EOF
+
+# Initialise member2 with the preseed.
+lxc exec member2 -- lxd init --preseed << EOF
 cluster:
   enabled: true
   server_address: "${member2Address}"
   cluster_token: "${joinToken}"
 EOF
-)"
-
-# Initialise member2 with the preseed.
-echo "${preseed}" | lxc exec member2 -- lxd init --preseed
 
 # Copy the ceph config from the microceph node into each cluster member.
 rm -rf etc/ceph
@@ -122,10 +119,10 @@ lxc exec member1 -- lxc storage create ceph ceph --target member2
 lxc exec member1 -- lxc storage create ceph ceph
 
 # Create a volume in the ceph pool to test that we can live-migrate a VM with this volume attached.
-lxc exec member1 -- lxc storage volume create ceph vol1 --type=block size=500MiB
+lxc exec member1 -- lxc storage volume create ceph vol1 --type=block size=64MiB
 
 # Create a VM in the cluster, on member1.
-lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=512MiB
+lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=512MiB -d root,size=3584MiB
 
 # Add vol1 as a disk device to the VM.
 lxc exec member1 -- lxc config device add v1 vol1-disk disk pool=ceph source=vol1
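The migration step itself falls outside this excerpt. With the shared ceph pool in place and migration.stateful=true set, the live move between members would typically be (an assumption about the surrounding script, not part of this diff):

    lxc exec member1 -- lxc start v1
    # Relocating a running stateful VM between cluster members triggers a live migration.
    lxc exec member1 -- lxc move v1 --target member2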
