From 45899703e47aeb59cf4fa6a6b97f24f9d16aed7a Mon Sep 17 00:00:00 2001
From: Viktor Ribchev
Date: Wed, 8 Nov 2023 15:58:53 +0200
Subject: [PATCH] Added script which creates and attaches volumes

Fixed notes
---
 .terraform.lock.hcl                    |   3 +
 modules/configuration/main.tf          |  11 +++
 modules/vm/templates/entrypoint.sh.tpl | 102 +++++++++++++++++++++++++
 3 files changed, 116 insertions(+)

diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
index 7c6f4a14..71153377 100644
--- a/.terraform.lock.hcl
+++ b/.terraform.lock.hcl
@@ -6,6 +6,7 @@ provider "registry.terraform.io/hashicorp/azurerm" {
   constraints = ">= 3.71.0, >= 3.76.0"
   hashes = [
     "h1:DWJ+qB1AY68Is827deEJH4pV7BL4PhDmaaWLlYkhqLM=",
+    "h1:oXXa023punihP4XHVp1gUlkflXJ6Y/Oa3+tYIDz3HXM=",
     "zh:09a965d5a35ddf418c0cc0eda507f79ba65ce679faa1ffc636c965c22cd2da88",
     "zh:144523f78596df2843ccf9c4dfa53670c71c66ef1edb96853b4d06b8d2973e26",
     "zh:1b2bbd1b2a7a8715f1bc828a174fc8f6810831cfebf3bffef141638b59aa4589",
@@ -25,6 +26,7 @@ provider "registry.terraform.io/hashicorp/random" {
   version     = "3.5.1"
   constraints = "~> 3.0"
   hashes = [
+    "h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
     "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
     "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
     "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
@@ -46,6 +48,7 @@ provider "registry.terraform.io/microsoft/azuredevops" {
   constraints = ">= 0.9.0"
   hashes = [
     "h1:GazdScTk4i4y9aIsvsO7GYkHYPYJBfaddaU+VlkLnZg=",
+    "h1:GwFmEDohB4JeBGMMvSOdSw7SHxh1xZqQUfko7eaW+l4=",
     "zh:07e596c045f8ee411c630e29e180e946d5e75af615e0223877de9c4718ff0265",
     "zh:18c07b7b610a85079b510117296da1fe2cd99da3664ece2a98390329dac2b58a",
     "zh:220949b1271420864d324f0494739b70ed79f66ad3d2928d9acb804bc04d1e75",
diff --git a/modules/configuration/main.tf b/modules/configuration/main.tf
index 4405f1bc..a7ebdfbf 100644
--- a/modules/configuration/main.tf
+++ b/modules/configuration/main.tf
@@ -1,3 +1,7 @@
+data "azurerm_resource_group" "graphdb" {
+  name = var.resource_group_name
+}
+
 data "azurerm_user_assigned_identity" "graphdb-instances" {
   name                = var.identity_name
   resource_group_name = var.resource_group_name
@@ -32,3 +36,10 @@ resource "azurerm_role_assignment" "graphdb-license-secret-reader" {
   scope                = data.azurerm_key_vault.graphdb.id
   role_definition_name = "Key Vault Secrets User"
 }
+
+# TODO: Should be moved to the vm module
+resource "azurerm_role_assignment" "rg-contributor-role" {
+  principal_id         = data.azurerm_user_assigned_identity.graphdb-instances.principal_id
+  scope                = data.azurerm_resource_group.graphdb.id
+  role_definition_name = "Contributor"
+}
diff --git a/modules/vm/templates/entrypoint.sh.tpl b/modules/vm/templates/entrypoint.sh.tpl
index 3ef6784b..480c9793 100644
--- a/modules/vm/templates/entrypoint.sh.tpl
+++ b/modules/vm/templates/entrypoint.sh.tpl
@@ -14,8 +14,110 @@ done
 # Login in Azure CLI with managed identity (user or system assigned)
 az login --identity
 
+# Get the GraphDB license from Key Vault
+az keyvault secret download --vault-name ${key_vault_name} --name graphdb-license --file /etc/graphdb/graphdb.license --encoding base64
+
 # TODO: Find/create/mount volumes
 # https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/tutorial-use-disks-cli
+# Find/create/attach volumes
+
+# The hostname is wrapped in literal single quotes so that it can be used
+# directly as a string literal in the JMESPath queries below
+INSTANCE_HOSTNAME=\'$(hostname)\'
+SUBSCRIPTION_ID=$(az account show --query "id" --output tsv)
+RESOURCE_GROUP=$(az vmss list --query "[0].resourceGroup" --output tsv)
+VMSS_NAME=$(az vmss list --query "[0].name" --output tsv)
+INSTANCE_ID=$(az vmss list-instances --resource-group $RESOURCE_GROUP --name $VMSS_NAME --query "[?contains(osProfile.computerName, $${INSTANCE_HOSTNAME})].instanceId" --output tsv)
+ZONE_ID=$(az vmss list-instances --resource-group $RESOURCE_GROUP --name $VMSS_NAME --query "[?contains(osProfile.computerName, $${INSTANCE_HOSTNAME})].zones" --output tsv)
+REGION_ID=$(az vmss list-instances --resource-group $RESOURCE_GROUP --name $VMSS_NAME --query "[?contains(osProfile.computerName, $${INSTANCE_HOSTNAME})].location" --output tsv)
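+
+# Note: the same values are also exposed by the Azure Instance Metadata Service,
+# which would avoid the az CLI round-trips. Shown for reference only, not used here:
+#   curl -s -H Metadata:true "http://169.254.169.254/metadata/instance/compute?api-version=2021-02-01"
+# returns JSON with resourceGroupName, vmScaleSetName, location and zone fields.
+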
+# Do NOT change the LUN. The device lookup below relies on it to find and mount the disk in the VM.
+LUN=2
+
+# TODO: Move to TF variables
+TIER="P40"
+DISK_SIZE_GB=128
+
+# TODO: Define the disk name based on the hostname?
+diskName="Disk_$${VMSS_NAME}_$${INSTANCE_ID}"
+
+# Wait up to 60 seconds for an unattached disk from this VMSS to become visible
+for i in $(seq 1 6); do
+  existingUnattachedDisk=$(
+    az disk list --resource-group $RESOURCE_GROUP \
+      --query "[?diskState=='Unattached' && starts_with(name, 'Disk_$${VMSS_NAME}')].{Name:name}" \
+      --output tsv
+  )
+
+  if [ -z "$${existingUnattachedDisk:-}" ]; then
+    echo 'Disk not yet available'
+    sleep 10
+  else
+    break
+  fi
+done
+
+# If no unattached disk showed up, create a new managed disk in this instance's zone
+if [ -z "$existingUnattachedDisk" ]; then
+  echo "Creating a new managed disk"
+  az disk create --resource-group $RESOURCE_GROUP --name $diskName --size-gb $DISK_SIZE_GB --location $REGION_ID --sku Premium_LRS --zone $ZONE_ID --tier $TIER
+fi
+
+# Check whether a managed disk is already attached to this instance
+attachedDisk=$(az vmss list-instances --resource-group "$RESOURCE_GROUP" --name "$VMSS_NAME" --query "[?instanceId=='$INSTANCE_ID'].storageProfile.dataDisks[].name" --output tsv)
+
+if [ -z "$attachedDisk" ]; then
+  echo "No data disks attached for instance ID $INSTANCE_ID in VMSS $VMSS_NAME."
+  # Try to attach an existing unattached managed disk from the same availability zone
+  availableDisks=$(az disk list --resource-group $RESOURCE_GROUP --query "[?diskState=='Unattached' && starts_with(name, 'Disk_$${VMSS_NAME}') && zones[0]=='$${ZONE_ID}'].{Name:name}" --output tsv)
+  echo "Attaching available disk $availableDisks."
+  # Set the Internal Field Separator to newline to handle spaces in names
+  IFS=$'\n'
+  # Iterate through all available disks and attempt to attach one; the attach can
+  # fail if another instance claims the disk first, hence the `|| true`
+  for availableDisk in $availableDisks; do
+    az vmss disk attach --vmss-name $VMSS_NAME --resource-group $RESOURCE_GROUP --instance-id $INSTANCE_ID --lun $LUN --disk "$availableDisk" || true
+  done
+  # Restore default word splitting
+  unset IFS
+fi
+
+# Get the device name based on the LUN (SCSI address on host 1 ending in LUN 2)
+graphdb_device=$(lsscsi --scsi --size | awk '/\[1:.*:0:2\]/ {print $7}')
+
+# Check that the device is present after attaching the disk
+if [ -b "$graphdb_device" ]; then
+  echo "Device $graphdb_device is available."
+else
+  echo "Device $graphdb_device is not available. Something went wrong."
+  exit 1
+fi
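+
+# For reference, the matched `lsscsi --scsi --size` line is expected to look
+# roughly like this (illustrative output, not captured from a real VM):
+#   [1:0:0:2]  disk  Msft  Virtual Disk  1.0  /dev/sdc  137GB
+# i.e. the SCSI address ends in the LUN, and awk's $7 is the device path.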
+
+# Create a file system if there isn't one already; `file -s` prints
+# "<device>: data" when the device carries no file system signature
+if [ "$graphdb_device: data" = "$(file -s $graphdb_device)" ]; then
+  mkfs -t ext4 $graphdb_device
+fi
+
+disk_mount_point="/var/opt/graphdb"
+mkdir -p "$disk_mount_point"
+
+# Check if the disk is already mounted
+if ! mount | grep -q "$graphdb_device"; then
+  echo "The disk at $graphdb_device is not mounted."
+
+  # Add an entry to the fstab file to automatically mount the disk on boot
+  if ! grep -q "$graphdb_device" /etc/fstab; then
+    echo "$graphdb_device $disk_mount_point ext4 defaults 0 2" >> /etc/fstab
+  fi
+
+  # Mount the disk using the fstab entry
+  mount "$disk_mount_point"
+  echo "The disk at $graphdb_device is now mounted at $disk_mount_point."
+else
+  echo "The disk at $graphdb_device is already mounted."
+fi
+
+# Recreate the GraphDB data folders if necessary and fix their ownership
+mkdir -p /var/opt/graphdb/node /var/opt/graphdb/cluster-proxy
+# TODO: Research how to avoid using chown, as it would be a slow operation if data is present.
+chown -R graphdb:graphdb /var/opt/graphdb
 
 #
 # DNS hack
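
To sanity-check the disk setup on a booted instance, one could run the following
(illustrative commands, not part of the patch):

    lsscsi --scsi --size              # the data disk should appear at LUN 2
    df -h /var/opt/graphdb            # mount point backed by the new ext4 device
    grep /var/opt/graphdb /etc/fstab  # the fstab entry added by the script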