Skip to content

Commit

Permalink
Added support for vSphere cloud provider configuration
Browse files Browse the repository at this point in the history
  • Loading branch information
nilic committed Mar 27, 2020
1 parent 4dd07c3 commit 716af6b
Show file tree
Hide file tree
Showing 5 changed files with 93 additions and 25 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
## 2.2.0 (March 27, 2020)

FEATURES:
* `rancher2-vsphere-cluster`: Added support for defining vSphere cloud provider, necessary for dynamic provisioning of volumes

## 2.1.0 (March 26, 2020)

FEATURES:
Expand Down
1 change: 1 addition & 0 deletions rancher2-vsphere-cluster/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
| all\_in\_one\_node\_pool\_name | Name of the all-in-one node pool | `string` | `"all-in-one"` | no |
| all\_in\_one\_node\_prefix | Prefix for node created in the all-in-one node pool | `string` | `"all-in-one-"` | no |
| cloud\_credential\_name | Name of vSphere cloud credential | `string` | n/a | yes |
| cloud\_provider\_spec | Specification of vSphere cloud provider, which is necessary to allow dynamic provisioning of volumes. See the `examples` directory for syntax and the Rancher vSphere Cloud Provider documentation for an explanation of parameters | `map` | `{}` | no |
| cluster\_description | Cluster description | `string` | n/a | yes |
| cluster\_name | Cluster name | `string` | n/a | yes |
| control\_plane\_node\_pool\_name | Name of the control plane node pool | `string` | `"control-plane"` | no |
Expand Down
63 changes: 38 additions & 25 deletions rancher2-vsphere-cluster/examples/new-cluster/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -172,17 +172,42 @@ module "rancher_cluster_single_node" {
single_node_cluster = true
}

# creates a cluster with one control plane, one etcd and one worker node, canal networking and monitoring enabled
module "rancher_cluster_minimum_inputs" {
# cluster with vSphere cloud provider configuration
module "rancher_cluster_cloud_provider" {
source = "../.."

cloud_credential_name = "MyVsphereCredentials"
cluster_name = "tf_test_min"
cluster_description = "Terraform test Rancher K8s cluster 2"
cluster_name = "tf_test_consolidated"
cluster_description = "Terraform test Rancher K8s cluster"
enable_monitoring = true
enable_alerting = false
enable_istio = false
k8s_network_plugin = "canal"

cloud_provider_spec = {
global_insecure_flag = false
virtual_center_spec = {
myvcenter = {
name = "myvcenter.mydomain.com"
user = "myvcuser"
password = "mysecretpass"
datacenters = "/MyDC"
port = 443
soap_roundtrip_count = 0
}
}
workspace_server = "myvcenter.mydomain.com"
workspace_datacenter = "/MyDC"
workspace_folder = "/MyDC/vm/MyFolder"
workspace_default_datastore = "MyDatastore"
workspace_resourcepool_path = "/MyDC/host/MyCluster/Resources/MyResourcePool"
disk_scsi_controller_type = "pvscsi"
network_public_network = "MyPortgroup"
}

node_specs = {
control_plane = {
vsphere_template = "k8s-control_plane"
master = {
vsphere_template = "MyFolder/k8s-master"
num_vcpu = 2
memory_gb = 4
disk_gb = 20
Expand All @@ -196,23 +221,8 @@ module "rancher_cluster_minimum_inputs" {
template_ssh_password = "MySecretPass"
template_ssh_user_group = "root"
}
etcd = {
vsphere_template = "k8s-etcd"
num_vcpu = 1
memory_gb = 2
disk_gb = 20
datacenter = "MyDC"
datastore = "MyDatastore"
cluster = "MyCluster"
resource_pool = "MyResourcePool"
folder = "MyFolder"
portgroup = "MyPortgroup"
template_ssh_user = "root"
template_ssh_password = "MySecretPass"
template_ssh_user_group = "root"
}
worker = {
vsphere_template = "k8s-worker"
vsphere_template = "MyFolder/k8s-worker"
num_vcpu = 4
memory_gb = 8
disk_gb = 20
Expand All @@ -228,8 +238,11 @@ module "rancher_cluster_minimum_inputs" {
}
}

control_plane_node_quantity = 2
etcd_node_quantity = 3
worker_node_quantity = 2
master_node_pool_name = "tf-master"
master_node_prefix = "tf-master-"
master_node_quantity = 3

worker_node_pool_name = "tf-worker"
worker_node_prefix = "tf-worker-"
worker_node_quantity = 3
}
44 changes: 44 additions & 0 deletions rancher2-vsphere-cluster/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@ terraform {
}
}

locals {
  # Wrap the user-supplied spec in a single-element list when it is non-empty;
  # an empty list makes the dynamic "cloud_provider" block render zero times.
  cloud_provider_spec_list = length(var.cloud_provider_spec) > 0 ? [var.cloud_provider_spec] : []
}

# Resolve the vSphere cloud credential already registered in Rancher by name.
data "rancher2_cloud_credential" "cloud_credential" {
  name = var.cloud_credential_name
}
Expand Down Expand Up @@ -52,6 +56,46 @@ resource "rancher2_cluster" "cluster" {
network {
plugin = var.k8s_network_plugin
}
dynamic "cloud_provider" {
for_each = local.cloud_provider_spec_list
content {
name = "vsphere"
vsphere_cloud_provider {
global {
insecure_flag = lookup(cloud_provider.value, "global_insecure_flag", null)
user = lookup(cloud_provider.value, "global_user", null)
password = lookup(cloud_provider.value, "global_password", null)
datacenters = lookup(cloud_provider.value, "global_datacenters", null)
port = lookup(cloud_provider.value, "global_port", null)
soap_roundtrip_count = lookup(cloud_provider.value, "global_soap_roundtrip_count", null)
}
dynamic "virtual_center" {
for_each = cloud_provider.value.virtual_center_spec
content {
name = virtual_center.value.name
user = virtual_center.value.user
password = virtual_center.value.password
datacenters = virtual_center.value.datacenters
port = lookup(virtual_center.value, "port", null)
soap_roundtrip_count = lookup(virtual_center.value, "soap_roundtrip_count", null)
}
}
workspace {
server = cloud_provider.value.workspace_server
datacenter = cloud_provider.value.workspace_datacenter
folder = cloud_provider.value.workspace_folder
default_datastore = lookup(cloud_provider.value, "workspace_default_datastore", null)
resourcepool_path = lookup(cloud_provider.value, "workspace_resourcepool_path", null)
}
disk {
scsi_controller_type = lookup(cloud_provider.value, "disk_scsi_controller_type", null)
}
network {
public_network = lookup(cloud_provider.value, "network_public_network", null)
}
}
}
}
}
}

Expand Down
5 changes: 5 additions & 0 deletions rancher2-vsphere-cluster/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,11 @@ variable "k8s_network_plugin" {
default = "canal"
}

# Nested structure of mixed value types (bools, strings, nested maps), so the
# type is `any` rather than `map(string)`; defaults to {} which disables the
# cloud_provider block entirely.
variable "cloud_provider_spec" {
  type        = any
  description = "Specification of vSphere cloud provider, which is necessary to allow dynamic provisioning of volumes. See the `examples` directory for syntax and the Rancher vSphere Cloud Provider documentation for an explanation of parameters"
  default     = {}
}

variable "master_node_pool_name" {
type = string
description = "Name of the master (consolidated control plane and etcd) node pool"
Expand Down

0 comments on commit 716af6b

Please sign in to comment.