diff --git a/ansible/configs/ansible-bootcamp/How.To.Create.Env.Type.adoc b/ansible/configs/ansible-bootcamp/How.To.Create.Env.Type.adoc new file mode 100644 index 00000000000..7473d3e1b03 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/How.To.Create.Env.Type.adoc @@ -0,0 +1,190 @@ += How to create an Environment Type + +== Create a base for your new environment type + +* Duplicate the "generic-example" environemnt type directory or use another + environment type directory that is closer to your end goal. + +== Edit your cloud provider "blueprint" or "template" + +NOTE: At this point this is "aws" based, with time we will have other providers. + +* Edit the link:./files/cloud_providers/ec2_cloud_template.j2[./files/cloud_provides/ec2_cloud_template.j2] + +* Add Security Groups if you require any. +* Add LaunchConfigs and AutoScale Groups + +---- +"HostLC": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "AssociatePublicIpAddress": true, + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "InstanceType": "{{host_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroups": [ + { + "Ref": "HostSG" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "VolumeSize": 30 + } + }, + { + "DeviceName": "/dev/xvdb", + "Ebs": { + "VolumeSize": 100 + } + } + ] + } +}, +"HostAsg": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "DesiredCapacity": {{host_instance_count}}, + "LaunchConfigurationName": { + "Ref": "HostLC" + }, + "MaxSize": 100, + "MinSize": 1, + "Tags": [ + { + "Key": "Name", + "Value": "host", + "PropagateAtLaunch": true + }, + { + "Key": "AnsibleGroup", + "Value": "hosts", + "PropagateAtLaunch": true + }, + { + "Key": "Project", + "Value": "{{project_tag}}", + "PropagateAtLaunch": true + }, + { + "Key": "{{ project_tag }}", + "Value": "host", + "PropagateAtLaunch": true + } + ], + "VPCZoneIdentifier": [ + { + "Ref": "PublicSubnet" + } + ] + } +}, +---- + +** Pay attention to the Tags created for the different AS groups + +---- + +{ + "Key": "Project", + "Value": "{{project_tag}}", + "PropagateAtLaunch": true +}, +{ + "Key": "{{ project_tag }}", + "Value": "host", + "PropagateAtLaunch": true +} + +---- + + +* Add DNS Entries you need for your environment: +---- +"MasterDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": "Master", + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name": "{{master_public_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "Master", + "PublicIp" + ] + } + ] + } + ] + } +}, + +---- + +* Add S3 or other resources you require: +---- + +"RegistryS3": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": "{{ env_type }}-{{ guid }}", + "Tags": [ + { + "Key": "Name", + "Value": "s3-{{ env_type }}-{{ guid }}" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + } + ] + } +} +}, +---- + +* Add any "outputs" you need from the cloud provider: +---- +"RegistryS3Output": { + "Description": "The ID of the S3 Bucket", + "Value": { + "Ref": "RegistryS3" + }}, + +---- + + + +== Internal DNS file + + +* Edit the internal dns template: link:./files/ec2_internal_dns.json.j2[./files/ec2_internal_dns.json.j2] +** You can create nicely indexed internal hostname by creating a for loop in the file for each host group +---- +{% for host in groups[('tag_' + env_type + '-' + guid + '_support') | replace('-', '_') ] %} + { + "Action": "{{DNS_action}}", + 
"ResourceRecordSet": { + "Name": "support{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": 20, + "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] + } + }, + +---- diff --git a/ansible/configs/ansible-bootcamp/README.adoc b/ansible/configs/ansible-bootcamp/README.adoc new file mode 100644 index 00000000000..d25c632ff58 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/README.adoc @@ -0,0 +1,84 @@ += OPENTLC OCP-HA-LAB Env_Type config + +For example, we will include things such as ec2 instance names, secret +variables such as private/public key pair information, passwords, etc. + +Eventually, all sensitive information will be encrypted via Ansible Vault. The +inclusion as well as instructions on doing this will be included in a later +release. + +== Set up your "Secret" variables + +* You need to provide some credentials for deployments to work +* Create a file called "env_secrets.yml" and put it in the + ./ansible/configs/CONFIGNAME/ directory. +** At this point this file has to be created even if no vars from it are used. +* You can choose to provide these values as extra vars (-e "var=value") in the + command line if you prefer not to keep sensitive information in a file. +* In the future we will use ansible vault for this. + +.Example contents of "Secret" Vars file +---- +# ## Logon credentials for Red Hat Network +# ## Required if using the subscription component +# ## of this playbook. +rhel_subscription_user: '' +rhel_subscription_pass: '' +# +# ## LDAP Bind Password +bindPassword: '' +# +# ## Desired openshift admin name and password +admin_user: "" +admin_user_password: "" +# +# ## AWS Credentials. This is required. +aws_access_key_id: "" +aws_secret_access_key: "" +---- + + +== Review the Env_Type variable file + +* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you + need to define to control the deployment of your environment. + +== Running Ansible Playbook + +. You can run the playbook with the following arguments to overwrite the default variable values: +[source,bash] +---- +# Set the your environment variables (this is optional, but makes life easy) + +REGION=ap-southeast-1 +KEYNAME=ocpkey +GUID=ansibletest1 +ENVTYPE="ansible-bootcamp" +CLOUDPROVIDER=ec2 +HOSTZONEID='Z3IHLWJZOU9SRT' +#REPO_PATH='https://admin.example.com/repos/ocp/3.5/' +BASESUFFIX='.example.opentlc.com' +#IPAPASS=aaaaaa +REPO_VERSION=3.5 +TOWER_COUNT=3 +TOWERDB_COUNT=3 + +## For a HA environment that is not installed with OpenShift + +time ansible-playbook -i ./inventory/ ./main.yml \ + -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" \ + -e "aws_region=${REGION}" -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \ + -e "subdomain_base_suffix=${BASESUFFIX}" \ + -e "tower_instance_count=${TOWER_COUNT}" -e "towerdb_instance_count=${TOWERDB_COUNT}" \ + -e "software_to_deploy=none" -e "tower_run=false" + + +. 
To Delete an environment +---- + +#To Destroy an Env +ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \ + -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" \ + -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" -e "subdomain_base_suffix=${BASESUFFIX}" + +---- diff --git a/ansible/configs/ansible-bootcamp/destroy_env.yml b/ansible/configs/ansible-bootcamp/destroy_env.yml new file mode 100644 index 00000000000..6c635f39851 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/destroy_env.yml @@ -0,0 +1,46 @@ +- name: Starting environment deployment + hosts: localhost + connection: local + gather_facts: False + become: no + vars_files: + - "./env_vars.yml" + - "./env_secret_vars.yml" + + tasks: + # - name: get internal dns zone id if not provided + # environment: + # AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}" + # AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}" + # AWS_DEFAULT_REGION: "{{aws_region}}" + # shell: "aws route53 list-hosted-zones-by-name --region={{aws_region}} --dns-name={{guid}}.internal. --output text --query='HostedZones[*].Id' | awk -F'/' '{print $3}'" + # register: internal_zone_id_register + # - debug: + # var: internal_zone_id_register + # - name: Store internal route53 ID + # set_fact: + # internal_zone_id: "{{ internal_zone_id_register.stdout }}" + # when: 'internal_zone_id_register is defined' + # - name: delete internal dns names + # environment: + # AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}" + # AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}" + # AWS_DEFAULT_REGION: "{{aws_region}}" + # shell: "aws route53 change-resource-record-sets --hosted-zone-id {{internal_zone_id}} --change-batch file://../../workdir/internal_dns-{{ env_type }}-{{ guid }}_DELETE.json --region={{aws_region}}" + # ignore_errors: true + # tags: + # - internal_dns_delete + # when: internal_zone_id is defined + + + - name: Destroy cloudformation template + cloudformation: + stack_name: "{{project_tag}}" + state: "absent" + region: "{{aws_region}}" + disable_rollback: false + template: "../../workdir/ec2_cloud_template.{{ env_type }}.{{ guid }}.json" + tags: + Stack: "project {{env_type}}-{{ guid }}" + tags: [ destroying, destroy_cf_deployment ] +## we need to add something to delete the env specific key. diff --git a/ansible/configs/ansible-bootcamp/env_vars.yml b/ansible/configs/ansible-bootcamp/env_vars.yml new file mode 100644 index 00000000000..544937776bf --- /dev/null +++ b/ansible/configs/ansible-bootcamp/env_vars.yml @@ -0,0 +1,130 @@ +################################################################################ +#### GENERIC EXAMPLE +################################################################################ + +install_common: true +install_nfs: false +install_bastion: false +env_authorized_key: "{{guid}}key" +set_env_authorized_key: true +software_to_deploy: "none" +################################################################################ +#### OCP IMPLEMENATATION LAB +################################################################################ + +#repo_version: '3.4' +#tower_public_dns: "loadbalancer.{{subdomain_base}}." 
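# NOTE: any variable in this file can be overridden at deploy time with
# -e "var=value" on the ansible-playbook command line (see README.adoc).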
+ +################################################################################ +#### Common host variables +################################################################################ + +update_packages: false +common_packages: + - python + - unzip + - bash-completion + - tmux + - bind-utils + - wget + - git + - vim-enhanced +# - ansible + +rhel_repos: + - rhel-7-server-rpms + - rhel-7-server-extras-rpms + +use_subscription_manager: false +use_own_repos: false +#rhn_pool_id_string: "Red Hat Enterprise Linux Server" +#rhn_pool_id_string: OpenShift Container Platform + +################################################################################ +#### nfs host settings +################################################################################ + +################################################################################ +#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES +################################################################################ + +#### Route 53 Zone ID (AWS) +HostedZoneId: '' +key_name: '' +aws_region: us-east-2 + +# admin_user: '' +# admin_user_password: '' + +#### Connection Settings +ansible_ssh_user: ec2-user +remote_user: ec2-user + +#### Networking (AWS) +guid: defaultguid +subdomain_base_short: "{{ guid }}" +subdomain_base_suffix: ".example.opentlc.com" +subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}" +tower_run: false + +#### Environment Sizing + +#bastion_instance_type: "t2.micro" +bastion_instance_type: "t2.small" + + + +tower_instance_type: "t2.medium" +tower_instance_count: 3 + + +towerdb_instance_type: "t2.medium" +towerdb_instance_count: 2 + +ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem + + +#### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYEMNT +#### You can, but you usually wouldn't need to. + +#### CLOUDFORMATIONS vars + +project_tag: "{{ env_type }}-{{ guid }}" + +create_internal_dns_entries: true +zone_internal_dns: "{{guid}}.internal." +chomped_zone_internal_dns: "{{guid}}.internal" +zone_public_dns: "{{subdomain_base}}." + +bastion_public_dns: "bastion.{{subdomain_base}}." 
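# The *_chomped variant below is the same record name without the trailing dot,
# presumably for contexts that do not accept a trailing root ".".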
+bastion_public_dns_chomped: "bastion.{{subdomain_base}}" + + +# vpcid_cidr_block: "192.168.0.0/16" +vpcid_name_tag: "{{subdomain_base}}" + +# az_1_name: "{{ aws_region }}a" +# az_2_name: "{{ aws_region }}b" +# +# subnet_private_1_cidr_block: "192.168.2.0/24" +# subnet_private_1_az: "{{ az_2_name }}" +# subnet_private_1_name_tag: "{{subdomain_base}}-private" +# +# subnet_private_2_cidr_block: "192.168.1.0/24" +# subnet_private_2_az: "{{ az_1_name }}" +# subnet_private_2_name_tag: "{{subdomain_base}}-private" +# +# subnet_public_1_cidr_block: "192.168.10.0/24" +# subnet_public_1_az: "{{ az_1_name }}" +# subnet_public_1_name_tag: "{{subdomain_base}}-public" +# +# subnet_public_2_cidr_block: "192.168.20.0/24" +# subnet_public_2_az: "{{ az_2_name }}" +# subnet_public_2_name_tag: "{{subdomain_base}}-public" + +# dopt_domain_name: "{{ aws_region }}.compute.internal" +# +# rtb_public_name_tag: "{{subdomain_base}}-public" +# rtb_private_name_tag: "{{subdomain_base}}-private" +# +# cf_template_description: "{{ env_type }}-{{ guid }} template" diff --git a/ansible/configs/ansible-bootcamp/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/ansible-bootcamp/files/cloud_providers/ec2_cloud_template.j2 new file mode 100644 index 00000000000..4b0cf466097 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/files/cloud_providers/ec2_cloud_template.j2 @@ -0,0 +1,527 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { }, + "Mappings": { + "RegionMapping": { + "us-east-1": { + "AMI": "ami-b63769a1" + }, + "us-east-2": { + "AMI": "ami-0932686c" + }, + "us-west-1": { + "AMI": "ami-2cade64c" + }, + "us-west-2": { + "AMI": "ami-6f68cf0f" + }, + "eu-west-1": { + "AMI": "ami-02ace471" + }, + "eu-central-1": { + "AMI": "ami-e4c63e8b" + }, + "ap-northeast-1": { + "AMI": "ami-5de0433c" + }, + "ap-northeast-2": { + "AMI": "ami-44db152a" + }, + "ap-southeast-1": { + "AMI": "ami-2c95344f" + }, + "ap-southeast-2": { + "AMI": "ami-39ac915a" + }, + "sa-east-1": { + "AMI": "ami-7de77b11" + }, + "ap-south-1": { + "AMI": "ami-cdbdd7a2" + } + }, + "DNSMapping": { + "us-east-1": { + "domain": "us-east-1.compute.internal" + }, + "us-west-1": { + "domain": "us-west-1.compute.internal" + }, + "us-west-2": { + "domain": "us-west-2.compute.internal" + }, + "eu-west-1": { + "domain": "eu-west-1.compute.internal" + }, + "eu-central-1": { + "domain": "eu-central-1.compute.internal" + }, + "ap-northeast-1": { + "domain": "ap-northeast-1.compute.internal" + }, + "ap-northeast-2": { + "domain": "ap-northeast-2.compute.internal" + }, + "ap-southeast-1": { + "domain": "ap-southeast-1.compute.internal" + }, + "ap-southeast-2": { + "domain": "ap-southeast-2.compute.internal" + }, + "sa-east-1": { + "domain": "sa-east-1.compute.internal" + }, + "ap-south-1": { + "domain": "ap-south-1.compute.internal" + } + } + }, + "Resources": { + "Vpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "192.199.0.0/16", + "EnableDnsSupport": "true", + "EnableDnsHostnames": "true", + "Tags": [ + { + "Key": "Name", + "Value": "{{vpcid_name_tag}}" + }, + { + "Key": "Hostlication", + "Value": { + "Ref": "AWS::StackId" + } + } + ] + } + }, + "VpcInternetGateway": { + "Type": "AWS::EC2::InternetGateway", + "Properties": {} + }, + "VpcGA": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "VpcInternetGateway" + }, + "VpcId": { + "Ref": "Vpc" + } + } + }, + "VpcRouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc" + } + } + }, + 
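{# The next three resources wire the VPC to the Internet: a default route
   (0.0.0.0/0) through VpcInternetGateway is added to VpcRouteTable, and the
   route table is then associated with PublicSubnet, which also maps public IPs
   on launch. Jinja comments like this one are stripped when the template is
   rendered, so nothing extra reaches CloudFormation. #}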
"VPCRouteInternetGateway": { + "DependsOn" : "VpcGA", + "Type": "AWS::EC2::Route", + "Properties": { + "GatewayId": { + "Ref": "VpcInternetGateway" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "RouteTableId": { + "Ref": "VpcRouteTable" + } + } + }, + "PublicSubnet": { + "Type": "AWS::EC2::Subnet", + "DependsOn": [ + "Vpc" + ], + "Properties": { + "CidrBlock": "192.199.0.0/24", + "Tags": [ + { + "Key": "Name", + "Value": "{{project_tag}}" + }, + { + "Key": "Hostlication", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "MapPublicIpOnLaunch": "true", + "VpcId": { + "Ref": "Vpc" + } + } + }, + "PublicSubnetRTA": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcRouteTable" + }, + "SubnetId": { + "Ref": "PublicSubnet" + } + } + }, + "HostSG": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Host", + "VpcId": { + "Ref": "Vpc" + }, + "Tags": [ + { + "Key": "Name", + "Value": "host_sg" + } + ] + } + }, + "HostUDPPorts": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + }, + "IpProtocol": "udp", + "FromPort": "0", + "ToPort": "65535", + "CidrIp": "0.0.0.0/0" + } + }, + "HostTCPPorts": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + }, + "IpProtocol": "tcp", + "FromPort": "0", + "ToPort": "65535", + "CidrIp": "0.0.0.0/0" + } + }, + "zoneinternalidns": { + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "{{ zone_internal_dns }}", + "VPCs" : [{ + "VPCId": { "Ref" : "Vpc" }, + "VPCRegion": { "Ref": "AWS::Region" } } ], + "HostedZoneConfig": { + "Comment": "Created By ansible agnostic deployer" + } + } + }, + "BastionDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": [ "BastionEIP" ], + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name": "{{bastion_public_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "Bastion", + "PublicIp" + ] + } + ] + } + ] + } + }, + "Bastion": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "InstanceType": "{{bastion_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "bastion" + }, + { + "Key": "AnsibleGroup", + "Value": "bastions" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "bastion" + } + ] + } + }, + "BastionEIP" : { + "Type" : "AWS::EC2::EIP", + "DependsOn": [ "Bastion" ], + "Properties" : { + "InstanceId" : { "Ref" : "Bastion" }, + } +}, + "BastionInternalDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "bastion.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "Bastion", + "PrivateIp" + ] + } + ] + } + ] + } + }, + {% for c in range(1,(towerdb_instance_count|int)+1) %} + + "towerdb{{loop.index}}": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "InstanceType": "{{towerdb_instance_type}}", + "KeyName": "{{key_name}}", + 
"SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "towerdb{{loop.index}}" + }, + { + "Key": "AnsibleGroup", + "Value": "towerdbs" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "towerdb" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "VolumeSize": 30 + } + } + ] + } + + }, + + "towerdb{{loop.index}}DNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "towerdb{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "towerdb{{loop.index}}", + "PrivateIp" + ] + } + ] + } + ] + } + }, + {% endfor %} + + + + + {% for c in range(1,(tower_instance_count|int)+1) %} + "tower{{loop.index}}PublicDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": [ "tower{{loop.index}}EIP" ], + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name": "tower{{loop.index}}.{{guid}}{{subdomain_base_suffix}}.", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "tower{{loop.index}}", + "PublicIp" + ] + } + ] + } + ] + } + }, + + "tower{{loop.index}}": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "InstanceType": "{{tower_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "tower{{loop.index}}" + }, + { + "Key": "AnsibleGroup", + "Value": "tower" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "tower" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "VolumeSize": 30 + } + } + ] + } + + }, + "tower{{loop.index}}EIP" : { + "Type" : "AWS::EC2::EIP", + "DependsOn": [ "tower{{loop.index}}" ], + "Properties" : { + "InstanceId" : { "Ref" : "tower{{loop.index}}" }, + + } + }, + "tower{{loop.index}}DNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "tower{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "tower{{loop.index}}", + "PrivateIp" + ] + } + ] + } + ] + } + }, + + {% endfor %} +}, + "Outputs": { + "Route53internalzoneOutput": { + "Description": "The ID of the internal route 53 zone", + "Value": { + "Ref": "zoneinternalidns" + } + } +}} diff --git a/ansible/configs/ansible-bootcamp/files/ec2_internal_dns.json.j2 b/ansible/configs/ansible-bootcamp/files/ec2_internal_dns.json.j2 new file mode 100644 index 00000000000..c487dd663e7 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/files/ec2_internal_dns.json.j2 @@ -0,0 +1,34 @@ +{ + "Comment": "Create internal dns zone entries", + "Changes": [ + + + + +{% for host in groups[('tag_' + env_type + '-' + guid + '_tower') | replace('-', '_') ] %} + { + "Action": "{{DNS_action}}", + "ResourceRecordSet": { + "Name": "master{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": 20, + "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] + } + }, +{% endfor %} + + +{% for host in 
groups[('tag_' + env_type + '-' + guid + '_towerdb') | replace('-', '_') ] %} + { + "Action": "{{DNS_action}}", + "ResourceRecordSet": { + "Name": "loadbalancer{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": 20, + "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] + } + }, +{% endfor %} + + ] +} diff --git a/ansible/configs/ansible-bootcamp/files/repos_template.j2 b/ansible/configs/ansible-bootcamp/files/repos_template.j2 new file mode 100644 index 00000000000..6801feef182 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/files/repos_template.j2 @@ -0,0 +1,37 @@ +[rhel-7-server-rpms] +name=Red Hat Enterprise Linux 7 +baseurl={{own_repo_path}}/rhel-7-server-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-rh-common-rpms] +name=Red Hat Enterprise Linux 7 Common +baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-extras-rpms] +name=Red Hat Enterprise Linux 7 Extras +baseurl={{own_repo_path}}/rhel-7-server-extras-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-optional-rpms] +name=Red Hat Enterprise Linux 7 Optional +baseurl={{own_repo_path}}/rhel-7-server-optional-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-ose-{{repo_version}}-rpms] +name=Red Hat Enterprise Linux 7 OSE {{repo_version}} +baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-fast-datapath-rpms] +name=Red Hat Enterprise Linux 7 Fast Datapath +baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms +enabled=1 +gpgcheck=0 + + diff --git a/ansible/configs/ansible-bootcamp/post_infra.yml b/ansible/configs/ansible-bootcamp/post_infra.yml new file mode 100644 index 00000000000..7901fa678fb --- /dev/null +++ b/ansible/configs/ansible-bootcamp/post_infra.yml @@ -0,0 +1,32 @@ +- name: Step 002 Post Infrastructure + hosts: localhost + connection: local + become: false + vars_files: + - "./env_vars.yml" + - "./env_secret_vars.yml" + tags: + - step002 + - post_infrastructure + tasks: + - debug: + msg: "Step 001 Post Infrastructure - There are no post_infrastructure tasks defined" + when: "not {{ tower_run | default(false) }}" + - name: Job Template to launch a Job Template with update on launch inventory set + uri: + url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/" + method: POST + user: "{{tower_admin}}" + password: "{{tower_admin_password}}" + body: + extra_vars: + guid: "{{guid}}" + ipa_host_password: "{{ipa_host_password}}" + + body_format: json + validate_certs: False + HEADER_Content-Type: "application/json" + status_code: 200, 201 + when: "{{ tower_run | default(false) }}" + tags: + - tower_workaround diff --git a/ansible/configs/ansible-bootcamp/post_software.yml b/ansible/configs/ansible-bootcamp/post_software.yml new file mode 100644 index 00000000000..91a0ace8ef6 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/post_software.yml @@ -0,0 +1,13 @@ +- name: Step 000 post software + hosts: localhost + connection: local + become: false + vars_files: + - "./env_vars.yml" + - "./env_secret_vars.yml" + tags: + - step006 + - post_software + tasks: + - debug: + msg: "Step 006 - no post_software tasks defined" diff --git a/ansible/configs/ansible-bootcamp/pre_infra.yml b/ansible/configs/ansible-bootcamp/pre_infra.yml new file mode 100644 index 00000000000..ee24d70ac7f --- /dev/null +++ b/ansible/configs/ansible-bootcamp/pre_infra.yml @@ -0,0 +1,13 @@ +- name: Step 000 Pre Infrastructure + hosts: localhost + connection: local + become: false + vars_files: + - 
"./env_vars.yml" + - "./env_secret_vars.yml" + tags: + - step000 + - pre_infrastructure + tasks: + - debug: + msg: "Step 000 Pre Infrastructure - There are no pre_infrastructure tasks defined" diff --git a/ansible/configs/ansible-bootcamp/pre_software.yml b/ansible/configs/ansible-bootcamp/pre_software.yml new file mode 100644 index 00000000000..70fede2e915 --- /dev/null +++ b/ansible/configs/ansible-bootcamp/pre_software.yml @@ -0,0 +1,50 @@ +- name: Step 003 Pre Software + hosts: localhost + connection: local + become: False + vars_files: + - "./env_vars.yml" + - "./env_secret_vars.yml" + tags: + - step003 + - pre_software + - internal_dns_creation + tasks: + # - set_fact: + # DNS_action: "CREATE" + # - name: create file for internal dns names + # template: + # src: "./files/{{cloud_provider}}_internal_dns.json.j2" + # dest: "../../workdir/internal_dns-{{ env_type }}-{{ guid }}.json" + # + # - set_fact: + # DNS_action: "DELETE" + # - name: create Delete file for internal dns names + # template: + # src: "./files/{{cloud_provider}}_internal_dns.json.j2" + # dest: "../../workdir/internal_dns-{{ env_type }}-{{ guid }}_DELETE.json" + # + # - name: get internal dns zone id if not provided + # environment: + # AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}" + # AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}" + # AWS_DEFAULT_REGION: "{{aws_region}}" + # shell: "aws route53 list-hosted-zones-by-name --region={{aws_region}} --dns-name={{guid}}.internal. --output text --query='HostedZones[*].Id' | awk -F'/' '{print $3}'" + # register: internal_zone_id_register + # - debug: + # var: internal_zone_id_register + # - name: Store internal route53 ID + # set_fact: + # internal_zone_id: "{{ internal_zone_id_register.stdout }}" + # when: 'internal_zone_id_register is defined' + # tags: + # - internal_dns_creation + # + # - name: create internal dns names + # environment: + # AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}" + # AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}" + # AWS_DEFAULT_REGION: "{{aws_region}}" + # shell: "aws route53 change-resource-record-sets --hosted-zone-id {{internal_zone_id}} --change-batch file://../../workdir/internal_dns-{{ env_type }}-{{ guid }}.json --region={{aws_region}}" + # ignore_errors: true + # when: internal_zone_id is defined diff --git a/ansible/configs/ansible-bootcamp/ssh_vars.yml b/ansible/configs/ansible-bootcamp/ssh_vars.yml new file mode 100644 index 00000000000..c48f125543d --- /dev/null +++ b/ansible/configs/ansible-bootcamp/ssh_vars.yml @@ -0,0 +1,5 @@ +ansible_ssh_extra_args: > + -o User={{ ansible_ssh_user }} + -o StrictHostKeyChecking=no + -i ~/.ssh/{{ key_name }}.pem + -o ProxyCommand="ssh -o StrictHostKeyChecking=no -i ~/.ssh/'{{ key_name }}'.pem -o User='{{ ansible_ssh_user }}' -W %h:%p {{ hostvars[ groups[ ('tag_' ~ env_type ~ '_' ~ guid ~ '_bastion') | replace('-', '_') ].0 ]['ec2_public_dns_name'] }}"