
Commit

Merge branch 'release/1.12.0'
Hector Castro committed Apr 7, 2016
2 parents e2a9eaa + 6044cac commit 1ac5b14
Showing 37 changed files with 476 additions and 253 deletions.
2 changes: 1 addition & 1 deletion deployment/ansible/group_vars/test
@@ -22,7 +22,7 @@ celery_processes_per_worker: 1
itsi_base_url: "https://learn.staging.concord.org/"
itsi_secret_key: "{{ lookup('env', 'MMW_ITSI_SECRET_KEY') }}"

tilecache_bucket_name: "tilecache.mmw-dev.azavea.com"
tilecache_bucket_name: "tile-cache.staging.app.wikiwatershed.org"

aws_profile: "mmw-stg"

10 changes: 10 additions & 0 deletions deployment/cfn/application.py
@@ -53,6 +53,7 @@ class Application(StackNode):
['global:BackwardCompatSSLCertificateARN'],
'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
'PublicHostedZoneName': ['global:PublicHostedZoneName'],
'VpcId': ['global:VpcId', 'VPC:VpcId'],
'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
'BlueTileServerDistributionEndpoint':
@@ -172,6 +173,11 @@ def set_up_stack(self):
Description='A list of private subnets'
), 'PrivateSubnets')

self.public_hosted_zone_name = self.add_parameter(Parameter(
'PublicHostedZoneName', Type='String',
Description='Route 53 public hosted zone name'
), 'PublicHostedZoneName')

self.vpc_id = self.add_parameter(Parameter(
'VpcId', Type='String',
Description='VPC ID'
@@ -473,6 +479,10 @@ def get_cloud_config(self, tile_distribution_endpoint):
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', self.get_input('StackType'), '\n',
' - path: /etc/mmw.d/env/MMW_PUBLIC_HOSTED_ZONE_NAME\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.public_hosted_zone_name), '\n',
' - path: /etc/mmw.d/env/MMW_DB_PASSWORD\n',
' permissions: 0750\n',
' owner: root:mmw\n',
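
The cloud-config snippet above writes each setting to its own file under /etc/mmw.d/env/. A minimal sketch of how a process on the instance might read one of those values back (the helper function is illustrative, not part of this commit):

    def read_mmw_env(name, env_dir='/etc/mmw.d/env'):
        # Each file written by the cloud-config above holds a single value.
        with open('{}/{}'.format(env_dir, name)) as f:
            return f.read().strip()

    public_hosted_zone_name = read_mmw_env('MMW_PUBLIC_HOSTED_ZONE_NAME')
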
53 changes: 39 additions & 14 deletions deployment/cfn/public_hosted_zone.py
@@ -1,4 +1,5 @@
from boto import route53 as r53
from boto import s3, route53 as r53
from boto.s3.connection import OrdinaryCallingFormat

from majorkirby import CustomActionNode

@@ -36,14 +37,19 @@ class PublicHostedZone(CustomActionNode):

def action(self):
region = self.get_input('Region')
color = self.get_input('StackColor')

hosted_zone_name = self.get_input('PublicHostedZoneName')
app_lb_endpoint = self.get_input('AppServerLoadBalancerEndpoint')
app_lb_hosted_zone_id = self.get_input('AppServerLoadBalancerHostedZoneNameID') # NOQA
app_lb_hosted_zone_id = self.get_input(
'AppServerLoadBalancerHostedZoneNameID')

backward_compat_hosted_zone_name = self.get_input('BackwardCompatPublicHostedZoneName') # NOQA
backward_compat_app_lb_endpoint = self.get_input('BackwardCompatAppServerLoadBalancerEndpoint')
backward_compat_app_lb_hosted_zone_id = self.get_input('BackwardCompatAppServerLoadBalancerHostedZoneNameID') # NOQA
backward_compat_hosted_zone_name = self.get_input(
'BackwardCompatPublicHostedZoneName')
backward_compat_app_lb_endpoint = self.get_input(
'BackwardCompatAppServerLoadBalancerEndpoint')
backward_compat_app_lb_hosted_zone_id = self.get_input(
'BackwardCompatAppServerLoadBalancerHostedZoneNameID')

route53_conn = r53.connect_to_region(region,
profile_name=self.aws_profile)
@@ -59,13 +65,32 @@ def action(self):
failover='PRIMARY')
record_sets.commit()

backward_compat_hosted_zone = route53_conn.get_zone(backward_compat_hosted_zone_name)
backward_compat_record_sets = r53.record.ResourceRecordSets(route53_conn,
backward_compat_hosted_zone.id)
backward_compat_record_sets.add_change('UPSERT', backward_compat_hosted_zone_name, 'A',
alias_hosted_zone_id=backward_compat_app_lb_hosted_zone_id,
alias_dns_name=backward_compat_app_lb_endpoint,
alias_evaluate_target_health=True,
identifier='Primary',
failover='PRIMARY')
backward_compat_hosted_zone = route53_conn.get_zone(
backward_compat_hosted_zone_name)
backward_compat_record_sets = r53.record.ResourceRecordSets(
route53_conn, backward_compat_hosted_zone.id)
backward_compat_record_sets.add_change(
'UPSERT', backward_compat_hosted_zone_name, 'A',
alias_hosted_zone_id=backward_compat_app_lb_hosted_zone_id,
alias_dns_name=backward_compat_app_lb_endpoint,
alias_evaluate_target_health=True, identifier='Primary',
failover='PRIMARY')
backward_compat_record_sets.commit()

s3_conn = s3.connect_to_region(region,
profile_name=self.aws_profile,
calling_format=OrdinaryCallingFormat())

bucket = s3_conn.get_bucket('tile-cache.{}'.format(hosted_zone_name))

rules = s3.website.RoutingRules()
rules.add_rule(s3.website.RoutingRule(
s3.website.Redirect(
protocol='https',
http_redirect_code=302,
hostname='{}-tiles.{}'.format(color.lower(),
hosted_zone_name)),
s3.website.Condition(http_error_code=404)))

bucket.configure_website(suffix='index.html', error_key='error.html',
routing_rules=rules)
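
The routing rule above makes the tile-cache website bucket answer a missing key (HTTP 404) with a 302 redirect to the active color's tile servers. A quick boto 2 sketch for reading the website configuration back to confirm the rule was applied; the region, profile, and hosted zone name below are assumptions drawn from the staging group_vars, not values this commit supplies:

    from boto import s3
    from boto.s3.connection import OrdinaryCallingFormat

    # Assumed values for illustration; substitute the stack's actual inputs.
    region = 'us-east-1'
    aws_profile = 'mmw-stg'
    hosted_zone_name = 'staging.app.wikiwatershed.org'

    s3_conn = s3.connect_to_region(region,
                                   profile_name=aws_profile,
                                   calling_format=OrdinaryCallingFormat())
    bucket = s3_conn.get_bucket('tile-cache.{}'.format(hosted_zone_name))

    # Prints the website configuration, including the 404 -> 302 routing rule
    # committed by PublicHostedZone.action().
    print(bucket.get_website_configuration())
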
70 changes: 67 additions & 3 deletions deployment/cfn/tile_delivery_network.py
@@ -6,7 +6,14 @@
GetAtt,
Join,
cloudfront as cf,
cloudwatch as cw
cloudwatch as cw,
route53 as r53,
s3
)

from utils.constants import (
AMAZON_S3_HOSTED_ZONE_ID,
AMAZON_S3_WEBSITE_DOMAIN,
)

from majorkirby import StackNode
@@ -67,6 +74,8 @@ def set_up_stack(self):
blue_tile_distribution, \
green_tile_distribution = self.create_cloudfront_distributions()

self.create_s3_resources()

self.add_output(Output('BlueTileServerDistributionEndpoint',
Value=GetAtt(blue_tile_distribution,
'DomainName')))
@@ -82,7 +91,7 @@ def create_cloudfront_distributions(self):
cf.Origin(
Id='tileOriginId',
DomainName=Join('.',
['blue-tiles',
['tile-cache',
Ref(self.public_hosted_zone_name)]),
CustomOriginConfig=cf.CustomOrigin(
OriginProtocolPolicy='http-only'
@@ -155,7 +164,7 @@ def create_cloudfront_distributions(self):
cf.Origin(
Id='tileOriginId',
DomainName=Join('.',
['green-tiles',
['tile-cache',
Ref(self.public_hosted_zone_name)]),
CustomOriginConfig=cf.CustomOrigin(
OriginProtocolPolicy='http-only'
@@ -223,6 +232,61 @@ def create_cloudfront_distributions(self):

return blue_tile_distribution, green_tile_distribution

def create_s3_resources(self):
s3_bucket = self.add_resource(s3.Bucket(
's3TileCacheBucket',
BucketName=Join('.', ['tile-cache',
Ref(self.public_hosted_zone_name)]),
AccessControl=s3.PublicRead,
CorsConfiguration=s3.CorsConfiguration(
CorsRules=[
s3.CorsRules(
AllowedOrigins=['*'],
AllowedMethods=['GET'],
MaxAge=3000,
AllowedHeaders=['*'],
)
]
)
))

self.add_resource(s3.BucketPolicy(
's3TileCacheBucketPolicy',
Bucket=Ref(s3_bucket),
PolicyDocument={
'Statement': [{
'Action': ['s3:GetObject'],
'Effect': 'Allow',
'Resource': {
'Fn::Join': ['', [
'arn:aws:s3:::',
Ref(s3_bucket),
'/*'
]]
},
'Principal': '*'
}]
}
))

self.add_resource(r53.RecordSetGroup(
'dnsPublicRecordsCache',
HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']),
RecordSets=[
r53.RecordSet(
'dnsTileServersCache',
AliasTarget=r53.AliasTarget(
AMAZON_S3_HOSTED_ZONE_ID,
AMAZON_S3_WEBSITE_DOMAIN,
True,
),
Name=Join('', ['tile-cache.',
Ref(self.public_hosted_zone_name), '.']),
Type='A'
)
]
))

def get_tags(self, **kwargs):
"""Helper method to return Troposphere tags + default tags
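
Taken together with public_hosted_zone.py, these resources route tile traffic as follows: tile-cache.<zone> is a Route 53 alias for the S3 website endpoint, both CloudFront color distributions use that hostname as their origin, and a cache miss falls through the bucket's 404 rule to a 302 pointing at <color>-tiles.<zone>. A rough way to probe that path by hand; the hostname and tile path here are placeholders, not values from this diff:

    import requests

    # Placeholder tile URL on the cache hostname (illustrative only).
    url = 'http://tile-cache.staging.app.wikiwatershed.org/county/0/0/0.png'

    # A tile missing from the cache should return a 302 from the S3 website
    # endpoint whose Location points at the active color's tile servers.
    response = requests.get(url, allow_redirects=False)
    print(response.status_code, response.headers.get('Location'))
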
18 changes: 17 additions & 1 deletion deployment/cfn/tiler.py
@@ -49,6 +49,7 @@ class Tiler(StackNode):
'TileServerAutoScalingDesired': ['global:TileServerAutoScalingDesired'], # NOQA
'TileServerAutoScalingMin': ['global:TileServerAutoScalingMin'],
'TileServerAutoScalingMax': ['global:TileServerAutoScalingMax'],
'SSLCertificateARN': ['global:SSLCertificateARN'],
'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
'PublicHostedZoneName': ['global:PublicHostedZoneName'],
@@ -142,6 +143,11 @@ def set_up_stack(self):
Description='Tile server AutoScalingGroup maximum'
), 'TileServerAutoScalingMax')

self.ssl_certificate_arn = self.add_parameter(Parameter(
'SSLCertificateARN', Type='String',
Description='ARN for a SSL certificate stored in IAM'
), 'SSLCertificateARN')

self.public_subnets = self.add_parameter(Parameter(
'PublicSubnets', Type='CommaDelimitedList',
Description='A list of public subnets'
@@ -207,7 +213,7 @@ def create_security_groups(self):
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP]
for p in [HTTP, HTTPS]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
@@ -275,6 +281,12 @@ def create_load_balancer(self, tile_server_lb_security_group):
LoadBalancerPort='80',
InstancePort='80',
Protocol='HTTP',
),
elb.Listener(
LoadBalancerPort='443',
InstancePort='80',
Protocol='HTTPS',
SSLCertificateId=Ref(self.ssl_certificate_arn)
)
],
HealthCheck=elb.HealthCheck(
@@ -350,6 +362,10 @@ def get_cloud_config(self):
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.rds_password), '\n',
' - path: /etc/mmw.d/env/MMW_TILECACHE_BUCKET\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Join('.', ['tile-cache', Ref(self.public_hosted_zone_name)]), '\n', # NOQA
' - path: /etc/mmw.d/env/ROLLBAR_SERVER_SIDE_ACCESS_TOKEN\n',
' permissions: 0750\n',
' owner: root:mmw\n',
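
The new MMW_TILECACHE_BUCKET env file gives instances the name of the tile cache bucket. The tiler-side code is not part of this diff; purely as an illustration of using that value, a boto 2 sketch (region and tile path are assumptions) might look like:

    from boto import s3
    from boto.s3.connection import OrdinaryCallingFormat

    # Read the bucket name written by the cloud-config above.
    with open('/etc/mmw.d/env/MMW_TILECACHE_BUCKET') as f:
        bucket_name = f.read().strip()

    conn = s3.connect_to_region('us-east-1',  # assumed region
                                calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(bucket_name)

    # Store a rendered tile under the path the cache website will serve.
    key = bucket.new_key('county/0/0/0.png')
    key.set_contents_from_filename('/tmp/tile.png',
                                   headers={'Content-Type': 'image/png'})
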
3 changes: 3 additions & 0 deletions deployment/cfn/utils/constants.py
@@ -29,3 +29,6 @@
RELP = 20514
SSH = 22
STATSITE = 8125

AMAZON_S3_HOSTED_ZONE_ID = 'Z3AQBSTGFYJSTF'
AMAZON_S3_WEBSITE_DOMAIN = 's3-website-us-east-1.amazonaws.com'
2 changes: 1 addition & 1 deletion deployment/requirements.txt
@@ -1,5 +1,5 @@
ansible==2.0.1.0
majorkirby>=0.2.0,<0.2.99
troposphere>=1.1.0
boto==2.38.0
boto==2.39.0
awscli>=1.9.15
38 changes: 36 additions & 2 deletions scripts/aws/setupdb.sh
@@ -11,15 +11,18 @@ where: \n
-b load/reload boundary data\n
-f load a named boundary sql.gz\n
-s load/reload stream data\n
-d load/reload DRB stream data\n
-m load/reload mapshed data\n
"

# HTTP accessible storage for initial app data
FILE_HOST="https://s3.amazonaws.com/data.mmw.azavea.com"
load_boundary=false
file_to_load=
load_stream=false
load_mapshed=false

while getopts ":hbsf:" opt; do
while getopts ":hbsdmf:" opt; do
case $opt in
h)
echo -e $usage
@@ -28,6 +31,10 @@ while getopts ":hbsf:" opt; do
load_boundary=true ;;
s)
load_stream=true ;;
d)
load_drb_streams=true ;;
m)
load_mapshed=true ;;
f)
file_to_load=$OPTARG ;;
\?)
@@ -41,6 +48,7 @@ export PGHOST=$(cat /etc/mmw.d/env/MMW_DB_HOST)
export PGDATABASE=$(cat /etc/mmw.d/env/MMW_DB_NAME)
export PGUSER=$(cat /etc/mmw.d/env/MMW_DB_USER)
export PGPASSWORD=$(cat /etc/mmw.d/env/MMW_DB_PASSWORD)
export PUBLIC_HOSTED_ZONE_NAME=$(cat /etc/mmw.d/env/MMW_PUBLIC_HOSTED_ZONE_NAME)

# Ensure that the PostGIS extension exists
psql -c "CREATE EXTENSION IF NOT EXISTS postgis;"
@@ -54,6 +62,12 @@ function download_and_load {
done
}

function purge_tile_cache {
for path in "${PATHS[@]}"; do
aws s3 rm --recursive "s3://tile-cache.${PUBLIC_HOSTED_ZONE_NAME}/${path}/"
done
}

if [ ! -z "$file_to_load" ] ; then
FILES=("$file_to_load")
download_and_load $FILES
@@ -62,13 +76,33 @@ fi
if [ "$load_boundary" = "true" ] ; then
# Fetch boundary layer sql files
FILES=("boundary_county.sql.gz" "boundary_school_district.sql.gz" "boundary_district.sql.gz" "boundary_huc12.sql.gz" "boundary_huc10.sql.gz" "boundary_huc08.sql.gz")
PATHS=("county" "district" "huc8" "huc10" "huc12")

download_and_load $FILES
purge_tile_cache $PATHS
fi

if [ "$load_stream" = "true" ] ; then
# Fetch stream network layer sql files
FILES=("drb_stream_network_20.sql.gz" "drb_stream_network_50.sql.gz" "drb_stream_network_100.sql.gz")
FILES=("nhdflowline.sql.gz")
PATHS=("stream")

download_and_load $FILES
purge_tile_cache $PATHS
fi

if [ "$load_drb_streams" = "true" ] ; then
# Fetch DRB stream network layer sql file
FILES=("drb_streams_50.sql.gz")
PATHS=("drb_streams")

download_and_load $FILES
purge_tile_cache $PATHS
fi

if [ "$load_mapshed" = "true" ] ; then
# Fetch map shed specific vector features
FILES=("ms_weather.sql.gz" "ms_weather_station.sql.gz" "ms_pointsource.sql.gz" "ms_county_animals.sql.gz")

download_and_load $FILES
fi
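
purge_tile_cache above shells out to the AWS CLI to drop stale tiles after a data reload. For reference, a rough boto 2 equivalent of the same recursive prefix delete; the bucket name and prefix are placeholders matching the staging naming, not values read from the environment here:

    from boto import s3
    from boto.s3.connection import OrdinaryCallingFormat

    # Placeholders; setupdb.sh derives these from /etc/mmw.d/env at runtime.
    bucket_name = 'tile-cache.staging.app.wikiwatershed.org'
    prefix = 'county/'

    conn = s3.connect_to_region('us-east-1',
                                calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(bucket_name)

    # Multi-object delete of every key under the prefix, mirroring
    # `aws s3 rm --recursive s3://<bucket>/<prefix>/`.
    bucket.delete_keys(bucket.list(prefix=prefix))
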
2 changes: 1 addition & 1 deletion src/mmw/apps/home/views.py
@@ -120,10 +120,10 @@ def get_client_settings(request):
'client_settings': json.dumps({
EMBED_FLAG: request.session.get(EMBED_FLAG, False),
'base_layers': get_layer_config(['basemap']),
'stream_layers': get_layer_config(['stream']),
'boundary_layers': get_layer_config(['boundary']),
'vector_layers': get_layer_config(['vector', 'overlay']),
'raster_layers': get_layer_config(['raster', 'overlay']),
'stream_layers': get_layer_config(['stream', 'overlay']),
'draw_tools': settings.DRAW_TOOLS,
'map_controls': settings.MAP_CONTROLS,
'google_maps_api_key': settings.GOOGLE_MAPS_API_KEY,
(Diff truncated: the remaining changed files of the 37 are not shown.)
