Skip to content

Commit

Permalink
Merge pull request jhuapl-boss#5 from aplmicrons/dynamo_autoscale
Browse files Browse the repository at this point in the history
Dynamo autoscale
  • Loading branch information
sandyhider authored May 25, 2017
2 parents ca5bb12 + bbe6a01 commit 34c3ed4
Show file tree
Hide file tree
Showing 6 changed files with 291 additions and 10 deletions.
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,6 @@
path = lib/heaviside.git
url = https://github.com/jhuapl-boss/heaviside
branch = master
[submodule "cloud_formation/lambda/dynamodb-lambda-autoscale"]
path = cloud_formation/lambda/dynamodb-lambda-autoscale
url = https://github.com/jhuapl-boss/dynamodb-lambda-autoscale
259 changes: 259 additions & 0 deletions cloud_formation/configs/dynamolambda.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,259 @@
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Create the DynamoDB lambda configuration which consists of:
* Lambda function written in NodeJS
* Lambda policy
"""

import boto3
import configparser
import io
from lib.cloudformation import CloudFormationConfiguration, Arg, Arn, Ref
from lib.names import AWSNames
from lib.external import ExternalCalls
from lib import aws
from lib import constants as const
import os
import subprocess

from update_lambda_fcn import load_lambdas_on_s3

# Location of the git submodule holding the NodeJS lambda autoscaler.
LAMBDA_ROOT_FOLDER = os.path.join(
    os.path.dirname(__file__), '../lambda/dynamodb-lambda-autoscale')

# Zip file created by `npm run build`; this is what gets uploaded to S3.
LAMBDA_ZIP_FILE = os.path.join(LAMBDA_ROOT_FOLDER, 'dist.zip')

# Template (not under source control) used to generate the lambda's
# production config file.
CONFIG_TEMPLATE_PATH = os.path.join(
    os.path.dirname(__file__), 'dynamo_config.template')
# Generated environment file consumed by the NodeJS lambda at build time.
CONFIG_OUTPUT_PATH = os.path.join(LAMBDA_ROOT_FOLDER, 'config.env.production')

# CloudFormation resource keys for the lambda and its CloudWatch trigger.
DYNAMO_LAMBDA_KEY = 'DynamoLambda'
TRIGGER_KEY = 'TriggerDynamoAutoscale'

# Config variable naming the Slack webhook host.
SLACK_WEBHOOK_HOST = 'SLACK_WEBHOOK_HOST'

# This variable is the one used by the lambda function.
SLACK_WEBHOOK_PATH = 'SLACK_WEBHOOK_PATH'

# Value in this config variable will be written to SLACK_WEBHOOK_PATH when
# standing up the service in production.
SLACK_WEBHOOK_PATH_PRODUCTION = 'SLACK_WEBHOOK_PATH_PRODUCTION'

# Value in this config variable will be written to SLACK_WEBHOOK_PATH when
# standing up the service in a development stack or an integration stack.
SLACK_WEBHOOK_PATH_DEV = 'SLACK_WEBHOOK_PATH_DEV'

# Domain name will be included in messages to Slack and also determines
# whether SLACK_WEBHOOK_PATH_DEV's or SLACK_WEBHOOK_PATH_PRODUCTION's value is
# written to SLACK_WEBHOOK_PATH.
VPC_DOMAIN = 'VPC_DOMAIN'

def create_config(session, domain):
    """
    Create the CloudFormationConfiguration object.

    Args:
        session (Session): amazon session object
        domain (str): domain of the stack being created

    Returns:
        (CloudFormationConfiguration): the config for the Cloud Formation stack
    """
    names = AWSNames(domain)
    config = CloudFormationConfiguration("dynamolambda", domain, const.REGION)

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for multilambda." + domain))

    # Look the bucket up once and reuse it (original called
    # aws.get_lambda_s3_bucket() a second time and ignored this variable).
    lambda_bucket = aws.get_lambda_s3_bucket(session)
    lambda_key = generate_lambda_key(domain)
    config.add_lambda(DYNAMO_LAMBDA_KEY,
                      names.dynamo_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          lambda_key,
                          "index.handler"),
                      timeout=120,
                      memory=128,
                      runtime="nodejs6.10")

    config.add_cloudwatch_rule(TRIGGER_KEY,
                               name=names.trigger_dynamo_autoscale,
                               description='Run DynamoDB table autoscaler',
                               targets=[
                                   {
                                       'Arn': Arn(DYNAMO_LAMBDA_KEY),
                                       # NOTE(review): names.vault_monitor looks like a
                                       # copy/paste from another config — confirm whether
                                       # this target Id should be the dynamo lambda's name.
                                       'Id': names.vault_monitor,
                                   }
                               ],
                               schedule='rate(1 minute)',
                               depends_on=[DYNAMO_LAMBDA_KEY])

    # Allow the CloudWatch rule to invoke the lambda.
    config.add_lambda_permission('TriggerPerms',
                                 names.dynamo_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn(TRIGGER_KEY))

    return config


def generate(session, domain):
    """Build the CloudFormation configuration and write it to disk."""
    create_config(session, domain).generate()


def create(session, domain):
    """Create the configuration, and launch it.

    Args:
        session (Session): amazon session object
        domain (str): domain of the stack being created

    Raises:
        Exception: if stack creation fails.
    """
    # Removed the unused keypair lookup (`aws.keypair_lookup(session)`) —
    # nothing in this config consumes a keypair; it was copy/paste from
    # other configs.
    try:
        pre_init(session, domain)

        config = create_config(session, domain)

        success = config.create(session)
        if not success:
            raise Exception("Create Failed")
        else:
            post_init(session, domain)
    except:
        # DP NOTE: This will catch errors from pre_init, create, and post_init
        print("Error detected")
        raise


def pre_init(session, domain):
    """
    Create NodeJS config file from template.
    Package NodeJS lambda function.
    Upload .zip to S3 bucket.

    Args:
        session (Session): amazon session object
        domain (str): domain of the stack being created
    """
    with open(CONFIG_TEMPLATE_PATH) as fh:
        config_str = fh.read()
    update_config_file(config_str, domain)

    build_lambda()
    bucket = aws.get_lambda_s3_bucket(session)
    # LAMBDA_ZIP_FILE is already an absolute path; the original joined it
    # with LAMBDA_ROOT_FOLDER again, which os.path.join silently discards.
    # The original also bound upload_to_s3's (None) return to an unused
    # variable; dropped.
    upload_to_s3(session, domain, LAMBDA_ZIP_FILE, bucket)


def post_init(session, domain):
    """No post-creation steps are needed for this configuration."""
    return None


def update_config_file(config_str, domain):
    """Update config file that stores environment variables for the lambda
    environment.

    Writes the resulting NodeJS env file to CONFIG_OUTPUT_PATH.

    Args:
        config_str (str): String representation of config file template.
        domain (str): domain of the stack being created; selects between the
            production and dev Slack webhook paths and is stored in VPC_DOMAIN.
    """
    parser = configparser.ConfigParser()
    parser.read_string(config_str)
    parser.set('default', VPC_DOMAIN, domain)

    slack_host = parser.get('default', SLACK_WEBHOOK_HOST)
    slack_path_prod = parser.get('default', SLACK_WEBHOOK_PATH_PRODUCTION)
    slack_path_dev = parser.get('default', SLACK_WEBHOOK_PATH_DEV)
    # Only the exact production domain gets the production webhook; all
    # other stacks (dev, integration) post to the dev webhook.
    if domain == 'production.boss':
        parser.set('default', SLACK_WEBHOOK_PATH, slack_path_prod)
    else:
        parser.set('default', SLACK_WEBHOOK_PATH, slack_path_dev)
    slack_path = parser.get('default', SLACK_WEBHOOK_PATH)

    # Remove stack specific variables before outputting.
    parser.remove_option('default', SLACK_WEBHOOK_PATH_DEV)
    parser.remove_option('default', SLACK_WEBHOOK_PATH_PRODUCTION)

    print('\nWill post to Slack at https://{}{}'.format(slack_host, slack_path))

    # Serialize the parser back to a string so it can be post-processed.
    updated_config = io.StringIO()
    parser.write(updated_config)
    config_str = updated_config.getvalue()
    updated_config.close()

    # Strip default section header from config. NodeJS config file does not
    # use sections.
    _, headerless_config = config_str.split('[default]', 1)

    # Convert variable names back to upper case.  ConfigParser lower-cases
    # option names on read (optionxform), so restore the three known keys.
    headerless_config = headerless_config.replace(
        SLACK_WEBHOOK_HOST.lower(), SLACK_WEBHOOK_HOST).replace(
        SLACK_WEBHOOK_PATH.lower(), SLACK_WEBHOOK_PATH).replace(
        VPC_DOMAIN.lower(), VPC_DOMAIN)

    with open(CONFIG_OUTPUT_PATH, 'w') as out:
        out.write(headerless_config)

def build_lambda():
    """Package the lambda in preparation to upload to S3.

    Runs `npm install` followed by `npm run build` in the autoscaler repo.
    """
    install_node_deps()
    build_node()

def install_node_deps():
    """npm install NodeJS dependencies.

    Raises:
        RuntimeError: if `npm install` exits with a non-zero status.
    """
    print('Installing NodeJS dependencies.')
    args = ('npm', 'install')
    # subprocess.run() consumes stdout while waiting; the original's
    # Popen.wait() + stdout=PIPE can deadlock if npm fills the pipe buffer.
    result = subprocess.run(args, cwd=LAMBDA_ROOT_FOLDER, stdout=subprocess.PIPE)
    if result.returncode != 0:
        print(str(result.stdout))
        raise RuntimeError('Failed to install dependencies.')

def build_node():
    """Build and package in dist.zip.

    Raises:
        RuntimeError: if `npm run build` exits with a non-zero status.
    """
    print('Packaging NodeJS app.')
    args = ('npm', 'run', 'build')
    # subprocess.run() consumes stdout while waiting; the original's
    # Popen.wait() + stdout=PIPE can deadlock if npm fills the pipe buffer.
    result = subprocess.run(args, cwd=LAMBDA_ROOT_FOLDER, stdout=subprocess.PIPE)
    if result.returncode != 0:
        print(str(result.stdout))
        raise RuntimeError('Failed to build Node application.')

def upload_to_s3(session, domain, zip_file, bucket):
    """Upload the zip file to the given S3 bucket.

    Args:
        session (Session): Boto3 Session.
        domain (str): domain of the stack being created
        zip_file (str): Name of zip file.
        bucket (str): Name of bucket to use.
    """
    print('Uploading to S3.')
    key = generate_lambda_key(domain)
    s3 = session.client('s3')
    s3.create_bucket(Bucket=bucket)
    # with-block closes the zip's file handle after upload; the original
    # leaked the handle returned by open().
    with open(zip_file, 'rb') as fh:
        s3.put_object(Bucket=bucket, Key=key, Body=fh)

def generate_lambda_key(domain):
    """Build the S3 object key for the lambda's zip archive.

    Args:
        domain (str): Use the domain as part of the key.

    Returns:
        (str): key of the form ``dynamodb_autoscale.<domain>.zip``
    """
    return 'dynamodb_autoscale.{}.zip'.format(domain)

1 change: 1 addition & 0 deletions cloud_formation/lambda/dynamodb-lambda-autoscale
29 changes: 22 additions & 7 deletions docs/IntegrationRebuild.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ $ git add salt_stack/salt/proofreader-web/files/proofread.git
$ git add salt_stack/salt/spdb/files/spdb.git
$ git add salt_stack/salt/ndingest/files/ndingest.git
$ git add salt_stack/salt/ingest-client/files/ingest-client.git
$ git add cloud_formation/lambda/dynamodb-lambda-autoscale
$ git commit -m "Updated submodule references"
$ git push
```
Expand Down Expand Up @@ -115,8 +116,9 @@ Stacks need to be deleted.
$ cd bin/
$ source ../config/set_vars.sh

# Deletion of cloudwatch, api, activities and cachedb can probably
# Deletion of cloudwatch, api, activities, cachedb, and dynamolambda can probably
# be done in parallel.
$ ./cloudformation.py delete integration.boss dynamolambda
$ ./cloudformation.py delete integration.boss cloudwatch
$ ./cloudformation.py delete integration.boss activities
$ ./cloudformation.py delete integration.boss cachedb
Expand Down Expand Up @@ -162,11 +164,24 @@ If you are building a personal developer domain it should have this:

### Launching configs

For the *core*, *api*, *cachedb*, *activities*, *cloudwatch* configurations
run the following command. You have to wait for each command to finish before
launching the next configuration as they build upon each other. **Only use the
*--scenario ha-development* flag** if you are rebuilding integration. It is not used
if you are following these instructions to build a developer environment.
#### dynamolambda Requirements

Building the dynamolambda configuration requires NodeJS. Install v6.10.x from
https://nodejs.org/en/download

A `dynamo_config.template` file is required in
`boss-manage.git/cloud_formation/configs` to set up the Slack integration.
This file is not under source control and should have been distributed to each
developer.

#### Launching

For the *core*, *api*, *cachedb*, *activities*, *cloudwatch*, and *dynamolambda*
configurations run the following command. You have to wait for each command to
finish before launching the next configuration as they build upon each other.
**Only use the *--scenario ha-development* flag** if you are rebuilding
integration. It is not used if you are following these instructions to build a
developer environment.
```shell
$ ./cloudformation.py create integration.boss --scenario ha-development <config>
```
Expand Down Expand Up @@ -197,7 +212,7 @@ select load balancers on left side
click the checkbox for the loadbalancer to change
under attributes
Set "Idle timeout: 300 seconds"
save and refresh the page
save and refresh the page

## Run unit tests on Endpoint

Expand Down
5 changes: 3 additions & 2 deletions lib/cloudformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -1591,7 +1591,7 @@ def add_s3_bucket_policy(self, key, bucket_name, action, principal):
}


def add_lambda(self, key, name, role, file=None, handler=None, s3=None, description="", memory=128, timeout=3, security_groups=None, subnets=None, depends_on=None):
def add_lambda(self, key, name, role, file=None, handler=None, s3=None, description="", memory=128, timeout=3, security_groups=None, subnets=None, depends_on=None, runtime="python2.7"):
"""Create a Python Lambda
Args:
Expand All @@ -1610,6 +1610,7 @@ def add_lambda(self, key, name, role, file=None, handler=None, s3=None, descript
subnets (None|list) : List of ids of subnets to grant the lambda access to
depends_on (None|string|list) : A unique name or list of unique names of resources within the
configuration and is used to determine the launch order of resources
runtime (optional[string]) : Lambda runtime to use. Defaults to "python2.7".
"""

if file is not None:
Expand Down Expand Up @@ -1648,7 +1649,7 @@ def add_lambda(self, key, name, role, file=None, handler=None, s3=None, descript
"Handler": handler,
"MemorySize": memory,
"Role": role,
"Runtime": "python2.7",
"Runtime": runtime,
"Timeout": int(timeout)
}
}
Expand Down
4 changes: 3 additions & 1 deletion lib/names.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,8 @@ def public_dns(self, name):
'ingest_queue_populate': 'Ingest.Populate',
'ingest_queue_upload': 'Ingest.Upload',
'ingest_lambda': 'IngestUpload',
'dynamo_lambda': 'dynamoLambda',
'trigger_dynamo_autoscale': 'triggerDynamoAutoscale'
}

def __getattr__(self, name):
Expand All @@ -105,7 +107,7 @@ def __getattr__(self, name):
fq_hostname = hostname + self.base_dot

if name in ['multi_lambda', 'write_lock', 'vault_monitor', 'consul_monitor', 'vault_consul_check',
'delete_lambda', 'ingest_lambda']:
'delete_lambda', 'ingest_lambda', 'dynamo_lambda']:
fq_hostname = fq_hostname.replace('.','-')

if name in ['s3flush_queue', 'deadletter_queue', 'delete_cuboid', 'query_deletes',
Expand Down

0 comments on commit 34c3ed4

Please sign in to comment.