diff --git a/deployment/ansible/group_vars/all b/deployment/ansible/group_vars/all
index 50288be16..6435e8fae 100644
--- a/deployment/ansible/group_vars/all
+++ b/deployment/ansible/group_vars/all
@@ -32,7 +32,7 @@
 nodejs_npm_version: 2.1.17
 
 apache_version: "2.4.7-*"
-java_version: "7u111-*"
+java_version: "7u121-*"
 
 graphite_carbon_version: "0.9.13-pre1"
 graphite_whisper_version: "0.9.13-pre1"
diff --git a/deployment/ansible/roles/model-my-watershed.app/tasks/dev-and-test-dependencies.yml b/deployment/ansible/roles/model-my-watershed.app/tasks/dev-and-test-dependencies.yml
index eefd0e004..b34fba7ce 100644
--- a/deployment/ansible/roles/model-my-watershed.app/tasks/dev-and-test-dependencies.yml
+++ b/deployment/ansible/roles/model-my-watershed.app/tasks/dev-and-test-dependencies.yml
@@ -1,6 +1,6 @@
 ---
 - name: Install Firefox for UI tests
-  apt: pkg="firefox=4*" state=present
+  apt: pkg="firefox=5*" state=present
 
 - name: Install Xvfb for JavaScript tests
   apt: pkg="xvfb=2:1.15.1*" state=present
diff --git a/deployment/ansible/roles/model-my-watershed.app/templates/nginx-app.conf.j2 b/deployment/ansible/roles/model-my-watershed.app/templates/nginx-app.conf.j2
index b0c1a3e1a..e502d0b31 100644
--- a/deployment/ansible/roles/model-my-watershed.app/templates/nginx-app.conf.j2
+++ b/deployment/ansible/roles/model-my-watershed.app/templates/nginx-app.conf.j2
@@ -31,7 +31,7 @@ server {
     location /static/ {
       {% if ['packer'] | is_in(group_names) -%}
       etag on;
-      expires max;
+      expires 1h;
       {% endif %}
 
       alias {{ app_static_root }};
diff --git a/deployment/ansible/roles/model-my-watershed.rwd/defaults/main.yml b/deployment/ansible/roles/model-my-watershed.rwd/defaults/main.yml
index 8762677e4..78e38ac8e 100644
--- a/deployment/ansible/roles/model-my-watershed.rwd/defaults/main.yml
+++ b/deployment/ansible/roles/model-my-watershed.rwd/defaults/main.yml
@@ -2,4 +2,4 @@
 rwd_data_path: "/opt/rwd-data"
 rwd_host: "localhost"
 rwd_port: 5000
-rwd_docker_image: "quay.io/wikiwatershed/rwd:1.0.1"
+rwd_docker_image: "quay.io/wikiwatershed/rwd:1.1.1"
diff --git a/deployment/ansible/roles/model-my-watershed.rwd/templates/upstart-mmw-rwd.conf.j2 b/deployment/ansible/roles/model-my-watershed.rwd/templates/upstart-mmw-rwd.conf.j2
index 2efebdcba..87371ed0b 100644
--- a/deployment/ansible/roles/model-my-watershed.rwd/templates/upstart-mmw-rwd.conf.j2
+++ b/deployment/ansible/roles/model-my-watershed.rwd/templates/upstart-mmw-rwd.conf.j2
@@ -3,7 +3,7 @@ description "Rapid Watershed Delineation"
 
 {% if ['development', 'test'] | some_are_in(group_names) -%}
 start on (vagrant-mounted and started docker)
 {% else %}
-start on (filesystem and started docker)
+start on (filesystem and local-filesystems and started docker)
 {% endif %}
 
 stop on stopping docker
diff --git a/deployment/cfn/application.py b/deployment/cfn/application.py
index d83d1e0d9..f22920ee4 100644
--- a/deployment/cfn/application.py
+++ b/deployment/cfn/application.py
@@ -48,6 +48,10 @@ class Application(StackNode):
         'AppServerAutoScalingDesired': ['global:AppServerAutoScalingDesired'],
         'AppServerAutoScalingMin': ['global:AppServerAutoScalingMin'],
         'AppServerAutoScalingMax': ['global:AppServerAutoScalingMax'],
+        'AppServerAutoScalingScheduleStartCapacity': ['global:AppServerAutoScalingScheduleStartCapacity'],  # NOQA
+        'AppServerAutoScalingScheduleStartRecurrence': ['global:AppServerAutoScalingScheduleStartRecurrence'],  # NOQA
+        'AppServerAutoScalingScheduleEndCapacity': ['global:AppServerAutoScalingScheduleEndCapacity'],  # NOQA
+        'AppServerAutoScalingScheduleEndRecurrence': ['global:AppServerAutoScalingScheduleEndRecurrence'],  # NOQA
         'SSLCertificateARN': ['global:SSLCertificateARN'],
         'BackwardCompatSSLCertificateARN': ['global:BackwardCompatSSLCertificateARN'],
@@ -152,6 +156,34 @@ def set_up_stack(self):
             Description='Application server AutoScalingGroup maximum'
         ), 'AppServerAutoScalingMax')
 
+        self.app_server_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
+            Parameter(
+                'AppServerAutoScalingScheduleStartRecurrence', Type='String',
+                Default='0 13 * * 1-5',
+                Description='Application server ASG schedule start recurrence'
+            ), 'AppServerAutoScalingScheduleStartRecurrence')
+
+        self.app_server_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
+            Parameter(
+                'AppServerAutoScalingScheduleStartCapacity', Type='String',
+                Default='1',
+                Description='Application server ASG schedule start capacity'
+            ), 'AppServerAutoScalingScheduleStartCapacity')
+
+        self.app_server_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
+            Parameter(
+                'AppServerAutoScalingScheduleEndRecurrence', Type='String',
+                Default='0 23 * * *',
+                Description='Application server ASG schedule end recurrence'
+            ), 'AppServerAutoScalingScheduleEndRecurrence')
+
+        self.app_server_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
+            Parameter(
+                'AppServerAutoScalingScheduleEndCapacity', Type='String',
+                Default='1',
+                Description='Application server ASG schedule end capacity'
+            ), 'AppServerAutoScalingScheduleEndCapacity')
+
         self.ssl_certificate_arn = self.add_parameter(Parameter(
             'SSLCertificateARN', Type='String',
             Description='ARN for a SSL certificate stored in IAM'
@@ -395,7 +427,7 @@ def create_auto_scaling_resources(self, app_server_security_group,
                     self.blue_tile_distribution_endpoint)))
             ))
 
-        self.add_resource(
+        blue_app_server_asg = self.add_resource(
             asg.AutoScalingGroup(
                 'asgAppServerBlue',
                 AvailabilityZones=Ref(self.availability_zones),
@@ -424,6 +456,30 @@
                 Tags=[asg.Tag('Name', 'AppServer', True)])
         )
 
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedAppServerAutoScalingStartBlue',
+                AutoScalingGroupName=Ref(blue_app_server_asg),
+                Condition='BlueCondition',
+                DesiredCapacity=Ref(
+                    self.app_server_auto_scaling_schedule_start_capacity),
+                Recurrence=Ref(
+                    self.app_server_auto_scaling_schedule_start_recurrence)
+            )
+        )
+
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedAppServerAutoScalingEndBlue',
+                AutoScalingGroupName=Ref(blue_app_server_asg),
+                Condition='BlueCondition',
+                DesiredCapacity=Ref(
+                    self.app_server_auto_scaling_schedule_end_capacity),
+                Recurrence=Ref(
+                    self.app_server_auto_scaling_schedule_end_recurrence)
+            )
+        )
+
         green_app_server_launch_config = self.add_resource(
             asg.LaunchConfiguration(
                 'lcAppServerGreen',
@@ -438,7 +494,7 @@
                     self.green_tile_distribution_endpoint)))
             ))
 
-        self.add_resource(
+        green_app_server_asg = self.add_resource(
             asg.AutoScalingGroup(
                 'asgAppServerGreen',
                 AvailabilityZones=Ref(self.availability_zones),
@@ -467,6 +523,30 @@
                 Tags=[asg.Tag('Name', 'AppServer', True)])
         )
 
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedAppServerAutoScalingStartGreen',
+                AutoScalingGroupName=Ref(green_app_server_asg),
+                Condition='GreenCondition',
+                DesiredCapacity=Ref(
+                    self.app_server_auto_scaling_schedule_start_capacity),
+                Recurrence=Ref(
+                    self.app_server_auto_scaling_schedule_start_recurrence)
+            )
+        )
+
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedAppServerAutoScalingEndGreen',
+                AutoScalingGroupName=Ref(green_app_server_asg),
+                Condition='GreenCondition',
+                DesiredCapacity=Ref(
+                    self.app_server_auto_scaling_schedule_end_capacity),
+                Recurrence=Ref(
+                    self.app_server_auto_scaling_schedule_end_recurrence)
+            )
+        )
+
     def get_cloud_config(self, tile_distribution_endpoint):
         return ['#cloud-config\n',
                 '\n',
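Note: all four `ScheduledAction` resources above follow the same shape. A minimal, self-contained sketch of the pattern (assuming troposphere is installed; the group, launch configuration, and action names here are illustrative, not taken from these stacks):

```python
from troposphere import Ref, Template
from troposphere import autoscaling as asg

t = Template()

# A bare-bones group standing in for asgAppServerBlue/Green above.
group = t.add_resource(asg.AutoScalingGroup(
    'asgExample',
    AvailabilityZones=['us-east-1a'],
    LaunchConfigurationName='lcExample',  # hypothetical launch config
    MinSize='0',
    MaxSize='2',
    DesiredCapacity='1',
))

# Recurrence is a cron expression evaluated in UTC, so '0 13 * * 1-5'
# fires on weekdays at 13:00 UTC (8AM US/Eastern in winter, 9AM in summer).
t.add_resource(asg.ScheduledAction(
    'schedExampleStart',
    AutoScalingGroupName=Ref(group),
    DesiredCapacity='1',
    Recurrence='0 13 * * 1-5',
))

# Scale to zero nightly at 23:00 UTC; instances terminate until the
# next start action raises DesiredCapacity again.
t.add_resource(asg.ScheduledAction(
    'schedExampleEnd',
    AutoScalingGroupName=Ref(group),
    DesiredCapacity='0',
    Recurrence='0 23 * * *',
))

print(t.to_json())
```

This is also why the `*AutoScalingMin` defaults drop to '0' elsewhere in this diff: a scheduled DesiredCapacity below the group's MinSize would be rejected.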
diff --git a/deployment/cfn/tiler.py b/deployment/cfn/tiler.py
index e0ad6bccd..97a068f88 100644
--- a/deployment/cfn/tiler.py
+++ b/deployment/cfn/tiler.py
@@ -49,6 +49,10 @@ class Tiler(StackNode):
         'TileServerAutoScalingDesired': ['global:TileServerAutoScalingDesired'],  # NOQA
         'TileServerAutoScalingMin': ['global:TileServerAutoScalingMin'],
         'TileServerAutoScalingMax': ['global:TileServerAutoScalingMax'],
+        'TileServerAutoScalingScheduleStartCapacity': ['global:TileServerAutoScalingScheduleStartCapacity'],  # NOQA
+        'TileServerAutoScalingScheduleStartRecurrence': ['global:TileServerAutoScalingScheduleStartRecurrence'],  # NOQA
+        'TileServerAutoScalingScheduleEndCapacity': ['global:TileServerAutoScalingScheduleEndCapacity'],  # NOQA
+        'TileServerAutoScalingScheduleEndRecurrence': ['global:TileServerAutoScalingScheduleEndRecurrence'],  # NOQA
         'SSLCertificateARN': ['global:SSLCertificateARN'],
         'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
         'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
@@ -143,6 +147,34 @@ def set_up_stack(self):
             Description='Tile server AutoScalingGroup maximum'
         ), 'TileServerAutoScalingMax')
 
+        self.tile_server_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
+            Parameter(
+                'TileServerAutoScalingScheduleStartRecurrence', Type='String',
+                Default='0 13 * * 1-5',
+                Description='Tile server ASG schedule start recurrence'
+            ), 'TileServerAutoScalingScheduleStartRecurrence')
+
+        self.tile_server_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
+            Parameter(
+                'TileServerAutoScalingScheduleStartCapacity', Type='String',
+                Default='1',
+                Description='Tile server ASG schedule start capacity'
+            ), 'TileServerAutoScalingScheduleStartCapacity')
+
+        self.tile_server_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
+            Parameter(
+                'TileServerAutoScalingScheduleEndRecurrence', Type='String',
+                Default='0 23 * * *',
+                Description='Tile server ASG schedule end recurrence'
+            ), 'TileServerAutoScalingScheduleEndRecurrence')
+
+        self.tile_server_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
+            Parameter(
+                'TileServerAutoScalingScheduleEndCapacity', Type='String',
+                Default='1',
+                Description='Tile server ASG schedule end capacity'
+            ), 'TileServerAutoScalingScheduleEndCapacity')
+
         self.ssl_certificate_arn = self.add_parameter(Parameter(
             'SSLCertificateARN', Type='String',
             Description='ARN for a SSL certificate stored in IAM'
@@ -318,7 +350,7 @@ def create_auto_scaling_resources(self, tile_server_security_group,
 
         tile_server_auto_scaling_group_name = 'asgTileServer'
 
-        self.add_resource(
+        tile_server_asg = self.add_resource(
             asg.AutoScalingGroup(
                 tile_server_auto_scaling_group_name,
                 AvailabilityZones=Ref(self.availability_zones),
@@ -346,6 +378,28 @@
             )
         )
 
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedTileServerAutoScalingStart',
+                AutoScalingGroupName=Ref(tile_server_asg),
+                DesiredCapacity=Ref(
+                    self.tile_server_auto_scaling_schedule_start_capacity),
+                Recurrence=Ref(
+                    self.tile_server_auto_scaling_schedule_start_recurrence)
+            )
+        )
+
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedTileServerAutoScalingEnd',
+                AutoScalingGroupName=Ref(tile_server_asg),
+                DesiredCapacity=Ref(
+                    self.tile_server_auto_scaling_schedule_end_capacity),
+                Recurrence=Ref(
+                    self.tile_server_auto_scaling_schedule_end_recurrence)
+            )
+        )
+
     def get_cloud_config(self):
         return ['#cloud-config\n',
                 '\n',
diff --git a/deployment/cfn/worker.py b/deployment/cfn/worker.py
index aa5e4c9a8..dff482f1f 100644
--- a/deployment/cfn/worker.py
+++ b/deployment/cfn/worker.py
@@ -50,6 +50,10 @@ class Worker(StackNode):
         'WorkerAutoScalingDesired': ['global:WorkerAutoScalingDesired'],  # NOQA
         'WorkerAutoScalingMin': ['global:WorkerAutoScalingMin'],
         'WorkerAutoScalingMax': ['global:WorkerAutoScalingMax'],
+        'WorkerAutoScalingScheduleStartCapacity': ['global:WorkerAutoScalingScheduleStartCapacity'],  # NOQA
+        'WorkerAutoScalingScheduleStartRecurrence': ['global:WorkerAutoScalingScheduleStartRecurrence'],  # NOQA
+        'WorkerAutoScalingScheduleEndCapacity': ['global:WorkerAutoScalingScheduleEndCapacity'],  # NOQA
+        'WorkerAutoScalingScheduleEndRecurrence': ['global:WorkerAutoScalingScheduleEndRecurrence'],  # NOQA
         'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
         'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
         'PublicHostedZoneName': ['global:PublicHostedZoneName'],
@@ -135,20 +139,48 @@ def set_up_stack(self):
         ), 'WorkerInstanceProfile')
 
         self.worker_auto_scaling_desired = self.add_parameter(Parameter(
-            'WorkerAutoScalingDesired', Type='String', Default='1',
+            'WorkerAutoScalingDesired', Type='String', Default='2',
             Description='Worker AutoScalingGroup desired'
         ), 'WorkerAutoScalingDesired')
 
         self.worker_auto_scaling_min = self.add_parameter(Parameter(
-            'WorkerAutoScalingMin', Type='String', Default='1',
+            'WorkerAutoScalingMin', Type='String', Default='0',
             Description='Worker AutoScalingGroup minimum'
         ), 'WorkerAutoScalingMin')
 
         self.worker_auto_scaling_max = self.add_parameter(Parameter(
-            'WorkerAutoScalingMax', Type='String', Default='1',
+            'WorkerAutoScalingMax', Type='String', Default='2',
             Description='Worker AutoScalingGroup maximum'
         ), 'WorkerAutoScalingMax')
 
+        self.worker_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
+            Parameter(
+                'WorkerAutoScalingScheduleStartRecurrence', Type='String',
+                Default='0 13 * * 1-5',
+                Description='Worker ASG schedule start recurrence'
+            ), 'WorkerAutoScalingScheduleStartRecurrence')
+
+        self.worker_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
+            Parameter(
+                'WorkerAutoScalingScheduleStartCapacity', Type='String',
+                Default='2',
+                Description='Worker ASG schedule start capacity'
+            ), 'WorkerAutoScalingScheduleStartCapacity')
+
+        self.worker_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
+            Parameter(
+                'WorkerAutoScalingScheduleEndRecurrence', Type='String',
+                Default='0 23 * * *',
+                Description='Worker ASG schedule end recurrence'
+            ), 'WorkerAutoScalingScheduleEndRecurrence')
+
+        self.worker_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
+            Parameter(
+                'WorkerAutoScalingScheduleEndCapacity', Type='String',
+                Default='0',
+                Description='Worker ASG schedule end capacity'
+            ), 'WorkerAutoScalingScheduleEndCapacity')
+
         self.public_subnets = self.add_parameter(Parameter(
             'PublicSubnets', Type='CommaDelimitedList',
             Description='A list of public subnets'
@@ -310,7 +342,7 @@ def create_auto_scaling_resources(self, worker_security_group, worker_lb):
 
         worker_auto_scaling_group_name = 'asgWorker'
 
-        return self.add_resource(
+        worker_asg = self.add_resource(
             asg.AutoScalingGroup(
                 worker_auto_scaling_group_name,
                 AvailabilityZones=Ref(self.availability_zones),
@@ -338,6 +370,30 @@ def create_auto_scaling_resources(self, worker_security_group, worker_lb):
             )
         )
 
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedWorkerAutoScalingStart',
+                AutoScalingGroupName=Ref(worker_asg),
+                DesiredCapacity=Ref(
+                    self.worker_auto_scaling_schedule_start_capacity),
+                Recurrence=Ref(
+                    self.worker_auto_scaling_schedule_start_recurrence)
+            )
+        )
+
+        self.add_resource(
+            asg.ScheduledAction(
+                'schedWorkerAutoScalingEnd',
+                AutoScalingGroupName=Ref(worker_asg),
+                DesiredCapacity=Ref(
+                    self.worker_auto_scaling_schedule_end_capacity),
+                Recurrence=Ref(
+                    self.worker_auto_scaling_schedule_end_recurrence)
+            )
+        )
+
+        return worker_asg
+
     def get_cloud_config(self):
         return ['#cloud-config\n',
                 '\n',
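Out of scope for the diff itself, but handy after a deploy: a quick check that the scheduled actions actually attached to the worker group (a sketch assuming boto3 and AWS credentials are available; CloudFormation suffixes the logical ID `asgWorker` with generated characters, hence the substring match):

```python
import boto3

client = boto3.client('autoscaling')

# Find the physical group name(s) behind the 'asgWorker' logical ID.
groups = client.describe_auto_scaling_groups()['AutoScalingGroups']
worker_names = [g['AutoScalingGroupName'] for g in groups
                if 'asgWorker' in g['AutoScalingGroupName']]

for name in worker_names:
    actions = client.describe_scheduled_actions(
        AutoScalingGroupName=name)['ScheduledUpdateGroupActions']
    for action in actions:
        # Expect schedWorkerAutoScalingStart/End with the recurrences
        # and capacities configured above.
        print(action['ScheduledActionName'],
              action['Recurrence'],
              action['DesiredCapacity'])
```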
diff --git a/deployment/default.yml.example b/deployment/default.yml.example
index be82e2d9b..a9924aca5 100644
--- a/deployment/default.yml.example
+++ b/deployment/default.yml.example
@@ -23,8 +23,14 @@
 AppServerInstanceType: 't2.micro'
 #AppServerAMI: ''
 AppServerInstanceProfile: 'AppServerInstanceProfile'
 AppServerAutoScalingDesired: '1'
-AppServerAutoScalingMin: '1'
+AppServerAutoScalingMin: '0'
 AppServerAutoScalingMax: '1'
+AppServerAutoScalingScheduleStartCapacity: '1'
+# 8AM ET
+AppServerAutoScalingScheduleStartRecurrence: '0 13 * * 1-5'
+AppServerAutoScalingScheduleEndCapacity: '0'
+# 6PM ET
+AppServerAutoScalingScheduleEndRecurrence: '0 23 * * *'
 SSLCertificateARN: 'arn:aws:iam...'
 BackwardCompatSSLCertificateARN: 'arn:aws:iam...'
 TileServerInstanceType: 't2.micro'
@@ -33,16 +39,28 @@
 # Leaving this commented dynamically looks up the
 # most recent AMI for this type.
 #TileServerAMI: ''
 TileServerInstanceProfile: 'TileServerInstanceProfile'
 TileServerAutoScalingDesired: '1'
-TileServerAutoScalingMin: '1'
+TileServerAutoScalingMin: '0'
 TileServerAutoScalingMax: '1'
+TileServerAutoScalingScheduleStartCapacity: '1'
+# 8AM ET
+TileServerAutoScalingScheduleStartRecurrence: '0 13 * * 1-5'
+TileServerAutoScalingScheduleEndCapacity: '0'
+# 6PM ET
+TileServerAutoScalingScheduleEndRecurrence: '0 23 * * *'
 WorkerInstanceType: 't2.micro'
 # Leaving this commented dynamically looks up the
 # most recent AMI for this type.
 #WorkerAMI: ''
 WorkerInstanceProfile: 'WorkerInstanceProfile'
-WorkerAutoScalingDesired: '1'
-WorkerAutoScalingMin: '1'
-WorkerAutoScalingMax: '1'
+WorkerAutoScalingDesired: '2'
+WorkerAutoScalingMin: '0'
+WorkerAutoScalingMax: '2'
+WorkerAutoScalingScheduleStartCapacity: '2'
+# 8AM ET
+WorkerAutoScalingScheduleStartRecurrence: '0 13 * * 1-5'
+WorkerAutoScalingScheduleEndCapacity: '0'
+# 6PM ET
+WorkerAutoScalingScheduleEndRecurrence: '0 23 * * *'
 ITSIBaseURL: ''
 ITSISecretKey: ''
 RollbarServerSideAccessToken: ''
diff --git a/deployment/packer/template.js b/deployment/packer/template.js
index a18ed7230..e7feeb50d 100644
--- a/deployment/packer/template.js
+++ b/deployment/packer/template.js
@@ -63,7 +63,7 @@
         "ami_block_device_mappings": [
             {
                 "device_name": "/dev/sdf",
-                "snapshot_id": "snap-4a764b4a",
+                "snapshot_id": "snap-090ac799996dba0a4",
                 "volume_type": "gp2",
                 "delete_on_termination": true
             }
diff --git a/scripts/aws/setupdb.sh b/scripts/aws/setupdb.sh
index eb322ead9..000bbe908 100755
--- a/scripts/aws/setupdb.sh
+++ b/scripts/aws/setupdb.sh
@@ -84,7 +84,7 @@ fi
 if [ "$load_dep" = "true" ] ; then
     # Fetch DEP layers
     FILES=("dep_urban_areas.sql.gz" "dep_municipalities.sql.gz")
-    PATHS=("dep_urbanareas" "dep_municipalities")
+    PATHS=("urban_areas" "municipalities")
 
     download_and_load $FILES
     purge_tile_cache $PATHS
diff --git a/src/mmw/apps/core/templates/base.html b/src/mmw/apps/core/templates/base.html
index 2e796ef2d..344752258 100644
--- a/src/mmw/apps/core/templates/base.html
+++ b/src/mmw/apps/core/templates/base.html
@@ -1,3 +1,4 @@
+{% include 'head.html' %}
 {% load staticfiles %}
 
 
@@ -21,6 +22,7 @@
         ga('create', '{{ GOOGLE_ANALYTICS_ACCOUNT }}', 'auto');
         ga('send', 'pageview');
+        window.ga = ga;
 
 
diff --git a/src/mmw/apps/core/templates/head.html b/src/mmw/apps/core/templates/head.html
new file mode 100644
index 000000000..65575d8f8
--- /dev/null
+++ b/src/mmw/apps/core/templates/head.html
@@ -0,0 +1,25 @@
+{% load staticfiles %}
+
+
+
+
+  {% block metatitle %}
+  Model My Watershed
+  {% endblock metatitle %}
+
+
+
+
+
+
+
+
diff --git a/src/mmw/apps/home/views.py b/src/mmw/apps/home/views.py
index 0cca87450..43b537116 100644
--- a/src/mmw/apps/home/views.py
+++ b/src/mmw/apps/home/views.py
@@ -131,6 +131,7 @@ def get_client_settings(request):
         'vector_layers': get_layer_config(['vector', 'overlay']),
         'raster_layers': get_layer_config(['raster', 'overlay']),
         'stream_layers': get_layer_config(['stream', 'overlay']),
+        'nhd_perimeter': settings.NHD_REGION2_PERIMETER,
         'draw_tools': settings.DRAW_TOOLS,
         'map_controls': settings.MAP_CONTROLS,
         'vizer_urls': settings.VIZER_URLS,
diff --git a/src/mmw/apps/modeling/migrations/0020_old_scenarios.py b/src/mmw/apps/modeling/migrations/0020_old_scenarios.py
new file mode 100644
index 000000000..cfff265c3
--- /dev/null
+++ b/src/mmw/apps/modeling/migrations/0020_old_scenarios.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import models, migrations
+
+
+def clear_old_scenario_results(apps, schema_editor):
+    Scenario = apps.get_model('modeling', 'Scenario')
+    old_scenarios = Scenario.objects.filter(
+        project__model_package='tr-55'
+    ).filter(
+        results__contains='"result": {"pc_modified"'
+    )
+
+    for scenario in old_scenarios:
+        scenario.results = '[]'
+        scenario.modification_hash = ''
+        scenario.save()
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('modeling', '0019_project_gis_data'),
+    ]
+
+    operations = [
+        migrations.RunPython(clear_old_scenario_results)
+    ]
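One caveat on the data migration above: `RunPython` with no reverse function makes the migration irreversible, so any attempt to migrate backwards past 0020 will raise an error. If rollback matters, a reversible variant is a one-argument change — a sketch, assuming `RunPython.noop` is available (it exists from Django 1.8 onward):

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def clear_old_scenario_results(apps, schema_editor):
    pass  # forward logic exactly as in 0020_old_scenarios above


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0019_project_gis_data'),
    ]

    operations = [
        # RunPython.noop records the reverse step as a no-op, so
        # `migrate modeling 0019` succeeds without trying to restore
        # the cleared results (which are gone for good anyway).
        migrations.RunPython(clear_old_scenario_results,
                             migrations.RunPython.noop),
    ]
```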
diff --git a/src/mmw/apps/modeling/tasks.py b/src/mmw/apps/modeling/tasks.py
index 78d972e8c..4923dc97e 100644
--- a/src/mmw/apps/modeling/tasks.py
+++ b/src/mmw/apps/modeling/tasks.py
@@ -28,9 +28,11 @@
 CM_PER_INCH = 2.54
 ACRES_PER_SQM = 0.000247105
 
+DRB = 'drb'
+
 
 @shared_task
-def start_rwd_job(location, snapping):
+def start_rwd_job(location, snapping, data_source):
     """
     Calls the Rapid Watershed Delineation endpoint
     that is running in the Docker container, and returns
@@ -39,7 +41,8 @@
     """
     location = json.loads(location)
     lat, lng = location
-    rwd_url = 'http://localhost:5000/rwd/%f/%f' % (lat, lng)
+    end_point = 'rwd' if data_source == DRB else 'rwd-nhd'
+    rwd_url = 'http://localhost:5000/%s/%f/%f' % (end_point, lat, lng)
 
     # The Webserver defaults to enable snapping, uses 1 (true) 0 (false)
     if not snapping:
diff --git a/src/mmw/apps/modeling/views.py b/src/mmw/apps/modeling/views.py
index ebec1a437..d30f280a3 100644
--- a/src/mmw/apps/modeling/views.py
+++ b/src/mmw/apps/modeling/views.py
@@ -171,6 +171,7 @@ def start_rwd(request, format=None):
     user = request.user if request.user.is_authenticated() else None
     created = now()
     location = request.POST['location']
+    data_source = request.POST.get('dataSource', 'drb')
 
     # Parse out the JS style T/F to a boolean
     snappingParam = request.POST['snappingOn']
@@ -179,7 +180,8 @@
     job = Job.objects.create(created_at=created, result='', error='',
                              traceback='', user=user, status='started')
 
-    task_list = _initiate_rwd_job_chain(location, snapping, job.id)
+    task_list = _initiate_rwd_job_chain(location, snapping, data_source,
+                                        job.id)
 
     job.uuid = task_list.id
     job.save()
@@ -402,13 +404,14 @@ def choose_worker():
     return random.choice(get_living_workers())
 
 
-def _initiate_rwd_job_chain(location, snapping, job_id, testing=False):
+def _initiate_rwd_job_chain(location, snapping, data_source,
+                            job_id, testing=False):
     exchange = MAGIC_EXCHANGE
     routing_key = choose_worker()
     errback = save_job_error.s(job_id).set(exchange=MAGIC_EXCHANGE,
                                            routing_key=choose_worker())
 
-    return chain(tasks.start_rwd_job.s(location, snapping)
+    return chain(tasks.start_rwd_job.s(location, snapping, data_source)
                  .set(exchange=exchange, routing_key=routing_key),
                  save_job_result.s(job_id, location)
                  .set(exchange=exchange, routing_key=choose_worker()))
\ No newline at end of file
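The endpoint selection added to `start_rwd_job` is easy to sanity-check in isolation. This standalone sketch mirrors the logic from the diff (the coordinates are arbitrary):

```python
DRB = 'drb'


def rwd_url(location, data_source):
    # 'drb' keeps the original DRB-only 'rwd' endpoint; any other
    # dataSource (the view above defaults it to 'drb') routes to
    # the NHD-backed 'rwd-nhd' endpoint in the same container.
    lat, lng = location
    end_point = 'rwd' if data_source == DRB else 'rwd-nhd'
    return 'http://localhost:5000/%s/%f/%f' % (end_point, lat, lng)


assert rwd_url([39.95, -75.16], 'drb') == \
    'http://localhost:5000/rwd/39.950000/-75.160000'
assert rwd_url([39.95, -75.16], 'nhd') == \
    'http://localhost:5000/rwd-nhd/39.950000/-75.160000'
```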
diff --git a/src/mmw/js/src/analyze/templates/catchmentWaterQualityTable.html b/src/mmw/js/src/analyze/templates/catchmentWaterQualityTable.html
index e64977cea..96643d586 100644
--- a/src/mmw/js/src/analyze/templates/catchmentWaterQualityTable.html
+++ b/src/mmw/js/src/analyze/templates/catchmentWaterQualityTable.html
@@ -27,29 +27,6 @@
-
-
-    Total For Area of Interest (kg/ha)
-
-
-    {{ totalTN|filterNoData()|toLocaleString(3) }}
-
-
-    {{ totalTP|filterNoData()|toLocaleString(3) }}
-
-
-    {{ totalTSS|filterNoData()|toLocaleString(3) }}
-
-
-    --
-
-
-    --
-
-
-    --
-
 
diff --git a/src/mmw/js/src/analyze/templates/catchmentWaterQualityTableRow.html b/src/mmw/js/src/analyze/templates/catchmentWaterQualityTableRow.html
index 0932c8d9b..a1b1f76f2 100644
--- a/src/mmw/js/src/analyze/templates/catchmentWaterQualityTableRow.html
+++ b/src/mmw/js/src/analyze/templates/catchmentWaterQualityTableRow.html
@@ -39,13 +39,13 @@
     {{ areaha|filterNoData()|toLocaleString(3) }}
 
-    {{ (tn_tot_kgy/areaha)|filterNoData()|toLocaleString(3) }}
+    {{ tn_tot_kgy|filterNoData()|toLocaleString(3) }}
 
-    {{ (tp_tot_kgy/areaha)|filterNoData()|toLocaleString(3) }}
+    {{ tp_tot_kgy|filterNoData()|toLocaleString(3) }}
 
-    {{ (tss_tot_kg/areaha)|filterNoData()|toLocaleString(3) }}
+    {{ tss_tot_kg|filterNoData()|toLocaleString(3) }}
 
     {{ tn_yr_avg_|filterNoData()|toLocaleString(3) }}
diff --git a/src/mmw/js/src/analyze/tests.js b/src/mmw/js/src/analyze/tests.js
index 94a3229bf..c0abad626 100644
--- a/src/mmw/js/src/analyze/tests.js
+++ b/src/mmw/js/src/analyze/tests.js
@@ -144,11 +144,11 @@ function catchmentWaterQualityTableFormatter(categories) {
         var nord = category.get('nord').toString(),
             areaha = renderPtSrcAndWQTableRowValue(category.get('areaha')),
             tn_tot_kgy = renderPtSrcAndWQTableRowValue(
-                category.get('tn_tot_kgy')/category.get('areaha')),
+                category.get('tn_tot_kgy')),
             tp_tot_kgy = renderPtSrcAndWQTableRowValue(
-                category.get('tp_tot_kgy')/category.get('areaha')),
+                category.get('tp_tot_kgy')),
             tss_tot_kg = renderPtSrcAndWQTableRowValue(
-                category.get('tss_tot_kg')/category.get('areaha')),
+                category.get('tss_tot_kg')),
             tn_yr_avg_ = renderPtSrcAndWQTableRowValue(category.get('tn_yr_avg_')),
             tp_yr_avg_ = renderPtSrcAndWQTableRowValue(category.get('tp_yr_avg_')),
             tss_concmg = renderPtSrcAndWQTableRowValue(category.get('tss_concmg'));
diff --git a/src/mmw/js/src/analyze/views.js b/src/mmw/js/src/analyze/views.js
index 4f807b86d..a4e8127fc 100644
--- a/src/mmw/js/src/analyze/views.js
+++ b/src/mmw/js/src/analyze/views.js
@@ -419,11 +419,11 @@ var PointSourceTableView = Marionette.CompositeView.extend({
         return {
             headerUnits: this.options.units,
             totalMGD: utils.totalForPointSourceCollection(
-                this.collection.models, 'mgd'),
+                this.collection.fullCollection.models, 'mgd'),
             totalKGN: utils.totalForPointSourceCollection(
-                this.collection.models, 'kgn_yr'),
+                this.collection.fullCollection.models, 'kgn_yr'),
             totalKGP: utils.totalForPointSourceCollection(
-                this.collection.models, 'kgp_yr'),
+                this.collection.fullCollection.models, 'kgp_yr'),
             hasNextPage: this.collection.hasNextPage(),
             hasPreviousPage: this.collection.hasPreviousPage(),
             currentPage: this.collection.state.currentPage,
@@ -535,12 +535,6 @@ var CatchmentWaterQualityTableView = Marionette.CompositeView.extend({
     templateHelpers: function() {
         return {
             headerUnits: this.options.units,
-            totalTN: utils.totalForCatchmentWaterQualityCollection(
-                this.collection.fullCollection.models, 'tn_tot_kgy', 'areaha'),
-            totalTP: utils.totalForCatchmentWaterQualityCollection(
-                this.collection.fullCollection.models, 'tp_tot_kgy', 'areaha'),
-            totalTSS: utils.totalForCatchmentWaterQualityCollection(
-                this.collection.fullCollection.models, 'tss_tot_kg', 'areaha'),
             hasNextPage: this.collection.hasNextPage(),
             hasPreviousPage: this.collection.hasPreviousPage(),
             currentPage: this.collection.state.currentPage,
diff --git a/src/mmw/js/src/core/chart.js b/src/mmw/js/src/core/chart.js
index 97551c0e8..7eb3d32fe 100644
--- a/src/mmw/js/src/core/chart.js
+++ b/src/mmw/js/src/core/chart.js
@@ -7,6 +7,30 @@ var d3 = require('d3'),
 
 var widthCutoff = 400;
 
+// Make jQuery handle destroyed event.
+// http://stackoverflow.com/questions/2200494/
+// jquery-trigger-event-when-an-element-is-removed-from-the-dom
+(function($) {
+    $.event.special.destroyed = {
+        remove: function(o) {
+            if (o.handler) {
+                o.handler();
+            }
+        }
+    };
+})($);
+
+// When we replace a chart with a new one, the tooltip for the old chart
+// persists because it resides under the body tag instead of under
+// chartEl (the container div for the chart) like the other chart components.
+// Therefore, we manually remove the tooltip when elements under chartEl are
+// destroyed.
+function removeTooltipOnDestroy(chartEl, tooltip) {
+    $(chartEl).children().bind('destroyed', function() {
+        $('#' + tooltip.id()).remove();
+    });
+}
+
 function makeSvg(el) {
     // For some reason, the chart will only render if the style is
     // defined inline, even if it is blank.
@@ -140,6 +164,11 @@ function renderHorizontalBarChart(chartEl, data, options) {
         // redrawing the chart.
         $(chartEl).on('bar-chart:refresh', updateChart);
 
+        // This isn't strictly necessary since tooltips aren't enabled in the
+        // horizontal bar chart, but it's here defensively in case we start
+        // using them.
+        removeTooltipOnDestroy(chartEl, chart.tooltip);
+
         return chart;
     });
@@ -232,7 +261,6 @@ function renderVerticalBarChart(chartEl, data, options) {
             chart.color(options.seriesColors);
         }
 
-
         d3.select(svg)
             .datum(data)
             .call(chart);
@@ -242,6 +270,8 @@
         // redrawing the chart.
         $(chartEl).on('bar-chart:refresh', updateChart);
 
+        removeTooltipOnDestroy(chartEl, chart.tooltip);
+
         return chart;
     });
 }
diff --git a/src/mmw/js/src/core/itsiEmbed.js b/src/mmw/js/src/core/itsiEmbed.js
index 53b98f240..92d69a617 100644
--- a/src/mmw/js/src/core/itsiEmbed.js
+++ b/src/mmw/js/src/core/itsiEmbed.js
@@ -21,7 +21,7 @@ var ItsiEmbed = function(App) {
         this.url = window.location.origin + '/' + route + '?' + QUERY_SUFFIX;
         this.interactiveState = { route: route };
     }
-
+
     this.sendLearnerUrl();
 };
diff --git a/src/mmw/js/src/core/layerControl.js b/src/mmw/js/src/core/layerControl.js
index 47f1cbb92..01fbb4e11 100644
--- a/src/mmw/js/src/core/layerControl.js
+++ b/src/mmw/js/src/core/layerControl.js
@@ -48,8 +48,8 @@ module.exports = L.Control.Layers.extend({
                     self._map), 'DRB Point Source', 'observation');
                 self._update();
             })
-            .fail(function(reason) {
-                $('#observations-layer-list').text(reason);
+            .fail(function() {
+                $('#observations-layer-list').text('Could not load observations.');
             });
     },
diff --git a/src/mmw/js/src/core/models.js b/src/mmw/js/src/core/models.js
index d885c079f..c6a4760c7 100644
--- a/src/mmw/js/src/core/models.js
+++ b/src/mmw/js/src/core/models.js
@@ -85,7 +85,7 @@
 var TaskModel = Backbone.Model.extend({
     defaults: {
-        pollInterval: 500,
+        pollInterval: 1000,
 
         /* The timeout is set to 45 seconds here, while in the
            src/mmw/apps/modeling/tasks.py file it is set to 42 seconds.
           That was done because the countdown starts in the
diff --git a/src/mmw/js/src/core/templates/catchmentWaterQualityPopup.html b/src/mmw/js/src/core/templates/catchmentWaterQualityPopup.html
index 16159f56b..63c3ca256 100644
--- a/src/mmw/js/src/core/templates/catchmentWaterQualityPopup.html
+++ b/src/mmw/js/src/core/templates/catchmentWaterQualityPopup.html
@@ -3,9 +3,9 @@
 
     ID: {{ nord }}
 
     Source
-    TN (kg/y)
-    TP (kg/y)
-    TSS (kg/y)
+    TN (kg/ha)
+    TP (kg/ha)
+    TSS (kg/ha)
@@ -46,7 +46,7 @@
 
     ID: {{ nord }}
 
-    Riparian
+    Streambank
     {{ tn_riparia|filterNoData()|toLocaleString(3) }}
diff --git a/src/mmw/js/src/core/templates/header.html b/src/mmw/js/src/core/templates/header.html
index e9ed195f0..b2d0c25f9 100644
--- a/src/mmw/js/src/core/templates/header.html
+++ b/src/mmw/js/src/core/templates/header.html
@@ -1,7 +1,10 @@