diff --git a/CHANGES.rst b/CHANGES.rst index 6f2bcdbf..1e28c2c9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,38 @@ Changelog ========= +.. _changelog.12_0_0: + +12.0.0 (2021-08-04) +------------------- + +IMPORTANT - Breaking Changes +++++++++++++++++++++++++++++ + +* This release **removes** the ``EC2 / Max spot instance requests per region`` limit, which has been removed by AWS, in favor of six new vCPU-based limits: ``All F Spot Instance Requests``, ``All G Spot Instance Requests``, ``All Inf Spot Instance Requests``, ``All P Spot Instance Requests``, ``All X Spot Instance Requests``, and ``All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests``. +* This release **adds two new services**: ``CertificateManager`` (ACM) and ``CloudFront``. +* This release **requires additional IAM permissions**: ``acm:ListCertificates``, ``cloudfront:ListCloudFrontOriginAccessIdentities``, ``cloudfront:ListKeyGroups``, ``cloudfront:ListDistributions``, ``cloudfront:ListCachePolicies``, and ``cloudfront:ListOriginRequestPolicies``. + +IMPORTANT - Seeking New Maintainer +++++++++++++++++++++++++++++++++++ + +As I commented in `Issue #500 `__, I'm looking for someone to share (and perhaps take over) maintenance of this project. awslimitchecker is, and has always been, a personal-time-only project for me; the only time I've done work on it during my day job is when my employer was experiencing an issue or requested a specific feature. Because of a variety of issues, including changing personal interests and my employer relying on this project much less (following an AWS account restructuring that largely avoids service limits), I've been spending much less time on this project than it deserves. As a result, I'm looking for someone to help with maintenance... at the very least, helping review PRs and get them to a merge-able state. If you're interested, please comment on `Issue #500 `__ or contact me directly. 
While I am *incredibly* flattered by the offers I've received for sponsorship, paid support, or other financial incentive, I'd ask that anyone who's willing to make that commitment instead dedicate a few hours to working on issues or PRs. I, for my part, will make a concerted effort to quickly merge and release any PRs that meet all of the :ref:`development.pull_request_guidelines`. + +All Changes ++++++++++++ + +* `PR #532 `__ - Add Quotas Service support for ECS Fargate quotas. Thanks to `robpickerill `__ for this contribution. +* `PR #533 `__ / Fixes `Issue #527 `__ - Fix Quotas Service quota names for EIPs. Thanks to `robpickerill `__ for this contribution. +* `PR #534 `__ / Fixes `Issue #521 `__ - Update Quotas Service quota names for EBS. Thanks to `robpickerill `__ for this contribution. +* `PR #535 `__ / Fixes `Issue #518 `__ - Fix EC2 Security Group counts to only include groups owned by the current account. Thanks to `robpickerill `__ for this contribution. +* `PR #536 `__ / Fixes `Issue #512 `__ - Fix CloudWatch metrics queries to get data from one minute ago, to fix bug where GetMetricData is not yet populated. Thanks to `robpickerill `__ for this contribution. +* `PR #543 `__ / Fixes `Issue #538 `__ - Fix issue with calculation of usage for EC2 Rules Per Network ACL. Thanks to `jwu2 `__ for this contribution. +* `PR #537 `__ - Use boto3 adaptive retry mode. Thanks to `robpickerill `__ for this contribution. +* `PR #547 `__ / Fixes `Issue #502 `__ - Replace ``EC2 / Max spot instance requests per region`` limit, which has been removed by AWS, with new vCPU-based spot instance requests limits. This also switches to using CloudWatch metric data to retrieve current usage. Thanks to `TagadaPoe `__ for this contribution. +* `PR #546 `__ / Fixes `Issue #540 `__ - Add support for ACM (Certificate Manager) limits. Thanks to `TagadaPoe `__ for this contribution. +* `PR #545 `__ / Fixes `Issue #539 `__ - Add support for CloudFront limits. 
Thanks to `TagadaPoe `__ for this contribution. +* `Issue #551 `__ - Allow custom host for Datadog metric provider. + .. _changelog.11_0_0: 11.0.0 (2021-04-20) diff --git a/awslimitchecker/connectable.py b/awslimitchecker/connectable.py index 55cf7592..57456979 100644 --- a/awslimitchecker/connectable.py +++ b/awslimitchecker/connectable.py @@ -112,9 +112,13 @@ def connect(self): """ if self.conn is not None: return + + default_config = Config(retries={'mode': 'adaptive'}) kwargs = dict(self._boto3_connection_kwargs) + kwargs['config'] = default_config + if self._max_retries_config is not None: - kwargs['config'] = self._max_retries_config + kwargs['config'] = default_config.merge(self._max_retries_config) self.conn = boto3.client(self.api_name, **kwargs) logger.info("Connected to %s in region %s", self.api_name, self.conn._client_config.region_name) @@ -132,9 +136,14 @@ def connect_resource(self): """ if self.resource_conn is not None: return + + default_config = Config(retries={'mode': 'adaptive'}) kwargs = dict(self._boto3_connection_kwargs) + kwargs['config'] = default_config + if self._max_retries_config is not None: - kwargs['config'] = self._max_retries_config + kwargs['config'] = default_config.merge(self._max_retries_config) + self.resource_conn = boto3.resource(self.api_name, **kwargs) logger.info("Connected to %s (resource) in region %s", self.api_name, self.resource_conn.meta.client._client_config.region_name) diff --git a/awslimitchecker/metrics/datadog.py b/awslimitchecker/metrics/datadog.py index 82d1aad0..c2b5fc3a 100644 --- a/awslimitchecker/metrics/datadog.py +++ b/awslimitchecker/metrics/datadog.py @@ -53,7 +53,7 @@ class Datadog(MetricsProvider): def __init__( self, region_name, prefix='awslimitchecker.', api_key=None, - extra_tags=None + extra_tags=None, host='https://api.datadoghq.com' ): """ Initialize the Datadog metrics provider. This class does not have any @@ -68,6 +68,11 @@ def __init__( :param api_key: Datadog API key. 
May alternatively be specified by the ``DATADOG_API_KEY`` environment variable. :type api_key: str + :param host: The datadog host URL to use; defaults to + ``https://api.datadoghq.com``. This parameter is overridden by the + ``DATADOG_HOST`` environment variable, if set. This must NOT end with + a trailing slash. + :type host: str :param extra_tags: CSV list of additional tags to send with metrics. All metrics will automatically be tagged with ``region:`` :type extra_tags: str @@ -78,6 +83,7 @@ def __init__( if extra_tags is not None: self._tags.extend(extra_tags.split(',')) self._api_key = os.environ.get('DATADOG_API_KEY') + self._host = os.environ.get('DATADOG_HOST', host) if api_key is not None: self._api_key = api_key if self._api_key is None: @@ -88,7 +94,7 @@ def __init__( self._validate_auth(self._api_key) def _validate_auth(self, api_key): - url = 'https://api.datadoghq.com/api/v1/validate?api_key=%s' + url = self._host + '/api/v1/validate?api_key=%s' logger.debug('Validating Datadog API key: GET %s', url) url = url % api_key r = self._http.request('GET', url) @@ -149,8 +155,7 @@ def flush(self): logger.info('POSTing %d metrics to datadog', len(series)) data = {'series': series} encoded = json.dumps(data).encode('utf-8') - url = 'https://api.datadoghq.com/api/v1/series' \ - '?api_key=%s' % self._api_key + url = self._host + '/api/v1/series?api_key=%s' % self._api_key resp = self._http.request( 'POST', url, headers={'Content-type': 'application/json'}, diff --git a/awslimitchecker/services/__init__.py b/awslimitchecker/services/__init__.py index d5031892..bb99f310 100644 --- a/awslimitchecker/services/__init__.py +++ b/awslimitchecker/services/__init__.py @@ -40,7 +40,10 @@ from awslimitchecker.services.base import _AwsService from awslimitchecker.services.apigateway import _ApigatewayService from awslimitchecker.services.autoscaling import _AutoscalingService +from awslimitchecker.services.certificatemanager import \ + _CertificatemanagerService from 
awslimitchecker.services.cloudformation import _CloudformationService +from awslimitchecker.services.cloudfront import _CloudfrontService from awslimitchecker.services.cloudtrail import _CloudTrailService from awslimitchecker.services.directoryservice import _DirectoryserviceService from awslimitchecker.services.dynamodb import _DynamodbService diff --git a/awslimitchecker/services/base.py b/awslimitchecker/services/base.py index 359c7186..7f453529 100644 --- a/awslimitchecker/services/base.py +++ b/awslimitchecker/services/base.py @@ -353,8 +353,8 @@ def _get_cloudwatch_usage_latest( } } ], - StartTime=datetime.utcnow() - timedelta(hours=1), - EndTime=datetime.utcnow(), + StartTime=datetime.utcnow() - timedelta(hours=1, minutes=1), + EndTime=datetime.utcnow() - timedelta(minutes=1), ScanBy='TimestampDescending', MaxDatapoints=1 ) diff --git a/awslimitchecker/services/certificatemanager.py b/awslimitchecker/services/certificatemanager.py new file mode 100644 index 00000000..91735062 --- /dev/null +++ b/awslimitchecker/services/certificatemanager.py @@ -0,0 +1,117 @@ +""" +awslimitchecker/services/certificatemanager.py + +The latest version of this package is available at: + + +################################################################################ +Copyright 2015-2018 Jason Antman + + This file is part of awslimitchecker, also known as awslimitchecker. + + awslimitchecker is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + awslimitchecker is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with awslimitchecker. If not, see . + +The Copyright and Authors attributions contained herein may not be removed or +otherwise altered, except to add the Author attribution of a contributor to +this work. (Additional Terms pursuant to Section 7b of the AGPL v3) +################################################################################ +While not legally required, I sincerely request that anyone who finds +bugs please submit them at or +to me via email, and that you send any contributions or improvements +either as a pull request on GitHub, or to me via email. +################################################################################ + +AUTHORS: +Jason Antman +################################################################################ +""" + +import abc # noqa +import logging + +from .base import _AwsService +from ..limit import AwsLimit +from ..utils import paginate_dict + +logger = logging.getLogger(__name__) + + +class _CertificatemanagerService(_AwsService): + + service_name = 'CertificateManager' + api_name = 'acm' # AWS API name to connect to (boto3.client) + quotas_service_code = 'acm' + + def find_usage(self): + """ + List ACM certificates by calling AWS list_certificates, and + update usage in self.limits for the limit 'ACM certificates' + """ + logger.debug("Checking usage for service %s", self.service_name) + self.connect() + for lim in self.limits.values(): + lim._reset_usage() + + self._find_usage_certificates() + + self._have_usage = True + logger.debug("Done checking usage.") + + def _find_usage_certificates(self): + """find usage for ACM certificates""" + res = paginate_dict( + self.conn.list_certificates, + alc_marker_path=['NextToken'], + alc_data_path=['CertificateSummaryList'], + alc_marker_param='NextToken' + ) + if 'CertificateSummaryList' not in res: + nb_certificates = 0 + else: + nb_certificates = 
len(res['CertificateSummaryList']) + self.limits['ACM certificates']._add_current_usage(nb_certificates) + + def get_limits(self): + """ + Return all known limits for this service, as a dict of their names + to :py:class:`~.AwsLimit` objects. + + :returns: dict of limit names to :py:class:`~.AwsLimit` objects + :rtype: dict + """ + if self.limits != {}: + return self.limits + limits = {} + limits['ACM certificates'] = AwsLimit( + 'ACM certificates', + self, + 1000, + self.warning_threshold, + self.critical_threshold + ) + self.limits = limits + return limits + + def required_iam_permissions(self): + """ + Return a list of IAM Actions required for this Service to function + properly. All Actions will be shown with an Effect of "Allow" + and a Resource of "*". + + :returns: list of IAM Action strings + :rtype: list + """ + return [ + "acm:ListCertificates", + ] diff --git a/awslimitchecker/services/cloudfront.py b/awslimitchecker/services/cloudfront.py new file mode 100644 index 00000000..2196e0da --- /dev/null +++ b/awslimitchecker/services/cloudfront.py @@ -0,0 +1,800 @@ +""" +awslimitchecker/services/cloudfront.py + +The latest version of this package is available at: + + +################################################################################ +Copyright 2015-2018 Jason Antman + + This file is part of awslimitchecker, also known as awslimitchecker. + + awslimitchecker is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + awslimitchecker is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with awslimitchecker. If not, see . + +The Copyright and Authors attributions contained herein may not be removed or +otherwise altered, except to add the Author attribution of a contributor to +this work. (Additional Terms pursuant to Section 7b of the AGPL v3) +################################################################################ +While not legally required, I sincerely request that anyone who finds +bugs please submit them at or +to me via email, and that you send any contributions or improvements +either as a pull request on GitHub, or to me via email. +################################################################################ + +AUTHORS: +Jason Antman +################################################################################ +""" + +import abc # noqa +import logging +from collections import Counter + +from .base import _AwsService +from ..limit import AwsLimit +from ..utils import paginate_dict + +logger = logging.getLogger(__name__) + + +class _CloudfrontService(_AwsService): + + service_name = "CloudFront" + api_name = "cloudfront" # AWS API name to connect to (boto3.client) + quotas_service_code = "cloudfront" + + def find_usage(self): + """ + Determine the current usage for each limit of this service, + and update corresponding Limit via + :py:meth:`~.AwsLimit._add_current_usage`. 
+ """ + logger.debug("Checking usage for service %s", self.service_name) + self.connect() + for lim in self.limits.values(): + lim._reset_usage() + + self._find_usage_distributions() + self._find_usage_keygroups() + self._find_usage_origin_access_identities() + self._find_usage_cache_policies() + self._find_usage_origin_request_policies() + + self._have_usage = True + logger.debug("Done checking usage.") + + def _find_usage_distributions(self): + """ + List CloudFront distributions by calling AWS list_distributions, and + update usage in self.limits for the following limits: + + Per-distribution: + - Alternate domain names (CNAMEs) per distribution + - Cache behaviors per distribution + - Origins per distribution + - Origin groups per distribution + - Key groups associated with a single distribution + + Per cache behavior: + - Key groups associated with a single cache behavior + - Whitelisted cookies per cache behavior + - Whitelisted headers per cache behavior + - Whitelisted query strings per cache behavior + + Global: + - Distributions associated with a single key group + - Distributions associated with a single cache policy + - Distributions associated with a single origin request policy + - Distributions per AWS account + """ + + # Read distribution list from AWS + res = paginate_dict( + self.conn.list_distributions, + alc_marker_path=['DistributionList', 'NextMarker'], + alc_data_path=['DistributionList', 'Items'], + alc_marker_param='Marker' + ) + if 'Items' not in res['DistributionList']: + nb_distributions = 0 + else: + distributions = res['DistributionList']['Items'] + nb_distributions = len(distributions) + + # number of times a keygroup is referenced, in all distributions + keygroup_references = Counter() + cache_policy_references = Counter() + origin_request_policy_references = Counter() + + for d in distributions: + # Count alternate domain names + nb_aliases = 0 + if ('Aliases' in d) and ('Items' in d['Aliases']): + nb_aliases = 
len(d['Aliases']['Items']) + self.limits[ + 'Alternate domain names (CNAMEs) per distribution' + ]._add_current_usage( + nb_aliases, + resource_id=d['Id'], + aws_type='AWS::CloudFront::Distribution', + ) + + # Count cache behaviors + # Note: the AWS documentation does not specify this, but + # the quota includes the default cache behavior. + nb_cache_behaviors = 1 # 1 for default cache behavior + if ('CacheBehaviors' in d) and ('Items' in d['CacheBehaviors']): + nb_cache_behaviors += len(d['CacheBehaviors']['Items']) + self.limits[ + 'Cache behaviors per distribution' + ]._add_current_usage( + nb_cache_behaviors, + resource_id=d['Id'], + aws_type='AWS::CloudFront::Distribution', + ) + + # Count origins + nb_origins = 0 + if ('Origins' in d) and ('Items' in d['Origins']): + nb_origins = len(d['Origins']['Items']) + self.limits[ + 'Origins per distribution' + ]._add_current_usage( + nb_origins, + resource_id=d['Id'], + aws_type='AWS::CloudFront::Distribution', + ) + + # Count origin groups + nb_origin_groups = 0 + if ('OriginGroups' in d) and ('Items' in d['OriginGroups']): + nb_origin_groups = len(d['OriginGroups']['Items']) + self.limits[ + 'Origin groups per distribution' + ]._add_current_usage( + nb_origin_groups, + resource_id=d['Id'], + aws_type='AWS::CloudFront::Distribution', + ) + + # Count: + # - keygroups in cache behaviors + # - whitelisted cookies in cache behaviors + # - whitelisted headers in cache behaviors + # - whitelisted query strings in cache behaviors + keygroups = set() + cache_policies = set() + origin_request_policies = set() + + # Iterate over additional cache behaviors + if ('CacheBehaviors' in d) and ('Items' in d['CacheBehaviors']): + for cb in d['CacheBehaviors']['Items']: + res_id = "{}-cache-behavior-{}".format( + d['Id'], cb['PathPattern']) + + # Count key groups + nb_keygroups = 0 + if ('TrustedKeyGroups' in cb) and ( + 'Items' in cb['TrustedKeyGroups']): + # counting the KG even if not Enabled + 
keygroups.update(cb['TrustedKeyGroups']['Items']) + nb_keygroups = len(cb['TrustedKeyGroups']['Items']) + self.limits[ + 'Key groups associated with a single cache behavior' + ]._add_current_usage(nb_keygroups, resource_id=res_id) + + # Count whitelisted cookies + nb_cookies = 0 + try: + nb_cookies = len(cb['ForwardedValues']['Cookies'][ + 'WhitelistedNames']['Items']) + except KeyError: + pass + self.limits[ + 'Whitelisted cookies per cache behavior' + ]._add_current_usage(nb_cookies, resource_id=res_id) + + # Count whitelisted headers + nb_headers = 0 + try: + nb_headers = len( + cb['ForwardedValues']['Headers']['Items']) + except KeyError: + pass + self.limits[ + 'Whitelisted headers per cache behavior' + ]._add_current_usage(nb_headers, resource_id=res_id) + + # Count whitelisted query strings + nb_querystring = 0 + try: + nb_querystring = len(cb['ForwardedValues'][ + 'QueryStringCacheKeys']['Items']) + except KeyError: + pass + self.limits[ + 'Whitelisted query strings per cache behavior' + ]._add_current_usage(nb_querystring, resource_id=res_id) + + if 'CachePolicyId' in cb: + cache_policies.add(cb['CachePolicyId']) + if 'OriginRequestPolicyId' in cb: + origin_request_policies.add( + cb['OriginRequestPolicyId']) + + # Default cache behavior + if 'DefaultCacheBehavior' in d: + cb = d['DefaultCacheBehavior'] + res_id = "{}-default-cache-behavior".format(d['Id']) + + nb_keygroups = 0 + if ('TrustedKeyGroups' in cb) and ( + 'Items' in cb['TrustedKeyGroups']): + # counting the KG even if not Enabled + keygroups.update(cb['TrustedKeyGroups']['Items']) + nb_keygroups = len(cb['TrustedKeyGroups']['Items']) + + self.limits[ + 'Key groups associated with a single cache behavior' + ]._add_current_usage(nb_keygroups, resource_id=res_id) + + # Count whitelisted cookies + nb_cookies = 0 + try: + nb_cookies = len(cb['ForwardedValues']['Cookies'][ + 'WhitelistedNames']['Items']) + except KeyError: + pass + self.limits[ + 'Whitelisted cookies per cache behavior' + 
]._add_current_usage(nb_cookies, resource_id=res_id) + + # Count whitelisted headers + nb_headers = 0 + try: + nb_headers = len( + cb['ForwardedValues']['Headers']['Items']) + except KeyError: + pass + self.limits[ + 'Whitelisted headers per cache behavior' + ]._add_current_usage(nb_headers, resource_id=res_id) + + # Count whitelisted query strings + nb_querystring = 0 + try: + nb_querystring = len(cb['ForwardedValues'][ + 'QueryStringCacheKeys']['Items']) + except KeyError: + pass + self.limits[ + 'Whitelisted query strings per cache behavior' + ]._add_current_usage(nb_querystring, resource_id=res_id) + + if 'CachePolicyId' in cb: + cache_policies.add(cb['CachePolicyId']) + if 'OriginRequestPolicyId' in cb: + origin_request_policies.add( + cb['OriginRequestPolicyId']) + + self.limits[ + 'Key groups associated with a single distribution' + ]._add_current_usage( + len(keygroups), + resource_id=d['Id'], + aws_type='AWS::CloudFront::Distribution', + ) + + keygroup_references.update(keygroups) + cache_policy_references.update(cache_policies) + origin_request_policy_references.update(origin_request_policies) + + for k, count in keygroup_references.items(): + self.limits[ + 'Distributions associated with a single key group' + ]._add_current_usage(count, resource_id=k) + + for k, count in cache_policy_references.items(): + self.limits[ + 'Distributions associated with the same cache policy' + ]._add_current_usage(count, resource_id=k) + + for k, count in origin_request_policy_references.items(): + self.limits[ + 'Distributions associated with the same origin request ' + 'policy' + ]._add_current_usage(count, resource_id=k) + + self.limits['Distributions per AWS account']._add_current_usage( + nb_distributions, + aws_type='AWS::CloudFront::Distribution', + ) + + def _find_usage_keygroups(self): + """ + List CloudFront key groups from AWS, and update usage in self.limits + for the following limits: + - Key groups per AWS account + - Public keys in a single key group + """ + 
+ # Read keygroup list from AWS + res = paginate_dict( + self.conn.list_key_groups, + alc_marker_path=['KeyGroupList', 'NextMarker'], + alc_data_path=['KeyGroupList', 'Items'], + alc_marker_param='Marker' + ) + nb_keygroups = 0 + try: + keygroups = res['KeyGroupList']['Items'] + nb_keygroups = len(keygroups) + except KeyError: + pass + + self.limits['Key groups per AWS account']._add_current_usage( + nb_keygroups, + aws_type='AWS::CloudFront::KeyGroup', + ) + + if 'Items' in res['KeyGroupList']: + for kg in res['KeyGroupList']['Items']: + nb_keys = 0 + try: + nb_keys = len(kg['KeyGroup']['KeyGroupConfig']['Items']) + except KeyError: + pass + self.limits[ + 'Public keys in a single key group' + ]._add_current_usage(nb_keys, resource_id=kg['KeyGroup']['Id']) + + def _find_usage_origin_access_identities(self): + """ + List CloudFront origin access identities from AWS, and update usage in + self.limits for the limit "Origin access identities per account". + """ + + # Read usage from AWS + res = paginate_dict( + self.conn.list_cloud_front_origin_access_identities, + alc_marker_path=['CloudFrontOriginAccessIdentityList', + 'NextMarker'], + alc_data_path=['CloudFrontOriginAccessIdentityList', 'Items'], + alc_marker_param='Marker' + ) + if 'Items' not in res['CloudFrontOriginAccessIdentityList']: + nb_origin_access_identities = 0 + else: + origin_access_identities = res['CloudFrontOriginAccessIdentityList' + ]['Items'] + nb_origin_access_identities = len(origin_access_identities) + + self.limits["Origin access identities per account"]._add_current_usage( + nb_origin_access_identities, + aws_type='AWS::CloudFront::KeyGroup', + ) + + def _find_usage_cache_policies(self): + """ + List CloudFront cache policies from AWS, and update usage in + self.limits for the following limits: + + - Cache policies per AWS account + - Cookies per cache policy + - Headers per cache policy + - Query strings per cache policy + """ + + # Read usage from AWS + res = paginate_dict( + # count only 
the custom cache policies, not the managed ones + self.conn.list_cache_policies, + Type='custom', + alc_marker_path=['CachePolicyList', + 'NextMarker'], + alc_data_path=['CachePolicyList', 'Items'], + alc_marker_param='Marker' + ) + if 'Items' not in res['CachePolicyList']: + nb_resources = 0 + else: + cache_policies = res['CachePolicyList' + ]['Items'] + nb_resources = len(cache_policies) + + self.limits["Cache policies per AWS account"]._add_current_usage( + nb_resources, + aws_type='AWS::CloudFront::CachePolicy', + ) + + if 'Items' in res['CachePolicyList']: + for cp in res['CachePolicyList']['Items']: + # Count whitelisted cookies + nb_cookies = 0 + try: + nb_cookies = len(cp['CachePolicy']['CachePolicyConfig'][ + 'ParametersInCacheKeyAndForwardedToOrigin'][ + 'CookiesConfig']['Cookies']['Items']) + except KeyError: + pass + self.limits[ + 'Cookies per cache policy' + ]._add_current_usage(nb_cookies, + resource_id=cp['CachePolicy']['Id']) + + # Count whitelisted headers + nb_headers = 0 + try: + nb_headers = len(cp['CachePolicy']['CachePolicyConfig'][ + 'ParametersInCacheKeyAndForwardedToOrigin'][ + 'HeadersConfig']['Headers']['Items']) + except KeyError: + pass + self.limits[ + 'Headers per cache policy' + ]._add_current_usage(nb_headers, + resource_id=cp['CachePolicy']['Id']) + + # Count whitelisted query strings + nb_querystring = 0 + try: + nb_querystring = len(cp['CachePolicy']['CachePolicyConfig'][ + 'ParametersInCacheKeyAndForwardedToOrigin'][ + 'QueryStringsConfig']['QueryStrings']['Items']) + except KeyError: + pass + self.limits[ + 'Query strings per cache policy' + ]._add_current_usage(nb_querystring, + resource_id=cp['CachePolicy']['Id']) + + def _find_usage_origin_request_policies(self): + """ + List CloudFront origin request policies from AWS, and update usage in + self.limits for the following limits: + + - Origin request policies per AWS account + - Cookies per origin request policy + - Headers per origin request policy + - Query strings per 
origin request policy + """ + + # Read usage from AWS + res = paginate_dict( + # count only the custom origin request policies + self.conn.list_origin_request_policies, + Type='custom', + alc_marker_path=['OriginRequestPolicyList', + 'NextMarker'], + alc_data_path=['OriginRequestPolicyList', 'Items'], + alc_marker_param='Marker' + ) + if 'Items' not in res['OriginRequestPolicyList']: + nb_resources = 0 + else: + origin_request_policies = res['OriginRequestPolicyList' + ]['Items'] + nb_resources = len(origin_request_policies) + + self.limits["Origin request policies per AWS account" + ]._add_current_usage( + nb_resources, + aws_type='AWS::CloudFront::OriginRequestPolicy', + ) + + if 'Items' in res['OriginRequestPolicyList']: + for cp in res['OriginRequestPolicyList']['Items']: + # Count cookies + nb_cookies = 0 + try: + nb_cookies = len( + cp['OriginRequestPolicy']['OriginRequestPolicyConfig'][ + 'CookiesConfig']['Cookies']['Items']) + except KeyError: + pass + self.limits[ + 'Cookies per origin request policy' + ]._add_current_usage( + nb_cookies, + resource_id=cp['OriginRequestPolicy']['Id']) + + # Count headers + nb_headers = 0 + try: + nb_headers = len( + cp['OriginRequestPolicy']['OriginRequestPolicyConfig'][ + 'HeadersConfig']['Headers']['Items']) + except KeyError: + pass + self.limits[ + 'Headers per origin request policy' + ]._add_current_usage( + nb_headers, + resource_id=cp['OriginRequestPolicy']['Id']) + + # Count query strings + nb_querystring = 0 + try: + nb_querystring = len( + cp['OriginRequestPolicy']['OriginRequestPolicyConfig'][ + 'QueryStringsConfig']['QueryStrings']['Items']) + except KeyError: + pass + self.limits[ + 'Query strings per origin request policy' + ]._add_current_usage( + nb_querystring, + resource_id=cp['OriginRequestPolicy']['Id']) + + def get_limits(self): + """ + Return all known limits for this service, as a dict of their names + to :py:class:`~.AwsLimit` objects. 
+ + :returns: dict of limit names to :py:class:`~.AwsLimit` objects + :rtype: dict + """ + if self.limits != {}: + return self.limits + limits = {} + + limits["Distributions per AWS account"] = AwsLimit( + "Distributions per AWS account", + self, + 200, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + quotas_name="Web distributions per AWS account", + ) + + limits["Alternate domain names (CNAMEs) per distribution"] = AwsLimit( + "Alternate domain names (CNAMEs) per distribution", + self, + 100, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + quotas_name="Alternate domain names (CNAMEs) per distribution", + ) + + limits["Cache behaviors per distribution"] = AwsLimit( + "Cache behaviors per distribution", + self, + 25, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + quotas_name="Cache behaviors per distribution", + ) + + limits["Origins per distribution"] = AwsLimit( + "Origins per distribution", + self, + 25, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + quotas_name="Origins per distribution", + ) + + limits["Origin groups per distribution"] = AwsLimit( + "Origin groups per distribution", + self, + 10, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + quotas_name="Origin groups per distribution", + ) + + # This limit is listed by the "Service Quotas" service, but not in the + # CloudFront documentation. 
+ limits["Key groups associated with a single distribution"] = AwsLimit( + "Key groups associated with a single distribution", + self, + 4, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + quotas_name="Key groups associated with a single distribution", + ) + + # This limit is listed in the CloudFront documentation, but not in the + # "Service Quotas" service. + limits["Key groups associated with a single cache behavior"] = AwsLimit( + "Key groups associated with a single cache behavior", + self, + 4, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::Distribution", + ) + + limits["Key groups per AWS account"] = AwsLimit( + "Key groups per AWS account", + self, + 10, + self.warning_threshold, + self.critical_threshold, + limit_type="AWS::CloudFront::KeyGroup", + quotas_name="Key groups per AWS account" + ) + + limits["Origin access identities per account"] = AwsLimit( + "Origin access identities per account", + self, + 100, + self.warning_threshold, + self.critical_threshold, + quotas_name="Origin access identities per account" + ) + + limits["Cache policies per AWS account"] = AwsLimit( + "Cache policies per AWS account", + self, + 20, + self.warning_threshold, + self.critical_threshold, + quotas_name="Cache policies per AWS account" + ) + + limits["Origin request policies per AWS account"] = AwsLimit( + "Origin request policies per AWS account", + self, + 20, + self.warning_threshold, + self.critical_threshold, + quotas_name="Origin request policies per AWS account" + ) + + limits["Whitelisted cookies per cache behavior"] = AwsLimit( + "Whitelisted cookies per cache behavior", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Whitelisted cookies per cache behavior" + ) + + limits["Whitelisted headers per cache behavior"] = AwsLimit( + "Whitelisted headers per cache behavior", + self, + 10, + self.warning_threshold, + self.critical_threshold, + 
quotas_name="Whitelisted headers per cache behavior" + ) + + limits["Whitelisted query strings per cache behavior"] = AwsLimit( + "Whitelisted query strings per cache behavior", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Whitelisted query strings per cache behavior" + ) + + limits["Cookies per cache policy"] = AwsLimit( + "Cookies per cache policy", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Cookies per cache policy" + ) + + limits["Headers per cache policy"] = AwsLimit( + "Headers per cache policy", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Headers per cache policy" + ) + + limits["Query strings per cache policy"] = AwsLimit( + "Query strings per cache policy", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Query strings per cache policy" + ) + + limits["Cookies per origin request policy"] = AwsLimit( + "Cookies per origin request policy", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Cookies per origin request policy" + ) + + limits["Headers per origin request policy"] = AwsLimit( + "Headers per origin request policy", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Headers per origin request policy" + ) + + limits["Query strings per origin request policy"] = AwsLimit( + "Query strings per origin request policy", + self, + 10, + self.warning_threshold, + self.critical_threshold, + quotas_name="Query strings per origin request policy" + ) + + limits["Public keys in a single key group"] = AwsLimit( + "Public keys in a single key group", + self, + 5, + self.warning_threshold, + self.critical_threshold, + quotas_name="Public keys in a single key group" + ) + + limits["Distributions associated with a single key group"] = AwsLimit( + "Distributions associated with a single key group", + self, + 100, + self.warning_threshold, + 
self.critical_threshold, + quotas_name="Distributions associated with a single key group" + ) + + limits["Distributions associated with the same cache policy"] = \ + AwsLimit( + "Distributions associated with the same cache policy", + self, + 100, + self.warning_threshold, + self.critical_threshold, + quotas_name="Distributions associated with the same cache policy" + ) + + limits["Distributions associated with the same origin request policy"] \ + = AwsLimit( + "Distributions associated with the same origin request policy", + self, + 100, + self.warning_threshold, + self.critical_threshold, + quotas_name="Distributions associated with the same origin request " + "policy" + ) + + self.limits = limits + return limits + + def required_iam_permissions(self): + """ + Return a list of IAM Actions required for this Service to function + properly. All Actions will be shown with an Effect of "Allow" + and a Resource of "*". + + :returns: list of IAM Action strings + :rtype: list + """ + return [ + "cloudfront:ListCloudFrontOriginAccessIdentities", + "cloudfront:ListKeyGroups", + "cloudfront:ListDistributions", + "cloudfront:ListCachePolicies", + "cloudfront:ListOriginRequestPolicies" + ] diff --git a/awslimitchecker/services/ebs.py b/awslimitchecker/services/ebs.py index bb23bd16..dcb25bf0 100644 --- a/awslimitchecker/services/ebs.py +++ b/awslimitchecker/services/ebs.py @@ -223,7 +223,7 @@ def _get_limits_ebs(self): self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='io1', - quotas_name='Provisioned IOPS (io1)' + quotas_name='IOPS for Provisioned IOPS SSD (io1) volumes' ) limits['Provisioned IOPS SSD (io1) storage (GiB)'] = AwsLimit( 'Provisioned IOPS SSD (io1) storage (GiB)', @@ -233,7 +233,7 @@ def _get_limits_ebs(self): self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='io1', - quotas_name='Provisioned IOPS (SSD) volume storage', + quotas_name='Storage for Provisioned IOPS SSD (io1) volumes', quotas_unit='GiB', 
quotas_unit_converter=convert_TiB_to_GiB ) @@ -245,7 +245,7 @@ def _get_limits_ebs(self): self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='io2', - quotas_name='Provisioned IOPS (io2)' + quotas_name='IOPS for Provisioned IOPS SSD (io2) volumes' ) limits['Provisioned IOPS SSD (io2) storage (GiB)'] = AwsLimit( 'Provisioned IOPS SSD (io2) storage (GiB)', @@ -255,7 +255,7 @@ def _get_limits_ebs(self): self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='io2', - quotas_name='Provisioned IOPS SSD (io2) volume storage', + quotas_name='Storage for Provisioned IOPS SSD (io2) volumes', quotas_unit='GiB', quotas_unit_converter=convert_TiB_to_GiB ) @@ -294,7 +294,7 @@ def _get_limits_ebs(self): limit_type='AWS::EC2::Volume', limit_subtype='standard', ta_limit_name='Magnetic (standard) volume storage (GiB)', - quotas_name='Magnetic volume storage', + quotas_name='Storage for Magnetic (standard) volumes', quotas_unit='GiB', quotas_unit_converter=convert_TiB_to_GiB ) @@ -306,7 +306,7 @@ def _get_limits_ebs(self): self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='st1', - quotas_name='Max Throughput Optimized HDD (ST1) Storage', + quotas_name='Storage for Throughput Optimized HDD (st1) volumes', quotas_unit='GiB', quotas_unit_converter=convert_TiB_to_GiB ) @@ -318,7 +318,7 @@ def _get_limits_ebs(self): self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='sc1', - quotas_name='Max Cold HDD (SC1) Storage', + quotas_name='Storage for Cold HDD (sc1) volumes', quotas_unit='GiB', quotas_unit_converter=convert_TiB_to_GiB ) diff --git a/awslimitchecker/services/ec2.py b/awslimitchecker/services/ec2.py index 58b57094..d72b17a1 100644 --- a/awslimitchecker/services/ec2.py +++ b/awslimitchecker/services/ec2.py @@ -104,6 +104,16 @@ class _Ec2Service(_AwsService): 'u-24tb1.metal' ] + instance_family_to_spot_limit_name = { + 'F': 'All F Spot Instance Requests', + 'G': 'All G Spot Instance Requests', + 'Inf': 'All Inf 
Spot Instance Requests', + 'P': 'All P Spot Instance Requests', + 'X': 'All X Spot Instance Requests', + 'Standard': 'All Standard (A, C, D, H, I, M, R, T, Z)' + ' Spot Instance Requests' + } + def find_usage(self): """ Determine the current usage for each limit of this service, @@ -202,25 +212,20 @@ def _find_usage_instances_vcpu(self): def _find_usage_spot_instances(self): """calculate spot instance request usage and update Limits""" logger.debug('Getting spot instance request usage') - try: - res = self.conn.describe_spot_instance_requests() - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'UnsupportedOperation': - return - raise - count = 0 - for req in res['SpotInstanceRequests']: - if req['State'] in ['open', 'active']: - count += 1 - logger.debug('Counting spot instance request %s state=%s', - req['SpotInstanceRequestId'], req['State']) - else: - logger.debug('NOT counting spot instance request %s state=%s', - req['SpotInstanceRequestId'], req['State']) - self.limits['Max spot instance requests per region']._add_current_usage( - count, - aws_type='AWS::EC2::SpotInstanceRequest' - ) + for key in self.instance_family_to_spot_limit_name.keys(): + self.limits[ + self.instance_family_to_spot_limit_name[key] + ]._add_current_usage( + self._get_cloudwatch_usage_latest( + [ + {'Name': 'Type', 'Value': 'Resource'}, + {'Name': 'Resource', 'Value': 'vCPU'}, + {'Name': 'Service', 'Value': 'EC2'}, + {'Name': 'Class', 'Value': '{}/Spot'.format(key)}, + ], + period=300 + ) + ) def _find_usage_spot_fleets(self): """calculate spot fleet request usage and update Limits""" @@ -595,13 +600,56 @@ def _get_limits_spot(self): :rtype: dict """ limits = {} - limits['Max spot instance requests per region'] = AwsLimit( - 'Max spot instance requests per region', + + limits['All F Spot Instance Requests'] = AwsLimit( + 'All F Spot Instance Requests', + self, + 11, + self.warning_threshold, + self.critical_threshold, + limit_subtype='F' + ) + limits['All 
G Spot Instance Requests'] = AwsLimit( + 'All G Spot Instance Requests', + self, + 11, + self.warning_threshold, + self.critical_threshold, + limit_subtype='G' + ) + limits['All Inf Spot Instance Requests'] = AwsLimit( + 'All Inf Spot Instance Requests', + self, + 64, + self.warning_threshold, + self.critical_threshold, + limit_subtype='Inf' + ) + limits['All P Spot Instance Requests'] = AwsLimit( + 'All P Spot Instance Requests', + self, + 16, + self.warning_threshold, + self.critical_threshold, + limit_subtype='P' + ) + limits['All X Spot Instance Requests'] = AwsLimit( + 'All X Spot Instance Requests', + self, + 21, + self.warning_threshold, + self.critical_threshold, + limit_subtype='X' + ) + limits[ + 'All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests' + ] = AwsLimit( + 'All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests', self, - 20, + 1440, self.warning_threshold, self.critical_threshold, - limit_type='Spot instance requests' + limit_subtype='Standard' ) limits['Max active spot fleets per region'] = AwsLimit( @@ -642,7 +690,9 @@ def _find_usage_networking_sgs(self): logger.debug("Getting usage for EC2 VPC resources") sg_count = 0 rules_per_sg = defaultdict(int) - for sg in self.resource_conn.security_groups.all(): + for sg in self.resource_conn.security_groups.filter( + Filters=[{'Name': 'owner-id', 'Values': [self.current_account_id]}] + ): if sg.vpc_id is None: continue sg_count += 1 @@ -756,7 +806,7 @@ def _get_limits_networking(self): limit_type='AWS::EC2::EIP', limit_subtype='AWS::EC2::VPC', ta_service_name='VPC', # TA shows this as VPC not EC2 - quotas_name='Number of EIPs - VPC EIPs' + quotas_name='EC2-VPC Elastic IPs' ) # the EC2 limits screen calls this 'EC2-Classic Elastic IPs' # but Trusted Advisor just calls it 'Elastic IP addresses (EIPs)' @@ -767,7 +817,7 @@ def _get_limits_networking(self): self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::EIP', - quotas_name='Elastic IP addresses for 
EC2-Classic' + quotas_name='EC2-Classic Elastic IPs' ) limits['VPC security groups per elastic network interface'] = AwsLimit( 'VPC security groups per elastic network interface', @@ -804,11 +854,11 @@ def required_iam_permissions(self): "ec2:DescribeSpotFleetInstances", "ec2:DescribeSpotFleetRequestHistory", "ec2:DescribeSpotFleetRequests", - "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeVolumes", "ec2:DescribeVpcs", + "cloudwatch:GetMetricData" ] def _instance_types(self): diff --git a/awslimitchecker/services/ecs.py b/awslimitchecker/services/ecs.py index aa7fef86..977c2e68 100644 --- a/awslimitchecker/services/ecs.py +++ b/awslimitchecker/services/ecs.py @@ -51,6 +51,7 @@ class _EcsService(_AwsService): service_name = 'ECS' api_name = 'ecs' # AWS API name to connect to (boto3.client) + quotas_service_code = 'ecs' def find_usage(self): """ @@ -208,7 +209,9 @@ def get_limits(self): self.warning_threshold, self.critical_threshold, limit_type='AWS::ECS::TaskDefinition', - limit_subtype='Fargate' + limit_subtype='Fargate', + quotas_name='Fargate On-Demand resource count', + quotas_service_code='fargate' ) limits['Fargate Spot resource count'] = AwsLimit( 'Fargate Spot resource count', @@ -217,7 +220,9 @@ def get_limits(self): self.warning_threshold, self.critical_threshold, limit_type='AWS::ECS::TaskDefinition', - limit_subtype='FargateSpot' + limit_subtype='FargateSpot', + quotas_name='Fargate Spot resource count', + quotas_service_code='fargate' ) self.limits = limits return limits diff --git a/awslimitchecker/services/vpc.py b/awslimitchecker/services/vpc.py index 83feb37b..0080fe5b 100644 --- a/awslimitchecker/services/vpc.py +++ b/awslimitchecker/services/vpc.py @@ -114,8 +114,21 @@ def _find_usage_ACLs(self): )['NetworkAcls']: acls[acl['VpcId']] += 1 # Rules per network ACL + egress_ipv4 = sum(map( + lambda x: x["Egress"] and "CidrBlock" in x, acl['Entries'] + )) + ingress_ipv4 = sum(map( + lambda x: 
not x["Egress"] and "CidrBlock" in x, acl['Entries'] + )) + egress_ipv6 = sum(map( + lambda x: x["Egress"] and "Ipv6CidrBlock" in x, acl['Entries'] + )) + ingress_ipv6 = sum(map( + lambda x: not x["Egress"] and "Ipv6CidrBlock" in x, + acl['Entries'] + )) self.limits['Rules per network ACL']._add_current_usage( - len(acl['Entries']), + max(egress_ipv4, ingress_ipv4, egress_ipv6, ingress_ipv6), aws_type='AWS::EC2::NetworkAcl', resource_id=acl['NetworkAclId'] ) diff --git a/awslimitchecker/tests/metrics/test_datadog.py b/awslimitchecker/tests/metrics/test_datadog.py index 4bf06a97..77ebf592 100644 --- a/awslimitchecker/tests/metrics/test_datadog.py +++ b/awslimitchecker/tests/metrics/test_datadog.py @@ -76,6 +76,53 @@ def test_happy_path(self): 'region:foo', 'foo', 'bar', 'baz:blam' ] assert cls._http == mock_http + assert cls._host == 'https://api.datadoghq.com' + assert m_pm.mock_calls == [call()] + assert m_va.mock_calls == [call(cls, '1234')] + + @patch.dict('os.environ', {}, clear=True) + def test_host_param(self): + mock_http = Mock() + with patch('%s.urllib3.PoolManager' % pbm, autospec=True) as m_pm: + m_pm.return_value = mock_http + with patch('%s._validate_auth' % pb, autospec=True) as m_va: + cls = Datadog( + 'foo', api_key='1234', extra_tags='foo,bar,baz:blam', + host='http://foo.bar' + ) + assert cls._region_name == 'foo' + assert cls._duration == 0.0 + assert cls._limits == [] + assert cls._api_key == '1234' + assert cls._prefix == 'awslimitchecker.' 
+ assert cls._tags == [ + 'region:foo', 'foo', 'bar', 'baz:blam' + ] + assert cls._http == mock_http + assert cls._host == 'http://foo.bar' + assert m_pm.mock_calls == [call()] + assert m_va.mock_calls == [call(cls, '1234')] + + @patch.dict('os.environ', {'DATADOG_HOST': 'http://dd.host'}, clear=True) + def test_host_env_var(self): + mock_http = Mock() + with patch('%s.urllib3.PoolManager' % pbm, autospec=True) as m_pm: + m_pm.return_value = mock_http + with patch('%s._validate_auth' % pb, autospec=True) as m_va: + cls = Datadog( + 'foo', api_key='1234', extra_tags='foo,bar,baz:blam', + host='http://foo.bar' + ) + assert cls._region_name == 'foo' + assert cls._duration == 0.0 + assert cls._limits == [] + assert cls._api_key == '1234' + assert cls._prefix == 'awslimitchecker.' + assert cls._tags == [ + 'region:foo', 'foo', 'bar', 'baz:blam' + ] + assert cls._http == mock_http + assert cls._host == 'http://dd.host' assert m_pm.mock_calls == [call()] assert m_va.mock_calls == [call(cls, '1234')] @@ -97,6 +144,7 @@ def test_api_key_env_var(self): 'region:foo' ] assert cls._http == mock_http + assert cls._host == 'https://api.datadoghq.com' assert m_pm.mock_calls == [call()] assert m_va.mock_calls == [call(cls, '5678')] @@ -122,6 +170,7 @@ def setup(self): with patch('%s.__init__' % pb) as m_init: m_init.return_value = None self.cls = Datadog() + self.cls._host = 'https://api.datadoghq.com' class TestValidateAuth(DatadogTester): @@ -139,6 +188,20 @@ def test_happy_path(self): ) ] + def test_non_default_host(self): + mock_http = Mock() + mock_resp = Mock(status=200, data=b'{"success": "ok"}') + mock_http.request.return_value = mock_resp + self.cls._http = mock_http + self.cls._host = 'http://my.host' + self.cls._validate_auth('1234') + assert mock_http.mock_calls == [ + call.request( + 'GET', + 'http://my.host/api/v1/validate?api_key=1234' + ) + ] + def test_failure(self): mock_http = Mock() mock_resp = Mock(status=401, data='{"success": "NO"}') @@ -235,11 +298,12 @@ def 
test_happy_path(self): assert json.loads(c[2]['body'].decode()) == expected @freeze_time("2016-12-16 10:40:42", tz_offset=0, auto_tick_seconds=6) - def test_api_error(self): + def test_api_error_non_default_host(self): self.cls._prefix = 'prefix.' self.cls._tags = ['tag1', 'tag:2'] self.cls._limits = [] self.cls._api_key = 'myKey' + self.cls._host = 'http://my.host' self.cls.set_run_duration(123.45) limA = Mock( name='limitA', service=Mock(service_name='SVC1') @@ -298,7 +362,7 @@ def test_api_error(self): c = mock_http.mock_calls[0] assert c[0] == 'request' assert c[1] == ( - 'POST', 'https://api.datadoghq.com/api/v1/series?api_key=myKey' + 'POST', 'http://my.host/api/v1/series?api_key=myKey' ) assert len(c[2]) == 2 assert c[2]['headers'] == {'Content-type': 'application/json'} diff --git a/awslimitchecker/tests/services/result_fixtures.py b/awslimitchecker/tests/services/result_fixtures.py index 4c7324e2..93c68929 100644 --- a/awslimitchecker/tests/services/result_fixtures.py +++ b/awslimitchecker/tests/services/result_fixtures.py @@ -371,12 +371,72 @@ class VPC(object): { 'NetworkAclId': 'acl-2', 'VpcId': 'vpc-1', - 'Entries': [1], + 'Entries': [ + { + 'Egress': True, + 'CidrBlock': 'string' + }, + { + 'Egress': True, + 'Ipv6CidrBlock': 'string' + }, + { + 'Egress': False, + 'CidrBlock': 'string' + }, + ], }, { 'NetworkAclId': 'acl-3', 'VpcId': 'vpc-2', - 'Entries': [1, 2, 3, 4, 5], + 'Entries': [ + { + 'Egress': True, + 'Ipv6CidrBlock': 'string' + }, + { + 'Egress': False, + 'CidrBlock': 'string' + }, + { + 'Egress': True, + 'Ipv6CidrBlock': 'string' + }, + { + 'Egress': True, + 'Ipv6CidrBlock': 'string' + }, + { + 'Egress': True, + 'Ipv6CidrBlock': 'string' + } + ], + }, + { + 'NetworkAclId': 'acl-4', + 'VpcId': 'vpc-1', + 'Entries': [ + { + 'Egress': False, + 'Ipv6CidrBlock': 'string' + }, + { + 'Egress': False, + 'CidrBlock': 'string' + }, + { + 'Egress': False, + 'Ipv6CidrBlock': 'string' + }, + { + 'Egress': True, + 'Ipv6CidrBlock': 'string' + }, + { + 
'Egress': False, + 'Ipv6CidrBlock': 'string' + } + ], }, ] } @@ -1303,6 +1363,862 @@ class ElasticBeanstalk(object): } +class CloudFront(object): + test_find_usage_distributions_empty = { + 'DistributionList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 0 + } + } + test_find_usage_distributions = { + 'DistributionList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 2, + 'Items': [ + { + 'Id': 'ID-DISTRIBUTION-000', + 'Aliases': { + 'Quantity': 3, + 'Items': [ + 'string1', 'string2', 'string3' + ] + }, + }, + { + 'Id': 'ID-DISTRIBUTION-001', + 'DefaultCacheBehavior': {}, + 'CacheBehaviors': { + 'Quantity': 4, + 'Items': [ + {'PathPattern': '', 'TargetOriginId': 'string'}, + {'PathPattern': '', 'TargetOriginId': 'string'}, + {'PathPattern': '', 'TargetOriginId': 'string'}, + {'PathPattern': '', 'TargetOriginId': 'string'}, + ] + } + }, + { + 'Id': 'ID-DISTRIBUTION-002', + 'Origins': { + 'Quantity': 3, + 'Items': [ + {'Id': 'string'}, + {'Id': 'string'}, + {'Id': 'string'}, + ] + }, + }, + { + 'Id': 'ID-DISTRIBUTION-003', + 'OriginGroups': { + 'Quantity': 1, + 'Items': [ + { + 'Id': 'string', + 'FailoverCriteria': {}, + 'Members': {} + }, + ] + }, + }, + { + 'Id': 'ID-DISTRIBUTION-100', + 'ARN': 'string', + 'Status': 'string', + 'LastModifiedTime': datetime(2015, 1, 1), + 'DomainName': 'string', + 'Aliases': { + 'Quantity': 2, + 'Items': [ + 'string1', + 'string2', + ] + }, + 'Origins': { + 'Quantity': 123, + 'Items': [ + { + 'Id': 'string', + 'DomainName': 'string', + 'OriginPath': 'string', + 'CustomHeaders': { + 'Quantity': 123, + 'Items': [ + { + 'HeaderName': 'string', + 'HeaderValue': 'string' + }, + ] + }, + 'S3OriginConfig': { + 'OriginAccessIdentity': 'string' + }, + 'CustomOriginConfig': { + 'HTTPPort': 123, + 'HTTPSPort': 123, + 'OriginProtocolPolicy': 'https-only', + 'OriginSslProtocols': { + 'Quantity': 123, + 'Items': [ + 'SSLv3', 
'TLSv1', 'TLSv1.1', + 'TLSv1.2', + ] + }, + 'OriginReadTimeout': 123, + 'OriginKeepaliveTimeout': 123 + }, + 'ConnectionAttempts': 123, + 'ConnectionTimeout': 123, + 'OriginShield': { + 'Enabled': False, + 'OriginShieldRegion': 'string' + } + }, + ] + }, + 'OriginGroups': { + 'Quantity': 123, + 'Items': [ + { + 'Id': 'string', + 'FailoverCriteria': { + 'StatusCodes': { + 'Quantity': 123, + 'Items': [ + 123, + ] + } + }, + 'Members': { + 'Quantity': 123, + 'Items': [ + { + 'OriginId': 'string' + }, + ] + } + }, + ] + }, + 'DefaultCacheBehavior': { + 'TargetOriginId': 'string', + 'TrustedSigners': { + 'Enabled': False, + 'Quantity': 123, + 'Items': [ + 'string', + ] + }, + 'TrustedKeyGroups': { + 'Enabled': False, + 'Quantity': 123, + 'Items': [ + 'string', + ] + }, + 'ViewerProtocolPolicy': 'https-only', + 'AllowedMethods': { + 'Quantity': 123, + 'Items': [ + 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', + 'OPTIONS', 'DELETE', + ], + 'CachedMethods': { + 'Quantity': 123, + 'Items': [ + 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', + 'OPTIONS', 'DELETE', + ] + } + }, + 'SmoothStreaming': False, + 'Compress': False, + 'LambdaFunctionAssociations': { + 'Quantity': 123, + 'Items': [ + { + 'LambdaFunctionARN': 'string', + 'EventType': 'viewer-request', + 'IncludeBody': True + }, + ] + }, + 'FunctionAssociations': { + 'Quantity': 123, + 'Items': [ + { + 'FunctionARN': 'string', + 'EventType': 'viewer-request' + }, + ] + }, + 'FieldLevelEncryptionId': 'string', + 'RealtimeLogConfigArn': 'string', + 'CachePolicyId': 'string', + 'OriginRequestPolicyId': 'string', + 'ForwardedValues': { + 'QueryString': True, + 'Cookies': { + 'Forward': 'whitelist', + 'WhitelistedNames': { + 'Quantity': 123, + 'Items': [ + 'string', + ] + } + }, + 'Headers': { + 'Quantity': 123, + 'Items': [ + 'string', + ] + }, + 'QueryStringCacheKeys': { + 'Quantity': 123, + 'Items': [ + 'string', + ] + } + }, + 'MinTTL': 123, + 'DefaultTTL': 123, + 'MaxTTL': 123 + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ 
+ { + 'PathPattern': 'string', + 'TargetOriginId': 'string', + 'TrustedSigners': { + 'Enabled': True, + 'Quantity': 123, + 'Items': [ + 'string', + ] + }, + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 123, + 'Items': [ + 'string', + ] + }, + 'ViewerProtocolPolicy': 'https-only', + 'AllowedMethods': { + 'Quantity': 123, + 'Items': [ + 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', + 'OPTIONS', 'DELETE', + ], + 'CachedMethods': { + 'Quantity': 123, + 'Items': [ + 'GET', 'HEAD', 'POST', 'PUT', + 'PATCH', 'OPTIONS', 'DELETE', + ] + } + }, + 'SmoothStreaming': False, + 'Compress': False, + 'LambdaFunctionAssociations': { + 'Quantity': 123, + 'Items': [ + { + 'LambdaFunctionARN': 'string', + 'EventType': 'viewer-request', + 'IncludeBody': True + }, + ] + }, + 'FunctionAssociations': { + 'Quantity': 123, + 'Items': [ + { + 'FunctionARN': 'string', + 'EventType': 'viewer-request' + }, + ] + }, + 'FieldLevelEncryptionId': 'string', + 'RealtimeLogConfigArn': 'string', + 'CachePolicyId': 'string', + 'OriginRequestPolicyId': 'string', + 'ForwardedValues': { + 'QueryString': True, + 'Cookies': { + 'Forward': 'whitelist', + 'WhitelistedNames': { + 'Quantity': 123, + 'Items': [ + 'string', + ] + } + }, + 'Headers': { + 'Quantity': 123, + 'Items': [ + 'string', + ] + }, + 'QueryStringCacheKeys': { + 'Quantity': 123, + 'Items': [ + 'string', + ] + } + }, + 'MinTTL': 123, + 'DefaultTTL': 123, + 'MaxTTL': 123 + }, + ] + }, + 'CustomErrorResponses': { + 'Quantity': 123, + 'Items': [ + { + 'ErrorCode': 123, + 'ResponsePagePath': 'string', + 'ResponseCode': 'string', + 'ErrorCachingMinTTL': 123 + }, + ] + }, + 'Comment': 'string', + 'PriceClass': 'PriceClass_100', + 'Enabled': True, + 'ViewerCertificate': { + 'CloudFrontDefaultCertificate': True, + 'IAMCertificateId': 'string', + 'ACMCertificateArn': 'string', + 'SSLSupportMethod': 'sni-only', + 'MinimumProtocolVersion': 'SSLv3', + 'Certificate': 'string', + 'CertificateSource': 'cloudfront' + }, + 'Restrictions': { + 
'GeoRestriction': { + 'RestrictionType': 'whitelist', + 'Quantity': 123, + 'Items': [ + 'string', + ] + } + }, + 'WebACLId': 'string', + 'HttpVersion': 'http1.1', + 'IsIPV6Enabled': False, + 'AliasICPRecordals': [ + { + 'CNAME': 'string', + 'ICPRecordalStatus': 'APPROVED' + }, + ] + }, + ] + } + } + + test_find_usage_distributions_keygroups = { + 'DistributionList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 2, + 'Items': [ + { + 'Id': 'ID-DISTRIBUTION-001', + 'DefaultCacheBehavior': { + 'TrustedKeyGroups': { + 'Enabled': False, + 'Quantity': 2, + 'Items': [ + 'A', 'B', + ] + }, + }, + 'CacheBehaviors': { + 'Quantity': 2, + 'Items': [ + { + 'PathPattern': 'path01', + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 0, + 'Items': [] + } + }, + { + 'PathPattern': 'path02', + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 3, + 'Items': ['A', 'B', 'C'] + }, + } + ] + } + }, + ] + } + } + + test_find_usage_keygroups = { + 'KeyGroupList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 2, + 'Items': [ + { + 'KeyGroup': { + 'Id': 'kg01', + 'LastModifiedTime': datetime(2015, 1, 1), + 'KeyGroupConfig': { + 'Name': 'string', + 'Items': ['key01', 'key02', 'key03', 'key04'], + 'Comment': 'string' + } + } + }, + { + 'KeyGroup': { + 'Id': 'kg02', + 'LastModifiedTime': datetime(2015, 1, 1), + 'KeyGroupConfig': {} + } + }, + ] + } + } + + test_find_usage_keygroups_empty = { + 'KeyGroupList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 0, + } + } + + test_find_usage_origin_access_identities = { + 'CloudFrontOriginAccessIdentityList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 3, + 'Items': [ + {'Id': 'oai01', 'S3CanonicalUserId': 'string', 'Comment': ''}, + {'Id': 'oai02', 'S3CanonicalUserId': 'string', 'Comment': ''}, + {'Id': 'oai03', 'S3CanonicalUserId': 'string', 'Comment': ''} + ] + } + } + + 
test_find_usage_origin_access_identities_empty = { + 'CloudFrontOriginAccessIdentityList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 0, + } + } + + test_find_usage_cache_policies = { + 'CachePolicyList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 4, + 'Items': [ + { + 'Type': 'custom', + 'CachePolicy': {'Id': 'CP01', 'CachePolicyConfig': {}} + }, + { + 'Type': 'custom', + 'CachePolicy': {'Id': 'CP02', 'CachePolicyConfig': {}} + }, + { + 'Type': 'custom', + 'CachePolicy': {'Id': 'CP03', 'CachePolicyConfig': {}} + }, + { + 'Type': 'custom', + 'CachePolicy': {'Id': 'CP04', 'CachePolicyConfig': {}} + }, + ] + } + } + + test_find_usage_cache_policies_empty = { + 'CachePolicyList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 0 + } + } + + test_find_usage_cache_policies_config = { + 'CachePolicyList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 123, + 'Items': [ + { + 'Type': 'custom', + 'CachePolicy': { + 'Id': 'CP01', + 'LastModifiedTime': datetime(2015, 1, 1), + 'CachePolicyConfig': { + 'Comment': 'string', + 'Name': 'string', + 'DefaultTTL': 123, + 'MaxTTL': 123, + 'MinTTL': 123, + 'ParametersInCacheKeyAndForwardedToOrigin': { + 'EnableAcceptEncodingGzip': True, + 'EnableAcceptEncodingBrotli': True, + 'HeadersConfig': { + 'HeaderBehavior': 'whitelist', + 'Headers': { + 'Quantity': 3, + 'Items': ['a', 'b', 'c'] + }, + }, + 'CookiesConfig': { + 'CookieBehavior': 'whitelist', + 'Cookies': { + 'Quantity': 2, + 'Items': ['1', '2'] + } + }, + 'QueryStringsConfig': { + 'QueryStringBehavior': 'allExcept', + 'QueryStrings': { + 'Quantity': 1, + 'Items': ['string'] + } + } + } + } + } + }, + ] + } + } + + test_find_usage_origin_request_policies = { + 'OriginRequestPolicyList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 2, + 'Items': [ + { + 'Type': 'custom', + 'OriginRequestPolicy': { + 'Id': 'ORP01', 'OriginRequestPolicyConfig': {} + } + }, + { 
+ 'Type': 'custom', + 'OriginRequestPolicy': { + 'Id': 'ORP02', 'OriginRequestPolicyConfig': {} + } + } + ] + } + } + + test_find_usage_origin_request_policies_empty = { + 'OriginRequestPolicyList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 0 + } + } + + test_find_usage_origin_request_policies_config = { + 'OriginRequestPolicyList': { + 'NextMarker': 'string', + 'MaxItems': 123, + 'Quantity': 123, + 'Items': [ + { + 'Type': 'custom', + 'OriginRequestPolicy': { + 'Id': 'ORP01', + 'LastModifiedTime': datetime(2015, 1, 1), + 'OriginRequestPolicyConfig': { + 'Comment': 'string', + 'Name': 'string', + 'HeadersConfig': { + 'HeaderBehavior': 'whitelist', + 'Headers': { + 'Quantity': 3, + 'Items': ['a', 'b', 'c'] + }, + }, + 'CookiesConfig': { + 'CookieBehavior': 'whitelist', + 'Cookies': { + 'Quantity': 2, + 'Items': ['1', '2'] + } + }, + 'QueryStringsConfig': { + 'QueryStringBehavior': 'allExcept', + 'QueryStrings': { + 'Quantity': 1, + 'Items': ['string'] + } + } + } + } + }, + ] + } + } + + test_find_usage_per_cache_behavior = { + 'DistributionList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 1, + 'Items': [ + { + 'Id': 'ID-DISTRIBUTION-100', + 'DefaultCacheBehavior': { + 'ForwardedValues': { + 'QueryString': True, + 'Cookies': { + 'Forward': 'whitelist', + 'WhitelistedNames': { + 'Quantity': 3, + 'Items': ['a', 'b', 'c'] + } + }, + 'Headers': { + 'Quantity': 4, + 'Items': ['a', 'b', 'c', 'd'] + }, + 'QueryStringCacheKeys': { + 'Quantity': 5, + 'Items': ['a', 'b', 'c', 'd', 'e'] + } + }, + }, + 'CacheBehaviors': { + 'Quantity': 1, + 'Items': [ + { + 'PathPattern': 'path01', + 'ForwardedValues': { + 'QueryString': True, + 'Cookies': { + 'Forward': 'whitelist', + 'WhitelistedNames': { + 'Quantity': 1, + 'Items': [''] + } + }, + 'Headers': { + 'Quantity': 2, + 'Items': ['', ''] + }, + 'QueryStringCacheKeys': { + 'Quantity': 3, + 'Items': ['', '', ''] + } + } + }, + ] + }, + }, + ] + } + } + 
+ test_find_usage_distributions_per_key_group = { + 'DistributionList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 2, + 'Items': [ + { + 'Id': 'ID-DISTRIBUTION-001', + 'DefaultCacheBehavior': { + 'TrustedKeyGroups': { + 'Enabled': False, + 'Quantity': 123, + 'Items': ['A', 'B', 'C'] + }, + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ + { + 'PathPattern': 'string', + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 123, + 'Items': ['A', 'B'] + }, + }, + { + 'PathPattern': 'string', + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 123, + 'Items': ['A'] + }, + }, + ] + } + }, + { + 'Id': 'ID-DISTRIBUTION-002', + 'DefaultCacheBehavior': { + 'TrustedKeyGroups': { + 'Enabled': False, + 'Quantity': 123, + 'Items': ['A'] + }, + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ + { + 'PathPattern': 'string', + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 123, + 'Items': [] + }, + }, + { + 'PathPattern': 'string', + 'TrustedKeyGroups': { + 'Enabled': True, + 'Quantity': 123, + 'Items': [] + }, + }, + ] + } + } + ] + } + } + + test_find_usage_distributions_per_cache_policy = { + 'DistributionList': { + 'Marker': 'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 2, + 'Items': [ + { + 'Id': 'ID-DISTRIBUTION-001', + 'DefaultCacheBehavior': { + 'CachePolicyId': 'A', + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ + { + 'PathPattern': 'path01', + 'CachePolicyId': 'A', + }, + { + 'PathPattern': 'path02', + 'CachePolicyId': 'B', + }, + ] + } + }, + { + 'Id': 'ID-DISTRIBUTION-002', + 'DefaultCacheBehavior': { + 'CachePolicyId': 'D', + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ + { + 'PathPattern': 'path01', + 'CachePolicyId': 'A', + }, + { + 'PathPattern': 'path02', + 'CachePolicyId': 'C', + }, + ] + } + } + ] + } + } + + test_find_usage_distributions_per_origin_req_policy = { + 'DistributionList': { + 'Marker': 
'string', + 'NextMarker': 'string', + 'MaxItems': 123, + 'IsTruncated': False, + 'Quantity': 2, + 'Items': [ + { + 'Id': 'ID-DISTRIBUTION-001', + 'DefaultCacheBehavior': { + 'OriginRequestPolicyId': 'A', + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ + { + 'PathPattern': 'path01', + 'OriginRequestPolicyId': 'A', + }, + { + 'PathPattern': 'path02', + 'OriginRequestPolicyId': 'B', + }, + ] + } + }, + { + 'Id': 'ID-DISTRIBUTION-002', + 'DefaultCacheBehavior': { + 'OriginRequestPolicyId': 'A', + }, + 'CacheBehaviors': { + 'Quantity': 123, + 'Items': [ + { + 'PathPattern': 'path01', + 'OriginRequestPolicyId': 'C', + }, + ] + } + } + ] + } + } + + class ELB(object): test_find_usage = { @@ -4724,3 +5640,26 @@ class EKS(object): } } ] + + +class CertificateManager(object): + test_find_usage_certificates_empty = { + } + + test_find_usage_certificates = { + 'NextToken': 'string', + 'CertificateSummaryList': [ + { + 'CertificateArn': 'string1', + 'DomainName': 'string1' + }, + { + 'CertificateArn': 'string2', + 'DomainName': 'string2' + }, + { + 'CertificateArn': 'string3', + 'DomainName': 'string3' + }, + ] + } diff --git a/awslimitchecker/tests/services/test_base.py b/awslimitchecker/tests/services/test_base.py index 893577d3..12c8b7e6 100644 --- a/awslimitchecker/tests/services/test_base.py +++ b/awslimitchecker/tests/services/test_base.py @@ -527,8 +527,8 @@ def test_defaults(self): } } ], - StartTime=datetime(2020, 9, 22, 11, 26, 00), - EndTime=datetime(2020, 9, 22, 12, 26, 00), + StartTime=datetime(2020, 9, 22, 11, 25, 00), + EndTime=datetime(2020, 9, 22, 12, 25, 00), ScanBy='TimestampDescending', MaxDatapoints=1 ) @@ -596,8 +596,8 @@ def test_non_default(self): } } ], - StartTime=datetime(2020, 9, 22, 11, 26, 00), - EndTime=datetime(2020, 9, 22, 12, 26, 00), + StartTime=datetime(2020, 9, 22, 11, 25, 00), + EndTime=datetime(2020, 9, 22, 12, 25, 00), ScanBy='TimestampDescending', MaxDatapoints=1 ) @@ -652,8 +652,8 @@ def test_exception(self): } } ], - 
StartTime=datetime(2020, 9, 22, 11, 26, 00), - EndTime=datetime(2020, 9, 22, 12, 26, 00), + StartTime=datetime(2020, 9, 22, 11, 25, 00), + EndTime=datetime(2020, 9, 22, 12, 25, 00), ScanBy='TimestampDescending', MaxDatapoints=1 ) @@ -711,8 +711,8 @@ def test_no_data(self): } } ], - StartTime=datetime(2020, 9, 22, 11, 26, 00), - EndTime=datetime(2020, 9, 22, 12, 26, 00), + StartTime=datetime(2020, 9, 22, 11, 25, 00), + EndTime=datetime(2020, 9, 22, 12, 25, 00), ScanBy='TimestampDescending', MaxDatapoints=1 ) @@ -778,8 +778,8 @@ def test_no_values(self): } } ], - StartTime=datetime(2020, 9, 22, 11, 26, 00), - EndTime=datetime(2020, 9, 22, 12, 26, 00), + StartTime=datetime(2020, 9, 22, 11, 25, 00), + EndTime=datetime(2020, 9, 22, 12, 25, 00), ScanBy='TimestampDescending', MaxDatapoints=1 ) diff --git a/awslimitchecker/tests/services/test_certificatemanager.py b/awslimitchecker/tests/services/test_certificatemanager.py new file mode 100644 index 00000000..f105551b --- /dev/null +++ b/awslimitchecker/tests/services/test_certificatemanager.py @@ -0,0 +1,173 @@ +""" +awslimitchecker/tests/services/test_certificatemanager.py + +The latest version of this package is available at: + + +################################################################################ +Copyright 2015-2018 Jason Antman + + This file is part of awslimitchecker, also known as awslimitchecker. + + awslimitchecker is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + awslimitchecker is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with awslimitchecker. If not, see . + +The Copyright and Authors attributions contained herein may not be removed or +otherwise altered, except to add the Author attribution of a contributor to +this work. (Additional Terms pursuant to Section 7b of the AGPL v3) +################################################################################ +While not legally required, I sincerely request that anyone who finds +bugs please submit them at or +to me via email, and that you send any contributions or improvements +either as a pull request on GitHub, or to me via email. +################################################################################ + +AUTHORS: +Jason Antman +################################################################################ +""" + +import sys +from awslimitchecker.tests.services import result_fixtures +from awslimitchecker.services.certificatemanager import \ + _CertificatemanagerService + +# https://code.google.com/p/mock/issues/detail?id=249 +# py>=3.4 should use unittest.mock not the mock package on pypi +if ( + sys.version_info[0] < 3 or + sys.version_info[0] == 3 and sys.version_info[1] < 4 +): + from mock import patch, call, Mock, DEFAULT +else: + from unittest.mock import patch, call, Mock, DEFAULT + + +pbm = 'awslimitchecker.services.certificatemanager' # module patch base +pb = '%s._CertificatemanagerService' % pbm # class patch pase + + +class Test_CertificatemanagerService(object): + + def test_init(self): + """test __init__()""" + cls = _CertificatemanagerService(21, 43, {}, None) + assert cls.service_name == 'CertificateManager' + assert cls.api_name == 'acm' + assert cls.conn is None + assert cls.warning_threshold == 21 + assert cls.critical_threshold == 43 + + def test_get_limits(self): + cls = _CertificatemanagerService(21, 43, {}, None) + cls.limits = {} + res = cls.get_limits() + assert sorted(res.keys()) == sorted([ + 'ACM 
certificates', + ]) + for name, limit in res.items(): + assert limit.service == cls + assert limit.def_warning_threshold == 21 + assert limit.def_critical_threshold == 43 + + def test_get_limits_again(self): + """test that existing limits dict is returned on subsequent calls""" + mock_limits = Mock() + cls = _CertificatemanagerService(21, 43, {}, None) + cls.limits = mock_limits + res = cls.get_limits() + assert res == mock_limits + + def test_find_usage(self): + """ + Test overall find_usage method + Check that find_usage() method calls the other methods. + """ + with patch.multiple( + pb, + connect=DEFAULT, + _find_usage_certificates=DEFAULT, + autospec=True + ) as mocks: + cls = _CertificatemanagerService(21, 43, {}, None) + assert cls._have_usage is False + cls.find_usage() + + assert cls._have_usage is True + assert len(mocks) == 2 + # the other methods should have been called + for x in [ + "_find_usage_certificates" + ]: + assert mocks[x].mock_calls == [call(cls)] + + def test_find_usage_certificates_empty(self): + """ + Verify the correctness of usage (when there are no certificates) + This test mocks the AWS list_certificates response (after pagination). 
+ """ + # Setup the mock and call the tested function + resp = result_fixtures.CertificateManager\ + .test_find_usage_certificates_empty + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CertificatemanagerService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = resp + cls._find_usage_certificates() + + # Check that usage values are correctly set + assert len( + cls.limits["ACM certificates"].get_current_usage() + ) == 1 + assert ( + cls.limits["ACM certificates"].get_current_usage()[0] + .get_value() == 0 + ) + assert ( + cls.limits["ACM certificates"].get_current_usage()[0] + .resource_id is None + ) + + def test_find_usage_certificates(self): + """ + Verify the correctness of usage + This test mocks the AWS list_certificates response (after pagination). + """ + # Setup the mock and call the tested function + resp = result_fixtures.CertificateManager.test_find_usage_certificates + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CertificatemanagerService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = resp + cls._find_usage_certificates() + + # Check that usage values are correctly set + assert len( + cls.limits["ACM certificates"].get_current_usage() + ) == 1 + assert ( + cls.limits["ACM certificates"].get_current_usage()[0] + .get_value() == 3 + ) + assert ( + cls.limits["ACM certificates"].get_current_usage()[0] + .resource_id is None + ) + + def test_required_iam_permissions(self): + cls = _CertificatemanagerService(21, 43, {}, None) + assert cls.required_iam_permissions() == [ + "acm:ListCertificates" + ] diff --git a/awslimitchecker/tests/services/test_cloudfront.py b/awslimitchecker/tests/services/test_cloudfront.py new file mode 100644 index 00000000..1ffef17c --- /dev/null +++ b/awslimitchecker/tests/services/test_cloudfront.py @@ -0,0 +1,710 @@ +""" +awslimitchecker/tests/services/test_cloudfront.py + +The latest version of this package 
is available at: + + +################################################################################ +Copyright 2015-2018 Jason Antman + + This file is part of awslimitchecker, also known as awslimitchecker. + + awslimitchecker is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + awslimitchecker is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with awslimitchecker. If not, see . + +The Copyright and Authors attributions contained herein may not be removed or +otherwise altered, except to add the Author attribution of a contributor to +this work. (Additional Terms pursuant to Section 7b of the AGPL v3) +################################################################################ +While not legally required, I sincerely request that anyone who finds +bugs please submit them at or +to me via email, and that you send any contributions or improvements +either as a pull request on GitHub, or to me via email. 
+################################################################################ + +AUTHORS: +Jason Antman +################################################################################ +""" + +import sys +from awslimitchecker.tests.services import result_fixtures +from awslimitchecker.services.cloudfront import _CloudfrontService + +# https://code.google.com/p/mock/issues/detail?id=249 +# py>=3.4 should use unittest.mock not the mock package on pypi +if sys.version_info[0] < 3 or sys.version_info[0] == 3 \ + and sys.version_info[1] < 4: + from mock import patch, call, Mock, DEFAULT +else: + from unittest.mock import patch, call, Mock, DEFAULT + +pbm = "awslimitchecker.services.cloudfront" # module patch base +pb = "%s._CloudfrontService" % pbm # class patch pase + + +class Test_CloudfrontService(object): + def test_init(self): + """test __init__()""" + cls = _CloudfrontService(21, 43, {}, None) + assert cls.service_name == "CloudFront" + assert cls.api_name == "cloudfront" + assert cls.quotas_service_code == "cloudfront" + assert cls.conn is None + assert cls.warning_threshold == 21 + assert cls.critical_threshold == 43 + + def test_get_limits(self): + cls = _CloudfrontService(21, 43, {}, None) + cls.limits = {} + res = cls.get_limits() + assert sorted(res.keys()) == sorted( + [ + "Alternate domain names (CNAMEs) per distribution", + "Cache behaviors per distribution", + "Distributions per AWS account", + "Origins per distribution", + "Origin groups per distribution", + "Key groups associated with a single distribution", + "Key groups associated with a single cache behavior", + "Key groups per AWS account", + "Origin access identities per account", + "Cache policies per AWS account", + "Origin request policies per AWS account", + "Whitelisted cookies per cache behavior", + "Whitelisted headers per cache behavior", + "Whitelisted query strings per cache behavior", + "Cookies per cache policy", + "Headers per cache policy", + "Query strings per cache policy", + 
"Cookies per origin request policy", + "Headers per origin request policy", + "Query strings per origin request policy", + "Public keys in a single key group", + "Distributions associated with a single key group", + "Distributions associated with the same cache policy", + "Distributions associated with the same origin request policy" + ] + ) + for name, limit in res.items(): + assert limit.service == cls + assert limit.def_warning_threshold == 21 + assert limit.def_critical_threshold == 43 + + def test_get_limits_again(self): + """test that existing limits dict is returned on subsequent calls""" + mock_limits = Mock() + cls = _CloudfrontService(21, 43, {}, None) + cls.limits = mock_limits + res = cls.get_limits() + assert res == mock_limits + + def test_find_usage(self): + """ + Check that find_usage() method calls other methods. + """ + with patch.multiple( + pb, + connect=DEFAULT, + _find_usage_distributions=DEFAULT, + _find_usage_keygroups=DEFAULT, + _find_usage_origin_access_identities=DEFAULT, + _find_usage_cache_policies=DEFAULT, + _find_usage_origin_request_policies=DEFAULT, + autospec=True + ) as mocks: + cls = _CloudfrontService(21, 43, {}, None) + assert cls._have_usage is False + cls.find_usage() + + assert cls._have_usage is True + assert len(mocks) == 6 + # other methods should have been called + for x in [ + "_find_usage_distributions", + "_find_usage_keygroups", + "_find_usage_origin_access_identities", + "_find_usage_cache_policies", + "_find_usage_origin_request_policies" + ]: + assert mocks[x].mock_calls == [call(cls)] + + def test_find_usage_distributions_empty(self): + """ + Verify the correctness of usage (when there are no distributions) + This test mocks the AWS list_distributions response (after pagination). 
+ """ + # Setup the mock and call the tested function + resp = result_fixtures.CloudFront.test_find_usage_distributions_empty + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = resp + cls._find_usage_distributions() + + # Check that usage values are correctly set + assert len( + cls.limits["Distributions per AWS account"].get_current_usage() + ) == 1 + assert ( + cls.limits["Distributions per AWS account"].get_current_usage()[0] + .get_value() == 0 + ) + assert ( + cls.limits["Distributions per AWS account"].get_current_usage()[0] + .resource_id is None + ) + + def test_find_usage_distributions(self): + """ + Verify the correctness of usage (basic per-distribution limits) + This test mocks the AWS list_distributions response (after pagination) + """ + # Setup the mock and call the tested function + response = result_fixtures.CloudFront.test_find_usage_distributions + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = response + cls._find_usage_distributions() + + expected_nb_distributions = len( + response['DistributionList']['Items']) + + # Check that usage values are correctly set + limit = "Distributions per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() \ + == expected_nb_distributions + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + limit = "Alternate domain names (CNAMEs) per distribution" + assert len(cls.limits[limit].get_current_usage()) \ + == expected_nb_distributions + assert cls.limits[limit].get_current_usage()[0].resource_id \ + == "ID-DISTRIBUTION-000" + assert cls.limits[limit].get_current_usage()[0].get_value() == 3 + + limit = "Cache behaviors per distribution" + assert 
len(cls.limits[limit].get_current_usage()) \ + == expected_nb_distributions + assert cls.limits[limit].get_current_usage()[1].resource_id \ + == "ID-DISTRIBUTION-001" + assert cls.limits[limit].get_current_usage()[1].get_value() == 5 + + limit = "Origins per distribution" + assert len(cls.limits[limit].get_current_usage()) \ + == expected_nb_distributions + assert cls.limits[limit].get_current_usage()[2].resource_id \ + == "ID-DISTRIBUTION-002" + assert cls.limits[limit].get_current_usage()[2].get_value() == 3 + + limit = "Origin groups per distribution" + assert len(cls.limits[limit].get_current_usage()) \ + == expected_nb_distributions + assert cls.limits[limit].get_current_usage()[3].resource_id \ + == "ID-DISTRIBUTION-003" + assert cls.limits[limit].get_current_usage()[3].get_value() == 1 + + # Check which methods were called + assert mock_conn.mock_calls == [] + assert mock_paginate.mock_calls == [ + call( + mock_conn.list_distributions, + alc_marker_path=["DistributionList", "NextMarker"], + alc_data_path=["DistributionList", "Items"], + alc_marker_param="Marker", + ) + ] + + def test_find_usage_distributions_keygroups(self): + """ + Verify the correctness of usage (keygroups within a distribution) + This test mocks the AWS list_distributions response (after pagination). 
+ """ + # Setup the mock and call the tested function + response = result_fixtures.CloudFront.\ + test_find_usage_distributions_keygroups + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = response + cls._find_usage_distributions() + + # Check that usage values are correctly set + limit = "Key groups associated with a single distribution" + assert len(cls.limits[limit].get_current_usage()) == 1 # 1 distribution + assert cls.limits[limit].get_current_usage()[0].resource_id \ + == "ID-DISTRIBUTION-001" + assert cls.limits[limit].get_current_usage()[0].get_value() == 3 + + limit = "Key groups associated with a single cache behavior" + assert len(cls.limits[limit].get_current_usage()) == 3 # 3 cache behav. + # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "ID-DISTRIBUTION-001-default-cache-behavior" in usage_map + assert usage_map["ID-DISTRIBUTION-001-default-cache-behavior" + ].get_value() == 2 + assert "ID-DISTRIBUTION-001-cache-behavior-path01" in usage_map + assert usage_map["ID-DISTRIBUTION-001-cache-behavior-path01" + ].get_value() == 0 + assert "ID-DISTRIBUTION-001-cache-behavior-path02" in usage_map + assert usage_map["ID-DISTRIBUTION-001-cache-behavior-path02" + ].get_value() == 3 + + # Check which methods were called + assert mock_conn.mock_calls == [] + assert mock_paginate.mock_calls == [ + call( + mock_conn.list_distributions, + alc_marker_path=["DistributionList", "NextMarker"], + alc_data_path=["DistributionList", "Items"], + alc_marker_param="Marker", + ) + ] + + def test_find_usage_distributions_per_keygroups(self): + """ + Verify the correctness of usage (distributions associated to a keygroup) + This test mocks the AWS list_distributions response (after pagination). 
+ """ + # Setup the mock and call the tested function + response = result_fixtures.CloudFront.\ + test_find_usage_distributions_per_key_group + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = response + cls._find_usage_distributions() + + # Check that usage values are correctly set + limit = "Distributions associated with a single key group" + assert len(cls.limits[limit].get_current_usage()) == 3 # 3 key groups + # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "A" in usage_map + assert usage_map["A"].get_value() == 2 # "A" referenced in 2 distrib + assert "B" in usage_map + assert usage_map["B"].get_value() == 1 # "B" referenced in 1 distrib + assert "C" in usage_map + assert usage_map["C"].get_value() == 1 # "C" referenced in 1 distrib + + def test_find_usage_distributions_per_cache_policy(self): + """ + Verify the correctness of usage + (distributions associated to a cache policy) + This test mocks the AWS list_distributions response (after pagination). + """ + # Setup the mock and call the tested function + response = result_fixtures.CloudFront.\ + test_find_usage_distributions_per_cache_policy + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = response + cls._find_usage_distributions() + + # Check that usage values are correctly set + limit = "Distributions associated with the same cache policy" + assert len(cls.limits[limit].get_current_usage()) == 4 # 4 cache pol. 
+ # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "A" in usage_map + assert usage_map["A"].get_value() == 2 # "A" referenced in 2 distrib + assert "B" in usage_map + assert usage_map["B"].get_value() == 1 # "B" referenced in 1 distrib + assert "C" in usage_map + assert usage_map["C"].get_value() == 1 # "C" referenced in 1 distrib + assert "D" in usage_map + assert usage_map["D"].get_value() == 1 # "D" referenced in 1 distrib + + def test_find_usage_distributions_per_origin_req_policy(self): + """ + Verify the correctness of usage + (distributions associated to an origin request policy) + This test mocks the AWS list_distributions response (after pagination). + """ + # Setup the mock and call the tested function + response = result_fixtures.CloudFront.\ + test_find_usage_distributions_per_origin_req_policy + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = response + cls._find_usage_distributions() + + # Check that usage values are correctly set + limit = "Distributions associated with the same origin request policy" + assert len(cls.limits[limit].get_current_usage()) == 3 + # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "A" in usage_map + assert usage_map["A"].get_value() == 2 # "A" referenced in 2 distrib + assert "B" in usage_map + assert usage_map["B"].get_value() == 1 # "B" referenced in 1 distrib + assert "C" in usage_map + assert usage_map["C"].get_value() == 1 # "C" referenced in 1 distrib + + def test_find_usage_per_cache_behavior(self): + """ + Verify the correctness of cache behavior (limits per cache behavior) + This test mocks the AWS list_distributions response (after pagination). 
+ """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_per_cache_behavior + cls._find_usage_distributions() + + # Check that usage values are correctly set + limit = "Whitelisted cookies per cache behavior" + assert len(cls.limits[limit].get_current_usage()) == 2 + # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "ID-DISTRIBUTION-100-default-cache-behavior" in usage_map + assert usage_map["ID-DISTRIBUTION-100-default-cache-behavior" + ].get_value() == 3 + assert "ID-DISTRIBUTION-100-cache-behavior-path01" in usage_map + assert usage_map["ID-DISTRIBUTION-100-cache-behavior-path01" + ].get_value() == 1 + + limit = "Whitelisted headers per cache behavior" + assert len(cls.limits[limit].get_current_usage()) == 2 + # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "ID-DISTRIBUTION-100-default-cache-behavior" in usage_map + assert usage_map["ID-DISTRIBUTION-100-default-cache-behavior" + ].get_value() == 4 + assert "ID-DISTRIBUTION-100-cache-behavior-path01" in usage_map + assert usage_map["ID-DISTRIBUTION-100-cache-behavior-path01" + ].get_value() == 2 + + limit = "Whitelisted query strings per cache behavior" + assert len(cls.limits[limit].get_current_usage()) == 2 + # convert to map to ignore how usage entries are ordered in the array + usage_map = {u.resource_id: u + for u in cls.limits[limit].get_current_usage()} + assert "ID-DISTRIBUTION-100-default-cache-behavior" in usage_map + assert usage_map["ID-DISTRIBUTION-100-default-cache-behavior" + ].get_value() == 5 + assert "ID-DISTRIBUTION-100-cache-behavior-path01" in usage_map + 
assert usage_map["ID-DISTRIBUTION-100-cache-behavior-path01" + ].get_value() == 3 + + def test_find_usage_keygroups(self): + """ + Verify the correctness of usage (key groups) + This test mocks the AWS list_key_groups response (after pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = \ + result_fixtures.CloudFront.test_find_usage_keygroups + cls._find_usage_keygroups() + + # Check that usage values are correctly set + limit = "Key groups per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 2 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + limit = "Public keys in a single key group" + assert len(cls.limits[limit].get_current_usage()) == 2 + assert cls.limits[limit].get_current_usage()[0].get_value() == 4 + assert cls.limits[limit].get_current_usage()[0].resource_id == "kg01" + + # Check which methods were called + assert mock_conn.mock_calls == [] + assert mock_paginate.mock_calls == [ + call( + mock_conn.list_key_groups, + alc_marker_path=["KeyGroupList", "NextMarker"], + alc_data_path=["KeyGroupList", "Items"], + alc_marker_param="Marker", + ) + ] + + def test_find_usage_keygroups_empty(self): + """ + Verify the correctness of usage + (when there are no key groups) + This test mocks the AWS list_key_groups response (after pagination). 
+ """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = \ + result_fixtures.CloudFront.test_find_usage_keygroups_empty + cls._find_usage_keygroups() + + # Check that usage values are correctly set + limit = "Key groups per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 0 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + limit = "Public keys in a single key group" + assert len(cls.limits[limit].get_current_usage()) == 0 + + def test_find_usage_origin_access_identities(self): + """ + Verify the correctness of usage (origin access identities) + This test mocks the AWS list_cloud_front_origin_access_identities + response (after pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_origin_access_identities + cls._find_usage_origin_access_identities() + + # Check that usage values are correctly set + limit = "Origin access identities per account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 3 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + # Check which methods were called + assert mock_conn.mock_calls == [] + assert mock_paginate.mock_calls == [ + call( + mock_conn.list_cloud_front_origin_access_identities, + alc_marker_path=["CloudFrontOriginAccessIdentityList", + "NextMarker"], + alc_data_path=["CloudFrontOriginAccessIdentityList", "Items"], + alc_marker_param="Marker", + ) + ] + + def test_find_usage_origin_access_identities_empty(self): + """ + Verify 
the correctness of usage + (when there are no origin access identities) + This test mocks the AWS list_cloud_front_origin_access_identities + response (after pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_origin_access_identities_empty + cls._find_usage_origin_access_identities() + + # Check that usage values are correctly set + limit = "Origin access identities per account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 0 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + def test_find_usage_cache_policies(self): + """ + Verify the correctness of usage (cache policies) + This test mocks the AWS list_cache_policies response (after pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_cache_policies + cls._find_usage_cache_policies() + + # Check that usage values are correctly set + limit = "Cache policies per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 4 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + # Check which methods were called + assert mock_conn.mock_calls == [] + assert mock_paginate.mock_calls == [ + call( + mock_conn.list_cache_policies, + Type='custom', + alc_marker_path=["CachePolicyList", "NextMarker"], + alc_data_path=["CachePolicyList", "Items"], + alc_marker_param="Marker", + ) + ] + + def test_find_usage_cache_policies_empty(self): + """ + Verify the correctness of 
usage + (when there are no cache policies) + This test mocks the AWS list_cache_policies response (after pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_cache_policies_empty + cls._find_usage_cache_policies() + + # Check that usage values are correctly set + limit = "Cache policies per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 0 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + def test_find_usage_cache_policies_config(self): + """ + Verify the correctness of usage (per-cache-policy limits) + This test mocks the AWS list_cache_policies response (after pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_cache_policies_config + cls._find_usage_cache_policies() + + # Check that usage values are correctly set + limit = "Cookies per cache policy" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 2 + assert cls.limits[limit].get_current_usage()[0].resource_id == "CP01" + + limit = "Headers per cache policy" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 3 + assert cls.limits[limit].get_current_usage()[0].resource_id == "CP01" + + limit = "Query strings per cache policy" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 1 + assert 
cls.limits[limit].get_current_usage()[0].resource_id == "CP01" + + def test_find_usage_origin_request_policies(self): + """ + Verify the correctness of usage (origin request policies) + This test mocks the AWS list_origin_request_policies response (after + pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_origin_request_policies + cls._find_usage_origin_request_policies() + + # Check that usage values are correctly set + limit = "Origin request policies per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 2 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + # Check which methods were called + assert mock_conn.mock_calls == [] + assert mock_paginate.mock_calls == [ + call( + mock_conn.list_origin_request_policies, + Type='custom', + alc_marker_path=["OriginRequestPolicyList", "NextMarker"], + alc_data_path=["OriginRequestPolicyList", "Items"], + alc_marker_param="Marker", + ) + ] + + def test_find_usage_origin_request_policies_empty(self): + """ + Verify the correctness of usage + This test mocks the AWS list_origin_request_policies response (after + pagination). 
+ """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_origin_request_policies_empty + cls._find_usage_origin_request_policies() + + # Check that usage values are correctly set + limit = "Origin request policies per AWS account" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 0 + assert cls.limits[limit].get_current_usage()[0].resource_id is None + + def test_find_usage_origin_request_policies_config(self): + """ + Verify the correctness of usage (per-origin-request-policy limits) + This test mocks the AWS list_origin_request_policies response (after + pagination). + """ + # Setup the mock and call the tested function + mock_conn = Mock() + with patch("%s.paginate_dict" % pbm) as mock_paginate: + cls = _CloudfrontService(21, 43, {}, None) + cls.conn = mock_conn + mock_paginate.return_value = result_fixtures.CloudFront\ + .test_find_usage_origin_request_policies_config + cls._find_usage_origin_request_policies() + + # Check that usage values are correctly set + limit = "Cookies per origin request policy" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 2 + assert cls.limits[limit].get_current_usage()[0].resource_id == "ORP01" + + limit = "Headers per origin request policy" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 3 + assert cls.limits[limit].get_current_usage()[0].resource_id == "ORP01" + + limit = "Query strings per origin request policy" + assert len(cls.limits[limit].get_current_usage()) == 1 + assert cls.limits[limit].get_current_usage()[0].get_value() == 1 + assert cls.limits[limit].get_current_usage()[0].resource_id 
== "ORP01" + + def test_required_iam_permissions(self): + cls = _CloudfrontService(21, 43, {}, None) + assert cls.required_iam_permissions() == [ + "cloudfront:ListCloudFrontOriginAccessIdentities", + "cloudfront:ListKeyGroups", + "cloudfront:ListDistributions", + "cloudfront:ListCachePolicies", + "cloudfront:ListOriginRequestPolicies" + ] diff --git a/awslimitchecker/tests/services/test_ec2.py b/awslimitchecker/tests/services/test_ec2.py index a79e1517..4377535b 100644 --- a/awslimitchecker/tests/services/test_ec2.py +++ b/awslimitchecker/tests/services/test_ec2.py @@ -753,11 +753,11 @@ def test_simple(self): "ec2:DescribeSpotFleetInstances", "ec2:DescribeSpotFleetRequestHistory", "ec2:DescribeSpotFleetRequests", - "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeVolumes", "ec2:DescribeVpcs", + "cloudwatch:GetMetricData", ] @@ -767,9 +767,10 @@ def test_simple(self): mocks = fixtures.test_find_usage_networking_sgs mock_conn = Mock() - mock_conn.security_groups.all.return_value = mocks + mock_conn.security_groups.filter.return_value = mocks cls = _Ec2Service(21, 43, {}, None) + cls._current_account_id = "1234567890" cls.resource_conn = mock_conn with patch('awslimitchecker.services.ec2.logger') as mock_logger: @@ -802,7 +803,9 @@ def test_simple(self): # egress: IPv4 = 22; IPv6 = 29 assert sorted_usage[2].get_value() == 29 assert mock_conn.mock_calls == [ - call.security_groups.all() + call.security_groups.filter( + Filters=[{'Name': 'owner-id', 'Values': ['1234567890']}] + ) ] @@ -895,7 +898,7 @@ def test_simple(self): ].quotas_service_code == 'ec2' assert limits[ 'VPC Elastic IP addresses (EIPs)' - ].quota_name == 'Number of EIPs - VPC EIPs' + ].quota_name == 'EC2-VPC Elastic IPs' assert limits[ 'VPC Elastic IP addresses (EIPs)' ].quotas_unit == 'None' @@ -904,7 +907,7 @@ def test_simple(self): ].quotas_service_code == 'ec2' assert limits[ 'Elastic IP addresses (EIPs)' - ].quota_name == 'Elastic IP addresses 
for EC2-Classic' + ].quota_name == 'EC2-Classic Elastic IPs' assert limits[ 'Elastic IP addresses (EIPs)' ].quotas_unit == 'None' @@ -920,7 +923,12 @@ def test_simple(self): cls = _Ec2Service(21, 43, {}, None) limits = cls._get_limits_spot() expected = [ - 'Max spot instance requests per region', + 'All F Spot Instance Requests', + 'All G Spot Instance Requests', + 'All Inf Spot Instance Requests', + 'All P Spot Instance Requests', + 'All X Spot Instance Requests', + 'All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests', 'Max active spot fleets per region', 'Max launch specifications per spot fleet', 'Max target capacity per spot fleet', @@ -931,70 +939,66 @@ def test_simple(self): class TestFindUsageSpotInstances(object): - def test_happy_path(self): - data = fixtures.test_find_usage_spot_instances - mock_conn = Mock() - mock_client_conn = Mock() - mock_client_conn.describe_spot_instance_requests.return_value = data - cls = _Ec2Service(21, 43, {}, None) - cls.resource_conn = mock_conn - cls.conn = mock_client_conn - with patch('awslimitchecker.services.ec2.logger') as mock_logger: + def test_find_usage_spot_instances(self): + def get_cw_usage(klass, dims, metric_name='ResourceCount', period=60): + dim_dict = {x['Name']: x['Value'] for x in dims} + if dim_dict['Class'] == 'F/Spot': + return 2.0 + if dim_dict['Class'] == 'G/Spot': + return 3.0 + if dim_dict['Class'] == 'Inf/Spot': + return 4.0 + if dim_dict['Class'] == 'P/Spot': + return 5.0 + if dim_dict['Class'] == 'X/Spot': + return 6.0 + if dim_dict['Class'] == 'Standard/Spot': + return 7.0 + return 0 + + with patch( + '%s._get_cloudwatch_usage_latest' % pb, autospec=True + ) as mock: + mock.side_effect = get_cw_usage + cls = _Ec2Service(21, 43, {}, None) cls._find_usage_spot_instances() - assert mock_conn.mock_calls == [] - assert mock_client_conn.mock_calls == [ - call.describe_spot_instance_requests() - ] - lim = cls.limits['Max spot instance requests per region'] - usage = lim.get_current_usage() 
+ + usage = cls.limits['All F Spot Instance Requests']\ + .get_current_usage() assert len(usage) == 1 - assert usage[0].get_value() == 2 - assert mock_logger.mock_calls == [ - call.debug('Getting spot instance request usage'), - call.debug('NOT counting spot instance request %s state=%s', - 'reqID1', 'closed'), - call.debug('Counting spot instance request %s state=%s', - 'reqID2', 'active'), - call.debug('Counting spot instance request %s state=%s', - 'reqID3', 'open'), - call.debug('NOT counting spot instance request %s state=%s', - 'reqID4', 'failed') - ] + assert usage[0].get_value() == 2.0 + assert usage[0].resource_id is None - def test_unsupported(self): - mock_client_conn = Mock() - err = botocore.exceptions.ClientError( - {'Error': {'Code': 'UnsupportedOperation'}}, - 'operation', - ) - mock_client_conn.describe_spot_instance_requests.side_effect = err - cls = _Ec2Service(21, 43, {}, None) - cls.conn = mock_client_conn - cls._find_usage_spot_instances() - lim = cls.limits['Max spot instance requests per region'] - usage = lim.get_current_usage() - assert len(usage) == 0 + usage = cls.limits['All G Spot Instance Requests']\ + .get_current_usage() + assert len(usage) == 1 + assert usage[0].get_value() == 3.0 + assert usage[0].resource_id is None - def test_unknown_code(self): - mock_client_conn = Mock() - err = botocore.exceptions.ClientError( - {'Error': {'Code': 'SomeCode'}}, - 'operation', - ) - mock_client_conn.describe_spot_instance_requests.side_effect = err - cls = _Ec2Service(21, 43, {}, None) - cls.conn = mock_client_conn - with pytest.raises(botocore.exceptions.ClientError): - cls._find_usage_spot_instances() + usage = cls.limits['All Inf Spot Instance Requests']\ + .get_current_usage() + assert len(usage) == 1 + assert usage[0].get_value() == 4.0 + assert usage[0].resource_id is None - def test_unknown_error(self): - mock_client_conn = Mock() - err = RuntimeError - mock_client_conn.describe_spot_instance_requests.side_effect = err - cls = 
_Ec2Service(21, 43, {}, None) - cls.conn = mock_client_conn - with pytest.raises(RuntimeError): - cls._find_usage_spot_instances() + usage = cls.limits['All P Spot Instance Requests']\ + .get_current_usage() + assert len(usage) == 1 + assert usage[0].get_value() == 5.0 + assert usage[0].resource_id is None + + usage = cls.limits['All X Spot Instance Requests']\ + .get_current_usage() + assert len(usage) == 1 + assert usage[0].get_value() == 6.0 + assert usage[0].resource_id is None + + usage = cls.limits[ + 'All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests' + ].get_current_usage() + assert len(usage) == 1 + assert usage[0].get_value() == 7.0 + assert usage[0].resource_id is None class TestFindUsageSpotFleets(object): diff --git a/awslimitchecker/tests/services/test_vpc.py b/awslimitchecker/tests/services/test_vpc.py index e968935a..60003ace 100644 --- a/awslimitchecker/tests/services/test_vpc.py +++ b/awslimitchecker/tests/services/test_vpc.py @@ -198,17 +198,19 @@ def test_find_usage_acls(self): assert len(usage) == 2 assert usage[0].get_value() == 1 assert usage[0].resource_id == 'vpc-2' - assert usage[1].get_value() == 2 + assert usage[1].get_value() == 3 assert usage[1].resource_id == 'vpc-1' entries = sorted(cls.limits['Rules per network ' 'ACL'].get_current_usage()) - assert len(entries) == 3 + assert len(entries) == 4 assert entries[0].resource_id == 'acl-2' assert entries[0].get_value() == 1 assert entries[1].resource_id == 'acl-1' - assert entries[1].get_value() == 3 - assert entries[2].resource_id == 'acl-3' - assert entries[2].get_value() == 5 + assert entries[1].get_value() == 2 + assert entries[2].resource_id == 'acl-4' + assert entries[2].get_value() == 3 + assert entries[3].resource_id == 'acl-3' + assert entries[3].get_value() == 4 assert mock_conn.mock_calls == [ call.describe_network_acls(Filters=[{ 'Name': 'owner-id', 'Values': ['0123456789'] diff --git a/awslimitchecker/tests/support.py b/awslimitchecker/tests/support.py index 
25346df5..dc855b59 100644 --- a/awslimitchecker/tests/support.py +++ b/awslimitchecker/tests/support.py @@ -207,7 +207,7 @@ def verify_region(self, region_name): "to be us-east-1 but got %s" \ "" % support_region for svc, rname in service_regions.items(): - if svc == 'route53': + if svc in ['route53', 'cloudfront']: continue assert rname == region_name, "Expected service %s to connect to " \ "region %s, but connected to %s" % ( diff --git a/awslimitchecker/tests/test_connectable.py b/awslimitchecker/tests/test_connectable.py index 009aee2b..8f2b2a0f 100644 --- a/awslimitchecker/tests/test_connectable.py +++ b/awslimitchecker/tests/test_connectable.py @@ -129,6 +129,8 @@ class Test_Connectable(object): def test_connect(self): mock_conn = Mock() mock_cc = Mock() + mock_botoconfig = Mock() + type(mock_cc).region_name = 'myregion' type(mock_conn)._client_config = mock_cc @@ -141,12 +143,13 @@ def test_connect(self): mock_kwargs.return_value = kwargs with patch('%s.logger' % pbm) as mock_logger: with patch('%s.boto3.client' % pbm) as mock_client: - with patch( - '%s._max_retries_config' % pb, new_callable=PropertyMock - ) as m_mrc: - m_mrc.return_value = None - mock_client.return_value = mock_conn - cls.connect() + with patch('%s.Config' % pbm) as m_conf: + with patch( + '%s._max_retries_config' % pb, new_callable=PropertyMock # noqa - ignore line length + ) as m_mrc: + m_conf.return_value.merge.return_value = mock_botoconfig # noqa - ignore line length + mock_client.return_value = mock_conn + cls.connect() assert mock_kwargs.mock_calls == [call()] assert mock_logger.mock_calls == [ call.info("Connected to %s in region %s", @@ -157,15 +160,19 @@ def test_connect(self): call( 'myapi', foo='fooval', - bar='barval' + bar='barval', + config=mock_botoconfig, ) ] - assert m_mrc.mock_calls == [call()] + assert m_mrc.mock_calls == [call(), call()] assert cls.conn == mock_client.return_value def test_connect_with_retries(self): mock_conn = Mock() mock_cc = Mock() + mock_conf = 
Mock() + mock_botoconfig = Mock() + type(mock_cc).region_name = 'myregion' type(mock_conn)._client_config = mock_cc @@ -179,12 +186,14 @@ def test_connect_with_retries(self): mock_kwargs.return_value = kwargs with patch('%s.logger' % pbm) as mock_logger: with patch('%s.boto3.client' % pbm) as mock_client: - with patch( - '%s._max_retries_config' % pb, new_callable=PropertyMock - ) as m_mrc: - m_mrc.return_value = mock_conf - mock_client.return_value = mock_conn - cls.connect() + with patch('%s.Config' % pbm) as m_conf: + with patch( + '%s._max_retries_config' % pb, new_callable=PropertyMock # noqa - ignore line length + ) as m_mrc: + m_conf.return_value.merge.return_value = mock_botoconfig # noqa - ignore line length + m_mrc.return_value = mock_conf + mock_client.return_value = mock_conn + cls.connect() assert mock_kwargs.mock_calls == [call()] assert mock_logger.mock_calls == [ call.info("Connected to %s in region %s", @@ -196,7 +205,7 @@ def test_connect_with_retries(self): 'myapi', foo='fooval', bar='barval', - config=mock_conf + config=mock_botoconfig ) ] assert m_mrc.mock_calls == [call(), call()] @@ -236,6 +245,8 @@ def test_connect_resource(self): mock_meta = Mock() mock_client = Mock() mock_cc = Mock() + mock_botoconfig = Mock() + type(mock_cc).region_name = 'myregion' type(mock_client)._client_config = mock_cc type(mock_meta).client = mock_client @@ -250,13 +261,15 @@ def test_connect_resource(self): mock_kwargs.return_value = kwargs with patch('%s.logger' % pbm) as mock_logger: with patch('%s.boto3.resource' % pbm) as mock_resource: - with patch( - '%s._max_retries_config' % pb, - new_callable=PropertyMock - ) as m_mrc: - m_mrc.return_value = None - mock_resource.return_value = mock_conn - cls.connect_resource() + with patch('%s.Config' % pbm) as m_conf: + with patch( + '%s._max_retries_config' % pb, + new_callable=PropertyMock + ) as m_mrc: + m_conf.return_value = mock_botoconfig + m_mrc.return_value = None + mock_resource.return_value = mock_conn + 
cls.connect_resource() assert mock_kwargs.mock_calls == [call()] assert mock_logger.mock_calls == [ call.info("Connected to %s (resource) in region %s", @@ -267,7 +280,8 @@ def test_connect_resource(self): call( 'myapi', foo='fooval', - bar='barval' + bar='barval', + config=mock_botoconfig, ) ] assert m_mrc.mock_calls == [call()] @@ -278,6 +292,8 @@ def test_connect_resource_with_max_retries(self): mock_meta = Mock() mock_client = Mock() mock_cc = Mock() + mock_botoconfig = Mock() + type(mock_cc).region_name = 'myregion' type(mock_client)._client_config = mock_cc type(mock_meta).client = mock_client @@ -293,13 +309,15 @@ def test_connect_resource_with_max_retries(self): mock_kwargs.return_value = kwargs with patch('%s.logger' % pbm) as mock_logger: with patch('%s.boto3.resource' % pbm) as mock_resource: - with patch( - '%s._max_retries_config' % pb, - new_callable=PropertyMock - ) as m_mrc: - m_mrc.return_value = mock_conf - mock_resource.return_value = mock_conn - cls.connect_resource() + with patch('%s.Config' % pbm) as m_conf: + with patch( + '%s._max_retries_config' % pb, + new_callable=PropertyMock + ) as m_mrc: + m_conf.return_value.merge.return_value = mock_botoconfig # noqa - ignore line length + m_mrc.return_value = mock_conf + mock_resource.return_value = mock_conn + cls.connect_resource() assert mock_kwargs.mock_calls == [call()] assert mock_logger.mock_calls == [ call.info("Connected to %s (resource) in region %s", @@ -311,7 +329,7 @@ def test_connect_resource_with_max_retries(self): 'myapi', foo='fooval', bar='barval', - config=mock_conf + config=mock_botoconfig ) ] assert m_mrc.mock_calls == [call(), call()] diff --git a/awslimitchecker/version.py b/awslimitchecker/version.py index ecfcd5fa..e00b9368 100644 --- a/awslimitchecker/version.py +++ b/awslimitchecker/version.py @@ -47,7 +47,7 @@ except ImportError: logger.error("Unable to import versionfinder", exc_info=True) -_VERSION_TUP = (11, 0, 0) +_VERSION_TUP = (12, 0, 0) _VERSION = '.'.join([str(x) 
for x in _VERSION_TUP]) _PROJECT_URL = 'https://github.com/jantman/awslimitchecker' diff --git a/docs/source/awslimitchecker.services.certificatemanager.rst b/docs/source/awslimitchecker.services.certificatemanager.rst new file mode 100644 index 00000000..c039c9e8 --- /dev/null +++ b/docs/source/awslimitchecker.services.certificatemanager.rst @@ -0,0 +1,8 @@ +awslimitchecker.services.certificatemanager module +================================================== + +.. automodule:: awslimitchecker.services.certificatemanager + :members: + :undoc-members: + :show-inheritance: + :private-members: diff --git a/docs/source/awslimitchecker.services.cloudfront.rst b/docs/source/awslimitchecker.services.cloudfront.rst new file mode 100644 index 00000000..f4955fc4 --- /dev/null +++ b/docs/source/awslimitchecker.services.cloudfront.rst @@ -0,0 +1,8 @@ +awslimitchecker.services.cloudfront module +========================================== + +.. automodule:: awslimitchecker.services.cloudfront + :members: + :undoc-members: + :show-inheritance: + :private-members: diff --git a/docs/source/awslimitchecker.services.rst b/docs/source/awslimitchecker.services.rst index e849f0a5..bd702761 100644 --- a/docs/source/awslimitchecker.services.rst +++ b/docs/source/awslimitchecker.services.rst @@ -16,7 +16,9 @@ Submodules awslimitchecker.services.apigateway awslimitchecker.services.autoscaling awslimitchecker.services.base + awslimitchecker.services.certificatemanager awslimitchecker.services.cloudformation + awslimitchecker.services.cloudfront awslimitchecker.services.cloudtrail awslimitchecker.services.directoryservice awslimitchecker.services.dynamodb diff --git a/docs/source/cli_usage.rst b/docs/source/cli_usage.rst index baaf4176..eb8d752b 100644 --- a/docs/source/cli_usage.rst +++ b/docs/source/cli_usage.rst @@ -176,9 +176,9 @@ View the AWS services currently supported by ``awslimitchecker`` with the (venv)$ awslimitchecker -s ApiGateway AutoScaling + CertificateManager CloudFormation 
- CloudTrail - Directory Service + CloudFront (...) Route53 S3 @@ -383,7 +383,7 @@ For example, to override the limits of EC2's "EC2-Classic Elastic IPs" and ApiGateway/Documentation parts per API 2000 ApiGateway/Edge APIs per account 120.0 (Quotas) (...) - CloudFormation/Stacks 200 (API) + CloudFormation/Stacks 2000 (API) (...) Lambda/Function Count None (...) @@ -628,7 +628,7 @@ permissions for it to perform all limit checks. This can be viewed with the "Statement": [ { "Action": [ - "apigateway:GET", + "acm:ListCertificates", (...) } ], diff --git a/docs/source/iam_policy.rst b/docs/source/iam_policy.rst index 0878346c..b9a75a4a 100644 --- a/docs/source/iam_policy.rst +++ b/docs/source/iam_policy.rst @@ -31,6 +31,7 @@ services that do not affect the results of this program. "Statement": [ { "Action": [ + "acm:ListCertificates", "apigateway:GET", "apigateway:HEAD", "apigateway:OPTIONS", @@ -39,6 +40,11 @@ services that do not affect the results of this program. "autoscaling:DescribeLaunchConfigurations", "cloudformation:DescribeAccountLimits", "cloudformation:DescribeStacks", + "cloudfront:ListCachePolicies", + "cloudfront:ListCloudFrontOriginAccessIdentities", + "cloudfront:ListDistributions", + "cloudfront:ListKeyGroups", + "cloudfront:ListOriginRequestPolicies", "cloudtrail:DescribeTrails", "cloudtrail:GetEventSelectors", "cloudwatch:GetMetricData", @@ -61,7 +67,6 @@ services that do not affect the results of this program. 
"ec2:DescribeSpotFleetInstances", "ec2:DescribeSpotFleetRequestHistory", "ec2:DescribeSpotFleetRequests", - "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeVolumes", diff --git a/docs/source/limits.rst b/docs/source/limits.rst index c566dd60..76b10121 100644 --- a/docs/source/limits.rst +++ b/docs/source/limits.rst @@ -48,7 +48,7 @@ Documentation parts per API 2000 Edge APIs per account |check| 120 Private APIs per account |check| 600 Regional APIs per account |check| 600 -Resources per API |check| 300 +Resources per API 300 Stages per API |check| 10 Usage plans per account |check| 300 VPC Links per account |check| 5 @@ -62,10 +62,21 @@ AutoScaling ===================== =============== ======== ======= === Limit Trusted Advisor Quotas API Default ===================== =============== ======== ======= === -Auto Scaling groups |check| |check| |check| 200 -Launch configurations |check| |check| |check| 200 +Auto Scaling groups |check| |check| 200 +Launch configurations |check| |check| 200 ===================== =============== ======== ======= === +.. _limits.CertificateManager: + +CertificateManager +------------------- + +================ =============== ======== ======= ==== +Limit Trusted Advisor Quotas API Default +================ =============== ======== ======= ==== +ACM certificates 1000 +================ =============== ======== ======= ==== + .. _limits.CloudFormation: CloudFormation @@ -74,9 +85,43 @@ CloudFormation ====== =============== ======== ======= === Limit Trusted Advisor Quotas API Default ====== =============== ======== ======= === -Stacks |check| |check| |check| 200 +Stacks |check| |check| 200 ====== =============== ======== ======= === +.. 
_limits.CloudFront: + +CloudFront +----------- + +============================================================ =============== ======== ======= === +Limit Trusted Advisor Quotas API Default +============================================================ =============== ======== ======= === +Alternate domain names (CNAMEs) per distribution 100 +Cache behaviors per distribution 25 +Cache policies per AWS account 20 +Cookies per cache policy 10 +Cookies per origin request policy 10 +Distributions associated with a single key group 100 +Distributions associated with the same cache policy 100 +Distributions associated with the same origin request policy 100 +Distributions per AWS account 200 +Headers per cache policy 10 +Headers per origin request policy 10 +Key groups associated with a single cache behavior 4 +Key groups associated with a single distribution 4 +Key groups per AWS account 10 +Origin access identities per account 100 +Origin groups per distribution 10 +Origin request policies per AWS account 20 +Origins per distribution 25 +Public keys in a single key group 5 +Query strings per cache policy 10 +Query strings per origin request policy 10 +Whitelisted cookies per cache behavior 10 +Whitelisted headers per cache behavior 10 +Whitelisted query strings per cache behavior 10 +============================================================ =============== ======== ======= === + .. 
_limits.CloudTrail: CloudTrail @@ -131,11 +176,11 @@ Limit Trusted Advisor Quotas API Active snapshots |check| 100000 Active volumes 5000 Cold (HDD) volume storage (GiB) 307200 -General Purpose (SSD gp2) volume storage (GiB) |check| 307200 -General Purpose (SSD gp3) volume storage (GiB) |check| 307200 +General Purpose (SSD gp2) volume storage (GiB) 307200 +General Purpose (SSD gp3) volume storage (GiB) 307200 Magnetic volume storage (GiB) 307200 -Provisioned IOPS (io1) 300000 -Provisioned IOPS (io2) 100000 +Provisioned IOPS (io1) |check| 300000 +Provisioned IOPS (io2) |check| 100000 Provisioned IOPS SSD (io1) storage (GiB) 307200 Provisioned IOPS SSD (io2) storage (GiB) 20480 Throughput Optimized (HDD) volume storage (GiB) 307200 @@ -176,10 +221,15 @@ type. ==================================================================== =============== ======== ======= ==== Limit Trusted Advisor Quotas API Default ==================================================================== =============== ======== ======= ==== -Elastic IP addresses (EIPs) |check| 5 +All F Spot Instance Requests |check| 11 +All G Spot Instance Requests |check| 11 +All Inf Spot Instance Requests |check| 64 +All P Spot Instance Requests |check| 16 +All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests |check| 1440 +All X Spot Instance Requests |check| 21 +Elastic IP addresses (EIPs) |check| |check| 5 Max active spot fleets per region 1000 Max launch specifications per spot fleet 50 -Max spot instance requests per region 20 Max target capacity for all spot fleets in region 5000 Max target capacity per spot fleet 3000 Rules per VPC security group |check| 60 @@ -188,7 +238,7 @@ Running On-Demand All G instances Running On-Demand All P instances |check| 128 Running On-Demand All Standard (A, C, D, H, I, M, R, T, Z) instances |check| 1152 Running On-Demand All X instances |check| 128 -VPC Elastic IP addresses (EIPs) |check| 5 +VPC Elastic IP addresses (EIPs) |check| |check| 5 VPC security groups per 
Region |check| 2500 VPC security groups per elastic network interface |check| 5 ==================================================================== =============== ======== ======= ==== @@ -502,8 +552,8 @@ Limit Trusted Advisor Quotas API Default ================================ =============== ======== ======= ===== Clusters 10000 Container Instances per Cluster 2000 -Fargate On-Demand resource count 1000 -Fargate Spot resource count 1000 +Fargate On-Demand resource count |check| 1000 +Fargate Spot resource count |check| 1000 Services per Cluster 5000 Tasks per service 5000 ================================ =============== ======== ======= ===== @@ -546,14 +596,14 @@ ELB Limit Trusted Advisor Quotas API Default ========================================== =============== ======== ======= ==== Application load balancers |check| |check| 20 -Certificates per application load balancer 25 +Certificates per application load balancer |check| 25 Classic load balancers |check| |check| 20 -Listeners per application load balancer |check| 50 +Listeners per application load balancer |check| |check| 50 Listeners per load balancer |check| 100 -Listeners per network load balancer |check| 50 +Listeners per network load balancer |check| |check| 50 Network load balancers |check| 20 Registered instances per load balancer |check| 1000 -Rules per application load balancer |check| 100 +Rules per application load balancer |check| |check| 100 Target groups |check| 3000 ========================================== =============== ======== ======= ====