diff --git a/.bandit b/.bandit
new file mode 100644
index 0000000..b1e0fa3
--- /dev/null
+++ b/.bandit
@@ -0,0 +1,306 @@
+pytest_filter: test_*
+skips: ['B101', 'B301']
+exclude_dirs:
+ - tools
+any_other_function_with_shell_equals_true:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+assert_used:
+ skips: []
+hardcoded_tmp_directory:
+ tmp_dirs:
+ - /tmp
+ - /var/tmp
+ - /dev/shm
+linux_commands_wildcard_injection:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+ssl_with_bad_defaults:
+ bad_protocol_versions:
+ - PROTOCOL_SSLv2
+ - SSLv2_METHOD
+ - SSLv23_METHOD
+ - PROTOCOL_SSLv3
+ - PROTOCOL_TLSv1
+ - SSLv3_METHOD
+ - TLSv1_METHOD
+ssl_with_bad_version:
+ bad_protocol_versions:
+ - PROTOCOL_SSLv2
+ - SSLv2_METHOD
+ - SSLv23_METHOD
+ - PROTOCOL_SSLv3
+ - PROTOCOL_TLSv1
+ - SSLv3_METHOD
+ - TLSv1_METHOD
+start_process_with_a_shell:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+start_process_with_no_shell:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+start_process_with_partial_path:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+subprocess_popen_with_shell_equals_true:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+subprocess_without_shell_equals_true:
+ no_shell:
+ - os.execl
+ - os.execle
+ - os.execlp
+ - os.execlpe
+ - os.execv
+ - os.execve
+ - os.execvp
+ - os.execvpe
+ - os.spawnl
+ - os.spawnle
+ - os.spawnlp
+ - os.spawnlpe
+ - os.spawnv
+ - os.spawnve
+ - os.spawnvp
+ - os.spawnvpe
+ - os.startfile
+ shell:
+ - os.system
+ - os.popen
+ - os.popen2
+ - os.popen3
+ - os.popen4
+ - popen2.popen2
+ - popen2.popen3
+ - popen2.popen4
+ - popen2.Popen3
+ - popen2.Popen4
+ - commands.getoutput
+ - commands.getstatusoutput
+ subprocess:
+ - subprocess.Popen
+ - subprocess.call
+ - subprocess.check_call
+ - subprocess.check_output
+ - subprocess.run
+try_except_continue:
+ check_typed_exception: false
+try_except_pass:
+ check_typed_exception: false
+weak_cryptographic_key:
+ weak_key_size_dsa_high: 1024
+ weak_key_size_dsa_medium: 2048
+ weak_key_size_ec_high: 160
+ weak_key_size_ec_medium: 224
+ weak_key_size_rsa_high: 1024
+ weak_key_size_rsa_medium: 2048
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..c747bc3
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,29 @@
+[flake8]
+jobs = auto
+verbose = 1
+quiet = 0
+tee = True
+exclude =
+ # git folder
+ .git,
+ # python cache
+ __pycache__,
+ test,
+ eggs,
+ migrations,
+ app/tests,
+ bin,
+filename =
+ *.py
+
+disable-noqa = False
+
+max-line-length = 140
+max-complexity = 10
+ignore =
+ F401
+ C901
+ W504
+ W503
+ F403
+ F405
diff --git a/.gitignore b/.gitignore
index 309ff47..f3846c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,6 @@
gui/test/aws_inventory_data*.json
*.pyc
/.idea/workspace.xml
+/gui/aws_inventory_*.json
+.idea/
+tmp
diff --git a/.idea/aws-inventory.iml b/.idea/aws-inventory.iml
deleted file mode 100644
index 6711606..0000000
--- a/.idea/aws-inventory.iml
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index 2f28788..0000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index 61d771e..0000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 94a25f7..0000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..b1ac6dd
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,39 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v2.3.0
+ hooks:
+ - id: check-yaml
+ args: [--allow-multiple-documents]
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ - id: check-docstring-first
+ - id: check-merge-conflict
+ - id: mixed-line-ending
+ args: [--fix=lf]
+ - repo: https://github.com/ambv/black
+ rev: stable
+ hooks:
+ - id: black
+
+ - repo: https://github.com/Lucas-C/pre-commit-hooks-bandit
+ rev: v1.0.4
+ hooks:
+ - id: python-bandit-vulnerability-check
+      args: [--ini, .bandit, -ll, -s, B301, --recursive, .]
+ files: .py$
+
+ - repo: local
+ hooks:
+ - id: flake8
+ name: flake8
+ stages: [commit]
+ language: system
+ entry: flake8
+ types: [python]
+ exclude: |
+ (?x)(
+ ^migration/|
+ ^tst/|
+ ^app/tests/|
+ ^bin/
+ )
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 20c4922..475a390 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -14,4 +14,4 @@ To create a production build, run:
`$ npm run build`
-It will compile, minimize, and bundle everything to the *build* directory.
\ No newline at end of file
+It will compile, minimize, and bundle everything to the *build* directory.
diff --git a/README.md b/README.md
index 4ac3769..2507b43 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
# Introduction
+>> This fork is based on the cool script from the [Nccgroup](https://www.nccgroup.com/) folks; I have only migrated the script from Python 2 to Python 3.
This is a tool that tries to discover all [AWS resources](https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#resource) created in an account. AWS has many products (a.k.a. services) with new ones constantly being added and existing ones expanded with new features. The ecosystem allows users to piece together many different services to form a customized cloud experience. The ability to instantly spin up services at scale comes with a manageability cost. It can quickly become difficult to audit an AWS account for the resources being used. It is not only important for billing purposes, but also for security. Dormant resources and unknown resources are more prone to security configuration weaknesses. Additionally, resources with unexpected dependencies pose availability, access control, and authorization issues.
@@ -10,7 +11,13 @@ It uses [botocore](https://github.com/boto/botocore) to discover [AWS services](
# Installation
-First, install Python2.7.
+First, you should have Python 3.8 with Tk support.
+I use [pyenv](https://github.com/pyenv/pyenv) for switching between multiple versions of Python.
+
+```shell
+PYTHON_CONFIGURE_OPTS="--with-tcltk-includes='-I/usr/local/opt/tcl-tk/include' --with-tcltk-libs='-L/usr/local/opt/tcl-tk/lib -ltcl8.6 -ltk8.6' --enable-shared --enable-framework" pyenv install 3.8.6
+
+```
There is a small GUI for displaying progress which uses the standard Python *Tkinter* module. However, the underlying native library code for Tcl/Tk may need extra steps to install. Then,
@@ -32,9 +39,9 @@ Use your OS package manager:
You can run the script without any parameters. It will search for your AWS creds in your shell environment, instance metadata, config file, then credentials file. You can also provide a CSV file, containing your creds, on the commandline. You will want a user that has permissions like the AWS managed policy [ViewOnlyAccess](arn:aws:iam::aws:policy/job-function/ViewOnlyAccess). If you are feeling lucky, you could just pipe the output of the tool to a JSON parser like *jq*.
-The tool could take a long time (dozens of minutes) to complete if no restrictions are placed on which operations to invoke for each service across each region. Filtering by service and region can be done on the commandline while filtering by service operation can be done via configuration file. A [pre-configured file](operation_blacklist.conf) was created and checked into the repository. It will be used by default.
+The tool could take a long time (dozens of minutes) to complete if no restrictions are placed on which operations to invoke for each service across each region. Filtering by service and region can be done on the commandline while filtering by service operation can be done via configuration file. A [pre-configured file](operation_blacklist.conf) was created and checked into the repository. It will be used by default.
-Aside from the commandline output, you can view the results locally in a [React](https://reactjs.org/) [single-page app](https://en.wikipedia.org/wiki/Single-page_application). No web server needed. Just open the [HTML file](gui/dist/index.html) in a browser and select the generated JSON file when prompted.
+Aside from the commandline output, you can view the results locally in a [React](https://reactjs.org/) [single-page app](https://en.wikipedia.org/wiki/Single-page_application). No web server needed. Just open the [HTML file](gui/dist/index.html) in a browser and select the generated JSON file when prompted.
The app uses [jsTree](https://www.jstree.com/) to display the data in a hierarchical, tree-like structure. There is also a search feature.
@@ -101,9 +108,13 @@ Total operations to invoke: 4045
`$ python aws_inventory.py --debug --dry-run`
# Screenshots
-
+## Ubuntu
![invoking apis on commandline](screenshots/invoking%20apis%20on%20commandline.png)
![data in browser](screenshots/data%20in%20browser.png)
+
+## MacOS
+
+![macos_inventory_gui](screenshots/macos_inventory.png)
diff --git a/aws_inventory.py b/aws_inventory.py
index 64f592c..1e0bec3 100644
--- a/aws_inventory.py
+++ b/aws_inventory.py
@@ -10,92 +10,113 @@
import aws_inventory.blacklist
import aws_inventory.invoker
-
# create a module logger and ignore messages outside of the module. botocore was spewing messages
logging.basicConfig()
LOGGER = logging.getLogger(aws_inventory.__name__)
LOGGER.addFilter(logging.Filter(aws_inventory.__name__))
+
def setup_logging(verbose):
LOGGER.setLevel(logging.DEBUG if verbose else logging.INFO)
+
def parse_args(args=None):
parser = argparse.ArgumentParser(
- description='Discover resources in an AWS account.'
+ description="Discover resources in an AWS account."
)
- parser.add_argument('--profile',
- default='default',
- help='Name of the profile (default: %(default)s)')
+ parser.add_argument(
+ "--profile",
+ default="default",
+ help="Name of the profile (default: %(default)s)",
+ )
- parser.add_argument('--mfa-serial',
- help='serial number of MFA device')
+ parser.add_argument("--mfa-serial", help="serial number of MFA device")
- parser.add_argument('--mfa-code',
- help='MFA code')
+ parser.add_argument("--mfa-code", help="MFA code")
- parser.add_argument('--csv-credentials',
- help='CSV file containing account credentials')
+ parser.add_argument(
+ "--csv-credentials", help="CSV file containing account credentials"
+ )
- parser.add_argument('--services',
- default=[],
- nargs='+',
- help='Name of AWS services to include')
+ parser.add_argument(
+ "--services", default=[], nargs="+", help="Name of AWS services to include"
+ )
- parser.add_argument('--exclude',
- dest='excluded_services',
- default=[],
- nargs='+',
- help='Name of AWS services to exclude')
+ parser.add_argument(
+ "--exclude",
+ dest="excluded_services",
+ default=[],
+ nargs="+",
+ help="Name of AWS services to exclude",
+ )
- parser.add_argument('--regions',
- default=[],
- nargs='+',
- help='Name of regions to include, defaults to all')
+ parser.add_argument(
+ "--regions",
+ default=[],
+ nargs="+",
+ help="Name of regions to include, defaults to all",
+ )
- parser.add_argument('--list-svcs',
- action='store_true',
- help=('Print a list of available services (ignore service and region '
- 'filters) and exit'))
+ parser.add_argument(
+ "--list-svcs",
+ action="store_true",
+ help=(
+ "Print a list of available services (ignore service and region "
+ "filters) and exit"
+ ),
+ )
- parser.add_argument('--list-operations',
- action='store_true',
- help='Print a list of operations to invoke for a given service and exit')
+ parser.add_argument(
+ "--list-operations",
+ action="store_true",
+ help="Print a list of operations to invoke for a given service and exit",
+ )
- parser.add_argument('--dry-run',
- action='store_true',
- help=('Go through local API discovery, but do not actually invoke any API. '
- 'Useful for checking filtering of regions, services, and operations.'
- ))
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help=(
+ "Go through local API discovery, but do not actually invoke any API. "
+ "Useful for checking filtering of regions, services, and operations."
+ ),
+ )
- parser.add_argument('--op-blacklist',
- type=argparse.FileType('r'),
- default='operation_blacklist.conf',
- help=(
- 'Configuration file listing service operations to avoid invoking ' +
- '(default: %(default)s)'
- ))
+ parser.add_argument(
+ "--op-blacklist",
+ type=argparse.FileType("r"),
+ default="operation_blacklist.conf",
+ help=(
+ "Configuration file listing service operations to avoid invoking "
+ + "(default: %(default)s)"
+ ),
+ )
- parser.add_argument('--exceptions-dump', help='File to dump the exceptions store')
+ parser.add_argument("--exceptions-dump", help="File to dump the exceptions store")
- parser.add_argument('--responses-dump', help='File to dump the responses store')
+ parser.add_argument("--responses-dump", help="File to dump the responses store")
- parser.add_argument('--gui-data-file',
- help='File to the GUI data (default: {})'.format(
- aws_inventory.config.GUI_DATA_FILENAME_TEMPLATE.template
- ))
+ parser.add_argument(
+ "--gui-data-file",
+ help="File to the GUI data (default: {})".format(
+ aws_inventory.config.GUI_DATA_FILENAME_TEMPLATE.template
+ ),
+ )
- parser.add_argument('--debug',
- action='store_true',
- help='Print debugging information')
+ parser.add_argument(
+ "--debug", action="store_true", help="Print debugging information"
+ )
- parser.add_argument('-v', '--verbose',
- action='store_true',
- help='Print the account resources to stdout')
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Print the account resources to stdout",
+ )
- parser.add_argument('-V', '--version',
- action='store_true',
- help='Print version and exit')
+ parser.add_argument(
+ "-V", "--version", action="store_true", help="Print version and exit"
+ )
parsed = parser.parse_args(args)
@@ -110,6 +131,7 @@ def parse_args(args=None):
parsed.gui_data_file = os.path.join(tool_dir, relative_path)
return parsed
+
def filter_services(api_model, services=frozenset(), excluded_services=frozenset()):
"""Build a list of services by merging together a white- and black-list.
@@ -119,11 +141,11 @@ def filter_services(api_model, services=frozenset(), excluded_services=frozenset
:rtype: frozenset
:return: the list of merged services
"""
- available = frozenset(api_model.keys())
+ available = frozenset(list(api_model.keys()))
if services:
invalid = services - available
if invalid:
- raise ValueError('Invalid requested service(s): {}'.format(invalid))
+ raise ValueError("Invalid requested service(s): {}".format(invalid))
enabled = services
else:
enabled = available
@@ -131,11 +153,12 @@ def filter_services(api_model, services=frozenset(), excluded_services=frozenset
if excluded_services:
invalid = excluded_services - available
if invalid:
- raise ValueError('Invalid excluded service(s): {}'.format(invalid))
+ raise ValueError("Invalid excluded service(s): {}".format(invalid))
enabled = enabled - excluded_services
return enabled
+
def filter_operations(api_model, op_blacklist_parser, regions, services):
"""Build a list of operations.
@@ -149,46 +172,61 @@ def filter_operations(api_model, op_blacklist_parser, regions, services):
svc_descriptors = {}
for svc_name in services:
# validate regions against ones available for service
- available_regions = api_model[svc_name]['regions']
+ available_regions = api_model[svc_name]["regions"]
if available_regions:
if regions:
invalid_regions = frozenset(regions) - available_regions
if invalid_regions:
- LOGGER.warning('[%s] Invalid region(s): %s.', svc_name, ', '.join(invalid_regions))
+ LOGGER.warning(
+ "[%s] Invalid region(s): %s.",
+ svc_name,
+ ", ".join(invalid_regions),
+ )
target_regions = frozenset(regions) - invalid_regions
if not target_regions:
- LOGGER.warning('[%s] No valid regions after applying service and region '
- 'filters.', svc_name)
+ LOGGER.warning(
+ "[%s] No valid regions after applying service and region "
+ "filters.",
+ svc_name,
+ )
else:
- target_regions = available_regions
+ target_regions = available_regions
else:
- target_regions = [None]
- svc_descriptors[svc_name] = {'regions': target_regions}
+ target_regions = [None]
+ svc_descriptors[svc_name] = {"regions": target_regions}
- LOGGER.debug('[%s] Service region(s) to inspect: %s.',
- svc_name,
- ', '.join(target_regions if available_regions else ['global']))
+ LOGGER.debug(
+ "[%s] Service region(s) to inspect: %s.",
+ svc_name,
+ ", ".join(target_regions if available_regions else ["global"]),
+ )
operations = []
if target_regions:
- for svc_op in api_model[svc_name]['ops']:
+ for svc_op in api_model[svc_name]["ops"]:
if op_blacklist_parser.is_blacklisted(svc_name, svc_op):
- LOGGER.debug('[%s] Excluding blacklisted API "%s".', svc_name, svc_op)
+ LOGGER.debug(
+ '[%s] Excluding blacklisted API "%s".', svc_name, svc_op
+ )
else:
operations.append(svc_op)
if not operations:
- LOGGER.warning('[%s] No operations to invoke for specified regions.', svc_name)
+ LOGGER.warning(
+ "[%s] No operations to invoke for specified regions.", svc_name
+ )
LOGGER.info(
- '[%s] service summary - %d API(s), %d region(s).',
+ "[%s] service summary - %d API(s), %d region(s).",
svc_name,
len(operations),
- len(target_regions))
- svc_descriptors[svc_name]['ops'] = operations
+ len(target_regions),
+ )
+ svc_descriptors[svc_name]["ops"] = operations
return svc_descriptors
+
def build_api_model():
"""Build a model of the available API.
@@ -197,29 +235,36 @@ def build_api_model():
"""
boto_session = botocore.session.get_session()
- LOGGER.debug('Building service list.')
+ LOGGER.debug("Building service list.")
available_services = frozenset(boto_session.get_available_services())
svc_descriptors = {}
for svc_name in available_services:
# validate regions
available_regions = frozenset(boto_session.get_available_regions(svc_name))
- svc_descriptors[svc_name] = {'regions': available_regions}
+ svc_descriptors[svc_name] = {"regions": available_regions}
if available_regions:
LOGGER.debug(
- '[%s] Available service region(s): %s.',
+ "[%s] Available service region(s): %s.",
svc_name,
- ', '.join(available_regions))
+ ", ".join(available_regions),
+ )
else:
LOGGER.warning(
- '[%s] Unable to obtain a valid region. Assuming service is region agnostic (i.e., '
- 'global).', svc_name)
+ "[%s] Unable to obtain a valid region. Assuming service is region agnostic (i.e., "
+ "global).",
+ svc_name,
+ )
operations = []
# get operation names from local service model files
- api_version = boto_session.get_config_variable('api_versions').get(svc_name, None)
- service_model = boto_session.get_service_model(svc_name, api_version=api_version)
+ api_version = boto_session.get_config_variable("api_versions").get(
+ svc_name, None
+ )
+ service_model = boto_session.get_service_model(
+ svc_name, api_version=api_version
+ )
# Filter out operations we don't care about. Currently we care about operations with
# names indicating a list- or describe-like action and the operation doesn't require
@@ -235,21 +280,22 @@ def build_api_model():
# no input shape
operations.append(svc_op)
- svc_descriptors[svc_name]['ops'] = operations
+ svc_descriptors[svc_name]["ops"] = operations
return svc_descriptors
+
def main(args):
setup_logging(args.debug)
if args.version:
- print aws_inventory.__version__
+ print(aws_inventory.__version__)
return
api_model = build_api_model()
if args.list_svcs:
- print '\n'.join(sorted(filter_services(api_model)))
+ print("\n".join(sorted(filter_services(api_model))))
return
# configure the debug level for opinel
@@ -257,20 +303,28 @@ def main(args):
# validate services against API mode #
- available_services = api_model.keys()
+ available_services = list(api_model.keys())
if args.services:
- invalid_included_services = [svc for svc in args.services if svc not in available_services]
+ invalid_included_services = [
+ svc for svc in args.services if svc not in available_services
+ ]
if invalid_included_services:
- raise EnvironmentError('Invalid service(s) specified: {}'.format(
- ', '.join(invalid_included_services))
+ raise EnvironmentError(
+ "Invalid service(s) specified: {}".format(
+ ", ".join(invalid_included_services)
+ )
)
if args.excluded_services:
- invalid_excluded_services = [svc for svc in args.excluded_services if svc not in available_services]
+ invalid_excluded_services = [
+ svc for svc in args.excluded_services if svc not in available_services
+ ]
if invalid_excluded_services:
- raise EnvironmentError('Invalid service(s) to exclude: {}'.format(
- ', '.join(invalid_excluded_services))
+ raise EnvironmentError(
+ "Invalid service(s) to exclude: {}".format(
+ ", ".join(invalid_excluded_services)
+ )
)
# validate regions against API model #
@@ -278,43 +332,55 @@ def main(args):
if args.regions:
available_regions = set()
for svc in available_services:
- available_regions.update(api_model[svc]['regions'])
- invalid_regions = [region for region in args.regions if region not in available_regions]
+ available_regions.update(api_model[svc]["regions"])
+ invalid_regions = [
+ region for region in args.regions if region not in available_regions
+ ]
if invalid_regions:
- raise EnvironmentError('Invalid region(s) specified: {}'.format(
- ', '.join(invalid_regions))
+ raise EnvironmentError(
+ "Invalid region(s) specified: {}".format(", ".join(invalid_regions))
)
# create the list of services to analyze
- services = filter_services(api_model,
- frozenset(args.services),
- frozenset(args.excluded_services))
+ services = filter_services(
+ api_model, frozenset(args.services), frozenset(args.excluded_services)
+ )
if not services:
- raise EnvironmentError('List of AWS services to be analyzed is empty.')
- LOGGER.debug('%d AWS service(s) to inspect: %s.', len(services), ', '.join(services))
+ raise EnvironmentError("List of AWS services to be analyzed is empty.")
+ LOGGER.debug(
+ "%d AWS service(s) to inspect: %s.", len(services), ", ".join(services)
+ )
- op_blacklist_parser = aws_inventory.blacklist.OpBlacklistParser(args.op_blacklist, api_model)
- service_descriptors = filter_operations(api_model, op_blacklist_parser, args.regions, services)
+ op_blacklist_parser = aws_inventory.blacklist.OpBlacklistParser(
+ args.op_blacklist, api_model
+ )
+ service_descriptors = filter_operations(
+ api_model, op_blacklist_parser, args.regions, services
+ )
if not service_descriptors:
- raise EnvironmentError('No operations to invoke for specifed AWS services and regions.')
+ raise EnvironmentError(
+ "No operations to invoke for specifed AWS services and regions."
+ )
ops_count = 0
for svc_name in service_descriptors:
- ops_count += (
- len(service_descriptors[svc_name]['ops']) *
- len(service_descriptors[svc_name]['regions'])
+ ops_count += len(service_descriptors[svc_name]["ops"]) * len(
+ service_descriptors[svc_name]["regions"]
)
if args.list_operations:
- print '[{}]\n{}\n'.format(
- svc_name,
- '\n'.join(service_descriptors[svc_name]['ops']) or '# NONE'
+ print(
+ "[{}]\n{}\n".format(
+ svc_name,
+ "\n".join(service_descriptors[svc_name]["ops"]) or "# NONE",
+ )
)
if args.list_operations:
- print 'Total operations to invoke: {}'.format(ops_count)
+ print("Total operations to invoke: {}".format(ops_count))
else:
- LOGGER.debug('Total operations to invoke: %d.', ops_count)
+ LOGGER.debug("Total operations to invoke: %d.", ops_count)
aws_inventory.invoker.ApiInvoker(args, service_descriptors, ops_count).start()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main(parse_args())
diff --git a/aws_inventory/__init__.py b/aws_inventory/__init__.py
index e2ad98d..37fd7e4 100644
--- a/aws_inventory/__init__.py
+++ b/aws_inventory/__init__.py
@@ -1,3 +1,3 @@
-import version
+from . import version
__version__ = version.__version__
diff --git a/aws_inventory/blacklist.py b/aws_inventory/blacklist.py
index 647c801..18d1f4f 100644
--- a/aws_inventory/blacklist.py
+++ b/aws_inventory/blacklist.py
@@ -3,23 +3,26 @@
account resources. Others may be too verbose or not relevant for some users of this tool.
"""
-import ConfigParser
+import configparser
import logging
LOGGER = logging.getLogger(__name__)
+
class BlacklistError(Exception):
"""Generic error for parsing the operations blacklist."""
+
pass
+
class OpBlacklistParser(object):
"""Parser for operations blacklist."""
def __init__(self, blacklist_fp, api_model):
self.blacklist_fp = blacklist_fp
self.api_model = api_model
- self._cfg_parser = ConfigParser.RawConfigParser(allow_no_value=True)
+ self._cfg_parser = configparser.RawConfigParser(allow_no_value=True)
self._cfg_parser.optionxform = str # case sensitive
self._cfg_parser.readfp(self.blacklist_fp)
@@ -28,16 +31,20 @@ def __init__(self, blacklist_fp, api_model):
err = False
for svc_name in self._cfg_parser.sections():
try:
- available_ops = set(api_model[svc_name]['ops'])
+ available_ops = set(api_model[svc_name]["ops"])
blacklist_ops = set(self._cfg_parser.options(svc_name))
invalid_ops = blacklist_ops - available_ops
if invalid_ops:
err = True
- LOGGER.error('[%s] Invalid operation(s): %s.', svc_name, ', '.join(invalid_ops))
+ LOGGER.error(
+ "[%s] Invalid operation(s): %s.",
+ svc_name,
+ ", ".join(invalid_ops),
+ )
except KeyError:
LOGGER.warning('Invalid service name "%s".', svc_name)
if err:
- raise BlacklistError('Failure to validate blacklist file.')
+ raise BlacklistError("Failure to validate blacklist file.")
def is_blacklisted(self, svc_name, op_name):
"""
diff --git a/aws_inventory/config.py b/aws_inventory/config.py
index 068a8fc..bac90bc 100644
--- a/aws_inventory/config.py
+++ b/aws_inventory/config.py
@@ -4,19 +4,18 @@
import re
import string
-
# max number of threads for invoking APIs of a service in a region
MAX_THREADS = multiprocessing.cpu_count() * 2
# RE to filter desired service operation names
-SVC_OPS_RE = re.compile(r'^(Describe|List).+')
+SVC_OPS_RE = re.compile(r"^(Describe|List).+")
-## some constants ##
+# some constants
# used to create JSON file (in "./gui/") for holding the GUI data
-GUI_DATA_FILENAME_TEMPLATE = string.Template('gui/aws_inventory_data-$profile.json')
+GUI_DATA_FILENAME_TEMPLATE = string.Template("gui/aws_inventory_data-$profile.json")
-## Network-related timeouts. See botocore/endpoint.py ##
+# Network-related timeouts. See botocore/endpoint.py
# number of seconds to wait for a connection to succeed. By default, botocore tries 4 times.
CLIENT_CONNECT_TIMEOUT = 10
# number of seconds to wait for a complete API response to be received
@@ -24,7 +23,7 @@
# region to use when service model says there are no regions, but creating a client still
# requires one
-DEFAULT_REGION = 'us-west-2'
+DEFAULT_REGION = "us-west-2"
-## maximum number of connections to keep in a connection pool
-CLIENT_MAX_POOL_CONNECTIONS = 25
\ No newline at end of file
+# maximum number of connections to keep in a connection pool
+CLIENT_MAX_POOL_CONNECTIONS = 25
diff --git a/aws_inventory/invoker.py b/aws_inventory/invoker.py
index 33de0ed..4c0687d 100644
--- a/aws_inventory/invoker.py
+++ b/aws_inventory/invoker.py
@@ -1,19 +1,19 @@
"""Abstraction for invoking AWS APIs (a.k.a. operations) and handling responses."""
import logging
-from Queue import Queue
+from queue import Queue
from threading import Thread
import botocore
from opinel.utils.credentials import read_creds
-import config
-import progress
-import store
-
+from . import config
+from . import progress
+from . import store
LOGGER = logging.getLogger(__name__)
+
class ApiInvoker(object):
"""Invoke APIs from GUI."""
@@ -31,17 +31,17 @@ def __init__(self, script_args, svc_descriptors, ops_count):
script_args.profile,
script_args.csv_credentials,
script_args.mfa_serial,
- script_args.mfa_code)
- if not self.credentials['AccessKeyId']:
- raise EnvironmentError('Failed to get AWS account credentials.')
- LOGGER.info('Using AWS credential key ID: %s.', self.credentials['AccessKeyId'])
+ script_args.mfa_code,
+ )
+ if not self.credentials["AccessKeyId"]:
+ raise EnvironmentError("Failed to get AWS account credentials.")
+ LOGGER.info("Using AWS credential key ID: %s.", self.credentials["AccessKeyId"])
def start(self):
"""Start the invoker with associated GUI. Wait for GUI to stop."""
self.progress_bar = progress.GuiProgressBar(
- 'AWS Inventory',
- self.ops_count,
- self._probe_services)
+ "AWS Inventory", self.ops_count, self._probe_services
+ )
self.progress_bar.mainloop()
def _probe_services(self):
@@ -50,19 +50,23 @@ def _probe_services(self):
client_config = botocore.config.Config(
connect_timeout=config.CLIENT_CONNECT_TIMEOUT,
max_pool_connections=config.CLIENT_MAX_POOL_CONNECTIONS,
- read_timeout=config.CLIENT_READ_TIMEOUT
+ read_timeout=config.CLIENT_READ_TIMEOUT,
)
session = botocore.session.get_session()
for svc_name in self.svc_descriptors:
- operations = self.svc_descriptors[svc_name]['ops']
- regions = self.svc_descriptors[svc_name]['regions']
+ operations = self.svc_descriptors[svc_name]["ops"]
+ regions = self.svc_descriptors[svc_name]["regions"]
# call each API across each region
- api_version = session.get_config_variable('api_versions').get(svc_name, None)
- params = {'svc_name': svc_name,
- 'dry_run': self.script_args.dry_run,
- 'store': self.store}
+ api_version = session.get_config_variable("api_versions").get(
+ svc_name, None
+ )
+ params = {
+ "svc_name": svc_name,
+ "dry_run": self.script_args.dry_run,
+ "store": self.store,
+ }
for region in regions:
self.progress_bar.update_svc_text(svc_name, region)
try:
@@ -70,38 +74,39 @@ def _probe_services(self):
svc_name,
region_name=region,
api_version=api_version,
- aws_access_key_id=self.credentials['AccessKeyId'],
- aws_secret_access_key=self.credentials['SecretAccessKey'],
- aws_session_token=self.credentials['SessionToken'],
- config=client_config
+ aws_access_key_id=self.credentials["AccessKeyId"],
+ aws_secret_access_key=self.credentials["SecretAccessKey"],
+ aws_session_token=self.credentials["SessionToken"],
+ config=client_config,
)
except botocore.exceptions.NoRegionError:
- LOGGER.warning('[%s][%s] Issue in region detection. Using default region.',
- config.DEFAULT_REGION,
- svc_name)
+ LOGGER.warning(
+ "[%s][%s] Issue in region detection. Using default region.",
+ config.DEFAULT_REGION,
+ svc_name,
+ )
client = session.create_client(
svc_name,
region_name=config.DEFAULT_REGION,
api_version=api_version,
- aws_access_key_id=self.credentials['AccessKeyId'],
- aws_secret_access_key=self.credentials['SecretAccessKey'],
- aws_session_token=self.credentials['SessionToken'],
- config=client_config
+ aws_access_key_id=self.credentials["AccessKeyId"],
+ aws_secret_access_key=self.credentials["SecretAccessKey"],
+ aws_session_token=self.credentials["SessionToken"],
+ config=client_config,
)
- params.update({'region': region, 'client': client})
+ params.update({"region": region, "client": client})
# schedule worker threads to invoke all APIs. Wait until all APIs have been
# invoked.
- thread_work(
- operations,
- self.svc_worker,
- params)
+ thread_work(operations, self.svc_worker, params)
self.progress_bar.update_progress(len(operations))
self.progress_bar.finish_work()
self.write_results()
except progress.LifetimeError as e:
LOGGER.debug(e)
- def write_results(self, response_dump_fp=None, exception_dump_fp=None, gui_data_fp=None):
+ def write_results(
+ self, response_dump_fp=None, exception_dump_fp=None, gui_data_fp=None
+ ):
"""Output the results, if not a dry run.
:param file response_dump_fp: file for responses
@@ -112,22 +117,22 @@ def write_results(self, response_dump_fp=None, exception_dump_fp=None, gui_data_
if response_dump_fp:
self.store.dump_response_store(response_dump_fp)
elif self.script_args.responses_dump:
- with open(self.script_args.responses_dump, 'wb') as out_fp:
+ with open(self.script_args.responses_dump, "wb") as out_fp:
self.store.dump_response_store(out_fp)
if exception_dump_fp:
self.store.dump_exception_store(exception_dump_fp)
elif self.script_args.exceptions_dump:
- with open(self.script_args.exceptions_dump, 'wb') as out_fp:
+ with open(self.script_args.exceptions_dump, "wb") as out_fp:
self.store.dump_exception_store(out_fp)
if self.script_args.verbose:
- print self.store.get_response_store()
+ print(self.store.get_response_store())
if gui_data_fp:
self.store.generate_data_file(gui_data_fp)
else:
- with open(self.script_args.gui_data_file, 'w') as out_fp:
+ with open(self.script_args.gui_data_file, "w") as out_fp:
self.store.generate_data_file(out_fp)
@staticmethod
@@ -137,9 +142,9 @@ def svc_worker(que, params):
:param Queue que: APIs to invoke
:param dict params: parameters to use for invoking API
"""
- svc_name = params['svc_name']
- region = params['region']
- storage = params['store']
+ svc_name = params["svc_name"]
+ region = params["region"]
+ storage = params["store"]
while True:
try:
svc_op = que.get()
@@ -150,29 +155,33 @@ def svc_worker(que, params):
# this is the way botocore does it. See botocore/__init__.py
py_op = botocore.xform_name(svc_op)
- LOGGER.debug('[%s][%s] Invoking API "%s". Python name "%s".',
- region,
- svc_name,
- svc_op,
- py_op)
-
- if not params['dry_run']:
- if params['client'].can_paginate(py_op):
- paginator = params['client'].get_paginator(py_op)
+ LOGGER.debug(
+ '[%s][%s] Invoking API "%s". Python name "%s".',
+ region,
+ svc_name,
+ svc_op,
+ py_op,
+ )
+
+ if not params["dry_run"]:
+ if params["client"].can_paginate(py_op):
+ paginator = params["client"].get_paginator(py_op)
response = paginator.paginate().build_full_result()
else:
- response = getattr(params['client'], py_op)()
+ response = getattr(params["client"], py_op)()
storage.add_response(svc_name, region, svc_op, response)
except Exception as e:
storage.add_exception(svc_name, region, svc_op, e)
LOGGER.exception(
'Unknown error while invoking API for service "%s" in region "%s".',
svc_name,
- region)
+ region,
+ )
finally:
que.task_done()
-#XXX: borrowed from opinel because their threading module is failing to load. Pretty much the example in the Queue docs
+
+# XXX: borrowed from opinel because their threading module is failing to load. Pretty much the example in the Queue docs
def thread_work(targets, function, params=None):
"""Thread worker creator.
@@ -197,4 +206,4 @@ def thread_work(targets, function, params=None):
que.join()
else:
- LOGGER.warning('No work to be done.')
+ LOGGER.warning("No work to be done.")
diff --git a/aws_inventory/progress.py b/aws_inventory/progress.py
index b92a36c..583d013 100644
--- a/aws_inventory/progress.py
+++ b/aws_inventory/progress.py
@@ -2,88 +2,96 @@
import collections
import threading
-import Tkinter as tk
-import tkMessageBox
-import ttk
+import tkinter as tk
+import tkinter.messagebox
+import tkinter.ttk
class LifetimeError(Exception):
"""Progress was interrupted (i.e., window closed or cancel button was pressed)."""
+
pass
-class GuiProgressBar(ttk.Frame):
+
+class GuiProgressBar(tkinter.ttk.Frame):
def __init__(self, title, work_count, work_func, *func_args):
- ttk.Frame.__init__(self, relief='ridge', borderwidth=2)
+ tkinter.ttk.Frame.__init__(self, relief="ridge", borderwidth=2)
self.work_count = work_count
self.worker_task = threading.Thread(target=work_func, args=func_args)
self.pending_stop = False
self.master.title(title)
- self.master.protocol('WM_DELETE_WINDOW', self._confirm_quit)
- self.pack(fill='both', expand=1)
+ self.master.protocol("WM_DELETE_WINDOW", self._confirm_quit)
+ self.pack(fill="both", expand=1)
self.widget_space = self._create_widgets()
def _create_widgets(self):
# storage for widgets so we don't pollute GUI app instance namespace
- widget_space = collections.namedtuple('WidgetSpace', [
- 'button_text',
- 'button',
- 'label_frame',
- 'label_text',
- 'label',
- 'progress_bar',
- 'status_label_text',
- 'status_label'
- ])
-
- button_text = tk.StringVar(value='Start')
- button = ttk.Button(self, textvariable=button_text, command=self._start)
+ widget_space = collections.namedtuple(
+ "WidgetSpace",
+ [
+ "button_text",
+ "button",
+ "label_frame",
+ "label_text",
+ "label",
+ "progress_bar",
+ "status_label_text",
+ "status_label",
+ ],
+ )
+
+ button_text = tk.StringVar(value="Start")
+ button = tkinter.ttk.Button(self, textvariable=button_text, command=self._start)
button.pack()
- label_frame = ttk.LabelFrame(self, text='Service:Region')
- label_frame.pack(fill='x')
+ label_frame = tkinter.ttk.LabelFrame(self, text="Service:Region")
+ label_frame.pack(fill="x")
label_text = tk.StringVar()
- label = ttk.Label(label_frame, anchor='w', textvariable=label_text)
- label.pack(fill='x')
-
+ label = tkinter.ttk.Label(label_frame, anchor="w", textvariable=label_text)
+ label.pack(fill="x")
- #XXX: add small fraction to max so progress bar doesn't wrap when work finishes
- progress_bar = ttk.Progressbar(
+ # XXX: add small fraction to max so progress bar doesn't wrap when work finishes
+ progress_bar = tkinter.ttk.Progressbar(
self,
- orient='horizontal',
- length=self.master.winfo_screenwidth()/5,
- mode='determinate',
- maximum=self.work_count+1e-10
+ orient="horizontal",
+ length=self.master.winfo_screenwidth() / 5,
+ mode="determinate",
+ maximum=self.work_count + 1e-10,
)
- progress_bar.pack(fill='both')
+ progress_bar.pack(fill="both")
- status_label_text = tk.StringVar(value='0 / {}'.format(self.work_count))
- status_label = ttk.Label(self, anchor='w', textvariable=status_label_text)
- status_label.pack(fill='x')
-
- return widget_space(button_text,
- button,
- label_frame,
- label_text,
- label,
- progress_bar,
- status_label_text,
- status_label)
+ status_label_text = tk.StringVar(value="0 / {}".format(self.work_count))
+ status_label = tkinter.ttk.Label(
+ self, anchor="w", textvariable=status_label_text
+ )
+ status_label.pack(fill="x")
+
+ return widget_space(
+ button_text,
+ button,
+ label_frame,
+ label_text,
+ label,
+ progress_bar,
+ status_label_text,
+ status_label,
+ )
def _confirm_quit(self):
- if tkMessageBox.askyesno(message='Quit?'):
+ if tkinter.messagebox.askyesno(message="Quit?"):
self.pending_stop = True
self.master.destroy()
def _confirm_cancel(self):
- if tkMessageBox.askyesno(message='Cancel?'):
+ if tkinter.messagebox.askyesno(message="Cancel?"):
self.pending_stop = True
- self.widget_space.button_text.set('Canceled')
- self.widget_space.button.state(['disabled'])
+ self.widget_space.button_text.set("Canceled")
+ self.widget_space.button.state(["disabled"])
def _start(self):
- self.widget_space.button_text.set('Cancel')
- self.widget_space.button['command'] = self._confirm_cancel
+ self.widget_space.button_text.set("Cancel")
+ self.widget_space.button["command"] = self._confirm_cancel
self.worker_task.start()
def update_progress(self, delta):
@@ -91,12 +99,13 @@ def update_progress(self, delta):
:param float delta: increment progress by some amount"""
if self.pending_stop:
- raise LifetimeError('User initiated stop.')
+ raise LifetimeError("User initiated stop.")
self.widget_space.progress_bar.step(delta)
- self.widget_space.status_label_text.set('{} / {}'.format(
- int(self.widget_space.progress_bar['value']),
- self.work_count
- ))
+ self.widget_space.status_label_text.set(
+ "{} / {}".format(
+ int(self.widget_space.progress_bar["value"]), self.work_count
+ )
+ )
def update_svc_text(self, svc_name, region):
"""Update text in status area of GUI.
@@ -104,9 +113,12 @@ def update_svc_text(self, svc_name, region):
:param str svc_name: service name
:param str region: region name
"""
- self.widget_space.label_text.set('{}:{}'.format(svc_name, region))
+ self.widget_space.label_text.set("{}:{}".format(svc_name, region))
def finish_work(self):
"""Update GUI when work is complete."""
- self.widget_space.button.state(['disabled'])
- self.widget_space.button_text.set('Finished')
+ self.widget_space.button.state(["disabled"])
+ exit_button = tkinter.ttk.Button(
+ self.widget_space.button, text="Finished", command=self.master.destroy
+ )
+ exit_button.pack(pady=20)
diff --git a/aws_inventory/store.py b/aws_inventory/store.py
index f8e703e..89259a1 100644
--- a/aws_inventory/store.py
+++ b/aws_inventory/store.py
@@ -11,12 +11,13 @@
import botocore
-import config
-import version
+from . import config
+from . import version
LOGGER = logging.getLogger(__name__)
+
class ResponseEncoder(json.JSONEncoder):
"""Encode responses from operations in order to serialize to JSON."""
@@ -25,6 +26,7 @@ def default(self, o):
return o.isoformat()
return super(ResponseEncoder, self).default(o)
+
class ResultStore(object):
"""Storage and serialization for responses and exceptions."""
@@ -32,8 +34,8 @@ def __init__(self, profile):
self.profile = profile
self._response_store = {} # {svc: {region: {svc_op: response}}}
self._exception_store = {} # {svc: {svc_op: {region: exception}}}
- self.run_date = time.strftime('%Y-%m-%d %H:%M:%S %Z')
- self.commandline = ' '.join(sys.argv)
+ self.run_date = time.strftime("%Y-%m-%d %H:%M:%S %Z")
+ self.commandline = " ".join(sys.argv)
self.version = version.__version__
def add_response(self, service, region, svc_op, resp):
@@ -79,7 +81,7 @@ def get_response_store(self):
:rtype: str
:return: serialized response store in JSON format
"""
- LOGGER.debug('Building the response store.')
+ LOGGER.debug("Building the response store.")
return json.dumps(self._response_store, cls=ResponseEncoder)
def dump_response_store(self, fp):
@@ -104,7 +106,7 @@ def generate_data_file(self, fp):
:param file fp: file to write to
"""
# format of data file for jsTree
- #[
+ # [
# {
# "text" : "Root node",
# "children" : [
@@ -112,76 +114,85 @@ def generate_data_file(self, fp):
# { "text" : "Child node 2" }
# ]
# }
- #]
+ # ]
def build_children(obj):
children = []
if isinstance(obj, dict):
- for key, val in obj.items():
+ for key, val in list(obj.items()):
child = build_children(val)
if isinstance(child, (dict, list, tuple)) and child:
- children.append({'text': key, 'children': child})
+ children.append({"text": key, "children": child})
else:
# leaf node
try:
- children.append({'text': u'{} = {}'.format(key, val)})
+ children.append({"text": "{} = {}".format(key, val)})
except UnicodeDecodeError:
# key or value is probably binary. For example, CloudTrail API ListPublicKeys
- children.append({'text': u'{} = {!r}'.format(key, val)})
+ children.append({"text": "{} = {!r}".format(key, val)})
elif isinstance(obj, (list, tuple)):
for i, val in enumerate(obj):
child = build_children(val)
if isinstance(child, (dict, list, tuple)) and child:
- children.append({'text': '[{:d}]'.format(i), 'children': child})
+ children.append({"text": "[{:d}]".format(i), "children": child})
else:
# leaf node
- children.append({'text': child})
+ children.append({"text": child})
else:
return obj
return children
- LOGGER.debug('Building the GUI data model.')
- data = build_children({'[inventory]': self._response_store})
+
+ LOGGER.debug("Building the GUI data model.")
+ data = build_children({"[inventory]": self._response_store})
# assign types to nodes so jsTree can handle them appropriately
- data[0]['type'] = 'root'
- data[0]['state'] = {'opened': True}
- for service in data[0]['children']:
- service['type'] = 'service'
- service['state'] = {'opened': True}
- for region in service['children']:
- region['type'] = 'region'
- region['state'] = {'opened': True}
+ data[0]["type"] = "root"
+ data[0]["state"] = {"opened": True}
+ for service in data[0]["children"]:
+ service["type"] = "service"
+ service["state"] = {"opened": True}
+ for region in service["children"]:
+ region["type"] = "region"
+ region["state"] = {"opened": True}
num_hidden_operations = 0
- for operation in region['children']:
- operation['type'] = 'operation'
+ for operation in region["children"]:
+ operation["type"] = "operation"
# add count of non empty response to operation name
try:
num_non_empty_responses = 0
- for response in operation['children']:
+ for response in operation["children"]:
try:
- if response['text'] == 'ResponseMetadata':
- response['type'] = 'response_metadata'
+ if response["text"] == "ResponseMetadata":
+ response["type"] = "response_metadata"
continue # ignore metadata nodes in count
- num_non_empty_responses += 1 if response['children'] else 0
+ num_non_empty_responses += (
+ 1 if response["children"] else 0
+ )
except KeyError:
# an empty response
pass
if num_non_empty_responses:
- operation['text'] += ' ({:d})'.format(num_non_empty_responses)
+ operation["text"] += " ({:d})".format(
+ num_non_empty_responses
+ )
else:
num_hidden_operations += 1
- operation['state'] = {"hidden": True}
+ operation["state"] = {"hidden": True}
except KeyError:
# no response
pass
- region['a_attr'] = {'title': '{:d} hidden operations'.format(num_hidden_operations)}
-
- out_obj = {'run_date': self.run_date,
- 'commandline': self.commandline,
- 'version': self.version,
- 'botocore_version': botocore.__version__,
- 'responses': data}
+ region["a_attr"] = {
+ "title": "{:d} hidden operations".format(num_hidden_operations)
+ }
+
+ out_obj = {
+ "run_date": self.run_date,
+ "commandline": self.commandline,
+ "version": self.version,
+ "botocore_version": botocore.__version__,
+ "responses": data,
+ }
LOGGER.debug('Writing the GUI data model to file "%s".', fp.name)
json.dump(out_obj, fp, cls=ResponseEncoder)
diff --git a/aws_inventory/version.py b/aws_inventory/version.py
index 10939f0..b3f4756 100644
--- a/aws_inventory/version.py
+++ b/aws_inventory/version.py
@@ -1 +1 @@
-__version__ = '0.1.2'
+__version__ = "0.1.2"
diff --git a/blog_writeup.html b/blog_writeup.html
index 3d73487..5a23e08 100644
--- a/blog_writeup.html
+++ b/blog_writeup.html
@@ -111,4 +111,4 @@
Roadmap
Open Source
You can find the source code hosted at https://github.com/nccgroup/aws-inventory. If you are interested in learning more about the AWS ecosystem, checkout this Awesome List.