diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 54d22aa..83eb24b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,208 +1,17 @@
 name: CI

-# yamllint disable-line rule:truthy
 on:
   push:
   pull_request: ~

-env:
-  CACHE_VERSION: 1
-  DEFAULT_PYTHON: 3.8
-  PRE_COMMIT_HOME: ~/.cache/pre-commit
-
 jobs:
-  # Separate job to pre-populate the base dependency cache
-  # This prevent upcoming jobs to do the same individually
-  prepare-base:
-    name: Prepare base dependencies
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: [3.8, 3.9, "3.10", "3.11"]
-    steps:
-      - name: Check out code from GitHub
-        uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        id: python
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Restore base Python virtual environment
-        id: cache-venv
-        uses: actions/cache@v3
-        with:
-          path: venv
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ hashFiles('pyproject.toml') }}
-          restore-keys: ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-
-      - name: Create Python virtual environment
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        run: |
-          python -m venv venv
-          . venv/bin/activate
-          pip install -U pip setuptools pre-commit
-          pip install -e '.[testing]'
-
-  pre-commit:
-    name: Prepare pre-commit environment
-    runs-on: ubuntu-latest
-    needs: prepare-base
-    steps:
-      - name: Check out code from GitHub
-        uses: actions/checkout@v3
-      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v4
-        id: python
-        with:
-          python-version: ${{ env.DEFAULT_PYTHON }}
-      - name: Restore base Python virtual environment
-        id: cache-venv
-        uses: actions/cache@v3
-        with:
-          path: venv
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ hashFiles('pyproject.toml') }}
-      - name: Fail job if Python cache restore failed
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        run: |
-          echo "Failed to restore Python virtual environment from cache"
-          exit 1
-      - name: Restore pre-commit environment from cache
-        id: cache-precommit
-        uses: actions/cache@v3
-        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
-          restore-keys: ${{ env.CACHE_VERSION}}-${{ runner.os }}-pre-commit-
-      - name: Install pre-commit dependencies
-        if: steps.cache-precommit.outputs.cache-hit != 'true'
-        run: |
-          . venv/bin/activate
-          pre-commit install-hooks
-
-  lint-pre-commit:
-    name: Check pre-commit
-    runs-on: ubuntu-latest
-    needs: pre-commit
-    steps:
-      - name: Check out code from GitHub
-        uses: actions/checkout@v3
-      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v4
-        id: python
-        with:
-          python-version: ${{ env.DEFAULT_PYTHON }}
-      - name: Restore base Python virtual environment
-        id: cache-venv
-        uses: actions/cache@v3
-        with:
-          path: venv
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ hashFiles('pyproject.toml') }}
-      - name: Fail job if Python cache restore failed
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        run: |
-          echo "Failed to restore Python virtual environment from cache"
-          exit 1
-      - name: Restore pre-commit environment from cache
-        id: cache-precommit
-        uses: actions/cache@v3
-        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
-      - name: Fail job if cache restore failed
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        run: |
-          echo "Failed to restore Python virtual environment from cache"
-          exit 1
-      - name: Run pre-commit
-        run: |
-          . venv/bin/activate
-          pre-commit run --all-files --show-diff-on-failure
-
-  pytest:
-    runs-on: ubuntu-latest
-    needs: prepare-base
-    strategy:
-      matrix:
-        python-version: [3.8, 3.9, "3.10", "3.11"]
-    name: >-
-      Run tests Python ${{ matrix.python-version }}
-    steps:
-      - name: Check out code from GitHub
-        uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        id: python
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Restore base Python virtual environment
-        id: cache-venv
-        uses: actions/cache@v3
-        with:
-          path: venv
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ hashFiles('pyproject.toml') }}
-      - name: Fail job if Python cache restore failed
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        run: |
-          echo "Failed to restore Python virtual environment from cache"
-          exit 1
-      - name: Register Python problem matcher
-        run: |
-          echo "::add-matcher::.github/workflows/matchers/python.json"
-      - name: Install Pytest Annotation plugin
-        run: |
-          . venv/bin/activate
-          # Ideally this should be part of our dependencies
-          # However this plugin is fairly new and doesn't run correctly
-          # on a non-GitHub environment.
-          pip install pytest-github-actions-annotate-failures
-      - name: Run pytest
-        run: |
-          . venv/bin/activate
-          pytest \
-            -qq \
-            --timeout=20 \
-            --durations=10 \
-            --cov zigpy_cli \
-            --cov-config pyproject.toml \
-            -o console_output_style=count \
-            -p no:sugar \
-            tests
-      - name: Upload coverage artifact
-        uses: actions/upload-artifact@v3
-        with:
-          name: coverage-${{ matrix.python-version }}
-          path: .coverage
-
-
-  coverage:
-    name: Process test coverage
-    runs-on: ubuntu-latest
-    needs: pytest
-    steps:
-      - name: Check out code from GitHub
-        uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        id: python
-        with:
-          python-version: ${{ env.DEFAULT_PYTHON }}
-      - name: Restore base Python virtual environment
-        id: cache-venv
-        uses: actions/cache@v3
-        with:
-          path: venv
-          key: ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ hashFiles('pyproject.toml') }}
-      - name: Fail job if Python cache restore failed
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        run: |
-          echo "Failed to restore Python virtual environment from cache"
-          exit 1
-      - name: Download all coverage artifacts
-        uses: actions/download-artifact@v3
-      - name: Combine coverage results
-        run: |
-          . venv/bin/activate
-          coverage combine coverage*/.coverage*
-          coverage report
-          coverage xml
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v3
+  shared-ci:
+    uses: zigpy/workflows/.github/workflows/ci.yml@main
+    with:
+      CODE_FOLDER: zigpy_cli
+      CACHE_VERSION: 3
+      PRE_COMMIT_CACHE_PATH: ~/.cache/pre-commit
+      PYTHON_VERSION_DEFAULT: 3.9.15
+      MINIMUM_COVERAGE_PERCENTAGE: 1
+    secrets:
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml
index c2bbc1b..3d931cf 100644
--- a/.github/workflows/publish-to-pypi.yml
+++ b/.github/workflows/publish-to-pypi.yml
@@ -1,26 +1,12 @@
-name: Publish distributions to PyPI and TestPyPI
+name: Publish distributions to PyPI
+
 on:
   release:
     types:
       - published

 jobs:
-  build-and-publish:
-    name: Build and publish distributions to PyPI and TestPyPI
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python 3.8
-        uses: actions/setup-python@v4
-        with:
-          python-version: 3.8
-      - name: Install wheel
-        run: >-
-          pip install wheel build
-      - name: Build
-        run: >-
-          python3 -m build
-      - name: Publish distribution to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
+  shared-build-and-publish:
+    uses: zigpy/workflows/.github/workflows/publish-to-pypi.yml@main
+    secrets:
+      PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
diff --git a/pyproject.toml b/pyproject.toml
index ebac912..f66f782 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,7 +18,7 @@ dependencies = [
     "coloredlogs",
     "scapy",
     "zigpy>=0.55.0",
-    "bellows>=0.35.1",
+    "bellows>=0.43.0",
     "zigpy-deconz>=0.21.0",
     "zigpy-xbee>=0.18.0",
     "zigpy-zboss>=1.1.0",
diff --git a/requirements_test.txt b/requirements_test.txt
new file mode 100644
index 0000000..2913163
--- /dev/null
+++ b/requirements_test.txt
@@ -0,0 +1,5 @@
+coverage[toml]
+pytest
+pytest-asyncio
+pytest-cov
+pytest-timeout
diff --git a/zigpy_cli/common.py b/zigpy_cli/common.py
index 9cd9fc3..ba4bfd5 100644
--- a/zigpy_cli/common.py
+++ b/zigpy_cli/common.py
@@ -1,4 +1,5 @@
 import click
+from zigpy.types import Channels


 class HexOrDecIntParamType(click.ParamType):
@@ -17,4 +18,18 @@ def convert(self, value, param, ctx):
             self.fail(f"{value!r} is not a valid integer", param, ctx)


+class ChannelsType(click.ParamType):
+    name = "channels"
+
+    def convert(self, value, param, ctx):
+        if isinstance(value, Channels):
+            return value
+
+        try:
+            return Channels.from_channel_list(map(int, value.split(",")))
+        except ValueError:
+            self.fail(f"{value!r} is not a valid channel list", param, ctx)
+
+
 HEX_OR_DEC_INT = HexOrDecIntParamType()
+CHANNELS_LIST = ChannelsType()
diff --git a/zigpy_cli/helpers.py b/zigpy_cli/helpers.py
new file mode 100644
index 0000000..ca578d5
--- /dev/null
+++ b/zigpy_cli/helpers.py
@@ -0,0 +1,77 @@
+import struct
+
+import zigpy.types as t
+
+
+class PcapWriter:
+    """Class responsible to write in PCAP format."""
+
+    def __init__(self, file):
+        """Initialize PCAP file and write global header."""
+        self.file = file
+
+    def write_header(self):
+        self.file.write(
+            struct.pack("
+
+    def write_packet(self, packet) -> None:
+        """Write a packet with its header and TLV metadata."""
+        timestamp_sec = int(packet.timestamp.timestamp())
+        timestamp_usec = int(packet.timestamp.microsecond)
+
+        sub_tlvs = b""
+
+        # RSSI
+        sub_tlvs += (
+            t.uint16_t(1).serialize()
+            + t.uint16_t(4).serialize()
+            + t.Single(packet.rssi).serialize()
+        )
+
+        # LQI
+        sub_tlvs += (
+            t.uint16_t(10).serialize()
+            + t.uint16_t(1).serialize()
+            + t.uint8_t(packet.lqi).serialize()
+            + b"\x00\x00\x00"
+        )
+
+        # Channel Assignment
+        sub_tlvs += (
+            t.uint16_t(3).serialize()
+            + t.uint16_t(3).serialize()
+            + t.uint16_t(packet.channel).serialize()
+            + t.uint8_t(0).serialize()  # page 0
+            + b"\x00"
+        )
+
+        # FCS type
+        sub_tlvs += (
+            t.uint16_t(0).serialize()
+            + t.uint16_t(1).serialize()
+            + t.uint8_t(1).serialize()  # FCS type 1
+            + b"\x00\x00\x00"
+        )
+
+        tlvs = b""
+
+        # TAP header: version:u8, reserved: u8, length: u16
+        tlvs += struct.pack("
+    if output.name == "<stdout>":
+        output = sys.stdout.buffer.raw
+
+    writer = ZigpyPcapWriter(output)
+    writer.write_header()
+
+    while True:
+        line = sys.stdin.readline()
+        data = json.loads(line)
+        packet = t.CapturedPacket(
+            timestamp=datetime.datetime.fromisoformat(data["timestamp"]),
+            rssi=data["rssi"],
+            lqi=data["lqi"],
+            channel=data["channel"],
+            data=bytes.fromhex(data["data"]),
+        )
+
+        writer.write_packet(packet)
diff --git a/zigpy_cli/radio.py b/zigpy_cli/radio.py
index 7926480..481878e 100644
--- a/zigpy_cli/radio.py
+++ b/zigpy_cli/radio.py
@@ -7,15 +7,20 @@
 import itertools
 import json
 import logging
+import random
+import sys

 import click
 import zigpy.state
 import zigpy.types
 import zigpy.zdo
 import zigpy.zdo.types
+from zigpy.application import ControllerApplication

 from zigpy_cli.cli import cli, click_coroutine
+from zigpy_cli.common import CHANNELS_LIST
 from zigpy_cli.const import RADIO_LOGGING_CONFIGS, RADIO_TO_PACKAGE, RADIO_TO_PYPI
+from zigpy_cli.helpers import PcapWriter

 LOGGER = logging.getLogger(__name__)
@@ -234,3 +239,80 @@ async def change_channel(app, channel):
     LOGGER.info("Current channel is %s", app.state.network_info.channel)

     await app.move_network_to_channel(channel)
+
+
+@radio.command()
+@click.pass_obj
+@click.option("-r", "--channel-hop-randomly", is_flag=True, type=bool, default=False)
+@click.option(
+    "-c",
+    "--channels",
+    type=CHANNELS_LIST,
+    default=zigpy.types.Channels.ALL_CHANNELS,
+)
+@click.option("-p", "--channel-hop-period", type=float, default=5.0)
+@click.option("-o", "--output", type=click.File("wb"), required=True)
+@click.option("--interleave", is_flag=True, type=bool, default=False)
+@click_coroutine
+async def packet_capture(
+    app,
+    channel_hop_randomly,
+    channels,
+    channel_hop_period,
+    output,
+    interleave,
+):
+    if output.name == "<stdout>" and not interleave:
+        output = sys.stdout.buffer.raw
+
+    if not channel_hop_randomly:
+        channels_iter = itertools.cycle(channels)
+    else:
+
+        def channels_iter_func():
+            while True:
+                yield random.choice(tuple(channels))
+
+        channels_iter = channels_iter_func()
+
+    if app._packet_capture is ControllerApplication._packet_capture:
+        raise click.ClickException("Packet capture is not supported by this radio")
+
+    await app.connect()
+
+    if not interleave:
+        writer = PcapWriter(output)
+        writer.write_header()
+
+    async with asyncio.TaskGroup() as tg:
+        channel_hopper_task = None
+
+        async def channel_hopper():
+            for channel in channels_iter:
+                await asyncio.sleep(channel_hop_period)
+                LOGGER.debug("Changing channel to %s", channel)
+                await app.packet_capture_change_channel(channel)
+
+        async for packet in app.packet_capture(channel=next(channels_iter)):
+            if channel_hopper_task is None:
+                channel_hopper_task = tg.create_task(channel_hopper())
+
+            LOGGER.debug("Got a packet %s", packet)
+
+            if not interleave:
+                writer.write_packet(packet)
+            else:
+                # To do line interleaving, encode the packets as JSON
+                output.write(
+                    json.dumps(
+                        {
+                            "timestamp": packet.timestamp.isoformat(),
+                            "rssi": packet.rssi,
+                            "lqi": packet.lqi,
+                            "channel": packet.channel,
+                            "data": packet.data.hex(),
+                        }
+                    ).encode("ascii")
+                    + b"\n"
+                )
+                output.flush()
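
Reviewer note (illustrative, not part of the diff): with `--interleave`, the new `radio packet-capture` command writes one JSON object per line carrying timestamp, rssi, lqi, channel and hex-encoded data, and the stdin reader shown earlier in this diff rebuilds `zigpy.types.CapturedPacket` objects from those lines before handing them to the PCAP writer. A minimal sketch of that decoding step, using only the JSON field names visible in the diff (the helper name `decode_interleaved_line` is made up for illustration):

    import datetime
    import json

    import zigpy.types as t


    def decode_interleaved_line(line: str) -> t.CapturedPacket:
        # One JSON object per line, as emitted by `radio packet-capture --interleave`
        data = json.loads(line)
        return t.CapturedPacket(
            timestamp=datetime.datetime.fromisoformat(data["timestamp"]),
            rssi=data["rssi"],
            lqi=data["lqi"],
            channel=data["channel"],
            data=bytes.fromhex(data["data"]),
        )

A plain capture to a file would look something like `zigpy radio <radio> <port> packet-capture -c 15,20,25 -o capture.pcap`, where the radio type and port are placeholders and the comma-separated channel list is parsed by the new ChannelsType parameter.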