diff --git a/__main__.py b/__main__.py index b8dce22..506e692 100644 --- a/__main__.py +++ b/__main__.py @@ -1,384 +1,238 @@ -import glob -import shutil -import signal -import sys +import argparse +import datetime +import logging import os +import sys import time +from dataclasses import dataclass +from logging.handlers import TimedRotatingFileHandler from time import sleep +from typing import Any -import psutil as psutil -import argparse +import psutil -from htpclient.binarydownload import BinaryDownload -from htpclient.chunk import Chunk +from htpclient import Agent +from htpclient.chunk import ChunkStatus from htpclient.files import Files from htpclient.generic_cracker import GenericCracker from htpclient.hashcat_cracker import HashcatCracker -from htpclient.hashlist import Hashlist -from htpclient.helpers import start_uftpd, file_get_contents -from htpclient.initialize import Initialize -from htpclient.jsonRequest import * -from htpclient.dicts import * -import logging - from htpclient.task import Task -CONFIG = None -binaryDownload = None - - -def run_health_check(): - global CONFIG, binaryDownload - - logging.info("Health check requested by server!") - logging.info("Retrieving health check settings...") - query = copy_and_set_token(dict_getHealthCheck, CONFIG.get_value('token')) - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get health check!") - sleep(5) - return - elif ans['response'] != 'SUCCESS': - logging.error("Error on getting health check: " + str(ans)) - sleep(5) - return - binaryDownload.check_version(ans['crackerBinaryId']) - check_id = ans['checkId'] - logging.info("Starting check ID " + str(check_id)) - - # write hashes to file - hash_file = open(CONFIG.get_value('hashlists-path') + "/health_check.txt", "w") - hash_file.write("\n".join(ans['hashes'])) - hash_file.close() - - # delete old file if necessary - if os.path.exists(CONFIG.get_value('hashlists-path') + "/health_check.out"): - 
os.unlink(CONFIG.get_value('hashlists-path') + "/health_check.out") - - # run task - cracker = HashcatCracker(ans['crackerBinaryId'], binaryDownload) - start = int(time.time()) - [states, errors] = cracker.run_health_check(ans['attack'], ans['hashlistAlias']) - end = int(time.time()) - - # read results - if os.path.exists(CONFIG.get_value('hashlists-path') + "/health_check.out"): - founds = file_get_contents(CONFIG.get_value('hashlists-path') + "/health_check.out").replace("\r\n", "\n").split("\n") - else: - founds = [] - if len(states) > 0: - num_gpus = len(states[0].get_temps()) - else: - errors.append("Faild to retrieve one successful cracker state, most likely due to failing.") - num_gpus = 0 - query = copy_and_set_token(dict_sendHealthCheck, CONFIG.get_value('token')) - query['checkId'] = check_id - query['start'] = start - query['end'] = end - query['numGpus'] = num_gpus - query['numCracked'] = len(founds) - 1 - query['errors'] = errors - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to send health check results!") - sleep(5) - return - elif ans['response'] != 'OK': - logging.error("Error on sending health check results: " + str(ans)) - sleep(5) - return - logging.info("Health check completed successfully!") +cur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -# Sets up the logging to stdout and to file with different styles and with the level as set in the config if available -def init_logging(args): - global CONFIG - - log_format = '[%(asctime)s] [%(levelname)-5s] %(message)s' - print_format = '%(message)s' - date_format = '%Y-%m-%d %H:%M:%S' - log_level = logging.INFO - logfile = open('client.log', "a", encoding="utf-8") - - logging.getLogger("requests").setLevel(logging.WARNING) - - CONFIG = Config() - if args.debug: - CONFIG.set_value('debug', True) - if CONFIG.get_value('debug'): - log_level = logging.DEBUG - logging.getLogger("requests").setLevel(logging.DEBUG) - 
logging.basicConfig(level=log_level, format=print_format, datefmt=date_format) - file_handler = logging.StreamHandler(logfile) - file_handler.setFormatter(logging.Formatter(log_format)) - logging.getLogger().addHandler(file_handler) - - -def init(args): - global CONFIG, binaryDownload - - if len(CONFIG.get_value('files-path')) == 0: - CONFIG.set_value('files-path', os.path.abspath('files')) - if len(CONFIG.get_value('crackers-path')) == 0: - CONFIG.set_value('crackers-path', os.path.abspath('crackers')) - if len(CONFIG.get_value('hashlists-path')) == 0: - CONFIG.set_value('hashlists-path', os.path.abspath('hashlists')) - if len(CONFIG.get_value('zaps-path')) == 0: - CONFIG.set_value('zaps-path', os.path.abspath('.')) - if len(CONFIG.get_value('preprocessors-path')) == 0: - CONFIG.set_value('preprocessors-path', os.path.abspath('preprocessors')) - - if args.files_path and len(args.files_path): - CONFIG.set_value('files-path', os.path.abspath(args.files_path)) - if args.crackers_path and len(args.crackers_path): - CONFIG.set_value('crackers-path', os.path.abspath(args.crackers_path)) - if args.hashlists_path and len(args.hashlists_path): - CONFIG.set_value('hashlists-path', os.path.abspath(args.hashlists_path)) - if args.zaps_path and len(args.zaps_path): - CONFIG.set_value('zaps-path', os.path.abspath(args.zaps_path)) - if args.preprocessors_path and len(args.preprocessors_path): - CONFIG.set_value('preprocessors-path', os.path.abspath(args.preprocessors_path)) - - logging.info("Starting client '" + Initialize.get_version() + "'...") - - # check if there are running hashcat.pid files around (as we assume that nothing is running anymore if the client gets newly started) - if os.path.exists(CONFIG.get_value('crackers-path')): - for root, dirs, files in os.walk(CONFIG.get_value('crackers-path')): - for folder in dirs: - if folder.isdigit() and os.path.exists(CONFIG.get_value('crackers-path') + "/" + folder + "/hashtopolis.pid"): - logging.info("Cleaning hashcat PID 
file from " + CONFIG.get_value('crackers-path') + "/" + folder) - os.unlink(CONFIG.get_value('crackers-path') + "/" + folder + "/hashtopolis.pid") - - session = Session(requests.Session()).s - session.headers.update({'User-Agent': Initialize.get_version()}) - - if CONFIG.get_value('proxies'): - session.proxies = CONFIG.get_value('proxies') - - if CONFIG.get_value('auth-user') and CONFIG.get_value('auth-password'): - session.auth = (CONFIG.get_value('auth-user'), CONFIG.get_value('auth-password')) - - # connection initialization - Initialize().run(args) - # download and updates - binaryDownload = BinaryDownload(args) - binaryDownload.run() - - # if multicast is set to run, we need to start the daemon - if CONFIG.get_value('multicast') and Initialize().get_os() == 0: - start_uftpd(Initialize().get_os_extension(), CONFIG) - - -def loop(): - global binaryDownload, CONFIG +@dataclass +class Arguments: + """Data class for arguments""" + + de_register: bool + version: bool + number_only: bool + debug: bool + idle: bool + +# Sets up the logging to stdout and to file with different styles and with the level as set in the config if available +def init_logging(debug: bool, log_dir: str = "logs"): + """Initialize logging""" + # Log formats + file_log_format = "%(asctime)s - [%(levelname)-5s] - %(message)s" + console_log_format = "[%(levelname)-5s] %(message)s" # Simpler format for console output + log_date_format = "%Y-%m-%d %H:%M:%S" + log_level = logging.DEBUG if debug else logging.INFO + + # Ensure the logs directory exists + os.makedirs(log_dir, exist_ok=True) + + # Set up a TimedRotatingFileHandler to rotate logs every 5 days + log_path = os.path.join(log_dir, "client.log") + file_handler = TimedRotatingFileHandler(log_path, when="D", interval=5, backupCount=5, encoding="utf-8", utc=True) + file_handler.setLevel(log_level) + file_handler.setFormatter(logging.Formatter(file_log_format, datefmt=log_date_format)) + + # Get the root logger and configure it + root_logger = 
logging.getLogger() + root_logger.setLevel(log_level) # Set log level for root logger + root_logger.addHandler(file_handler) # Add the file handler to the root logger + + # Console handler with a different format + console_handler = logging.StreamHandler() + console_handler.setLevel(log_level) + console_handler.setFormatter(logging.Formatter(console_log_format)) # Simpler format for console + root_logger.addHandler(console_handler) + + # Optionally, adjust the log level for specific libraries (e.g., 'requests') + logging.getLogger("requests").setLevel(logging.WARNING if not debug else logging.DEBUG) + + +def loop(agent: Agent, idle: bool): + """Main loop""" logging.debug("Entering loop...") - task = Task() - chunk = Chunk() - files = Files() - hashlist = Hashlist() - task_change = True - last_task_id = 0 + task = None + old_task = None + files = Files(agent) cracker = None + idle_check = 0 + idle_state = not idle + while True: - CONFIG.update() - files.deletion_check() # check if there are deletion orders from the server - if task.get_task() is not None: - last_task_id = task.get_task()['taskId'] - task.load_task() - if task.get_task_id() == -1: # get task returned to run a health check - run_health_check() - task.reset_task() + if idle and time.time() - idle_check >= 3600: + idle_check = time.time() + if psutil.cpu_percent() > 10: + idle_state = False + else: + idle_state = True + + if not idle_state: + logging.debug("System is not idle, waiting...") + sleep(60) continue - elif task.get_task() is None: - task_change = True + + if task is not None: + old_task = task + + sleep(5) # wait for 5 seconds before trying again to get a task to avoid spamming the server + try: + task = Task.get_task(agent) + except Exception as e: + logging.error("Failed to get task: %s", e) continue - else: - if task.get_task()['taskId'] is not last_task_id: - task_change = True - # try to download the needed cracker (if not already present) - if not 
binaryDownload.check_version(task.get_task()['crackerId']): - task_change = True - task.reset_task() + + if agent.last_update_check + datetime.timedelta(weeks=1) < datetime.datetime.now(): + agent.last_update_check = datetime.datetime.now() + logging.info("Checking for updates...") + agent.update_client() + + if agent.last_clean_up + datetime.timedelta(days=1) < datetime.datetime.now(): + logging.info("Cleaning up...") + agent.clean_up() + + if files.last_check + datetime.timedelta(minutes=5) < datetime.datetime.now(): + logging.info("Checking for files to clean up...") + files.clean_up() + + logging.info("Updating config...") + agent.update_config() + + if not task: + logging.warning("No task available") continue - # if prince is used, make sure it's downloaded (deprecated, as preprocessors are integrated generally now) - if 'usePrince' in task.get_task() and task.get_task()['usePrince']: - if not binaryDownload.check_prince(): - continue - # if preprocessor is used, make sure it's downloaded - if 'usePreprocessor' in task.get_task() and task.get_task()['usePreprocessor']: - if not binaryDownload.check_preprocessor(task): - continue - # check if all required files are present - if not files.check_files(task.get_task()['files'], task.get_task()['taskId']): - task.reset_task() + + if task.downloaded_files: + logging.debug("Retrying to get task to check if still current task...") continue - # download the hashlist for the task - if task_change and not hashlist.load_hashlist(task.get_task()['hashlistId']): - task.reset_task() + + if task.task_id == -1: + logging.info("Running health check...") + agent.run_health_check(task) continue - if task_change: # check if the client version is up-to-date and load the appropriate cracker - binaryDownload.check_client_version() - logging.info("Got cracker binary type " + binaryDownload.get_version()['name']) - if binaryDownload.get_version()['name'].lower() == 'hashcat': - cracker = HashcatCracker(task.get_task()['crackerId'], 
binaryDownload) + + if (old_task and task.task_id != old_task.task_id) or cracker is None or cracker.task.task_id != task.task_id: + if task.cracker.name == "hashcat": + cracker = HashcatCracker(agent, task) else: - cracker = GenericCracker(task.get_task()['crackerId'], binaryDownload) - # if it's a task using hashcat brain, we need to load the found hashes - if task_change and 'useBrain' in task.get_task() and task.get_task()['useBrain'] and not hashlist.load_found(task.get_task()['hashlistId'], task.get_task()['crackerId']): - task.reset_task() - continue - task_change = False - chunk_resp = chunk.get_chunk(task.get_task()['taskId']) - if chunk_resp == 0: - task.reset_task() - continue - elif chunk_resp == -1: - # measure keyspace - if not cracker.measure_keyspace(task, chunk): # failure case - task.reset_task() + cracker = GenericCracker(agent, task) + + logging.info("Getting chunk...") + chunk = task.get_chunk() + + if not chunk: + logging.warning("No chunk available") continue - elif chunk_resp == -3: - run_health_check() - task.reset_task() + + if chunk.status == ChunkStatus.KEYSPACE_REQUIRED: + logging.info("Measuring keyspace...") + cracker.measure_keyspace(chunk) continue - elif chunk_resp == -2: - # measure benchmark - logging.info("Benchmark task...") - result = cracker.run_benchmark(task.get_task()) + + if chunk.status == ChunkStatus.BENCHMARK: + logging.info("Running benchmark...") + result = cracker.run_benchmark(chunk) + if result == 0: sleep(10) - task.reset_task() - # some error must have occurred on benchmarking continue - # send result of benchmark - query = copy_and_set_token(dict_sendBenchmark, CONFIG.get_value('token')) - query['taskId'] = task.get_task()['taskId'] - query['result'] = result - query['type'] = task.get_task()['benchType'] - req = JsonRequest(query) - ans = req.execute() - if ans is None: + + query: dict[str, Any] = { + "action": "sendBenchmark", + "taskId": task.task_id, + "result": result, + "type": task.benchmark_type, + } + 
+ response = agent.post(query) + + if response is None: logging.error("Failed to send benchmark!") sleep(5) - task.reset_task() continue - elif ans['response'] != 'SUCCESS': - logging.error("Error on sending benchmark: " + str(ans)) - sleep(5) - task.reset_task() + + if chunk.status == ChunkStatus.NORMAL: + logging.info("Running chunk...") + if chunk.length == 0: + agent.send_warning("Invalid chunk size (0) retrieved! Retrying...", task.task_id) continue - else: - logging.info("Server accepted benchmark!") + + cracker.run_chunk(chunk) + + if cracker.agent_stopped(): continue - # check if we have an invalid chunk - if chunk.chunk_data() is not None and chunk.chunk_data()['length'] == 0: - logging.error("Invalid chunk size (0) retrieved! Retrying...") - task.reset_task() - continue - # run chunk - logging.info("Start chunk...") - cracker.run_chunk(task.get_task(), chunk.chunk_data(), task.get_preprocessor()) - if cracker.agent_stopped(): - # if the chunk was aborted by a stop from the server, we need to ask for a task again first - task.reset_task() - task_change = True - binaryDownload.check_client_version() - - -def de_register(): - global CONFIG - - logging.info("De-registering client..") - query = copy_and_set_token(dict_deregister, CONFIG.get_value('token')) - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("De-registration failed!") - elif ans['response'] != 'SUCCESS': - logging.error("Error on de-registration: " + str(ans)) - else: - logging.info("Successfully de-registered!") - # cleanup - dirs = [CONFIG.get_value('crackers-path'), CONFIG.get_value('preprocessors-path'), CONFIG.get_value('hashlists-path'), CONFIG.get_value('files-path')] - files = ['config.json', '7zr.exe', '7zr'] - for file in files: - if os.path.exists(file): - os.unlink(file) - for directory in dirs: - if os.path.exists(directory): - shutil.rmtree(directory) - r = glob.glob(CONFIG.get_value('zaps-path') + '/hashlist_*') - for i in r: - shutil.rmtree(i) - 
logging.info("Cleanup finished!") +def argument_parser() -> Arguments: + """Parse arguments""" + parser = argparse.ArgumentParser( + description="Hashtopolis Client v" + Agent.get_version_number(), prog="python3 hashtopolis.zip" + ) + parser.add_argument( + "--de-register", action="store_true", help="client should automatically de-register from server now" + ) + parser.add_argument("--version", action="store_true", help="show version information") + parser.add_argument("--number-only", action="store_true", help="when using --version show only the number") + parser.add_argument("--debug", "-d", action="store_true", help="enforce debugging output") + parser.add_argument("--idle", action="store_true", help="run in idle mode (only when machine is idle)") + + args = parser.parse_args() + return Arguments( + de_register=args.de_register, + version=args.version, + number_only=args.number_only, + debug=args.debug, + idle=args.idle, + ) if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Hashtopolis Client v' + Initialize.get_version_number(), prog='python3 hashtopolis.zip') - parser.add_argument('--de-register', action='store_true', help='client should automatically de-register from server now') - parser.add_argument('--version', action='store_true', help='show version information') - parser.add_argument('--number-only', action='store_true', help='when using --version show only the number') - parser.add_argument('--disable-update', action='store_true', help='disable retrieving auto-updates of the client from the server') - parser.add_argument('--debug', '-d', action='store_true', help='enforce debugging output') - parser.add_argument('--voucher', type=str, required=False, help='voucher to use to automatically register') - parser.add_argument('--url', type=str, required=False, help='URL to Hashtopolis client API') - parser.add_argument('--cert', type=str, required=False, help='Client TLS cert bundle for Hashtopolis client API') - 
parser.add_argument('--files-path', type=str, required=False, help='Use given folder path as files location') - parser.add_argument('--crackers-path', type=str, required=False, help='Use given folder path as crackers location') - parser.add_argument('--hashlists-path', type=str, required=False, help='Use given folder path as hashlists location') - parser.add_argument('--preprocessors-path', type=str, required=False, help='Use given folder path as preprocessors location') - parser.add_argument('--zaps-path', type=str, required=False, help='Use given folder path as zaps location') - parser.add_argument('--cpu-only', action='store_true', help='Force client to register as CPU only and also only reading out CPU information') - args = parser.parse_args() + args = argument_parser() + init_logging(args.debug) + logging.debug("Starting client with arguments: %s", args) if args.version: if args.number_only: - print(Initialize.get_version_number()) + logging.info(Agent.get_version_number()) else: - print(Initialize.get_version()) + logging.info(Agent.get_version()) sys.exit(0) + agent = Agent(cur_dir) + if args.de_register: - init_logging(args) - session = Session(requests.Session()).s - session.headers.update({'User-Agent': Initialize.get_version()}) - de_register() + agent.de_register() sys.exit(0) + if agent.is_running(): + logging.error("There is already a hashtopolis agent running in this directory!") + sys.exit(-1) + try: - init_logging(args) - - # check if there is a lock file and check if this pid is still running hashtopolis - if os.path.exists("lock.pid") and os.path.isfile("lock.pid"): - pid = file_get_contents("lock.pid") - logging.info("Found existing lock.pid, checking if python process is running...") - if pid.isdigit() and psutil.pid_exists(int(pid)): - try: - command = psutil.Process(int(pid)).cmdline()[0].replace('\\', '/').split('/') - print(command) - if str.startswith(command[-1], "python"): - logging.fatal("There is already a hashtopolis agent running in 
this directory!") - sys.exit(-1) - except Exception: - # if we fail to determine the cmd line we assume that it's either not running anymore or another process (non-hashtopolis) - pass - logging.info("Ignoring lock.pid file because PID is not existent anymore or not running python!") - - # create lock file - with open("lock.pid", 'w') as f: - f.write(str(os.getpid())) - f.close() - - init(args) - loop() + loop(agent, args.idle) except KeyboardInterrupt: + agent.send_warning("Client was stopped by user") logging.info("Exiting...") # if lock file exists, remove if os.path.exists("lock.pid"): - os.unlink("lock.pid") + os.remove("lock.pid") sys.exit() + except Exception as e: + print(f"Client crashed: {e}") + agent.send_error(f"Client crashed: {e}") diff --git a/build.sh b/build.sh index 59ea817..2f98905 100755 --- a/build.sh +++ b/build.sh @@ -8,10 +8,10 @@ fi count=$(git log $(git describe --tags --abbrev=0)..HEAD --oneline | wc -l) if [ ${count} \> 0 ]; then - sed -i -E 's/return "([0-9]+)\.([0-9]+)\.([0-9]+)"/return "\1.\2.\3.'$count'"/g' htpclient/initialize.py + sed -i -E 's/return "([0-9]+)\.([0-9]+)\.([0-9]+)"/return "\1.\2.\3.'$count'"/g' htpclient/__init__.py fi; zip -r hashtopolis.zip __main__.py htpclient -x "*__pycache__*" if [ ${count} \> 0 ]; then - sed -i -E 's/return "([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)"/return "\1.\2.\3"/g' htpclient/initialize.py + sed -i -E 's/return "([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)"/return "\1.\2.\3"/g' htpclient/__init__.py fi; \ No newline at end of file diff --git a/htpclient/__init__.py b/htpclient/__init__.py index e69de29..edd2b08 100644 --- a/htpclient/__init__.py +++ b/htpclient/__init__.py @@ -0,0 +1,1070 @@ +import datetime +import logging +import os +import platform +import shutil +import stat +import subprocess +import sys +import time +from typing import Any + +import psutil +import requests +import urllib3 +from unidecode import unidecode +from urllib3.exceptions import InsecureRequestWarning + +from 
htpclient.config import Config +from htpclient.cracker import Cracker +from htpclient.hashcat_cracker import HashcatCracker +from htpclient.operating_system import OperatingSystem +from htpclient.utils import download_file, file_get_content, file_set_content, replace_double_space + +# Disable SSL warnings +urllib3.disable_warnings(InsecureRequestWarning) + +VERSION_NUMBER = "0.8.0" +VERSION_NAME = "s3-python-" + VERSION_NUMBER + + +class Agent: + """The agent class""" + + def __init__(self, base_dir: str): + self.base_dir = base_dir + self.config = Config(base_dir) + self.last_update_check = datetime.datetime.now() + self.last_clean_up = datetime.datetime.now() + self.send_errors: dict[str, int] = {} + self.send_warnings: dict[str, int] = {} + self.default_error_task = self.config.get_value("default-error-task") + self.agent_id = self.config.get_value("agent-id") + self.api_key = self.config.get_value("api-key") + self.__session = self.__start_session() + self.__url = self.__get_url() + + self.__check_access() + self.__register() + self.__update_device_information() + self.__login() + self.__download_utils() + self.clean_up(start=True) + + self.update_client() + + if self.config.get_value("multicast"): + self.start_uftpd() + + @staticmethod + def get_version(): + """Get the version of the agent""" + return VERSION_NAME + + @staticmethod + def get_version_number(): + """Get the version number of the agent""" + return VERSION_NUMBER + + @property + def operating_system(self): + """Get the operating system of the agent""" + return self.__get_os() + + @operating_system.setter + def operating_system(self, value: Any): + raise AttributeError("Property 'operating_system' is read-only") + + def __start_session(self): + """Start a session with the server.""" + session = requests.Session() + session.headers.update({"User-Agent": Agent.get_version()}) + verify = self.config.get_value("verify-request") + session.verify = verify if isinstance(verify, bool) else True + + cert = 
self.config.get_value("cert") + + if cert and isinstance(cert, str): + session.cert = cert + logging.debug("Using certificate: %s", cert) + + proxies = self.config.get_value("proxies") + + if proxies and isinstance(proxies, dict): + session.proxies = proxies + logging.debug("Using proxies: %s", proxies) + + auth_user = self.config.get_value("auth-user") + auth_password = self.config.get_value("auth-password") + + if auth_user and auth_password and isinstance(auth_user, str) and isinstance(auth_password, str): + session.auth = (auth_user, auth_password) + logging.debug("Using authentication: %s", auth_user) + + return session + + def __get_url(self): + url = self.config.get_value("url") + + if not url or not isinstance(url, str): + logging.error("URL is not set in the configuration file.") + raise ValueError("URL is not set in the configuration file.") + + logging.debug("Using URL: %s", url) + + return url + + def __get_os(self): + operating_system = platform.system() + + try: + return OperatingSystem.get_by_platform_name(operating_system) + except ValueError as e: + raise ValueError("It seems your operating system is not supported.") from e + + def __check_access(self): + query = {"action": "testConnection"} + response = self.post(query) + + if response is None: + raise ConnectionError("Could not connect to the server.") + + logging.info("Connection to server successful.") + + def __register(self): + token = self.config.get_value("token") + + if token and isinstance(token, str): + return + + query: dict[str, Any] = { + "action": "register", + "voucher": self.config.get_value("voucher"), + "name": self.config.get_value("name"), + } + + if self.config.get_value("cpu-only"): + query["cpu-only"] = True + + response = self.post(query, False) + + if response is None: + raise ConnectionError("Could not register to the server.") + + if not response["token"]: + logging.error("Could not register to the server.") + raise ConnectionError("Could not register to the server.") + 
+ token = response["token"] + self.config.set_value("voucher", "") + self.config.set_value("token", token) + logging.info("Successfully registered to the server.") + + def __update_device_information(self): + query: dict[str, Any] = { + "action": "updateInformation", + "uid": self.config.get_value("uuid"), + "os": self.operating_system.value, + "devices": self.__get_devices(), + } + + response = self.post(query) + + if response is None: + return + + logging.info("Successfully updated device information.") + + def __login(self): + query = { + "action": "login", + "clientSignature": Agent.get_version(), + } + + response = self.post(query) + + if response is None: + raise ConnectionError("Could not login to the server.") + + logging.info("Successfully logged in to the server.") + + if response.get("server-version", None): + logging.info("Server version: %s", response["server-version"]) + + if response.get("multicastEnabled", False): + logging.info("Multicast enabled on server.") + + if self.operating_system != OperatingSystem.LINUX: + self.send_warning("Multicast is only supported on Linux.") + return + + self.config.set_value("multicast", True) + + def __download_utils(self): + seven_zip_path = os.path.join(self.base_dir, "7zr" + OperatingSystem.get_extension(self.operating_system)) + uftpd_path = os.path.join(self.base_dir, "uftpd" + OperatingSystem.get_extension(self.operating_system)) + + if not os.path.isfile(seven_zip_path): + query = {"action": "downloadBinary", "type": "7zr"} + response = self.post(query) + + if response is None: + return + + if not response["executable"]: + self.send_error(f"Getting 7zr failed: {response}") + return + + if not self.download(response["executable"], seven_zip_path): + return + + os.chmod(seven_zip_path, os.stat(seven_zip_path).st_mode | stat.S_IEXEC) + + if not os.path.isfile(uftpd_path) and self.config.get_value("multicast"): + query = {"action": "downloadBinary", "type": "uftpd"} + response = self.post(query) + + if response 
is None: + return + + if not response["executable"]: + self.send_error(f"Getting uftpd failed: {response}") + return + + if not self.download(response["executable"], uftpd_path): + return + + os.chmod(uftpd_path, os.stat(uftpd_path).st_mode | stat.S_IEXEC) + + def __get_devices(self): + devices: list[str] = [] + cpu_only = self.config.get_value("cpu-only") + + if not isinstance(cpu_only, bool): + cpu_only = False + + if self.operating_system == OperatingSystem.WINDOWS: + devices.extend(self.__get_windows_devices(cpu_only)) + elif self.operating_system == OperatingSystem.LINUX: + devices.extend(self.__get_linux_devices(cpu_only)) + elif self.operating_system == OperatingSystem.MAC: + devices.extend(self.__get_mac_devices(cpu_only)) + else: + self.send_error("Operating system not supported.") + raise ValueError("Operating system not supported.") + + return devices + + def __get_windows_devices(self, cpu_only: bool): + devices: list[str] = [] + + platform_release = platform.uname().release + if platform_release == "" or int(platform_release) >= 10: + try: + output = ( + subprocess.check_output( + "powershell -Command Get-CimInstance Win32_Processor | Select-Object -ExpandProperty Name", + shell=True, + ) + .decode(errors="replace") + .splitlines() + ) + lines = [line.strip() for line in output if line.strip() and line.strip() != "Name"] + devices.extend(lines) + except Exception: + self.send_warning("Could not get CPU information.") + else: + try: + output = subprocess.check_output("wmic cpu get name", shell=True).decode(errors="replace").splitlines() + lines = [line.strip() for line in output if line.strip() and line.strip() != "Name"] + devices.extend(lines) + except Exception: + self.send_warning("Could not get CPU information.") + + if not cpu_only: + if platform_release == "" or int(platform_release) >= 10: + try: + output = ( + subprocess.check_output( + "powershell -Command Get-CimInstance Win32_VideoController | Select-Object -ExpandProperty" + " Name", + 
shell=True, + ) + .decode(errors="replace") + .splitlines() + ) + lines = [line.strip() for line in output if line.strip() and line.strip() != "Name"] + devices.extend(lines) + except Exception: + self.send_warning("Could not get GPU information.") + else: + try: + output = ( + subprocess.check_output("wmic path win32_VideoController get name", shell=True) + .decode(errors="replace") + .splitlines() + ) + lines = [line.strip() for line in output if line.strip() and line.strip() != "Name"] + devices.extend(lines) + except Exception: + self.send_warning("Could not get GPU information.") + + return devices + + def __get_linux_devices(self, cpu_only: bool): + devices: list[str] = [] + + try: + output = subprocess.check_output("cat /proc/cpuinfo", shell=True).decode(errors="replace").splitlines() + lines = [ + replace_double_space(line.split(":", 1)[1].strip()) + for line in output + if line.strip() and (line.startswith("model name") or line.startswith("physical id")) + ] + + paired_lines = [f"{lines[i + 1]}:{lines[i]}" for i in range(0, len(lines), 2)] + names = [line.split(":", 1)[1].replace("\t", " ") for line in sorted(set(paired_lines))] + devices.extend(names) + except Exception: + self.send_warning("Could not get CPU information.") + + if not cpu_only: + try: + subprocess.check_output("lspci", shell=True) + except Exception: + try: + subprocess.check_output("sudo apt-get install pciutils", shell=True) + except Exception: + self.send_warning("Could not install pciutils.") + return devices + + try: + output = ( + subprocess.check_output("lspci | grep -E 'VGA compatible controller|3D controller'", shell=True) + .decode(errors="replace") + .splitlines() + ) + lines = [line.split(" ", 1)[1].split(":")[1].strip() for line in output if line.strip()] + devices.extend(lines) + except Exception: + self.send_warning("Could not get GPU information.") + + return devices + + def __get_mac_devices(self, cpu_only: bool): + devices: list[str] = [] + + try: + output = ( + 
subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True) + .decode(errors="replace") + .splitlines() + ) + lines = [line.strip() for line in output if line.strip()] + devices.extend(lines) + except Exception: + self.send_warning("Could not get CPU information.") + + if not cpu_only: + try: + output = ( + subprocess.check_output("system_profiler SPDisplaysDataType -detaillevel mini", shell=True) + .decode(errors="replace") + .splitlines() + ) + lines = [ + line.split(":")[1].strip() for line in output if line.strip() and "Chipset Model" in line.strip() + ] + devices.extend(lines) + except Exception: + self.send_warning("Could not get GPU information") + + return devices + + def __request( + self, + method: str, + json: dict[str, Any], + token_required: bool = True, + forced_timeout: int | None = None, + user: bool = False, + ): + try: + logging.debug("Doing request with method %s and data %s", method, json) + timeout = self.config.get_value("request-timeout") + + if not isinstance(timeout, int): + timeout = 30 + + if forced_timeout: + timeout = forced_timeout + + if token_required: + json["token"] = self.config.get_value("token") + + if user: + url = self.__url.replace("api/server.php", "api/user.php") + else: + url = self.__url + + allow_redirects = self.config.get_value("allow-redirects-request") + allow_redirects = allow_redirects if isinstance(allow_redirects, bool) else True + + response = self.__session.request(method, url, json=json, timeout=timeout, allow_redirects=allow_redirects) + + return self.__handle_response(response, json) + except Exception as e: + self.send_error(str(e)) + return None + + def __handle_response(self, response: requests.Response, json: dict[str, Any]): + uri = response.url + if response.status_code != 200: + status_code = response.status_code + self.send_error(f"Status code from server: {status_code} for URI: {uri} with input: {json}") + return None + + logging.debug(response.content) + try: + json_response = 
response.json() + + if not json_response["response"] in {"OK", "SUCCESS"}: + if json["action"] == "clientError": + return json_response + self.send_error(f"Error from server for URI: {uri}: input: {json} response: {json_response}") + return None + + return json_response + + except Exception as e: + self.send_error(f"Error occurred for URI: {uri}: {e}") + return None + + def post( + self, json: dict[str, Any], token_required: bool = True, forced_timeout: int | None = None, user: bool = False + ): + """Send a POST request to the server.""" + return self.__request("POST", json, token_required, forced_timeout, user) + + def get(self, json: dict[str, Any], token_required: bool = True, forced_timeout: int | None = None): + """Send a GET request to the server.""" + return self.__request("GET", json, token_required, forced_timeout) + + def put(self, json: dict[str, Any], token_required: bool = True, forced_timeout: int | None = None): + """Send a PUT request to the server.""" + return self.__request("PUT", json, token_required, forced_timeout) + + def download(self, url: str, output: str): + """Download a file from the server.""" + try: + logging.debug("Downloading %s to %s", url, output) + base_url = self.config.get_value("url").replace("api/server.php", "") # type: ignore + + if not url.startswith(base_url): # type: ignore + url = base_url + url # type: ignore + + response = self.__session.get(url, stream=True) # type: ignore + + if not response.status_code in [200, 301, 302]: + self.send_error(f"File download header reported wrong status code: {response.status_code}") + return False + + download_file(response, output) + return True + except Exception as e: + self.send_error("Download error while downloading %s: %s" % (url, e)) + return False + + def rsync(self, local_path: str): + """Download a file from the server via rsync.""" + logging.info('Getting file "%s" via rsync', local_path.split("/")[-1]) + remote_path = os.path.join(self.config.get_value("rsync-path"), 
os.path.basename(local_path)) # type: ignore + try: + subprocess.check_output(f"rsync -avzP --partial {remote_path} {local_path}", shell=True) + except Exception as e: + self.send_error(f"Rsync error while downloading {local_path}: {e}") + return False + + return True + + def clean_up(self, all_files: bool = False, start: bool = False): + """Clean up the agent directory.""" + if all_files: + for key in self.config.DIRECTORY_KEYS: + path = self.config.get_value(key) + + if not isinstance(path, str): + continue + + try: + if os.path.isdir(path): + shutil.rmtree(path) + except Exception as e: + logging.error("Could not remove directory %s: %s", path, e) + + for key in self.config.FILES_KEYS: + path = self.config.get_value(key) + + if not isinstance(path, str): + continue + + try: + if os.path.isfile(path): + os.remove(path) + except Exception as e: + logging.error("Could not remove file %s: %s", path, e) + + files = os.listdir(self.base_dir) + + for file in files: + file_path = os.path.join(self.base_dir, file) + + if os.path.isfile(file_path) and file != "hashtopolis.zip": + try: + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + days_not_accessed = self.config.get_value("file-remove-after-not-accessed-days") + + if not isinstance(days_not_accessed, int): + days_not_accessed = 14 + + not_accessed_time = datetime.timedelta(days=days_not_accessed).total_seconds() + + if self.config.get_value("auto-clean"): + files_dir = self.config.get_value("files-path") + + if isinstance(files_dir, str) and os.path.isdir(files_dir): + for file in os.listdir(files_dir): + file_path = os.path.join(files_dir, file) + + if not os.path.isfile(file_path): + continue + + file_stats = os.stat(file_path) + + if file_stats.st_size == 0: + try: + logging.info("Removing empty file %s", file_path) + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + if (time.time() - 
file_stats.st_atime) > not_accessed_time: + try: + logging.info( + "Removing file %s as it was not accessed for %s seconds", + file_path, + time.time() - file_stats.st_atime, + ) + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + hashlist_files = self.config.get_value("hashlists-path") + + if isinstance(hashlist_files, str) and os.path.isdir(hashlist_files): + for file in os.listdir(hashlist_files): + file_path = os.path.join(hashlist_files, file) + + if not os.path.isfile(file_path): + continue + + file_stats = os.stat(file_path) + + if file_stats.st_size == 0: + try: + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + if (time.time() - file_stats.st_atime) > not_accessed_time: + try: + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + crackers_files = self.config.get_value("crackers-path") + + if isinstance(crackers_files, str) and os.path.isdir(crackers_files): + for file in os.listdir(crackers_files): + file_path = os.path.join(crackers_files, file) + + if os.path.isdir(file_path): + continue + + file_stats = os.stat(file_path) + + if file_stats.st_size == 0: + try: + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + if (time.time() - file_stats.st_atime) > not_accessed_time: + try: + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + preprocessor_files = self.config.get_value("preprocessors-path") + + if isinstance(preprocessor_files, str) and os.path.isdir(preprocessor_files): + for file in os.listdir(preprocessor_files): + file_path = os.path.join(preprocessor_files, file) + + if not os.path.isfile(file_path): + continue + + file_stats = os.stat(file_path) + + if file_stats.st_size == 0: + try: + os.remove(file_path) + except Exception as e: + 
logging.error("Could not remove file %s: %s", file_path, e) + + # Delete when file is not accessed for 30 days + if (time.time() - file_stats.st_atime) > not_accessed_time: + try: + os.remove(file_path) + except Exception as e: + logging.error("Could not remove file %s: %s", file_path, e) + + zaps_files = self.config.get_value("zaps-path") + + if isinstance(zaps_files, str) and (os.path.isdir(zaps_files) or zaps_files == ""): + for file in os.listdir(zaps_files): + file_path = os.path.join(zaps_files, file) + + if not os.path.isdir(file_path): + continue + + if not "hashlist_" in file: + continue + + if len(os.listdir(file_path)) == 0: + try: + shutil.rmtree(file_path) + except Exception as e: + logging.error("Could not remove directory %s: %s", file_path, e) + + if start: + # Delete hashtopolis.pid files for all crackers as the agent is started again + crackers_path = self.config.get_value("crackers-path") + + if isinstance(crackers_path, str) and os.path.isdir(crackers_path): + for file_or_dir in os.listdir(crackers_path): + file_or_dir_path = os.path.join(crackers_path, file_or_dir) + + if not os.path.isdir(file_or_dir_path) or not file_or_dir.isdigit(): + continue + + hashtopolis_file = os.path.join(file_or_dir_path, "hashtopolis.pid") + + if not os.path.isfile(hashtopolis_file): + continue + + try: + os.remove(hashtopolis_file) + except Exception as e: + logging.error("Could not remove file %s: %s", hashtopolis_file, e) + + # Delete old.zip file if it exists + old_zip_file = os.path.join(self.base_dir, "old.zip") + if os.path.isfile(old_zip_file): + try: + os.remove(old_zip_file) + except Exception as e: + logging.error("Could not remove file %s: %s", old_zip_file, e) + + def de_register(self): + """De-register the agent from the server.""" + self.send_warning("De-registering agent.") + logging.debug("De-registering agent.") + query = {"action": "deregister"} + response = self.post(query) + + if response is None: + return + + logging.info("Successfully 
de-registered from the server.") + + self.clean_up(True) + + def is_running(self): + """Check if the agent is already running.""" + lock_file = os.path.join(self.base_dir, "lock.pid") + + if os.path.isfile(lock_file): + pid = file_get_content(lock_file) + logging.info("Found lock file with PID %s", pid) + + if pid.isdigit() and psutil.pid_exists(int(pid)): + logging.info("Process with PID %s is running.", pid) + command = psutil.Process(int(pid)).cmdline()[0].replace("\\", "/").split("/") + logging.info("Command: %s", command) + + if command[-1].startswith("python"): + return True + + logging.info("Process with PID %s is not running.", pid) + os.remove(lock_file) + + logging.info("No lock file found.") + file_set_content(lock_file, str(os.getpid())) + + return False + + def update_client(self): + """Check for client updates and download them.""" + logging.info("Checking for client updates.") + query = {"action": "checkClientVersion", "version": Agent.get_version_number(), "type": "python"} + response = self.post(query) + + if response is None: + return + + if response["version"] == "OK": + logging.info("Client is up to date.") + return + + url = response["url"] + + if not url: + self.send_warning("Got empty URL for client update.") + return + + logging.info("New client version available.") + logging.info("Downloading new client version.") + download_file = os.path.join(self.base_dir, "update.zip") + + if os.path.isfile(download_file): + os.remove(download_file) + + if not self.download(url, download_file): + return + + if not os.path.isfile(download_file) or not os.path.getsize(download_file): + self.send_error("Downloaded file is empty.") + return + + old_file = os.path.join(self.base_dir, "old.zip") + + os.rename(os.path.join(self.base_dir, "hashtopolis.zip"), old_file) + os.rename(download_file, os.path.join(self.base_dir, "hashtopolis.zip")) + + logging.info("Update received, restarting client.") + lock_file = os.path.join(self.base_dir, "lock.pid") + + if 
os.path.isfile(lock_file): + os.remove(lock_file) + + self.send_warning("Restarting client due to update.") + os.execl(sys.executable, sys.executable, "hashtopolis.zip") + sys.exit(0) + + def start_uftpd(self): + """Start the multicast daemon.""" + uftpd_path = os.path.join(self.base_dir, "uftpd" + OperatingSystem.get_extension(self.operating_system)) + + if not os.path.isfile(uftpd_path): + self.send_error("uftpd not found.") + return + + logging.info("Starting uftpd.") + + try: + subprocess.check_output("killall -s 9 uftpd", shell=True) + except subprocess.CalledProcessError: + pass + + multicast_device = self.config.get_value("multicast-device") + files_path = self.config.get_value("files-path") + log_path = self.config.get_value("log-path") + + if not isinstance(log_path, str) or not isinstance(files_path, str) or not isinstance(multicast_device, str): + self.send_error("Log path, files path or multicast device not set.") + return + + command_parts: list[str] = [ + uftpd_path, + "-I", + multicast_device, + "-D", + files_path, + "-L", + os.path.join(log_path, "multicast_" + str(time.time()) + ".log"), + ] + + command = " ".join(command_parts) + + subprocess.check_output(command, shell=True) + logging.info("Started multicast daemon.") + + def update_config(self): + """Update the agent configuration.""" + self.config.update() + + def send_error(self, error: str, task_id: int | None = None, chunk_id: int | None = None): + """Send an error to the server.""" + error = unidecode(error) + error_key = error + str(task_id) + str(chunk_id) + default_task = False + errors_to_ignore = self.config.get_value("error-ignored") + + if isinstance(errors_to_ignore, list): + if any(part in error for part in errors_to_ignore): + logging.warning("Error ignored: %s", error) + return + + if task_id is None and self.default_error_task is not None: + task_id = self.default_error_task # type: ignore + default_task = True + + if error_key in self.send_errors: + if int(time.time() - 
self.send_errors[error_key]) < self.config.get_value("same-error-timeout"): # type: ignore + logging.warning("Error already sent to server: %s", error) + return + + self.send_errors[error_key] = int(time.time()) + + if default_task: + message = f"Error: {error}" + else: + message = f"Error: {error} - Task: {task_id}" + + query: dict[str, str | int | None] = { + "action": "clientError", + "message": message, + "chunkId": chunk_id, + "taskId": task_id, + } + + if default_task and not self.agent_id is None and not task_id is None and not self.api_key is None: + query_assign: dict[str, Any] = { + "section": "task", + "request": "taskAssignAgent", + "agentId": self.agent_id, + "taskId": task_id, + "accessKey": self.api_key, + } + + query_unassign: dict[str, Any] = { + "section": "task", + "request": "taskUnassignAgent", + "agentId": self.agent_id, + "accessKey": self.api_key, + } + + query_assign_2: dict[str, Any] = { + "section": "task", + "request": "taskAssignAgent", + "agentId": self.agent_id, + "taskId": task_id, + "accessKey": self.api_key, + } + + self.post(query_assign, False, user=True) + self.post(query) + self.post(query_unassign, False, user=True) + self.post(query_assign_2, False, user=True) + + elif not default_task: + logging.error("Sent error to server: %s", error) + response = self.post(query) + + query_assign: dict[str, Any] = { + "section": "task", + "request": "taskAssignAgent", + "agentId": self.agent_id, + "taskId": self.default_error_task, + "accessKey": self.api_key, + } + + query_unassign: dict[str, Any] = { + "section": "task", + "request": "taskUnassignAgent", + "agentId": self.agent_id, + "accessKey": self.api_key, + } + + query_assign_2: dict[str, Any] = { + "section": "task", + "request": "taskAssignAgent", + "agentId": self.agent_id, + "taskId": task_id, + "accessKey": self.api_key, + } + + if response and response["response"] == "ERROR": + query_2: dict[str, Any] = { + "action": "clientError", + "message": "Warning: Task which caused the 
error is not assigned to agent.", + "chunkId": None, + "taskId": self.default_error_task, + } + + query["taskId"] = self.default_error_task # type: ignore + + self.post(query_assign, False, user=True) + self.post(query) + self.post(query_2) + self.post(query_unassign, False, user=True) + self.post(query_assign_2, False, user=True) + else: + logging.warning("Error not sent to server as no task could be assigned to the error.") + + def send_warning(self, warning: str, task_id: int | None = None, chunk_id: int | None = None): + """Send a warning to the server.""" + warning = unidecode(warning) + warning_key = warning + str(task_id) + str(chunk_id) + default_task = False + errors_to_ignore = self.config.get_value("error-ignored") + + if isinstance(errors_to_ignore, list): + if any(part in warning for part in errors_to_ignore): + logging.warning("Warning ignored: %s", warning) + return + + if task_id is None and self.default_error_task is not None: + task_id = self.default_error_task # type: ignore + default_task = True + + if warning_key in self.send_warnings: + if int(time.time() - self.send_warnings[warning_key]) < self.config.get_value("same-warning-timeout"): # type: ignore + logging.warning("Warning already sent to server: %s", warning) + return + + self.send_warnings[warning_key] = int(time.time()) + + if default_task: + message = f"Warning: {warning}" + else: + message = f"Warning: {warning} - Task: {task_id}" + + query: dict[str, str | int | None] = { + "action": "clientError", + "message": message, + "chunkId": chunk_id, + "taskId": task_id, + } + + if default_task and not self.agent_id is None and not task_id is None and not self.api_key is None: + query_assign: dict[str, Any] = { + "section": "task", + "request": "taskAssignAgent", + "agentId": self.agent_id, + "taskId": task_id, + "accessKey": self.api_key, + } + + query_unassign: dict[str, Any] = { + "section": "task", + "request": "taskUnassignAgent", + "agentId": self.agent_id, + "accessKey": 
self.api_key, + } + + self.post(query_assign, False, user=True) + self.post(query) + self.post(query_unassign, False, user=True) + + elif not default_task: + logging.warning("Sent warning to server: %s", warning) + response = self.post(query) + + query_assign: dict[str, Any] = { + "section": "task", + "request": "taskAssignAgent", + "agentId": self.agent_id, + "taskId": self.default_error_task, + "accessKey": self.api_key, + } + + query_unassign: dict[str, Any] = { + "section": "task", + "request": "taskUnassignAgent", + "agentId": self.agent_id, + "accessKey": self.api_key, + } + + if response and response["response"] == "ERROR": + query_2: dict[str, Any] = { + "action": "clientError", + "message": "Warning: Task which caused the warning is not assigned to agent.", + "chunkId": None, + "taskId": self.default_error_task, + } + + query["taskId"] = self.default_error_task # type: ignore + + self.post(query_assign, False, user=True) + self.post(query) + self.post(query_2) + self.post(query_unassign, False, user=True) + else: + logging.warning("Warning not sent to server as no task could be assigned to the warning.") + + def run_health_check(self, task: Any): + """Run a health check.""" + logging.info("Running health check.") + query = {"action": "getHealthCheck"} + response = self.post(query) + + if response is None: + return + + try: + cracker = Cracker(self, response["crackerBinaryId"]) + except Exception: + self.send_error("Getting cracker failed on health check.") + return + + check_id = response["checkId"] + logging.info("Starting health check with ID %s.", check_id) + + hashlists_path = self.config.get_value("hashlists-path") + + if not isinstance(hashlists_path, str): + self.send_error("Hashlists path not set.") + return + + health_hashlists_path = os.path.join(hashlists_path, "health_check.txt") + output_file = os.path.join(hashlists_path, "health_check.out") + + file_set_content(health_hashlists_path, "\n".join(response["hashes"])) + + if 
os.path.exists(output_file): # type: ignore + os.remove(output_file) # type: ignore + + if cracker.name == "hashcat": + cracker = HashcatCracker(self, task) + else: + self.send_error("Unknown cracker for health check.") + return + + start = int(time.time()) + [status, errors] = cracker.run_health_check( + response["attack"], response["hashlistAlias"], health_hashlists_path, output_file + ) + end = int(time.time()) + + if len(status) > 0: + num_gpus = len(status[0].get_temps()) + else: + errors.append("Failed to retrieve one successful cracker state, most likely due to failing.") + num_gpus = 0 + + query: dict[str, Any] = { + "action": "sendHealthCheck", + "checkId": check_id, + "start": start, + "end": end, + "numGpus": num_gpus, + "numCracked": len(file_get_content(output_file).splitlines()), # type: ignore + "errors": errors, + } + + self.post(query) diff --git a/htpclient/binarydownload.py b/htpclient/binarydownload.py deleted file mode 100644 index d6a487e..0000000 --- a/htpclient/binarydownload.py +++ /dev/null @@ -1,223 +0,0 @@ -import logging -import os.path -from pathlib import Path -import stat -import sys -from time import sleep - -from htpclient.config import Config -from htpclient.download import Download -from htpclient.initialize import Initialize -from htpclient.jsonRequest import JsonRequest -from htpclient.dicts import * - - -class BinaryDownload: - def __init__(self, args): - self.config = Config() - self.last_version = None - self.args = args - - def run(self): - self.check_client_version() - self.__check_utils() - - def get_version(self): - return self.last_version - - def check_client_version(self): - if self.args.disable_update: - return - if os.path.isfile("old.zip"): - os.unlink("old.zip") # cleanup old version - query = copy_and_set_token(dict_checkVersion, self.config.get_value('token')) - query['version'] = Initialize.get_version_number() - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Agent version 
check failed!") - elif ans['response'] != 'SUCCESS': - logging.error("Error from server: " + str(ans['message'])) - else: - if ans['version'] == 'OK': - logging.info("Client is up-to-date!") - else: - url = ans['url'] - if not url: - logging.warning("Got empty URL for client update!") - else: - logging.info("New client version available!") - if os.path.isfile("update.zip"): - os.unlink("update.zip") - Download.download(url, "update.zip") - if os.path.isfile("update.zip") and os.path.getsize("update.zip"): - if os.path.isfile("old.zip"): - os.unlink("old.zip") - os.rename("hashtopolis.zip", "old.zip") - os.rename("update.zip", "hashtopolis.zip") - logging.info("Update received, restarting client...") - if os.path.exists("lock.pid"): - os.unlink("lock.pid") - os.execl(sys.executable, sys.executable, "hashtopolis.zip") - exit(0) - - def __check_utils(self): - path = '7zr' + Initialize.get_os_extension() - if not os.path.isfile(path): - query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token')) - query['type'] = '7zr' - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get 7zr!") - sleep(5) - self.__check_utils() - elif ans['response'] != 'SUCCESS' or not ans['executable']: - logging.error("Getting 7zr failed: " + str(ans)) - sleep(5) - self.__check_utils() - else: - Download.download(ans['executable'], path) - os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC) - path = 'uftpd' + Initialize.get_os_extension() - if not os.path.isfile(path) and self.config.get_value('multicast'): - query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token')) - query['type'] = 'uftpd' - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get uftpd!") - sleep(5) - self.__check_utils() - elif ans['response'] != 'SUCCESS' or not ans['executable']: - logging.error("Getting uftpd failed: " + str(ans)) - sleep(5) - self.__check_utils() - else: - 
Download.download(ans['executable'], path) - os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC) - - def check_prince(self): - logging.debug("Checking if PRINCE is present...") - path = "prince/" - if os.path.isdir(path): # if it already exists, we don't need to download it - logging.debug("PRINCE is already downloaded") - return True - logging.debug("PRINCE not found, download...") - query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token')) - query['type'] = 'prince' - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to load prince!") - sleep(5) - return False - elif ans['response'] != 'SUCCESS' or not ans['url']: - logging.error("Getting prince failed: " + str(ans)) - sleep(5) - return False - else: - if not Download.download(ans['url'], "prince.7z"): - logging.error("Download of prince failed!") - sleep(5) - return False - if Initialize.get_os() == 1: - os.system("7zr" + Initialize.get_os_extension() + " x -otemp prince.7z") - else: - os.system("./7zr" + Initialize.get_os_extension() + " x -otemp prince.7z") - for name in os.listdir("temp"): # this part needs to be done because it is compressed with the main subfolder of prince - if os.path.isdir("temp/" + name): - os.rename("temp/" + name, "prince") - break - os.unlink("prince.7z") - os.rmdir("temp") - logging.debug("PRINCE downloaded and extracted") - return True - - def check_preprocessor(self, task): - logging.debug("Checking if requested preprocessor is present...") - path = Path(self.config.get_value('preprocessors-path'), str(task.get_task()['preprocessor'])) - query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token')) - query['type'] = 'preprocessor' - query['preprocessorId'] = task.get_task()['preprocessor'] - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to load preprocessor settings!") - sleep(5) - return False - elif ans['response'] != 'SUCCESS' or not ans['url']: - 
logging.error("Getting preprocessor settings failed: " + str(ans)) - sleep(5) - return False - else: - task.set_preprocessor(ans) - if os.path.isdir(path): # if it already exists, we don't need to download it - logging.debug("Preprocessor is already downloaded") - return True - logging.debug("Preprocessor not found, download...") - if not Download.download(ans['url'], "temp.7z"): - logging.error("Download of preprocessor failed!") - sleep(5) - return False - if Initialize.get_os() == 1: - os.system(f"7zr{Initialize.get_os_extension()} x -otemp temp.7z") - else: - os.system(f"./7zr{Initialize.get_os_extension()} x -otemp temp.7z") - for name in os.listdir("temp"): # this part needs to be done because it is compressed with the main subfolder of prince - if os.path.isdir(Path('temp', name)): - os.rename(Path('temp', name), path) - break - os.unlink("temp.7z") - os.rmdir("temp") - logging.debug("Preprocessor downloaded and extracted") - return True - - def check_version(self, cracker_id): - path = Path(self.config.get_value('crackers-path'), str(cracker_id)) - query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token')) - query['type'] = 'cracker' - query['binaryVersionId'] = cracker_id - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to load cracker!") - sleep(5) - return False - elif ans['response'] != 'SUCCESS' or not ans['url']: - logging.error("Getting cracker failed: " + str(ans)) - sleep(5) - return False - else: - self.last_version = ans - if not os.path.isdir(path): - # we need to download the 7zip - if not Download.download(ans['url'], self.config.get_value('crackers-path') + "/" + str(cracker_id) + ".7z"): - logging.error("Download of cracker binary failed!") - sleep(5) - return False - - # we need to extract the 7zip - temp_folder = Path(self.config.get_value('crackers-path'), 'temp') - zip_file = Path(self.config.get_value('crackers-path'), f'{cracker_id}.7z') - - if Initialize.get_os() == 1: - # 
Windows - cmd = f'7zr{Initialize.get_os_extension()} x -o"{temp_folder}" "{zip_file}"' - else: - # Linux - cmd = f"./7zr{Initialize.get_os_extension()} x -o'{temp_folder}' '{zip_file}'" - os.system(cmd) - - # Clean up 7zip - os.unlink(zip_file) - - # Workaround for a 7zip containing a folder name or already the contents of a cracker - for name in os.listdir(temp_folder): - to_check_path = Path(temp_folder, name) - if os.path.isdir(to_check_path): - os.rename(to_check_path, path) - else: - os.rename(temp_folder, path) - break - return True diff --git a/htpclient/chunk.py b/htpclient/chunk.py index fb7faae..44d3492 100644 --- a/htpclient/chunk.py +++ b/htpclient/chunk.py @@ -1,60 +1,74 @@ -import logging -from time import sleep +from enum import Enum +from typing import Any -from htpclient.config import Config -from htpclient.jsonRequest import JsonRequest -from htpclient.dicts import * + +class ChunkStatus(Enum): + """Enum representing the status of a chunk""" + + KEYSPACE_REQUIRED = -1 + BENCHMARK = -2 + FULLY_DISPATCHED = 0 + HEALTH_CHECK = -3 + NORMAL = 1 class Chunk: - def __init__(self): - self.config = Config() - self.chunk = None - - def chunk_data(self): - return self.chunk - - def get_chunk(self, task_id): - query = copy_and_set_token(dict_getChunk, self.config.get_value('token')) - query['taskId'] = task_id - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get chunk!") - sleep(5) - return 0 - elif ans['response'] != 'SUCCESS': - logging.error("Getting of chunk failed: " + str(ans)) - sleep(5) - return 0 - else: - # test what kind the answer is - if ans['status'] == 'keyspace_required': - return -1 - elif ans['status'] == 'benchmark': - return -2 - elif ans['status'] == 'fully_dispatched': - return 0 - elif ans['status'] == 'health_check': - return -3 - else: - self.chunk = ans - return 1 - - def send_keyspace(self, keyspace, task_id): - query = copy_and_set_token(dict_sendKeyspace, self.config.get_value('token')) 
- query['taskId'] = task_id - query['keyspace'] = int(keyspace) - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to send keyspace!") - sleep(5) - return False - elif ans['response'] != 'SUCCESS': - logging.error("Sending of keyspace failed: " + str(ans)) - sleep(5) + """Class representing a chunk of keyspace""" + + def __init__(self, agent: Any, task_id: int): # pylint: disable=E0601:used-before-assignment + self.agent = agent + self.task_id = task_id + + if not self.__load(): + self.agent.send_error("Loading chunk failed") + raise RuntimeError("Loading chunk failed") + + def __load(self): + query: dict[str, Any] = { + "action": "getChunk", + "taskId": self.task_id, + } + + response = self.agent.post(query) + + if response is None: return False - else: - logging.info("Keyspace got accepted!") + + self.status = ( + ChunkStatus[response["status"].upper()] + if response["status"].upper() in ChunkStatus.__members__ + else ChunkStatus.NORMAL + ) + + if self.status == ChunkStatus.HEALTH_CHECK: + return True + + if self.status == ChunkStatus.KEYSPACE_REQUIRED: return True + + if self.status == ChunkStatus.BENCHMARK: + return True + + if self.status == ChunkStatus.FULLY_DISPATCHED: + return True + + self.length = int(response["length"]) + self.chunk_id = int(response["chunkId"]) + self.skip = int(response["skip"]) + + return True + + def send_keyspace(self, keyspace: int): + """Send the keyspace to the server""" + query: dict[str, Any] = { + "action": "sendKeyspace", + "taskId": self.task_id, + "keyspace": keyspace, + } + + response = self.agent.post(query) + + if response is None: + return False + + return True diff --git a/htpclient/config.py b/htpclient/config.py index 6e83cac..8c0b1c6 100644 --- a/htpclient/config.py +++ b/htpclient/config.py @@ -1,30 +1,156 @@ -import os.path +import copy import json +import os +import platform +import uuid class Config: - CONFIG_FILE = "config.json" - config = {} - - def __init__(self): - # 
load from file - if os.path.isfile(self.CONFIG_FILE): - self.config = json.load(open(self.CONFIG_FILE)) - else: - self.__save() + """Class to handle the configuration of the agent""" - def update(self): - self.config = json.load(open(self.CONFIG_FILE)) + DEFAULT_ERRORS_TO_IGNORE = [ + "clGetPlatformIDs(): CL_PLATFORM_NOT_FOUND_KHR", + "cuInit(): forward compatibility was attempted on non supported HW", + "cuLinkAddData(): the provided PTX was compiled with an unsupported toolchain.", + "Kernel ./OpenCL/shared.cl build failed", + "nvmlDeviceGetTemperatureThreshold(): Not Supported", + "nvmlDeviceGetTemperature(): Not Supported", + "nvmlDeviceGetCurrPcieLinkWidth(): Not Supported", + ] + + DEFAULT_CONFIG: dict[str, str | int | float | bool | None | list[str]] = { + "files-path": "files", + "log-path": "logs", + "log-level": "INFO", + "crackers-path": "crackers", + "hashlists-path": "hashlists", + "zaps-path": "zaps", + "preprocessors-path": "preprocessors", + "multicast-path": "multicast", + "proxies": None, + "auth-user": None, + "auth-password": None, + "multicast": False, + "multicast-device": "eth0", + "rsync": False, + "rsync-path": "rsync", + "cert": None, + "url": None, + "voucher": None, + "token": None, + "cpu-only": False, + "auto-clean": False, + "name": platform.node(), + "uuid": str(uuid.uuid4()), + "request-timeout": 30, + "same-error-timeout": 300, + "same-warning-timeout": 300, + "file-remove-after-not-accessed-days": 14, + "file-deletion-disabled": False, + "outfile-history": False, + "piping-threshold": 95, + "allow-piping": True, + "verify-request": True, + "allow-redirects-request": True, + "default-error-task": None, + "agent-id": None, + "api-key": None, + "error-ignored": DEFAULT_ERRORS_TO_IGNORE, + } + + DIRECTORY_KEYS = { + "files-path", + "log-path", + "crackers-path", + "hashlists-path", + "preprocessors-path", + "multicast-path", + "zaps-path", + "rsync-path", + } + + FILES_KEYS = { + "cert", + } + + REQUIRED_KEYS = { + "url", + } + + def 
__init__(self, base_dir: str): + self.base_dir = base_dir + self.config_path = os.path.join(base_dir, "config.json") + self.__config: dict[str, str | int | float | bool | None | list[str]] = copy.deepcopy(self.DEFAULT_CONFIG) - def get_value(self, key): - if key in self.config: - return self.config[key] - return '' + if not os.path.isfile(self.config_path): + self.__save() - def set_value(self, key, val): - self.config[key] = val + self.__config.update(self.__load()) self.__save() + self.__build_directories() + self.__check_files_exist() + self.__check_token() + + self.__check_required_keys() + + def __load(self): + with open(self.config_path, "r", encoding="utf-8") as f: + return json.load(f) + def __save(self): - with open(self.CONFIG_FILE, 'w') as f: - json.dump(self.config, f, indent=2, ensure_ascii=False) + with open(self.config_path, "w", encoding="utf-8") as f: + json.dump(self.__config, f, indent=2, ensure_ascii=False) + + def __build_directories(self): + for key in self.DIRECTORY_KEYS: + dir_path = self.__config[key] + + if not isinstance(dir_path, str): + continue + + if not os.path.isabs(dir_path): + dir_path = os.path.join(self.base_dir, dir_path) + self.__config[key] = dir_path + self.__save() + + os.makedirs(dir_path, exist_ok=True) + + def __check_files_exist(self): + for key in self.FILES_KEYS: + file_path = self.__config[key] + + if not isinstance(file_path, str): + continue + + if not os.path.isfile(file_path): + raise FileNotFoundError(f"File '{file_path}' for '{key}' does not exist") + + def __check_token(self): + if self.__config["token"] is None and self.__config["voucher"] is None: + raise ValueError("Please provide a voucher so the agent can register") + + def __check_required_keys(self): + for key in self.REQUIRED_KEYS: + if self.__config[key] is None: + raise KeyError(f"Key '{key}' is required") + + def get_value(self, key: str): + """Get the value of a key from the configuration""" + return self.__config.get(key, None) + + def 
set_value(self, key: str, value: str | int | float | None): + """Set the value of a key in the configuration""" + self.__config[key] = value + self.__save() + + def get_all(self): + """Get all the configuration""" + return self.__config + + def update(self): + """Update the configuration""" + self.__config = self.__load() + + def __str__(self): + return str(self.__config) diff --git a/htpclient/cracker.py b/htpclient/cracker.py new file mode 100644 index 0000000..2a74ae4 --- /dev/null +++ b/htpclient/cracker.py @@ -0,0 +1,93 @@ +import os +import subprocess +from typing import Any + +from htpclient.operating_system import OperatingSystem +from htpclient.utils import get_system_bit + + +class Cracker: + """Class representing a cracker""" + + def __init__(self, agent: Any, cracker_id: int): # pylint: disable=E0601:used-before-assignment + self.agent = agent + self.cracker_id = cracker_id + + if not self.__load(): + self.agent.send_error("Loading cracker failed") + raise RuntimeError("Loading cracker failed") + + def __load(self): + """Load cracker data""" + crackers_dir = self.agent.config.get_value("crackers-path") + + if not isinstance(crackers_dir, str): + return False + + cracker_path = os.path.join(crackers_dir, str(self.cracker_id)) + self.cracker_path = cracker_path + + query: dict[str, Any] = { + "action": "downloadBinary", + "type": "cracker", + "binaryVersionId": self.cracker_id, + } + + response = self.agent.post(query) + + if response is None or "url" not in response or not response["url"]: + self.agent.send_error(f"Getting cracker failed. 
Response: {response}") + return False + + if not os.path.exists(cracker_path): + + if not self.agent.download(response["url"], cracker_path + ".7z"): + return False + + temp_path = os.path.join(crackers_dir, "temp") + os.makedirs(temp_path, exist_ok=True) + + try: + if self.agent.operating_system == OperatingSystem.WINDOWS: + subprocess.check_output( + f"7zr{self.agent.operating_system.get_extension()} x -o{temp_path} {cracker_path}.7z", + shell=True, + ) + else: + subprocess.check_output( + f"./7zr{self.agent.operating_system.get_extension()} x -o{temp_path} {cracker_path}.7z", + shell=True, + ) + except subprocess.CalledProcessError as e: + self.agent.send_error(f"Extracting cracker failed {e}") + return False + + os.remove(cracker_path + ".7z") + + for file in os.listdir(temp_path): + if os.path.isdir(os.path.join(temp_path, file)): + os.rename(os.path.join(temp_path, file), cracker_path) + break + + os.rename(temp_path, cracker_path) + break + + if os.path.isdir(temp_path): + os.rmdir(temp_path) + + executable = response["executable"] + + if os.path.exists(os.path.join(cracker_path, executable)): + self.executable = os.path.join(cracker_path, executable) + else: + file_path, file_ext = os.path.splitext(executable) + system_bit = get_system_bit() + self.executable = os.path.join(cracker_path, f"{file_path}{system_bit}{file_ext}") + + if not os.path.exists(self.executable): + self.agent.send_error(f"Cracker executable not found {self.executable}") + return False + + self.name = str(response["name"]).lower() + + return True diff --git a/htpclient/dicts.py b/htpclient/dicts.py deleted file mode 100644 index 6917063..0000000 --- a/htpclient/dicts.py +++ /dev/null @@ -1,135 +0,0 @@ -from types import MappingProxyType - - -def copy_and_set_token(dictionary, token): - dict_copy = dictionary.copy() - dict_copy["token"] = token - return dict_copy - - -""" -These dictionaries are defined using MappingProxyType() which makes them read-only. 
-If you need to change a value you must create a copy of it. E.g. -foo = dict_foo.copy() -foo["key"] = "value" -""" - -dict_os = MappingProxyType( - {'Linux': 0, - 'Windows': 1, - 'Darwin': 2}) - -dict_ext = MappingProxyType( - {0: '', # Linux - 1: '.exe', # Windows - 2: ''}) # Mac OS - -dict_sendBenchmark = MappingProxyType( - {'action': 'sendBenchmark', - 'token': '', - 'taskId': '', - 'type': '', - 'result': ''}) - -dict_getHealthCheck = MappingProxyType( - {'action': 'getHealthCheck', - 'token': ''} -) - -dict_sendHealthCheck = MappingProxyType( - {'action': 'sendHealthCheck', - 'token': '', - 'numCracked': 0, - 'start': 0, - 'end': 0, - 'numGpus': 0, - 'errors': '', - 'checkId': 0} -) - -dict_checkVersion = MappingProxyType( - {'action': 'checkClientVersion', - 'token': '', - 'version': '', - 'type': 'python'}) - -dict_downloadBinary = MappingProxyType( - {'action': 'downloadBinary', - 'token': '', - 'type': ''}) - -dict_login = MappingProxyType( - {'action': 'login', - 'token': '', - 'clientSignature': ''}) - -dict_updateInformation = MappingProxyType( - {'action': 'updateInformation', - 'token': '', - 'uid': '', - 'os': '', - 'devices': ''}) - -dict_register = MappingProxyType( - {'action': 'register', - 'voucher': '', - 'name': ''}) - -dict_testConnection = MappingProxyType( - {'action': 'testConnection'}) - -dict_getChunk = MappingProxyType( - {'action': 'getChunk', - 'token': '', - 'taskId': ''}) - -dict_sendKeyspace = MappingProxyType( - {'action': 'sendKeyspace', - 'token': '', - 'taskId': '', - 'keyspace': 0}) - -dict_getTask = MappingProxyType( - {'action': 'getTask', - 'token': ''}) - -dict_sendProgress = MappingProxyType( - {'action': 'sendProgress', - 'token': '', - 'chunkId': '', - 'keyspaceProgress': '', - 'relativeProgress': '', - 'speed': '', - 'state': '', - 'cracks': ''}) - -dict_clientError = MappingProxyType( - {'action': 'clientError', - 'token': '', - 'taskId': '', - 'chunkId': None, - 'message': ''}) - -dict_getHashlist = 
MappingProxyType( - {'action': 'getHashlist', - 'token': '', - 'hashlistId': ''}) - -dict_getFound = MappingProxyType( - {'action': 'getFound', - 'token': '', - 'hashlistId': ''}) - -dict_getFile = MappingProxyType( - {'action': 'getFile', - 'token': '', - 'taskId': '', - 'file': ''}) - -dict_getFileStatus = MappingProxyType( - {'action': 'getFileStatus', - 'token': ''}) - -dict_deregister = MappingProxyType( - {'action': 'deregister', - 'token': ''}) diff --git a/htpclient/download.py b/htpclient/download.py deleted file mode 100644 index 36bbfd3..0000000 --- a/htpclient/download.py +++ /dev/null @@ -1,51 +0,0 @@ -import logging -from time import sleep - -import requests -import sys -import os - -from htpclient.initialize import Initialize -from htpclient.session import Session - - -class Download: - @staticmethod - def download(url, output, no_header=False): - try: - session = Session().s - - # Check header - if not no_header: - head = session.head(url) - # not sure if we only should allow 200/301/302, but then it's present for sure - if head.status_code not in [200, 301, 302]: - logging.error("File download header reported wrong status code: " + str(head.status_code)) - return False - - with open(output, "wb") as file: - response = session.get(url, stream=True) - total_length = response.headers.get('Content-Length') - - if total_length is None: # no content length header - file.write(response.content) - else: - dl = 0 - total_length = int(total_length) - for data in response.iter_content(chunk_size=4096): - dl += len(data) - file.write(data) - done = int(50 * dl / total_length) - sys.stdout.write("\rDownloading: [%s%s]" % ('=' * done, ' ' * (50 - done))) - sys.stdout.flush() - sys.stdout.write("\n") - return True - except requests.exceptions.ConnectionError as e: - logging.error("Download error: " + str(e)) - sleep(30) - return False - - @staticmethod - def rsync(remote_path, local_path): - logging.info('getting file "%s" via rsync' % local_path.split('/')[-1]) 
- os.system('rsync -avzP --partial %s %s' % (remote_path, local_path)) diff --git a/htpclient/exceptions.py b/htpclient/exceptions.py new file mode 100644 index 0000000..6f431e5 --- /dev/null +++ b/htpclient/exceptions.py @@ -0,0 +1,14 @@ +class RestartLoopException(Exception): + pass + + +class QuitException(Exception): + pass + + +class TaskLoadingError(RestartLoopException): + pass + + +class CrackerLoadingError(RestartLoopException): + pass diff --git a/htpclient/files.py b/htpclient/files.py index 4ac12f9..93aec45 100644 --- a/htpclient/files.py +++ b/htpclient/files.py @@ -1,116 +1,222 @@ -import logging -import time -from time import sleep -from pathlib import Path - +import datetime import os +import subprocess +from time import sleep +from typing import Any -from htpclient.config import Config -from htpclient.download import Download -from htpclient.initialize import Initialize -from htpclient.jsonRequest import JsonRequest -from htpclient.dicts import * +from htpclient.operating_system import OperatingSystem +from htpclient.utils import get_storage_remaining, get_storage_total class Files: - def __init__(self): - self.config = Config() - self.chunk = None - self.last_check = None - self.check_interval = 600 - if self.config.get_value('file-deletion-interval'): - self.check_interval = int(self.config.get_value('file-deletion-interval')) - - def deletion_check(self): - if self.config.get_value('file-deletion-disable'): - return - elif self.last_check is not None and time.time() - self.last_check < self.check_interval: - return - query = copy_and_set_token(dict_getFileStatus, self.config.get_value('token')) - req = JsonRequest(query) - ans = req.execute() - self.last_check = time.time() - if ans is None: - logging.error("Failed to get file status!") - elif ans['response'] != 'SUCCESS': - logging.error("Getting of file status failed: " + str(ans)) + """Class representing files""" + + COMPRESSION_FILE_EXTENSIONS = {".7z"} + POSSIBLE_TEXT_EXTENSIONS = {".txt", 
".wordlist", ".wordlists", ".dict", ".dictionary", ".dic", ".gz"} + + def __init__(self, agent: Any): # pylint: disable=E0601:used-before-assignment + self.agent = agent + self.last_check = datetime.datetime.now() + self.downloaded: dict[str, bool] = {} + self.deleted_old_files: list[str] = [] + + def check_file_exists(self, file_name: str, task_id: int): + """Check if a file exists and download it if not""" + file_path = os.path.join(self.agent.config.get_value("files-path"), file_name) # type: ignore + + query: dict[str, Any] = { + "action": "getFile", + "taskId": task_id, + "file": file_name, + } + + response = self.agent.post(query) # type: ignore + + if response is None: + return None + + if any(file_name.endswith(ext) for ext in self.COMPRESSION_FILE_EXTENSIONS): + self.downloaded[os.path.splitext(file_path)[0]] = False # type: ignore + return self.check_compressed_file(file_path, response, task_id) # type: ignore + + self.downloaded[file_path] = False + return self.check_single_file(file_path, response, task_id) # type: ignore + + def check_single_file(self, file_path: str, response: dict[str, Any], task_id: int): + """Check a single file""" + if os.path.isfile(file_path) and os.stat(file_path).st_size == int(response["filesize"]): + return file_path + + if os.path.isfile(file_path) and os.stat(file_path).st_size != int(response["filesize"]): + self.agent.send_warning(f"File size mismatch on file: {file_path} - removing file and retrying...", task_id) + os.remove(file_path) + sleep(5) + return None + + if not os.path.isfile(file_path) and self.agent.config.get_value("multicast"): # type: ignore + self.agent.send_warning("Multicast is enabled, need to wait until file was delivered!", task_id) + sleep(5) # in case the file is not there yet (or not completely), we just wait some time and then try again + return None + + if get_storage_total(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): 
+ self.agent.send_error("Not enough storage space available", task_id) + return None + + if get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_warning("Not enough storage space available, cleaning up files...", task_id) + self.clean_up() + self.agent.clean_up() + + if get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_warning( + "Cleanup did not create enough space, deleting oldest file and then retrying...", task_id + ) + self.remove_oldest_file() + + if get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_error("Not enough storage space available, even after deleting some files", task_id) + return None + + if self.agent.config.get_value("rsync") and self.agent.operating_system != OperatingSystem.WINDOWS: + if not self.agent.rsync(file_path): + return None else: - files = ans['filenames'] - for filename in files: - file_path = Path(self.config.get_value('files-path'), filename) - if filename.find("/") != -1 or filename.find("\\") != -1: - continue # ignore invalid file names - elif os.path.dirname(file_path) != "files": - continue # ignore any case in which we would leave the files folder - elif os.path.exists(file_path): - logging.info("Delete file '" + filename + "' as requested by server...") - # When we get the delete requests, this function will check if the .7z maybe as - # an extracted text file. That file will also be deleted. 
- if os.path.splitext(file_path)[1] == '.7z': - txt_file = Path(f"{os.path.splitext(file_path)[0]}.txt") - if os.path.exists(txt_file): - logging.info("Also delete assumed wordlist from archive of same file...") - os.unlink(txt_file) - os.unlink(file_path) - - def check_files(self, files, task_id): - for file in files: - file_localpath = Path(self.config.get_value('files-path'), file) - txt_file = Path(f"{os.path.splitext(file_localpath)[0]}.txt") - query = copy_and_set_token(dict_getFile, self.config.get_value('token')) - query['taskId'] = task_id - query['file'] = file - req = JsonRequest(query) - ans = req.execute() - - # Process request - if ans is None: - logging.error("Failed to get file!") - sleep(5) - return False - elif ans['response'] != 'SUCCESS': - logging.error("Getting of file failed: " + str(ans)) + if not self.agent.download(response["url"], file_path): # type: ignore + return None + + self.downloaded[file_path] = True + + if os.path.isfile(file_path) and os.stat(file_path).st_size != int(response["filesize"]): + self.agent.send_warning(f"File size mismatch on file: {file_path} - removing file and retrying...", task_id) + os.remove(file_path) + sleep(5) + return None + + return file_path + + def check_compressed_file(self, file_path: str, response: dict[str, Any], task_id: int): + """Check a compressed file""" + new_file_path = os.path.splitext(file_path)[0] + + if os.path.isfile(new_file_path): + return new_file_path + + if get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_error("Not enough storage space available", task_id) + return None + + if get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_warning("Not enough storage space available, cleaning up files...", task_id) + self.clean_up() + self.agent.clean_up() + + if 
get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_warning( + "Cleanup did not create enough space, deleting oldest file and then retrying...", task_id + ) + self.remove_oldest_file() + + if get_storage_remaining(self.agent.config.get_value("files-path"), self.agent.operating_system) < int( # type: ignore + response["filesize"] + ): + self.agent.send_error("Not enough storage space available, even after deleting some files", task_id) + return None + + if not os.path.isfile(file_path): + if self.agent.config.get_value("rsync") and self.agent.operating_system != OperatingSystem.WINDOWS: + if not self.agent.rsync(file_path): + return None + else: + if not self.agent.download(response["url"], file_path): # type: ignore + return None + + if os.path.isfile(file_path) and os.stat(file_path).st_size != int(response["filesize"]): + self.agent.send_warning( + f"File size mismatch on file: {file_path} - removing file and retrying...", task_id + ) + os.remove(file_path) sleep(5) - return False + return None + + if os.path.isfile(file_path): + if self.agent.operating_system == OperatingSystem.WINDOWS: + subprocess.check_output( + f"7zr{self.agent.operating_system.get_extension()} x -aoa" + f" -o\"{self.agent.config.get_value('files-path')}\" -y \"{file_path}\"", + shell=True, + ) else: - # Filesize is OK - file_size = int(ans['filesize']) - if os.path.isfile(file_localpath) and os.stat(file_localpath).st_size == file_size: - logging.debug("File is present on agent and has matching file size.") - continue - - # Multicasting configured - elif self.config.get_value('multicast'): - logging.debug("Multicast is enabled, need to wait until it was delivered!") - sleep(5) # in case the file is not there yet (or not completely), we just wait some time and then try again - return False - - # TODO: we might need a better check for this - if os.path.isfile(txt_file): - continue - - 
# Rsync - if self.config.get_value('rsync') and Initialize.get_os() != 1: - Download.rsync(Path(self.config.get_value('rsync-path'), file), file_localpath) - else: - logging.debug("Starting download of file from server...") - Download.download(self.config.get_value('url').replace("api/server.php", "") + ans['url'], file_localpath) - - # Mismatch filesize - if os.path.isfile(file_localpath) and os.stat(file_localpath).st_size != file_size: - logging.error("file size mismatch on file: %s" % file) - sleep(5) - return False - - # 7z extraction, check if the .txt does exist. - if os.path.splitext(file_localpath)[1] == '.7z' and not os.path.isfile(txt_file): - # extract if needed - files_path = Path(self.config.get_value('files-path')) - if Initialize.get_os() == 1: - # Windows - cmd = f'7zr{Initialize.get_os_extension()} x -aoa -o"{files_path}" -y "{file_localpath}"' - else: - # Linux - cmd = f"./7zr{Initialize.get_os_extension()} x -aoa -o'{files_path}' -y '{file_localpath}'" - os.system(cmd) - return True + subprocess.check_output( + f"./7zr{self.agent.operating_system.get_extension()} x -aoa" + f" -o\"{self.agent.config.get_value('files-path')}\" -y \"{file_path}\"", + shell=True, + ) + + os.remove(file_path) + new_file_path = os.path.splitext(file_path)[0] + + return new_file_path + + def clean_up(self): + """Clean up files""" + self.last_check = datetime.datetime.now() + if self.agent.config.get_value("file-deletion-disable"): + return + + query = {"action": "getFileStatus"} + response = self.agent.post(query) + + if response is None: + return + + file_names = response["filenames"] + + for file_name in file_names: + file_path = os.path.join(self.agent.config.get_value("files-path"), file_name) # type: ignore + + if file_name.find("/") != -1 or file_name.find("\\") != -1: + continue # ignore invalid file names + + if os.path.dirname(file_path) != os.path.dirname(self.agent.config.get_value("files-path")): # type: ignore + continue # ignore any case in which we would 
leave the files folder + + if os.path.exists(file_path): # type: ignore + if any(file_name.endswith(ext) for ext in self.COMPRESSION_FILE_EXTENSIONS): + new_file_path = os.path.splitext(file_path)[0] # type: ignore + + possible_text_files = [new_file_path] + [ # type: ignore + f"{new_file_path}{ext}" for ext in self.POSSIBLE_TEXT_EXTENSIONS + ] + + for text_file in possible_text_files: # type: ignore + if os.path.exists(text_file): # type: ignore + os.remove(text_file) # type: ignore + + os.remove(file_path) # type: ignore + + def remove_oldest_file(self): + """Remove the oldest file""" + files_dir = self.agent.config.get_value("files-path") # type: ignore + files = os.listdir(files_dir) # type: ignore + + if not files: + return + + oldest_file = min(files, key=lambda f: os.path.getatime(os.path.join(files_dir, f))) # type: ignore + self.deleted_old_files.append(oldest_file) # type: ignore + os.remove(os.path.join(files_dir, oldest_file)) # type: ignore + self.agent.send_warning(f"Removed oldest file: {oldest_file}") diff --git a/htpclient/generic_cracker.py b/htpclient/generic_cracker.py index d6acc8e..f538de8 100644 --- a/htpclient/generic_cracker.py +++ b/htpclient/generic_cracker.py @@ -1,185 +1,337 @@ import logging +import os import subprocess -from time import sleep -from queue import Queue, Empty -from threading import Thread +import time +from queue import Empty, Queue +from threading import Lock, Thread +from typing import IO, Any -from htpclient.config import Config +import unidecode + +from htpclient.chunk import Chunk from htpclient.generic_status import GenericStatus -from htpclient.helpers import send_error -from htpclient.initialize import Initialize -from htpclient.jsonRequest import JsonRequest -from htpclient.dicts import * +from htpclient.operating_system import OperatingSystem +from htpclient.task import Task +from htpclient.utils import ( + format_speed, + kill_hashcat, + run_command_and_get_output, + run_command_and_get_output_and_errors, +) 
class GenericCracker: - def __init__(self, cracker_id, binary_download): - self.config = Config() - self.io_q = Queue() - self.callPath = self.config.get_value('crackers-path') + "/" + str(cracker_id) + "/" + binary_download.get_version()['executable'] - self.executable_name = binary_download.get_version()['executable'] - self.keyspace = 0 - - def run_chunk(self, task, chunk, preprocessor): - args = " crack -s " + str(chunk['skip']) - args += " -l " + str(chunk['length']) - hl_path = self.config.get_value('hashlists-path') + "/" + str(task['hashlistId']) - args += " " + task['attackcmd'].replace(task['hashlistAlias'], f"'{hl_path}'") - full_cmd = f"'{self.callPath}'" + args - if Initialize.get_os() == 1: - full_cmd = full_cmd.replace("/", '\\') - logging.debug("CALL: " + full_cmd) - process = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.config.get_value('files-path')) - - logging.debug("started cracking") - out_thread = Thread(target=self.stream_watcher, name='stdout-watcher', args=('OUT', process.stdout)) - err_thread = Thread(target=self.stream_watcher, name='stderr-watcher', args=('ERR', process.stderr)) - out_thread.start() - err_thread.start() - - main_thread = Thread(target=self.run_loop, name='run_loop', args=(process, chunk, task)) + """Class representing a Hashcat cracker""" + + crack_split_length = 1000 + + def __init__(self, agent: Any, task: Task): # pylint: disable=E0601:used-before-assignment + self.agent = agent + self.task = task + self.queue: Queue[tuple[str, bytes]] = Queue() + self.call_path = f"{self.task.cracker.executable}" + + self.lock = Lock() + self.cracks: list[str] = [] + self.first_status = False + self.use_pipe = self.task.use_pipe + self.progress = 0 + self.status_count = 0 + self.last_update = 0 + self.uses_slow_hash_flag = False + self.was_stopped = False + + def measure_keyspace(self, chunk: Chunk): # pylint: disable=R0912:too-many-branches + """Measure the keyspace of a chunk""" + 
attack_command = ( + self.task.attack_command.replace(self.task.hashlist_alias, "") + if self.task.hashlist_alias + else self.task.attack_command + ) + + command = f"{self.call_path} keyspace {attack_command}" + + try: + lines = run_command_and_get_output(command) + except subprocess.CalledProcessError as e: + self.agent.send_error(f"Error while measuring keyspace: {e}", self.task.task_id) + return False + + keyspace = 0 + + for line in lines: + if not line: + continue + + try: + keyspace = int(line) + except ValueError: + pass + + if keyspace == 0: + self.agent.send_error("Failed to measure keyspace as keyspace is 0", self.task.task_id) + return False + + return chunk.send_keyspace(keyspace) + + def run_benchmark(self, chunk: Chunk): + """Run a benchmark""" + hashlists_path = self.agent.config.get_value("hashlists-path") + + if not isinstance(hashlists_path, str): + self.agent.send_error("Hashlists path not set", self.task.task_id) + return None + + hashlist_path = os.path.join(hashlists_path, str(self.task.hashlist_id)) + + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, f'"{hashlist_path}"') + + command = f"{self.call_path} crack {attack_command} -s 0 -l {chunk.length} --timeout={self.task.benchmark_time}" + + try: + output_lines, error_lines = run_command_and_get_output_and_errors(command, ["CL_DEVICE_NOT_AVAILABLE"]) + except Exception as e: + self.agent.send_error(f"Error while running benchmark: {e}", self.task.task_id) + return 0 + + for line in error_lines: + if not line: + continue + + self.agent.send_warning(f"Error while running benchmark: {line}", self.task.task_id) + + last_valid_status = None + for line in output_lines: + if not line: + continue + + status = GenericStatus(line) + if status.is_valid(): + last_valid_status = status + + if last_valid_status is None: + self.agent.send_error("Failed to run benchmark", self.task.task_id) + return 0 + + return float(last_valid_status.get_progress()) / 10000 + + def 
run_chunk(self, chunk: Chunk): + """Run a chunk""" + self.status_count = 0 + self.was_stopped = False + + hashlists_path = self.agent.config.get_value("hashlists-path") + zaps_path = self.agent.config.get_value("zaps-path") + + if not isinstance(hashlists_path, str): + self.agent.send_error("Hashlists path not set", self.task.task_id) + return + + if not isinstance(zaps_path, str): + self.agent.send_error("Zaps path not set", self.task.task_id) + return + + hashlist_path = os.path.join(hashlists_path, str(self.task.hashlist_id)) + hashlist_output_path = os.path.join(hashlists_path, str(self.task.hashlist_id) + ".out") + hashlist_output_backup_path = os.path.join( + hashlists_path, str(self.task.hashlist_id) + str(time.time()) + ".out.bak" + ) + zap_path = os.path.join(zaps_path, f"hashlist_{self.task.hashlist_id}") + + if os.path.exists(hashlist_output_path): + if self.agent.config.get_value("outfile-history"): + os.rename(hashlist_output_path, hashlist_output_backup_path) + else: + os.remove(hashlist_output_path) + + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, f'"{hashlist_path}"') + + command = f"{self.call_path} crack -s {chunk.skip} -l {chunk.length} {attack_command}" + + if self.agent.operating_system != OperatingSystem.WINDOWS: + process = subprocess.Popen( # pylint: disable=W1509:subprocess-popen-preexec-fn + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setsid, + ) + else: + process = subprocess.Popen( # pylint: disable=R1732:consider-using-with + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + stdout_watcher = Thread(target=self.__watch_stream, args=("OUT", process.stdout)) + stderr_watcher = Thread(target=self.__watch_stream, args=("ERR", process.stderr)) + + stdout_watcher.start() + stderr_watcher.start() + + self.first_status = False + self.last_update = time.time() + + main_thread = Thread(target=self.__run, name="GenericCrackerRun", 
args=(process, chunk, zap_path)) main_thread.start() - # wait for all threads to finish process.wait() - out_thread.join() - err_thread.join() - logging.info("finished chunk") + stdout_watcher.join() + stderr_watcher.join() + main_thread.join() + + def __run(self, process: subprocess.Popen[Any], chunk: Chunk, zap_path: str): # pylint: disable=R0912,R0914,R0915 + """Run the Hashcat process""" + self.cracks = [] - def run_loop(self, process, chunk, task): - cracks = [] while True: try: - # Block for 1 second. - item = self.io_q.get(True, 1) + if not self.first_status and time.time() - self.last_update > 10: + query: dict[str, Any] = { + "action": "sendProgress", + "chunkId": chunk.chunk_id, + "keyspaceProgress": chunk.skip, + "relativeProgress": 0, + "speed": 0, + "state": 2, + "cracks": [], + } + + self.agent.post(query) + self.last_update = time.time() + + # Send error message when last update is more then 30 minutes ago + if time.time() - self.last_update > 1800: + self.agent.send_error("No status update for at least 1800 seconds", self.task.task_id) + + name, line = self.queue.get(timeout=1) except Empty: - # No output in either streams for a second. Are we done? 
if process.poll() is not None: - # is the case when the process is finished break - else: - identifier, line = item - if identifier == 'OUT': - status = GenericStatus(line.decode()) - if status.is_valid(): - # send update to server - progress = status.get_progress() - speed = status.get_speed() - initial = True - while cracks or initial: + continue + + if name == "OUT": + status = GenericStatus(line.decode()) + + if status.is_valid(): + self.status_count += 1 + relative_progress = status.get_progress() + speed = status.get_speed() + + self.first_status = True + initial = True + + state = 4 if relative_progress == 10000 else 2 + + while self.cracks or initial: + with self.lock: initial = False - cracks_backup = [] - if len(cracks) > 1000: - # we split - cnt = 0 - new_cracks = [] - for crack in cracks: - cnt += 1 - if cnt > 1000: - cracks_backup.append(crack) + crack_backup: list[str] = [] + + if len(self.cracks) > self.crack_split_length: + crack_count = 0 + new_cracks: list[str] = [] + + for crack in self.cracks: + crack_count += 1 + if crack_count > self.crack_split_length: + crack_backup.append(crack) else: new_cracks.append(crack) - cracks = new_cracks - - query = copy_and_set_token(dict_sendProgress, self.config.get_value('token')) - query['chunkId'] = chunk['chunkId'] - query['keyspaceProgress'] = chunk['skip'] - query['relativeProgress'] = progress - query['speed'] = speed - query['state'] = (4 if progress == 10000 else 2) - query['cracks'] = cracks - req = JsonRequest(query) - - logging.debug("Sending " + str(len(cracks)) + " cracks...") - ans = req.execute() - if ans is None: - logging.error("Failed to send solve!") - elif ans['response'] != 'SUCCESS': - logging.error("Error from server on solve: " + str(ans)) - else: - if ans['zaps']: - with open(self.config.get_value('files-path') + "/zap", "wb") as zapfile: # need to check if we are in the main dir here - zapfile.write('\n'.join(ans['zaps']).encode()) - zapfile.close() - cracks = cracks_backup + + 
self.cracks = new_cracks + + query: dict[str, Any] = { + "action": "sendProgress", + "chunkId": chunk.chunk_id, + "keyspaceProgress": chunk.skip, + "relativeProgress": relative_progress, + "speed": speed, + "state": state, + "cracks": self.cracks, + } + + query["cracks"] = self.cracks + + if len(self.cracks) > 0: + logging.info("Found %d cracks. Sending to server...", len(self.cracks)) + logging.info(self.cracks) + + response = self.agent.post(query) + self.last_update = time.time() + + if response is None: + self.was_stopped = True + try: + kill_hashcat(process.pid, self.agent.operating_system) + except ProcessLookupError: + pass + return + + if response.get("agent") == "stop": + self.was_stopped = True + try: + kill_hashcat(process.pid, self.agent.operating_system) + except ProcessLookupError: + pass + return + + if len(self.cracks) > 0: logging.info( - "Progress: " + str(progress / 100) + "% Cracks: " + str(len(cracks)) + - " Accepted: " + str(ans['cracked']) + " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(ans['zaps']))) - else: - line = line.decode() - if ":" in line: - cracks.append(line.strip()) - else: - logging.warning("OUT: " + line.strip()) + "Send %d cracked hashes to server for chunk %d should be %d - %d skipped", + len(self.cracks), + chunk.chunk_id, + len(self.cracks), + response["skipped"], + ) + + zaps = response.get("zaps") + + if zaps: + zap_output = "\tFF\n".join(zaps) + "\tFF\n" + with open(os.path.join(zap_path, f"{time.time()}"), "a+", encoding="utf-8") as f: + f.write(zap_output) + + print( + f"Progress: {relative_progress / 100:.2f}% Speed: {format_speed(speed)} Cracks:" + f" {len(self.cracks)} Accepted: {response['cracked']} Skips:" + f" {response['skipped']} Zaps: {len(zaps)}", + end="\r", + ) + + self.cracks = crack_backup else: - print("ERROR: " + str(line).strip()) - # TODO: send error and abort cracking - - def measure_keyspace(self, task, chunk): - task = task.get_task() - full_cmd = self.callPath + " keyspace " + 
task['attackcmd'].replace("-a " + task['hashlistAlias'] + " ", "") - if Initialize.get_os() == 1: - full_cmd = full_cmd.replace("/", '\\') - try: - logging.debug("CALL: " + full_cmd) - output = subprocess.check_output(full_cmd, shell=True, cwd=self.config.get_value('files-path')) - except subprocess.CalledProcessError as e: - logging.error("Error during keyspace measurement: " + str(e)) - send_error("Keyspace measure failed!", self.config.get_value('token'), task['taskId'], None) - sleep(5) - return False - output = output.decode(encoding='utf-8').replace("\r\n", "\n").split("\n") - keyspace = "0" - for line in output: - if not line: - continue - keyspace = line - self.keyspace = int(keyspace) - return chunk.send_keyspace(int(keyspace), task['taskId']) - - def run_benchmark(self, task): - ksp = self.keyspace - if ksp == 0: - ksp = task['keyspace'] - hl_path = self.config.get_value('hashlists-path') + "/" + str(task['hashlistId']) - args = task['attackcmd'].replace(task['hashlistAlias'], f"'{hl_path}'") - full_cmd = self.callPath + " crack " + args + " -s 0 -l " + str(ksp) + " --timeout=" + str(task['bench']) - if Initialize.get_os() == 1: - full_cmd = full_cmd.replace("/", '\\') - logging.debug("CALL: " + full_cmd) - output = subprocess.check_output(full_cmd, shell=True, cwd=self.config.get_value('files-path')) - if output: - output = output.replace(b"\r\n", b"\n").decode('utf-8') - output = output.split('\n') - last_valid_status = None - for line in output: - if not line: - continue - status = GenericStatus(line) - if status.is_valid(): - last_valid_status = status - if last_valid_status is None: - query = copy_and_set_token(dict_clientError, self.config.get_value('token')) - query['taskId'] = task['taskId'] - query['message'] = "Generic benchmark failed!" 
- req = JsonRequest(query) - req.execute() - return 0 - return float(last_valid_status.get_progress()) / 10000 - else: - query = copy_and_set_token(dict_clientError, self.config.get_value('token')) - query['taskId'] = task['taskId'] - query['message'] = "Generic benchmark gave no output!" - req = JsonRequest(query) - req.execute() - return 0 - - def stream_watcher(self, identifier, stream): + try: + if b":" not in line: + self.agent.send_warning( + f"GenericCracker: Unknown line {unidecode.unidecode(line.decode().strip())}", + self.task.task_id, + ) + continue + except UnicodeDecodeError: + self.agent.send_warning(f"GenericCracker: Unknown line {line.strip()}", self.task.task_id) + continue + + try: + line = line.decode() + except UnicodeDecodeError: + line = "$HEX[" + line.hex() + "]" + + self.cracks.append(line.strip()) + + elif name == "ERR": + msg = unidecode.unidecode(line.decode().strip()) + if msg and msg != "^C": + self.agent.send_error(f"Generic cracker error: {msg}", self.task.task_id) + + def __watch_stream(self, name: str, stream: IO[bytes]): for line in stream: - self.io_q.put((identifier, line)) + self.queue.put((name, line)) if not stream.closed: stream.close() def agent_stopped(self): - return False + """Check if the agent was stopped""" + return self.was_stopped diff --git a/htpclient/generic_status.py b/htpclient/generic_status.py index 9e602a9..fe98f43 100644 --- a/htpclient/generic_status.py +++ b/htpclient/generic_status.py @@ -1,26 +1,30 @@ class GenericStatus: - def __init__(self, line): + """Class to parse hashcat status output""" + + def __init__(self, raw_line: str): # parse + raw_line = raw_line.strip() + self.line_parts = raw_line.split() self.valid = False - self.speed = 0 self.progress = 0 + self.speed = 0 - line = line.split(" ") - if line[0] != "STATUS": + if self.line_parts[0] != "STATUS" or len(self.line_parts) != 3: # invalid line return - elif len(line) != 3: - # invalid line - return - self.progress = int(line[1]) - self.speed = 
int(line[2]) + + self.progress = int(self.line_parts[1]) + self.speed = int(self.line_parts[2]) self.valid = True def is_valid(self): + """Check if the status is valid""" return self.valid def get_progress(self): + """Get the total progress""" return self.progress def get_speed(self): + """Get the speed""" return self.speed diff --git a/htpclient/hashcat_cracker.py b/htpclient/hashcat_cracker.py index c656496..a144a09 100644 --- a/htpclient/hashcat_cracker.py +++ b/htpclient/hashcat_cracker.py @@ -1,756 +1,794 @@ -import string import logging +import os +import re +import string import subprocess -import psutil -from pathlib import Path -from time import sleep -from queue import Queue, Empty -from threading import Thread, Lock - import time +from queue import Empty, Queue +from threading import Lock, Thread +from typing import IO, Any -from htpclient.config import Config +import psutil +import unidecode + +from htpclient.chunk import Chunk from htpclient.hashcat_status import HashcatStatus -from htpclient.initialize import Initialize -from htpclient.jsonRequest import JsonRequest, os -from htpclient.helpers import send_error, update_files, kill_hashcat, get_bit, print_speed, get_rules_and_hl, get_wordlist, escape_ansi -from htpclient.dicts import * +from htpclient.operating_system import OperatingSystem +from htpclient.task import Task +from htpclient.utils import ( + format_speed, + kill_hashcat, + run_command_and_get_output, + run_command_and_get_output_and_errors, +) class HashcatCracker: - def __init__(self, cracker_id, binary_download): - self.config = Config() - self.io_q = Queue() - self.version_string = "" - - # Build cracker executable name by taking basename plus extension - self.executable_name = binary_download.get_version()['executable'] - k = self.executable_name.rfind(".") - self.executable_name = self.executable_name[:k] + "." 
+ self.executable_name[k + 1:] - self.cracker_path = Path(self.config.get_value('crackers-path'), str(cracker_id)) - - if not os.path.isfile(Path(self.cracker_path, self.executable_name)): # in case it's not the new hashcat filename, try the old one (hashcat.) - self.executable_name = binary_download.get_version()['executable'] - k = self.executable_name.rfind(".") - self.executable_name = self.executable_name[:k] + get_bit() + "." + self.executable_name[k + 1:] - self.executable_path = Path(self.cracker_path, self.executable_name) - - if Initialize.get_os() == 1: - # Windows - self.callPath = f'"{self.executable_name}"' - else: - # Linux / Mac - self.callPath = f"'./{self.executable_name}'" + """Class representing a Hashcat cracker""" + + crack_split_length = 1000 + + def __init__(self, agent: Any, task: Task): # pylint: disable=E0601:used-before-assignment + self.agent = agent + self.task = task + self.queue: Queue[tuple[str, bytes]] = Queue() + self.call_path = f"{self.task.cracker.executable}" - cmd = [str(self.executable_path), "--version"] - try: - logging.debug(f"CALL: {' '.join(cmd)}") - output = subprocess.check_output(cmd, cwd=self.cracker_path) + output = run_command_and_get_output(f"{self.call_path} --version") except subprocess.CalledProcessError as e: - logging.error("Error during version detection: " + str(e)) - sleep(5) - self.version_string = output.decode().replace('v', '') + self.agent.send_error(f"Error while checking cracker version: {e}") + return + self.version = output[0].strip().replace("v", "") self.lock = Lock() - self.cracks = [] + self.cracks: list[str] = [] self.first_status = False - self.usePipe = False - self.progressVal = 0 - self.statusCount = 0 + self.use_pipe = self.task.use_pipe + self.progress = 0 + self.status_count = 0 self.last_update = 0 self.uses_slow_hash_flag = False - self.wasStopped = False + self.was_stopped = False + + self.new_output_format = self.__determine_output_format() + + self.output_format = "1,2,3,4" if 
self.new_output_format else "15" + + def __determine_output_format(self): # pylint: disable=R0911:too-many-return-statements + """Determine if the output format is new""" + if not "-" in self.version: + release = self.version.split(".") - def get_outfile_format(self): - if self.version_string.find('-') == -1: - release = self.version_string.split('.') try: - if int(str(release[0])) >= 6: - return "1,2,3,4" + if int(release[0]) >= 6: + return True except ValueError: - return "1,2,3,4" # if there is a custom version, we assume it's using the new format - return "15" # if we cannot determine the version or if the release is older than 6.0.0, we will use the old format - split = self.version_string.split('-') - if len(split) < 2: - return "15" # something is wrong with the version string, go for old format - release = str(split[0]).split('.') - commit = str(split[1]) - if int(str(release[0])) < 5: - return "15" - elif int(str(release[0])) == 5 and int(str(release[1])) < 1: - return "15" - elif int(str(release[0])) == 5 and int(str(release[1])) == 1 and int(str(release[2])) == 0 and int(commit) < 1618: - return "15" - return "1,2,3,4" # new outfile format - - def build_command(self, task, chunk): - args = [] - - zaps_file = Path(self.config.get_value('zaps-path'), f"hashlist_{task['hashlistId']}") - output_file = Path(self.config.get_value('hashlists-path'), f"{task['hashlistId']}.out") - hashlist_file = Path(self.config.get_value('hashlists-path'), str(task['hashlistId'])) - args.append('--machine-readable') - args.append('--quiet') - args.append('--status') - args.append('--restore-disable') - args.append('--session=hashtopolis') - args.append(f"--status-timer {task['statustimer']}") - args.append(f"--outfile-check-timer={task['statustimer']}") - args.append(f'--outfile-check-dir="{zaps_file}"') - args.append(f'-o "{output_file}"') - args.append(f'--outfile-format={self.get_outfile_format()}') - args.append('-p "\t"') - args.append(f"-s {chunk['skip']}") - 
args.append(f"-l {chunk['length']}") - - if 'useBrain' in task and task['useBrain']: # when using brain we set the according parameters - args.append('--brain-client') - args.append(f"--brain-host {task['brainHost']}") - args.append(f"--brain-port {task['brainPort']}") - args.append(f"--brain-password {task['brainPass']}") - - if 'brainFeatures' in task: - args.append(f"--brain-client-features {task['brainFeatures']}") - else: # remove should only be used if we run without brain - args.append('--potfile-disable') - args.append('--remove') - args.append(f"--remove-timer={task['statustimer']}") - - files = update_files(task['attackcmd']) - files = files.replace(task['hashlistAlias'], f'"{hashlist_file}"') - args.append(files) - args.append(task['cmdpars']) - - - - full_cmd = ' '.join(args) - full_cmd = f'{self.callPath} {full_cmd}' - - if ' -S ' in full_cmd: - self.uses_slow_hash_flag = True + return True - return full_cmd - - def build_pipe_command(self, task, chunk): - # call the command with piping - pre_args = " --stdout -s " + str(chunk['skip']) + " -l " + str(chunk['length']) + ' ' - pre_args += update_files(task['attackcmd']).replace(task['hashlistAlias'], '') - post_args = " --machine-readable --quiet --status --remove --restore-disable --potfile-disable --session=hashtopolis" - post_args += " --status-timer " + str(task['statustimer']) - post_args += " --outfile-check-timer=" + str(task['statustimer']) - post_args += " --outfile-check-dir='" + self.config.get_value('zaps-path') + "/hashlist_" + str(task['hashlistId']) + "'" - post_args += " -o '" + self.config.get_value('hashlists-path') + "/" + str(task['hashlistId']) + ".out' --outfile-format=" + self.get_outfile_format() + " -p \"" + str(chr(9)) + "\"" - post_args += " --remove-timer=" + str(task['statustimer']) - post_args += " '" + self.config.get_value('hashlists-path') + "/" + str(task['hashlistId']) + "'" - return f"'{self.callPath}'" + pre_args + " | " + f"'{self.callPath}'" + post_args + 
task['cmdpars'] - - # DEPRECATED - def build_prince_command(self, task, chunk): - binary = "../../prince/pp64." - if Initialize.get_os() != 1: - binary = "./" + binary + "bin" - else: - binary += "exe" - pre_args = " -s " + str(chunk['skip']) + " -l " + str(chunk['length']) + ' ' - pre_args += get_wordlist(update_files(task['attackcmd']).replace(task['hashlistAlias'], '')) - post_args = " --machine-readable --quiet --status --remove --restore-disable --potfile-disable --session=hashtopolis" - post_args += " --status-timer " + str(task['statustimer']) - post_args += " --outfile-check-timer=" + str(task['statustimer']) - post_args += " --outfile-check-dir=../../hashlist_" + str(task['hashlistId']) - post_args += " -o ../../hashlists/" + str(task['hashlistId']) + ".out --outfile-format=" + self.get_outfile_format() + " -p \"" + str(chr(9)) + "\"" - post_args += " --remove-timer=" + str(task['statustimer']) - post_args += " ../../hashlists/" + str(task['hashlistId']) - post_args += get_rules_and_hl(update_files(task['attackcmd']), task['hashlistAlias']).replace(task['hashlistAlias'], '') - return binary + pre_args + " | " + self.callPath + post_args + task['cmdpars'] - - def build_preprocessor_command(self, task, chunk, preprocessor): - binary_path = Path(self.config.get_value('preprocessors-path'), str(task['preprocessor'])) - binary = preprocessor['executable'] - - if not os.path.isfile(binary_path / binary): - split = binary.split(".") - binary = '.'.join(split[:-1]) + get_bit() + "." 
+ split[-1] - binary = binary_path / binary - - pre_args = [] - - # in case the skip or limit command are not available, we try to achieve the same with head/tail (the more chunks are run, the more inefficient it might be) - if preprocessor['skipCommand'] is not None and preprocessor['limitCommand'] is not None: - pre_args.extend([preprocessor['skipCommand'], str(chunk['skip'])]) - pre_args.extend([preprocessor['limitCommand'], str(chunk['length'])]) - - pre_args.append(update_files(task['preprocessorCommand'])) - - # TODO: add support for windows as well (pre-built tools) - if preprocessor['skipCommand'] is None or preprocessor['limitCommand'] is None: - skip_length = chunk['skip'] + chunk['length'] - pre_args.append(f"| head -n {skip_length}") - pre_args.append(f"| tail -n {chunk['length']}") - - zaps_file = Path(self.config.get_value('zaps-path'), f"hashlist_{task['hashlistId']}") - output_file = Path(self.config.get_value('hashlists-path'), f"{task['hashlistId']}.out") - hashlist_file = Path(self.config.get_value('hashlists-path'), str(task['hashlistId'])) - - post_args = [] - post_args.append('--machine-readable') - post_args.append('--quiet') - post_args.append('--status') - post_args.append('--remove') - post_args.append('--restore-disable') - post_args.append('--potfile-disable') - post_args.append('--session=hashtopolis') - post_args.append(f"--status-timer {task['statustimer']}") - - post_args.append(f"--outfile-check-timer={task['statustimer']}") - post_args.append(f'--outfile-check-dir="{zaps_file}"') - post_args.append(f'-o "{output_file}"') - post_args.append(f'--outfile-format={self.get_outfile_format()}') - post_args.append('-p "\t"') - post_args.append(f"--remove-timer={task['statustimer']}") - post_args.append(f'"{hashlist_file}"') - - files = update_files(task['attackcmd']) - files = files.replace(task['hashlistAlias'] + " ", "") - post_args.append(files) - post_args.append(task['cmdpars']) - - pre_args = ' '.join(pre_args) - post_args = ' 
'.join(post_args) - - full_cmd = f'"{binary}" {pre_args} | {self.callPath} {post_args}' - - return full_cmd - - def run_chunk(self, task, chunk, preprocessor): - if 'enforcePipe' in task and task['enforcePipe']: - logging.info("Enforcing pipe command because of task setting...") - self.usePipe = True - if 'usePrince' in task and task['usePrince']: # DEPRECATED - full_cmd = self.build_prince_command(task, chunk) - elif 'usePreprocessor' in task and task['usePreprocessor']: - full_cmd = self.build_preprocessor_command(task, chunk, preprocessor) - elif self.usePipe: - full_cmd = self.build_pipe_command(task, chunk) - else: - full_cmd = self.build_command(task, chunk) - self.statusCount = 0 - self.wasStopped = False - - # Set paths - outfile_path = Path(self.config.get_value('hashlists-path'), f"{task['hashlistId']}.out") - outfile_backup_path = Path(self.config.get_value('hashlists-path'), f"{task['hashlistId']}_{time.time()}.out") - zapfile_path = Path(self.config.get_value('zaps-path'), f"hashlist_{task['hashlistId']}") - - # clear old found file - earlier we deleted them, but just in case, we just move it to a unique filename if configured so - if os.path.exists(outfile_path): - if self.config.get_value('outfile-history'): - os.rename(outfile_path, outfile_backup_path) - else: - os.unlink(outfile_path) - - # create zap folder - if not os.path.exists(zapfile_path): - os.mkdir(zapfile_path) - - # Call command - logging.debug("CALL: " + full_cmd) - if Initialize.get_os() != 1: - process = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path, preexec_fn=os.setsid) - else: - process = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path) - - logging.debug("started cracking") - out_thread = Thread(target=self.stream_watcher, name='stdout-watcher', args=('OUT', process.stdout)) - err_thread = Thread(target=self.stream_watcher, name='stderr-watcher', args=('ERR', 
process.stderr)) - crk_thread = Thread(target=self.output_watcher, name='crack-watcher', args=(outfile_path, process)) - out_thread.start() - err_thread.start() - crk_thread.start() - self.first_status = False - self.last_update = time.time() + return False - main_thread = Thread(target=self.run_loop, name='run_loop', args=(process, chunk, task)) - main_thread.start() + if len(self.version.split("-")) == 1: + self.agent.send_warning(f"Could not determine hashcat output format version: {self.version}") + return False - # wait for all threads to finish - process.wait() - crk_thread.join() - out_thread.join() - err_thread.join() - main_thread.join() - logging.info("finished chunk") + release = self.version.split("-")[0].split(".") + commit = self.version.split("-")[1] - def run_loop(self, proc, chunk, task): - zap_path = Path(self.config.get_value('zaps-path'), f"hashlist_{task['hashlistId']}") + try: + if int(release[0]) < 5: + return False - self.cracks = [] - piping_threshold = 95 - enable_piping = False - if self.config.get_value('piping-threshold'): - piping_threshold = self.config.get_value('piping-threshold') - if self.config.get_value('allow-piping') != '': - enable_piping = self.config.get_value('allow-piping') - while True: - try: - # Block for 1 second. - if not self.first_status and self.last_update < time.time() - 5: - # send update - query = copy_and_set_token(dict_sendProgress, self.config.get_value('token')) - query['chunkId'] = chunk['chunkId'] - query['keyspaceProgress'] = chunk['skip'] - query['relativeProgress'] = 0 - query['speed'] = 0 - query['state'] = 2 - query['cracks'] = [] - req = JsonRequest(query) - logging.info("Sending keepalive progress to avoid timeout...") - req.execute() - self.last_update = time.time() - item = self.io_q.get(True, 1) - except Empty: - # No output in either streams for a second. Are we done? 
- if proc.poll() is not None: - # is the case when the process is finished - break - else: - identifier, line = item - if identifier == 'OUT': - status = HashcatStatus(line.decode()) - if status.is_valid(): - self.statusCount += 1 - - # test if we have a low utility - # not allowed if brain is used - if enable_piping and not self.uses_slow_hash_flag and ('useBrain' not in task or not task['useBrain']) and 'slowHash' in task and task['slowHash'] and not self.usePipe: - if task['files'] and not ('usePrince' in task and task['usePrince']) and not ('usePreprocessor' in task and task['usePreprocessor']) and 1 < self.statusCount < 10 and status.get_util() != -1 and status.get_util() < piping_threshold: - # we need to try piping -> kill the process and then wait for issuing the chunk again - self.usePipe = True - chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip']) - self.progressVal = status.get_progress_total() - chunk_start - logging.info("Detected low UTIL value, restart chunk with piping...") - try: - kill_hashcat(proc.pid, Initialize.get_os()) - except ProcessLookupError: - pass - return + if int(release[0]) == 5 and int(release[1]) < 1: + return False + + if int(release[0]) == 5 and int(release[1]) == 1 and int(release[2]) == 0 and int(commit) < 1618: + return False + + except ValueError: + return True + + return True + + def measure_keyspace(self, chunk: Chunk): # pylint: disable=R0912:too-many-branches + """Measure the keyspace of a chunk""" + if self.task.use_preprocessor: + return self.__measure_keyspace_with_preprocessor(chunk) + + attack_command = ( + self.task.attack_command.replace(self.task.hashlist_alias, "") + if self.task.hashlist_alias + else self.task.attack_command + ) + + command = f"{self.call_path} --keyspace --quiet {attack_command} {self.task.command_parameters}" + + if self.task.use_brain: + command += " -S" - self.first_status = True - # send update to server - 
logging.debug(line.decode().replace('\n', '').replace('\r', '')) - total = status.get_progress_total() - if self.usePipe: # if we are piping, we might have saved the total progress before switching to piping, so we can use this - total = self.progressVal - # we need to calculate the chunk start, because progress does not start at 0 for a chunk - chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip']) - if total > 0: - relative_progress = int((status.get_progress() - chunk_start) / float(total - chunk_start) * 10000) - else: # this is the case when we cannot say anything about the progress - relative_progress = 0 - speed = status.get_speed() - initial = True - if status.get_state() == 4 or status.get_state() == 5: - time.sleep(5) # we wait five seconds so all output is loaded from file - # reset piping stuff when a chunk is successfully finished - self.progressVal = 0 - self.usePipe = False - while self.cracks or initial: - self.lock.acquire() - initial = False - cracks_backup = [] - if len(self.cracks) > 1000: - # we split - cnt = 0 - new_cracks = [] - for crack in self.cracks: - cnt += 1 - if cnt > 1000: - cracks_backup.append(crack) - else: - new_cracks.append(crack) - self.cracks = new_cracks - query = copy_and_set_token(dict_sendProgress, self.config.get_value('token')) - query['chunkId'] = chunk['chunkId'] - query['keyspaceProgress'] = status.get_curku() - if (self.usePipe or 'usePrince' in task and task['usePrince'] or 'usePreprocessor' in task and task['usePreprocessor']) and status.get_curku() == 0: - query['keyspaceProgress'] = chunk['skip'] - query['relativeProgress'] = relative_progress - query['speed'] = speed - query['state'] = status.get_state() - # crack format: hash[:salt]:plain:hex_plain:crack_pos (separator will be tab instead of :) - prepared = [] - for crack in self.cracks: - prepared.append(crack.split("\t")) - query['cracks'] = prepared - if status.get_temps(): - query['gpuTemp'] = status.get_temps() 
- if status.get_all_util(): - query['gpuUtil'] = status.get_all_util() - query['cpuUtil'] = [round(psutil.cpu_percent(), 1)] - req = JsonRequest(query) - - logging.debug("Sending " + str(len(self.cracks)) + " cracks...") - ans = req.execute() - if ans is None: - logging.error("Failed to send solve!") - sleep(1) - elif ans['response'] != 'SUCCESS': - self.wasStopped = True - logging.error("Error from server on solve: " + str(ans)) - try: - kill_hashcat(proc.pid, Initialize.get_os()) - except ProcessLookupError: - pass - sleep(5) - return - elif 'agent' in ans.keys() and ans['agent'] == 'stop': - # server set agent to stop - self.wasStopped = True - logging.info("Received stop order from server!") - try: - kill_hashcat(proc.pid, Initialize.get_os()) - except ProcessLookupError: - pass - sleep(5) - return - else: - cracks_count = len(self.cracks) - self.cracks = cracks_backup - zaps = ans['zaps'] - if zaps: - logging.debug("Writing zaps") - zap_output = "\tFF\n".join(zaps) + '\tFF\n' - f = open(Path(zap_path) / str(time.time()), 'a') - f.write(zap_output) - f.close() - logging.info("Progress:" + str("{:6.2f}".format(relative_progress / 100)) + "% Speed: " + print_speed(speed) + " Cracks: " + str(cracks_count) + " Accepted: " + str(ans['cracked']) + " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(zaps))) - self.lock.release() - else: - # hacky solution to exclude warnings from hashcat - if str(line[0]) not in string.printable: - continue - else: - pass # logging.warning("HCOUT: " + line.strip()) - elif identifier == 'ERR': - msg = escape_ansi(line.replace(b"\r\n", b"\n").decode('utf-8')).strip() - if msg and str(msg) != '^C': # this is maybe not the fanciest way, but as ctrl+c is sent to the underlying process it reports it to stderr - logging.error("HC error: " + msg) - send_error(msg, self.config.get_value('token'), task['taskId'], chunk['chunkId']) - sleep(0.1) # we set a minimal sleep to avoid overreaction of the client sending a huge number of errors, but 
it should not be slowed down too much, in case the errors are not critical and the agent can continue - - def measure_keyspace(self, task, chunk): - if 'usePrince' in task.get_task() and task.get_task()['usePrince']: - return self.prince_keyspace(task.get_task(), chunk) - elif 'usePreprocessor' in task.get_task() and task.get_task()['usePreprocessor']: - return self.preprocessor_keyspace(task, chunk) - task = task.get_task() # TODO: refactor this to be better code - files = update_files(task['attackcmd']) - files = files.replace(task['hashlistAlias'] + " ", "") - - full_cmd = f"{self.callPath} --keyspace --quiet {files} {task['cmdpars']}" - - if 'useBrain' in task and task['useBrain']: - full_cmd = f"{full_cmd} -S" - - output = b'' try: - logging.debug(f"CALL: {full_cmd}") - output = subprocess.check_output(full_cmd, shell=True, cwd=self.cracker_path, stderr=subprocess.STDOUT) + lines = run_command_and_get_output(command) except subprocess.CalledProcessError as e: - logging.error("Error during keyspace measure: " + str(e) + " Output: " + output.decode(encoding='utf-8')) - send_error("Keyspace measure failed!", self.config.get_value('token'), task['taskId'], None) - sleep(5) + self.agent.send_error(f"Error while measuring keyspace: {e}", self.task.task_id) return False - output = output.decode(encoding='utf-8').replace("\r\n", "\n").split("\n") - ks = 0 - # try to parse each line as a keyspace result integer (normally only one line should be in output, but some warnings might show up) - for line in output: + + keyspace = 0 + + for line in lines: if not line: continue + try: - ks = int(line) + keyspace = int(line) except ValueError: pass - return chunk.send_keyspace(ks, task['taskId']) - # DEPRECATED - def prince_keyspace(self, task, chunk): - binary = "pp64." 
- if Initialize.get_os() != 1: - binary = "./" + binary + "bin" + if keyspace == 0: + self.agent.send_error("Failed to measure keyspace as keyspace is 0", self.task.task_id) + + if "--encoding-to iso-8859-1" in attack_command: + command = ( + f"{self.call_path} --keyspace --quiet {attack_command} --encoding-from iso-8859-1" + f" {self.task.command_parameters}" + ) + + try: + lines = run_command_and_get_output(command) + except subprocess.CalledProcessError as e: + self.agent.send_error(f"Error while measuring keyspace: {e}", self.task.task_id) + return False + + new_keyspace = 0 + + for line in lines: + if not line: + continue + + try: + new_keyspace = int(line) + except ValueError: + pass + + if new_keyspace != 0: + self.agent.send_error( + "Keyspace is 0, but it is not 0 with iso-8859-1 encoding change the attack command to use from" + " this encoding", + self.task.task_id, + ) + return False + + return chunk.send_keyspace(keyspace) + + def __measure_keyspace_with_preprocessor(self, chunk: Chunk): + + if self.task.preprocessor.keyspace_command is None: + return chunk.send_keyspace(-1) + + if self.agent.operating_system == OperatingSystem.WINDOWS: + call_path = f'"{self.task.preprocessor.executable}"' else: - binary += "exe" - full_cmd = binary + " --keyspace " + get_wordlist(update_files(task['attackcmd'], True).replace(task['hashlistAlias'], "")) - if Initialize.get_os() == 1: - full_cmd = full_cmd.replace("/", '\\') + call_path = f'"./{self.task.preprocessor.executable}"' + + command = f"{call_path} {self.task.preprocessor.keyspace_command} {self.task.preprocessor_command}" + try: - logging.debug("CALL: " + full_cmd) - output = subprocess.check_output(full_cmd, shell=True, cwd="prince") - except subprocess.CalledProcessError: - logging.error("Error during PRINCE keyspace measure") - send_error("PRINCE keyspace measure failed!", self.config.get_value('token'), task['taskId'], None) - sleep(5) + lines = run_command_and_get_output(command) + except 
subprocess.CalledProcessError as e: + self.agent.send_error(f"Error while measuring keyspace for preprocessor: {e}", self.task.task_id) return False - output = output.decode(encoding='utf-8').replace("\r\n", "\n").split("\n") - keyspace = "0" - for line in output: + + keyspace = 0 + + for line in lines: if not line: continue - keyspace = line - # as the keyspace of prince can get very very large, we only save it in case it's small enough to fit in a long, - # otherwise we assume that the user will abort the task earlier anyway - if int(keyspace) > 9000000000000000000: # close to max size of a long long int - return chunk.send_keyspace(-1, task['taskId']) - return chunk.send_keyspace(int(keyspace), task['taskId']) - - def preprocessor_keyspace(self, task, chunk): - preprocessor = task.get_preprocessor() - preprocessors_path = self.config.get_value('preprocessors-path') - if preprocessor['keyspaceCommand'] is None: # in case there is no keyspace flag, we just assume the task will be that large to run forever - return chunk.send_keyspace(-1, task.get_task()['taskId']) - - binary = preprocessor['executable'] - if not os.path.isfile(binary): - split = binary.split(".") - binary = '.'.join(split[:-1]) + get_bit() + "." 
+ split[-1] - - if Initialize.get_os() == 1: - # Windows - binary = f'"{binary}"' - else: - # Mac / Linux - binary = f'"./{binary}"' - - args = [] - args.append(preprocessor['keyspaceCommand']) - args.append(update_files(task.get_task()['preprocessorCommand'])) - full_cmd = ' '.join(args) - full_cmd = f"{binary} {full_cmd}" + try: + keyspace = int(line) + except ValueError: + continue + + if keyspace > 9000000000000000000: + keyspace = -1 + break + + return chunk.send_keyspace(keyspace) + + def run_benchmark(self, chunk: Chunk): # pylint: disable=W0613:unused-argument + """Run a benchmark""" + if self.task.benchmark_type == "speed": + return self.__run_speed_benchmark() + + hashlists_path = self.agent.config.get_value("hashlists-path") + + if not isinstance(hashlists_path, str): + self.agent.send_error("Hashlists path not set", self.task.task_id) + return None + + hashlist_path = os.path.join(hashlists_path, str(self.task.hashlist_id)) + hashlist_output_path = os.path.join(hashlists_path, str(self.task.hashlist_id) + ".out") + + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, f'"{hashlist_path}"') + + command_parts: list[str] = [ + "--machine-readable", + "--quiet", + f"--runtime={self.task.benchmark_time}", + "--restore-disable", + "--potfile-disable", + "--session=hashtopolis", + "-p", + '"\t"', + attack_command, + self.task.command_parameters, + "-o", + f'"{hashlist_output_path}"', + ] + + command = f"{self.call_path} {' '.join(command_parts)}" try: - logging.debug("CALL: " + full_cmd) - output = subprocess.check_output(full_cmd, shell=True, cwd=Path(preprocessors_path, str(task.get_task()['preprocessor']))) - except subprocess.CalledProcessError: - logging.error("Error during preprocessor keyspace measure") - send_error("Preprocessor keyspace measure failed!", self.config.get_value('token'), task.get_task()['taskId'], None) - sleep(5) - return False - output = output.decode(encoding='utf-8').replace("\r\n", "\n").split("\n") - 
keyspace = "0" - for line in output: + output_lines, error_lines = run_command_and_get_output_and_errors(command, ["CL_DEVICE_NOT_AVAILABLE"]) + except Exception as e: + self.agent.send_error(f"Error while running benchmark: {e}", self.task.task_id) + return 0 + + for line in error_lines: if not line: continue - keyspace = line - # as the keyspace of preprocessors can get very very large, we only save it in case it's small enough to fit in a long, - # otherwise we assume that the user will abort the task earlier anyway - if int(keyspace) > 9000000000000000000: # close to max size of a long long int - return chunk.send_keyspace(-1, task.get_task()['taskId']) - return chunk.send_keyspace(int(keyspace), task.get_task()['taskId']) - - def run_benchmark(self, task): - if task['benchType'] == 'speed': - # do a speed benchmark - return self.run_speed_benchmark(task) - args = [] - args.append('--machine-readable') - args.append('--quiet') - args.append(f"--runtime={task['bench']}") - - args.append('--restore-disable') - args.append('--potfile-disable') - args.append('--session=hashtopolis') - args.append('-p') - args.append('"\t"') - - - - hashlist_path = Path(self.config.get_value('hashlists-path'), str(task['hashlistId'])) - hashlist_out_path = Path(self.config.get_value('hashlists-path'), f"{str(task['hashlistId'])}.out") - - files = update_files(task['attackcmd']) - files = files.replace(task['hashlistAlias'], f'"{hashlist_path}"') - - args.append(files) - args.append(task['cmdpars']) - args.append('-o') - args.append(f'"{hashlist_out_path}"') - - full_cmd = ' '.join(args) - - full_cmd = f"{self.callPath} {full_cmd}" - - logging.debug(f"CALL: {full_cmd}") - proc = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path) - output, error = proc.communicate() - logging.debug("started benchmark") - proc.wait() # wait until done - if error: - error = escape_ansi(error.replace(b"\r\n", b"\n").decode('utf-8')) - # parse 
errors and send it to server - error = error.split('\n') - for line in error: - if not line: - continue - query = copy_and_set_token(dict_clientError, self.config.get_value('token')) - query['taskId'] = task['taskId'] - query['message'] = line - req = JsonRequest(query) - req.execute() - # return 0 it might not be ideal to return here. In case of errors still try to read the benchmark. - if output: - output = output.replace(b"\r\n", b"\n").decode('utf-8') - output = output.split('\n') - last_valid_status = None - for line in output: - if not line: - continue - logging.debug("HCSTAT: " + line.strip()) - status = HashcatStatus(line) - if status.is_valid(): - last_valid_status = status - if last_valid_status is None: - return 0 - # we just calculate how far in the task the agent went during the benchmark time - return (last_valid_status.get_progress() - last_valid_status.get_rejected()) / float(last_valid_status.get_progress_total()) - return 0 - - def stream_watcher(self, identifier, stream): - for line in stream: - self.io_q.put((identifier, line)) - if not stream.closed: - stream.close() - def run_speed_benchmark(self, task): - args = [] - args.append('--machine-readable') - args.append('--quiet') - args.append('--progress-only') - - args.append('--restore-disable') - args.append('--potfile-disable') - args.append('--session=hashtopolis') - args.append('-p') - args.append('"\t"') - - hashlist_path = Path(self.config.get_value('hashlists-path'), str(task['hashlistId'])) - hashlist_out_path = Path(self.config.get_value('hashlists-path'), f"{str(task['hashlistId'])}.out") - - if 'usePrince' in task and task['usePrince']: - attackcmd = get_rules_and_hl(update_files(task['attackcmd'])) - # Replace #HL# with the real hashlist - attackcmd = attackcmd.replace(task['hashlistAlias'], f'"{hashlist_path}"') - - args.append(attackcmd) - - # This dict is purely used for benchmarking with prince - args.append('example.dict') - args.append(task['cmdpars']) - else: - attackcmd = 
update_files(task['attackcmd']) + self.agent.send_warning(f"Error while running benchmark: {line}", self.task.task_id) + + last_valid_status = None + for line in output_lines: + if not line: + continue + + status = HashcatStatus(line) + if status.is_valid(): + last_valid_status = status + + if last_valid_status is None: + self.agent.send_error("Failed to run benchmark", self.task.task_id) + return 0 + + return ( + last_valid_status.get_progress() - last_valid_status.get_rejected() + ) / last_valid_status.get_progress_total() + + def __run_speed_benchmark(self): + hashlists_path = self.agent.config.get_value("hashlists-path") + + if not isinstance(hashlists_path, str): + self.agent.send_error("Hashlists path not set", self.task.task_id) + return None + + hashlist_path = os.path.join(hashlists_path, str(self.task.hashlist_id)) + hashlist_output_path = os.path.join(hashlists_path, str(self.task.hashlist_id) + ".out") - # Replace #HL# with the real hashlist - attackcmd = attackcmd.replace(task['hashlistAlias'], f'"{hashlist_path}"') + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, f'"{hashlist_path}"') - args.append(attackcmd) - args.append(task['cmdpars']) - if 'usePreprocessor' in task and task['usePreprocessor']: - args.append('example.dict') - if 'useBrain' in task and task['useBrain']: - args.append('-S') + if "--increment" in self.task.command_parameters: + self.agent.send_error("Incremental mode not supported for speed benchmark", self.task.task_id) + return 0 + + command_parts: list[str] = [ + "--machine-readable", + "--quiet", + "--progress-only", + "--restore-disable", + "--potfile-disable", + "--session=hashtopolis", + "-p", + '"\t"', + attack_command, + self.task.command_parameters, + ] + + if self.task.use_preprocessor: + command_parts.append("example.dict") + + if self.task.use_brain: + command_parts.append("-S") - args.append('-o') - args.append(f'"{hashlist_out_path}"') - - full_cmd = ' '.join(args) - full_cmd = 
f"{self.callPath} {full_cmd}" + command_parts.extend(["-o", f'"{hashlist_output_path}"']) + + command = f"{self.call_path} {' '.join(command_parts)}" - output = b'' try: - logging.debug(f"CALL: {''.join(full_cmd)}") - output = subprocess.check_output(full_cmd, shell=True, cwd=self.cracker_path, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - logging.error("Error during speed benchmark, return code: " + str(e.returncode) + " Output: " + output.decode(encoding='utf-8')) - send_error("Speed benchmark failed!", self.config.get_value('token'), task['taskId'], None) + output = run_command_and_get_output(command, ["CL_DEVICE_NOT_AVAILABLE"]) + except Exception as e: + self.agent.send_error(f"Error while running benchmark: {e}", self.task.task_id) return 0 - output = output.decode(encoding='utf-8').replace("\r\n", "\n").split("\n") - benchmark_sum = [0, 0] + + benchmark_sum: dict[int, float | int] = {0: 0, 1: 0.0} + for line in output: - if not line: + if not line or not ":" in line: continue + line = line.split(":") + if len(line) != 3: continue - # we need to do a weighted sum of all the time outputs of the GPUs + try: benchmark_sum[0] += int(line[1]) - benchmark_sum[1] += float(line[2])*int(line[1]) + benchmark_sum[1] += float(line[2]) * int(line[1]) except ValueError: continue + if benchmark_sum[0] == 0: - return 0 # in this case some error happened on the benchmark - return str(benchmark_sum[0]) + ":" + str(float(benchmark_sum[1]) / benchmark_sum[0]) + self.agent.send_error("Failed to run benchmark", self.task.task_id) + return 0 + + return f"{benchmark_sum[0]}:{benchmark_sum[1] / benchmark_sum[0]}" + + def run_chunk(self, chunk: Chunk): + """Run a chunk""" + self.status_count = 0 + self.was_stopped = False + + hashlists_path = self.agent.config.get_value("hashlists-path") + zaps_path = self.agent.config.get_value("zaps-path") + + if not isinstance(hashlists_path, str): + self.agent.send_error("Hashlists path not set", self.task.task_id) + 
return + + if not isinstance(zaps_path, str): + self.agent.send_error("Zaps path not set", self.task.task_id) + return + + hashlist_path = os.path.join(hashlists_path, str(self.task.hashlist_id)) + hashlist_output_path = os.path.join(hashlists_path, str(self.task.hashlist_id) + ".out") + hashlist_output_backup_path = os.path.join( + hashlists_path, str(self.task.hashlist_id) + str(time.time()) + ".out.bak" + ) + zap_path = os.path.join(zaps_path, f"hashlist_{self.task.hashlist_id}") + + if os.path.exists(hashlist_output_path): + if self.agent.config.get_value("outfile-history"): + os.rename(hashlist_output_path, hashlist_output_backup_path) + else: + os.remove(hashlist_output_path) + + os.makedirs(zap_path, exist_ok=True) + + if self.task.use_preprocessor: + command = self.__get_preprocessor_command(chunk, zaps_path, hashlist_output_path, hashlist_path) + elif self.task.use_pipe: + command = self.__get_pipe_command(chunk, zaps_path, hashlist_output_path, hashlist_path) + else: + command = self.__get_command(chunk, zaps_path, hashlist_output_path, hashlist_path) + + if self.agent.operating_system != OperatingSystem.WINDOWS: + process = subprocess.Popen( # pylint: disable=W1509:subprocess-popen-preexec-fn + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setsid, + ) + else: + process = subprocess.Popen( # pylint: disable=R1732:consider-using-with + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + stdout_watcher = Thread(target=self.__watch_stream, args=("OUT", process.stdout)) + stderr_watcher = Thread(target=self.__watch_stream, args=("ERR", process.stderr)) + crack_watcher = Thread(target=self.__watch_cracks, args=(hashlist_output_path, process)) + + stdout_watcher.start() + stderr_watcher.start() + crack_watcher.start() + + self.first_status = False + self.last_update = time.time() + + main_thread = Thread(target=self.__run, name="HashcatCrackerRun", args=(process, chunk, zap_path)) + 
main_thread.start() + + process.wait() + stdout_watcher.join() + stderr_watcher.join() + crack_watcher.join() + main_thread.join() + + def __get_command(self, chunk: Chunk, zaps_path: str, hashlist_output_path: str, hashlist_path: str): + command_parts: list[str] = [ + "--machine-readable", + "--quiet", + "--status", + "--restore-disable", + "--session=hashtopolis", + f"--status-timer {self.task.status_timer}", + f"--outfile-check-timer={self.task.status_timer}", + f'--outfile-check-dir="{zaps_path}"', + f'-o "{hashlist_output_path}"', + f"--outfile-format={self.output_format}", + '-p "\t"', + f"-s {chunk.skip}", + f"-l {chunk.length}", + ] + + if self.task.use_brain: + command_parts.extend([ + "--brain-client", + f"--brain-host {self.task.brain_host}", + f"--brain-port {self.task.brain_port}", + f"--brain-password {self.task.brain_password}", + ]) + if self.task.brain_features: + command_parts.append(f"--brain-client-features {self.task.brain_features}") + else: + command_parts.extend([ + "--potfile-disable", + "--remove", + f"--remove-timer={self.task.status_timer}", + ]) + + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, f'"{hashlist_path}"') + + command_parts.extend([attack_command, self.task.command_parameters]) + + full_command = f"{self.call_path} {' '.join(command_parts)}" + + regex = r"\s-S(?:\s|$)" + + if re.search(regex, full_command): + self.uses_slow_hash_flag = True + + return full_command + + def __get_pipe_command(self, chunk: Chunk, zaps_path: str, hashlist_output_path: str, hashlist_path: str): + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, "") + pre_args = [ + "--stdout", + "-s", + str(chunk.skip), + "-l", + str(chunk.length), + attack_command, + ] + + post_args = [ + "--machine-readable", + "--quiet", + "--status", + "--remove", + "--restore-disable", + "--potfile-disable", + "--session=hashtopolis", + f"--status-timer {self.task.status_timer}", + 
f"--outfile-check-timer={self.task.status_timer}", + f"--outfile-check-dir={zaps_path}", + f'-o "{hashlist_output_path}"', + f"--outfile-format={self.output_format}", + f'-p "{str(chr(9))}"', + f"--remove-timer={self.task.status_timer}", + f'"{hashlist_path}"', + ] + + return ( + f"{self.call_path} {' '.join(pre_args)} |" + f" {self.call_path} {' '.join(post_args)} {self.task.command_parameters}" + ) + + def __get_preprocessor_command(self, chunk: Chunk, zaps_path: str, hashlist_output_path: str, hashlist_path: str): + pre_args: list[str] = [] + if not self.task.preprocessor.skip_command is None and not self.task.preprocessor.limit_command is None: + pre_args.extend([ + self.task.preprocessor.skip_command, + str(chunk.skip), + self.task.preprocessor.limit_command, + str(chunk.length), + ]) + + pre_args.append(self.task.preprocessor_command) + + if self.task.preprocessor.skip_command is None or self.task.preprocessor.limit_command is None: + skip_length = chunk.skip + chunk.length + pre_args.extend([ + f"| head -n {skip_length}", + f"| tail -n {chunk.length}", + ]) + + attack_command = self.task.attack_command.replace(self.task.hashlist_alias, "") + + post_args = [ + "--machine-readable", + "--quiet", + "--status", + "--restore-disable", + "--potfile-disable", + "--session=hashtopolis", + f"--status-timer {self.task.status_timer}", + f"--outfile-check-timer={self.task.status_timer}", + f"--outfile-check-dir={zaps_path}", + f'-o "{hashlist_output_path}"', + f"--outfile-format={self.output_format}", + '-p "\t"', + f"--remove-timer={self.task.status_timer}", + f'"{hashlist_path}"', + attack_command, + self.task.command_parameters, + ] + + return f"{self.task.preprocessor.executable} {' '.join(pre_args)} | {self.call_path} {' '.join(post_args)}" + + def __run(self, process: subprocess.Popen[Any], chunk: Chunk, zap_path: str): # pylint: disable=R0912,R0914,R0915 + """Run the Hashcat process""" + self.cracks = [] + piping_threshold = 
self.agent.config.get_value("piping-threshold") + enable_piping = self.agent.config.get_value("allow-piping") + + if not isinstance(piping_threshold, int): + piping_threshold = 95 + + while True: + try: + if not self.first_status and time.time() - self.last_update > 10: + query: dict[str, Any] = { + "action": "sendProgress", + "chunkId": chunk.chunk_id, + "keyspaceProgress": chunk.skip, + "relativeProgress": 0, + "speed": 0, + "state": 2, + "cracks": [], + } + + self.agent.post(query) + self.last_update = time.time() + + # Send error message when last update is more then 30 minutes ago + if time.time() - self.last_update > 1800: + self.agent.send_error("No status update for at least 1800 seconds", self.task.task_id) + + name, line = self.queue.get(timeout=1) + except Empty: + if process.poll() is not None: + break + continue + + if name == "OUT": + status = HashcatStatus(line.decode()) + + if status.is_valid(): + self.status_count += 1 + total_progress = status.get_progress_total() + util_status = status.get_util() + speed = status.get_speed() + state = status.get_state() + + if ( + enable_piping + and not self.uses_slow_hash_flag + and self.task.use_brain + and self.task.slow_hash + and not self.use_pipe + ): + if ( + self.task.file_names + and not self.task.use_preprocessor + and 1 < self.status_count < 10 + and util_status != -1 + and util_status < piping_threshold + ): + self.use_pipe = True + chunk_start = int(total_progress / (chunk.skip + chunk.length) * chunk.skip) + self.progress = total_progress - chunk_start + + try: + kill_hashcat(process.pid, self.agent.operating_system) + except ProcessLookupError: + pass + return + + self.first_status = True + if self.use_pipe: + total_progress = self.progress + + chunk_start = int(total_progress / (chunk.skip + chunk.length) * chunk.skip) + + if total_progress > 0: + relative_progress = int( + (status.get_progress() - chunk_start) / float(total_progress - chunk_start) * 10000 + ) + else: + relative_progress = 0 + + 
initial = True + + if state in {4, 5}: + self.use_pipe = False + self.progress = 0 + time.sleep(5) + + while self.cracks or initial: + with self.lock: + initial = False + crack_backup: list[str] = [] + + if len(self.cracks) > self.crack_split_length: + crack_count = 0 + new_cracks: list[str] = [] + + for crack in self.cracks: + crack_count += 1 + if crack_count > self.crack_split_length: + crack_backup.append(crack) + else: + new_cracks.append(crack) + + self.cracks = new_cracks + + query: dict[str, Any] = { + "action": "sendProgress", + "chunkId": chunk.chunk_id, + "keyspaceProgress": status.get_curku(), + "relativeProgress": relative_progress, + "speed": speed, + "state": state, + "cracks": self.cracks, + } + + if (self.use_pipe or self.task.use_preprocessor) and status.get_curku() == 0: + query["keyspaceProgress"] = chunk.skip + + prepared: list[tuple[str, ...]] = [] + + # crack format: hash[:salt:double]:plain:hex_plain:crack_pos -> : is replaced by \t + for crack in self.cracks: + hash_, other = crack.split("\t", 1) + count_tab = other.count("\t") + + if count_tab == 2: + plain, hex_plain, crack_pos = other.split("\t") + prepared.append((hash_, plain, hex_plain, crack_pos)) + else: + salt, plain, hex_plain, crack_pos = other.rsplit("\t", 3) + salt = salt.replace("\t", ":") + prepared.append((hash_, salt, plain, hex_plain, crack_pos)) + + query["cracks"] = prepared + + if status.get_temps(): + query["gpuTemp"] = status.get_temps() + + if status.get_all_util(): + query["gpuUtil"] = status.get_all_util() + + query["cpuUtil"] = [round(psutil.cpu_percent(), 1)] + + if len(prepared) > 0: + logging.info("Found %d cracks. 
Sending to server...", len(prepared)) + logging.info(prepared) + + response = self.agent.post(query) + self.last_update = time.time() + + if response is None: + self.was_stopped = True + try: + kill_hashcat(process.pid, self.agent.operating_system) + except ProcessLookupError: + pass + return + + if response.get("agent") == "stop": + self.was_stopped = True + try: + kill_hashcat(process.pid, self.agent.operating_system) + except ProcessLookupError: + pass + return - def output_watcher(self, file_path, process): - while not os.path.exists(file_path): + if len(self.cracks) > 0: + logging.info( + "Send %d cracked hashes to server for chunk %d should be %d - %d skipped", + len(prepared), + chunk.chunk_id, + len(self.cracks), + response["skipped"], + ) + + self.cracks = crack_backup + zaps = response.get("zaps") + + if zaps: + zap_output = "\tFF\n".join(zaps) + "\tFF\n" + with open(os.path.join(zap_path, f"{time.time()}"), "a+", encoding="utf-8") as f: + f.write(zap_output) + + print( + f"Progress: {relative_progress / 100:.2f}% Speed: {format_speed(speed)} Cracks:" + f" {len(prepared)} Accepted: {response['cracked']} Skips: {response['skipped']} Zaps:" + f" {len(zaps)}", + end="\r", + ) + else: + if str(line[0]) not in string.printable: + continue + elif name == "ERR": + msg = unidecode.unidecode(line.decode().strip()) + if msg and msg != "^C": + self.agent.send_error(f"Hashcat error: {msg}", self.task.task_id) + + def __watch_stream(self, name: str, stream: IO[bytes]): + for line in stream: + self.queue.put((name, line)) + + if not stream.closed: + stream.close() + + def __watch_cracks(self, hashlist_output_path: str, process: subprocess.Popen[Any]): + # Wait until the file exists or the process ends + while not os.path.exists(hashlist_output_path): if process.poll() is not None: return time.sleep(1) - file_handle = open(file_path, encoding="utf-8") - end_count = 0 - while 1: - where = file_handle.tell() - line = file_handle.readline() - if not line: - if 
process.poll() is None: - time.sleep(0.05) - file_handle.seek(where) + + # Open the file and watch for new lines + with open(hashlist_output_path, "r", encoding="utf-8") as f: + end_count = 0 + + while True: + where = f.tell() + line = f.readline() + + if not line: # No new line + if process.poll() is None: # Process is still running + time.sleep(0.05) + f.seek(where) + else: # Process has ended, but check for more output + time.sleep(0.05) + end_count += 1 + if end_count > 20 * 5: # Stop after 5 second (20 * 0.05s) * 5 of no new output + break else: - time.sleep(0.05) - end_count += 1 - if end_count > 20: - break - else: - self.lock.acquire() - self.cracks.append(line.strip()) - self.lock.release() - file_handle.close() + # Safely add the new crack line to the list + with self.lock: + self.cracks.append(line.strip()) def agent_stopped(self): - return self.wasStopped - - def run_health_check(self, attack, hashlist_alias): - args = " --machine-readable --quiet" - args += " --restore-disable --potfile-disable --session=health " - args += update_files(attack).replace(hashlist_alias, "'" + self.config.get_value('hashlists-path') + "/health_check.txt'") - args += " -o '" + self.config.get_value('hashlists-path') + "/health_check.out'" - full_cmd = f"'{self.callPath}'" + args - if Initialize.get_os() == 1: - full_cmd = full_cmd.replace("/", '\\') - logging.debug(f"CALL: {''.join(full_cmd)}") - proc = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path) - output, error = proc.communicate() - logging.debug("Started health check attack") - # wait until done, on the health check we don't send any update during running. Maybe later we could at least - # introduce some heartbeat update to make visible that the agent is still alive. 
- proc.wait() - errors = [] - states = [] - if error: - error = escape_ansi(error.replace(b"\r\n", b"\n").decode('utf-8')) - error = error.split('\n') - for line in error: - if not line: - continue - errors.append(line) + """Check if the agent was stopped""" + return self.was_stopped + + def run_health_check(self, attack: str, hashlist_alias: str, hashlist_path: str, output_path: str): + """Run a health check""" + attack = attack.replace(hashlist_alias, f'"{hashlist_path}"') + command_parts = [ + "--machine-readable", + "--quiet", + "--restore-disable", + "--potfile-disable", + "--session=health", + attack, + f'-o "{output_path}"', + ] + + full_command = f"{self.call_path} {' '.join(command_parts)}" + + if self.agent.operating_system == OperatingSystem.WINDOWS: + full_command = full_command.replace("/", "\\") + + output, error = run_command_and_get_output_and_errors(full_command) + + errors = [unidecode.unidecode(line) for line in error] + states: list[HashcatStatus] = [] + if output: - output = escape_ansi(output.replace(b"\r\n", b"\n").decode('utf-8')) - output = output.split('\n') for line in output: if not line: continue - logging.debug(line) + status = HashcatStatus(line) if status.is_valid(): states.append(status) - return [states, errors] + + return states, errors diff --git a/htpclient/hashcat_status.py b/htpclient/hashcat_status.py index 98eeffa..6c7d072 100644 --- a/htpclient/hashcat_status.py +++ b/htpclient/hashcat_status.py @@ -1,29 +1,31 @@ class HashcatStatus: - def __init__(self, line): + """Class to parse hashcat status output""" + + def __init__(self, raw_line: str): # parse + raw_line = raw_line.strip() self.status = -1 - self.speed = [] - self.exec_runtime = [] + self.speed: list[tuple[int, int]] = [] self.curku = 0 self.progress = [0, 0] self.rec_hash = [0, 0] self.rec_salt = [0, 0] self.rejected = 0 - self.util = [] - self.temp = [] + self.util: list[int] = [] + self.temp: list[int] = [] - line = line.split("\t") - if line[0] != "STATUS": - # 
invalid line - return - elif len(line) < 19: + line = raw_line.split("\t") + + if line[0] != "STATUS" or len(line) < 19: # invalid line return + self.status = int(line[1]) index = 3 while line[index] != "EXEC_RUNTIME": - self.speed.append([int(line[index]), int(line[index + 1])]) + self.speed.append((int(line[index]), int(line[index + 1]))) index += 2 + while line[index] != "CURKU": index += 1 self.curku = int(line[index + 1]) @@ -51,39 +53,49 @@ def __init__(self, line): index += 1 def is_valid(self): + """Check if the status is valid""" return self.status >= 0 def get_progress(self): + """Get the progress""" return self.progress[0] def get_state(self): + """Get the state""" return self.status - 1 def get_curku(self): + """Get the current keyspace""" return self.curku def get_temps(self): + """Get the temperatures""" return self.temp def get_progress_total(self): + """Get the total progress""" return self.progress[1] def get_all_util(self): + """Get all the util values""" return self.util def get_util(self): + """Get the average util value""" if not self.util: return -1 util_sum = 0 for u in self.util: util_sum += u - return int(util_sum/len(self.util)) + return int(util_sum / len(self.util)) def get_speed(self): + """Get the speed""" total_speed = 0 for s in self.speed: total_speed += int(float(s[0]) * 1000 / s[1]) return total_speed def get_rejected(self): + """Get the rejected hashes""" return self.rejected diff --git a/htpclient/hashlist.py b/htpclient/hashlist.py index 4e97864..497ce22 100644 --- a/htpclient/hashlist.py +++ b/htpclient/hashlist.py @@ -1,48 +1,66 @@ import logging -from time import sleep - -from htpclient.config import Config -from htpclient.download import Download -from htpclient.jsonRequest import JsonRequest -from htpclient.dicts import * +import os +from typing import Any class Hashlist: - def __init__(self): - self.config = Config() - self.chunk = None - - def load_hashlist(self, hashlist_id): - query = 
copy_and_set_token(dict_getHashlist, self.config.get_value('token')) - query['hashlistId'] = hashlist_id - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get hashlist!") - sleep(5) - return False - elif ans['response'] != 'SUCCESS': - logging.error("Getting of hashlist failed: " + str(ans)) - sleep(5) + """Class representing a hashlist""" + + def __init__(self, agent: Any, hashlist_id: int): # pylint: disable=E0601:used-before-assignment + self.hashlist_id = hashlist_id + self.agent = agent + + if not self.__load(): + self.agent.send_error("Loading hashlist failed") + raise RuntimeError("Loading hashlist failed") + + def __load(self): + hashlists_dir = self.agent.config.get_value("hashlists-path") + + if not isinstance(hashlists_dir, str): return False - else: - Download.download(self.config.get_value('url').replace("api/server.php", "") + ans['url'], self.config.get_value('hashlists-path') + "/" + str(hashlist_id), True) + + self.path = os.path.join(hashlists_dir, str(self.hashlist_id)) + + if os.path.isfile(self.path): + logging.info("Hashlist already loaded.") return True - def load_found(self, hashlist_id, cracker_id): - query = copy_and_set_token(dict_getFound, self.config.get_value('token')) - query['hashlistId'] = hashlist_id - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get found of hashlist!") - sleep(5) + query: dict[str, Any] = { + "action": "getHashlist", + "hashlistId": self.hashlist_id, + } + + response = self.agent.post(query) + + if response is None: + return False + + if not response["url"]: + self.agent.send_error(f"Getting hashlist failed {response}") return False - elif ans['response'] != 'SUCCESS': - logging.error("Getting of hashlist founds failed: " + str(ans)) - sleep(5) + + if not self.agent.download(response["url"], self.path): return False - else: - logging.info("Saving found hashes to hashcat potfile...") - 
Download.download(self.config.get_value('url').replace("api/server.php", "") + ans['url'], self.config.get_value('crackers-path') + "/" + str(cracker_id) + "/hashcat.potfile", True) - return True + + return True + + def load_found_hashes(self, hashlist_id: int, cracker_id: int): + """Load found hashes from the hashlist""" + query = { + "action": "getFound", + "hashlistId": hashlist_id, + } + + response = self.agent.post(query) + + if response is None: + return False + + if not self.agent.download( + response["url"], + os.path.join(self.agent.config.get_value("crackers-path"), str(cracker_id), "hashcat.potfile"), # type: ignore + ): + return False + + return True diff --git a/htpclient/helpers.py b/htpclient/helpers.py deleted file mode 100644 index eb4d1df..0000000 --- a/htpclient/helpers.py +++ /dev/null @@ -1,134 +0,0 @@ -import re -import signal -import sys -import platform -import logging -import time -from types import MappingProxyType -from pathlib import Path - -import os -import subprocess - -from htpclient.dicts import copy_and_set_token, dict_clientError -from htpclient.jsonRequest import JsonRequest -from htpclient.config import Config - - -def log_error_and_exit(message): - logging.error(message) - sys.exit(1) - - -def print_speed(speed): - prefixes = MappingProxyType( - {0: "", - 1: "k", - 2: "M", - 3: "G", - 4: "T"}) - exponent = 0 - while speed > 1000: - exponent += 1 - speed = float(speed) / 1000 - return str("{:6.2f}".format(speed)) + prefixes[exponent] + "H/s" - - -def get_bit(): - if platform.machine().endswith('64'): - return "64" - return "32" - - -def kill_hashcat(pid, get_os): - if get_os != 1: - os.killpg(os.getpgid(pid), signal.SIGTERM) - else: - subprocess.Popen("TASKKILL /F /PID {pid} /T".format(pid=pid)) - - -def send_error(error, token, task_id, chunk_id): - query = copy_and_set_token(dict_clientError, token) - query['message'] = error - query['chunkId'] = chunk_id - query['taskId'] = task_id - req = JsonRequest(query) - req.execute() - 
- -def file_get_contents(filename): - with open(filename) as f: - return f.read() - - -def start_uftpd(os_extension, config): - try: - subprocess.check_output("killall -s 9 uftpd", shell=True) # stop running service to make sure we can start it again - except subprocess.CalledProcessError: - pass # ignore in case uftpd was not running - path = './uftpd' + os_extension - cmd = path + ' ' - if config.get_value('multicast-device'): - cmd += "-I " + config.get_value('multicast-device') + ' ' - else: - cmd += "-I eth0 " # wild guess as default - cmd += "-D " + os.path.abspath(config.get_value('files-path') + "/") + ' ' - cmd += "-L " + os.path.abspath("multicast/" + str(time.time()) + ".log") - logging.debug("CALL: " + cmd) - subprocess.check_output(cmd, shell=True) - logging.info("Started multicast daemon") - - -def get_wordlist(command): - split = clean_list(command.split(" ")) - for index, part in enumerate(split): - if part[0] == '-': - continue - elif index == 0 or split[index - 1][0] != '-': - return part - return '' - - -def get_rules_and_hl(command, alias): - split = clean_list(command.split(" ")) - rules = [] - for index, part in enumerate(split): - if index > 0 and (split[index - 1] == '-r' or split[index - 1] == '--rules-file'): - rules.append(split[index - 1]) - rules.append(split[index - 0]) - if part == alias: - rules.append(part) - return " ".join(rules) - - -def clean_list(element_list): - index = 0 - for part in element_list: - if not part: - del element_list[index] - index -= 1 - index += 1 - return element_list - - -# the prince flag is deprecated -def update_files(command, prince=False): - config = Config() - - split = command.split(" ") - ret = [] - for part in split: - # test if file exists - if not part: - continue - path = Path(config.get_value('files-path'), part) - if os.path.exists(path): - ret.append(f'"{path}"') - else: - ret.append(str(part)) - return " %s " % " ".join(ret) - - -def escape_ansi(line): - ansi_escape = 
re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]') - return ansi_escape.sub('', line) diff --git a/htpclient/initialize.py b/htpclient/initialize.py deleted file mode 100644 index cd9e81c..0000000 --- a/htpclient/initialize.py +++ /dev/null @@ -1,238 +0,0 @@ -import uuid -from time import sleep - -from htpclient.dicts import * -from htpclient.helpers import * -from htpclient.jsonRequest import * - - -class Initialize: - def __init__(self): - self.config = Config() - - @staticmethod - def get_version(): - return "s3-python-" + Initialize.get_version_number() - - @staticmethod - def get_version_number(): - return "0.7.3" - - def run(self, args): - self.__check_cert(args) - self.__check_url(args) - self.__check_token(args) - self.__update_information() - self.__login() - self.__build_directories() - - @staticmethod - def get_os(): - operating_system = platform.system() - try: - return dict_os[operating_system] - except KeyError: - logging.debug("OS: %s" % operating_system) - log_error_and_exit("It seems your operating system is not supported.") - - @staticmethod - def get_os_extension(): - operating_system = Initialize.get_os() - return dict_ext[operating_system] - - def __login(self): - query = copy_and_set_token(dict_login, self.config.get_value('token')) - query['clientSignature'] = self.get_version() - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Login failed!") - sleep(5) - self.__login() - elif ans['response'] != 'SUCCESS': - logging.error("Error from server: " + str(ans)) - self.config.set_value('token', '') - self.__login() - else: - logging.info("Login successful!") - if 'server-version' in ans: - logging.info("Hashtopolis Server version: " + ans['server-version']) - if 'multicastEnabled' in ans and ans['multicastEnabled'] and self.get_os() == 0: # currently only allow linux - logging.info("Multicast enabled!") - self.config.set_value('multicast', True) - if not os.path.isdir("multicast"): - os.mkdir("multicast") - - def 
def decode_output(self, output):
    """Decode raw subprocess output bytes into a list of text lines.

    Normalises Windows line endings before splitting, so callers always get
    plain lines regardless of the platform the command ran on.
    """
    return output.decode(encoding='utf-8').replace("\r\n", "\n").split("\n")


def __update_information(self):
    """Collect host hardware info (CPUs, GPUs) and report it to the server.

    Retries the server request in a loop with a 5 s delay instead of the
    previous unbounded recursion, which grew the stack and re-ran the
    expensive device collection on every failure.
    """
    if not self.config.get_value('uuid'):
        self.config.set_value('uuid', str(uuid.uuid4()))

    # collect devices
    logging.info("Collecting agent data...")
    devices = []
    if Initialize.get_os() == 0:  # Linux
        output = self.decode_output(subprocess.check_output("cat /proc/cpuinfo", shell=True))
        tmp = []
        for line in output:
            line = line.strip()
            if not line.startswith('model name') and not line.startswith('physical id'):
                continue
            value = line.split(':', 1)[1].strip()
            while '  ' in value:
                value = value.replace('  ', ' ')
            tmp.append(value)

        # Entries alternate physical id / model name. Stopping at len(tmp) - 1
        # means a dangling odd entry can no longer raise IndexError on tmp[i + 1].
        pairs = []
        for i in range(0, len(tmp) - 1, 2):
            pairs.append("%s:%s" % (tmp[i + 1], tmp[i]))

        for line in sorted(set(pairs)):
            devices.append(line.split(':', 1)[1].replace('\t', ' '))

        if not self.config.get_value('cpu-only'):
            try:
                output = subprocess.check_output(
                    "lspci | grep -E 'VGA compatible controller|3D controller'", shell=True)
            except subprocess.CalledProcessError:
                # we silently ignore this case on machines where lspci is not
                # present or the architecture has no pci bus
                output = b""
            for line in self.decode_output(output):
                if not line:
                    continue
                fields = ' '.join(line.split(' ')[1:]).split(':')
                devices.append(fields[1].strip())

    elif Initialize.get_os() == 1:  # Windows
        platform_release = platform.uname().release
        try:
            # Releases such as "8.1" are not plain integers; fall back to wmic
            # for anything unparsable instead of crashing with ValueError.
            use_powershell = platform_release == "" or int(platform_release) >= 10
        except ValueError:
            use_powershell = False
        if use_powershell:
            processor_information = self.decode_output(subprocess.check_output(
                'powershell -Command "Get-CimInstance Win32_Processor | Select-Object -ExpandProperty Name"',
                shell=True))
            video_controller = self.decode_output(subprocess.check_output(
                'powershell -Command "Get-CimInstance Win32_VideoController | Select-Object -ExpandProperty Name"',
                shell=True))
        else:
            processor_information = self.decode_output(
                subprocess.check_output('wmic cpu get name', shell=True))
            video_controller = self.decode_output(
                subprocess.check_output('wmic path win32_VideoController get name', shell=True))

        for source in (processor_information, video_controller):
            for line in source:
                line = line.rstrip("\r\n ")
                if line and line != "Name":  # skip the wmic/powershell column header
                    devices.append(line)

    else:  # OS X
        output = self.decode_output(subprocess.check_output(
            "system_profiler SPDisplaysDataType -detaillevel mini", shell=True))
        for line in output:
            line = line.rstrip("\r\n ")
            if "Chipset Model" not in line:
                continue
            devices.append(line.split(":")[1].strip())

    query = copy_and_set_token(dict_updateInformation, self.config.get_value('token'))
    query['uid'] = self.config.get_value('uuid')
    query['os'] = self.get_os()
    query['devices'] = devices
    while True:
        ans = JsonRequest(query).execute()
        if ans is None:
            logging.error("Information update failed!")
        elif ans['response'] != 'SUCCESS':
            logging.error("Error from server: " + str(ans))
        else:
            return
        sleep(5)


def __check_token(self, args):
    """Ensure an API token exists, registering with a voucher if necessary.

    Voucher lookup order: config file, then command line, then interactive
    prompt. Retries in a loop with a 5 s delay instead of recursing, so a
    dead server can neither exhaust the stack nor be hammered without pause.
    """
    if self.config.get_value('token'):
        return
    while True:
        if self.config.get_value('voucher'):
            # voucher is set in config and can be used to autoregister
            voucher = self.config.get_value('voucher')
        elif args.voucher:
            voucher = args.voucher
        else:
            voucher = input("No token found! Please enter a voucher to register your agent:\n").strip()
        query = dict_register.copy()
        query['voucher'] = voucher
        query['name'] = platform.node()
        if self.config.get_value('cpu-only'):
            query['cpu-only'] = True

        ans = JsonRequest(query).execute()
        if ans is None:
            logging.error("Request failed!")
        elif ans['response'] != 'SUCCESS' or not ans['token']:
            logging.error("Registering failed: " + str(ans))
        else:
            # Registration succeeded: the voucher is consumed, persist the token.
            self.config.set_value('voucher', '')
            self.config.set_value('token', ans['token'])
            logging.info("Successfully registered!")
            return
        sleep(5)


def __check_cert(self, args):
    """Configure an optional client TLS certificate for the API session."""
    cert = self.config.get_value('cert')
    if cert is None and args.cert is not None:
        cert = os.path.abspath(args.cert)
        logging.debug("Setting cert to: " + cert)
        self.config.set_value('cert', cert)

    if cert is not None:
        Session().s.cert = cert
        logging.debug("Configuration session cert to: " + cert)


def __check_url(self, args):
    """Ensure a working API URL is configured, verifying it with a test request.

    On failure the stored URL is cleared and the loop retries (iteratively,
    replacing the previous recursion). NOTE(review): as in the original, a
    URL already present in the config is trusted without a connection test.
    """
    if self.config.get_value('url'):
        return
    while True:
        if args.url is None:
            url = input("Please enter the url to the API of your Hashtopolis installation:\n").strip()
        else:
            url = args.url
        logging.debug("Setting url to: " + url)
        self.config.set_value('url', url)

        ans = JsonRequest(dict_testConnection.copy()).execute()
        if ans is None:
            logging.error("Connection test failed!")
            self.config.set_value('url', '')
        elif ans['response'] != 'SUCCESS':
            logging.error("Connection test failed: " + str(ans))
            self.config.set_value('url', '')
        else:
            logging.debug("Connection test successful!")
            break

    if args.cpu_only is not None and args.cpu_only:
        logging.debug("Setting agent to be CPU only..")
        self.config.set_value('cpu-only', True)


def __build_directories(self):
    """Create every working directory the agent needs, if missing."""
    for key in ('crackers-path', 'files-path', 'hashlists-path', 'preprocessors-path'):
        path = self.config.get_value(key)
        if not os.path.isdir(path):
            os.makedirs(path)
class OperatingSystem(Enum):
    """Enum representing the operating system the agent runs on."""

    LINUX = 0
    WINDOWS = 1
    MAC = 2

    @staticmethod
    def get_by_platform_name(name: str):
        """Map a platform.system()-style name to an OperatingSystem member."""
        known = {
            "Linux": OperatingSystem.LINUX,
            "Windows": OperatingSystem.WINDOWS,
            "Darwin": OperatingSystem.MAC,
        }
        try:
            return known[name]
        except KeyError:
            raise ValueError("Unknown platform name") from None

    def get_extension(self):
        """Return the executable file extension used on this operating system."""
        return ".exe" if self == OperatingSystem.WINDOWS else ""
class Preprocessor:
    """Downloads and unpacks a preprocessor binary from the server.

    After construction the instance exposes the resolved executable path and
    the preprocessor's keyspace/skip/limit command flags (None when the
    server leaves them unset).
    """

    def __init__(self, agent: Any, preprocessor_id: int):
        """Load preprocessor *preprocessor_id*; raises RuntimeError on failure."""
        self.agent = agent
        self.preprocessor_id = preprocessor_id

        if not self.__load():
            self.agent.send_error("Loading preprocessor failed")
            raise RuntimeError("Loading preprocessor failed")

    def __load(self):
        """Download, extract and validate the preprocessor; True on success."""
        preprocessors_dir = self.agent.config.get_value("preprocessors-path")

        if not isinstance(preprocessors_dir, str):
            return False

        preprocessor_path = os.path.join(preprocessors_dir, str(self.preprocessor_id))
        self.preprocessor_path = preprocessor_path

        query: dict[str, Any] = {
            "action": "downloadBinary",
            "type": "preprocessor",
            "preprocessorId": self.preprocessor_id,
        }

        response = self.agent.post(query)

        if response is None or "url" not in response or not response["url"]:
            self.agent.send_error(f"Getting preprocessor failed. Response: {response}")
            return False

        archive_path = preprocessor_path + ".7z"
        if not self.agent.download(response["url"], archive_path):
            return False

        temp_path = os.path.join(preprocessors_dir, "temp")
        os.makedirs(temp_path, exist_ok=True)

        # 7zr is expected to sit in the agent's working directory; on
        # non-Windows systems it must be invoked with an explicit ./ prefix.
        seven_zip = f"7zr{self.agent.operating_system.get_extension()}"
        if self.agent.operating_system != OperatingSystem.WINDOWS:
            seven_zip = f"./{seven_zip}"
        try:
            # Quote both paths so directories containing spaces do not break
            # the shell command (the previous version passed them unquoted).
            subprocess.check_output(f'{seven_zip} x -o"{temp_path}" "{archive_path}"', shell=True)
        except subprocess.CalledProcessError as e:
            self.agent.send_error(f"Extracting preprocessor failed {e}")
            return False

        os.remove(archive_path)

        # The archive either wraps everything in a single top-level directory
        # or contains the files directly; normalise both layouts so the
        # preprocessor always ends up at preprocessor_path. (The previous
        # implementation renamed temp_path while iterating it and acted on
        # whichever entry happened to be listed first, which broke archives
        # whose first entry was a plain file.)
        # NOTE(review): assumes preprocessor_path does not already exist —
        # confirm stale installs are cleaned up upstream before re-download.
        entries = os.listdir(temp_path)
        if len(entries) == 1 and os.path.isdir(os.path.join(temp_path, entries[0])):
            os.rename(os.path.join(temp_path, entries[0]), preprocessor_path)
            os.rmdir(temp_path)
        else:
            os.rename(temp_path, preprocessor_path)

        executable = response.get("executable")
        if not executable:
            self.agent.send_error(f"Getting preprocessor failed. Response: {response}")
            return False

        if os.path.exists(os.path.join(preprocessor_path, executable)):
            self.executable = os.path.join(preprocessor_path, executable)
        else:
            # Fall back to a bitness-suffixed binary name, e.g. "pp64.bin".
            file_path, file_ext = os.path.splitext(executable)
            system_bit = get_system_bit()
            self.executable = os.path.join(preprocessor_path, f"{file_path}{system_bit}{file_ext}")

        if not os.path.exists(self.executable):
            self.agent.send_error(f"Preprocessor executable not found {self.executable}")
            return False

        # Optional command-line flags; .get() avoids the KeyError the previous
        # direct subscript raised when the server omitted a field entirely.
        self.keyspace_command = str(response["keyspaceCommand"]) if response.get("keyspaceCommand") else None
        self.skip_command = str(response["skipCommand"]) if response.get("skipCommand") else None
        self.limit_command = str(response["limitCommand"]) if response.get("limitCommand") else None

        return True
Session: - __instance = None - - def __new__(cls, s=None): - if Session.__instance is None: - Session.__instance = object.__new__(cls) - Session.__instance.s = s - return Session.__instance diff --git a/htpclient/task.py b/htpclient/task.py index 55283d4..7418b7b 100644 --- a/htpclient/task.py +++ b/htpclient/task.py @@ -1,55 +1,192 @@ import logging +import os +from enum import Enum from time import sleep +from typing import Any -from htpclient.config import Config -from htpclient.jsonRequest import JsonRequest -from htpclient.dicts import * +from htpclient.chunk import Chunk, ChunkStatus +from htpclient.cracker import Cracker +from htpclient.files import Files +from htpclient.hashlist import Hashlist +from htpclient.preprocessor import Preprocessor + + +class TaskSpecialID(Enum): + """Enum representing special task IDs""" + + HEALTH_CHECK = -1 class Task: - def __init__(self): - self.taskId = 0 - self.task = None - self.config = Config() - self.preprocessor = None - - def reset_task(self): - self.task = None - self.taskId = 0 - - def load_task(self): - if self.taskId != 0: - return - self.task = None - query = copy_and_set_token(dict_getTask, self.config.get_value('token')) - req = JsonRequest(query) - ans = req.execute() - if ans is None: - logging.error("Failed to get task!") - sleep(5) - elif ans['response'] != 'SUCCESS': - logging.error("Error from server: " + str(ans)) - sleep(5) - else: - if ans['taskId'] is None: - logging.info("No task available!") - sleep(5) - return - elif ans['taskId'] == -1: - self.taskId = -1 - return - self.task = ans - self.taskId = ans['taskId'] - logging.info("Got task with id: " + str(ans['taskId'])) - - def get_task(self): - return self.task - - def get_task_id(self): - return self.taskId - - def set_preprocessor(self, settings): - self.preprocessor = settings - - def get_preprocessor(self): - return self.preprocessor + """Class representing a task""" + + task_id: int + + def __init__( + self, + agent: Any, # pylint: 
disable=E0601:used-before-assignment + task_id: int, + data: dict[str, Any] | None = None, + ): + self.agent = agent + self.task_id = task_id + self.downloaded_files = False + self.forced_encoding = None + + if task_id == TaskSpecialID.HEALTH_CHECK.value: + if data is None: + raise ValueError("Data must be provided for health check") + self.cracker_id = data["crackerBinaryId"] + try: + self.cracker = Cracker(self.agent, self.cracker_id) + except Exception as e: + self.agent.send_error("Loading task failed", task_id) + raise RuntimeError("Loading task failed") from e + + if not self.__load(data): + self.agent.send_error("Loading task failed", task_id) + raise RuntimeError("Loading task failed") + + @staticmethod + def get_task(agent: Any): # pylint: disable=E0601:used-before-assignment + """Get a task from the server""" + query = {"action": "getTask"} + response = agent.post(query) + task_id = None + + if response is None: + return None + + if response["taskId"] is None: + agent.send_warning("No task available") + sleep(10) + return None + + if response["taskId"] == TaskSpecialID.HEALTH_CHECK.value: + return Task(agent, TaskSpecialID.HEALTH_CHECK.value, None) + + task_id = int(response["taskId"]) + logging.info("Got task with id: %s", str(response["taskId"])) + return Task(agent, task_id, response) + + def __load(self, response: dict[str, Any] | None): + if response is None: + return False + + self.cracker_id = int(response["crackerId"]) + self.use_preprocessor = bool(response["usePreprocessor"]) + self.preprocessor_id = int(response["preprocessor"]) + self.preprocessor_command = str(response["preprocessorCommand"]) + self.file_names: list[str] = response["files"] + self.hashlist_id = int(response["hashlistId"]) + self.use_brain = bool(response["useBrain"]) + self.benchmark_type = str(response["benchType"]) + self.benchmark_time = int(response["bench"]) + self.attack_command = str(response["attackcmd"]) + self.command_parameters = str(response["cmdpars"]) + 
self.hashlist_alias = str(response["hashlistAlias"]) + self.use_pipe = bool(response["enforcePipe"]) + self.slow_hash = bool(response["slowHash"]) + self.status_timer = int(response["statustimer"]) + self.brain_host = str(response.get("brainHost", "")) + self.brain_port = int(response.get("brainPort", 0)) + self.brain_password = str(response.get("brainPass", "")) + self.brain_features = str(response.get("brainFeatures", "")) + + try: + self.cracker = Cracker(self.agent, self.cracker_id) + except Exception as e: + logging.error("Failed to load cracker: %s", e) + return False + + if self.use_preprocessor: + try: + self.preprocessor = Preprocessor(self.agent, self.preprocessor_id) + except Exception as e: + logging.error("Failed to load preprocessor: %s", e) + return False + + try: + self.hashlist = Hashlist(self.agent, self.hashlist_id) + except Exception as e: + logging.error("Failed to load hashlist: %s", e) + return False + + if self.use_brain and not self.hashlist.load_found_hashes(self.hashlist_id, self.cracker_id): + self.agent.send_error(f"Failed to get found hashes for hashlist {self.hashlist_id}", self.task_id) + return False + + # Load the files + files = Files(self.agent) + + file_paths: dict[str, str] = {} + + for file_name in self.file_names: + file_path = files.check_file_exists(file_name, self.task_id) + + if file_path is None: + self.agent.send_error(f"Failed to get file {file_name} for task " + str(self.task_id), self.task_id) + return False + + if not self.downloaded_files and files.downloaded.get(file_path, False): + self.downloaded_files = True + + file_paths[file_name] = file_path + + if len(files.deleted_old_files) > 0: + if any(file_path in file_paths.values() for file_path in files.deleted_old_files): + self.agent.send_error( + "The machine cannot download the file, because the file is too big. 
The agent cannot clean up any" + " more files.", + self.task_id, + ) + return False + + self.file_paths = file_paths + + for file_name, file_path in file_paths.items(): + logging.info("File %s is at %s", file_name, file_path) + + base_name = os.path.splitext(file_name)[0] + + # When an attack is created with an 7z file, the file extension is not known in the attack command by default + self.attack_command = self.attack_command.replace(f"{base_name}.???", file_name) + self.preprocessor_command = self.preprocessor_command.replace(f"{base_name}.???", file_name) + + if not file_name in self.attack_command: + if os.path.splitext(file_name)[0] in self.attack_command: + self.agent.send_warning( + f"File {file_name} not found in attack command, but base name" + f" {os.path.splitext(file_name)[0]} found", + self.task_id, + ) + self.attack_command = self.attack_command.replace(os.path.splitext(file_name)[0], f'"{file_path}"') + self.preprocessor_command = self.preprocessor_command.replace( + os.path.splitext(file_name)[0], f'"{file_path}"' + ) + else: + self.agent.send_error(f"File {file_name} not found in attack command", self.task_id) + else: + self.attack_command = self.attack_command.replace(file_name, f'"{file_path}"') # type: ignore + self.preprocessor_command = self.preprocessor_command.replace(file_name, f'"{file_path}"') # type: ignore + + return True + + def get_chunk(self): + """Get a chunk for the task""" + try: + chunk = Chunk(self.agent, self.task_id) + except Exception as e: + logging.error("Failed to load chunk: %s", e) + return None + + if chunk.status == ChunkStatus.FULLY_DISPATCHED: + logging.info("Chunk is fully dispatched") + return None + + if chunk.status == ChunkStatus.HEALTH_CHECK: + logging.info("Running health check...") + self.agent.run_health_check(self) + return None + + return chunk diff --git a/htpclient/utils.py b/htpclient/utils.py new file mode 100644 index 0000000..f558906 --- /dev/null +++ b/htpclient/utils.py @@ -0,0 +1,179 @@ +import 
def replace_double_space(text: str):
    """Collapse every run of multiple spaces in *text* down to single spaces."""
    collapsed = text
    while "  " in collapsed:
        collapsed = collapsed.replace("  ", " ")
    return collapsed


def file_get_content(file_path: str):
    """Read and return the whole UTF-8 text content of *file_path*."""
    with open(file_path, "r", encoding="utf-8") as handle:
        return handle.read()


def file_set_content(file_path: str, data: str):
    """Overwrite *file_path* with the UTF-8 text *data*."""
    with open(file_path, "w", encoding="utf-8") as handle:
        handle.write(data)


def get_system_bit() -> str:
    """Return "64" on a 64-bit machine, "32" otherwise."""
    if platform.machine().endswith("64"):
        return "64"
    return "32"


def format_speed(speed: float) -> str:
    """Format a hash rate with a metric prefix, e.g. '  1.50kH/s' (up to P)."""
    prefixes = ("", "k", "M", "G", "T", "P")
    value = speed
    exponent = 0
    # Scale down by 1000 per metric step, capping at the largest known prefix.
    while value > 1000 and exponent < 5:
        value = float(value) / 1000
        exponent += 1
    return f"{value:6.2f}{prefixes[exponent]}H/s"


def kill_hashcat(pid: int, operating_system: "OperatingSystem"):
    """Terminate the hashcat process (and its children) identified by *pid*."""
    if operating_system == OperatingSystem.WINDOWS:
        # taskkill /T takes down the whole process tree on Windows.
        subprocess.run(f"TASKKILL /F /PID {pid} /T", check=True)
    else:
        # POSIX: signal the entire process group hashcat was started in.
        os.killpg(os.getpgid(pid), signal.SIGTERM)
    logging.info("Killed hashcat process with PID %d", pid)


def get_storage_remaining(storage_path: str, operating_system: "OperatingSystem") -> int:
    """Return the free disk space, in bytes, of the volume holding *storage_path*."""
    if operating_system == OperatingSystem.WINDOWS:
        available = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(  # type: ignore
            ctypes.c_wchar_p(storage_path), None, None, ctypes.pointer(available)
        )
        return available.value

    vfs = os.statvfs(storage_path)
    return vfs.f_bavail * vfs.f_frsize


def get_storage_total(storage_path: str, operating_system: "OperatingSystem") -> int:
    """Return the total size, in bytes, of the volume holding *storage_path*."""
    if operating_system == OperatingSystem.WINDOWS:
        total = ctypes.c_ulonglong(0)
        free = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(  # type: ignore
            ctypes.c_wchar_p(storage_path), None, ctypes.pointer(total), ctypes.pointer(free)
        )
        return total.value

    vfs = os.statvfs(storage_path)
    return vfs.f_blocks * vfs.f_frsize
int: + """Get the total storage space in bytes""" + if operating_system == OperatingSystem.WINDOWS: + total_bytes = ctypes.c_ulonglong(0) + free_bytes = ctypes.c_ulonglong(0) + ctypes.windll.kernel32.GetDiskFreeSpaceExW( # type: ignore + ctypes.c_wchar_p(storage_path), None, ctypes.pointer(total_bytes), ctypes.pointer(free_bytes) + ) + return total_bytes.value + + stats = os.statvfs(storage_path) + return stats.f_blocks * stats.f_frsize + + +def download_file(response: requests.Response, output: str): + """Download a file from a response""" + chunk_size = 4096 # Define the chunk size for downloading + + os.makedirs(os.path.dirname(output), exist_ok=True) # Create the output directory + + # Get the total file length from the response headers + total_length = int(response.headers.get("Content-Length", 0)) + + # Open the file for writing in binary mode + with open(output, "wb") as file, tqdm( + total=total_length, unit="B", unit_scale=True, desc="Downloading" + ) as progress_bar: + + try: + # Iterate over the response content in chunks + for chunk in response.iter_content(chunk_size=chunk_size): + if chunk: # Filter out keep-alive new chunks + file.write(chunk) + progress_bar.update(len(chunk)) + + except Exception as e: + logging.error("Error occurred while downloading the file: %s", e) + return False + + return True + + +def run_command_and_get_output(command: str, output_considered_error: list[str] | None = None): + """Run a command and get the output""" + output_lines: list[str] = [] + + logging.debug("Running command: %s", command) + + # Start the process + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + + # Read the output line by line as it is produced + for line in iter(process.stdout.readline, ""): # type: ignore + if not output_considered_error is None and any(error_part in line for error_part in output_considered_error): + process.kill() + raise RuntimeError("Error occurred while running the 
command") + + print(line, end="") # Print to terminal (real-time progress) + output_lines.append(line.strip()) # Collect the output lines + + process.stdout.close() # type: ignore + process.wait() # Wait for the process to finish + + return output_lines + + +def run_command_and_get_output_and_errors(command: str, output_considered_error: list[str] | None = None): + """Run a command and get the output and errors""" + output_lines: list[str] = [] + error_lines: list[str] = [] + + # Start the process with separate pipes for stdout and stderr + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + # Read the output (stdout and stderr) in real-time + while True: + output = process.stdout.readline() # type: ignore + error = process.stderr.readline() # type: ignore + + if not output_considered_error is None and any( + error_part in output or error_part in error for error_part in output_considered_error + ): + process.kill() + raise RuntimeError("Error occurred while running the command") + + if output == "" and error == "" and process.poll() is not None: + break + + if output: + print(output, end="") # Print stdout to terminal in real-time + output_lines.append(output.strip()) # Append to output list + + if error: + print(error, end="") # Print stderr to terminal in real-time + error_lines.append(error.strip()) # Append to error list + + process.stdout.close() # type: ignore + process.stderr.close() # type: ignore + + process.wait() # Wait for the process to finish + + return output_lines, error_lines diff --git a/requirements.txt b/requirements.txt index 5aba485..26e949e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ requests -psutil \ No newline at end of file +psutil +tqdm +unidecode \ No newline at end of file