From fd63987b8a88ca95114780977bdf0a044d116660 Mon Sep 17 00:00:00 2001 From: Simatwa Date: Mon, 11 Nov 2024 00:13:12 +0300 Subject: [PATCH] fix: Remove yepchat and opengpt providers fix: Remove provider promo in Blackboxai's responses refactor: Format code with black. fix: Other minor bugs --- docs/README.md | 21 +- src/pytgpt/__init__.py | 2 - src/pytgpt/async_providers.py | 6 - src/pytgpt/auto/main.py | 11 +- src/pytgpt/blackboxai/main.py | 24 +- src/pytgpt/console.py | 98 +++---- src/pytgpt/opengpt/__init__.py | 4 - src/pytgpt/opengpt/main.py | 491 --------------------------------- src/pytgpt/utils.py | 26 +- src/pytgpt/yepchat/__init__.py | 11 - src/pytgpt/yepchat/main.py | 480 -------------------------------- tests/test_api.py | 16 +- tests/test_opengpt_tgpt.py | 21 -- tests/test_utils.py | 12 +- tests/test_yepchat_tgpt.py | 21 -- 15 files changed, 94 insertions(+), 1150 deletions(-) delete mode 100644 src/pytgpt/opengpt/__init__.py delete mode 100644 src/pytgpt/opengpt/main.py delete mode 100644 src/pytgpt/yepchat/__init__.py delete mode 100644 src/pytgpt/yepchat/main.py delete mode 100644 tests/test_opengpt_tgpt.py delete mode 100644 tests/test_yepchat_tgpt.py diff --git a/docs/README.md b/docs/README.md index d80b2d3..6a9e748 100644 --- a/docs/README.md +++ b/docs/README.md @@ -75,7 +75,6 @@ The name *python-tgpt* draws inspiration from its parent project [tgpt](https:// These are simply the hosts of the LLMs, they include: - [Koboldai](https://koboldai-koboldcpp-tiefighter.hf.space) -- [OpenGPTs](https://opengpts-example-vz4y4ooboq-uc.a.run.app/) - [OpenAI](https://chat.openai.com) *(API key required)* - [Phind](https://www.phind.com) - [Blackboxai](https://www.blackbox.ai) @@ -299,24 +298,6 @@ print(bot.chat("")) - -
- - -Opengpt - - - -```python -import pytgpt.opengpt as opengpt -bot = opengpt.OPENGPT() -print(bot.chat("")) -``` - -
- -
- phind @@ -349,7 +330,7 @@ print(bot.chat("")) **Version 0.7.0** introduces asynchronous implementation to almost all providers except a few such as *perplexity*, which relies on other libraries which lacks such implementation. -To make it easier, you just have to prefix `Async` to the common synchronous class name. For instance `OPENGPT` will be accessed as `AsyncOPENGPT`: +To make it easier, you just have to prefix `Async` to the common synchronous class name. For instance `PHIND` will be accessed as `AsyncPHIND`: #### Streaming Whole ai response. diff --git a/src/pytgpt/__init__.py b/src/pytgpt/__init__.py index e64c92a..d12d574 100644 --- a/src/pytgpt/__init__.py +++ b/src/pytgpt/__init__.py @@ -13,7 +13,6 @@ tgpt_providers = [ "auto", "openai", - "opengpt", "koboldai", "phind", "blackboxai", @@ -22,7 +21,6 @@ "poe", "groq", "perplexity", - "yepchat", "novita", ] diff --git a/src/pytgpt/async_providers.py b/src/pytgpt/async_providers.py index 6dfcba8..1fd02a0 100644 --- a/src/pytgpt/async_providers.py +++ b/src/pytgpt/async_providers.py @@ -1,6 +1,4 @@ from pytgpt.phind import AsyncPHIND -from pytgpt.yepchat import AsyncYEPCHAT -from pytgpt.opengpt import AsyncOPENGPT from pytgpt.openai import AsyncOPENAI from pytgpt.koboldai import AsyncKOBOLDAI from pytgpt.groq import AsyncGROQ @@ -10,11 +8,9 @@ mapper: dict[str, object] = { "phind": AsyncPHIND, - "opengpt": AsyncOPENGPT, "koboldai": AsyncKOBOLDAI, "blackboxai": AsyncBLACKBOXAI, "gpt4free": AsyncGPT4FREE, - "yepchat": AsyncYEPCHAT, "groq": AsyncGROQ, "openai": AsyncOPENAI, "novita": AsyncNOVITA, @@ -22,8 +18,6 @@ tgpt_mapper: dict[str, object] = { "phind": AsyncPHIND, - "opengpt": AsyncOPENGPT, "koboldai": AsyncKOBOLDAI, "blackboxai": AsyncBLACKBOXAI, - "yepchat": AsyncYEPCHAT, } diff --git a/src/pytgpt/auto/main.py b/src/pytgpt/auto/main.py index 16f9e36..12ffea0 100644 --- a/src/pytgpt/auto/main.py +++ b/src/pytgpt/auto/main.py @@ -1,5 +1,4 @@ from pytgpt.base import Provider, AsyncProvider -from pytgpt.opengpt import OPENGPT, AsyncOPENGPT from pytgpt.koboldai import KOBOLDAI, AsyncKOBOLDAI from pytgpt.phind import PHIND, AsyncPHIND from pytgpt.blackboxai import BLACKBOXAI, AsyncBLACKBOXAI @@ -16,12 +15,9 @@ import logging -provider_map: dict[ - str, Union[OPENGPT, KOBOLDAI, PHIND, BLACKBOXAI, PERPLEXITY, GPT4FREE] -] = { +provider_map: dict[str, Union[KOBOLDAI, PHIND, BLACKBOXAI, PERPLEXITY, GPT4FREE]] = { "phind": PHIND, "perplexity": PERPLEXITY, - "opengpt": OPENGPT, "koboldai": KOBOLDAI, "blackboxai": BLACKBOXAI, "gpt4free": GPT4FREE, @@ -56,9 +52,7 @@ def __init__( act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. exclude(list[str], optional): List of providers to be excluded. Defaults to []. """ - self.provider: Union[ - OPENGPT, KOBOLDAI, PHIND, BLACKBOXAI, PERPLEXITY, GPT4FREE - ] = None + self.provider: Union[KOBOLDAI, PHIND, BLACKBOXAI, PERPLEXITY, GPT4FREE] = None self.provider_name: str = None self.is_conversation = is_conversation self.max_tokens = max_tokens @@ -263,7 +257,6 @@ def __init__( exclude(list[str], optional): List of providers to be excluded. Defaults to []. 
""" self.provider: Union[ - AsyncOPENGPT, AsyncKOBOLDAI, AsyncPHIND, AsyncBLACKBOXAI, diff --git a/src/pytgpt/blackboxai/main.py b/src/pytgpt/blackboxai/main.py index 4581e8b..bc44b79 100644 --- a/src/pytgpt/blackboxai/main.py +++ b/src/pytgpt/blackboxai/main.py @@ -1,3 +1,4 @@ +import re import json import httpx import requests @@ -12,6 +13,10 @@ default_model = None +provider_promo_text = ( + r"Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai" +) + class BLACKBOXAI(Provider): def __init__( @@ -153,8 +158,13 @@ def for_stream(): try: if bool(value): streaming_text += value + ("\n" if stream else "") - - resp = dict(text=streaming_text) + resp = dict( + text=( + re.sub(provider_promo_text, "", streaming_text) + if provider_promo_text in streaming_text + else streaming_text + ).strip() + ) self.last_response.update(resp) yield value if raw else resp except json.decoder.JSONDecodeError: @@ -352,7 +362,13 @@ async def for_stream(): try: if bool(value): streaming_text += value + ("\n" if stream else "") - resp = dict(text=streaming_text) + resp = dict( + text=( + re.sub(provider_promo_text, "", streaming_text) + if provider_promo_text in streaming_text + else streaming_text + ).strip() + ) self.last_response.update(resp) yield value if raw else resp except json.decoder.JSONDecodeError: @@ -421,7 +437,7 @@ async def get_message(self, response: dict) -> str: bot = BLACKBOXAI() def main(): - resp = bot.ask("hello") + resp = bot.ask("hello", True) for value in resp: print(value) diff --git a/src/pytgpt/console.py b/src/pytgpt/console.py index 5044f88..d3ad232 100644 --- a/src/pytgpt/console.py +++ b/src/pytgpt/console.py @@ -28,7 +28,7 @@ from typing import Iterable -#pytgpt +# pytgpt from pytgpt.utils import Optimizers from pytgpt.utils import default_path @@ -344,6 +344,7 @@ def main(*args, **kwargs): return decorator + class CustomCompleter(Completer): """Suggests query based on user prompts""" @@ -371,14 +372,15 @@ def get_completions(self, document: Document, complete_event): ) return completions for count, suggestion in enumerate( - suggest_query(word, timeout=2, die_silently=True), - start=1): + suggest_query(word, timeout=2, die_silently=True), start=1 + ): completions.append(Completion(suggestion, start_position=-len(word))) if count >= self.suggestions_limit: break return completions return [] + class Main(cmd.Cmd): intro = ( "Welcome to AI Chat in terminal. " @@ -506,21 +508,6 @@ def __init__( act=awesome_prompt, ) - elif provider == "opengpt": - from pytgpt.opengpt import OPENGPT - - self.bot = OPENGPT( - is_conversation=disable_conversation, - max_tokens=max_tokens, - timeout=timeout, - intro=intro, - filepath=filepath, - update_file=update_file, - proxies=proxies, - history_offset=history_offset, - act=awesome_prompt, - ) - elif provider == "koboldai": from pytgpt.koboldai import KOBOLDAI @@ -571,26 +558,6 @@ def __init__( act=awesome_prompt, ) - elif provider == "yepchat": - from pytgpt.yepchat import main as yepchat - - self.bot = yepchat.YEPCHAT( - is_conversation=disable_conversation, - max_tokens=max_tokens, - temperature=temperature, - presence_penalty=top_p, - frequency_penalty=top_k, - top_p=top_p, - model=getOr(model, yepchat.model), - timeout=timeout, - intro=intro, - filepath=filepath, - update_file=update_file, - proxies=proxies, - history_offset=history_offset, - act=awesome_prompt, - ) - elif provider == "gpt4all": assert auth, ( "Path to LLM (.gguf or .bin) file is required. 
" @@ -743,20 +710,35 @@ def __init__( self.path_to_last_response_audio = None if not non_interactive: self.completer_session = PromptSession( - "", - completer=ThreadedCompleter( - CustomCompleter( - self, - suggestions_limit, - [ - "cd", "copy_this", "h", "last_response", "rawdog", - "settings", "with_copied", - "clear", "exec", "help", "load", "reread", "shell", - "code", "exit", "history", "new_intro", "reset", "sys", - ], - ) - ), - ) + "", + completer=ThreadedCompleter( + CustomCompleter( + self, + suggestions_limit, + [ + "cd", + "copy_this", + "h", + "last_response", + "rawdog", + "settings", + "with_copied", + "clear", + "exec", + "help", + "load", + "reread", + "shell", + "code", + "exit", + "history", + "new_intro", + "reset", + "sys", + ], + ) + ), + ) self.__init_time = time.time() self.__start_time = time.time() self.__end_time = time.time() @@ -787,7 +769,7 @@ def find_range(start, end, hms: bool = False): f"~[`{Fore.LIGHTWHITE_EX}šŸ•’{Fore.BLUE}{current_time}-`" f"{Fore.LIGHTWHITE_EX}šŸ’»{Fore.RED}{find_range(self.__init_time, time.time(), True)}-`" f"{Fore.LIGHTWHITE_EX}āš”{Fore.YELLOW}{find_range(self.__start_time, self.__end_time)}s]`" - # f"\nā•°ā”€>" + # f"\nā•°ā”€>" ) whitelist = ["[", "]", "~", "-", "(", ")"] for character in whitelist: @@ -800,8 +782,9 @@ def find_range(start, end, hms: bool = False): f"~[šŸ•’{current_time}" f"-šŸ’»{find_range(self.__init_time, time.time(), True)}" f"-āš”{find_range(self.__start_time, self.__end_time)}s]" - #"\nā•°ā”€>" + # "\nā•°ā”€>" ) + def cmdloop(self, intro=None): """Repeatedly issue a prompt, accept input, parse an initial prefix off the received input, and dispatch to action methods, passing them @@ -864,7 +847,6 @@ def cmdloop(self, intro=None): except ImportError: pass - def output_bond( self, title: str, @@ -1470,7 +1452,7 @@ class ChatInteractive: ), ) @click.option( - '-sl', + "-sl", "--suggestions-limit", type=click.INT, help="Prompt suggestions limit - 0 to disable suggestion", @@ -1625,7 +1607,7 @@ def interactive( internal_exec=internal_exec, confirm_script=confirm_script, interpreter=interpreter, - suggestions_limit=suggestions_limit + suggestions_limit=suggestions_limit, ) busy_bar.spin_index = busy_bar_index bot.code_theme = code_theme @@ -1925,7 +1907,7 @@ def generate( internal_exec=internal_exec, confirm_script=confirm_script, interpreter=interpreter, - non_interactive=True + non_interactive=True, ) prompt = prompt if prompt else "" copied_placeholder = "{{copied}}" diff --git a/src/pytgpt/opengpt/__init__.py b/src/pytgpt/opengpt/__init__.py deleted file mode 100644 index 1c23201..0000000 --- a/src/pytgpt/opengpt/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .main import OPENGPT -from .main import AsyncOPENGPT - -__info__ = "Interact with OpenGPT's model" diff --git a/src/pytgpt/opengpt/main.py b/src/pytgpt/opengpt/main.py deleted file mode 100644 index 9451b50..0000000 --- a/src/pytgpt/opengpt/main.py +++ /dev/null @@ -1,491 +0,0 @@ -import re -import json -import httpx -import requests -from uuid import uuid4 -from pytgpt.utils import Optimizers -from pytgpt.utils import Conversation -from pytgpt.utils import AwesomePrompts -import pytgpt.exceptions as exceptions -from pytgpt.base import Provider, AsyncProvider -from typing import AsyncGenerator - -session = requests.Session() - - -class OPENGPT(Provider): - def __init__( - self, - is_conversation: bool = True, - max_tokens: int = 600, - timeout: int = 30, - intro: str = None, - filepath: str = None, - update_file: bool = True, - proxies: dict = {}, - 
history_offset: int = 10250, - act: str = None, - ): - """Instantiates OPENGPT - - Args: - is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True - max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. - timeout (int, optional): Http request timeout. Defaults to 30. - intro (str, optional): Conversation introductory prompt. Defaults to None. - filepath (str, optional): Path to file containing conversation history. Defaults to None. - update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. - proxies (dict, optional): Http request proxies. Defaults to {}. - history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. - act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. - """ - self.max_tokens_to_sample = max_tokens - self.is_conversation = is_conversation - self.chat_endpoint = ( - "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream" - ) - self.stream_chunk_size = 64 - self.timeout = timeout - self.last_response = {} - self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88" - self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app" - - self.headers = { - "authority": self.authority, - "accept": "text/event-stream", - "accept-language": "en-US,en;q=0.7", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app", - "pragma": "no-cache", - "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/", - "sec-fetch-site": "same-origin", - "sec-gpc": "1", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", - } - - self.__available_optimizers = ( - method - for method in dir(Optimizers) - if callable(getattr(Optimizers, method)) and not method.startswith("__") - ) - session.headers.update(self.headers) - Conversation.intro = ( - AwesomePrompts().get_act( - act, raise_not_found=True, default=None, case_insensitive=True - ) - if act - else intro or Conversation.intro - ) - self.conversation = Conversation( - is_conversation, self.max_tokens_to_sample, filepath, update_file - ) - self.conversation.history_offset = history_offset - session.proxies = proxies - - def ask( - self, - prompt: str, - stream: bool = False, - raw: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> dict: - """Chat with AI - - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - raw (bool, optional): Stream back raw response as received. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. - Returns: - dict : {} - ```json - { - "messages": [ - { - "content": "Hello there", - "additional_kwargs": {}, - "type": "human", - "example": false - }, - { - "content": "Hello! How can I assist you today?", - "additional_kwargs": { - "agent": { - "return_values": { - "output": "Hello! How can I assist you today?" - }, - "log": "Hello! 
How can I assist you today?", - "type": "AgentFinish" - } - }, - "type": "ai", - "example": false - }] - } - ``` - """ - conversation_prompt = self.conversation.gen_complete_prompt(prompt) - if optimizer: - if optimizer in self.__available_optimizers: - conversation_prompt = getattr(Optimizers, optimizer)( - conversation_prompt if conversationally else prompt - ) - else: - raise exceptions.FailedToGenerateResponseError( - f"Optimizer is not one of {self.__available_optimizers}" - ) - - session.headers.update(self.headers) - session.headers.update( - dict( - cookie=f"opengpts_user_id={uuid4().__str__()}", - ) - ) - payload = { - "input": [ - { - "content": conversation_prompt, - "additional_kwargs": {}, - "type": "human", - "example": False, - }, - ], - "assistant_id": self.assistant_id, - "thread_id": "", - } - - def for_stream(): - response = session.post( - self.chat_endpoint, json=payload, stream=True, timeout=self.timeout - ) - if ( - not response.ok - or not response.headers.get("Content-Type") - == "text/event-stream; charset=utf-8" - ): - raise exceptions.FailedToGenerateResponseError( - f"Failed to generate response - ({response.status_code}, {response.reason})" - ) - - for value in response.iter_lines( - decode_unicode=True, - chunk_size=self.stream_chunk_size, - ): - try: - modified_value = re.sub("data:", "", value) - resp = json.loads(modified_value) - if len(resp) == 1: - continue - self.last_response.update(resp[1]) - yield value if raw else resp[1] - except json.decoder.JSONDecodeError: - pass - self.conversation.update_chat_history( - prompt, self.get_message(self.last_response) - ) - - def for_non_stream(): - for _ in for_stream(): - pass - return self.last_response - - return for_stream() if stream else for_non_stream() - - def chat( - self, - prompt: str, - stream: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> str: - """Generate response `str` - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. - Returns: - str: Response generated - """ - - def for_stream(): - for response in self.ask( - prompt, True, optimizer=optimizer, conversationally=conversationally - ): - yield self.get_message(response) - - def for_non_stream(): - return self.get_message( - self.ask( - prompt, - False, - optimizer=optimizer, - conversationally=conversationally, - ) - ) - - return for_stream() if stream else for_non_stream() - - def get_message(self, response: dict) -> str: - """Retrieves message only from response - - Args: - response (dict): Response generated by `self.ask` - - Returns: - str: Message extracted - """ - assert isinstance(response, dict), "Response should be of dict data-type only" - return response["content"] - - -class AsyncOPENGPT(AsyncProvider): - def __init__( - self, - is_conversation: bool = True, - max_tokens: int = 600, - timeout: int = 30, - intro: str = None, - filepath: str = None, - update_file: bool = True, - proxies: dict = {}, - history_offset: int = 10250, - act: str = None, - ): - """Instantiates OPENGPT - - Args: - is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True - max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. - timeout (int, optional): Http request timeout. Defaults to 30. 
- intro (str, optional): Conversation introductory prompt. Defaults to None. - filepath (str, optional): Path to file containing conversation history. Defaults to None. - update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. - proxies (dict, optional): Http request proxies. Defaults to {}. - history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. - act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. - """ - self.max_tokens_to_sample = max_tokens - self.is_conversation = is_conversation - self.chat_endpoint = ( - "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream" - ) - self.stream_chunk_size = 64 - self.timeout = timeout - self.last_response = {} - self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88" - self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app" - - self.headers = { - "authority": self.authority, - "accept": "text/event-stream", - "accept-language": "en-US,en;q=0.7", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app", - "pragma": "no-cache", - "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/", - "sec-fetch-site": "same-origin", - "sec-gpc": "1", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", - } - - self.__available_optimizers = ( - method - for method in dir(Optimizers) - if callable(getattr(Optimizers, method)) and not method.startswith("__") - ) - Conversation.intro = ( - AwesomePrompts().get_act( - act, raise_not_found=True, default=None, case_insensitive=True - ) - if act - else intro or Conversation.intro - ) - self.conversation = Conversation( - is_conversation, self.max_tokens_to_sample, filepath, update_file - ) - self.conversation.history_offset = history_offset - self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies) - - async def ask( - self, - prompt: str, - stream: bool = False, - raw: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> dict | AsyncGenerator: - """Chat with AI asynchronously - - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - raw (bool, optional): Stream back raw response as received. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. - Returns: - dict|AsyncGenerator : ai content. - ```json - { - "messages": [ - { - "content": "Hello there", - "additional_kwargs": {}, - "type": "human", - "example": false - }, - { - "content": "Hello! How can I assist you today?", - "additional_kwargs": { - "agent": { - "return_values": { - "output": "Hello! How can I assist you today?" - }, - "log": "Hello! 
How can I assist you today?", - "type": "AgentFinish" - } - }, - "type": "ai", - "example": false - }] - } - ``` - """ - conversation_prompt = self.conversation.gen_complete_prompt(prompt) - if optimizer: - if optimizer in self.__available_optimizers: - conversation_prompt = getattr(Optimizers, optimizer)( - conversation_prompt if conversationally else prompt - ) - else: - raise Exception( - f"Optimizer is not one of {self.__available_optimizers}" - ) - self.headers.update( - dict( - cookie=f"opengpts_user_id={uuid4().__str__()}", - ) - ) - payload = { - "input": [ - { - "content": conversation_prompt, - "additional_kwargs": {}, - "type": "human", - "example": False, - }, - ], - "assistant_id": self.assistant_id, - "thread_id": "", - } - - async def for_stream(): - async with self.session.stream( - "POST", - self.chat_endpoint, - json=payload, - timeout=self.timeout, - headers=self.headers, - ) as response: - if ( - not response.is_success - or not response.headers.get("Content-Type") - == "text/event-stream; charset=utf-8" - ): - raise exceptions.FailedToGenerateResponseError( - f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}" - ) - - async for value in response.aiter_lines(): - try: - modified_value = re.sub("data:", "", value) - resp = json.loads(modified_value) - if len(resp) == 1: - continue - self.last_response.update(resp[1]) - yield value if raw else resp[1] - except json.decoder.JSONDecodeError: - pass - - self.conversation.update_chat_history( - prompt, await self.get_message(self.last_response) - ) - - async def for_non_stream(): - async for _ in for_stream(): - pass - return self.last_response - - return for_stream() if stream else await for_non_stream() - - async def chat( - self, - prompt: str, - stream: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> str | AsyncGenerator: - """Generate response `str` asynchronously. - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
- Returns: - str|AsyncGenerator: Response generated - """ - - async def for_stream(): - async_ask = await self.ask( - prompt, True, optimizer=optimizer, conversationally=conversationally - ) - async for response in async_ask: - yield await self.get_message(response) - - async def for_non_stream(): - return await self.get_message( - await self.ask( - prompt, - False, - optimizer=optimizer, - conversationally=conversationally, - ) - ) - - return for_stream() if stream else await for_non_stream() - - async def get_message(self, response: dict) -> str: - """Retrieves message only from response - - Args: - response (dict): Response generated by `self.ask` - - Returns: - str: Message extracted - """ - assert isinstance(response, dict), "Response should be of dict data-type only" - return response["content"] - - -if __name__ == "__main__": - bot = OPENGPT() - - def main(): - resp = bot.ask("hello") - for value in resp: - print(value) - - async def asyncmain(): - bot = AsyncOPENGPT() - resp = await bot.chat("hello", True) - async for value in resp: - print(value) - - # main() - import asyncio - - asyncio.run(asyncmain()) diff --git a/src/pytgpt/utils.py b/src/pytgpt/utils.py index 71a727e..6265c87 100644 --- a/src/pytgpt/utils.py +++ b/src/pytgpt/utils.py @@ -45,18 +45,19 @@ os.makedirs(api_static_image_dir.as_posix(), exist_ok=True) os.makedirs(api_static_audio_dir.as_posix(), exist_ok=True) + @lru_cache() -def suggest_query(prompt, timeout:int=20, die_silently:bool=False) -> list[str]: +def suggest_query(prompt, timeout: int = 20, die_silently: bool = False) -> list[str]: """Suggest queries based on prompt""" link = "https://www.google.com/complete/search" params = { - "q" : prompt, - #"cp" : "11", - "client" : "gws-wiz-serp", - "xssi" : "t", - #"gs_pcrt" : "undefined", - "hl" : locale.getlocale()[0], - #"authuser" : "0", + "q": prompt, + # "cp" : "11", + "client": "gws-wiz-serp", + "xssi": "t", + # "gs_pcrt" : "undefined", + "hl": locale.getlocale()[0], + # "authuser" : "0", "pq": "ai chat suggestions", # "dpr" : "1", } @@ -64,8 +65,12 @@ def suggest_query(prompt, timeout:int=20, die_silently:bool=False) -> list[str]: resp = requests.get(link, params=params, timeout=20) resp.raise_for_status() pattern = r'"([^"]+)",\d+' - suggestions = re.findall(pattern, resp.text) - processed_suggestions = [re.sub(r"\\+[\w\\/]*", '', suggestion) for suggestion in suggestions if not suggestion.startswith('https://')] + suggestions = re.findall(pattern, resp.text) + processed_suggestions = [ + re.sub(r"\\+[\w\\/]*", "", suggestion) + for suggestion in suggestions + if not suggestion.startswith("https://") + ] if prompt in processed_suggestions: processed_suggestions.remove(prompt) return processed_suggestions @@ -74,6 +79,7 @@ def suggest_query(prompt, timeout:int=20, die_silently:bool=False) -> list[str]: raise e return [] + def sanitize_stream( chunk: str, intro_value: str = "data:", to_json: bool = True ) -> str | dict: diff --git a/src/pytgpt/yepchat/__init__.py b/src/pytgpt/yepchat/__init__.py deleted file mode 100644 index e408a3f..0000000 --- a/src/pytgpt/yepchat/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .main import YEPCHAT -from .main import AsyncYEPCHAT -from .main import session - -__info__ = "Interact with YepChat, LLM provider" - -__all__ = [ - "YEPCHAT", - "AsyncYEPCHAT", - "session", -] diff --git a/src/pytgpt/yepchat/main.py b/src/pytgpt/yepchat/main.py deleted file mode 100644 index 75c59fb..0000000 --- a/src/pytgpt/yepchat/main.py +++ /dev/null @@ -1,480 +0,0 @@ -import httpx -import 
requests -import json -from pytgpt.utils import Optimizers -from pytgpt.utils import Conversation -from pytgpt.utils import AwesomePrompts -from pytgpt.utils import sanitize_stream -import pytgpt.exceptions as exceptions -from pytgpt.base import Provider, AsyncProvider - -session = requests.Session() - -model = "Mixtral-8x7B-Instruct-v0.1" - - -class YEPCHAT(Provider): - def __init__( - self, - is_conversation: bool = True, - max_tokens: int = 600, - temperature: float = 0.6, - presence_penalty: int = 0, - frequency_penalty: int = 0, - top_p: float = 0.7, - model: str = model, - timeout: int = 30, - intro: str = None, - filepath: str = None, - update_file: bool = True, - proxies: dict = {}, - history_offset: int = 10250, - act: str = None, - ): - """Instantiates YEPCHAT - - Args: - is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. - max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. - temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.6. - presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0. - frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0. - top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7. - model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo". - timeout (int, optional): Http request timeout. Defaults to 30. - intro (str, optional): Conversation introductory prompt. Defaults to None. - filepath (str, optional): Path to file containing conversation history. Defaults to None. - update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. - proxies (dict, optional): Http request proxies. Defaults to {}. - history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. - act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. - """ - self.is_conversation = is_conversation - self.max_tokens_to_sample = max_tokens - self.model = model - self.temperature = temperature - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.top_p = top_p - self.chat_endpoint = "https://api.yep.com/v1/chat/completions" - self.stream_chunk_size = 64 - self.timeout = timeout - self.last_response = {} - self.headers = { - "Accept": "*/*", - "Accept-Encoding": "gzip, deflate", - "Accept-Language": "en-US,en;q=0.9", - "Content-Type": "application/json; charset=utf-8", - "Origin": "https://yep.com", - "Referer": "https://yep.com/", - "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", - } - - self.__available_optimizers = ( - method - for method in dir(Optimizers) - if callable(getattr(Optimizers, method)) and not method.startswith("__") - ) - session.headers.update(self.headers) - Conversation.intro = ( - AwesomePrompts().get_act( - act, raise_not_found=True, default=None, case_insensitive=True - ) - if act - else intro or Conversation.intro - ) - self.conversation = Conversation( - is_conversation, self.max_tokens_to_sample, filepath, update_file - ) - self.conversation.history_offset = history_offset - session.proxies = proxies - - def ask( - self, - prompt: str, - stream: bool = False, - raw: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> dict: - """Chat with AI - - Args: - prompt (str): Prompt to be send. 
- stream (bool, optional): Flag for streaming response. Defaults to False. - raw (bool, optional): Stream back raw response as received. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. - Returns: - dict : {} - ```json - { - "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c", - "object": "chat.completion.chunk", - "created": 1713876886, - "model": "Mixtral-8x7B-Instruct-v0.1", - "choices": [ - { - "index": 0, - "delta": { - "role": null, - "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better." - }, - "finish_reason": null - } - ] - } - ``` - """ - conversation_prompt = self.conversation.gen_complete_prompt(prompt) - if optimizer: - if optimizer in self.__available_optimizers: - conversation_prompt = getattr(Optimizers, optimizer)( - conversation_prompt if conversationally else prompt - ) - else: - raise Exception( - f"Optimizer is not one of {self.__available_optimizers}" - ) - session.headers.update(self.headers) - payload = { - "stream": True, - "max_tokens": 1280, - "top_p": self.top_p, - "temperature": self.temperature, - "messages": [{"content": conversation_prompt, "role": "user"}], - "model": self.model, - } - - def for_stream(): - response = session.post( - self.chat_endpoint, json=payload, stream=True, timeout=self.timeout - ) - if not response.ok: - raise exceptions.FailedToGenerateResponseError( - f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}" - ) - - message_load = "" - for value in response.iter_lines( - decode_unicode=True, - delimiter="" if raw else "data:", - chunk_size=self.stream_chunk_size, - ): - try: - resp = json.loads(value) - incomplete_message = self.get_message(resp) - if incomplete_message: - message_load += incomplete_message - resp["choices"][0]["delta"]["content"] = message_load - self.last_response.update(resp) - yield value if raw else resp - elif raw: - yield value - except json.decoder.JSONDecodeError: - pass - self.conversation.update_chat_history( - prompt, self.get_message(self.last_response) - ) - - def for_non_stream(): - for _ in for_stream(): - pass - return self.last_response - - return for_stream() if stream else for_non_stream() - - def chat( - self, - prompt: str, - stream: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> str: - """Generate response `str` - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
- Returns: - str: Response generated - """ - - def for_stream(): - for response in self.ask( - prompt, True, optimizer=optimizer, conversationally=conversationally - ): - yield self.get_message(response) - - def for_non_stream(): - return self.get_message( - self.ask( - prompt, - False, - optimizer=optimizer, - conversationally=conversationally, - ) - ) - - return for_stream() if stream else for_non_stream() - - def get_message(self, response: dict) -> str: - """Retrieves message only from response - - Args: - response (dict): Response generated by `self.ask` - - Returns: - str: Message extracted - """ - assert isinstance(response, dict), "Response should be of dict data-type only" - try: - if response["choices"][0].get("delta"): - return response["choices"][0]["delta"]["content"] - return response["choices"][0]["message"]["content"] - except KeyError: - return "" - - -class AsyncYEPCHAT(AsyncProvider): - def __init__( - self, - is_conversation: bool = True, - max_tokens: int = 600, - temperature: float = 0.6, - presence_penalty: int = 0, - frequency_penalty: int = 0, - top_p: float = 0.7, - model: str = model, - timeout: int = 30, - intro: str = None, - filepath: str = None, - update_file: bool = True, - proxies: dict = {}, - history_offset: int = 10250, - act: str = None, - ): - """Instantiates YEPCHAT - - Args: - is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. - max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. - temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.6. - presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0. - frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0. - top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7. - model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo". - timeout (int, optional): Http request timeout. Defaults to 30. - intro (str, optional): Conversation introductory prompt. Defaults to None. - filepath (str, optional): Path to file containing conversation history. Defaults to None. - update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. - proxies (dict, optional): Http request proxies. Defaults to {}. - history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. - act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 
- """ - self.is_conversation = is_conversation - self.max_tokens_to_sample = max_tokens - self.model = model - self.temperature = temperature - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.top_p = top_p - self.chat_endpoint = "https://api.yep.com/v1/chat/completions" - self.stream_chunk_size = 64 - self.timeout = timeout - self.last_response = {} - self.headers = { - "Accept": "*/*", - "Accept-Encoding": "gzip, deflate", - "Accept-Language": "en-US,en;q=0.9", - "Content-Type": "application/json; charset=utf-8", - "Origin": "https://yep.com", - "Referer": "https://yep.com/", - "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", - } - - self.__available_optimizers = ( - method - for method in dir(Optimizers) - if callable(getattr(Optimizers, method)) and not method.startswith("__") - ) - Conversation.intro = ( - AwesomePrompts().get_act( - act, raise_not_found=True, default=None, case_insensitive=True - ) - if act - else intro or Conversation.intro - ) - self.conversation = Conversation( - is_conversation, self.max_tokens_to_sample, filepath, update_file - ) - self.conversation.history_offset = history_offset - self.session = httpx.AsyncClient( - headers=self.headers, - proxies=proxies, - ) - - async def ask( - self, - prompt: str, - stream: bool = False, - raw: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> dict: - """Chat with AI asynchronously. - - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - raw (bool, optional): Stream back raw response as received. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. - Returns: - dict : {} - ```json - { - "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c", - "object": "chat.completion.chunk", - "created": 1713876886, - "model": "Mixtral-8x7B-Instruct-v0.1", - "choices": [ - { - "index": 0, - "delta": { - "role": null, - "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better." 
- }, - "finish_reason": null - } - ] - } - ``` - """ - conversation_prompt = self.conversation.gen_complete_prompt(prompt) - if optimizer: - if optimizer in self.__available_optimizers: - conversation_prompt = getattr(Optimizers, optimizer)( - conversation_prompt if conversationally else prompt - ) - else: - raise Exception( - f"Optimizer is not one of {self.__available_optimizers}" - ) - payload = { - "stream": True, - "max_tokens": 1280, - "top_p": self.top_p, - "temperature": self.temperature, - "messages": [{"content": conversation_prompt, "role": "user"}], - "model": self.model, - } - - async def for_stream(): - async with self.session.stream( - "POST", self.chat_endpoint, json=payload, timeout=self.timeout - ) as response: - if not response.is_success: - raise exceptions.FailedToGenerateResponseError( - f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}" - ) - - message_load = "" - async for value in response.aiter_lines(): - try: - resp = sanitize_stream(value) - incomplete_message = await self.get_message(resp) - if incomplete_message: - message_load += incomplete_message - resp["choices"][0]["delta"]["content"] = message_load - self.last_response.update(resp) - yield value if raw else resp - elif raw: - yield value - except json.decoder.JSONDecodeError: - pass - - self.conversation.update_chat_history( - prompt, await self.get_message(self.last_response) - ) - - async def for_non_stream(): - async for _ in for_stream(): - pass - return self.last_response - - return for_stream() if stream else await for_non_stream() - - async def chat( - self, - prompt: str, - stream: bool = False, - optimizer: str = None, - conversationally: bool = False, - ) -> str: - """Generate response `str` asynchronously. - Args: - prompt (str): Prompt to be send. - stream (bool, optional): Flag for streaming response. Defaults to False. - optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. - conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
- Returns: - str: Response generated - """ - - async def for_stream(): - async_ask = await self.ask( - prompt, True, optimizer=optimizer, conversationally=conversationally - ) - - async for response in async_ask: - yield await self.get_message(response) - - async def for_non_stream(): - return await self.get_message( - await self.ask( - prompt, - False, - optimizer=optimizer, - conversationally=conversationally, - ) - ) - - return for_stream() if stream else await for_non_stream() - - async def get_message(self, response: dict) -> str: - """Retrieves message only from response - - Args: - response (dict): Response generated by `self.ask` - - Returns: - str: Message extracted - """ - assert isinstance(response, dict), "Response should be of dict data-type only" - try: - if response["choices"][0].get("delta"): - return response["choices"][0]["delta"]["content"] - return response["choices"][0]["message"]["content"] - except KeyError: - return "" - - -if __name__ == "__main__": - bot = YEPCHAT() - - # resp = bot.ask("coding", stream=False) - # print(bot.get_message(resp)) - # print(json.dumps(resp, indent=4)) - - async def asyncmain(): - bot = AsyncYEPCHAT() - resp = await bot.chat("hello", False) - print(resp) - # async for value in resp: - # print(value) - - import asyncio - - asyncio.run(asyncmain()) diff --git a/tests/test_api.py b/tests/test_api.py index 618e33d..2e6d45f 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -72,8 +72,8 @@ def test_text_stream(self): self.assertTrue(resp.is_success) @unittest.skipUnless( - os.getenv('PYTGPT_TEST_AUDIO', '') == "true", - "PYTGPT_TEST_AUDIO environment variable is not set to 'true' " + os.getenv("PYTGPT_TEST_AUDIO", "") == "true", + "PYTGPT_TEST_AUDIO environment variable is not set to 'true' ", ) def test_prompt_to_image_post(self): resp = self.client.post( @@ -90,8 +90,8 @@ def test_prompt_to_image_post(self): self.assertEqual(len(resp_dict["urls"]), 2) @unittest.skipUnless( - os.getenv('PYTGPT_TEST_AUDIO', '') == "true", - "PYTGPT_TEST_AUDIO environment variable is not set to 'true' " + os.getenv("PYTGPT_TEST_AUDIO", "") == "true", + "PYTGPT_TEST_AUDIO environment variable is not set to 'true' ", ) def test_prompt_to_image_bytes_post(self): resp = self.client.post( @@ -100,8 +100,8 @@ def test_prompt_to_image_bytes_post(self): self.assertIsNotNone(resp.headers.get("Content-Disposition")) @unittest.skipUnless( - os.getenv('PYTGPT_TEST_AUDIO', '') == "true", - "PYTGPT_TEST_AUDIO environment variable is not set to 'true' " + os.getenv("PYTGPT_TEST_AUDIO", "") == "true", + "PYTGPT_TEST_AUDIO environment variable is not set to 'true' ", ) def test_prompt_to_image_bytes_get(self): resp = self.client.get( @@ -110,8 +110,8 @@ def test_prompt_to_image_bytes_get(self): self.assertIsNotNone(resp.headers.get("Content-Disposition")) @unittest.skipUnless( - os.getenv('PYTGPT_TEST_AUDIO', '') == "true", - "PYTGPT_TEST_AUDIO environment variable is not set to 'true' " + os.getenv("PYTGPT_TEST_AUDIO", "") == "true", + "PYTGPT_TEST_AUDIO environment variable is not set to 'true' ", ) def test_prompt_to_image_bytes_get_redirect(self): resp = self.client.get( diff --git a/tests/test_opengpt_tgpt.py b/tests/test_opengpt_tgpt.py deleted file mode 100644 index 0e01c33..0000000 --- a/tests/test_opengpt_tgpt.py +++ /dev/null @@ -1,21 +0,0 @@ -import unittest -import tests.base as base -from pytgpt.opengpt import OPENGPT -from pytgpt.opengpt import AsyncOPENGPT - - -class TestOpengpt(base.llmBase): - def setUp(self): - self.bot = OPENGPT() - self.prompt = 
base.prompt - - -class TestAsyncOpenai(base.AsyncProviderBase): - - def setUp(self): - self.bot = AsyncOPENGPT() - self.prompt = base.prompt - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_utils.py b/tests/test_utils.py index 258610a..3ce44e1 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -131,8 +131,8 @@ def setUp(self): self.text = "This is a speech synthesis test" @unittest.skipUnless( - os.getenv('PYTGPT_TEST_AUDIO', '')=="true", - "PYTGPT_TEST_AUDIO environment variable is not set to 'true' " + os.getenv("PYTGPT_TEST_AUDIO", "") == "true", + "PYTGPT_TEST_AUDIO environment variable is not set to 'true' ", ) def test_text_to_audio(self): """Speech synthesis""" @@ -142,8 +142,8 @@ def test_text_to_audio(self): self.assertIs(type(voice_bytes), bytes) @unittest.skipUnless( - os.getenv('PYTGPT_TEST_AUDIO', '') == "true", - "PYTGPT_TEST_AUDIO environment variable is not set to 'true' " + os.getenv("PYTGPT_TEST_AUDIO", "") == "true", + "PYTGPT_TEST_AUDIO environment variable is not set to 'true' ", ) def test_text_to_audio_save_to(self): """Save speech to a file""" @@ -152,14 +152,16 @@ def test_text_to_audio_save_to(self): self.assertTrue(os.path.exists(saved_to)) os.remove(saved_to) + class TestOthers(unittest.TestCase): def setUp(self): pass def test_query_suggestions(self): - suggestions = suggest_query('hello', die_silently=True) + suggestions = suggest_query("hello", die_silently=True) self.assertIsInstance(suggestions, list) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_yepchat_tgpt.py b/tests/test_yepchat_tgpt.py deleted file mode 100644 index 7d19f7f..0000000 --- a/tests/test_yepchat_tgpt.py +++ /dev/null @@ -1,21 +0,0 @@ -import unittest -import tests.base as base -from pytgpt.yepchat import YEPCHAT -from pytgpt.yepchat import AsyncYEPCHAT - - -class TestYepchat(base.llmBase): - def setUp(self): - self.bot = YEPCHAT() - self.prompt = base.prompt - - -class TestAsyncYepchat(base.AsyncProviderBase): - - def setUp(self): - self.bot = AsyncYEPCHAT() - self.prompt = base.prompt - - -if __name__ == "__main__": - unittest.main()
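
For anyone migrating off the removed `opengpt` and `yepchat` providers, the equivalent synchronous snippet against a provider that survives this patch (phind), following the same zero-argument constructor shape as the deleted README example:

```python
import pytgpt.phind as phind

bot = phind.PHIND()
print(bot.chat("hello"))
```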
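
The updated README keeps the `Async` naming convention (prefix `Async` to the synchronous class name). A minimal sketch of that pattern, assuming `AsyncPHIND.chat` is a coroutine that mirrors the synchronous API like the other `AsyncProvider` implementations touched by this patch:

```python
import asyncio
from pytgpt.phind import AsyncPHIND


async def main():
    bot = AsyncPHIND()
    # Without stream=True, chat() resolves to the whole response string
    print(await bot.chat("hello"))


asyncio.run(main())
```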
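
One caveat on the promo-removal logic added to `src/pytgpt/blackboxai/main.py`: `provider_promo_text` is passed to `re.sub` as a pattern, so its unescaped metacharacters (the dots in `BLACKBOX.AI` and in the URL) match any character rather than a literal dot. A literal-safe variant is sketched below; `strip_promo` is an illustrative helper, not part of the patch:

```python
import re

provider_promo_text = (
    "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"
)


def strip_promo(streaming_text: str) -> str:
    # re.escape turns every regex metacharacter into a literal, so only the
    # exact promo sentence is removed; str.replace would work equally well.
    return re.sub(re.escape(provider_promo_text), "", streaming_text).strip()


print(strip_promo("Hello!\n" + provider_promo_text))  # -> Hello!
```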
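
A small residual nit in the reformatted `suggest_query` (`src/pytgpt/utils.py`): the signature accepts `timeout: int = 20`, but the request still hardcodes `timeout=20`, so the console's `suggest_query(word, timeout=2, die_silently=True)` call has no effect on the request. If the parameter is meant to take effect, the call would presumably read:

```python
resp = requests.get(link, params=params, timeout=timeout)
```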