diff --git a/.gitignore b/.gitignore index c8ae07d..835d5ec 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,8 @@ /topos/test/_conv_cache/* !/topos/test/_conv_cache/.gitignore +# cert files +*.pem # Byte-compiled / optimized / DLL files __pycache__/ @@ -99,7 +101,14 @@ ipython_config.py /_cache /_conv_cache -/.env .DS_Store /topos/test/accounts.json +/.env .env +chat_api_gcp/ +topos/credentials.json +topos/justfile +topos/cloudbuild.yaml +dockerfile + +topos.app \ No newline at end of file diff --git a/README.md b/README.md index 4230a6b..bb8a5f9 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,21 @@ A simple api to use your local machine to play chat games over in the [chat arena](https://github.com/jonnyjohnson1/chat-arena). Game options are: -1. Debate +1. Debate + + +## Install with nix (Recommended) +1. Install nix: + macos/linux: `sh <(curl -L https://nixos.org/nix/install)` + windows: `sh <(curl -L https://nixos.org/nix/install) --daemon` +2. Download this repo `git clone https://github.com/jonnyjohnson1/topos-cli` +3. `cd topos-cli` +4. build the backend service: `nix-shell` +5. +``` +topos set --spacy trf // set this value. It only needs to be set once. +topos run // start topos +``` ## Install Instructions requires `brew install just` @@ -34,16 +48,20 @@ Use the tag like this. `topos run --local` ### Step 4a (zrok): Set up web proxy -We are going to expose our backend service to a public network so our phone/tablet can use it. In this case, we use zrok. Below, is an ngrok setup version. +We are going to expose our backend service to a public network so our phone/tablet can use it. In this case, we use zrok. Below is are the guides to set up ngrok. + zrok is opensourced and free. ngrok has a gated requests/month under its free tier, then requires you pay for it. -1. 
[Install zrok command](https://docs.zrok.io/docs/getting-started/?_gl=1*1yet1eb*_ga*MTQ1MDc2ODAyNi4xNzE3MDE3MTE3*_ga_V2KMEXWJ10*MTcxNzAxNzExNi4xLjAuMTcxNzAxNzExNi42MC4wLjA.*_gcl_au*NDk3NjM1MzEyLjE3MTcwMTcxMTc.#installing-the-zrok-command) -2. `zrok enable ` -3. `zrok status` should show you information -4. Route local path through zrok: `zrok share public http://0.0.0.0:13341` +1. Be sure you have the `topos` server running already in another terminal. +2. [Install zrok command](https://docs.zrok.io/docs/getting-started/?_gl=1*1yet1eb*_ga*MTQ1MDc2ODAyNi4xNzE3MDE3MTE3*_ga_V2KMEXWJ10*MTcxNzAxNzExNi4xLjAuMTcxNzAxNzExNi42MC4wLjA.*_gcl_au*NDk3NjM1MzEyLjE3MTcwMTcxMTc.#installing-the-zrok-command) +3. `zrok enable ` +4. `zrok status` should show you information +5. Route local path through zrok: `zrok share public http://0.0.0.0:13341` This will take you to a new screen with an https:// at the top. Insert this url into the field under settings-> "Api Endpoints" -> "Custom API" -5. After you've insert it into the field, press the test button, and "hello world" should appear next to the button. +6. After you've insert it into the field, press the test button, and "hello world" should appear next to the button. + +[ ] Enable permanent sharing of zrok url [docs](https://docs.zrok.io/docs/guides/docker-share/#permanent-public-share) (requires Docker) ### Step 4b (ngrok): Set up web proxy diff --git a/active_sessions.pkl b/active_sessions.pkl deleted file mode 100644 index e69de29..0000000 diff --git a/config.yaml b/config.yaml deleted file mode 100644 index 8fde4f5..0000000 --- a/config.yaml +++ /dev/null @@ -1 +0,0 @@ -active_spacy_model: en_core_web_trf diff --git a/default.nix b/default.nix new file mode 100644 index 0000000..c2e0232 --- /dev/null +++ b/default.nix @@ -0,0 +1,16 @@ +{ pkgs ? 
import {} }: + +let + python = pkgs.python3; + +in pkgs.mkShell { + buildInputs = [ + pkgs.just + pkgs.poetry + ]; + + shellHook = '' + just build + topos run + ''; +} \ No newline at end of file diff --git a/justfile b/justfile index ce229dc..b7b5845 100644 --- a/justfile +++ b/justfile @@ -12,4 +12,13 @@ zrok_chat: zrok share public http://0.0.0.0:13394 cert: - openssl req -x509 -newkey rsa:4096 -nodes -out cert.pem -keyout key.pem -days 365 + openssl req -x509 -newkey rsa:4096 -nodes -out topos/cert.pem -keyout topos/key.pem -days 365 + +python: + pyi-makespec --onefile main.py + # add ('topos/config.yaml', 'topos/') + pyinstaller main.spec + create-dmg 'dist/main' --overwrite + +dmg: + create-dmg topos.app --volicon "topos/assets/topos_blk_rounded.png" --icon "topos/assets/topos_blk_rounded.png" \ No newline at end of file diff --git a/plots/sentence_edge_detection.png b/plots/sentence_edge_detection.png deleted file mode 100644 index 1091cbd..0000000 Binary files a/plots/sentence_edge_detection.png and /dev/null differ diff --git a/pyproject.toml b/pyproject.toml index 29e1e26..cd67dfc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,11 +11,11 @@ homepage = "https://dialogues.ai" python = "^3.9" httpx = "^0.27.0" fastapi = "0.109.2" -langchain = "0.1.4" uvicorn = "0.20.0" websockets = "11.0.3" ollama = "0.1.9" spacy = "3.7.2" +pydantic = "2.7.4" transformers = "4.40.2" torch = "2.3.0" diffusers = "0.27.2" @@ -39,6 +39,7 @@ python-multipart = "^0.0.9" pytest-asyncio = "^0.23.7" textblob = "^0.18.0.post0" tk = "0.1.0" +pystray = "0.19.5" supabase = "^2.6.0" psycopg2-binary = "^2.9.9" diff --git a/topos.dmg b/topos.dmg new file mode 100644 index 0000000..1eaf890 Binary files /dev/null and b/topos.dmg differ diff --git a/topos/.env b/topos/.env new file mode 100644 index 0000000..c31faa3 --- /dev/null +++ b/topos/.env @@ -0,0 +1,6 @@ +NEO4J_URI="bolt://localhost:7687" +NEO4J_USER="neo4j" +NEO4J_PASSWORD="password" +NEO4J_TEST_DATABASE="neo4j" 
+NEO4J_SHOWROOM_DATABASE="neo4j" +JWT_SECRET="j1234cquwn09er8nvq0c9" \ No newline at end of file diff --git a/topos/.env_dev b/topos/.env_dev new file mode 100644 index 0000000..54e3fbd --- /dev/null +++ b/topos/.env_dev @@ -0,0 +1,6 @@ +NEO4J_URI="bolt://localhost:7687" +NEO4J_USER="neo4j" +NEO4J_PASSWORD="password" +NEO4J_TEST_DATABASE="neo4j" +NEO4J_SHOWROOM_DATABASE="neo4j" +JWT_SECRET="terces_tj" \ No newline at end of file diff --git a/topos/FC/_cache/cache__0a63d4946bc44b04a974493806c27f003bd2cfb99163b3396eaf51df50c827f7.pkl b/topos/FC/_cache/cache__0a63d4946bc44b04a974493806c27f003bd2cfb99163b3396eaf51df50c827f7.pkl new file mode 100644 index 0000000..242c679 Binary files /dev/null and b/topos/FC/_cache/cache__0a63d4946bc44b04a974493806c27f003bd2cfb99163b3396eaf51df50c827f7.pkl differ diff --git a/topos/FC/_cache/cache__37785727bdb14e0cf735e52e8120d73d47d1223020984e7ed1317c0f9fe31839.pkl b/topos/FC/_cache/cache__37785727bdb14e0cf735e52e8120d73d47d1223020984e7ed1317c0f9fe31839.pkl new file mode 100644 index 0000000..a8ef046 Binary files /dev/null and b/topos/FC/_cache/cache__37785727bdb14e0cf735e52e8120d73d47d1223020984e7ed1317c0f9fe31839.pkl differ diff --git a/topos/FC/_cache/cache__46d188b37c7f13cba849627c19a785bf03f5d3b9bb5a71b007d13368848b505f.pkl b/topos/FC/_cache/cache__46d188b37c7f13cba849627c19a785bf03f5d3b9bb5a71b007d13368848b505f.pkl new file mode 100644 index 0000000..fff8217 Binary files /dev/null and b/topos/FC/_cache/cache__46d188b37c7f13cba849627c19a785bf03f5d3b9bb5a71b007d13368848b505f.pkl differ diff --git a/topos/FC/_cache/cache__48e178cc5fa3dd83857cd5ac7382bac15cea5f7d133f8b12a4a3fb02a8bef142.pkl b/topos/FC/_cache/cache__48e178cc5fa3dd83857cd5ac7382bac15cea5f7d133f8b12a4a3fb02a8bef142.pkl new file mode 100644 index 0000000..6952606 Binary files /dev/null and b/topos/FC/_cache/cache__48e178cc5fa3dd83857cd5ac7382bac15cea5f7d133f8b12a4a3fb02a8bef142.pkl differ diff --git 
a/topos/FC/_cache/cache__53c49505ed3d2fd0808acc71da8b7ea1a2cca2bf1fe14e6aaac7dcb1de737479.pkl b/topos/FC/_cache/cache__53c49505ed3d2fd0808acc71da8b7ea1a2cca2bf1fe14e6aaac7dcb1de737479.pkl new file mode 100644 index 0000000..b3a597a Binary files /dev/null and b/topos/FC/_cache/cache__53c49505ed3d2fd0808acc71da8b7ea1a2cca2bf1fe14e6aaac7dcb1de737479.pkl differ diff --git a/topos/FC/_cache/cache__64a3068cf79c224a43d2e166c54f59bad6ee174a3e27dbe96874f57be7744fb9.pkl b/topos/FC/_cache/cache__64a3068cf79c224a43d2e166c54f59bad6ee174a3e27dbe96874f57be7744fb9.pkl new file mode 100644 index 0000000..b0ba697 Binary files /dev/null and b/topos/FC/_cache/cache__64a3068cf79c224a43d2e166c54f59bad6ee174a3e27dbe96874f57be7744fb9.pkl differ diff --git a/topos/FC/_cache/cache__807e1c3364f5b5dcf5e2420e078173078ccef76c87adade0108835d91cad8b75.pkl b/topos/FC/_cache/cache__807e1c3364f5b5dcf5e2420e078173078ccef76c87adade0108835d91cad8b75.pkl new file mode 100644 index 0000000..6afb226 Binary files /dev/null and b/topos/FC/_cache/cache__807e1c3364f5b5dcf5e2420e078173078ccef76c87adade0108835d91cad8b75.pkl differ diff --git a/topos/FC/_cache/cache__852cb5b0468d04fc18fcb3ec8afea8a3c718346ff5467aa79ebce6fc40142354.pkl b/topos/FC/_cache/cache__852cb5b0468d04fc18fcb3ec8afea8a3c718346ff5467aa79ebce6fc40142354.pkl new file mode 100644 index 0000000..1f23ea9 Binary files /dev/null and b/topos/FC/_cache/cache__852cb5b0468d04fc18fcb3ec8afea8a3c718346ff5467aa79ebce6fc40142354.pkl differ diff --git a/topos/FC/_cache/cache__8da4e6617cbb142148556847cef930d52f04297517a83d033164797a805cb430.pkl b/topos/FC/_cache/cache__8da4e6617cbb142148556847cef930d52f04297517a83d033164797a805cb430.pkl new file mode 100644 index 0000000..5dc33e2 Binary files /dev/null and b/topos/FC/_cache/cache__8da4e6617cbb142148556847cef930d52f04297517a83d033164797a805cb430.pkl differ diff --git a/topos/FC/_cache/cache__8f828ece20ddd784f022eff5bf9d11c7bdb05f89a10bac1182510182f016c264.pkl 
b/topos/FC/_cache/cache__8f828ece20ddd784f022eff5bf9d11c7bdb05f89a10bac1182510182f016c264.pkl new file mode 100644 index 0000000..26e059b Binary files /dev/null and b/topos/FC/_cache/cache__8f828ece20ddd784f022eff5bf9d11c7bdb05f89a10bac1182510182f016c264.pkl differ diff --git a/topos/FC/_cache/cache__97d8bd430beed046419018fa940e746facf4175097eadfa504aba07ebb3c42aa.pkl b/topos/FC/_cache/cache__97d8bd430beed046419018fa940e746facf4175097eadfa504aba07ebb3c42aa.pkl new file mode 100644 index 0000000..bb70e80 Binary files /dev/null and b/topos/FC/_cache/cache__97d8bd430beed046419018fa940e746facf4175097eadfa504aba07ebb3c42aa.pkl differ diff --git a/topos/FC/_cache/cache__a25c396769c76dda47bef041c5aae5e1919818f6b15228fa5963a0472cfb8463.pkl b/topos/FC/_cache/cache__a25c396769c76dda47bef041c5aae5e1919818f6b15228fa5963a0472cfb8463.pkl new file mode 100644 index 0000000..a2e7e0f Binary files /dev/null and b/topos/FC/_cache/cache__a25c396769c76dda47bef041c5aae5e1919818f6b15228fa5963a0472cfb8463.pkl differ diff --git a/topos/FC/_cache/cache__a66ca409e4e0f6a2f76915af1b138901f1645074b46055afb1f2ac52630960b6.pkl b/topos/FC/_cache/cache__a66ca409e4e0f6a2f76915af1b138901f1645074b46055afb1f2ac52630960b6.pkl new file mode 100644 index 0000000..c812e10 Binary files /dev/null and b/topos/FC/_cache/cache__a66ca409e4e0f6a2f76915af1b138901f1645074b46055afb1f2ac52630960b6.pkl differ diff --git a/topos/FC/_cache/cache__ad2514c1b75bbdf48398bbb4b61c2fdcee953bae60db80e35f9b97d5f479a3f3.pkl b/topos/FC/_cache/cache__ad2514c1b75bbdf48398bbb4b61c2fdcee953bae60db80e35f9b97d5f479a3f3.pkl new file mode 100644 index 0000000..91c7d24 Binary files /dev/null and b/topos/FC/_cache/cache__ad2514c1b75bbdf48398bbb4b61c2fdcee953bae60db80e35f9b97d5f479a3f3.pkl differ diff --git a/topos/FC/_cache/cache__b6b06aeea64deb02f23b92456f25ed07be96c7daec42fc911621020733de3e4e.pkl b/topos/FC/_cache/cache__b6b06aeea64deb02f23b92456f25ed07be96c7daec42fc911621020733de3e4e.pkl new file mode 100644 index 0000000..be88512 Binary 
files /dev/null and b/topos/FC/_cache/cache__b6b06aeea64deb02f23b92456f25ed07be96c7daec42fc911621020733de3e4e.pkl differ diff --git a/topos/FC/_cache/cache__b78cc75689c4698880896d592833d6671ae5328e00c7ef82c1649863a18e1387.pkl b/topos/FC/_cache/cache__b78cc75689c4698880896d592833d6671ae5328e00c7ef82c1649863a18e1387.pkl new file mode 100644 index 0000000..8acb611 Binary files /dev/null and b/topos/FC/_cache/cache__b78cc75689c4698880896d592833d6671ae5328e00c7ef82c1649863a18e1387.pkl differ diff --git a/topos/FC/_cache/cache__b7f2c1b2ba1ef1b53eb1db5c594ded0c151d644d7f60324fcd4fc4b7e3c0f2e4.pkl b/topos/FC/_cache/cache__b7f2c1b2ba1ef1b53eb1db5c594ded0c151d644d7f60324fcd4fc4b7e3c0f2e4.pkl new file mode 100644 index 0000000..b22474c Binary files /dev/null and b/topos/FC/_cache/cache__b7f2c1b2ba1ef1b53eb1db5c594ded0c151d644d7f60324fcd4fc4b7e3c0f2e4.pkl differ diff --git a/topos/FC/_cache/cache__c298a01d4605f0c521fe5630b6970f2e1676be551bb5fe1ec4bb7f14f10a8222.pkl b/topos/FC/_cache/cache__c298a01d4605f0c521fe5630b6970f2e1676be551bb5fe1ec4bb7f14f10a8222.pkl new file mode 100644 index 0000000..6356259 Binary files /dev/null and b/topos/FC/_cache/cache__c298a01d4605f0c521fe5630b6970f2e1676be551bb5fe1ec4bb7f14f10a8222.pkl differ diff --git a/topos/FC/_cache/cache__e0ae102cc31c215f5fa03e7cb252c2e89fba5adbb14e62102d2fdb800d053278.pkl b/topos/FC/_cache/cache__e0ae102cc31c215f5fa03e7cb252c2e89fba5adbb14e62102d2fdb800d053278.pkl new file mode 100644 index 0000000..326c220 Binary files /dev/null and b/topos/FC/_cache/cache__e0ae102cc31c215f5fa03e7cb252c2e89fba5adbb14e62102d2fdb800d053278.pkl differ diff --git a/topos/FC/_cache/cache__e4fa2ff40d574f8aa65d219326d41309986f654b47c66c8632268ded2f660a0e.pkl b/topos/FC/_cache/cache__e4fa2ff40d574f8aa65d219326d41309986f654b47c66c8632268ded2f660a0e.pkl new file mode 100644 index 0000000..5b309e5 Binary files /dev/null and b/topos/FC/_cache/cache__e4fa2ff40d574f8aa65d219326d41309986f654b47c66c8632268ded2f660a0e.pkl differ diff --git 
a/topos/FC/_cache/cache__e8e2c33cbcae5d83e981cc1eb364ed42d9e49f5fa11b35cad4e831c3060d47a0.pkl b/topos/FC/_cache/cache__e8e2c33cbcae5d83e981cc1eb364ed42d9e49f5fa11b35cad4e831c3060d47a0.pkl new file mode 100644 index 0000000..3c21133 Binary files /dev/null and b/topos/FC/_cache/cache__e8e2c33cbcae5d83e981cc1eb364ed42d9e49f5fa11b35cad4e831c3060d47a0.pkl differ diff --git a/topos/FC/_cache/cache__f381c06e665001c4a4fd02e7e9762842ed47a67749ca7943219b481f19621c64.pkl b/topos/FC/_cache/cache__f381c06e665001c4a4fd02e7e9762842ed47a67749ca7943219b481f19621c64.pkl new file mode 100644 index 0000000..36c9cef Binary files /dev/null and b/topos/FC/_cache/cache__f381c06e665001c4a4fd02e7e9762842ed47a67749ca7943219b481f19621c64.pkl differ diff --git a/topos/FC/semantic_compression.py b/topos/FC/semantic_compression.py index 2a779f1..e19d7d4 100644 --- a/topos/FC/semantic_compression.py +++ b/topos/FC/semantic_compression.py @@ -6,7 +6,6 @@ # from dotenv import load_dotenv from openai import OpenAI -from langchain_community.llms import Ollama from topos.FC.cache_manager import CacheManager from topos.FC.similitude_module import load_model, util diff --git a/topos/api-bak.py b/topos/api-bak.py deleted file mode 100644 index b609151..0000000 --- a/topos/api-bak.py +++ /dev/null @@ -1,208 +0,0 @@ -# api.py -from dotenv import load_dotenv -from fastapi import FastAPI, HTTPException, WebSocket -from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import Response, StreamingResponse -from pydantic import BaseModel -import uvicorn -import json -import os - -from topos.summaries.summaries import stream_chat -from topos.FC.semantic_compression import SemanticCompression - -import ssl -import requests - -load_dotenv() - -app = FastAPI() - -try: - openai_api_key = os.environ["OPENAI_API_KEY"] -except KeyError: - openai_api_key = None - print("\033[93mWARNING:\033[0m OPENAI_API_KEY environment variable is not set.") - -semantic_compression = None - -# get the current working 
directory -project_dir = "/Users/dialogues/developer/topos/cli" -key_path = project_dir + "/key.pem" -cert_path = project_dir + "/cert.pem" - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - - -import tkinter as tk -from tkinter import filedialog - -def read_file_as_bytes(file_path): - try: - with open(file_path, 'rb') as file: - file_bytes = list(file.read()) - return file_bytes - except FileNotFoundError: - print("File not found.") - return None - except Exception as e: - print(f"An error occurred: {e}") - return None - -@app.post("/get_files") -async def get_files(): - root = tk.Tk() - root.withdraw() - filetypes = [("PNG files", "*.png"), ("JPG files", "*.jpg"), ("JPEG files", "*.jpeg")] - file_path = filedialog.askopenfilename(title="Select an image file", - filetypes=(filetypes)) - print(file_path) - - # Use the os.path module - system_path = os.path.abspath("/") - print(system_path) - bytes_list = read_file_as_bytes(file_path) - media_type="application/json", - - # data = json.dumps(, ensure_ascii=False) - # print(data[:110]) - return {"file_name" : [i for i in file_path], "bytes": bytes_list} - -@app.post("/list_models") -async def list_models(): - url = "http://localhost:11434/api/tags" - try: - result = requests.get(url) - if result.status_code == 200: - return {"result": result.json()} - else: - print(f"Ping failed. Status code: {result.status_code}") - return None - except requests.ConnectionError: - print("Ping failed. Connection refused. 
Check if the server is running.") - return None - - - -""" - -Standard Chat - -""" - - -@app.websocket("/chat") -async def chat(websocket: WebSocket): - await websocket.accept() - data = await websocket.receive_text() - payload = json.loads(data) # Assuming the client sends JSON with 'option' and 'raw_text_prompt' keys - await websocket.send_json({"status": "started"}) - message = payload["message"] - message_history = payload["message_history"] - - """ - the message history is a list of messages and each message can have a lot of data associated with it - - for now we are just going to extract - """ - - model = payload["model"] if "model" in payload else "solar" - - temperature = 0.04 # default temperature - - semantic_compression = SemanticCompression(model=f"ollama:{model}", api_key=openai_api_key) - - if "temperature" in payload: - if payload["temperature"] != None: - tmp = float(payload["temperature"]) - temperature = tmp if tmp <= 1 else 1 - - print(f"MSG: {message}") - print(f"MSGHIST: {len(message_history)}") - print(f"MODEL: {model}") - print(f"TEMP: {temperature}") - # First step, set the prompt for the short summarizer - # insert entries information as system prompt - has_topic = False - if current_topic != "Unknown": - has_topic = True - prompt = f"You are a smooth talking, eloquent, poignant, insightful AI moderator. The current topic is {current_topic}.\n" - - system_prompt = f"You are a smooth talking, eloquent, poignant, insightful AI moderator. 
The current topic is unknown, so try not to make any judgements thus far - only re-express the input words in your own style:" - user_prompt = "" - if message_history: - # Add the message history prior to the message - user_prompt += '\n'.join(msg['role'] + ": " + msg['content'] for msg in message_history) - - print(f"\t[ system prompt :: {system_prompt} ]") - print(f"\t[ user prompt :: {user_prompt} ]") - simp_msg_history = [{'role': 'system', 'content': system_prompt}] - - # convert message history into basic message history - for i in message_history: - if "images" in i: - simp_msg_history.append({'role': i['role'], 'content': i['content'], 'images': i['images']}) - else: - simp_msg_history.append({'role': i['role'], 'content': i['content']}) - - try: - text = [] - for chunk in stream_chat(simp_msg_history, model = model, temperature=temperature): - text.append(chunk) - story_summary = {'response':''.join(text), 'completed': False} - await websocket.send_json({"status": "generating", **story_summary}) - - output_combined = ''.join(text) - semantic_category = semantic_compression.fetch_semantic_category(output_combined) - - print(f"\t\t[ found semantic category {semantic_category}]") - story_summary = {'response':output_combined, 'completed': True, , "semantic_category": semantic_category} # llm function - await websocket.send_json({"status": "completed", **story_summary}) - except Exception as e: - await websocket.send_json({"status": "error", "message": "Generation failed"}) - await websocket.close() - return - - await websocket.close() - - - - - - - - - - -""" - -START API OPTIONS - -There is the web option for networking with the online, webhosted version -There is the local option to connect the local apps to the Topos API (Grow debugging, and the Chat app) - - -""" - - - - -def start_local_api(): - print("\033[92mINFO:\033[0m API docs available at: http://0.0.0.0:13341/docs") - uvicorn.run(app, host="0.0.0.0", port=13341) # port for the local versions - 
-def start_web_api(): - print("\033[92mINFO:\033[0m API docs available at: https://0.0.0.0:13341/docs") - uvicorn.run(app, host="0.0.0.0", port=13341, ssl_keyfile=key_path, - ssl_certfile=cert_path) # the web version needs the ssl certificate loaded - -if __name__ == "__main__": - start_local_api() - print("\033[92mINFO:\033[0m API docs available at: http://0.0.0.0:13341/docs") - uvicorn.run(app, host="0.0.0.0", port=13341) \ No newline at end of file diff --git a/topos/api/api.py b/topos/api/api.py index e82fe2c..d4a935d 100644 --- a/topos/api/api.py +++ b/topos/api/api.py @@ -38,4 +38,9 @@ def start_local_api(): def start_web_api(): """Function to start the API in web mode with SSL.""" certs = get_ssl_certificates() - uvicorn.run(app, host="0.0.0.0", port=13341, ssl_keyfile=certs['key_path'], ssl_certfile=certs['cert_path']) \ No newline at end of file + uvicorn.run(app, host="0.0.0.0", port=13341, ssl_keyfile=certs['key_path'], ssl_certfile=certs['cert_path']) + + +def start_hosted_service(): + """Function to start the API in web mode with SSL.""" + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/topos/api/api_routes.py b/topos/api/api_routes.py index cfa79e0..a155d4c 100644 --- a/topos/api/api_routes.py +++ b/topos/api/api_routes.py @@ -1,8 +1,10 @@ # api_routes.py import os -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter, HTTPException, Request +from fastapi.responses import JSONResponse import requests +import signal import tkinter as tk from tkinter import filedialog from topos.FC.conversation_cache_manager import ConversationCacheManager @@ -13,12 +15,28 @@ from ..generations.ollama_chat import generate_response from ..utilities.utils import create_conversation_string +from ..services.ontology_service.mermaid_chart import get_mermaid_chart cache_manager = ConversationCacheManager() class ConversationIDRequest(BaseModel): conversation_id: str +@router.post("/shutdown") +def shutdown(request: 
Request): + os.kill(os.getpid(), signal.SIGTERM) + return JSONResponse(content={"message": "Server shutting down..."}) + + +@router.get("/health") +async def health_check(): + try: + # Perform any additional checks here if needed + return {"status": "healthy"} + except Exception as e: + raise HTTPException(status_code=500, detail=f"Health check failed: {e}") + + @router.post("/chat_conversation_analysis") async def chat_conversation_analysis(request: ConversationIDRequest): conversation_id = request.conversation_id @@ -271,4 +289,45 @@ def read_file_as_bytes(file_path): return None except Exception as e: print(f"An error occurred: {e}") - return None \ No newline at end of file + return None + + +class MermaidChartPayload(BaseModel): + message: str = None + conversation_id: str + full_conversation: bool = False + model: str = "dolphin-llama3" + temperature: float = 0.04 + +@router.post("/generate_mermaid_chart") +async def generate_mermaid_chart(payload: MermaidChartPayload): + try: + conversation_id = payload.conversation_id + full_conversation = payload.full_conversation + model = payload.model + temperature = payload.temperature + + if full_conversation: + cache_manager = ConversationCacheManager() + conv_data = cache_manager.load_from_cache(conversation_id) + if conv_data is None: + raise HTTPException(status_code=404, detail="Conversation not found in cache") + print(f"\t[ generating mermaid chart :: using model {model} :: full conversation ]") + return {"status": "generating", "response": "generating mermaid chart", 'completed': False} + # TODO: Complete this branch if needed + + else: + message = payload.message + if message: + print(f"\t[ generating mermaid chart :: using model {model} ]") + try: + mermaid_string = await get_mermaid_chart(message) + if mermaid_string == "Failed to generate mermaid": + return {"status": "error", "response": mermaid_string, 'completed': True} + else: + return {"status": "completed", "response": mermaid_string, 'completed': 
True} + except Exception as e: + return {"status": "error", "response": f"Error: {e}", 'completed': True} + + except Exception as e: + return {"status": "error", "message": str(e)} \ No newline at end of file diff --git a/topos/api/debate_routes.py b/topos/api/debate_routes.py index 41c0f45..4c8cc0d 100644 --- a/topos/api/debate_routes.py +++ b/topos/api/debate_routes.py @@ -9,7 +9,7 @@ from fastapi.security import OAuth2PasswordBearer from jwt.exceptions import InvalidTokenError -from datetime import datetime, timedelta, UTC +from datetime import datetime, timedelta, timezone import json import jwt from uuid import uuid4 @@ -115,7 +115,7 @@ async def login(form_data: OAuth2PasswordRequestForm = Depends()): # Create JWT token token_data = { "user_id": user_id, - "exp": datetime.now(UTC) + timedelta(hours=1) + "exp": datetime.now(timezone.utc) + timedelta(hours=1) } token = jwt.encode(token_data, SECRET_KEY, algorithm=ALGORITHM) print(f"Login successful for user {form_data.username}") @@ -123,7 +123,6 @@ async def login(form_data: OAuth2PasswordRequestForm = Depends()): return {"access_token": token, "token_type": "bearer"} - # WebSocket endpoint with JWT validation @router.websocket("/ws") async def websocket_endpoint(websocket: WebSocket, token: Union[str, None] = Query(default=None), session_id: Union[str, None] = Query(default=None)): diff --git a/topos/api/websocket_handlers.py b/topos/api/websocket_handlers.py index eafd511..5d5f597 100644 --- a/topos/api/websocket_handlers.py +++ b/topos/api/websocket_handlers.py @@ -4,14 +4,14 @@ import traceback from ..generations.ollama_chat import stream_chat -from topos.FC.semantic_compression import SemanticCompression -from ..config import get_openai_api_key +# from topos.FC.semantic_compression import SemanticCompression +# from ..config import get_openai_api_key from ..models.llm_classes import vision_models import json from ..utilities.utils import create_conversation_string from 
..services.classification_service.base_analysis import base_text_classifier, base_token_classifier - +from ..services.ontology_service.mermaid_chart import get_mermaid_chart # cache database from topos.FC.conversation_cache_manager import ConversationCacheManager @@ -59,7 +59,7 @@ async def chat(websocket: WebSocket): has_topic = True prompt = f"You are a smooth talking, eloquent, poignant, insightful AI moderator. The current topic is {current_topic}.\n" - system_prompt = f"You are a smooth talking, eloquent, poignant, insightful AI moderator. The current topic is unknown, so try not to make any judgements thus far - only re-express the input words in your own style:" + system_prompt = f"You are a smooth talking, eloquent, poignant, insightful AI moderator." user_prompt = "" if message_history: # Add the message history prior to the message @@ -143,10 +143,10 @@ async def chat(websocket: WebSocket): for chunk in stream_chat(simp_msg_history, model=model, temperature=temperature): output_combined += chunk await websocket.send_json({"status": "generating", "response": output_combined, 'completed': False}) - + # Fetch semantic category from the output - semantic_compression = SemanticCompression(model=f"ollama:{model}", api_key=get_openai_api_key()) - semantic_category = (semantic_compression.fetch_semantic_category(output_combined)).content + # semantic_compression = SemanticCompression(model=f"ollama:{model}", api_key=get_openai_api_key()) + # semantic_category = semantic_compression.fetch_semantic_category(output_combined) # Start timer for base_token_classifier if config['calculateInMessageNER']: @@ -190,7 +190,7 @@ async def chat(websocket: WebSocket): 'message': output_combined, 'timestamp': datetime.now(), }}) - + # Send the final completed message send_pkg = {"status": "completed", "response": output_combined, "semantic_category": semantic_category, "completed": True} if config['calculateModerationTags'] or config['calculateInMessageNER']: @@ -328,3 +328,80 
@@ async def meta_chat(websocket: WebSocket): await websocket.send_json({"status": "error", "message": str(e)}) await websocket.close() +@router.websocket("/websocket_mermaid_chart") +async def meta_chat(websocket: WebSocket): + """ + + Generates a mermaid chart from a list of messages. + + """ + await websocket.accept() + try: + while True: + data = await websocket.receive_text() + payload = json.loads(data) + message = payload.get("message", None) + conversation_id = payload["conversation_id"] + full_conversation = payload.get("full_conversation", False) + model = payload.get("model", "dolphin-llama3") + temperature = float(payload.get("temperature", 0.04)) + + # load conversation + if full_conversation: + cache_manager = ConversationCacheManager() + conv_data = cache_manager.load_from_cache(conversation_id) + if conv_data is None: + raise HTTPException(status_code=404, detail="Conversation not found in cache") + print(f"\t[ generating mermaid chart :: using model {model} :: full conversation ]") + await websocket.send_json({"status": "generating", "response": "generating mermaid chart", 'completed': False}) + context = create_conversation_string(conv_data, 12) + # TODO Complete this branch + else: + if message: + print(f"\t[ generating mermaid chart :: using model {model} ]") + await websocket.send_json({"status": "generating", "response": "generating mermaid chart", 'completed': False}) + try: + mermaid_string = await get_mermaid_chart(message, websocket = websocket) + if mermaid_string == "Failed to generate mermaid": + await websocket.send_json({"status": "error", "response": mermaid_string, 'completed': True}) + else: + await websocket.send_json({"status": "completed", "response": mermaid_string, 'completed': True}) + except Exception as e: + await websocket.send_json({"status": "error", "response": f"Error: {e}", 'completed': True}) + except WebSocketDisconnect: + print("WebSocket disconnected") + except Exception as e: + await 
websocket.send_json({"status": "error", "message": str(e)}) + await websocket.close() + finally: + await websocket.close() + + + + +@router.websocket("/debate_flow_with_jwt") +async def debate_flow_with_jwt(websocket: WebSocket): + await websocket.accept() + try: + while True: + data = await websocket.receive_text() + payload = json.loads(data) + message_data = payload.get("message_data", None) + model = payload.get("model", None) + + if message_data: + await websocket.send_json({"status": "generating", "response": "starting debate flow analysis", 'completed': False}) + try: + # Assuming DebateSimulator is correctly set up + debate_simulator = await DebateSimulator.get_instance() + response_data = debate_simulator.process_messages(message_data, model) + await websocket.send_json({"status": "completed", "response": response_data, 'completed': True}) + except Exception as e: + await websocket.send_json({"status": "error", "response": f"Error: {e}", 'completed': True}) + except WebSocketDisconnect: + print("WebSocket disconnected") + except Exception as e: + await websocket.send_json({"status": "error", "message": str(e)}) + await websocket.close() + finally: + await websocket.close() \ No newline at end of file diff --git a/topos/app/menu_bar_app.py b/topos/app/menu_bar_app.py new file mode 100644 index 0000000..1d76791 --- /dev/null +++ b/topos/app/menu_bar_app.py @@ -0,0 +1,114 @@ +from ..api import api +from ..downloaders.spacy_loader import download_spacy_model +from topos.utilities.utils import get_root_directory +from ..config import get_ssl_certificates + +import requests +import threading +import webbrowser +from PIL import Image, ImageDraw +import pystray +import time +import os + +import warnings + +ASSETS_PATH = os.path.join(get_root_directory(), "assets/topos_white.png") +API_URL = "http://0.0.0.0:13341/health" +DOCS_URL = "http://0.0.0.0:13341/docs" + +def start_api(): + api.start_local_api() + +def start_web_app(): + global API_URL, DOCS_URL + API_URL = 
"https://0.0.0.0:13341/health" + DOCS_URL = "https://0.0.0.0:13341/docs" + api_thread = threading.Thread(target=api.start_web_api) + api_thread.daemon = True + api_thread.start() + # Create and start the tray icon on the main thread + create_tray_icon() + +def check_health(icon): + certs = get_ssl_certificates() + if not os.path.exists(certs['cert_path']): + print(f"Certificate file not found: {certs['cert_path']}") + if not os.path.exists(certs['key_path']): + print(f"Key file not found: {certs['key_path']}") + + while icon.visible: + try: + with warnings.catch_warnings(): # cert=(certs['cert_path'], certs['key_path']) #for verification, but wasn't working + warnings.filterwarnings('ignore', message='Unverified HTTPS request') + response = requests.get(API_URL, verify=False) + if response.status_code == 200: + update_status(icon, "Service is running", (170, 255, 0, 255)) + else: + update_status(icon, "Service is not running", "red") + except requests.exceptions.RequestException as e: + update_status(icon, f"Error: {str(e)}", "red") + time.sleep(5) + +def update_status(icon, text, color): + icon.icon = create_image(color) + +def open_docs(): + webbrowser.open_new(DOCS_URL) + + +def create_image(color): + # Load the external image + external_image = Image.open(ASSETS_PATH).convert("RGBA") + # Resize external image to fit the icon size + external_image = external_image.resize((34, 34), Image.Resampling.LANCZOS) + + # Generate an image for the system tray icon + width = 34 + height = 34 + image = Image.new('RGBA', (width, height), (255, 255, 255, 0)) # Transparent background + dc = ImageDraw.Draw(image) + dc.ellipse((22, 22, 32, 32), fill=color) # Smaller circle + + # Combine the images + combined_image = Image.alpha_composite(external_image, image) + + return combined_image + +def create_tray_icon(): + icon = pystray.Icon("Service Status Checker") + icon.icon = create_image("yellow") + icon.menu = pystray.Menu( + pystray.MenuItem("Open API Docs", open_docs), + 
pystray.MenuItem("Exit", on_exit) + ) + + def on_setup(icon): + icon.visible = True + # Start health check in a separate thread + health_thread = threading.Thread(target=check_health, args=(icon,)) + health_thread.daemon = True + health_thread.start() + + icon.run(setup=on_setup) + +def on_exit(icon, item): + icon.visible = False + icon.stop() + +def start_local_app(): + api_thread = threading.Thread(target=api.start_local_api) + api_thread.daemon = True + api_thread.start() + # Create and start the tray icon on the main thread + create_tray_icon() + +def start_web_app(): + global API_URL, DOCS_URL + API_URL = "https://0.0.0.0:13341/health" + DOCS_URL = "https://0.0.0.0:13341/docs" + api_thread = threading.Thread(target=api.start_web_api) + api_thread.daemon = True + api_thread.start() + # Create and start the tray icon on the main thread + create_tray_icon() \ No newline at end of file diff --git a/topos/assets/topos_blk.png b/topos/assets/topos_blk.png new file mode 100644 index 0000000..8dd2ac1 Binary files /dev/null and b/topos/assets/topos_blk.png differ diff --git a/topos/assets/topos_blk_rounded.png b/topos/assets/topos_blk_rounded.png new file mode 100644 index 0000000..230c7d8 Binary files /dev/null and b/topos/assets/topos_blk_rounded.png differ diff --git a/topos/assets/topos_white.png b/topos/assets/topos_white.png new file mode 100644 index 0000000..bbe8103 Binary files /dev/null and b/topos/assets/topos_white.png differ diff --git a/topos/chat_api/api.py b/topos/chat_api/api.py index 53e0fe3..786a238 100644 --- a/topos/chat_api/api.py +++ b/topos/chat_api/api.py @@ -5,7 +5,7 @@ def start_chat(): """Function to start the API in local mode.""" # print("\033[92mINFO:\033[0m API docs available at: \033[1mhttp://127.0.0.1:13394/docs\033[0m") # subprocess.run(["python", "topos/chat_api/chat_server.py"]) # A barebones chat server - subprocess.run(["uvicorn", "topos.chat_api.fastapi_server:app", "--host", "0.0.0.0", "--port", "13394", "--workers", "1"]) + 
subprocess.run(["uvicorn", "topos.chat_api.server:app", "--host", "0.0.0.0", "--port", "13394", "--workers", "1"]) # start through zrok # uvicorn main:app --host 127.0.0.1 --port 13394 & zrok expose http://localhost:13394 diff --git a/topos/chat_api/chat_server.py b/topos/chat_api/chat_server.py deleted file mode 100644 index 808d885..0000000 --- a/topos/chat_api/chat_server.py +++ /dev/null @@ -1,197 +0,0 @@ -# chat_server.py -# this version works locally, but doesn't work with zrok - -import socket -import threading -import json -import datetime -from typing import Dict, List, Tuple -from http.server import BaseHTTPRequestHandler, HTTPServer - -from topos.utilities.utils import generate_deci_code - -HOST = "127.0.0.1" -PORT = 13394 -HTTP_PORT = 8080 -LISTENER_LIMIT = 5 -active_clients: Dict[str, List[Tuple[str, socket.socket]]] = {} -user_sessions: Dict[str, str] = {} - -class SessionManager: - def __init__(self): - self.active_sessions: Dict[str, List[Tuple[str, socket.socket]]] = active_clients - self.user_sessions: Dict[str, str] = user_sessions - - def add_session(self, session_id: str, user_id: str, client: socket.socket): - print(f"[ adding {session_id} to active_sessions ]") - if session_id not in self.active_sessions: - self.active_sessions[session_id] = [] - self.active_sessions[session_id].append((user_id, client)) - - def get_active_sessions(self): - return self.active_sessions - - def get_user_sessions(self): - return self.user_sessions - - def add_user_session(self, user_id: str, session_id: str): - print(f"[ adding {user_id} to user_sessions ]") - self.user_sessions[user_id] = session_id - -session_manager = SessionManager() - -# Function to listen for incoming messages from client -def listen_for_message(client, session_id, user_id): - while True: - max_msg_size = 2048 - try: - message = client.recv(max_msg_size).decode('utf-8') - if message: - payload = json.loads(message) - print(f"RECEIVED: {payload}") - if 'content' in payload and 'session_id' 
in payload['content']: - session_id = payload['content']['session_id'] - user_id = payload['content']['user_id'] - if session_id: - print(f"sending {session_id}") - send_message_to_all(session_id, user_id, payload) - else: - print(f"[ Message from client {user_id} is empty ]") - except: - handle_disconnect(client, session_id, user_id) - break - -# Function to send message to a single client -def send_message_to_client(client, message): - try: - client.sendall(json.dumps(message).encode()) - except: - print("Error sending message to client") - -# Function to send any new message to all the clients currently connected to the server -def send_message_to_all(session_id, sender_user_id, message): - active_sessions = session_manager.get_active_sessions() - if message['message_type'] != 'server': - print(f"[ message to user :: {message['content']['text']}]") - if session_id in active_sessions: - for user_id, client in active_sessions[session_id]: - if message['message_type'] == 'server': - send_message_to_client(client, message) - elif user_id != sender_user_id: - send_message_to_client(client, message) - -# Function to handle client -def client_handler(client): - max_msg_size = 2048 - try: - data = client.recv(max_msg_size).decode('utf-8') - if data: - payload = json.loads(data) - message_type = payload['message_type'] - user_id = payload['user_id'] - username = payload['username'] - - print(f"[ {username} joined the chat") - if message_type == "create_server": - session_id = generate_deci_code(6) - print(f"[ client created chat :: session_id {session_id} ]") - session_manager.add_session(session_id, user_id, client) - session_manager.add_user_session(user_id, session_id) - print(session_manager.get_active_sessions()) - prompt_message = f"{payload['host_name']} created the chat" - data = { - "message_type": "server", - "session_id": session_id, - "message": prompt_message, - "timestamp": datetime.datetime.now().isoformat() - } - send_message_to_all(session_id, user_id, 
data) - threading.Thread(target=listen_for_message, args=(client, session_id, user_id)).start() - - elif message_type == "join_server": - session_id = payload['session_id'] - username = payload['username'] - if session_id in session_manager.get_active_sessions(): - print(f"[ {username} joined chat :: session_id {session_id} ]") - session_manager.add_session(session_id, user_id, client) - session_manager.add_user_session(user_id, session_id) - join_message = f"{username} joined the chat" - data = { - "message_type": "server", - "session_id": session_id, - "message": join_message, - "timestamp": datetime.datetime.now().isoformat() - } - send_message_to_all(session_id, user_id, data) - threading.Thread(target=listen_for_message, args=(client, session_id, user_id)).start() - else: - send_message_to_client(client, json.dumps({"error": "Invalid session ID"})) - else: - print("[ Client username is empty ]") - except Exception as e: - print(f"Error: {e}") - client.close() - -# Handle disconnect -def handle_disconnect(client, session_id, user_id): - active_sessions = session_manager.get_active_sessions() - if session_id in active_sessions: - clients = active_sessions[session_id] - clients = [c for c in clients if c[1] != client] - if not clients: - del active_sessions[session_id] - disconnect_message = f"{user_id} left the chat" - send_message_to_all(session_id, user_id, { - "message_type": "server", - "session_id": session_id, - "message": disconnect_message, - "timestamp": datetime.datetime.now().isoformat() - }) - user_sessions = session_manager.get_user_sessions() - user_sessions.pop(user_id, None) - client.close() - -# HTTP request handler for health check -class HealthCheckHandler(BaseHTTPRequestHandler): - def do_GET(self): - if self.path == "/test": - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - response = {"response": True} - self.wfile.write(json.dumps(response).encode()) - else: - self.send_response(404) - 
self.end_headers() - -# Function to start the HTTP server for health check -def start_http_server(): - http_server = HTTPServer((HOST, HTTP_PORT), HealthCheckHandler) - print(f"[ HTTP server running on port {HTTP_PORT} ]") - http_server.serve_forever() - - -# Main function to start the chat server and HTTP server -def main(): - # Start the HTTP server in a separate thread - threading.Thread(target=start_http_server, daemon=True).start() - - # Start the TCP chat server - server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - server.bind((HOST, PORT)) - print(f"[ Running the server on {HOST} {PORT} ]") - except: - print(f"[ Unable to bind to host {HOST} and port {PORT} ]") - return - - server.listen(LISTENER_LIMIT) - print("[ Server is listening for connections ]") - - while True: - client, address = server.accept() - print(f"New connected client {address[0]} {address[1]}") - threading.Thread(target=client_handler, args=(client,)).start() - -if __name__ == "__main__": - main() diff --git a/topos/chat_api/client.py b/topos/chat_api/client.py deleted file mode 100644 index a6051b9..0000000 --- a/topos/chat_api/client.py +++ /dev/null @@ -1,62 +0,0 @@ -# client.py - -import asyncio -import threading -import websockets - -HOST = "127.0.0.1" -PORT = 13394 - -def listen_for_messages_from_server(websocket): - async def listen(): - async for message in websocket: - if message: - username, content = message.split("~", 1) - print(f"[{username}] {content}") - else: - print("[ message from server is empty ]") - - asyncio.run(listen()) - -def send_message_to_server(websocket): - async def send(): - while True: - message = input("Message: ") - if message: - await websocket.send(message) - else: - print("[ empty message ]") - await websocket.close() - break - - asyncio.run(send()) - -def communicate_to_server(websocket): - username = input("Enter username: ") - if username: - asyncio.run(websocket.send(username)) - else: - print("[ username cannot be empty ]") - 
asyncio.run(websocket.close()) - return - - listen_thread = threading.Thread(target=listen_for_messages_from_server, args=(websocket,)) - send_thread = threading.Thread(target=send_message_to_server, args=(websocket,)) - - listen_thread.start() - send_thread.start() - - listen_thread.join() - send_thread.join() - -async def main(): - uri = f"ws://{HOST}:{PORT}/ws/chat" - try: - async with websockets.connect(uri) as websocket: - print(f"[ Successfully connected to server {HOST} {PORT} ]") - communicate_to_server(websocket) - except Exception as e: - print(f"[ Unable to connect to host {HOST} and port {PORT} ]: {e}") - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/topos/chat_api/fastapi_client.py b/topos/chat_api/fastapi_client.py deleted file mode 100644 index 3f7095a..0000000 --- a/topos/chat_api/fastapi_client.py +++ /dev/null @@ -1,128 +0,0 @@ -import asyncio -import threading -import websockets -import json -import random -import string -import datetime - -HOST = "https://6kppfbg5ti9u.share.zrok.io" -PORT = 13394 - -global username -global session_id - -# Generate a random 8-digit session ID -def generate_id(length=8): - return ''.join(random.choices(string.digits, k=length)) - -user_id = generate_id() - -async def listen_for_messages_from_server(websocket): - try: - while True: - try: - message = await websocket.recv() - print("MESSAGE RECEIVED:", message) - except websockets.ConnectionClosed as e: - print(f"[ Connection closed while receiving: {e} ]") - break - except Exception as e: - print(f"[ Error receiving message: {e} ]") - break - except Exception as e: - print(f"[ Error in listening for messages: {e} ]") - -async def send_message_to_server(websocket): - try: - while True: - message = input("Message: ") - if message: - try: - msg = { - 'message_id': generate_id(), - 'message_type': 'user', - 'num_participants': 1, - 'content': { - 'user_id': user_id, - 'session_id': session_id, - 'username': username, - 'text': message - }, - 
'timestamp': datetime.datetime.now().isoformat(), - 'metadata': { - 'priority': '', - 'tags': [] - }, - 'attachments': [{'file_name': None, 'file_type': None, 'url': None}] - } - await websocket.send(json.dumps(msg)) - print("MESSAGE SENT:", message) - except Exception as e: - print(f"[ Error sending message: {e} ]") - else: - print("[ empty message ]") - await websocket.close() - break - except websockets.ConnectionClosed as e: - print(f"[ Connection closed while sending: {e} ]") - except Exception as e: - print(f"[ Error in sending messages: {e} ]") - -async def communicate_to_server(websocket): - try: - global username, session_id - username = input("Enter username: ") - session_id = input("Enter sessionId: ") - if username: - try: - msg = { - 'message_type': 'join_server', - 'user_id': user_id, - 'session_id': session_id, - 'created_at': datetime.datetime.now().isoformat(), - 'username': username - } - await websocket.send(json.dumps(msg)) - print(f"USERNAME SENT: {username}") - except Exception as e: - print(f"[ Error sending username: {e} ]") - else: - print("[ username cannot be empty ]") - await websocket.close() - return - - # listen_task = asyncio.create_task(listen_for_messages_from_server(websocket)) - send_task = asyncio.create_task(send_message_to_server(websocket)) - - await asyncio.gather(send_task) #listen_task, - except Exception as e: - print(f"[ Error in communication: {e} ]") - -def start_event_loop(loop): - try: - asyncio.set_event_loop(loop) - loop.run_forever() - except Exception as e: - print(f"[ Error in event loop: {e} ]") - -async def main(): - uri = f"ws://{HOST}:{PORT}/ws/chat" - try: - async with websockets.connect(uri, ping_interval=20, ping_timeout=200) as websocket: - print(f"[ Successfully connected to server {HOST} {PORT} ]") - await communicate_to_server(websocket) - except websockets.ConnectionClosed as e: - print(f"[ Connection closed: {e} ]") - except Exception as e: - print(f"[ Unable to connect to host {HOST} and port 
{PORT} ]: {e}") - -if __name__ == "__main__": - try: - new_loop = asyncio.new_event_loop() - t = threading.Thread(target=start_event_loop, args=(new_loop,)) - t.start() - - asyncio.run_coroutine_threadsafe(main(), new_loop) - except Exception as e: - print(f"[ Error starting client: {e} ]") diff --git a/topos/chat_api/fastapi_client_message_listener.py b/topos/chat_api/fastapi_client_message_listener.py deleted file mode 100644 index c52b205..0000000 --- a/topos/chat_api/fastapi_client_message_listener.py +++ /dev/null @@ -1,26 +0,0 @@ -import asyncio -import websockets - -HOST = "127.0.0.1" -PORT = 13394 - -async def listen_for_messages(uri): - try: - async with websockets.connect(uri) as websocket: - print(f"[ Successfully connected to server {HOST} {PORT} ]") - while True: - try: - message = await websocket.recv() - print("MESSAGE RECEIVED:", message) - except websockets.ConnectionClosed as e: - print(f"[ Connection closed while receiving: {e} ]") - break - except Exception as e: - print(f"[ Error receiving message: {e} ]") - break - except Exception as e: - print(f"[ Unable to connect to host {HOST} and port {PORT} ]: {e}") - -if __name__ == "__main__": - uri = f"ws://{HOST}:{PORT}/ws/chat" - asyncio.run(listen_for_messages(uri)) diff --git a/topos/chat_api/fastapi_server.py b/topos/chat_api/fastapi_server.py deleted file mode 100644 index 38e301e..0000000 --- a/topos/chat_api/fastapi_server.py +++ /dev/null @@ -1,210 +0,0 @@ -from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends -from fastapi.middleware.cors import CORSMiddleware -from starlette.websockets import WebSocketState -from concurrent.futures import ThreadPoolExecutor -from typing import Dict, List, Tuple -import uvicorn -import json -import asyncio -import datetime -from ..utilities.utils import generate_deci_code - -app = FastAPI() - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], # Change this to the specific origins you want to allow - allow_credentials=True, - 
allow_methods=["*"], - allow_headers=["*"], -) - -class SessionManager: - def __init__(self): - self.active_sessions: Dict[str, List[Tuple[str, WebSocket]]] = {} - self.user_sessions: Dict[str, str] = {} - - def add_session(self, session_id: str, user_id: str, websocket: WebSocket): - print(f"[ adding {session_id} to active_sessions ]") - if session_id not in self.active_sessions: - self.active_sessions[session_id] = [] - self.active_sessions[session_id].append((user_id, websocket)) - - def get_active_sessions(self): - return self.active_sessions - - def get_user_sessions(self): - return self.user_sessions - - def add_user_session(self, user_id: str, session_id: str): - print(f"[ adding {user_id} to user_sessions ]") - self.user_sessions[user_id] = session_id - -session_manager = SessionManager() -# executor = ThreadPoolExecutor(max_workers=6) - -async def send_message_to_client(client: WebSocket, message: dict): - if not isinstance(message, dict): - print("Message is not a dictionary") - return - - if not client.application_state == WebSocketState.CONNECTED: - print("Client is not connected") - return - - try: - await client.send_json(message) - except Exception as e: - print(e) - -async def send_message_to_all(session_id: str, sender_user_id: str, message: dict, session_manager: SessionManager): - active_sessions = session_manager.get_active_sessions() - print("send_message_to_all") - print(session_id in active_sessions) - if message['message_type'] != 'server': - print(f"[ message to user :: {message['content']['text']}]") - if session_id in active_sessions: - for user_id, client in active_sessions[session_id]: - if message['message_type'] == 'server': - await send_message_to_client(client, message) - elif user_id != sender_user_id: - await send_message_to_client(client, message) - -async def handle_client(websocket: WebSocket, session_manager: SessionManager): - await websocket.accept() - print("client joined") - try: - while True: - data = await 
asyncio.wait_for(websocket.receive_text(), timeout=600.0) - if data: - payload = json.loads(data) - print(payload) - message_type = payload['message_type'] - print(message_type) - active_sessions = session_manager.get_active_sessions() - user_sessions = session_manager.get_user_sessions() - - if message_type == "create_server": - session_id = generate_deci_code(6) - print(f"[ client created chat :: session_id {session_id} ]") - user_id = payload['user_id'] - host_name = payload['host_name'] - session_manager.add_session(session_id, user_id, websocket) - session_manager.add_user_session(user_id, session_id) - print(session_manager.get_active_sessions()) # shows value - active_sessions = session_manager.get_active_sessions() - - prompt_message = f"{host_name} created the chat" - data = { - "message_type": "server", - "session_id": session_id, - "message": prompt_message, - "timestamp": datetime.datetime.now().isoformat() - } - await send_message_to_all(session_id, user_id, data, session_manager) - - elif message_type == "join_server": - session_id = payload['session_id'] - user_id = payload['user_id'] - username = payload['username'] - active_sessions = session_manager.get_active_sessions() - print(session_id) - print("ACTIVE SESSIONS: ", session_manager.get_active_sessions()) - print("ACTIVE SESSIONS: ", active_sessions) # shows empty when client connects - print(session_id in active_sessions) - if session_id in active_sessions: - print(f"[ {username} joined chat :: session_id {session_id} ]") - session_manager.add_session(session_id, user_id, websocket) - session_manager.add_user_session(user_id, session_id) - join_message = f"{username} joined the chat" - data = { - "message_type": "server", - "session_id": session_id, - "message": join_message, - "timestamp": datetime.datetime.now().isoformat() - } - await send_message_to_all(session_id, user_id, data, session_manager) - else: - await websocket.send_json({"error": "Invalid session ID"}) - break - while True: - 
data = await websocket.receive_text() - if data: - payload = json.loads(data) - print("RECEIVED: ", payload) - session_id = payload['content']['session_id'] - user_id = payload['content']['user_id'] - if session_id: - print(f"sending {session_id}") - await send_message_to_all(session_id, user_id, payload, session_manager) - else: - print(f"[ Message from client is empty ]") - except WebSocketDisconnect: - print("client disconnected") - await handle_disconnect(websocket, session_manager) - except asyncio.TimeoutError: - print("client disconnected due to timeout") - await handle_disconnect(websocket, session_manager) - except Exception as e: - print(f"client disconnected due to error: {e}") - await handle_disconnect(websocket, session_manager) - -async def handle_disconnect(websocket, session_manager): - active_sessions = session_manager.get_active_sessions() - user_sessions = session_manager.get_user_sessions() - for session_id, clients in active_sessions.items(): - for user_id, client in clients: - if client == websocket: - clients.remove((user_id, client)) - if not clients: - del active_sessions[session_id] - disconnect_message = f"{user_id} left the chat" - await asyncio.shield(send_message_to_all(session_id, user_id, { - "message_type": "server", - "session_id": session_id, - "message": disconnect_message, - "timestamp": datetime.datetime.now().isoformat() - }, session_manager)) - break - user_sessions.pop(user_id, None) - -@app.websocket("/ws/chat") -async def websocket_endpoint(websocket: WebSocket): #, session_manager: SessionManager = Depends(lambda: session_manager) - print("[ client connected :: preparing setup ]") - print(f" current connected sessions :: {session_manager.get_active_sessions()}") - await handle_client(websocket, session_manager) - - -#"http://127.0.0.1:13394 -# ]]" -@app.post("/test") -async def test(): - return {"response": True} - -""" Message JSON Schema - -{ -"message_id": "", -"message_type": "", // OPTIONS: user, ai, server 
-“num_participants”: , -"content": - { - "sender_id": "", - "conversation_id": "", - "username": "", - "text": "" - }, -"timestamp": "", -"metadata": { - "priority": "", // e.g., normal, high - "tags": ["", ""] - }, -"attachments": [ - { - "file_name": "", - "file_type": "", - "url": "" - } -] } - -""" \ No newline at end of file diff --git a/topos/chat_api/server.py b/topos/chat_api/server.py index 7a3e3f7..1424e4f 100644 --- a/topos/chat_api/server.py +++ b/topos/chat_api/server.py @@ -1,76 +1,233 @@ -import socket -import threading - -HOST = "127.0.0.1" -PORT = 13394 -LISTENER_LIMIT = 5 -active_clients = [] # list of all currently conencted users - - -# function to listen for incoming messages from client -def listen_for_message(client, username): - while 1: - max_msg_size = 2048 - message = client.recv(max_msg_size).decode('utf-8') - if message != '': - final_msg = username + "~" + message - send_message_to_all(final_msg) - else: - print(f"[ Message from client {username} is empty ]") - -# function to send message to a single client -def send_message_to_client(client, message): - client.sendall(message.encode()) - - -# function to send any new message to all the clients -# currently connected to the server. 
-def send_message_to_all(message): - for user in active_clients: - send_message_to_client(user[1], message) - - -# function to handle client -def client_handler(client): - # server will listen for client message that will - # contain the username - while 1: - max_msg_size = 2048 - username = client.recv(max_msg_size).decode('utf-8') - if username != '': - # actions to take when user joins server - active_clients.append((username, client)) - prompt_message = "SERVER~" + f"{username} joined the chat" - send_message_to_all(prompt_message) - break - else: - print("[ Client username is empty ]") - - threading.Thread(target=listen_for_message, args=(client, username, )).start() - - -# main function -def main(): - # creating the socket class object - server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) +from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends +from fastapi.middleware.cors import CORSMiddleware +from starlette.websockets import WebSocketState +from concurrent.futures import ThreadPoolExecutor +from typing import Dict, List, Tuple +import uvicorn +import json +import asyncio +import datetime +from ..utilities.utils import generate_deci_code +app = FastAPI() + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Change this to the specific origins you want to allow + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +class SessionManager: + def __init__(self): + self.active_sessions: Dict[str, List[Tuple[str, WebSocket]]] = {} + self.user_sessions: Dict[str, str] = {} + self.usernames: Dict[str, str] = {} + + def add_session(self, session_id: str, user_id: str, websocket: WebSocket): + print(f"[ adding {session_id} to active_sessions ]") + if session_id not in self.active_sessions: + self.active_sessions[session_id] = [] + self.active_sessions[session_id].append((user_id, websocket)) + + def get_active_sessions(self): + return self.active_sessions + + def get_user_sessions(self): + return 
self.user_sessions + + def add_user_session(self, user_id: str, session_id: str): + print(f"[ adding {user_id} to user_sessions ]") + self.user_sessions[user_id] = session_id + + def add_username(self, user_id: str, username: str): + print(f"[ adding {username} for {user_id} ]") + self.usernames[user_id] = username + + def get_username(self, user_id: str) -> str: + return self.usernames.get(user_id, "Unknown user") + +session_manager = SessionManager() + +async def send_message_to_client(client: WebSocket, message: dict): + if not isinstance(message, dict): + print("Message is not a dictionary") + return + + if not client.application_state == WebSocketState.CONNECTED: + print("Client is not connected") + return + + try: + await client.send_json(message) + except Exception as e: + print(e) + +async def send_message_to_all(session_id: str, sender_user_id: str, message: dict, session_manager: SessionManager): + active_sessions = session_manager.get_active_sessions() + print("send_message_to_all") + print(session_id in active_sessions) + if message['message_type'] != 'server': + print(f"[ message to user :: {message['content']['text']}]") + if session_id in active_sessions: + for user_id, client in active_sessions[session_id]: + if message['message_type'] == 'server': + await send_message_to_client(client, message) + elif user_id != sender_user_id: + await send_message_to_client(client, message) + +async def send_to_all_clients_on_all_sessions(sender_user_id: str, message: dict, session_manager: SessionManager): + active_sessions = session_manager.get_active_sessions() + print("send_message_to_all") + if message['message_type'] != 'server': + print(f"[ message to user :: {message['content']['text']}]") + for session_id in active_sessions: + message["session_id"] = session_id + for user_id, client in active_sessions[session_id]: + if message['message_type'] == 'server': + await send_message_to_client(client, message) + elif user_id != sender_user_id: + await 
send_message_to_client(client, message) + +async def handle_client(websocket: WebSocket, session_manager: SessionManager, inactivity_event: asyncio.Event): + await websocket.accept() + print("client joined") try: - server.bind((HOST, PORT)) - print(f"[ Running the server on {HOST} {PORT} ]") - except: - print(f"[ Unable to bind to host {HOST} and port {PORT} ]") + while True: + data = await asyncio.wait_for(websocket.receive_text(), timeout=600.0) # removes user if they haven't spoken in 10 minutes + if data: + payload = json.loads(data) + inactivity_event.set() # Reset the inactivity event + print(payload) + message_type = payload['message_type'] + print(message_type) + active_sessions = session_manager.get_active_sessions() + user_sessions = session_manager.get_user_sessions() + + if message_type == "create_server": + session_id = generate_deci_code(6) + print(f"[ client created chat :: session_id {session_id} ]") + user_id = payload['user_id'] + host_name = payload['host_name'] + username = payload['username'] + session_manager.add_session(session_id, user_id, websocket) + session_manager.add_user_session(user_id, session_id) + session_manager.add_username(user_id, username) + print(session_manager.get_active_sessions()) # shows value + active_sessions = session_manager.get_active_sessions() + + prompt_message = f"{host_name} created the chat" + data = { + "message_type": "server", + "session_id": session_id, + "message": prompt_message, + "timestamp": datetime.datetime.now().isoformat() + } + await send_message_to_all(session_id, user_id, data, session_manager) + + elif message_type == "join_server": + session_id = payload['session_id'] + user_id = payload['user_id'] + username = payload['username'] + active_sessions = session_manager.get_active_sessions() + print(session_id) + print("ACTIVE SESSIONS: ", session_manager.get_active_sessions()) + print("ACTIVE SESSIONS: ", active_sessions) # shows empty when client connects + print(session_id in active_sessions) 
+ if session_id in active_sessions: + print(f"[ {username} joined chat :: session_id {session_id} ]") + session_manager.add_session(session_id, user_id, websocket) + session_manager.add_user_session(user_id, session_id) + session_manager.add_username(user_id, username) + join_message = f"{username} joined the chat" + data = { + "message_type": "server", + "session_id": session_id, + "message": join_message, + "timestamp": datetime.datetime.now().isoformat() + } + await send_message_to_all(session_id, user_id, data, session_manager) + else: + await websocket.send_json({"error": "Invalid session ID"}) + break + while True: + data = await websocket.receive_text() + if data: + payload = json.loads(data) + inactivity_event.set() # Reset the inactivity event + print("RECEIVED: ", payload) + session_id = payload['content']['session_id'] + user_id = payload['content']['user_id'] + if session_id: + print(f"sending {session_id}") + await send_message_to_all(session_id, user_id, payload, session_manager) + else: + print(f"[ Message from client is empty ]") + except WebSocketDisconnect: + print("client disconnected") + await handle_disconnect(websocket, session_manager) + except asyncio.TimeoutError: + print("client disconnected due to timeout") + await handle_disconnect(websocket, session_manager) + except Exception as e: + print(f"client disconnected due to error: {e}") + await handle_disconnect(websocket, session_manager) + +async def handle_disconnect(websocket, session_manager): + active_sessions = session_manager.get_active_sessions() + user_sessions = session_manager.get_user_sessions() + for session_id, clients in active_sessions.items(): + for user_id, client in clients: + if client == websocket: + clients.remove((user_id, client)) + if not clients: + del active_sessions[session_id] + username = session_manager.get_username(user_id) + disconnect_message = f"{username} left the chat" + await asyncio.shield(send_message_to_all(session_id, user_id, { + "message_type": 
"server", + "session_id": session_id, + "message": disconnect_message, + "timestamp": datetime.datetime.now().isoformat() + }, session_manager)) + break + user_sessions.pop(user_id, None) + session_manager.usernames.pop(user_id, None) + +@app.websocket("/ws/chat") +async def websocket_endpoint(websocket: WebSocket): + print("[ client connected :: preparing setup ]") + print(f" current connected sessions :: {session_manager.get_active_sessions()}") + inactivity_event = asyncio.Event() + # inactivity_task = asyncio.create_task(check_inactivity(inactivity_event)) # not applicable for local builds + await handle_client(websocket, session_manager, inactivity_event) + # inactivity_task.cancel() - # Set server limit - server.listen(LISTENER_LIMIT) +async def check_inactivity(inactivity_event: asyncio.Event): + while True: + try: + await asyncio.wait_for(inactivity_event.wait(), timeout=600.0) + inactivity_event.clear() + except asyncio.TimeoutError: + print("No activity detected for 10 minutes, shutting down...") + disconnect_message = f"Conserving power...shutting down..." 
+ await asyncio.shield(send_to_all_clients_on_all_sessions("senderUSErID#45", + { + "message_type": "server", + "message": disconnect_message, + "timestamp": datetime.datetime.now().isoformat() + }, session_manager)) + asyncio.get_event_loop().stop() - # while loop will keep listening to client connections - num_connected_clients = 0 - while 1: - client, address = server.accept() - num_connected_clients += 1 - print(f"New connected client {address[0]} {address[1]} :: {num_connected_clients} of {LISTENER_LIMIT} available") +# perform healthcheck for GCP requirement +@app.get("/healthcheck/") +async def root(): + return {"message": "Status: OK"} - threading.Thread(target=client_handler, args=(client, )).start() +@app.post("/test") +async def test(): + return {"response": True} if __name__ == "__main__": - main() \ No newline at end of file + uvicorn.run(app, host="0.0.0.0", port=8000, workers=1) \ No newline at end of file diff --git a/topos/chat_api/session_manager_class_test.py b/topos/chat_api/session_manager_class_test.py deleted file mode 100644 index 69c9fd8..0000000 --- a/topos/chat_api/session_manager_class_test.py +++ /dev/null @@ -1,82 +0,0 @@ -import random -import string -import json -import asyncio - -# Mock WebSocket class for testing purposes -class WebSocket: - async def receive_text(self): - await asyncio.sleep(1) - if not hasattr(self, 'message_index'): - self.message_index = 0 - - if self.message_index == 0: - self.message_index += 1 - return json.dumps({"message_type": "create_server", "user_id": self.user_id, "host_name": "host1"}) - else: - return json.dumps({"message_type": "join_server", "session_id": self.session_id, "user_id": self.user_id}) - -# Generate a random 8-digit session ID -def generate_session_id(length=8): - return ''.join(random.choices(string.digits, k=length)) - -class SessionManager: - def __init__(self): - self.active_sessions = {} - self.user_sessions = {} - - def add_session(self, session_id, user_id, websocket): - if 
session_id not in self.active_sessions: - self.active_sessions[session_id] = [] - self.active_sessions[session_id].append((user_id, websocket)) - - def get_active_sessions(self): - return self.active_sessions - - def get_user_sessions(self): - return self.user_sessions - - def add_user_session(self, user_id, session_id): - self.user_sessions[user_id] = session_id - -async def test_session_manager(): - session_manager = SessionManager() - websocket = WebSocket() - websocket.user_id = "user1" - - while True: - data = await websocket.receive_text() - if data: - payload = json.loads(data) - print("Received payload:", payload) - message_type = payload['message_type'] - active_sessions = session_manager.get_active_sessions() - user_sessions = session_manager.get_user_sessions() - - if message_type == "create_server": - session_id = generate_session_id() - websocket.session_id = session_id - print(f"[ client created chat :: session_id {session_id} ]") - user_id = payload['user_id'] - host_name = payload['host_name'] - session_manager.add_session(session_id, user_id, websocket) - session_manager.add_user_session(user_id, session_id) - print("Active Sessions:", active_sessions) - print("User Sessions:", user_sessions) - session_created = True - - elif message_type == "join_server": - session_id = payload.get('session_id') - user_id = payload['user_id'] - session_manager.add_session(session_id, user_id, websocket) - session_manager.add_user_session(user_id, session_id) - print(f"User {user_id} joined session {session_id}") - print("Active Sessions:", active_sessions) - print("User Sessions:", user_sessions) - - # Break the loop after one iteration for testing purposes - # if session_created: - # break - -# Run the test case -asyncio.run(test_session_manager()) diff --git a/topos/cli.py b/topos/cli.py index 16bab4a..e4b8e9b 100644 --- a/topos/cli.py +++ b/topos/cli.py @@ -10,19 +10,23 @@ def main(): parser.add_argument('--web', action='store_true', help="Flag to run the 
server for web access") parser.add_argument('--local', action='store_true', help="Flag to run the server for local access (default)") parser.add_argument('--spacy', choices=['small', 'med', 'large', 'trf'], help="Specify Spacy model size (only for 'set' command)") + parser.add_argument('--cloud', action='store_true', help="Flag to run the server on cloud") args = parser.parse_args() - if args.command == 'run': """ start the topos api server """ # import api from .api import api - if args.web: - api.start_web_api() + if args.cloud: + api.start_hosted_service() + elif args.web: + from .app import menu_bar_app + menu_bar_app.start_web_app() else: - api.start_local_api() + from .app import menu_bar_app + menu_bar_app.start_local_app() if args.command == 'chat': """ diff --git a/topos/config.py b/topos/config.py index af5e789..50aba8c 100644 --- a/topos/config.py +++ b/topos/config.py @@ -1,6 +1,7 @@ from dotenv import load_dotenv import os from fastapi.middleware.cors import CORSMiddleware +from topos.utilities.utils import get_root_directory def get_openai_api_key(): @@ -11,10 +12,13 @@ def get_openai_api_key(): def get_ssl_certificates(): - project_dir = "/Users/dialogues/developer/topos/cli" + # project_dir = get_root_directory() + # print(project_dir) + # "key_path": project_dir + "/key.pem", + # "cert_path": project_dir + "/cert.pem" return { - "key_path": project_dir + "/key.pem", - "cert_path": project_dir + "/cert.pem" + "key_path": "key.pem", + "cert_path": "cert.pem" } diff --git a/topos/config.yaml b/topos/config.yaml index 8fde4f5..3c7ab5d 100644 --- a/topos/config.yaml +++ b/topos/config.yaml @@ -1 +1 @@ -active_spacy_model: en_core_web_trf +active_spacy_model: en_core_web_trf \ No newline at end of file diff --git a/topos/downloaders/spacy_loader.py b/topos/downloaders/spacy_loader.py index 2df67cf..63a3f65 100644 --- a/topos/downloaders/spacy_loader.py +++ b/topos/downloaders/spacy_loader.py @@ -1,5 +1,8 @@ import subprocess import yaml +import os +from 
..utilities.utils import get_python_command, get_root_directory + def download_spacy_model(model_selection): if model_selection == 'small': @@ -12,10 +15,15 @@ def download_spacy_model(model_selection): model_name = "en_core_web_trf" else: #default model_name = "en_core_web_sm" + + python_command = get_python_command() + + # Define the path to the config.yaml file + config_path = os.path.join(get_root_directory(), 'config.yaml') try: - subprocess.run(['python3', '-m', 'spacy', 'download', model_name], check=True) + subprocess.run([python_command, '-m', 'spacy', 'download', model_name], check=True) # Write updated settings to YAML file - with open('config.yaml', 'w') as file: + with open(config_path, 'w') as file: yaml.dump({'active_spacy_model': model_name}, file) print(f"Successfully downloaded '{model_name}' spaCy model.") print(f"'{model_name}' set as active model.") diff --git a/topos/generations/ollama_chat.py b/topos/generations/ollama_chat.py index d84abce..41cc3d2 100644 --- a/topos/generations/ollama_chat.py +++ b/topos/generations/ollama_chat.py @@ -1,20 +1,37 @@ -# summaries.py +from openai import OpenAI -import ollama -def stream_chat(message_history, model = "solar", temperature=0): - stream = ollama.chat( +client = OpenAI( + base_url='http://localhost:11434/v1', + api_key='ollama', # required, but unused +) + +def stream_chat(message_history, model="solar", temperature=0): + response = client.chat.completions.create( model=model, messages=message_history, - stream=True, + temperature=temperature, + stream=True ) - for chunk in stream: - yield chunk["message"]["content"] + for chunk in response: + if chunk.choices[0].delta.content is not None: + yield chunk.choices[0].delta.content def generate_response(context, prompt, model="solar", temperature=0): - response = ollama.chat( + response = client.chat.completions.create( + model=model, + messages=[ + {"role": "system", "content": context}, + {"role": "user", "content": prompt} + ], + stream=False + ) + 
return response.choices[0].message.content + +def generate_response_messages(message_history, model="solar", temperature=0): + response = client.chat.completions.create( model=model, - messages=[{"role": "system", "content": context}, {"role": "user", "content": prompt}], + messages=message_history, stream=False ) - return response["message"]["content"] + return response.choices[0].message.content \ No newline at end of file diff --git a/topos/main.py b/topos/main.py new file mode 100644 index 0000000..9d2c053 --- /dev/null +++ b/topos/main.py @@ -0,0 +1,7 @@ +from .api import api + +def start_api(): + api.start_hosted_service() + +def topos(): + start_api() \ No newline at end of file diff --git a/topos/requirements.txt b/topos/requirements.txt new file mode 100644 index 0000000..0013760 --- /dev/null +++ b/topos/requirements.txt @@ -0,0 +1,34 @@ +httpx==0.27.0 +fastapi==0.109.2 +uvicorn==0.20.0 +websockets==11.0.3 +ollama==0.1.9 +spacy==3.7.2 +pydantic==2.7.4 +transformers==4.40.2 +torch==2.3.0 +diffusers==0.27.2 +accelerate==0.30.1 +emoji==0.6.0 +pyyaml==6.0.1 +python-dotenv==1.0.0 +openai==1.30.4 +nltk==3.8.1 +scikit-learn==1.5.0 +sentence-transformers==3.0.1 +sentencepiece==0.2.0 +google==3.0.0 +protobuf==5.27.1 +matplotlib==3.9.0 +umap-learn==0.5.6 +pyjwt==2.8.0 +python-multipart==0.0.9 +pytest-asyncio==0.23.7 +textblob==0.18.0.post0 +tk==0.1.0 +pytest==7.4.3 +pytest-cov==4.1.0 +pytest-httpserver==1.0.8 +pillow==10.2.0 +ruff==0.1.8 +https://github.com/explosion/spacy-models/releases/download/en_core_web_trf-3.7.3/en_core_web_trf-3.7.3-py3-none-any.whl \ No newline at end of file diff --git a/topos/services/ontology_service/mermaid_chart.py b/topos/services/ontology_service/mermaid_chart.py new file mode 100644 index 0000000..c6da5b6 --- /dev/null +++ b/topos/services/ontology_service/mermaid_chart.py @@ -0,0 +1,180 @@ +# ontological_feature_detection.py +import re + +from topos.FC.ontological_feature_detection import OntologicalFeatureDetection +from 
topos.generations.ollama_chat import generate_response, generate_response_messages + + +def get_ontology_old_method(message): + user_id = "jonny" + session_id = "127348901" + message_id = "1531ijasda8" + + composable_string = f"for user {user_id}, of {session_id}, the message is: {message}" + + ontological_feature_detection = OntologicalFeatureDetection("neo4j_uri", "neo4j_user", "neo4j_password", + "showroom_db_name", False) + + entities, pos_tags, dependencies, relations, srl_results, timestamp, context_entities = ontological_feature_detection.build_ontology_from_paragraph( + user_id, session_id, message_id, composable_string) + + input_components = message, entities, dependencies, relations, srl_results, timestamp, context_entities + + mermaid_syntax = ontological_feature_detection.extract_mermaid_syntax(input_components, input_type="components") + mermaid_to_ascii = ontological_feature_detection.mermaid_to_ascii(mermaid_syntax) + return mermaid_to_ascii + +def extract_mermaid_chart(response): + mermaid_code = re.search(r'```mermaid\n(.*?)```', response, re.DOTALL) + if mermaid_code: + return mermaid_code.group(0) + + # Check for the variation with an extra newline character + mermaid_code_variation = re.search(r'```\nmermaid\n(.*?)```', response, re.DOTALL) + if mermaid_code_variation: + print("\t[ reformatting mermaid chart ]") + # Fix the variation by placing the mermaid text right after the ``` + fixed_mermaid_code = "```mermaid\n" + mermaid_code_variation.group(1) + "\n```" + return fixed_mermaid_code + return None + +def refine_mermaid_lines(mermaid_chart): + lines = mermaid_chart.split('\n') + refined_lines = [] + for line in lines: + if '-->' in line: + parts = line.split('-->') + parts = [part.strip().replace(' ', '_') for part in parts] + refined_line = ' --> '.join(parts) + refined_lines.append(" " + refined_line) # add the indent to the start of the line + else: + refined_lines.append(line) + return '\n'.join(refined_lines) + +async def 
get_mermaid_chart(message, websocket = None):
+    """
+    Input: String Message
+    Output: mermaid chart
+    ``` mermaid
+    graph TD
+        Texas -->|is| hot
+        hot -->|is| uncomfortable
+        hot -->|is| unwanted
+        Texas -->|actions| options
+        options -->|best| Go_INSIDE
+        options -->|second| Go_to_Canada
+        options -->|last| try_not_to_die
+    ```"""
+
+    system_role = "Our goal is to help a visual learner better comprehend a sentence, by illustrating the text in a graph form. Your job is to create a list of graph triples from the speaker's sentence.\n"
+    system_directive = """RULES:
+    1. Extract graph triples from the sentence.
+    2. Use very simple synonyms to decrease the nuance in the statement.
+    3. Stay true to the sentence, make inferences about the sentiment, intent, if it is reasonable to do so.
+    4. Use natural language to create the triples.
+    5. Write only the comma separated triples format that follow node, relationship, node pattern
+    6. If the statement is an opinion, create a relationship that assigns the speaker has_preference
+    7. DO NOT HAVE ANY ISLAND RELATIONSHIPS. ALL EDGES MUST CONNECT."""
+    system_examples = """```
+    INPUT SENTENCE: The Texas heat is OPPRESSIVE
+    OUTPUT:
+    Texas, is, hot
+    hot, is, uncomfortable
+    hot, is, unwanted
+    ---
+    SENTENCE: "Isn't Italy a better country than Spain?" 
+ OUTPUT: + Italy, is_a, country + Spain, is_a, country + Italy, better, Spain + better, property, comparison + speaker, has_preference, Italy + ```""" + prompt = f"For the sake of illumination, represent this speaker's sentence in triples: {message}" + system_ctx = system_role + system_directive + system_examples + print("\t[ generating sentence_abstractive_graph_triples ]") + if websocket: + await websocket.send_json({"status": "generating", "response": "generating sentence_abstractive_graph_triples", 'completed': False}) + sentence_abstractive_graph_triples = generate_response(system_ctx, prompt, model='dolphin-llama3') + # print(sentence_abstractive_graph_triples) + + prompt = f"We were just given us the above triples to represent this message: '{message}'. Improve and correct their triples in a plaintext codeblock." + print("\t[ generating refined_abstractive_graph_triples ]") + if websocket: + await websocket.send_json({"status": "generating", "response": "generating refined_abstractive_graph_triples", 'completed': False}) + refined_abstractive_graph_triples = generate_response(sentence_abstractive_graph_triples, prompt, model='dolphin-llama3') # a second pass to refine the first generation's responses + # what is being said, + + # add relations to this existing graph that offer actions that can be taken, be humorous and absurd + + # output these graph relations into a mermaid chart we can use in markdown. Follow this form + system_ctx = f"""Generate a mermaid block based off the triples. + It should look like this: +Example 1: + ```mermaid +graph TD; + Italy--> |is_a| country; + Spain--> |is_a| country; + Italy--> better-->Spain; + better-->property-->comparison; + speaker-->has_preference-->Italy; +``` +Example 2: +```mermaid +graph TD; + High_School-->duration_of_study-->10_Years; + High_School-->compared_to-->4_Year_Program; + 10_Year_Program-->more_time-->4_Years; + Speaker-->seeks_change-->High_School_Length; +``` +Rules: +1. No spaces between entities! 
+ """ + prompt =f"""Create a mermaid chart from these triples: {refined_abstractive_graph_triples}. Reduce the noise and combine elements if they are referencing the same thing. +Since United_States and The_United_States are the same thing, make the output just use: United_States. +Example: +Input triples +``` +United_States, has_spending, too_much +The_United_States, could_do_with, less_spending; +too_much, is, undesirable; +``` + +Output Mermaid Where you Substitute The_United_States, with United_States. +```mermaid +graph TD; + United_States --> |has_spending| too_much; + United_States --> |could_do_with| less_spending; + too_much --> |is| undesirable; +``` +""" + attempt = 0 + message_history = [{"role": "system", "content": system_ctx}, {"role": "user", "content": prompt}] + while attempt < 3: + if attempt == 0: + print("\t\t[ generating mermaid chart ]") + if websocket: + await websocket.send_json({"status": "generating", "response": "generating mermaid_chart_from_triples", 'completed': False}) + else: + print(f"\t\t[ generating mermaid chart :: try {attempt + 1}]") + if websocket: + await websocket.send_json({"status": "generating", "response": f"generating mermaid_chart_from_triples :: try {attempt + 1}", 'completed': False}) + response = generate_response_messages(message_history, model='dolphin-llama3') + mermaid_chart = extract_mermaid_chart(response) + if mermaid_chart: + # refined_mermaid_chart = refine_mermaid_lines(mermaid_chart) + return mermaid_chart + # print("FAILED:\n", response) + message_history.append({"role": "user", "content": "That wasn't correct. State why and do it better."}) + attempt += 1 + return "Failed to generate mermaid" + + +# message = "Why can't we go to High School for 10 years instead of 4!!!" +# message2 = "Isn't Italy a better country than Spain?" 
+# message3 = "The United States could do with a little less spending" +# message4 = "As a group creating a product, we should be steady, have a clear view of the future, and not appear to succumb to dynamic market forces. If their argument takes over ours, they then argue that our organization's valuation could be nothing tomorrow because a new yet-to-be-made (ghost) tech will eat us up. Then they give us no money." +# message5 = "Furthermore, reading improves vocabulary and language skills, which is not as effectively achieved through watching TV." +# syntax = get_mermaid_chart(message4) +# print("MERMAID_CHART:\n", syntax) + diff --git a/topos/test/test_debate_flow_4_evolution.py b/topos/test/test_debate_flow_4_evolution.py index 4bdbaa6..6b92ba1 100644 --- a/topos/test/test_debate_flow_4_evolution.py +++ b/topos/test/test_debate_flow_4_evolution.py @@ -180,13 +180,15 @@ def test_debate_flow_with_jwt(self): if response["status"] == "wepcc_result": self.assertIn("wepcc_result", response) wepcc_result_received = True + print("RESPONSE:\n", response) if response["status"] == "final_results": self.assertIn("results", response) final_results_received = True + print("RESPONSE:\n", response) print(f"\t[ Messaged processed: {message['data']['content']} ]") - + print("Test completed") # Verify that all expected responses were received diff --git a/topos/utilities/utils.py b/topos/utilities/utils.py index dc9b0eb..5115c28 100644 --- a/topos/utilities/utils.py +++ b/topos/utilities/utils.py @@ -1,6 +1,17 @@ import random # utils.py import os +import shutil + + +def get_python_command(): + if shutil.which("python"): + return "python" + elif shutil.which("python3"): + return "python3" + else: + raise EnvironmentError("No Python interpreter found") + def get_root_directory(): # Get the current file's directory