diff --git a/README.md b/README.md index fd656de..9c1af13 100644 --- a/README.md +++ b/README.md @@ -22,4 +22,19 @@ Use the tag like this. ### Step 3: Start Topos on your local machine `topos run` -`topos run --local` \ No newline at end of file +`topos run --local` + +### Step 4a (zrok): Set up web proxy +We are going to expose our backend service to a public network so our phone/tablet can use it. In this case, we use zrok. Below is an ngrok setup version. +zrok is open source and free. +ngrok has a gated requests/month limit under its free tier, then requires you to pay for it. + +1. [Install zrok command](https://docs.zrok.io/docs/getting-started/?_gl=1*1yet1eb*_ga*MTQ1MDc2ODAyNi4xNzE3MDE3MTE3*_ga_V2KMEXWJ10*MTcxNzAxNzExNi4xLjAuMTcxNzAxNzExNi42MC4wLjA.*_gcl_au*NDk3NjM1MzEyLjE3MTcwMTcxMTc.#installing-the-zrok-command) +2. `zrok enable ` +3. `zrok status` should show you information +4. Route local path through zrok: `zrok share public http://0.0.0.0:13341` +This will take you to a new screen with an https:// at the top. +Insert this URL into the field under settings-> "Api Endpoints" -> "Custom API" +5. After you've inserted it into the field, press the test button, and "hello world" should appear next to the button. + +### Step 4b (ngrok): Set up web proxy diff --git a/pyproject.toml b/pyproject.toml index 1d2b666..24bf731 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "topos" -version = "0.1" +version = "0.2.1" description = "The official Python client for Topos." 
authors = ["Dialogues "] license = "MIT" @@ -22,6 +22,8 @@ diffusers = "0.27.2" accelerate = "0.30.1" emoji = "0.6.0" pyyaml = "6.0.1" +python-dotenv = "1.0.0" +openai = "1.30.4" [tool.poetry.group.dev.dependencies] pytest = "^7.4.3" diff --git a/setup.cfg b/setup.cfg index c3567fb..9ed08e8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = topos -version = 0.1 +version = 0.2.1 author = Jonny Johnson author_email = jonnyjohnson1@gmail.com description = For interacting with Topos tooling diff --git a/setup.py b/setup.py index 0b0d9d8..c2a95f7 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name='topos', - version='0.1', + version='0.2.1', packages=find_packages(), entry_points={ 'console_scripts': [ diff --git a/topos/.DS_Store b/topos/.DS_Store index e4aaed2..14fc7a4 100644 Binary files a/topos/.DS_Store and b/topos/.DS_Store differ diff --git a/topos/api/api.py b/topos/api/api.py index 47190f4..3875471 100644 --- a/topos/api/api.py +++ b/topos/api/api.py @@ -27,7 +27,7 @@ def start_local_api(): """Function to start the API in local mode.""" - print("\033[92mINFO:\033[0m API docs available at: http://0.0.0.0:13341/docs") + print("\033[92mINFO:\033[0m API docs available at: \033[1mhttp://0.0.0.0:13341/docs\033[0m") uvicorn.run(app, host="0.0.0.0", port=13341) diff --git a/topos/api/api_routes.py b/topos/api/api_routes.py index f742350..35451d0 100644 --- a/topos/api/api_routes.py +++ b/topos/api/api_routes.py @@ -193,44 +193,12 @@ async def create_next_messages(request: GenNextMessageOptions): return {"response" : next_message_options} - -class ConversationSummaryRequest(BaseModel): - conversation_id: str - subject: str - model: str - -# @router.post("/gen_conversation_summary") -# async def create_next_messages(request: ConversationSummaryRequest): -# conversation_id = request.conversation_id -# subject = request.subject -# model = request.model if request.model != None else "dolphin-llama3" - -# # load conversation -# conv_data = 
cache_manager.load_from_cache(conversation_id) -# if conv_data is None: -# raise HTTPException(status_code=404, detail="Conversation not found in cache") - -# context = create_conversation_string(conv_data, 12) -# print(f"\t[ generating summary :: model {model} :: subject {subject}]") - -# system_prompt = "PRESENT CONVERSATION:\n-------" + context + "\n-------\n" -# query = f"""Summarize this conversation. Frame your response around the subject of {subject} -# """ - -# summarized_conversation = generate_response(system_prompt, query, model=model, temperature=0) -# print(summarized_conversation) - -# # return the summary -# return {"response" : summarized_conversation} - - - class ConversationTopicsRequest(BaseModel): conversation_id: str model: str @router.post("/gen_conversation_topics") -async def create_next_messages(request: ConversationSummaryRequest): +async def create_next_messages(request: ConversationTopicsRequest): conversation_id = request.conversation_id model = request.model if request.model != None else "dolphin-llama3" @@ -264,6 +232,9 @@ async def list_models(): except requests.ConnectionError: raise HTTPException(status_code=500, detail="Server connection error") +@router.post("/test") +async def test(): + return "hello world" @router.post("/get_files") async def get_files(): diff --git a/topos/downloaders/spacy_loader.py b/topos/downloaders/spacy_loader.py index 54470b4..2df67cf 100644 --- a/topos/downloaders/spacy_loader.py +++ b/topos/downloaders/spacy_loader.py @@ -13,7 +13,7 @@ def download_spacy_model(model_selection): else: #default model_name = "en_core_web_sm" try: - subprocess.run(['python', '-m', 'spacy', 'download', model_name], check=True) + subprocess.run(['python3', '-m', 'spacy', 'download', model_name], check=True) # Write updated settings to YAML file with open('config.yaml', 'w') as file: yaml.dump({'active_spacy_model': model_name}, file)