From 2809897efcd63f93db41c14347d2b07be11972eb Mon Sep 17 00:00:00 2001 From: cristy Date: Wed, 25 Dec 2024 12:44:38 +0100 Subject: [PATCH 1/3] feat: ollama provider. Example config: { "SHAI_SUGGESTION_COUNT": "3", "SHAI_API_PROVIDER": "ollama", "OLLAMA_MODEL": "phi3.5", "OLLAMA_API_BASE": "http://localhost:11434/v1/", "SHAI_TEMPERATURE": "0.05" } --- shell_ai/main.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/shell_ai/main.py b/shell_ai/main.py index fe87521..87c50a4 100644 --- a/shell_ai/main.py +++ b/shell_ai/main.py @@ -29,6 +29,7 @@ class APIProvider(Enum): openai = "openai" azure = "azure" groq = "groq" + ollama = "ollama" class Colors: WARNING = '\033[93m' @@ -45,6 +46,7 @@ def main(): Allowed envionment variables: - OPENAI_MODEL: The name of the OpenAI model to use. Defaults to `gpt-3.5-turbo`. + - OLLAMA_MODEL: The name of the Ollama model to use. Defaults to `phi3.5`. - SHAI_SUGGESTION_COUNT: The number of suggestions to generate. Defaults to 3. - SHAI_SKIP_CONFIRM: Skip confirmation of the command to execute. Defaults to false. Set to `true` to skip confirmation. - SHAI_SKIP_HISTORY: Skip writing selected command to shell history (currently supported shells are zsh, bash, csh, tcsh, ksh, and fish). Defaults to false. Set to `true` to skip writing. 
@@ -100,7 +102,10 @@ def main(): prompt = " ".join(sys.argv[1:]) OPENAI_MODEL = os.environ.get("OPENAI_MODEL", loaded_config.get("OPENAI_MODEL")) + OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", None) OPENAI_MAX_TOKENS = os.environ.get("OPENAI_MAX_TOKENS", None) + OLLAMA_MAX_TOKENS = os.environ.get("OLLAMA_MAX_TOKENS", None) + OLLAMA_API_BASE = os.environ.get("OLLAMA_API_BASE", loaded_config.get("OLLAMA_API_BASE")) OPENAI_API_BASE = os.environ.get("OPENAI_API_BASE", None) OPENAI_ORGANIZATION = os.environ.get("OPENAI_ORGANIZATION", None) OPENAI_PROXY = os.environ.get("OPENAI_PROXY", None) @@ -168,6 +173,14 @@ def main(): groq_api_key=GROQ_API_KEY, temperature=SHAI_TEMPERATURE, ) + elif SHAI_API_PROVIDER == "ollama": + chat = ChatOpenAI( + model_name=OLLAMA_MODEL, + openai_api_base=OLLAMA_API_BASE, + max_tokens=OLLAMA_MAX_TOKENS, + temperature=SHAI_TEMPERATURE, + api_key="ollama" + ) if platform.system() == "Linux": info = platform.freedesktop_os_release() From c2d4726a9c877e151ae6802c1fa71bad4255d5da Mon Sep 17 00:00:00 2001 From: cristy Date: Wed, 25 Dec 2024 13:07:16 +0100 Subject: [PATCH 2/3] feat: ollama provider. 
Version bump and README.md --- README.md | 14 +++++++++++++- setup.py | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 665ae49..b92e7fb 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ Shell-AI can be configured through environment variables or a config file locate ### Environment Variables -- `OPENAI_API_KEY`: (Required) Your OpenAI API key +- `OPENAI_API_KEY`: (Required) Your OpenAI API key, leave empty if you use ollama - `GROQ_API_KEY`: (Required if using Groq) Your Groq API key - `OPENAI_MODEL`: The OpenAI model to use (default: "gpt-3.5-turbo") - `SHAI_SUGGESTION_COUNT`: Number of suggestions to generate (default: 3) @@ -69,6 +69,18 @@ Shell-AI can be configured through environment variables or a config file locate } ``` + +### Config Example for Ollama +```json + { + "OPENAI_API_KEY":"", + "SHAI_SUGGESTION_COUNT": "3", + "SHAI_API_PROVIDER": "ollama", + "OLLAMA_MODEL": "phi3.5", + "OLLAMA_API_BASE": "http://localhost:11434/v1/", + "SHAI_TEMPERATURE": "0.05" + } +``` The application will read from this file if it exists, overriding any existing environment variables. Run the application after setting these configurations. diff --git a/setup.py b/setup.py index 0e272c2..298b446 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name='shell-ai', - version='0.4.2', + version='0.4.3', author='Rick Lamers', long_description=long_description, long_description_content_type='text/markdown', From 4b337cb08d20a13f4436b0fd7caff7cd3ea6aae6 Mon Sep 17 00:00:00 2001 From: cristy Date: Thu, 26 Dec 2024 19:04:14 +0100 Subject: [PATCH 3/3] feat: ollama provider. 
Version bump and README.md --- README.md | 4 +++- shell_ai/main.py | 8 ++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b92e7fb..483b4c5 100644 --- a/README.md +++ b/README.md @@ -54,9 +54,11 @@ Shell-AI can be configured through environment variables or a config file locate - `SHAI_SUGGESTION_COUNT`: Number of suggestions to generate (default: 3) - `SHAI_SKIP_CONFIRM`: Skip command confirmation when set to "true" - `SHAI_SKIP_HISTORY`: Skip writing to shell history when set to "true" -- `SHAI_API_PROVIDER`: Choose between "openai", "azure", or "groq" (default: "groq") +- `SHAI_API_PROVIDER`: Choose between "openai", "ollama", "azure", or "groq" (default: "groq") - `SHAI_TEMPERATURE`: Controls randomness in the output (default: 0.05). Lower values (e.g., 0.05) make output more focused and deterministic, while higher values (e.g., 0.7) make it more creative and varied. - `CTX`: Enable context mode when set to "true" (Note: outputs will be sent to the API) +- `OLLAMA_MODEL`: The Ollama model to use (default: "phi3.5") +- `OLLAMA_API_BASE`: The Ollama endpoint to use (default: "http://localhost:11434/v1/") ### Config File Example diff --git a/shell_ai/main.py b/shell_ai/main.py index 87c50a4..27d1c95 100644 --- a/shell_ai/main.py +++ b/shell_ai/main.py @@ -52,7 +52,7 @@ def main(): - SHAI_SKIP_HISTORY: Skip writing selected command to shell history (currently supported shells are zsh, bash, csh, tcsh, ksh, and fish). Defaults to false. Set to `true` to skip writing. - CTX: Allow the assistant to keep the console outputs as context allowing the LLM to produce more precise outputs. IMPORTANT: the outputs will be sent to OpenAI through their API, be careful if any sensitive data. Default to false. - SHAI_TEMPERATURE: Controls randomness in the output. Lower values make output more focused and deterministic (default: 0.05). - + - OLLAMA_API_BASE: The Ollama endpoint to use (default: "http://localhost:11434/v1/"). 
Additional required environment variables for Azure Deployments: - OPENAI_API_KEY: Your OpenAI API key. You can find this on https://beta.openai.com/account/api-keys - OPENAI_API_TYPE: "azure" @@ -102,10 +102,10 @@ def main(): prompt = " ".join(sys.argv[1:]) OPENAI_MODEL = os.environ.get("OPENAI_MODEL", loaded_config.get("OPENAI_MODEL")) - OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", None) + OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", loaded_config.get("OLLAMA_MODEL","phi3.5")) OPENAI_MAX_TOKENS = os.environ.get("OPENAI_MAX_TOKENS", None) - OLLAMA_MAX_TOKENS = os.environ.get("OLLAMA_MAX_TOKENS", None) - OLLAMA_API_BASE = os.environ.get("OLLAMA_API_BASE", loaded_config.get("OLLAMA_API_BASE")) + OLLAMA_MAX_TOKENS = os.environ.get("OLLAMA_MAX_TOKENS", loaded_config.get("OLLAMA_MAX_TOKENS",1500)) + OLLAMA_API_BASE = os.environ.get("OLLAMA_API_BASE", loaded_config.get("OLLAMA_API_BASE","http://localhost:11434/v1/")) OPENAI_API_BASE = os.environ.get("OPENAI_API_BASE", None) OPENAI_ORGANIZATION = os.environ.get("OPENAI_ORGANIZATION", None) OPENAI_PROXY = os.environ.get("OPENAI_PROXY", None)