From a3303320f23d141f5e65377bd3816d105102310a Mon Sep 17 00:00:00 2001
From: Vishwanath Martur <64204611+vishwamartur@users.noreply.github.com>
Date: Fri, 1 Nov 2024 14:02:20 +0530
Subject: [PATCH] Add Ollama support

Related to #7

Add support for running against a local Ollama server via LangChain's
`ChatOllama`, which exposes the same chat-model interface as the
`ChatOpenAI` instance the rest of the code already consumes. (The bare
`ollama` package exports `Client`/`chat`, not an `Ollama` class, and its
client does not implement the LangChain runnable interface.)

* **`integuru/util/LLM.py`**
  - Import `ChatOllama` from `langchain_ollama`.
  - Track the model of the cached singleton in `_instance_model` so
    `get_instance` rebuilds it when a different model is requested
    (`ChatOpenAI` exposes `model_name`, not `.model`, so the previous
    `cls._instance.model` check raised `AttributeError`).
  - `get_instance` accepts `ollama` (defaults to llama3) or
    `ollama:<name>` to select a specific local model.
  - Add a `get_ollama_instance` convenience wrapper that delegates to
    `get_instance`.

* **`integuru/__main__.py`**
  - Update `--model` help text to document the Ollama options.

* **`README.md`**
  - Document Ollama support.
---
 README.md            |  2 ++
 integuru/__main__.py |  2 +-
 integuru/util/LLM.py | 20 +++++++++++++++++---
 3 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index c286de6..f61c09e 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,8 @@ Let's assume we want to download utility bills:
 **Recommended to use gpt-4o as the model for graph generation as it supports function calling. Integuru will automatically switch to o1-preview for code generation if available in the user's OpenAI account.**
 ⚠️ **Note: o1-preview does not support function calls.**
 
+**Ollama support is now available! Run against a local Ollama server by passing `--model ollama` (defaults to llama3) or `--model ollama:<name>` to select a specific local model.**
+
 ## Usage
 
 After setting up the project, you can use Integuru to analyze and reverse-engineer API requests for external platforms. Simply provide the appropriate .har file and a prompt describing the action that you want to trigger.
diff --git a/integuru/__main__.py b/integuru/__main__.py
index 2c001a1..137a7bf 100644
--- a/integuru/__main__.py
+++ b/integuru/__main__.py
@@ -11,7 +11,7 @@
 @click.command()
 @click.option(
-    "--model", default="gpt-4o", help="The LLM model to use (default is gpt-4o)"
+    "--model", default="gpt-4o", help="The LLM model to use (default is gpt-4o; pass 'ollama' or 'ollama:<name>' for a local Ollama model)"
 )
 @click.option("--prompt", required=True, help="The prompt for the model")
 @click.option(
diff --git a/integuru/util/LLM.py b/integuru/util/LLM.py
index 9e20293..4497184 100644
--- a/integuru/util/LLM.py
+++ b/integuru/util/LLM.py
@@ -1,17 +1,26 @@
 from langchain_openai import ChatOpenAI
+from langchain_ollama import ChatOllama
 
 class LLMSingleton:
     _instance = None
+    _instance_model = None  # model string the cached _instance was built for
     _default_model = "gpt-4o"
     _alternate_model = "o1-preview"
+    _ollama_model = "ollama"
 
     @classmethod
     def get_instance(cls, model: str = None):
         if model is None:
             model = cls._default_model
 
-        if cls._instance is None:
-            cls._instance = ChatOpenAI(model=model, temperature=1)
+        # Rebuild the cached instance when a different model is requested.
+        if cls._instance is None or cls._instance_model != model:
+            if model == cls._ollama_model or model.startswith("ollama:"):
+                # "ollama" uses llama3; "ollama:<name>" selects <name>.
+                cls._instance = ChatOllama(model=model.partition(":")[2] or "llama3")
+            else:
+                cls._instance = ChatOpenAI(model=model, temperature=1)
+            cls._instance_model = model
         return cls._instance
 
     @classmethod
@@ -34,3 +43,8 @@ def switch_to_alternate_model(cls):
         return cls._instance
 
-llm = LLMSingleton()
+    @classmethod
+    def get_ollama_instance(cls, model: str = "llama3"):
+        """Return a cached ChatOllama instance for the given Ollama model."""
+        return cls.get_instance(f"ollama:{model}")
+
+llm = LLMSingleton()