Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bump up dependencies to the newest versions #38

Merged
Merged 11 commits into the base branch
Nov 5, 2024
12 changes: 7 additions & 5 deletions allms/models/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import urllib
from abc import ABC, abstractmethod
from functools import partial
from urllib.error import URLError

import google
import openai
Expand Down Expand Up @@ -77,8 +78,8 @@ def __init__(

self._predict_example = create_base_retry_decorator(
error_types=[
openai.error.RateLimitError, openai.error.APIError, openai.error.Timeout,
openai.error.APIConnectionError, openai.error.ServiceUnavailableError,
openai.RateLimitError, openai.APIError, openai.Timeout,
openai.APIConnectionError, openai.InternalServerError,
google.api_core.exceptions.ResourceExhausted, urllib.error.HTTPError
],
max_retries=max_retries,
Expand Down Expand Up @@ -252,16 +253,17 @@ async def _predict_example(
model_response = await chain.arun({})
else:
model_response = await chain.arun(**input_data.input_mappings)
except openai.error.InvalidRequestError as invalid_request_error:
except openai.InternalServerError as invalid_request_error:
logger.info(f"Error for id {input_data.id} has occurred. Message: {invalid_request_error} ")
if invalid_request_error.error.code == "content_filter":
if invalid_request_error.code == "content_filter":
model_response = None
error_message = f"{IODataConstants.CONTENT_FILTER_MESSAGE}: {invalid_request_error}"
else:
model_response = None
error_message = f"{IODataConstants.ERROR_MESSAGE_STR}: {invalid_request_error}"

except (InvalidArgument, ValueError, TimeoutError, openai.error.Timeout, GCPInvalidRequestError) as other_error:
except (InvalidArgument, ValueError, TimeoutError, openai.APIError, GCPInvalidRequestError,
openai.APITimeoutError) as other_error:
model_response = None
logger.info(f"Error for id {input_data.id} has occurred. Message: {other_error} ")
error_message = f"{type(other_error).__name__}: {other_error}"
Expand Down
4 changes: 2 additions & 2 deletions allms/models/azure_openai.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from asyncio import AbstractEventLoop
from typing import Optional

from langchain.chat_models import AzureChatOpenAI
from langchain_openai import AzureChatOpenAI

from allms.defaults.azure_defaults import AzureGptTurboDefaults
from allms.defaults.general_defaults import GeneralDefaults
Expand Down Expand Up @@ -38,7 +38,7 @@ def _create_llm(self) -> AzureChatOpenAI:
deployment_name=self._config.deployment,
api_version=self._config.api_version,
model_name=self._config.model_name,
base_url=self._config.base_url,
azure_endpoint=self._config.base_url,
api_key=self._config.api_key,
azure_ad_token=self._config.azure_ad_token,
temperature=self._temperature,
Expand Down
2 changes: 1 addition & 1 deletion allms/models/vertexai_base.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
from typing import List, Optional, Any, Dict

from google.cloud.aiplatform.models import Prediction
from langchain_google_vertexai import VertexAI, VertexAIModelGarden
from langchain_core.callbacks import AsyncCallbackManagerForLLMRun
from langchain_core.outputs import LLMResult, Generation
from langchain_google_vertexai import VertexAI, VertexAIModelGarden
from pydash import chain

from allms.constants.vertex_ai import VertexModelConstants
Expand Down
2 changes: 1 addition & 1 deletion allms/models/vertexai_gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def __init__(
event_loop=event_loop
)

def _create_llm(self) -> VertexAI:
def _create_llm(self) -> CustomVertexAI:
return CustomVertexAI(
model_name=self._config.gemini_model_name,
max_output_tokens=self._max_output_tokens,
Expand Down
10 changes: 10 additions & 0 deletions allms/utils/response_parsing_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

from langchain.output_parsers import PydanticOutputParser
from langchain.schema import OutputParserException
from pydantic import ValidationError

from allms.domain.response import ResponseData, ResponseParsingOutput

Expand Down Expand Up @@ -45,6 +46,15 @@ def _parse_response(
The exception message: {output_parser_exception}
"""
)
except ValidationError as validation_error:
return ResponseParsingOutput(
response=None,
error_message=f"""
A ValidationError has occurred for the model response: {model_response_data.response}
The exception message: {validation_error}
"""
)


def parse_model_output(
self,
Expand Down
4,298 changes: 2,441 additions & 1,857 deletions poetry.lock

Large diffs are not rendered by default.

18 changes: 10 additions & 8 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,19 +7,21 @@ readme = "README.md"
packages = [{include = "allms"}]

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
python = ">=3.9.0,<4.0"
fsspec = "^2023.6.0"
google-cloud-aiplatform = ">=1.57.0"
google-cloud-aiplatform = "1.70.0"
pydash = "^7.0.6"
transformers = "^4.34.1"
pydantic = "1.10.13"
langchain = "^0.1.8"
langchain-google-vertexai = "1.0.4"
aioresponses = "^0.7.6"
tiktoken = "^0.6.0"
openai = "^0.27.8"
pydantic = "2.7.4"
langchain = "0.3.6"
tiktoken = "^0.7.0"
openai = "1.52.0"
pytest-mock = "^3.14.0"
respx = "^0.21.1"
langchain-community = "^0.3.5"
langchain-google-vertexai = "^2.0.7"
sentencepiece = "^0.2.0"
langchain-openai = "^0.2.5"

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
Expand Down
7 changes: 0 additions & 7 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
from unittest.mock import patch

import pytest
from aioresponses import aioresponses
from langchain_community.llms.fake import FakeListLLM

from allms.domain.configuration import (
Expand Down Expand Up @@ -94,9 +93,3 @@ def models():
event_loop=event_loop
)
}


@pytest.fixture
def mock_aioresponse():
with aioresponses() as http_mock:
yield http_mock
Loading
Loading