Skip to content

Bedrock pixtral fix #10439

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
import types
from typing import List, Optional
from typing import List, Optional, TYPE_CHECKING

from litellm.llms.base_llm.chat.transformation import BaseConfig
from litellm.llms.bedrock.chat.invoke_transformations.base_invoke_transformation import (
AmazonInvokeConfig,
)
from litellm.llms.bedrock.common_utils import BedrockError

if TYPE_CHECKING:
from litellm.types.utils import ModelResponse


class AmazonMistralConfig(AmazonInvokeConfig, BaseConfig):
Expand Down Expand Up @@ -81,3 +85,27 @@ def map_openai_params(
if k == "stream":
optional_params["stream"] = v
return optional_params

@staticmethod
def get_outputText(completion_response: dict, model_response: "ModelResponse") -> str:
"""This function extracts the output text from a bedrock mistral completion.
As a side effect, it updates the finish reason for a model response.

Args:
completion_response: JSON from the completion.
model_response: ModelResponse

Returns:
A string with the response of the LLM

"""
if "choices" in completion_response:
outputText = completion_response["choices"][0]["message"]["content"]
model_response.choices[0].finish_reason = completion_response["choices"][0]["finish_reason"]
elif "outputs" in completion_response:
outputText = completion_response["outputs"][0]["text"]
model_response.choices[0].finish_reason = completion_response["outputs"][0]["stop_reason"]
else:
raise BedrockError(message="Unexpected mistral completion response", status_code=400)

return outputText
Original file line number Diff line number Diff line change
Expand Up @@ -366,10 +366,7 @@ def transform_response( # noqa: PLR0915
elif provider == "meta" or provider == "llama" or provider == "deepseek_r1":
outputText = completion_response["generation"]
elif provider == "mistral":
outputText = completion_response["outputs"][0]["text"]
model_response.choices[0].finish_reason = completion_response[
"outputs"
][0]["stop_reason"]
outputText = litellm.AmazonMistralConfig.get_outputText(completion_response, model_response)
else: # amazon titan
outputText = completion_response.get("results")[0].get("outputText")
except Exception as e:
Expand Down
30 changes: 30 additions & 0 deletions tests/litellm/llms/bedrock/chat/test_mistral_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@


from litellm.llms.bedrock.chat.invoke_transformations.amazon_mistral_transformation import AmazonMistralConfig
from litellm.types.utils import ModelResponse


def test_mistral_get_outputText():
    """get_outputText handles both Mistral completion shapes and updates finish_reason."""
    response = ModelResponse()
    response.choices[0].finish_reason = "None"  # arbitrary starting value

    # Each case: (completion payload, expected text, expected finish reason).
    # First the OpenAI-style "choices" shape returned by models like pixtral,
    # then the Bedrock-native "outputs" shape returned by other models.
    cases = [
        (
            {"choices": [{"message": {"content": "Hello!"}, "finish_reason": "stop"}]},
            "Hello!",
            "stop",
        ),
        (
            {"outputs": [{"text": "Hi!", "stop_reason": "finish"}]},
            "Hi!",
            "finish",
        ),
    ]

    for payload, expected_text, expected_reason in cases:
        text = AmazonMistralConfig.get_outputText(
            completion_response=payload, model_response=response
        )
        assert text == expected_text
        assert response.choices[0].finish_reason == expected_reason
Loading