diff --git a/litellm/llms/bedrock/chat/invoke_transformations/amazon_mistral_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/amazon_mistral_transformation.py
index ef3c237f9d0d..58dfa17a7222 100644
--- a/litellm/llms/bedrock/chat/invoke_transformations/amazon_mistral_transformation.py
+++ b/litellm/llms/bedrock/chat/invoke_transformations/amazon_mistral_transformation.py
@@ -1,10 +1,14 @@
 import types
-from typing import List, Optional
+from typing import List, Optional, TYPE_CHECKING
 
 from litellm.llms.base_llm.chat.transformation import BaseConfig
 from litellm.llms.bedrock.chat.invoke_transformations.base_invoke_transformation import (
     AmazonInvokeConfig,
 )
+from litellm.llms.bedrock.common_utils import BedrockError
+
+if TYPE_CHECKING:
+    from litellm.types.utils import ModelResponse
 
 
 class AmazonMistralConfig(AmazonInvokeConfig, BaseConfig):
@@ -81,3 +85,27 @@ def map_openai_params(
             if k == "stream":
                 optional_params["stream"] = v
         return optional_params
+
+    @staticmethod
+    def get_outputText(completion_response: dict, model_response: "ModelResponse") -> str:
+        """Extract the output text from a Bedrock Mistral completion.
+        As a side effect, it updates the finish reason on the model response.
+
+        Args:
+            completion_response: Parsed JSON body of the completion.
+            model_response: The ModelResponse to update.
+
+        Returns:
+            The text of the LLM response.
+
+        """
+        if "choices" in completion_response:
+            outputText = completion_response["choices"][0]["message"]["content"]
+            model_response.choices[0].finish_reason = completion_response["choices"][0]["finish_reason"]
+        elif "outputs" in completion_response:
+            outputText = completion_response["outputs"][0]["text"]
+            model_response.choices[0].finish_reason = completion_response["outputs"][0]["stop_reason"]
+        else:
+            raise BedrockError(message="Unexpected mistral completion response", status_code=400)
+
+        return outputText
diff --git a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py
index 67194e83e748..d2f1fc8072ed 100644
--- a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py
+++ b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py
@@ -366,10 +366,7 @@ def transform_response(  # noqa: PLR0915
             elif provider == "meta" or provider == "llama" or provider == "deepseek_r1":
                 outputText = completion_response["generation"]
             elif provider == "mistral":
-                outputText = completion_response["outputs"][0]["text"]
-                model_response.choices[0].finish_reason = completion_response[
-                    "outputs"
-                ][0]["stop_reason"]
+                outputText = litellm.AmazonMistralConfig.get_outputText(completion_response, model_response)
             else:  # amazon titan
                 outputText = completion_response.get("results")[0].get("outputText")
         except Exception as e:
diff --git a/tests/litellm/llms/bedrock/chat/test_mistral_config.py b/tests/litellm/llms/bedrock/chat/test_mistral_config.py
new file mode 100644
index 000000000000..022344019159
--- /dev/null
+++ b/tests/litellm/llms/bedrock/chat/test_mistral_config.py
@@ -0,0 +1,30 @@
+
+
+from litellm.llms.bedrock.chat.invoke_transformations.amazon_mistral_transformation import AmazonMistralConfig
+from litellm.types.utils import ModelResponse
+
+
+def test_mistral_get_outputText():
+    # Set up an initial model response with an arbitrary finish reason.
+    model_response = ModelResponse()
+    model_response.choices[0].finish_reason = "None"
+
+    # Models like Pixtral return the completion in the OpenAI format.
+    mock_json_with_choices = {"choices": [{"message": {"content": "Hello!"}, "finish_reason": "stop"}]}
+
+    outputText = AmazonMistralConfig.get_outputText(
+        completion_response=mock_json_with_choices, model_response=model_response
+    )
+
+    assert outputText == "Hello!"
+    assert model_response.choices[0].finish_reason == "stop"
+
+    # Other models return the completion under "outputs".
+    mock_json_with_output = {"outputs": [{"text": "Hi!", "stop_reason": "finish"}]}
+
+    outputText = AmazonMistralConfig.get_outputText(
+        completion_response=mock_json_with_output, model_response=model_response
+    )
+
+    assert outputText == "Hi!"
+    assert model_response.choices[0].finish_reason == "finish"
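
A minimal follow-up sketch, not part of the diff above: the new tests cover both happy paths but not the else branch of get_outputText, which raises a BedrockError for an unrecognized payload. Assuming BedrockError behaves as an ordinary exception class (it is raised directly in the hunk above), a test for that branch could look like this:

import pytest

from litellm.llms.bedrock.chat.invoke_transformations.amazon_mistral_transformation import AmazonMistralConfig
from litellm.llms.bedrock.common_utils import BedrockError
from litellm.types.utils import ModelResponse


def test_mistral_get_outputText_unexpected_response():
    # A payload with neither "choices" nor "outputs" should raise a BedrockError.
    with pytest.raises(BedrockError):
        AmazonMistralConfig.get_outputText(
            completion_response={"unexpected_key": []}, model_response=ModelResponse()
        )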