
Commit 1ae176a

google-genai-bot authored and copybara-github committed
fix: update conversion between Celsius and Fahrenheit
#non-breaking The correct conversion from 25 degrees Celsius is 77 degrees Fahrenheit. The previous value of 41 was wrong.

PiperOrigin-RevId: 772528757
1 parent 694b712 commit 1ae176a

3 files changed: 12 additions & 83 deletions


contributing/samples/quickstart/agent.py

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ def get_weather(city: str) -> dict:
         "status": "success",
         "report": (
             "The weather in New York is sunny with a temperature of 25 degrees"
-            " Celsius (41 degrees Fahrenheit)."
+            " Celsius (77 degrees Fahrenheit)."
         ),
     }
   else:
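As a sanity check on the corrected value: F = C × 9/5 + 32, so 25 × 9/5 + 32 = 45 + 32 = 77. The old string's 41 °F actually corresponds to 5 °C. A minimal sketch of the arithmetic (the helper name is illustrative, not part of the sample agent):

def celsius_to_fahrenheit(celsius: float) -> float:
  """Convert a temperature in degrees Celsius to degrees Fahrenheit."""
  return celsius * 9 / 5 + 32


# 25 °C is 77 °F, the corrected value in the diff above.
assert celsius_to_fahrenheit(25) == 77.0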

src/google/adk/models/lite_llm.py

Lines changed: 10 additions & 50 deletions
@@ -23,7 +23,6 @@
 from typing import Dict
 from typing import Generator
 from typing import Iterable
-from typing import List
 from typing import Literal
 from typing import Optional
 from typing import Tuple
@@ -482,22 +481,16 @@ def _message_to_generate_content_response(
 
 def _get_completion_inputs(
     llm_request: LlmRequest,
-) -> Tuple[
-    List[Message],
-    Optional[List[dict]],
-    Optional[types.SchemaUnion],
-    Optional[Dict],
-]:
-  """Converts an LlmRequest to litellm inputs and extracts generation params.
+) -> tuple[Iterable[Message], Iterable[dict]]:
+  """Converts an LlmRequest to litellm inputs.
 
   Args:
     llm_request: The LlmRequest to convert.
 
   Returns:
-    The litellm inputs (message list, tool dictionary, response format and generation params).
+    The litellm inputs (message list, tool dictionary and response format).
   """
-  # 1. Construct messages
-  messages: List[Message] = []
+  messages = []
   for content in llm_request.contents or []:
     message_param_or_list = _content_to_message_param(content)
     if isinstance(message_param_or_list, list):
@@ -514,8 +507,7 @@ def _get_completion_inputs(
         ),
     )
 
-  # 2. Convert tool declarations
-  tools: Optional[List[Dict]] = None
+  tools = None
   if (
       llm_request.config
       and llm_request.config.tools
@@ -526,39 +518,12 @@ def _get_completion_inputs(
       for tool in llm_request.config.tools[0].function_declarations
   ]
 
-  # 3. Handle response format
-  response_format: Optional[types.SchemaUnion] = None
-  if llm_request.config and llm_request.config.response_schema:
-    response_format = llm_request.config.response_schema
-
-  # 4. Extract generation parameters
-  generation_params: Optional[Dict] = None
-  if llm_request.config:
-    config_dict = llm_request.config.model_dump(exclude_none=True)
-    # Generate LiteLlm parameters here,
-    # Following https://docs.litellm.ai/docs/completion/input.
-    generation_params = {}
-    param_mapping = {
-        "max_output_tokens": "max_completion_tokens",
-        "stop_sequences": "stop",
-    }
-    for key in (
-        "temperature",
-        "max_output_tokens",
-        "top_p",
-        "top_k",
-        "stop_sequences",
-        "presence_penalty",
-        "frequency_penalty",
-    ):
-      if key in config_dict:
-        mapped_key = param_mapping.get(key, key)
-        generation_params[mapped_key] = config_dict[key]
+  response_format = None
 
-    if not generation_params:
-      generation_params = None
+  if llm_request.config.response_schema:
+    response_format = llm_request.config.response_schema
 
-  return messages, tools, response_format, generation_params
+  return messages, tools, response_format
 
 
 def _build_function_declaration_log(
@@ -695,9 +660,7 @@ async def generate_content_async(
     self._maybe_append_user_content(llm_request)
     logger.debug(_build_request_log(llm_request))
 
-    messages, tools, response_format, generation_params = (
-        _get_completion_inputs(llm_request)
-    )
+    messages, tools, response_format = _get_completion_inputs(llm_request)
 
     completion_args = {
         "model": self.model,
@@ -707,9 +670,6 @@
     }
     completion_args.update(self._additional_args)
 
-    if generation_params:
-      completion_args.update(generation_params)
-
     if stream:
       text = ""
       # Track function calls by index
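With this change, _get_completion_inputs returns only the message list, the tool list, and the response format; the deleted block that mapped GenerateContentConfig fields such as temperature and max_output_tokens into litellm kwargs is gone, and generate_content_async no longer merges generation_params into completion_args. A minimal sketch of calling the simplified helper directly (the LlmRequest and types import paths are assumptions based on the ADK package layout; only the _get_completion_inputs import appears in this commit):

from google.adk.models.lite_llm import _get_completion_inputs
from google.adk.models.llm_request import LlmRequest
from google.genai import types

# One user message, no tools, no response schema in the config.
req = LlmRequest(
    contents=[
        types.Content(role="user", parts=[types.Part.from_text(text="hi")]),
    ],
    config=types.GenerateContentConfig(temperature=0.33),
)

# The helper now yields a three-tuple; the temperature set above is no longer
# translated into litellm kwargs by this function.
messages, tools, response_format = _get_completion_inputs(req)
assert tools is None and response_format is None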

tests/unittests/models/test_litellm.py

Lines changed: 1 addition & 32 deletions
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 
+import json
 from unittest.mock import AsyncMock
 from unittest.mock import Mock
 
@@ -1429,35 +1430,3 @@ async def test_generate_content_async_non_compliant_multiple_function_calls(
   assert final_response.content.parts[1].function_call.name == "function_2"
   assert final_response.content.parts[1].function_call.id == "1"
   assert final_response.content.parts[1].function_call.args == {"arg": "value2"}
-
-
-@pytest.mark.asyncio
-def test_get_completion_inputs_generation_params():
-  # Test that generation_params are extracted and mapped correctly
-  req = LlmRequest(
-      contents=[
-          types.Content(role="user", parts=[types.Part.from_text(text="hi")]),
-      ],
-      config=types.GenerateContentConfig(
-          temperature=0.33,
-          max_output_tokens=123,
-          top_p=0.88,
-          top_k=7,
-          stop_sequences=["foo", "bar"],
-          presence_penalty=0.1,
-          frequency_penalty=0.2,
-      ),
-  )
-  from google.adk.models.lite_llm import _get_completion_inputs
-
-  _, _, _, generation_params = _get_completion_inputs(req)
-  assert generation_params["temperature"] == 0.33
-  assert generation_params["max_completion_tokens"] == 123
-  assert generation_params["top_p"] == 0.88
-  assert generation_params["top_k"] == 7
-  assert generation_params["stop"] == ["foo", "bar"]
-  assert generation_params["presence_penalty"] == 0.1
-  assert generation_params["frequency_penalty"] == 0.2
-  # Should not include max_output_tokens
-  assert "max_output_tokens" not in generation_params
-  assert "stop_sequences" not in generation_params
