[Docs] Improved Conversation Patterns documentation (#1005)
* Fix swarm use case image links

* Restructure and sequential chat page added

* secrets

* Nested chat text and example

* Secrets args revert

* revert secrets baseline

* Swarm orchestration documentation includes simple group chat example

* .secrets.baseline updated

* wip

* GCM ValueError test

* Formatting and secrets

* Missed test update, test_lmm.py

* mock_credentials.llm_config

* Move Reference Agents to User Guide, fix tests

* test_chats remove mock credentials

* Reference Agent links, added api_type openai to llm configs in tests and documentation

* Revert api_type openai on a test due to llm validation bug.

---------

Co-authored-by: Davor Runje <[email protected]>
3 people authored Feb 19, 2025
1 parent dd4e414 commit 9cf64fe
Showing 75 changed files with 826 additions and 283 deletions.
114 changes: 57 additions & 57 deletions .secrets.baseline
@@ -2323,7 +2323,7 @@
"filename": "test/agentchat/contrib/test_lmm.py",
"hashed_secret": "dcce26510eb892e4c25cd8fabf1778261002ae1e",
"is_verified": false,
"line_number": 22,
"line_number": 25,
"is_secret": false
}
],
@@ -2401,7 +2401,7 @@
"filename": "test/agentchat/test_groupchat.py",
"hashed_secret": "475e81e79c7880f9b5caa35bec50279c459ad2f9",
"is_verified": false,
"line_number": 1450,
"line_number": 1490,
"is_secret": false
}
],
@@ -2713,7 +2713,7 @@
"filename": "test/website/test_process_notebooks.py",
"hashed_secret": "f5d04c6f567b20a06d5dddf4f8da1b1d6302b84a",
"is_verified": false,
"line_number": 334,
"line_number": 322,
"is_secret": false
}
],
@@ -2849,61 +2849,11 @@
"is_secret": false
}
],
"website/docs/use-cases/reference-agents/captainagent.mdx": [
{
"type": "Base64 High Entropy String",
"filename": "website/docs/use-cases/reference-agents/captainagent.mdx",
"hashed_secret": "22e70daedc2981ebdcd3a38cee1bc422d9dfbbe3",
"is_verified": false,
"line_number": 1525,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "website/docs/use-cases/reference-agents/captainagent.mdx",
"hashed_secret": "fcbf6eded79a9e1b6b1a60e4c9091bebf4505996",
"is_verified": false,
"line_number": 1527,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "website/docs/use-cases/reference-agents/captainagent.mdx",
"hashed_secret": "c076c348206eea13262359b492188f7e1f1edacc",
"is_verified": false,
"line_number": 1613,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "website/docs/use-cases/reference-agents/captainagent.mdx",
"hashed_secret": "eeab1aaeb76e1f6497a0491bc06c9aa09b5d4613",
"is_verified": false,
"line_number": 1615,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "website/docs/use-cases/reference-agents/captainagent.mdx",
"hashed_secret": "5a6c9c6b920846decef76ede494bb751a412d73f",
"is_verified": false,
"line_number": 1664,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "website/docs/use-cases/reference-agents/captainagent.mdx",
"hashed_secret": "d2ec472b9ba140e55361b04fc963ff10e17d0b70",
"is_verified": false,
"line_number": 1666,
"is_secret": false
}
],
"website/docs/user-guide/basic-concepts/llm-configuration.mdx": [
"website/docs/user-guide/basic-concepts/llm-configuration/llm-configuration.mdx": [
{
"type": "Secret Keyword",
"filename": "website/docs/user-guide/basic-concepts/llm-configuration.mdx",
"hashed_secret": "cf4a956e75901c220c0f5fbaec41987fc6177345",
"filename": "website/docs/user-guide/basic-concepts/llm-configuration/llm-configuration.mdx",
"hashed_secret": "1e3667aaaaa887721550cf5cc8a0c5c5760810ed",
"is_verified": false,
"line_number": 54,
"is_secret": false
@@ -3114,7 +3064,57 @@
"line_number": 90,
"is_secret": false
}
+ ],
+ "website/docs/user-guide/reference-agents/captainagent.mdx": [
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "website/docs/user-guide/reference-agents/captainagent.mdx",
+ "hashed_secret": "22e70daedc2981ebdcd3a38cee1bc422d9dfbbe3",
+ "is_verified": false,
+ "line_number": 1525,
+ "is_secret": false
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "website/docs/user-guide/reference-agents/captainagent.mdx",
+ "hashed_secret": "fcbf6eded79a9e1b6b1a60e4c9091bebf4505996",
+ "is_verified": false,
+ "line_number": 1527,
+ "is_secret": false
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "website/docs/user-guide/reference-agents/captainagent.mdx",
+ "hashed_secret": "c076c348206eea13262359b492188f7e1f1edacc",
+ "is_verified": false,
+ "line_number": 1613,
+ "is_secret": false
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "website/docs/user-guide/reference-agents/captainagent.mdx",
+ "hashed_secret": "eeab1aaeb76e1f6497a0491bc06c9aa09b5d4613",
+ "is_verified": false,
+ "line_number": 1615,
+ "is_secret": false
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "website/docs/user-guide/reference-agents/captainagent.mdx",
+ "hashed_secret": "5a6c9c6b920846decef76ede494bb751a412d73f",
+ "is_verified": false,
+ "line_number": 1664,
+ "is_secret": false
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "website/docs/user-guide/reference-agents/captainagent.mdx",
+ "hashed_secret": "d2ec472b9ba140e55361b04fc963ff10e17d0b70",
+ "is_verified": false,
+ "line_number": 1666,
+ "is_secret": false
+ }
]
},
"generated_at": "2025-02-18T08:58:38Z"
"generated_at": "2025-02-18T22:52:39Z"
}
5 changes: 5 additions & 0 deletions autogen/agentchat/groupchat.py
@@ -665,6 +665,11 @@ def _create_internal_agents(
# Override the selector's config if one was passed as a parameter to this class
speaker_selection_llm_config = self.select_speaker_auto_llm_config or selector.llm_config

+ if speaker_selection_llm_config is False:
+ raise ValueError(
+ "The group chat's internal speaker selection agent does not have an LLM configuration. Please provide a valid LLM config to the group chat's GroupChatManager or set it with the select_speaker_auto_llm_config parameter."
+ )
+
# Agent for selecting a single agent name from the response
speaker_selection_agent = ConversableAgent(
"speaker_selection_agent",
@@ -63,7 +63,7 @@ def api_key():
def dalle_config() -> dict[str, Any]:
config_list = openai_utils.config_list_from_models(model_list=["dall-e-3"], exclude="aoai")
if not config_list:
config_list = [{"model": "dall-e-3", "api_key": api_key()}]
config_list = [{"api_type": "openai", "model": "dall-e-3", "api_key": api_key()}]
return {"config_list": config_list, "timeout": 120, "cache_seed": None}


@@ -26,7 +26,7 @@ def test_agent():
return AssistantAgent(
name="test_agent",
llm_config={
"config_list": [{"model": "gpt-4O", "api_key": "sk-proj-ABC"}],
"config_list": [{"api_type": "openai", "model": "gpt-4o", "api_key": "sk-proj-ABC"}],
},
)

@@ -20,7 +20,7 @@
@pytest.fixture
def lmm_config():
return {
"config_list": [{"model": "gpt-4-vision-preview", "api_key": "sk-my_key"}],
"config_list": [{"api_type": "openai", "model": "gpt-4-vision-preview", "api_key": "sk-my_key"}],
"temperature": 0.5,
"max_tokens": 300,
}
9 changes: 8 additions & 1 deletion test/agentchat/contrib/test_llava.py
@@ -23,7 +23,14 @@ def setUp(self):
llm_config={
"timeout": 600,
"seed": 42,
"config_list": [{"model": "llava-fake", "base_url": "localhost:8000", "api_key": MOCK_OPEN_AI_API_KEY}],
"config_list": [
{
"api_type": "openai",
"model": "llava-fake",
"base_url": "localhost:8000",
"api_key": MOCK_OPEN_AI_API_KEY,
}
],
},
)

14 changes: 11 additions & 3 deletions test/agentchat/contrib/test_lmm.py
@@ -9,7 +9,10 @@
import unittest
from unittest.mock import MagicMock

+ from pytest import MonkeyPatch
+
import autogen
+ from autogen.agentchat import GroupChat
from autogen.agentchat.contrib.img_utils import get_pil_image
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from autogen.agentchat.conversable_agent import ConversableAgent
@@ -31,7 +34,9 @@ def setUp(self):
llm_config={
"timeout": 600,
"seed": 42,
"config_list": [{"model": "gpt-4-vision-preview", "api_key": MOCK_OPEN_AI_API_KEY}],
"config_list": [
{"api_type": "openai", "model": "gpt-4-vision-preview", "api_key": MOCK_OPEN_AI_API_KEY}
],
},
)

@@ -85,7 +90,7 @@ def test_print_received_message(self):


@skip_on_missing_imports(["PIL"], "unknown")
- def test_group_chat_with_lmm():
+ def test_group_chat_with_lmm(monkeypatch: MonkeyPatch):
"""Tests the group chat functionality with two MultimodalConversable Agents.
Verifies that the chat is correctly limited by the max_round parameter.
Each agent is set to describe an image in a unique style, but the chat should not exceed the specified max_rounds.
@@ -118,9 +123,12 @@ def test_group_chat_with_lmm():
code_execution_config=False,
)

+ # Mock speaker selection so it doesn't require a GroupChatManager with an LLM
+ monkeypatch.setattr(GroupChat, "_auto_select_speaker", lambda *args, **kwargs: agent1)
+
# Setting up the group chat
groupchat = autogen.GroupChat(agents=[agent1, agent2, user_proxy], messages=[], max_round=max_round)
- group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
+ group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=None)

# Initiating the group chat and observing the number of rounds
user_proxy.initiate_chat(group_chat_manager, message=f"What do you see? <img {base64_encoded_image}>")
4 changes: 2 additions & 2 deletions test/agentchat/contrib/test_reasoning_agent.py
@@ -43,7 +43,7 @@ def think_node():
@pytest.fixture
def reasoning_agent():
"""Create a ReasoningAgent instance for testing"""
config_list = [{"model": "gpt-4o", "api_key": "fake_key"}]
config_list = [{"api_type": "openai", "model": "gpt-4o", "api_key": "fake_key"}]
llm_config = {"config_list": config_list, "temperature": 0}
return ReasoningAgent("reasoning_agent", llm_config=llm_config)

@@ -157,7 +157,7 @@ def test_reasoning_agent_answer():
def helper_test_reasoning_agent_answer(max_depth, beam_size, answer_approach):
"""Test that ReasoningAgent properly terminates when TERMINATE is received"""
mock_config = {
"config_list": [{"model": "gpt-4o", "api_key": "fake", "base_url": "0.0.0.0:8000"}],
"config_list": [{"api_type": "openai", "model": "gpt-4o", "api_key": "fake", "base_url": "0.0.0.0:8000"}],
"temperature": 0,
}
with patch("autogen.agentchat.conversable_agent.ConversableAgent.generate_oai_reply") as mock_oai_reply:
2 changes: 1 addition & 1 deletion test/agentchat/contrib/test_web_surfer.py
@@ -42,7 +42,7 @@ def test_web_surfer() -> None:
page_size = 4096
web_surfer = WebSurferAgent(
"web_surfer",
llm_config={"model": "gpt-4o", "config_list": []},
llm_config={"api_type": "openai", "model": "gpt-4o", "config_list": []},
browser_config={"viewport_size": page_size},
)
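
The recurring change in the test configurations above is an explicit "api_type": "openai" entry in each config_list item, so the target provider is stated rather than left implicit. A minimal sketch of the resulting shape; the model and key values are placeholders, not taken from this commit.

from autogen import AssistantAgent

# Placeholder values; only the shape of the config entry matters here.
llm_config = {
    "config_list": [
        {
            "api_type": "openai",   # provider named explicitly
            "model": "gpt-4o",
            "api_key": "sk-placeholder",
        }
    ],
    "temperature": 0,
}

assistant = AssistantAgent(name="assistant", llm_config=llm_config)
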

7 changes: 5 additions & 2 deletions test/agentchat/test_chats.py
@@ -44,9 +44,12 @@ def test_chat_messages_for_summary():
messages = assistant.chat_messages_for_summary(user)
assert len(messages) == 1

- groupchat = GroupChat(agents=[user, assistant], messages=[], max_round=2)
+ groupchat = GroupChat(agents=[user, assistant], messages=[], max_round=2, speaker_selection_method="round_robin")
manager = GroupChatManager(
groupchat=groupchat, name="manager", llm_config=False, code_execution_config={"use_docker": False}
groupchat=groupchat,
name="manager",
llm_config=None,
code_execution_config={"use_docker": False},
)
user.initiate_chat(manager, message="What is the capital of France?")
messages = manager.chat_messages_for_summary(user)
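
As the change above shows, speaker_selection_method="round_robin" avoids the need for a speaker-selection LLM altogether: the next speaker is taken in turn, so the manager can be built with llm_config=None. A standalone sketch of that pattern; the agents carry no LLM config because their replies are not the point here.

import autogen

alice = autogen.ConversableAgent("alice", llm_config=False, human_input_mode="NEVER")
bob = autogen.ConversableAgent("bob", llm_config=False, human_input_mode="NEVER")

# Round-robin selection never consults an LLM to pick the next speaker.
groupchat = autogen.GroupChat(
    agents=[alice, bob],
    messages=[],
    max_round=2,
    speaker_selection_method="round_robin",
)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=None)
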