Merge pull request #172 from l3vels/feat/team-memory
Feat: team memory
Chkhikvadze authored Sep 26, 2023
2 parents c68bfa4 + c8bda8d commit 753ce04
Showing 30 changed files with 459 additions and 285 deletions.
9 changes: 8 additions & 1 deletion .hooks/commit-msg
@@ -1,8 +1,15 @@
#!/bin/bash

commit_message=$(cat "$1")
regex='^(feat|fix|docs|style|refactor|perf|test|chore|ci|build|merge)(\([a-zA-Z0-9_-]+\))?: .+'
regex='^(feat|fix|docs|style|refactor|perf|test|chore|ci|build)(\([a-zA-Z0-9_-]+\))?: .+'

# Check if the commit message starts with "Merge branch"
if [[ $commit_message =~ ^"Merge branch" ]]; then
# Exit successfully without doing anything
exit 0
fi

# Check for semantic commit message format
if [[ ! $commit_message =~ $regex ]]; then
echo "Commit message failed semantic commit message style."
echo "Please follow the pattern: <type>(<scope>): <message>"
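
The hook update replaces the "merge" commit type in the regex with an explicit early exit for messages that begin with "Merge branch". A quick way to sanity-check the pattern outside of git is Python's re module; the helper below is illustrative only and not part of the repository:

import re

# Same expression as the updated .hooks/commit-msg regex (POSIX ERE and
# Python's re agree for this pattern).
SEMANTIC_COMMIT = re.compile(
    r"^(feat|fix|docs|style|refactor|perf|test|chore|ci|build)(\([a-zA-Z0-9_-]+\))?: .+"
)

def check_commit_message(message: str) -> bool:
    """Mirror the hook: merge commits pass, everything else must be semantic."""
    if message.startswith("Merge branch"):
        return True
    return bool(SEMANTIC_COMMIT.match(message))

assert check_commit_message("feat(team): add shared Zep memory")
assert check_commit_message("Merge branch 'main' into feat/team-memory")
assert not check_commit_message("added team memory")
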
14 changes: 9 additions & 5 deletions apps/server/agents/agent_simulations/agent/dialogue_agent.py
@@ -14,7 +14,7 @@ def __init__(
name: str,
agent_with_configs: AgentWithConfigsOutput,
system_message: SystemMessage,
model: ChatOpenAI
model: ChatOpenAI,
) -> None:
self.name = name
self.agent_with_configs = agent_with_configs
@@ -52,10 +52,12 @@ def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
is_memory: bool,
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
self.is_memory = is_memory

def reset(self):
for agent in self.agents:
@@ -71,7 +73,7 @@ def inject(self, name: str, message: str):
# increment time
self._step += 1

def step(self) -> tuple[UUID, str]:
def step(self) -> tuple[UUID, str, str]:
message: str

# 1. choose next speaker
@@ -83,13 +85,15 @@ def step(self) -> tuple[UUID, str]:
message = speaker.send()

# 3. everyone receives message
for receiver in self.agents:
receiver.receive(speaker.name, message)
# For short memory
if not self.is_memory:
for receiver in self.agents:
receiver.receive(speaker.name, message)

# 4. increment time
self._step += 1
except Exception as err:
message = handle_agent_error(err)
self._step += 1

return speaker.agent_with_configs.agent.id, message
return speaker.agent_with_configs.agent.id, speaker.agent_with_configs.agent.name, message
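
DialogueSimulator now takes an is_memory flag, and step() returns the speaker's name alongside its id and message so callers can attribute each turn when persisting it. A minimal, self-contained sketch of the new control flow (toy classes, not the repository's DialogueAgent):

from typing import Callable, List

class ToyAgent:
    """Stand-in for DialogueAgent, just enough to show the is_memory branch."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.history: List[str] = []

    def send(self) -> str:
        return f"{self.name} speaks (knows {len(self.history)} past messages)"

    def receive(self, name: str, message: str) -> None:
        self.history.append(f"{name}: {message}")

class ToySimulator:
    def __init__(
        self,
        agents: List[ToyAgent],
        selection_function: Callable[[int, List[ToyAgent]], int],
        is_memory: bool,
    ) -> None:
        self.agents = agents
        self._step = 0
        self.select_next_speaker = selection_function
        self.is_memory = is_memory

    def step(self) -> tuple[str, str]:
        speaker = self.agents[self.select_next_speaker(self._step, self.agents)]
        message = speaker.send()
        # With team memory enabled the turn is persisted externally (Zep),
        # so it is not broadcast into every agent's in-process history.
        if not self.is_memory:
            for receiver in self.agents:
                receiver.receive(speaker.name, message)
        self._step += 1
        return speaker.name, message

agents = [ToyAgent("director"), ToyAgent("critic")]
simulator = ToySimulator(agents, lambda step, ags: step % len(ags), is_memory=True)
print(simulator.step())  # ('director', 'director speaks (knows 0 past messages)')
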
apps/server/agents/agent_simulations/agent/dialogue_agent_with_tools.py
@@ -11,7 +11,9 @@
SystemMessage,
)
from typings.agent import AgentWithConfigsOutput

from memory.zep.zep_memory import ZepMemory
from config import Config
from typings.user import UserOutput

class DialogueAgentWithTools(DialogueAgent):
def __init__(
@@ -21,34 +23,65 @@ def __init__(
system_message: SystemMessage,
model: ChatOpenAI,
tools: List[any],
session_id: str,
user: UserOutput,
is_memory: bool = False,
**tool_kwargs,
) -> None:
super().__init__(name, agent_with_configs, system_message, model)
# self.tools = load_tools(tool_names, **tool_kwargs)
self.tools = tools
self.session_id = session_id
self.user = user
self.is_memory = is_memory

def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
agent_chain = initialize_agent(

memory: ConversationBufferMemory

if self.is_memory:
memory = ZepMemory(
session_id=self.session_id,
url=Config.ZEP_API_URL,
api_key=Config.ZEP_API_KEY,
memory_key="chat_history",
return_messages=True,
)

memory.human_name = self.user.name
memory.ai_name = self.agent_with_configs.agent.name
memory.auto_save = False
else:
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True
)


agent = initialize_agent(
self.tools,
self.model,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
memory=ConversationBufferMemory(
memory_key="chat_history", return_messages=True
),
memory=memory,
agent_kwargs={
"system_message": self.system_message.content,
},
)

prompt = "\n".join(
self.message_history + [self.prefix]
)

res = agent.run(
input=prompt
)

message = AIMessage(
content=agent_chain.run(
input="\n".join(
[self.system_message.content] + self.message_history + [self.prefix]
)
)
content=res
)

return message.content
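
The agent now builds its memory up front: the project's ZepMemory wrapper when team memory is enabled (with auto_save turned off so the caller decides what gets persisted), otherwise a plain ConversationBufferMemory. A hedged sketch of that selection, assuming the same constructor arguments and Config fields as in the diff:

from langchain.memory import ConversationBufferMemory

from config import Config                    # project config, as in the diff
from memory.zep.zep_memory import ZepMemory  # project wrapper, as in the diff

def build_chat_memory(is_memory: bool, session_id: str, human_name: str, ai_name: str):
    """Return the memory object handed to initialize_agent(..., memory=...)."""
    if is_memory:
        memory = ZepMemory(
            session_id=session_id,
            url=Config.ZEP_API_URL,
            api_key=Config.ZEP_API_KEY,
            memory_key="chat_history",
            return_messages=True,
        )
        memory.human_name = human_name
        memory.ai_name = ai_name
        memory.auto_save = False  # turns are saved explicitly by the run loops
        return memory
    return ConversationBufferMemory(memory_key="chat_history", return_messages=True)

Disabling auto_save is presumably what lets AuthoritarianSpeaker and AgentDebates call save_human_message and save_ai_message themselves, so only completed turns land in the shared Zep session.
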
@@ -11,7 +11,7 @@
from agents.agent_simulations.agent.dialogue_agent_with_tools import DialogueAgentWithTools
from agents.agent_simulations.authoritarian.director_dialogue_agent_with_tools import DirectorDialogueAgentWithTools
from services.pubsub import ChatPubSubService
from l3_base import L3Base
from agents.base_agent import BaseAgent
from postgres import PostgresChatMessageHistory
from models.team import TeamModel
from utils.system_message import SystemMessageBuilder
@@ -23,9 +23,10 @@
from models.datasource import DatasourceModel
from typings.team_agent import TeamAgentRole
from utils.agent import convert_model_to_response
from config import Config
from memory.zep.zep_memory import ZepMemory


class L3AuthoritarianSpeaker(L3Base):
class AuthoritarianSpeaker(BaseAgent):
def __init__(
self,
settings: AccountSettings,
@@ -97,12 +98,21 @@ def run(self,
[""] + [f"{agent_config.agent.name}: {agent_config.agent.role}" for agent_config in agents_with_configs]
)

specified_topic = topic #self.generate_specified_prompt(topic, agent_summary, team)

memory = ZepMemory(
session_id=self.session_id,
url=Config.ZEP_API_URL,
api_key=Config.ZEP_API_KEY,
memory_key="chat_history",
return_messages=True,
)

specified_topic = topic #self.generate_specified_prompt(topic, agent_summary, team)
memory.human_name = self.user.name
memory.save_human_message(specified_topic)

print(f"Original topic:\n{topic}\n")
print(f"Detailed topic:\n{specified_topic}\n")
# print(f"Original topic:\n{topic}\n")
# print(f"Detailed topic:\n{specified_topic}\n")

# specified_topic_ai_message = history.create_ai_message(specified_topic)
# self.chat_pubsub_service.send_chat_message(chat_message=specified_topic_ai_message)
@@ -123,6 +133,9 @@ def run(self,
if director_agent.configs.model_version else "gpt-4"),
speakers=[agent_with_config for agent_with_config in agents_with_configs if agent_with_config.agent.id != director_agent.agent.id],
stopping_probability=self.stopping_probability,
session_id=self.session_id,
user=self.user,
is_memory=team.is_memory
)

agents = [director]
@@ -135,19 +148,28 @@ def run(self,
tools=self.get_tools(agent_with_configs, self.settings),
system_message=SystemMessage(content=SystemMessageBuilder(agent_with_configs).build()),
model=ChatOpenAI(openai_api_key=self.settings.openai_api_key,temperature=0.2, model_name="gpt-4"),
session_id=self.session_id,
user=self.user,
is_memory=team.is_memory
)
)

simulator = DialogueSimulator(
agents=agents,
selection_function=functools.partial(self.select_next_speaker, director=director),
is_memory=team.is_memory
)
simulator.reset()
simulator.inject("Audience member", specified_topic)

while True:
agent_id, message = simulator.step()
agent_id, agent_name, message = simulator.step()
ai_message = history.create_ai_message(message, None, agent_id)

if team.is_memory:
memory.ai_name = agent_name
memory.save_ai_message(message)

self.chat_pubsub_service.send_chat_message(chat_message=ai_message)

if director.stop:
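
With team memory on, the run loop above seeds the Zep session with the topic and then persists every turn under the speaking agent's name; setting ai_name before save_ai_message appears to be how the wrapper attributes each message. A condensed, non-runnable sketch of that flow, with the diff's self.* references written as locals:

memory = ZepMemory(
    session_id=session_id,
    url=Config.ZEP_API_URL,
    api_key=Config.ZEP_API_KEY,
    memory_key="chat_history",
    return_messages=True,
)
memory.human_name = user.name
memory.save_human_message(specified_topic)  # seed the session with the topic

while True:
    agent_id, agent_name, message = simulator.step()
    ai_message = history.create_ai_message(message, None, agent_id)

    if team.is_memory:
        memory.ai_name = agent_name      # attribute the turn to its speaker
        memory.save_ai_message(message)  # persist the turn to Zep

    chat_pubsub_service.send_chat_message(chat_message=ai_message)

    if director.stop:
        break
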
apps/server/agents/agent_simulations/authoritarian/director_dialogue_agent_with_tools.py
@@ -13,6 +13,7 @@
)
from agents.agent_simulations.agent.dialogue_agent_with_tools import DialogueAgentWithTools
from typings.agent import AgentWithConfigsOutput
from typings.user import UserOutput

class IntegerOutputParser(RegexParser):
def get_format_instructions(self) -> str:
@@ -29,8 +30,11 @@ def __init__(
speakers: List[DialogueAgentWithTools],
stopping_probability: float,
tools: List[any],
session_id: str,
user: UserOutput,
is_memory: bool,
) -> None:
super().__init__(name=name, agent_with_configs=agent_with_configs, system_message=system_message, model=model, tools=tools)
super().__init__(name=name, agent_with_configs=agent_with_configs, system_message=system_message, model=model, tools=tools, session_id=session_id, user=user, is_memory=is_memory)
self.speakers = speakers
self.next_speaker = ""

@@ -10,7 +10,7 @@
from agents.agent_simulations.agent.dialogue_agent import DialogueAgent, DialogueSimulator
from agents.agent_simulations.agent.dialogue_agent_with_tools import DialogueAgentWithTools

from l3_base import L3Base
from agents.base_agent import BaseAgent
from postgres import PostgresChatMessageHistory
from typings.agent import AgentWithConfigsOutput
from models.team import TeamModel
@@ -21,8 +21,10 @@
from tools.datasources.get_datasource_tools import get_datasource_tools
from models.datasource import DatasourceModel
from agents.handle_agent_errors import handle_agent_error
from config import Config
from memory.zep.zep_memory import ZepMemory

class L3AgentDebates(L3Base):
class AgentDebates(BaseAgent):
def __init__(
self,
settings: AccountSettings,
@@ -87,6 +89,17 @@ def run(self,
print(f"Original topic:\n{topic}\n")
print(f"Detailed topic:\n{specified_topic}\n")

memory = ZepMemory(
session_id=self.session_id,
url=Config.ZEP_API_URL,
api_key=Config.ZEP_API_KEY,
memory_key="chat_history",
return_messages=True,
)

memory.human_name = self.user.name
memory.save_human_message(specified_topic)

# specified_topic_ai_message = history.create_ai_message(specified_topic)
# self.chat_pubsub_service.send_chat_message(chat_message=specified_topic_ai_message)

@@ -101,20 +114,29 @@ def run(self,
if agent_with_config.configs.model_version else "gpt-4"),
tools=self.get_tools(agent_with_config, self.settings),
top_k_results=2,
session_id=self.session_id,
user=self.user,
is_memory=team.is_memory,
)
for agent_with_config in agents_with_configs
]

max_iters = 6
n = 0

simulator = DialogueSimulator(agents=dialogue_agents, selection_function=self.select_next_speaker)
simulator = DialogueSimulator(agents=dialogue_agents, selection_function=self.select_next_speaker, is_memory=team.is_memory)
simulator.reset()
simulator.inject("Moderator", specified_topic)

while n < max_iters:
agent_id, message = simulator.step()
agent_id, agent_name, message = simulator.step()
ai_message = history.create_ai_message(message, None, agent_id)

if team.is_memory:
memory.ai_name = agent_name
memory.save_ai_message(message)

self.chat_pubsub_service.send_chat_message(chat_message=ai_message)

n += 1
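
Both AgentDebates and AuthoritarianSpeaker now write the conversation into the same per-session Zep store. Assuming the project's ZepMemory wrapper follows LangChain's memory interface (the diff passes it as memory= to initialize_agent, which requires load_memory_variables), a later consumer could read a team session back roughly like this; the session id below is hypothetical:

from config import Config
from memory.zep.zep_memory import ZepMemory

memory = ZepMemory(
    session_id="existing-team-session-id",  # hypothetical id of a stored session
    url=Config.ZEP_API_URL,
    api_key=Config.ZEP_API_KEY,
    memory_key="chat_history",
    return_messages=True,
)

# With return_messages=True, LangChain-style memories return message objects
# under their memory_key.
for msg in memory.load_memory_variables({})["chat_history"]:
    print(type(msg).__name__, msg.content)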

apps/server/agents/base_agent.py
@@ -2,7 +2,7 @@
from typings.user import UserOutput
from typings.account import AccountOutput

class L3Base:
class BaseAgent:
def __init__(self, user: UserOutput, account: AccountOutput, session_id: UUID):
self.user = user
self.account = account
@@ -3,15 +3,15 @@
from postgres import PostgresChatMessageHistory
from memory.zep.zep_memory import ZepMemory
from services.pubsub import ChatPubSubService
from l3_base import L3Base
from agents.base_agent import BaseAgent
from config import Config
from agents.conversational.output_parser import ConvoOutputParser
from utils.system_message import SystemMessageBuilder
from typings.agent import AgentWithConfigsOutput
from typings.config import AccountSettings
from agents.handle_agent_errors import handle_agent_error

class L3Conversational(L3Base):
class ConversationalAgent(BaseAgent):
def run(
self,
settings: AccountSettings,