
Commit

Version with multiple agents and commands
pmarkun committed Oct 1, 2023
1 parent 30541c3 commit 7d453f1
Showing 13 changed files with 207 additions and 72 deletions.
1 change: 1 addition & 0 deletions .dockerignore
@@ -0,0 +1 @@
app/models/
2 changes: 1 addition & 1 deletion .gitignore
@@ -1,4 +1,4 @@
__pycache__/
.vscode/
.env

app/models/*
7 changes: 7 additions & 0 deletions Dockerfile
@@ -4,6 +4,13 @@ FROM python:3.8
# Copy requirements file and install dependencies
COPY app/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Install the dependencies needed for OpenBLAS (if required)
RUN apt-get update && apt-get install -y \
libopenblas-dev \
&& rm -rf /var/lib/apt/lists/*

# Install the Python package with the CMake arguments
RUN CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python

# Copy your application code
COPY app /app
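To sanity-check the OpenBLAS-backed build from inside the image, a small probe script can print llama.cpp's system info (a sketch; it assumes the llama_print_system_info binding exposed by llama-cpp-python, whose output format may vary by version):

    # check_blas.py — run inside the container with: python check_blas.py
    import llama_cpp

    # "BLAS = 1" in the report indicates the wheel was compiled against a BLAS backend
    print(llama_cpp.llama_print_system_info().decode())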
36 changes: 36 additions & 0 deletions app/agent/lex_chatgpt.py
@@ -0,0 +1,36 @@
import os

# LLM
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder
#Memory
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain

#CallBack
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

#Prompts
from .prompts import SYS_PROMPT
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Define the LLM with token streaming to stdout
llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)

prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content=SYS_PROMPT),  # The persistent system prompt
    MessagesPlaceholder(variable_name="chat_history"),  # Where the memory will be stored
    HumanMessagePromptTemplate.from_template("{human_input}"),  # Where the human input will be injected
])

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

chat_llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory,
)
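For reference, a minimal usage sketch of this chain, run from the app/ directory (it assumes OPENAI_API_KEY is set in the environment; the greeting is illustrative):

    from agent.lex_chatgpt import chat_llm_chain

    # Each call is recorded in ConversationBufferMemory, so follow-up calls
    # see the earlier turns through the chat_history placeholder
    print(chat_llm_chain.predict(human_input="Olá, Lex!"))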
59 changes: 59 additions & 0 deletions app/agent/lex_llama.py
@@ -0,0 +1,59 @@
# LLM
from langchain.llms import LlamaCpp
from langchain.prompts import PromptTemplate

#Prompt and Memory
from agent.prompts import SYS_PROMPT
from langchain.memory import ConversationBufferMemory

#Chain
from langchain.chains import LLMChain

#Callback para Streaming
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from utils import download_model

# GGUF model file to fetch from the Hugging Face Hub
LLM_MODEL = "llama-2-7b-chat.Q4_K_M.gguf"

# Download the model if needed and resolve its local path
MODEL_PATH = download_model("TheBloke/Llama-2-7b-Chat-GGUF", LLM_MODEL)


# Define the LLM

# Callbacks support token-wise streaming
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

llm = LlamaCpp(
    model_path=MODEL_PATH,
    temperature=0.75,
    max_tokens=512,
    top_p=1,
    callback_manager=callback_manager,
    verbose=True,  # Verbose is required to pass to the callback manager
    streaming=True,
    stop=["Human:"],
)
# Prompt in the Llama-2 chat format; note that {chat_history} is absent from the
# template, so the buffer memory below is recorded but never injected into the prompt
prompt = PromptTemplate.from_template("<s>[INST] <<SYS>>\n" + SYS_PROMPT + "\n<</SYS>>\n\n{human_input} [/INST]</s>")

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

chat_llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    memory=memory,
    verbose=True,
)
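Since this agent builds the raw prompt string itself, a quick way to inspect exactly what is handed to the model is to format the template directly (a sketch; the question is illustrative):

    from agent.lex_llama import prompt

    # Prints the Llama-2 chat-format string:
    # <s>[INST] <<SYS>> ...system prompt... <</SYS>> Quem é você? [/INST]</s>
    print(prompt.format(human_input="Quem é você?"))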
4 changes: 3 additions & 1 deletion app/agent/prompts.py
@@ -33,4 +33,6 @@
{chat_history}
Novo input: {input}
{agent_scratchpad}"""
{agent_scratchpad}"""

SYS_PROMPT = "You're Lex, an AI Legislative Assistant. From now on you'll speak and write ONLY in Portuguese. Responda sempre e apenas em Português. Não responda em Inglês.\n\nVocê é Lex, uma Inteligência Artificial, enérgica, bem humorada e um pouco sarcástica que atua como co-mandatária em uma proposta de candidatura coletiva progressista para a gestão pública."
110 changes: 45 additions & 65 deletions app/app.py
@@ -1,70 +1,31 @@
import os
from flask import Flask, render_template, request, session
from flask_socketio import SocketIO
from importlib import import_module
from dotenv import load_dotenv
load_dotenv()
#LLM
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder

#Memory
from langchain.memory import ConversationBufferMemory

from langchain.chains import LLMChain

#Agents
from langchain.agents.conversational.base import ConversationalAgent #Uses GPT-3.5 Format
from langchain.agents.conversational_chat.base import ConversationalChatAgent #Uses ChatGPT Format
from langchain.agents import AgentExecutor

from agent.prompts import PREFIX, SUFFIX, FORMAT_INSTRUCTIONS
from langchain.prompts import PromptTemplate
from tools.library import Library
load_dotenv(os.path.join(os.path.dirname(__file__), '..', '.env'))

ACTIVE_AGENTS = ["lex_chatgpt", "lex_llama"]

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
loaded_agents = {}
for agent in ACTIVE_AGENTS:
module = import_module(f"agent.{agent}")
loaded_agents[agent] = module.chat_llm_chain

OPENAI_API_KEY = os.getenv('OPENAI_APIKEY')
current_agent = os.getenv('DEFAULT_AGENT', ACTIVE_AGENTS[0])  # default agent *name*, not the chain object

# Define the LLM
llm = OpenAI()
llm = ChatOpenAI(model_name="gpt-3.5-turbo")

# Define the prefixes and settings
ai_prefix = "Lex"
human_prefix = "Usuário"
# Load the commands
ACTIVE_COMMANDS = ["agent"]

# Tools
#biblioteca = Library()
#library_dir = os.getenv("LIBRARY_DIR") or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'library')
#library_tools = biblioteca.generate_tools_for_library(library_dir)
#tools = []# + library_tools

# Memory
prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content="You are a chatbot having a conversation with a human."),  # The persistent system prompt
    MessagesPlaceholder(variable_name="chat_history"),  # Where the memory will be stored
    HumanMessagePromptTemplate.from_template("{human_input}"),  # Where the human input will be injected
])
loaded_commands = {}
for command in ACTIVE_COMMANDS:
module = import_module(f"commands.{command}_command")
loaded_commands[command] = module.run

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

llm = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()])

chat_llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory,
)



from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*")
app.secret_key = os.getenv('SECRET_KEY', 'supersecret')  # A secret key is required to use sessions
socketio = SocketIO(app, cors_allowed_origins="*", ping_timeout=120)

@app.route('/')
def index():
@@ -77,20 +38,39 @@ def index():
def robots():
    return app.send_static_file('robots.txt')

@socketio.on('connect')
def initialize_session():
    session['current_agent'] = current_agent  # Set the default agent

@socketio.on('select_agent')
def handle_agent_selection(agent_name):
    if agent_name in loaded_agents:
        session['current_agent'] = agent_name

@socketio.on('message')
def handle_message(message):
    user_input = message.get('message')
    room = request.sid  # Get the current session ID

    current_agent_name = session.get('current_agent', ACTIVE_AGENTS[0])
    current_agent = loaded_agents[current_agent_name]

    if not user_input:
        socketio.emit('message', {'resposta': 'mensagem não fornecida'}, room=room)  # Send to the given room
        return None

    socketio.emit('start_message', room=room)  # Send to the given room

    if user_input.startswith('!'):
        command, *args = user_input[1:].split()
        if command in loaded_commands:
            response = f"[{loaded_commands[command](args, session, ACTIVE_AGENTS, loaded_agents)}]"
        else:
            response = "Comando desconhecido. Por favor, tente novamente."
    else:
        response = current_agent.run(human_input=user_input)

    socketio.emit('message', response, room=room)  # Send to the given room
    socketio.emit('end_message', room=room)  # Send to the given room

if __name__ == "__main__":
    socketio.run(app, host='0.0.0.0', port=5000, debug=True, allow_unsafe_werkzeug=True)
    socketio.run(app, host='0.0.0.0', port=5000, debug=True)
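To exercise the agent-switching flow end to end, a small python-socketio client can drive the server (a sketch; it assumes the python-socketio package and a server already running on localhost:5000):

    import socketio

    sio = socketio.Client()

    @sio.on('message')
    def on_message(data):
        print(data)

    sio.connect('http://localhost:5000')
    sio.emit('message', {'message': '!agent list'})
    sio.emit('message', {'message': '!agent set lex_llama'})
    sio.emit('message', {'message': 'Olá, Lex!'})
    sio.sleep(10)  # give the server time to stream the reply
    sio.disconnect()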
17 changes: 17 additions & 0 deletions app/commands/agent_command.py
@@ -0,0 +1,17 @@
# commands/agent_command.py

def run(args, session, ACTIVE_AGENTS, loaded_agents):
    if args:
        subcommand = args[0]
        if subcommand == "list":
            return f"Agentes disponíveis: {', '.join(ACTIVE_AGENTS)}"
        elif subcommand == "set":
            new_agent = args[1] if len(args) > 1 else None
            if new_agent in loaded_agents:
                session['current_agent'] = new_agent
                return f"Agente configurado para {new_agent}"
            else:
                return "Agente inválido."
        else:
            return "Subcomando desconhecido. Use 'list' ou 'set'."
    else:
        current_agent = session.get('current_agent', ACTIVE_AGENTS[0])
        return f"O agente atual é {current_agent}"
4 changes: 3 additions & 1 deletion app/requirements.txt
@@ -26,4 +26,6 @@ flask
flask_socketio

gunicorn
eventlet
eventlet

huggingface-hub==0.17.3
13 changes: 10 additions & 3 deletions app/templates/index.html
@@ -18,16 +18,22 @@ <h1>Lex.AI</h1>
</div>

<script>
var socket = io.connect(window.location.origin);
var socket = io.connect(window.location.origin, {
    reconnection: true,
    reconnectionDelay: 1000,
    reconnectionDelayMax: 5000,
    reconnectionAttempts: Infinity
});

let aiMessageDiv;
var chatBox = document.getElementById('chat-box');

function addMessage(message, classe) {
    var chatBox = document.getElementById('chat-box');
    var messageDiv = document.createElement('div');
    messageDiv.classList.add('message', classe);
    messageDiv.textContent = message;
    chatBox.appendChild(messageDiv);
    //chatBox.scrollTop = chatBox.scrollHeight;
    chatBox.scrollTop = chatBox.scrollHeight;

    if (classe === 'ai-message') {
        aiMessageDiv = messageDiv;
@@ -54,6 +60,7 @@ <h1>Lex.AI</h1>
});

socket.on('end_message', function() {
    chatBox.scrollTop = chatBox.scrollHeight;
    aiMessageDiv = null;
});

22 changes: 22 additions & 0 deletions app/utils.py
@@ -0,0 +1,22 @@
import os
from huggingface_hub import hf_hub_download

def download_model(repo, model_file):
    current_path = os.path.dirname(os.path.abspath(__file__))
    model_directory = os.path.join(current_path, "models")

    # Create the 'models' directory if it does not exist
    os.makedirs(model_directory, exist_ok=True)

    model_path = os.path.join(model_directory, model_file)

    if not os.path.exists(model_path):
        print(f"Model {repo}/{model_file} not found locally. Downloading...")
        # local_dir places the file at models/<model_file>, so model_path stays valid;
        # with cache_dir the file would land inside the hub cache layout instead
        hf_hub_download(
            repo_id=repo,
            filename=model_file,
            local_dir=model_directory,
            local_dir_use_symlinks=False,
        )

    return model_path
4 changes: 3 additions & 1 deletion docker-compose.yml
@@ -31,4 +31,6 @@ services:
OPENAI_API_KEY: $OPENAI_APIKEY
WEAVIATE_URL: 'http://localhost:8080'
depends_on:
- weaviate
- weaviate
volumes:
- ./app/models:/app/models
Empty file added models/models.txt
