test(examples): Add some examples leveraging pydantic-AI and other chatlas alternatives #66


Open · wants to merge 26 commits into main

Commits (26)
e785f63  Add some examples leveraging pydantic-AI (karangattu, Jun 3, 2025)
2f19d5a  linting files (karangattu, Jun 3, 2025)
02df719  remove requirements.txt (karangattu, Jun 3, 2025)
6205148  add more alternatives to chatlas (karangattu, Jun 18, 2025)
027acf5  remove comments in tool calling example (karangattu, Jun 18, 2025)
63e93bd  add extra packages (karangattu, Jun 18, 2025)
657015b  add missing structured_output for llm (karangattu, Jun 18, 2025)
7727633  Update pkg-py/tests/playwright/chat/langchain/structured_output/app.py (karangattu, Jun 30, 2025)
c3d5f53  Update pkg-py/tests/playwright/chat/llama-index/structured_output/app.py (karangattu, Jun 30, 2025)
d3bdfaa  Update pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py (karangattu, Jun 30, 2025)
cf765ed  Update pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py (karangattu, Jun 30, 2025)
25f82b5  Update pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py (karangattu, Jun 30, 2025)
7cad49a  remove data sci adventure example for pydantic (karangattu, Jul 1, 2025)
5e13404  update the example to make it into a shiny app (karangattu, Jul 1, 2025)
8128c03  use context for maintaining state (karangattu, Jul 1, 2025)
dee6dcf  maintain context across the conversation (karangattu, Jul 1, 2025)
2ac6b95  make it more langchain specific (karangattu, Jul 1, 2025)
b656214  preserve context within the chat conversation (karangattu, Jul 1, 2025)
6cf3494  allow streaming now in tool calling example (karangattu, Jul 1, 2025)
f9c38c8  Add session-based chat history retrieval function (karangattu, Jul 1, 2025)
b00f8a6  remove workout planner app example for pydantic ai (karangattu, Jul 1, 2025)
8ff507a  make the basic llama-index have streaming responses (karangattu, Jul 1, 2025)
61e0269  Allow maintaining context and streaming for tool calling example (karangattu, Jul 1, 2025)
527e985  remove multiple output structures and allow streaming (karangattu, Jul 1, 2025)
a3e7261  make the app even streamlined (karangattu, Jul 1, 2025)
7093e2b  use a streaming and stateful chatbot for pydantic ai basic app (karangattu, Jul 1, 2025)
74 changes: 74 additions & 0 deletions pkg-py/tests/playwright/chat/langchain/basic/app.py
@@ -0,0 +1,74 @@
import os
from dotenv import load_dotenv

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
from shiny.express import ui

_ = load_dotenv()

model = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4.1-nano-2025-04-14",
)

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant. You answer in a friendly and concise manner.",
        ),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

store = {}


def get_session_history(session_id: str):
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


chain_with_history = RunnableWithMessageHistory(
    prompt | model,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)

ui.page_opts(
    title="Shiny Chat with LangChain History",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
    messages=[
        {
            "content": "Hello! I'm a chatbot that can remember our conversation. How can I help you today?",
            "role": "assistant",
        }
    ],
)
chat.ui()


@chat.on_user_submit
async def handle_user_input(user_input: str):
    config = {"configurable": {"session_id": "shiny_session_1"}}
    response_stream = chain_with_history.astream(
        {"input": user_input},
        config=config,
    )

    async def stream_wrapper():
        async for chunk in response_stream:
            yield chunk.content

    await chat.append_message_stream(stream_wrapper())
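
Note: a minimal sketch of how the history wiring above behaves outside Shiny (assumes OPENAI_API_KEY is set). Each distinct session_id gets its own InMemoryChatMessageHistory, so later turns with the same id can recall earlier ones:

    # Invoke the chain directly; "demo" is a hypothetical session id.
    reply = chain_with_history.invoke(
        {"input": "My name is Ada."},
        config={"configurable": {"session_id": "demo"}},
    )
    print(reply.content)
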
44 changes: 44 additions & 0 deletions pkg-py/tests/playwright/chat/langchain/structured_output/app.py
@@ -0,0 +1,44 @@
import os
from typing import Optional

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from shiny.express import ui

_ = load_dotenv()


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


_ = Joke.model_rebuild()

chat_client = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4o",
)

ui.page_opts(
    title="Hello LangChain Chat Model using structured output",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
    messages=["Hello! How can I help you today?"],
)
chat.ui()


@chat.on_user_submit
async def handle_user_input(user_input: str):
    # Use ainvoke so the structured-output call does not block the event loop.
    joke = await chat_client.with_structured_output(Joke).ainvoke(user_input)
    joke_text = f"{joke.setup}\n\n{joke.punchline}\n\nRating: {joke.rating if joke.rating is not None else 'N/A'}"
    await chat.append_message(joke_text)
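
Note: with_structured_output(Joke) parses the model's reply into a validated Joke instance, which is why the handler can read fields directly. A quick sketch (hypothetical prompt):

    joke = chat_client.with_structured_output(Joke).invoke("Tell me a joke about cats")
    joke.model_dump()  # e.g. {"setup": "...", "punchline": "...", "rating": 7}
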
114 changes: 114 additions & 0 deletions pkg-py/tests/playwright/chat/langchain/tool_calling/app.py
@@ -0,0 +1,114 @@
import os
from datetime import datetime

from dotenv import load_dotenv
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from shiny.express import ui

_ = load_dotenv()


@tool
def get_current_time() -> str:
    """Get the current time in HH:MM:SS format."""
    return datetime.now().strftime("%H:%M:%S")


@tool
def get_current_date() -> str:
    """Get the current date in YYYY-MM-DD format."""
    return datetime.now().strftime("%Y-%m-%d")


@tool
def get_current_weather(city: str) -> str:
    """Get the current weather for a given city."""
    return f"The current weather in {city} is sunny with a temperature of 25°C."


@tool
def calculator(expression: str) -> str:
    """Evaluate a basic arithmetic expression, e.g. '2 + 3 * 4'."""
    # NOTE: eval() is unsafe on untrusted input. This test app strips the
    # builtins as a precaution; see the ast-based sketch after this file
    # for a safer alternative.
    return str(eval(expression, {"__builtins__": {}}, {}))


tools = [get_current_time, get_current_date, calculator, get_current_weather]

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant"),
        MessagesPlaceholder("chat_history", optional=True),
        ("human", "{input}"),
        MessagesPlaceholder("agent_scratchpad"),
    ]
)

llm = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4.1-nano-2025-04-14",
)

agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)

store = {}


def get_session_history(session_id: str):
    """
    Retrieve the chat history for a given session ID.
    If no history exists, a new one is created.
    """
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


agent_with_chat_history = RunnableWithMessageHistory(
    agent_executor,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
)

ui.page_opts(
    title="Shiny Chat with LangChain Agent",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
    messages=[
        {
            "content": "Hello! I'm a chatbot with tools. I can get the time, date, weather, or do calculations. I'll also remember our conversation. How can I help?",
            "role": "assistant",
        }
    ],
)
chat.ui()


@chat.on_user_submit
async def handle_user_input(user_input: str):
    """
    Handle user input by streaming the agent's response.
    """
    config = {"configurable": {"session_id": "shiny_session_tools_1"}}

    async def stream_response():
        # astream_events surfaces fine-grained agent events; forward only
        # the token chunks emitted by the chat model.
        async for event in agent_with_chat_history.astream_events(
            {"input": user_input}, config=config, version="v1"
        ):
            kind = event["event"]
            if kind == "on_chat_model_stream":
                content = event["data"]["chunk"].content
                if content:
                    yield content

    await chat.append_message_stream(stream_response())
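
Note: a safer drop-in for the calculator tool above (a sketch, not part of the diff): walk the expression's AST and evaluate only arithmetic on numeric literals, instead of calling eval():

    import ast
    import operator

    # Map AST operator nodes to their arithmetic implementations.
    _OPS = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
    }

    def safe_eval(expression: str) -> float:
        def walk(node):
            if isinstance(node, ast.Expression):
                return walk(node.body)
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return node.value
            if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
                return _OPS[type(node.op)](walk(node.left), walk(node.right))
            if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
                return _OPS[type(node.op)](walk(node.operand))
            # Anything else (names, calls, attributes) is rejected.
            raise ValueError(f"Unsupported expression: {expression!r}")

        return walk(ast.parse(expression, mode="eval"))
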
60 changes: 60 additions & 0 deletions pkg-py/tests/playwright/chat/llama-index/basic/app.py
@@ -0,0 +1,60 @@
from dotenv import load_dotenv
from llama_index.core.agent.workflow import FunctionAgent, AgentStream
from llama_index.core.workflow import Context
from llama_index.llms.openai import OpenAI
from shiny.express import ui

_ = load_dotenv()

llm = OpenAI(
    model="gpt-4.1-nano-2025-04-14",
)

ui.page_opts(
    title="Shiny Chat with LlamaIndex",
    fillable=True,
    fillable_mobile=True,
)

agent = FunctionAgent(
    tools=[],
    llm=llm,
    system_prompt="You are a pirate with a colorful personality.",
)

# A single Context preserves conversation state across agent runs.
ctx = Context(agent)

chat = ui.Chat(
    id="chat",
    messages=[
        {
            "role": "assistant",
            "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! Ask me anything, matey!",
        },
    ],
)
chat.ui()


async def stream_response_from_agent(user_message: str, context: Context):
    handler = agent.run(user_msg=user_message, ctx=context)

    async for event in handler.stream_events():
        if isinstance(event, AgentStream):
            if event.delta:
                yield event.delta

    await handler


@chat.on_user_submit
async def handle_user_input(user_input: str):
    # Receive the submitted message directly, matching the other examples,
    # instead of re-reading it from chat.messages().
    async def stream_generator():
        async for chunk in stream_response_from_agent(user_input, ctx):
            yield chunk

    await chat.append_message_stream(stream_generator())
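
Note: the same agent/Context pair can be exercised outside Shiny (a minimal sketch; assumes OPENAI_API_KEY is set and llama-index-llms-openai is installed):

    import asyncio

    async def main():
        handler = agent.run(user_msg="Who are you, matey?", ctx=ctx)
        async for event in handler.stream_events():
            if isinstance(event, AgentStream):
                print(event.delta or "", end="")
        await handler  # wait for the run to finish

    asyncio.run(main())
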
99 changes: 99 additions & 0 deletions pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py
@@ -0,0 +1,99 @@
import os

from chatlas import ChatOpenAI
from llama_index.core import StorageContext, load_index_from_storage
from shiny.express import ui

# chatlas's ChatOpenAI reads OPENAI_API_KEY from the environment.
_ = os.environ.get("OPENAI_API_KEY")


# Load the knowledge store (index) from disk
try:
    storage_context = StorageContext.from_defaults(persist_dir="./storage")
    index = load_index_from_storage(storage_context)
    print("LlamaIndex loaded successfully from ./storage")
except Exception as e:
    print(f"Error loading LlamaIndex: {e}")
    print(
        "Please ensure you have run the index creation script first if this is your initial run."
    )
    from llama_index.core import Document, VectorStoreIndex

    print("Creating a dummy index for demonstration purposes...")
    bookstore_documents = [
        "Our shipping policy states that standard shipping takes 3-5 business days. Express shipping takes 1-2 business days. Free shipping is offered on all orders over $50.",
        "Returns are accepted within 30 days of purchase, provided the book is in its original condition. To initiate a return, please visit our 'Returns' page on the website and fill out the form.",
        "The 'BookWorm Rewards' program offers members 10% off all purchases and early access to sales. You earn 1 point for every $1 spent.",
        "We accept Visa, Mastercard, American Express, and PayPal.",
        "Currently, we do not offer international shipping outside of the United States and Canada.",
        "The book 'The Midnight Library' by Matt Haig is a New York Times bestseller. It explores themes of regret and parallel lives.",
        "Orders placed before 2 PM EST are processed on the same day.",
    ]
    documents = [Document(text=d) for d in bookstore_documents]
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir="./storage")
    print("Dummy index created and saved.")


def retrieve_trusted_content(query: str, top_k: int = 3):
    """
    Retrieve relevant content from the bookstore's knowledge base.
    This acts as the "lookup" for our customer service assistant.

    Parameters
    ----------
    query
        The customer's question used to semantically search the knowledge store.
    top_k
        The number of most relevant policy/book excerpts to retrieve.
    """
    retriever = index.as_retriever(similarity_top_k=top_k)
    nodes = retriever.retrieve(query)
    # Format the retrieved content clearly so Chatlas can use it as "trusted" information
    return [f"<excerpt>{x.text}</excerpt>" for x in nodes]


chat_client = ChatOpenAI(
    system_prompt=(
        "You are 'BookWorm Haven's Customer Service Assistant'. "
        "Your primary goal is to help customers with their queries about shipping, returns, "
        "payment methods, and book information based *only* on the provided trusted content. "
        "If you cannot answer the question using the trusted content, politely state that "
        "you don't have that information and suggest they visit the 'Help' section of the website."
    ),
    model="gpt-4o-mini",
)

# This is where Chatlas learns to "look up" information when needed.
chat_client.register_tool(retrieve_trusted_content)


ui.page_opts(
    title="BookWorm Haven Customer Service",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
    messages=[
        """
        Hello! I am BookWorm Haven's Customer Service Assistant.

        Here are some examples of what you can ask me:

        - <span class="suggestion"> How long does standard shipping take? </span>
        - <span class="suggestion"> What is your return policy? </span>
        - <span class="suggestion"> Can you tell me about 'The Midnight Library'? </span>
        """
    ],
)
chat.ui()


# Generate a response when the user submits a message
@chat.on_user_submit
async def handle_user_input(user_input: str):
    response = await chat_client.stream_async(user_input)
    await chat.append_message_stream(response)
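
Note: the except branch above falls back to a dummy index on first run. A minimal sketch of the index creation script it refers to (hypothetical; assumes a ./docs directory of text files):

    from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

    # Read every document under ./docs, embed it, and persist the index
    # to ./storage so the app can load it with load_index_from_storage().
    documents = SimpleDirectoryReader("./docs").load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir="./storage")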