
Ability to pass a system prompt for chat completion #902


Open
wants to merge 2 commits into base: main
21 changes: 21 additions & 0 deletions lib/langchain/vectorsearch/base.rb
@@ -168,6 +168,27 @@ def generate_hyde_prompt(question:)
prompt_template.format(question: question)
end

# Build the chat messages from the question and retrieved context, then chat with the LLM
# @param question [String] The question to ask
# @param context [Array<String>] The context documents retrieved from the vector store
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def generate_messages_and_chat(question: , context: , system_prompt: nil, &block)
Copilot AI Apr 17, 2025


The method signature includes stray spaces before the commas after the keywords 'question:' and 'context:'. It should be defined as 'def generate_messages_and_chat(question:, context:, system_prompt: nil, &block)' for readability.

Suggested change
def generate_messages_and_chat(question: , context: , system_prompt: nil, &block)
def generate_messages_and_chat(question:, context:, system_prompt: nil, &block)

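For reference: in Ruby, a keyword parameter written with no default (question:) is a required keyword argument, so the stray space before the comma is a readability issue rather than a behavioral one. A minimal sketch of the distinction, using a hypothetical method that is not part of this PR:

def greet(name:, greeting: "Hello")
  # name: has no default, so callers must supply it;
  # greeting: has a default and may be omitted.
  "#{greeting}, #{name}!"
end

greet(name: "Ada")                 # => "Hello, Ada!"
greet(name: "Ada", greeting: "Hi") # => "Hi, Ada!"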

context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [
system_prompt ? {role: 'system', content: system_prompt} : nil,
{role: "user", content: prompt}
].compact
response = llm.chat(messages: messages, &block)

response.context = context
response
end

# Retrieval Augmented Generation (RAG)
#
# @param question [String] User's question
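For reference, a minimal usage sketch of the new system_prompt option, assuming a configured Chroma store and OpenAI LLM (the names and values below are illustrative, not taken from this PR):

require "langchain"

chroma = Langchain::Vectorsearch::Chroma.new(
  url: ENV["CHROMA_URL"],
  index_name: "documents",
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
)

# When system_prompt is given, generate_messages_and_chat prepends it as a
# {role: "system", ...} message; when it is nil, the entry is dropped by #compact.
response = chroma.ask(
  question: "What is the refund policy?",
  k: 4,
  system_prompt: "You are a helpful assistant. Answer only from the provided context."
)
puts response.completion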
13 changes: 3 additions & 10 deletions lib/langchain/vectorsearch/chroma.rb
@@ -123,24 +123,17 @@ def similarity_search_by_vector(
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

context = search_results.map do |result|
result.document
end

context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end

private
13 changes: 4 additions & 9 deletions lib/langchain/vectorsearch/elasticsearch.rb
@@ -141,22 +141,17 @@ def default_query(query_vector)
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

context = search_results.map do |result|
result[:input]
end.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end

# Search for similar texts
12 changes: 3 additions & 9 deletions lib/langchain/vectorsearch/epsilla.rb
@@ -125,23 +125,17 @@ def similarity_search_by_vector(embedding:, k: 4)
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

context = search_results.map do |result|
result.to_s
end
context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end
end
end
15 changes: 4 additions & 11 deletions lib/langchain/vectorsearch/milvus.rb
@@ -139,22 +139,15 @@ def similarity_search_by_vector(embedding:, k: 4)
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

content_data = search_results.dig("data").map { |result| result.dig("content") }
context = search_results.dig("data").map { |result| result.dig("content") }

context = content_data.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end
end
end
12 changes: 3 additions & 9 deletions lib/langchain/vectorsearch/pgvector.rb
@@ -144,23 +144,17 @@ def similarity_search_by_vector(embedding:, k: 4)
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

context = search_results.map do |result|
result.content.to_s
end
context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end
end
end
12 changes: 3 additions & 9 deletions lib/langchain/vectorsearch/pinecone.rb
@@ -168,24 +168,18 @@ def similarity_search_by_vector(embedding:, k: 4, namespace: "", filter: nil)
# @param question [String] The question to ask
# @param namespace [String] The namespace to search in
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @param filter [String] The filter to use
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, namespace: "", filter: nil, k: 4, &block)
def ask(question:, namespace: "", filter: nil, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, namespace: namespace, filter: filter, k: k)

context = search_results.map do |result|
result.dig("metadata").to_s
end
context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end

# Pinecone index
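For reference, the Pinecone variant keeps its extra namespace and filter parameters alongside the new system_prompt, and the &block is still forwarded to llm.chat for streaming. A minimal sketch, given an already configured Langchain::Vectorsearch::Pinecone instance named store (illustrative values; streaming assumes the underlying LLM supports it):

# namespace and filter scope the similarity search; system_prompt is
# prepended to the chat messages; the block receives streamed chunks.
store.ask(
  question: "What changed in Q3?",
  namespace: "reports",
  k: 4,
  system_prompt: "Answer only from the provided context."
) do |chunk|
  print chunk
end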
12 changes: 3 additions & 9 deletions lib/langchain/vectorsearch/qdrant.rb
@@ -136,23 +136,17 @@ def similarity_search_by_vector(
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [String] The answer to the question
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

context = search_results.map do |result|
result.dig("payload").to_s
end
context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end
end
end
12 changes: 3 additions & 9 deletions lib/langchain/vectorsearch/weaviate.rb
@@ -142,23 +142,17 @@ def similarity_search_by_vector(embedding:, k: 4)
# Ask a question and return the answer
# @param question [String] The question to ask
# @param k [Integer] The number of results to have in context
# @param system_prompt [String] Content of the prompt to send as "system"
# @yield [String] Stream responses back one String at a time
# @return [Hash] The answer
def ask(question:, k: 4, &block)
def ask(question:, k: 4, system_prompt: nil, &block)
search_results = similarity_search(query: question, k: k)

context = search_results.map do |result|
result.dig("content").to_s
end
context = context.join("\n---\n")

prompt = generate_rag_prompt(question: question, context: context)

messages = [{role: "user", content: prompt}]
response = llm.chat(messages: messages, &block)

response.context = context
response
generate_messages_and_chat(question: question, context: context, system_prompt: system_prompt, &block)
end

private
12 changes: 12 additions & 0 deletions spec/langchain/vectorsearch/chroma_spec.rb
@@ -166,6 +166,18 @@
end
end

context 'with system prompt' do
let(:system_prompt) { 'You are a helpful assistant' }
before do
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(text)
end

it "asks a question and returns the answer" do
expect(subject.ask(question: question, system_prompt: system_prompt).completion).to eq(answer)
end
end

context "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }

12 changes: 12 additions & 0 deletions spec/langchain/vectorsearch/elasticsearch_spec.rb
@@ -274,6 +274,18 @@
end
end

context 'with system prompt' do
let(:system_prompt) { 'You are a helpful assistant' }
before do
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(text)
end

it "asks a question and returns the answer" do
expect(subject.ask(question: question, k: 4, system_prompt: system_prompt).completion).to eq(answer)
end
end

context "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }

12 changes: 12 additions & 0 deletions spec/langchain/vectorsearch/epsilla_spec.rb
@@ -236,6 +236,18 @@
end
end

context 'with system prompt' do
let(:system_prompt) { 'You are a helpful assistant' }
before do
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(text)
end

it "asks a question and returns the answer" do
expect(subject.ask(question: question, k: k, system_prompt: system_prompt).completion).to eq(answer)
end
end

context "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }

12 changes: 12 additions & 0 deletions spec/langchain/vectorsearch/milvus_spec.rb
@@ -141,6 +141,18 @@
end
end

context 'with system prompt' do
let(:system_prompt) { 'You are a helpful assistant' }
before do
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(text)
end

it "asks a question and returns the answer" do
expect(subject.ask(question: question, system_prompt: system_prompt).completion).to eq(answer)
end
end

context "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }

11 changes: 11 additions & 0 deletions spec/langchain/vectorsearch/pgvector_spec.rb
@@ -234,6 +234,17 @@
end
end

context 'with system prompt' do
let(:system_prompt) { 'You are a helpful assistant' }
before do
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(text)
end

it "asks a question and returns the answer" do
expect(subject.ask(question: question, k: k, system_prompt: system_prompt).completion).to eq(answer)
end
end

context "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }

16 changes: 16 additions & 0 deletions spec/langchain/vectorsearch/pinecone_spec.rb
@@ -410,6 +410,22 @@
end
end

describe "with system prompt" do
let(:system_prompt) { 'You are a helpful assistant' }

before do
allow(subject).to receive(:similarity_search).with(
query: question, namespace: "", filter: nil, k: k
).and_return(matches)
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(metadata.to_s)
end

it "asks a question" do
expect(subject.ask(question: question, system_prompt: system_prompt).completion).to eq(answer)
end
end

describe "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }

12 changes: 12 additions & 0 deletions spec/langchain/vectorsearch/qdrant_spec.rb
@@ -157,6 +157,18 @@
end
end

context 'with system prompt' do
let(:system_prompt) { 'You are a helpful assistant' }
before do
allow(subject.llm).to receive(:chat).with(messages: [{role: 'system', content: system_prompt}, *messages]).and_return(response)
expect(response).to receive(:context=).with(text)
end

it "asks a question and returns the answer" do
expect(subject.ask(question: question, k: k, system_prompt: system_prompt).completion).to eq(answer)
end
end

context "with block" do
let(:block) { proc { |chunk| puts "Received chunk: #{chunk}" } }
