Skip to content

Commit

Permalink
Python SK: Use ChatGPT for Semantic Functions (microsoft#643)
Browse files Browse the repository at this point in the history
### Motivation and Context
Users want to use ChatGPT as the service for their semantic functions.
This feature exists with the C# SK. ChatGPT is perfectly capable of
doing this. Additionally added more end-to-end tests with the intention
of being able to run them as integration tests for the Python SK.

### Description
- `OpenAIChatCompletion` now additionally inherits from
`TextCompletionClientBase`. This extends to `AzureOpenAIChatCompletion`.
- If a chat service is also a `TextCompletionClientBase`, it will be
added to both `text_services` and `chat_services`.
- Created 4 end-to-end tests that target the combination of Azure,
OpenAI, text service, and chat service. Abstracted common behavior for
these tests.
- Added asserts to these end-to-end tests. These should be run as
integration tests, but that will be handled in a separate PR.
  • Loading branch information
awharrison-28 authored Apr 25, 2023
1 parent 0b01986 commit 548429d
Show file tree
Hide file tree
Showing 9 changed files with 211 additions and 113 deletions.
2 changes: 1 addition & 1 deletion FEATURE_MATRIX.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
|---|---|---|---|
| TextGeneration ||| Example: Text-Davinci-003 |
| TextEmbeddings ||| Example: Text-Embeddings-Ada-002 |
| ChatCompletion || | Example: GPT4, Chat-GPT |
| ChatCompletion || | Example: GPT4, Chat-GPT |
| Image Generation ||| Example: Dall-E |

## AI Service Endpoints
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,16 @@
ChatCompletionClientBase,
)
from semantic_kernel.connectors.ai.chat_request_settings import ChatRequestSettings
from semantic_kernel.connectors.ai.complete_request_settings import (
CompleteRequestSettings,
)
from semantic_kernel.connectors.ai.text_completion_client_base import (
TextCompletionClientBase,
)
from semantic_kernel.utils.null_logger import NullLogger


class OpenAIChatCompletion(ChatCompletionClientBase):
class OpenAIChatCompletion(ChatCompletionClientBase, TextCompletionClientBase):
_model_id: str
_api_key: str
_org_id: Optional[str] = None
Expand Down Expand Up @@ -118,3 +124,27 @@ async def complete_chat_async(
# TODO: tracking on token counts/etc.

return response.choices[0].message.content

async def complete_simple_async(
    self, prompt: str, request_settings: CompleteRequestSettings
) -> str:
    """
    Complete the given prompt by routing it through the chat endpoint.

    The prompt is wrapped as a single "user" chat message and the relevant
    text-completion settings are mapped onto chat request settings.
    Cannot return multiple completions. Cannot return logprobs.

    Arguments:
        prompt {str} -- The prompt to complete.
        request_settings {CompleteRequestSettings} -- The request settings.

    Returns:
        str -- The completed text.
    """
    # Carry over only the sampling knobs that have a chat-side equivalent.
    as_chat_settings = ChatRequestSettings(
        temperature=request_settings.temperature,
        top_p=request_settings.top_p,
        presence_penalty=request_settings.presence_penalty,
        frequency_penalty=request_settings.frequency_penalty,
        max_tokens=request_settings.max_tokens,
    )
    messages = [("user", prompt)]
    return await self.complete_chat_async(messages, as_chat_settings)
5 changes: 5 additions & 0 deletions python/semantic_kernel/kernel_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,11 @@ def add_chat_service(
if self._default_chat_service is None:
self._default_chat_service = service_id

if isinstance(service, TextCompletionClientBase):
self.add_text_service(service_id, service)
if self._default_text_service is None:
self._default_text_service = service_id

return self

def add_embedding_service(
Expand Down
111 changes: 0 additions & 111 deletions python/tests/end-to-end/basics.py

This file was deleted.

19 changes: 19 additions & 0 deletions python/tests/end-to-end/basics_with_azure_oai_chat_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Copyright (c) Microsoft. All rights reserved.
"""End-to-end check: summarization via an Azure OpenAI chat service."""

import asyncio

from utils import e2e_summarization

import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai

kernel = sk.Kernel()

# Load credentials from .env file
deployment_name, api_key, endpoint = sk.azure_openai_settings_from_dot_env()

# Configure the chat LLM service. Use the deployment name loaded from the
# .env file (it was previously read and then ignored in favor of a
# hard-coded "gpt-35-turbo") so the test targets whatever chat deployment
# the environment is actually configured with.
kernel.config.add_chat_service(
    "chat-gpt", sk_oai.AzureChatCompletion(deployment_name, endpoint, api_key)
)

asyncio.run(e2e_summarization.summarize_function_test(kernel))
20 changes: 20 additions & 0 deletions python/tests/end-to-end/basics_with_azure_oai_text_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Copyright (c) Microsoft. All rights reserved.
"""End-to-end check: summarization via an Azure OpenAI text service."""

import asyncio

from utils import e2e_summarization

import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai

kernel = sk.Kernel()

# Load credentials from .env file
deployment_name, api_key, endpoint = sk.azure_openai_settings_from_dot_env()

# Configure the text LLM service. Use the deployment name loaded from the
# .env file (it was previously read and then ignored in favor of a
# hard-coded "text-davinci-003") so the test targets whatever deployment
# the environment is actually configured with.
kernel.config.add_text_service(
    "davinci-003", sk_oai.AzureTextCompletion(deployment_name, endpoint, api_key)
)

asyncio.run(e2e_summarization.summarize_function_test(kernel))
19 changes: 19 additions & 0 deletions python/tests/end-to-end/basics_with_oai_chat_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Copyright (c) Microsoft. All rights reserved.
"""End-to-end check: summarization via the OpenAI chat service."""

import asyncio

from utils import e2e_summarization

import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai

kernel = sk.Kernel()

# Pull the OpenAI credentials out of the .env file
api_key, org_id = sk.openai_settings_from_dot_env()

# Register ChatGPT as the kernel's chat service
chat_service = sk_oai.OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)
kernel.config.add_chat_service("chat-gpt", chat_service)

asyncio.run(e2e_summarization.summarize_function_test(kernel))
20 changes: 20 additions & 0 deletions python/tests/end-to-end/basics_with_oai_text_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Copyright (c) Microsoft. All rights reserved.
"""End-to-end check: summarization via the OpenAI text-completion service."""

import asyncio

from utils import e2e_summarization

import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai

kernel = sk.Kernel()

# Pull the OpenAI credentials out of the .env file
api_key, org_id = sk.openai_settings_from_dot_env()

# Register a davinci text-completion service with the kernel
text_service = sk_oai.OpenAITextCompletion("text-davinci-003", api_key, org_id)
kernel.config.add_text_service("davinci-003", text_service)

asyncio.run(e2e_summarization.summarize_function_test(kernel))
96 changes: 96 additions & 0 deletions python/tests/end-to-end/utils/e2e_summarization.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import semantic_kernel as sk


def _check_summary(summary, label: str) -> None:
    """Print the summary produced for *label* and assert it is exactly 5 words."""
    output = str(summary).strip()
    print(f"Summary using {label}: '{output}'")
    # split() rather than split(" "): newlines or runs of spaces in the model
    # output would otherwise skew the word count.
    assert len(output.split()) == 5


async def summarize_function_test(kernel: sk.Kernel):
    """Run a battery of end-to-end summarization calls against *kernel*.

    Builds a semantic function that asks for a 5-word TLDR, then invokes it
    through every supported input mechanism — plain input string, context
    variables, a context object, and their combinations — asserting each
    summary comes back as exactly 5 words.

    Arguments:
        kernel {sk.Kernel} -- A kernel with a text or chat service configured.
    """
    # Define semantic function using SK prompt template language.
    # $input2 is only populated by the combined-variables case at the end;
    # it renders empty for the other calls.
    sk_prompt = """
    {{$input}}
    {{$input2}}
    Give me the TLDR in exactly 5 words:
    """

    # Create the semantic function
    tldr_function = kernel.create_semantic_function(
        sk_prompt, max_tokens=200, temperature=0, top_p=0.5
    )

    # User input
    text_to_summarize = """
    1) A robot may not injure a human being or, through inaction,
    allow a human being to come to harm.

    2) A robot must obey orders given it by human beings except where
    such orders would conflict with the First Law.

    3) A robot must protect its own existence as long as such protection
    does not conflict with the First or Second Law.
    """

    print("Summarizing: ")
    print(text_to_summarize)
    print()

    # 1) Input passed as a plain string
    summary = await kernel.run_async(tldr_function, input_str=text_to_summarize)
    _check_summary(summary, "input string")

    # 2) Input passed as context variables
    context_vars = sk.ContextVariables(text_to_summarize)
    summary = await kernel.run_async(tldr_function, input_vars=context_vars)
    _check_summary(summary, "context variables")

    # 3) Input passed through a context object
    context = kernel.create_new_context()
    context["input"] = text_to_summarize
    summary = await kernel.run_async(tldr_function, input_context=context)
    _check_summary(summary, "input context")

    # 4) Context object plus additional variables
    context = kernel.create_new_context()
    context["input"] = text_to_summarize
    context_vars = sk.ContextVariables("4) All birds are robots.")
    summary = await kernel.run_async(
        tldr_function, input_context=context, input_vars=context_vars
    )
    _check_summary(summary, "context and additional variables")

    # 5) Context object plus an additional input string
    context = kernel.create_new_context()
    context["input"] = text_to_summarize
    summary = await kernel.run_async(
        tldr_function, input_context=context, input_str="4) All birds are robots."
    )
    _check_summary(summary, "context and additional string")

    # 6) Context object, additional variables, and an additional input string
    context = kernel.create_new_context()
    context["input"] = text_to_summarize
    context_vars = sk.ContextVariables(variables={"input2": "4) All birds are robots."})
    summary = await kernel.run_async(
        tldr_function,
        input_context=context,
        input_vars=context_vars,
        input_str="new text",
    )
    _check_summary(summary, "context, additional variables, and additional string")

0 comments on commit 548429d

Please sign in to comment.