Migrating Anthropic #1281

Merged · 2 commits · Dec 26, 2024
10 changes: 1 addition & 9 deletions instructor/client_anthropic.py
@@ -42,7 +42,6 @@ def from_anthropic(
         | anthropic.AnthropicVertex
     ),
     mode: instructor.Mode = instructor.Mode.ANTHROPIC_TOOLS,
-    enable_prompt_caching: bool = False,
     beta: bool = False,
     **kwargs: Any,
 ) -> instructor.Instructor | instructor.AsyncInstructor:
@@ -82,14 +81,7 @@ def from_anthropic(
         ),
     ), "Client must be an instance of {anthropic.Anthropic, anthropic.AsyncAnthropic, anthropic.AnthropicBedrock, anthropic.AsyncAnthropicBedrock, anthropic.AnthropicVertex, anthropic.AsyncAnthropicVertex}"

-    if enable_prompt_caching:
-        if isinstance(client, (anthropic.Anthropic, anthropic.AsyncAnthropic)):
-            create = client.beta.prompt_caching.messages.create
-        else:
-            raise TypeError(
-                "Client must be an instance of {anthropic.Anthropic, anthropic.AsyncAnthropic} to enable prompt caching"
-            )
-    elif beta:
+    if beta:
         create = client.beta.messages.create
     else:
         create = client.messages.create
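
With `enable_prompt_caching` removed, `from_anthropic` keeps just two paths: `beta=True` routes calls through `client.beta.messages.create`, and the default uses `client.messages.create`. A dedicated caching flag is no longer needed because anthropic 0.42 accepts `cache_control` blocks on the standard messages endpoint. A minimal sketch of the surviving call surface; the model name and prompt are illustrative placeholders:

```python
import anthropic
import instructor
from pydantic import BaseModel


class User(BaseModel):
    name: str
    age: int


# Default path: client.messages.create under the hood.
client = instructor.from_anthropic(anthropic.Anthropic())

# Beta path: client.beta.messages.create under the hood.
beta_client = instructor.from_anthropic(anthropic.Anthropic(), beta=True)

user = client.chat.completions.create(
    model="claude-3-5-sonnet-20241022",  # placeholder model name
    response_model=User,
    messages=[{"role": "user", "content": "Extract: Jason is 25 years old."}],
    max_tokens=1000,
)
```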
6 changes: 2 additions & 4 deletions instructor/reask.py
@@ -23,9 +23,8 @@ def reask_anthropic_tools(
 ):
     kwargs = kwargs.copy()
     from anthropic.types import Message
-    from anthropic.types.beta.prompt_caching import PromptCachingBetaMessage

-    assert isinstance(response, Message) or isinstance(response, PromptCachingBetaMessage), "Response must be a Anthropic Message"
+    assert isinstance(response, Message), "Response must be a Anthropic Message"

     assistant_content = []
     tool_use_id = None
@@ -71,9 +70,8 @@ def reask_anthropic_json(
 ):
     kwargs = kwargs.copy()
     from anthropic.types import Message
-    from anthropic.types.beta.prompt_caching import PromptCachingBetaMessage

-    assert isinstance(response, Message) or isinstance(response, PromptCachingBetaMessage), "Response must be a Anthropic Message"
+    assert isinstance(response, Message), "Response must be a Anthropic Message"

     reask_msg = {
         "role": "user",
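
Both reask helpers previously special-cased `PromptCachingBetaMessage` because the caching beta returned its own type; in anthropic 0.42 every call returns a plain `anthropic.types.Message`, so a single isinstance check covers cached and uncached responses alike. A toy sketch of that invariant; `check_response` is a made-up helper, not part of instructor:

```python
from anthropic.types import Message


def check_response(response: object) -> Message:
    # Hypothetical helper: anthropic>=0.42 returns a plain Message even
    # when cache_control blocks were used, so no beta type is needed.
    assert isinstance(response, Message), "Response must be an Anthropic Message"
    return response
```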
24 changes: 19 additions & 5 deletions instructor/retry.py
@@ -14,7 +14,11 @@
 from instructor.utils import update_total_usage
 from instructor.validators import AsyncValidationError
 from openai.types.chat import ChatCompletion
-from openai.types.completion_usage import CompletionUsage, CompletionTokensDetails, PromptTokensDetails
+from openai.types.completion_usage import (
+    CompletionUsage,
+    CompletionTokensDetails,
+    PromptTokensDetails,
+)
 from pydantic import BaseModel, ValidationError
 from tenacity import (
     AsyncRetrying,
@@ -71,14 +75,24 @@ def initialize_usage(mode: Mode) -> CompletionUsage | Any:
     Returns:
         CompletionUsage | Any: Initialized usage object.
     """
-    total_usage = CompletionUsage(completion_tokens=0, prompt_tokens=0, total_tokens=0,
-        completion_tokens_details = CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0),
-        prompt_tokens_details = PromptTokensDetails(audio_tokens=0, cached_tokens=0)
+    total_usage = CompletionUsage(
+        completion_tokens=0,
+        prompt_tokens=0,
+        total_tokens=0,
+        completion_tokens_details=CompletionTokensDetails(
+            audio_tokens=0, reasoning_tokens=0
+        ),
+        prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
     )
     if mode in {Mode.ANTHROPIC_TOOLS, Mode.ANTHROPIC_JSON}:
         from anthropic.types import Usage as AnthropicUsage

-        total_usage = AnthropicUsage(input_tokens=0, output_tokens=0)
+        total_usage = AnthropicUsage(
+            input_tokens=0,
+            output_tokens=0,
+            cache_read_input_tokens=0,
+            cache_creation_input_tokens=0,
+        )
     return total_usage
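
The zeroed usage object has to carry every counter that later accumulation touches, including the nested OpenAI token-detail models and the two new Anthropic cache fields. A short sketch of what the reformatted initializer produces for the OpenAI case (the `+= 256` at the end is an invented cached-token count for illustration):

```python
from openai.types.completion_usage import (
    CompletionUsage,
    CompletionTokensDetails,
    PromptTokensDetails,
)

total_usage = CompletionUsage(
    completion_tokens=0,
    prompt_tokens=0,
    total_tokens=0,
    completion_tokens_details=CompletionTokensDetails(
        audio_tokens=0, reasoning_tokens=0
    ),
    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
)

# Per-attempt accumulation can now update the nested models directly:
total_usage.prompt_tokens_details.cached_tokens += 256  # invented count
```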
6 changes: 6 additions & 0 deletions instructor/utils.py
@@ -169,6 +169,12 @@ def update_total_usage(
         ):
             total_usage.input_tokens += response_usage.input_tokens or 0
             total_usage.output_tokens += response_usage.output_tokens or 0
+            total_usage.cache_creation_input_tokens += (
+                response_usage.cache_creation_input_tokens or 0
+            )
+            total_usage.cache_read_input_tokens += (
+                response_usage.cache_read_input_tokens or 0
+            )
             response.usage = total_usage
             return response
     except ImportError:
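
On the Anthropic side, the new counters let `update_total_usage` fold a response's cache activity into the running total with plain `+=`, mirroring the existing input/output handling. A hedged sketch of one accumulation step, with invented sample numbers; `response_usage` stands in for `response.usage` from a real call:

```python
from anthropic.types import Usage as AnthropicUsage

total_usage = AnthropicUsage(
    input_tokens=0,
    output_tokens=0,
    cache_read_input_tokens=0,
    cache_creation_input_tokens=0,
)

# Invented figures: 100 of 120 input tokens were served from the cache.
response_usage = AnthropicUsage(
    input_tokens=120,
    output_tokens=45,
    cache_read_input_tokens=100,
    cache_creation_input_tokens=0,
)

total_usage.input_tokens += response_usage.input_tokens or 0
total_usage.output_tokens += response_usage.output_tokens or 0
total_usage.cache_creation_input_tokens += (
    response_usage.cache_creation_input_tokens or 0
)
total_usage.cache_read_input_tokens += response_usage.cache_read_input_tokens or 0
```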
6 changes: 3 additions & 3 deletions pyproject.toml
@@ -56,7 +56,7 @@ test-docs = [
     "mistralai<2.0.0,>=1.0.3",
 ]
 anthropic = [
-    "anthropic<0.41.0,>=0.36.2",
+    "anthropic==0.42.0",
     "xmltodict<0.15,>=0.13",
 ]
 groq = [
@@ -108,7 +108,7 @@ docs = [
     "mkdocs-redirects<2.0.0,>=1.2.1",
 ]
 anthropic = [
-    "anthropic<0.41.0,>=0.36.2",
+    "anthropic==0.42.0",
 ]
 test-docs = [
     "fastapi<0.116.0,>=0.109.2",
@@ -118,7 +118,7 @@ test-docs = [
     "tabulate<1.0.0,>=0.9.0",
     "pydantic-extra-types<3.0.0,>=2.6.0",
     "litellm<2.0.0,>=1.35.31",
-    "anthropic<0.41.0,>=0.36.2",
+    "anthropic==0.42.0",
     "xmltodict<0.15,>=0.13",
     "groq<0.14.0,>=0.4.2",
     "phonenumbers<9.0.0,>=8.13.33",
17 changes: 9 additions & 8 deletions tests/llm/test_anthropic/test_multimodal.py
@@ -12,6 +12,9 @@ class ImageDescription(BaseModel):
     colors: list[str] = Field(..., description="The colors in the image")


+image_url = "https://github.com/google-gemini/cookbook/blob/main/examples/assets/castle.png?raw=true"
+
+
 @pytest.mark.parametrize("model, mode", product(models, modes))
 def test_multimodal_image_description(model, mode, client):
     client = instructor.from_anthropic(client, mode=mode)
@@ -27,9 +30,7 @@ def test_multimodal_image_description(model, mode, client):
                 "role": "user",
                 "content": [
                     "What is this?",
-                    Image.from_url(
-                        "https://pbs.twimg.com/profile_images/1816950591857233920/ZBxrWCbX_400x400.jpg"
-                    ),
+                    Image.from_url(image_url),
                 ],
             },
         ],
@@ -59,7 +60,7 @@ def test_multimodal_image_description_autodetect(model, mode, client):
                 "role": "user",
                 "content": [
                     "What is this?",
-                    "https://pbs.twimg.com/profile_images/1816950591857233920/ZBxrWCbX_400x400.jpg",
+                    image_url,
                 ],
             },
         ],
@@ -94,7 +95,7 @@ def test_multimodal_image_description_autodetect_image_params(model, mode, client):
                     "What is this?",
                     {
                         "type": "image",
-                        "source": "https://pbs.twimg.com/profile_images/1816950591857233920/ZBxrWCbX_400x400.jpg",
+                        "source": image_url,
                     },
                 ],
             },
@@ -117,7 +118,7 @@ def test_multimodal_image_description_autodetect_image_params(model, mode, client):
 def test_multimodal_image_description_autodetect_image_params_cache(
     model, mode, client
 ):
-    client = instructor.from_anthropic(client, mode=mode, enable_prompt_caching=True)
+    client = instructor.from_anthropic(client, mode=mode)
     messages = client.chat.completions.create(
         model=model,  # Ensure this is a vision-capable model
         response_model=None,
@@ -174,7 +175,7 @@ def test_multimodal_image_description_autodetect_no_response_model(model, mode, client):
             },
             {
                 "role": "user",
-                "content": "https://pbs.twimg.com/profile_images/1816950591857233920/ZBxrWCbX_400x400.jpg",
+                "content": image_url,
             },
         ],
         max_tokens=1000,
@@ -192,7 +193,7 @@ def test_multimodal_image_description_autodetect_no_response_model(model, mode, client):
         messages=[
             {
                 "role": "user",
-                "content": "https://pbs.twimg.com/profile_images/1816950591857233920/ZBxrWCbX_400x400.jpg",
+                "content": image_url,
             },
         ],
         max_tokens=1000,
4 changes: 2 additions & 2 deletions tests/llm/test_anthropic/test_system.py
@@ -41,7 +41,7 @@ def test_creation(model, mode, client):

 @pytest.mark.parametrize("model, mode", product(models, modes))
 def test_creation_with_system_cache(model, mode, client):
-    client = instructor.from_anthropic(client, mode=mode, enable_prompt_caching=True)
+    client = instructor.from_anthropic(client, mode=mode)
     response, message = client.chat.completions.create_with_completion(
         model=model,
         response_model=User,
@@ -83,7 +83,7 @@ def test_creation_with_system_cache(model, mode, client):

 @pytest.mark.parametrize("model, mode", product(models, modes))
 def test_creation_with_system_cache_anthropic_style(model, mode, client):
-    client = instructor.from_anthropic(client, mode=mode, enable_prompt_caching=True)
+    client = instructor.from_anthropic(client, mode=mode)
     response, message = client.chat.completions.create_with_completion(
         model=model,
         system=[
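
With the flag gone, these tests exercise caching purely through `cache_control` blocks. To confirm a cache hit outside the test suite, the raw completion returned by `create_with_completion` exposes the new counters. A hedged sketch in the same style, with the model name and prompt as placeholders (Anthropic requires the cached block to exceed a minimum token count, hence the repeated text):

```python
import anthropic
import instructor
from pydantic import BaseModel


class User(BaseModel):
    name: str
    age: int


client = instructor.from_anthropic(anthropic.Anthropic())

response, completion = client.chat.completions.create_with_completion(
    model="claude-3-5-sonnet-20241022",  # placeholder model name
    response_model=User,
    system=[
        {
            "type": "text",
            "text": "You are an expert extraction system. " * 200,  # pad past the cache minimum
            "cache_control": {"type": "ephemeral"},
        }
    ],
    messages=[{"role": "user", "content": "Extract: Jason is 25 years old."}],
    max_tokens=1000,
)

# First call populates the cache; repeated calls should read from it.
print(completion.usage.cache_creation_input_tokens)
print(completion.usage.cache_read_input_tokens)
```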
20 changes: 10 additions & 10 deletions uv.lock

Generated lockfile; diff not rendered.