diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/JsonFormats.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/JsonFormats.scala
index aeea8bbf..d53c42c2 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/JsonFormats.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/JsonFormats.scala
@@ -1,7 +1,11 @@
package io.cequence.openaiscala.anthropic
import io.cequence.openaiscala.JsonUtil
-import io.cequence.openaiscala.anthropic.domain.Content.ContentBlock.{ImageBlock, TextBlock}
+import io.cequence.openaiscala.anthropic.domain.Content.ContentBlock.{
+ ImageBlock,
+ TextBlock,
+ ToolUseBlock
+}
import io.cequence.openaiscala.anthropic.domain.Content.{
ContentBlock,
ContentBlocks,
@@ -20,7 +24,7 @@ import io.cequence.openaiscala.anthropic.domain.response.{
CreateMessageResponse,
DeltaText
}
-import io.cequence.openaiscala.anthropic.domain.{ChatRole, Content, Message}
+import io.cequence.openaiscala.anthropic.domain.{ChatRole, Content, Message, ToolSpec}
import play.api.libs.functional.syntax._
import play.api.libs.json._
@@ -68,6 +72,7 @@ trait JsonFormats {
}
implicit val contentBlockReads: Reads[ContentBlock] = new Reads[ContentBlock] {
+ implicit val stringAnyMapFormat: Format[Map[String, Any]] = JsonUtil.StringAnyMapFormat
def reads(json: JsValue): JsResult[ContentBlock] = {
(json \ "type").validate[String].flatMap {
case "text" => (json \ "text").validate[String].map(TextBlock.apply)
@@ -78,6 +83,12 @@ trait JsonFormats {
mediaType <- (source \ "media_type").validate[String]
data <- (source \ "data").validate[String]
} yield ImageBlock(`type`, mediaType, data)
+ case "tool_use" =>
+ for {
+ id <- (json \ "id").validate[String]
+ name <- (json \ "name").validate[String]
+ input <- (json \ "input").validate[Map[String, Any]]
+ } yield ToolUseBlock(id, name, input)
case _ => JsError("Unsupported or invalid content block")
}
}
@@ -127,7 +138,7 @@ trait JsonFormats {
(__ \ "model").read[String] and
(__ \ "stop_reason").readNullable[String] and
(__ \ "stop_sequence").readNullable[String] and
- (__ \ "usage").read[UsageInfo]
+ (__ \ "usage").readNullable[UsageInfo]
)(CreateMessageResponse.apply _)
implicit val createMessageChunkResponseReads: Reads[CreateMessageChunkResponse] =
@@ -135,4 +146,10 @@ trait JsonFormats {
implicit val deltaTextReads: Reads[DeltaText] = Json.reads[DeltaText]
implicit val contentBlockDeltaReads: Reads[ContentBlockDelta] = Json.reads[ContentBlockDelta]
+
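+  // snake_case naming so that e.g. `inputSchema` is (de)serialized as `input_schema`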
+ implicit lazy val toolSpecFormat: OFormat[ToolSpec] = {
+ implicit val stringAnyMapFormat: Format[Map[String, Any]] = JsonUtil.StringAnyMapFormat
+ implicit val config = JsonConfiguration(JsonNaming.SnakeCase)
+ Json.format[ToolSpec]
+ }
}
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/Content.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/Content.scala
index f5da4e0a..fcf09c2a 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/Content.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/Content.scala
@@ -16,5 +16,23 @@ object Content {
mediaType: String,
data: String
) extends ContentBlock
+
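+    // a tool invocation requested by the model (tools beta); `input` carries the tool
+    // arguments as a map read via JsonUtil.StringAnyMapFormat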
+ case class ToolUseBlock(
+ id: String,
+ name: String,
+    input: Map[String, Any]
+ ) extends ContentBlock
+
+// sealed trait ToolUseBlock extends ContentBlock
+// // TODO: allow only for responses to createChatToolCompletion
+// case class ToolUseBlockSuccess(
+// toolUseId: String,
+// content: String // TODO: allow here only Text content blocks
+// ) extends ToolUseBlock
+//
+// case class ToolUseBlockFailure(
+// toolUseId: String,
+// content: String // TODO: allow here only Text content blocks
+// ) extends ToolUseBlock
}
}
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/ToolSpec.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/ToolSpec.scala
new file mode 100644
index 00000000..89ae110e
--- /dev/null
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/ToolSpec.scala
@@ -0,0 +1,7 @@
+package io.cequence.openaiscala.anthropic.domain
+
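+/**
+ * Specification of a tool the model may use (Anthropic tools beta).
+ *
+ * `inputSchema` is a JSON-schema-like map, serialized via `JsonUtil.StringAnyMapFormat`
+ * with snake_case naming (`inputSchema` -> `input_schema`). A minimal sketch, mirroring
+ * the stock-price example in `JsonFormatsSpec`:
+ * {{{
+ * ToolSpec(
+ *   name = "get_stock_price",
+ *   description = Some("Get the current stock price for a given ticker symbol."),
+ *   inputSchema = Map(
+ *     "type" -> "object",
+ *     "properties" -> Map("ticker" -> Map("type" -> "string")),
+ *     "required" -> Seq("ticker")
+ *   )
+ * )
+ * }}}
+ */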
+final case class ToolSpec(
+ name: String,
+ description: Option[String],
+ inputSchema: Map[String, Any]
+)
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/response/CreateMessageResponse.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/response/CreateMessageResponse.scala
index 154e10f2..84b2b5f5 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/response/CreateMessageResponse.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/response/CreateMessageResponse.scala
@@ -12,7 +12,8 @@ final case class CreateMessageResponse(
model: String,
stop_reason: Option[String],
stop_sequence: Option[String],
- usage: UsageInfo
+  // TODO: required per the API documentation, but not present in tool-call responses
+ usage: Option[UsageInfo]
)
object CreateMessageResponse {
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/settings/AnthropicCreateMessageSettings.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/settings/AnthropicCreateMessageSettings.scala
index 7d0d496e..b604811e 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/settings/AnthropicCreateMessageSettings.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/domain/settings/AnthropicCreateMessageSettings.scala
@@ -5,10 +5,6 @@ final case class AnthropicCreateMessageSettings(
// See [[models|https://docs.anthropic.com/claude/docs/models-overview]] for additional details and options.
model: String,
- // System prompt.
- // A system prompt is a way of providing context and instructions to Claude, such as specifying a particular goal or role. See our [[guide to system prompts|https://docs.anthropic.com/claude/docs/system-prompts]].
- system: Option[String] = None,
-
// The maximum number of tokens to generate before stopping.
// Note that our models may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate.
// Different models have different maximum values for this parameter. See [[models|https://docs.anthropic.com/claude/docs/models-overview]] for details.
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicService.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicService.scala
index eb2acba5..0b2c3da9 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicService.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicService.scala
@@ -2,7 +2,7 @@ package io.cequence.openaiscala.anthropic.service
import akka.NotUsed
import akka.stream.scaladsl.Source
-import io.cequence.openaiscala.anthropic.domain.Message
+import io.cequence.openaiscala.anthropic.domain.{Message, ToolSpec}
import io.cequence.openaiscala.anthropic.domain.response.{
ContentBlockDelta,
CreateMessageResponse
@@ -25,6 +25,11 @@ trait AnthropicService extends CloseableService with AnthropicServiceConsts {
*
* @param messages
* A list of messages comprising the conversation so far.
+ * @param systemPrompt
+ * System prompt.
+ *
+   * A system prompt is a way of providing context and instructions to Claude, such as
+   * specifying a particular goal or role. See our
+   * [[guide to system prompts|https://docs.anthropic.com/claude/docs/system-prompts]].
* @param settings
* @return
* create message response
@@ -33,6 +38,45 @@ trait AnthropicService extends CloseableService with AnthropicServiceConsts {
*/
def createMessage(
messages: Seq[Message],
+ systemPrompt: Option[String],
+ settings: AnthropicCreateMessageSettings = DefaultSettings.CreateMessage
+ ): Future[CreateMessageResponse]
+
+ /**
+ * Creates a message.
+ *
+ * Send a structured list of input messages with text and/or image content, and the model
+ * will generate the next message in the conversation.
+ *
+   * The Messages API can be used for either single queries or stateless multi-turn
+ * conversations.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param systemPrompt
+ * System prompt.
+ *
+   * A system prompt is a way of providing context and instructions to Claude, such as
+   * specifying a particular goal or role. See our
+   * [[guide to system prompts|https://docs.anthropic.com/claude/docs/system-prompts]].
+ * @param tools
+ * [beta] Definitions of tools that the model may use.
+ *
+ * If you include tools in your API request, the model may return tool_use content blocks
+ * that represent the model's use of those tools. You can then run those tools using the tool
+ * input generated by the model and then optionally return results back to the model using
+ * tool_result content blocks.
+ *
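+   * Example (a minimal sketch; the tool schema and model here are illustrative):
+   * {{{
+   * service.createToolMessage(
+   *   messages = Seq(Message.UserMessage("What is the weather in San Francisco?")),
+   *   systemPrompt = None,
+   *   tools = Seq(
+   *     ToolSpec(
+   *       name = "get_weather",
+   *       description = Some("Get the current weather for a given location"),
+   *       inputSchema = Map(
+   *         "type" -> "object",
+   *         "properties" -> Map("location" -> Map("type" -> "string")),
+   *         "required" -> Seq("location")
+   *       )
+   *     )
+   *   ),
+   *   settings = AnthropicCreateMessageSettings(
+   *     model = NonOpenAIModelId.claude_3_haiku_20240307,
+   *     max_tokens = 4096
+   *   )
+   * )
+   * }}}
+   *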
+ * @param settings
+ * @return
+ * create message response
+ * @see
+ * Anthropic Doc
+ */
+ def createToolMessage(
+ messages: Seq[Message],
+ systemPrompt: Option[String],
+ tools: Seq[ToolSpec],
settings: AnthropicCreateMessageSettings = DefaultSettings.CreateMessage
): Future[CreateMessageResponse]
@@ -47,6 +91,11 @@ trait AnthropicService extends CloseableService with AnthropicServiceConsts {
*
* @param messages
* A list of messages comprising the conversation so far.
+ * @param systemPrompt
+ * System prompt.
+ *
+   * A system prompt is a way of providing context and instructions to Claude, such as
+   * specifying a particular goal or role. See our
+   * [[guide to system prompts|https://docs.anthropic.com/claude/docs/system-prompts]].
* @param settings
* @return
* create message response
@@ -55,6 +104,7 @@ trait AnthropicService extends CloseableService with AnthropicServiceConsts {
*/
def createMessageStreamed(
messages: Seq[Message],
+ systemPrompt: Option[String],
settings: AnthropicCreateMessageSettings = DefaultSettings.CreateMessage
): Source[ContentBlockDelta, NotUsed]
}
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicServiceFactory.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicServiceFactory.scala
index 56028704..13959f6b 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicServiceFactory.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicServiceFactory.scala
@@ -3,10 +3,16 @@ package io.cequence.openaiscala.anthropic.service
import akka.stream.Materializer
import io.cequence.openaiscala.anthropic.service.impl.{
AnthropicServiceImpl,
- OpenAIAnthropicChatCompletionService
+ OpenAIAnthropicChatCompletionService,
+ OpenAIAnthropicChatToolCompletionService
+}
+import io.cequence.openaiscala.service.{
+ OpenAIChatCompletionService,
+ OpenAIChatToolCompletionService
+}
+import io.cequence.openaiscala.service.StreamedServiceTypes.OpenAIChatCompletionStreamedService
-import io.cequence.openaiscala.service.OpenAIChatCompletionService
-import io.cequence.openaiscala.service.StreamedServiceTypes.OpenAIChatCompletionStreamedService
import io.cequence.openaiscala.service.ws.Timeouts
import scala.concurrent.ExecutionContext
@@ -43,6 +49,17 @@ object AnthropicServiceFactory extends AnthropicServiceConsts {
AnthropicServiceFactory(apiKey, timeouts)
)
+ def asOpenAIChatToolCompletionService(
+ apiKey: String = getAPIKeyFromEnv(),
+ timeouts: Option[Timeouts] = None
+ )(
+ implicit ec: ExecutionContext,
+ materializer: Materializer
+ ): OpenAIChatToolCompletionService =
+ new OpenAIAnthropicChatToolCompletionService(
+ AnthropicServiceFactory(apiKey, timeouts)
+ )
+
/**
* Create a new instance of the [[AnthropicService]]
*
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicWSRequestHelper.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicWSRequestHelper.scala
new file mode 100644
index 00000000..4c5746a3
--- /dev/null
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/AnthropicWSRequestHelper.scala
@@ -0,0 +1,62 @@
+package io.cequence.openaiscala.anthropic.service
+
+import io.cequence.openaiscala.anthropic.service.AnthropicWSRequestHelper.AnthropicBetaHeader
+import io.cequence.openaiscala.{
+ OpenAIScalaClientException,
+ OpenAIScalaEngineOverloadedException,
+ OpenAIScalaRateLimitException,
+ OpenAIScalaServerErrorException,
+ OpenAIScalaTokenCountExceededException,
+ OpenAIScalaUnauthorizedException
+}
+import io.cequence.openaiscala.service.ws.WSRequestExtHelper
+import play.api.libs.json.{JsObject, JsValue}
+
+import scala.concurrent.Future
+
+object AnthropicWSRequestHelper {
+ val AnthropicBetaHeader = "anthropic-beta"
+}
+
+trait AnthropicWSRequestHelper extends WSRequestExtHelper {
+
+ // TODO: introduce Anthropic error model
+ override protected def handleErrorCodes(
+ httpCode: Int,
+ message: String
+ ): Nothing = {
+ val errorMessage = s"Code ${httpCode} : ${message}"
+ httpCode match {
+ case 401 => throw new OpenAIScalaUnauthorizedException(errorMessage)
+ case 429 => throw new OpenAIScalaRateLimitException(errorMessage)
+ case 500 => throw new OpenAIScalaServerErrorException(errorMessage)
+ case 503 => throw new OpenAIScalaEngineOverloadedException(errorMessage)
+ case 400 =>
+ if (
+ message.contains("Please reduce your prompt; or completion length") ||
+ message.contains("Please reduce the length of the messages")
+ )
+ throw new OpenAIScalaTokenCountExceededException(errorMessage)
+ else
+ throw new OpenAIScalaClientException(errorMessage)
+
+ case _ => throw new OpenAIScalaClientException(errorMessage)
+ }
+ }
+
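+  /**
+   * POSTs with the Anthropic tools beta header (`anthropic-beta: tools-2024-04-04`)
+   * added on top of the standard auth headers; error responses are turned into
+   * exceptions via `handleErrorResponse`.
+   */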
+ protected def execBetaPOSTWithStatus(
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ bodyParams: Seq[(PT, Option[JsValue])] = Nil
+ ): Future[JsValue] = {
+ execPOSTWithStatusAndHeaders(
+ endPoint,
+ endPointParam,
+ params,
+ bodyParams,
+ headers = authHeaders ++ Seq(AnthropicBetaHeader -> "tools-2024-04-04")
+ ).map(handleErrorResponse)
+ }
+
+}
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/AnthropicServiceImpl.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/AnthropicServiceImpl.scala
index 48a75ee8..48df396b 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/AnthropicServiceImpl.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/AnthropicServiceImpl.scala
@@ -10,8 +10,8 @@ import io.cequence.openaiscala.anthropic.domain.response.{
CreateMessageResponse
}
import io.cequence.openaiscala.anthropic.domain.settings.AnthropicCreateMessageSettings
-import io.cequence.openaiscala.anthropic.domain.{ChatRole, Message}
-import io.cequence.openaiscala.anthropic.service.AnthropicService
+import io.cequence.openaiscala.anthropic.domain.{ChatRole, Message, ToolSpec}
+import io.cequence.openaiscala.anthropic.service.{AnthropicService, AnthropicWSRequestHelper}
import io.cequence.openaiscala.service.OpenAIWSRequestHelper
import io.cequence.openaiscala.service.impl.OpenAIWSStreamRequestHelper
import play.api.libs.json.{JsValue, Json}
@@ -22,7 +22,7 @@ import scala.concurrent.Future
// Shouldn't use OpenAIWSRequestHelper and OpenAIWSStreamRequestHelper
private[service] trait AnthropicServiceImpl
extends AnthropicService
- with OpenAIWSRequestHelper
+ with AnthropicWSRequestHelper
with OpenAIWSStreamRequestHelper
with JsonFormats {
@@ -31,23 +31,60 @@ private[service] trait AnthropicServiceImpl
override def createMessage(
messages: Seq[Message],
+ systemPrompt: Option[String],
settings: AnthropicCreateMessageSettings
): Future[CreateMessageResponse] =
execPOST(
EndPoint.messages,
- bodyParams = createBodyParamsForMessageCreation(messages, settings, stream = false)
+ bodyParams =
+ createBodyParamsForMessageCreation(messages, systemPrompt, settings, stream = false)
).map(
_.asSafe[CreateMessageResponse]
)
+ override def createToolMessage(
+ messages: Seq[Message],
+ systemPrompt: Option[String],
+ tools: Seq[ToolSpec],
+ settings: AnthropicCreateMessageSettings
+ ): Future[CreateMessageResponse] = {
+ val coreParams =
+ createBodyParamsForMessageCreation(messages, systemPrompt, settings, stream = false)
+ val extraParams = jsonBodyParams(
+ Param.tools -> Some(tools.map(Json.toJson(_)))
+ )
+
+    val isToolCall = tools.nonEmpty
+
+    // the tools beta needs the dedicated request helper that adds the beta header
+    val rawResponse =
+      if (isToolCall)
+        execBetaPOSTWithStatus(
+          EndPoint.messages,
+          bodyParams = coreParams ++ extraParams
+        )
+      else
+        execPOST(
+          EndPoint.messages,
+          bodyParams = coreParams ++ extraParams
+        )
+
+    rawResponse.map(_.asSafe[CreateMessageResponse])
+  }
+
+  // TODO: replace the OpenAI exception types thrown from
+  // AnthropicWSRequestHelper.handleErrorCodes with Anthropic-specific ones
+
override def createMessageStreamed(
messages: Seq[Message],
+ systemPrompt: Option[String],
settings: AnthropicCreateMessageSettings
): Source[ContentBlockDelta, NotUsed] =
execJsonStreamAux(
EndPoint.messages,
"POST",
- bodyParams = createBodyParamsForMessageCreation(messages, settings, stream = true)
+ bodyParams =
+ createBodyParamsForMessageCreation(messages, systemPrompt, settings, stream = true)
).map { (json: JsValue) =>
(json \ "error").toOption.map { error =>
throw new OpenAIScalaClientException(error.toString())
@@ -70,6 +107,7 @@ private[service] trait AnthropicServiceImpl
protected def createBodyParamsForMessageCreation(
messages: Seq[Message],
+ systemPrompt: Option[String],
settings: AnthropicCreateMessageSettings,
stream: Boolean
): Seq[(Param, Option[JsValue])] = {
@@ -81,7 +119,7 @@ private[service] trait AnthropicServiceImpl
jsonBodyParams(
Param.messages -> Some(messageJsons),
Param.model -> Some(settings.model),
- Param.system -> settings.system,
+ Param.system -> systemPrompt,
Param.max_tokens -> Some(settings.max_tokens),
Param.metadata -> { if (settings.metadata.isEmpty) None else Some(settings.metadata) },
Param.stop_sequences -> {
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/EndPoint.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/EndPoint.scala
index d80d61a5..a83ba366 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/EndPoint.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/EndPoint.scala
@@ -20,6 +20,7 @@ object Param {
case object stop_sequences extends Param
case object stream extends Param
case object temperature extends Param
+ case object tools extends Param
case object top_p extends Param
case object top_k extends Param
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatCompletionService.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatCompletionService.scala
index 493d0b1a..4cc8ee87 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatCompletionService.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatCompletionService.scala
@@ -3,7 +3,7 @@ package io.cequence.openaiscala.anthropic.service.impl
import akka.NotUsed
import akka.stream.scaladsl.Source
import io.cequence.openaiscala.anthropic.service.AnthropicService
-import io.cequence.openaiscala.domain.BaseMessage
+import io.cequence.openaiscala.domain.{BaseMessage, ToolSpec}
import io.cequence.openaiscala.domain.response.{
ChatCompletionChunkResponse,
ChatCompletionResponse
@@ -41,6 +41,23 @@ private[service] class OpenAIAnthropicChatCompletionService(
underlying
.createMessage(
toAnthropic(messages),
+ toAnthropicSystemPrompt(messages),
+ toAnthropic(settings, messages)
+ )
+ .map(toOpenAI)
+ }
+
+ // TODO: extract another trait extending OpenAIChatCompletionService with createChatToolCompletion
+ def createChatToolCompletion(
+ messages: Seq[BaseMessage],
+ tools: Seq[ToolSpec],
+ settings: CreateChatCompletionSettings
+ ): Future[ChatCompletionResponse] = {
+ underlying
+ .createToolMessage(
+ toAnthropic(messages),
+ toAnthropicSystemPrompt(messages),
+ toAnthropicToolSpecs(tools),
toAnthropic(settings, messages)
)
.map(toOpenAI)
@@ -64,6 +81,7 @@ private[service] class OpenAIAnthropicChatCompletionService(
underlying
.createMessageStreamed(
toAnthropic(messages),
+ toAnthropicSystemPrompt(messages),
toAnthropic(settings, messages)
)
.map(toOpenAI)
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatToolCompletionService.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatToolCompletionService.scala
new file mode 100644
index 00000000..803d8845
--- /dev/null
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/OpenAIAnthropicChatToolCompletionService.scala
@@ -0,0 +1,70 @@
+package io.cequence.openaiscala.anthropic.service.impl
+
+import io.cequence.openaiscala.anthropic.domain.response.CreateMessageResponse
+import io.cequence.openaiscala.anthropic.service.AnthropicService
+import io.cequence.openaiscala.domain.response.ChatToolCompletionResponse
+import io.cequence.openaiscala.domain.settings.CreateChatCompletionSettings
+import io.cequence.openaiscala.domain.{BaseMessage, ToolSpec}
+import io.cequence.openaiscala.service.OpenAIChatToolCompletionService
+
+import scala.concurrent.{ExecutionContext, Future}
+
+private[service] class OpenAIAnthropicChatToolCompletionService(
+ underlying: AnthropicService
+)(
+ implicit executionContext: ExecutionContext
+) extends OpenAIChatToolCompletionService {
+
+ /**
+ * Creates a model response for the given chat conversation expecting a tool call.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param tools
+ * A list of tools the model may call. Currently, only functions are supported as a tool.
+ * Use this to provide a list of functions the model may generate JSON inputs for.
+ * @param responseToolChoice
+ * Controls which (if any) function/tool is called by the model. Specifying a particular
+ * function forces the model to call that function (must be listed in `tools`). Otherwise,
+ * the default "auto" mode is used where the model can pick between generating a message or
+ * calling a function.
+ * @param settings
+ * @return
+ * chat completion response
+ * @see
+ * OpenAI Doc
+ */
+ override def createChatToolCompletion(
+ messages: Seq[BaseMessage],
+ tools: Seq[ToolSpec],
+ responseToolChoice: Option[String],
+ settings: CreateChatCompletionSettings
+ ): Future[ChatToolCompletionResponse] = {
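+    // there is no tool_choice parameter to pass through here, so a requested tool is
+    // encouraged via an extra user message (see toAnthropicToolUseEncouragement)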
+ val anthropicResponseF: Future[CreateMessageResponse] = underlying.createToolMessage(
+ toAnthropic(messages) ++ responseToolChoice.map(toAnthropicToolUseEncouragement),
+ toAnthropicSystemPrompt(messages),
+ toAnthropicToolSpecs(tools),
+ toAnthropic(settings, messages)
+ )
+ anthropicResponseF.map(toOpenAIChatToolCompletionResponse)
+ }
+
+ // TODO: support streamed version?
+
+ /**
+ * Closes the underlying ws client, and releases all its resources.
+ */
+ override def close(): Unit = underlying.close()
+}
diff --git a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/package.scala b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/package.scala
index e41b36ed..f542d08d 100644
--- a/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/package.scala
+++ b/anthropic-client/src/main/scala/io/cequence/openaiscala/anthropic/service/impl/package.scala
@@ -1,32 +1,41 @@
package io.cequence.openaiscala.anthropic.service
-import io.cequence.openaiscala.anthropic.domain.Content.ContentBlock.TextBlock
+import io.cequence.openaiscala.JsonUtil
+import io.cequence.openaiscala.anthropic.domain.Content.ContentBlock.{TextBlock, ToolUseBlock}
import io.cequence.openaiscala.anthropic.domain.Content.ContentBlocks
+import io.cequence.openaiscala.anthropic.domain.Message.UserMessage
+import io.cequence.openaiscala.anthropic.domain.response.CreateMessageResponse.UsageInfo
import io.cequence.openaiscala.anthropic.domain.response.{
ContentBlockDelta,
CreateMessageResponse
}
-import io.cequence.openaiscala.anthropic.domain.response.CreateMessageResponse.UsageInfo
import io.cequence.openaiscala.anthropic.domain.settings.AnthropicCreateMessageSettings
-import io.cequence.openaiscala.anthropic.domain.{Content, Message}
+import io.cequence.openaiscala.anthropic.domain.{Content, Message, ToolSpec}
import io.cequence.openaiscala.domain.response.{
ChatCompletionChoiceChunkInfo,
ChatCompletionChoiceInfo,
ChatCompletionChunkResponse,
ChatCompletionResponse,
+ ChatToolCompletionChoiceInfo,
+ ChatToolCompletionResponse,
ChunkMessageSpec,
UsageInfo => OpenAIUsageInfo
}
import io.cequence.openaiscala.domain.settings.CreateChatCompletionSettings
import io.cequence.openaiscala.domain.{
AssistantMessage,
+ AssistantToolMessage,
ChatRole,
+ FunctionCallSpec,
+ FunctionSpec,
MessageSpec,
SystemMessage,
+ ToolCallSpec,
BaseMessage => OpenAIBaseMessage,
Content => OpenAIContent,
ImageURLContent => OpenAIImageContent,
TextContent => OpenAITextContent,
+ ToolSpec => OpenAIToolSpec,
UserMessage => OpenAIUserMessage,
UserSeqMessage => OpenAIUserSeqMessage
}
@@ -35,15 +44,34 @@ import java.{util => ju}
package object impl extends AnthropicServiceConsts {
- def toAnthropic(messages: Seq[OpenAIBaseMessage]): Seq[Message] =
+ def toAnthropic(messages: Seq[OpenAIBaseMessage]): Seq[Message] = {
messages.collect {
- case OpenAIUserMessage(content, _) => Message.UserMessage(content)
+ case OpenAIUserMessage(content, _) =>
+ Message.UserMessage(content)
case OpenAIUserSeqMessage(contents, _) =>
Message.UserMessageContent(contents.map(toAnthropic))
// legacy message type
case MessageSpec(role, content, _) if role == ChatRole.User =>
Message.UserMessage(content)
}
+ }
+
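+  // joins all system messages into a single newline-separated prompt, e.g.
+  //   Seq(SystemMessage("Be terse."), SystemMessage("Use French."), ...)
+  //     => Some("Be terse.\nUse French.")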
+ def toAnthropicSystemPrompt(messages: Seq[OpenAIBaseMessage]): Option[String] = {
+ val systemMessagesContent = messages.collect { case SystemMessage(content, _) =>
+ content
+ }.mkString("\n")
+
+ if (systemMessagesContent.isEmpty) None else Some(systemMessagesContent)
+ }
+
+ def toAnthropicToolUseEncouragement(toolChoice: String): UserMessage =
+ UserMessage(s"Use the $toolChoice tool in your response.")
+
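+  // note: only function-based tools (FunctionSpec) are mapped; any other OpenAI tool
+  // kind would be silently dropped by the collect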
+ def toAnthropicToolSpecs(toolSpecs: Seq[OpenAIToolSpec]): Seq[ToolSpec] = {
+ toolSpecs.collect { case FunctionSpec(name, description, parameters) =>
+ ToolSpec(name, description, parameters)
+ }
+ }
def toAnthropic(content: OpenAIContent): Content.ContentBlock = {
content match {
@@ -74,7 +102,7 @@ package object impl extends AnthropicServiceConsts {
AnthropicCreateMessageSettings(
model = settings.model,
- system = if (systemMessagesContent.isEmpty) None else Some(systemMessagesContent),
max_tokens = settings.max_tokens.getOrElse(DefaultSettings.CreateMessage.max_tokens),
metadata = Map.empty,
stop_sequences = settings.stop,
@@ -98,9 +126,26 @@ package object impl extends AnthropicServiceConsts {
logprobs = None
)
),
- usage = Some(toOpenAI(response.usage))
+ usage = response.usage.map(toOpenAI)
)
+  def toOpenAIChatToolCompletionResponse(
+    createMessageResponse: CreateMessageResponse
+  ): ChatToolCompletionResponse = {
+ ChatToolCompletionResponse(
+ id = createMessageResponse.id,
+ created = new ju.Date(),
+ model = createMessageResponse.model,
+ system_fingerprint = createMessageResponse.stop_reason,
+ choices = Seq(
+ ChatToolCompletionChoiceInfo(
+ message = toOpenAIAssistantToolMessage(createMessageResponse.content),
+ index = 0,
+ finish_reason = createMessageResponse.stop_reason
+ )
+ ),
+ usage = createMessageResponse.usage.map(toOpenAI)
+ )
+ }
+
def toOpenAI(blockDelta: ContentBlockDelta): ChatCompletionChunkResponse =
ChatCompletionChunkResponse(
id = "",
@@ -130,6 +175,36 @@ package object impl extends AnthropicServiceConsts {
AssistantMessage(singleTextContent, name = None)
}
+ def toOpenAIAssistantToolMessage(content: ContentBlocks): AssistantToolMessage = {
+ val textMessage = singleTextMessage(content)
+ AssistantToolMessage(
+ content = Some(textMessage),
+ name = None,
+ tool_calls = content.blocks.collect { case toolContent: ToolUseBlock =>
+ toOpenAI(toolContent)
+ }
+ )
+ }
+
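+  // maps an Anthropic tool_use block to OpenAI's (tool-call id, function call) pair;
+  // the input map is re-serialized into a JSON string for FunctionCallSpec.arguments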
+ def toOpenAI(toolUseBlock: ToolUseBlock): (String, ToolCallSpec) = {
+ val inputJson = JsonUtil.StringAnyMapFormat.writes(toolUseBlock.input).toString()
+    toolUseBlock.id -> FunctionCallSpec(toolUseBlock.name, inputJson)
+ }
+
+// def toOpenAI(toolContent: ToolUseBlock): (String, ToolCallSpec) = {
+// toolContent match {
+// case ToolUseBlockSuccess(toolUseId, content) =>
+// toolUseId -> FunctionCallSpec(toolUseId, content)
+// case ToolUseBlockFailure(toolUseId, content) =>
+// // TODO: how to map fail case?
+// ???
+// }
+// }
+
+ private def singleTextMessage(content: ContentBlocks): String =
+ concatenateMessages(content.blocks.collect { case TextBlock(text) => text })
+
private def concatenateMessages(messageContent: Seq[String]): String =
messageContent.mkString("\n")
diff --git a/anthropic-client/src/test/scala/io/cequence/openaiscala/anthropic/JsonFormatsSpec.scala b/anthropic-client/src/test/scala/io/cequence/openaiscala/anthropic/JsonFormatsSpec.scala
index 98573cea..466c5b39 100644
--- a/anthropic-client/src/test/scala/io/cequence/openaiscala/anthropic/JsonFormatsSpec.scala
+++ b/anthropic-client/src/test/scala/io/cequence/openaiscala/anthropic/JsonFormatsSpec.scala
@@ -2,17 +2,23 @@ package io.cequence.openaiscala.anthropic
import io.cequence.openaiscala.anthropic.JsonFormatsSpec.JsonPrintMode
import io.cequence.openaiscala.anthropic.JsonFormatsSpec.JsonPrintMode.{Compact, Pretty}
-import io.cequence.openaiscala.anthropic.domain.Content.ContentBlock.{ImageBlock, TextBlock}
-import io.cequence.openaiscala.anthropic.domain.Message
+import io.cequence.openaiscala.anthropic.domain.Content.ContentBlock.{
+ ImageBlock,
+ TextBlock,
+ ToolUseBlock
+}
+import io.cequence.openaiscala.anthropic.domain.Content.{ContentBlock, ContentBlocks}
+import io.cequence.openaiscala.anthropic.domain.{ChatRole, Message, ToolSpec}
import io.cequence.openaiscala.anthropic.domain.Message.{
AssistantMessage,
AssistantMessageContent,
UserMessage,
UserMessageContent
}
+import io.cequence.openaiscala.anthropic.domain.response.CreateMessageResponse
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
-import play.api.libs.json.{Format, Json}
+import play.api.libs.json.{Format, Json, Reads, Writes}
object JsonFormatsSpec {
sealed trait JsonPrintMode
@@ -53,6 +59,67 @@ class JsonFormatsSpec extends AnyWordSpecLike with Matchers with JsonFormats {
testCodec[Message](assistantMessage, json)
}
+ "deserialize a tool_use content block" in {
+ val json =
+ """ {
+ | "type": "tool_use",
+ | "id": "toolu_01A09q90qw90lq917835lq9",
+ | "name": "get_weather",
+ | "input": {"location": "San Francisco, CA", "unit": "celsius"}
+ | }""".stripMargin
+
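+      // note: StringAnyMapFormat keeps leaf values as their raw JSON rendering,
+      // hence the escaped quotes in the expected input map below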
+ val toolUseBlock = ToolUseBlock(
+ id = "toolu_01A09q90qw90lq917835lq9",
+ name = "get_weather",
+ input = Map(
+ "location" -> "\"San Francisco, CA\"",
+ "unit" -> "\"celsius\""
+ )
+ )
+ testDeserialization[ContentBlock](json, toolUseBlock)
+ }
+
+ // TODO: add deserialization tests for:
+ // 1. ToolUseBlock - success - flat content
+ // 2. ToolUseBlock - success - TextBlock content
+ // 3. ToolUseBlock - failure - flat content
+ // 4. ToolUseBlock - failure - TextBlock content
+
+ val expectedToolSpecJson =
+ """{
+ | "name" : "get_stock_price",
+ | "description" : "Get the current stock price for a given ticker symbol.",
+ | "input_schema" : {
+ | "type" : "object",
+ | "properties" : {
+ | "ticker" : {
+ | "type" : "string",
+ | "description" : "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ | }
+ | },
+ | "required" : [ "ticker" ]
+ | }
+ |}""".stripMargin
+
+ "serialize tools" in {
+ val toolSpec = ToolSpec(
+ name = "get_stock_price",
+ description = Some("Get the current stock price for a given ticker symbol."),
+ inputSchema = Map(
+ "type" -> "object",
+ "properties" -> Map(
+ "ticker" -> Map(
+ "type" -> "string",
+ "description" -> "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ )
+ ),
+ "required" -> Seq("ticker")
+ )
+ )
+
+ testSerialization(toolSpec, expectedToolSpecJson, Pretty)
+ }
+
val expectedImageContentJson =
"""{
| "role" : "user",
@@ -72,6 +139,55 @@ class JsonFormatsSpec extends AnyWordSpecLike with Matchers with JsonFormats {
testCodec[Message](userMessage, expectedImageContentJson, Pretty)
}
+ val createToolMessageResponseJson =
+ """{
+ | "id": "msg_01Aq9w938a90dw8q",
+ | "model": "claude-3-opus-20240229",
+ | "stop_reason": "tool_use",
+ | "role": "assistant",
+ | "content": [
+ | {
+ | "type": "text",
+ | "text": "I need to use the get_weather, and the user wants SF, which is likely San Francisco, CA."
+ | },
+ | {
+ | "type": "tool_use",
+ | "id": "toolu_01A09q90qw90lq917835lq9",
+ | "name": "get_weather",
+ | "input": {"location": "San Francisco, CA", "unit": "celsius"}
+ | }
+ | ]
+ |}""".stripMargin
+
+ "deserialize tool use content block" in {
+ val toolUseResponse = CreateMessageResponse(
+ id = "msg_01Aq9w938a90dw8q",
+ role = ChatRole.Assistant,
+ content = ContentBlocks(
+ Seq(
+          // TODO: check whether this text should end up in the tool_use block's description
+ TextBlock(
+ "I need to use the get_weather, and the user wants SF, which is likely San Francisco, CA."
+ ),
+ ToolUseBlock(
+ id = "toolu_01A09q90qw90lq917835lq9",
+ name = "get_weather",
+ input = Map(
+ "location" -> "\"San Francisco, CA\"",
+ "unit" -> "\"celsius\""
+ )
+ )
+ )
+ ),
+ model = "claude-3-opus-20240229",
+ stop_reason = Some("tool_use"),
+ stop_sequence = None,
+ usage = None
+ )
+ testDeserialization(createToolMessageResponseJson, toolUseResponse)
+
+ }
+
}
private def testCodec[A](
@@ -80,6 +196,17 @@ class JsonFormatsSpec extends AnyWordSpecLike with Matchers with JsonFormats {
printMode: JsonPrintMode = Compact
)(
implicit format: Format[A]
+ ): Unit = {
+ testSerialization(value, json, printMode)
+ testDeserialization(json, value)
+ }
+
+ private def testSerialization[A](
+ value: A,
+ json: String,
+ printMode: JsonPrintMode = Compact
+ )(
+ implicit writes: Writes[A]
): Unit = {
val jsValue = Json.toJson(value)
val serialized = printMode match {
@@ -87,7 +214,14 @@ class JsonFormatsSpec extends AnyWordSpecLike with Matchers with JsonFormats {
case Pretty => Json.prettyPrint(jsValue)
}
serialized shouldBe json
+ }
+ private def testDeserialization[A](
+ json: String,
+ value: A
+ )(
+ implicit format: Reads[A]
+ ): Unit = {
Json.parse(json).as[A] shouldBe value
}
diff --git a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIChatToolCompletionStreamedServiceExtra.scala b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIChatToolCompletionStreamedServiceExtra.scala
new file mode 100644
index 00000000..18918589
--- /dev/null
+++ b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIChatToolCompletionStreamedServiceExtra.scala
@@ -0,0 +1,35 @@
+package io.cequence.openaiscala.service
+
+import akka.NotUsed
+import akka.stream.scaladsl.Source
+import io.cequence.openaiscala.domain.BaseMessage
+import io.cequence.openaiscala.domain.response.ChatCompletionChunkResponse
+import io.cequence.openaiscala.domain.settings.CreateChatCompletionSettings
+
+/**
+ * Service that offers ONLY a streamed version of the OpenAI chat tool completion endpoint.
+ *
+ * @since March
+ * 2024
+ */
+trait OpenAIChatToolCompletionStreamedServiceExtra
+ extends OpenAIServiceConsts
+ with CloseableService {
+
+ /**
+   * Creates a tool completion for the chat message(s) with streamed results.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param settings
+ * @return
+ * chat completion response
+ *
+ * @see
+ * OpenAI Doc
+ */
+ def createChatToolCompletionStreamed(
+ messages: Seq[BaseMessage],
+ settings: CreateChatCompletionSettings = DefaultSettings.CreateChatCompletion
+ ): Source[ChatCompletionChunkResponse, NotUsed]
+}
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala
index f3c7211c..ba3042d4 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala
@@ -340,6 +340,43 @@ trait WSRequestHelper extends HasWSClient {
client.url(url)
}
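+
+  /**
+   * A POST variant that also sends the given HTTP headers (introduced for Anthropic's
+   * `anthropic-beta` header).
+   */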
+ protected def execPOSTWithStatusAndHeaders(
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ bodyParams: Seq[(PT, Option[JsValue])] = Nil,
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes,
+ headers: Seq[(String, String)] = Nil
+ ): Future[RichJsResponse] = {
+ val request = getWSRequestOptionalWithHeaders(
+ Some(endPoint),
+ endPointParam,
+ toStringParams(params),
+ headers
+ )
+ val bodyParamsX = bodyParams.collect { case (fieldName, Some(jsValue)) =>
+ (fieldName.toString, jsValue)
+ }
+
+ execPOSTJsonAux(
+ request,
+ JsObject(bodyParamsX),
+ Some(endPoint),
+ acceptableStatusCodes
+ )
+ }
+
+ protected def getWSRequestOptionalWithHeaders(
+ endPoint: Option[PEP],
+ endPointParam: Option[String],
+ params: Seq[(String, Option[Any])],
+ headers: Seq[(String, String)]
+ ) = {
+ val paramsString = paramsOptionalAsString(params)
+ val url = createUrl(endPoint, endPointParam) + paramsString
+ client.url(url).addHttpHeaders(headers: _*)
+ }
+
private def execRequestAux[T](
responseConverter: ResponseConverters.ResponseConverter[T]
)(
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala b/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala
index c25f7af3..2c3c99c1 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala
@@ -6,6 +6,7 @@ object Retryable {
t: OpenAIScalaClientException
): Option[OpenAIScalaClientException] = Some(t).filter(apply)
+ // TODO: consider retrying for Anthropic
def apply(t: OpenAIScalaClientException): Boolean = t match {
// we retry on these
case _: OpenAIScalaClientTimeoutException => true
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIChatToolCompletionService.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIChatToolCompletionService.scala
new file mode 100644
index 00000000..341d7e1e
--- /dev/null
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIChatToolCompletionService.scala
@@ -0,0 +1,37 @@
+package io.cequence.openaiscala.service
+
+import io.cequence.openaiscala.domain.{BaseMessage, ToolSpec}
+import io.cequence.openaiscala.domain.response.ChatToolCompletionResponse
+import io.cequence.openaiscala.domain.settings.CreateChatCompletionSettings
+
+import scala.concurrent.Future
+
+trait OpenAIChatToolCompletionService extends OpenAIServiceConsts with CloseableService {
+
+ /**
+ * Creates a model response for the given chat conversation expecting a tool call.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param tools
+ * A list of tools the model may call. Currently, only functions are supported as a tool.
+ * Use this to provide a list of functions the model may generate JSON inputs for.
+ * @param responseToolChoice
+ * Controls which (if any) function/tool is called by the model. Specifying a particular
+ * function forces the model to call that function (must be listed in `tools`). Otherwise,
+ * the default "auto" mode is used where the model can pick between generating a message or
+ * calling a function.
+ * @param settings
+ * @return
+ * chat completion response
+ * @see
+ * OpenAI Doc
+ */
+ def createChatToolCompletion(
+ messages: Seq[BaseMessage],
+ tools: Seq[ToolSpec],
+ responseToolChoice: Option[String] = None,
+ settings: CreateChatCompletionSettings = DefaultSettings.CreateChatToolCompletion
+ ): Future[ChatToolCompletionResponse]
+
+}
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala
index d67c8372..441b78d5 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala
@@ -50,7 +50,7 @@ import scala.concurrent.Future
* @since Jan
* 2023
*/
-trait OpenAIService extends OpenAICoreService {
+trait OpenAIService extends OpenAICoreService with OpenAIChatToolCompletionService {
/**
* Retrieves a model instance, providing basic information about the model such as the owner
@@ -93,32 +93,6 @@ trait OpenAIService extends OpenAICoreService {
settings: CreateChatCompletionSettings = DefaultSettings.CreateChatFunCompletion
): Future[ChatFunCompletionResponse]
- /**
- * Creates a model response for the given chat conversation expecting a tool call.
- *
- * @param messages
- * A list of messages comprising the conversation so far.
- * @param tools
- * A list of tools the model may call. Currently, only functions are supported as a tool.
- * Use this to provide a list of functions the model may generate JSON inputs for.
- * @param responseToolChoice
- * Controls which (if any) function/tool is called by the model. Specifying a particular
- * function forces the model to call that function (must be listed in `tools`). Otherwise,
- * the default "auto" mode is used where the model can pick between generating a message or
- * calling a function.
- * @param settings
- * @return
- * chat completion response
- * @see
- * OpenAI Doc
- */
- def createChatToolCompletion(
- messages: Seq[BaseMessage],
- tools: Seq[ToolSpec],
- responseToolChoice: Option[String] = None,
- settings: CreateChatCompletionSettings = DefaultSettings.CreateChatToolCompletion
- ): Future[ChatToolCompletionResponse]
-
/**
* Creates a new edit for the provided input, instruction, and parameters.
*
diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatToolCompletion.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatToolCompletion.scala
new file mode 100644
index 00000000..daf5a4f3
--- /dev/null
+++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatToolCompletion.scala
@@ -0,0 +1,52 @@
+package io.cequence.openaiscala.examples.nonopenai
+
+import io.cequence.openaiscala.anthropic.service.AnthropicServiceFactory
+import io.cequence.openaiscala.domain.{
+ FunctionSpec,
+ NonOpenAIModelId,
+ SystemMessage,
+ UserMessage
+}
+import io.cequence.openaiscala.domain.settings.CreateChatCompletionSettings
+import io.cequence.openaiscala.examples.ExampleBase
+import io.cequence.openaiscala.service.OpenAIChatToolCompletionService
+
+import scala.concurrent.Future
+
+object AnthropicCreateChatToolCompletion extends ExampleBase[OpenAIChatToolCompletionService] {
+
+ override protected val service: OpenAIChatToolCompletionService =
+ AnthropicServiceFactory.asOpenAIChatToolCompletionService()
+
+ private val messages = Seq(
+ SystemMessage("You are a helpful assistant."),
+ UserMessage("What's the S&P 500 as of today?")
+ )
+
+ override protected def run: Future[_] =
+ service.createChatToolCompletion(
+ messages = messages,
+ settings = CreateChatCompletionSettings(NonOpenAIModelId.claude_3_haiku_20240307),
+ tools = Seq(
+ FunctionSpec(
+ name = "get_stock_price",
+ description = Some("Get the current stock price of a given company"),
+ parameters = Map(
+ "type" -> "object",
+ "properties" -> Map(
+ "company" -> Map(
+ "type" -> "string",
+ "description" -> "The company name, e.g. Apple Inc."
+ )
+ ),
+ "required" -> Seq("company")
+ )
+ )
+ )
+ )
+}
diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessage.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessage.scala
index f4d66067..b6e616bd 100644
--- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessage.scala
+++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessage.scala
@@ -22,6 +22,7 @@ object AnthropicCreateMessage extends ExampleBase[AnthropicService] {
service
.createMessage(
messages,
+ systemPrompt = None,
settings = AnthropicCreateMessageSettings(
model = NonOpenAIModelId.claude_3_haiku_20240307,
max_tokens = 4096
diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageStreamed.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageStreamed.scala
index df1f4f7f..3c4219a2 100644
--- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageStreamed.scala
+++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageStreamed.scala
@@ -21,6 +21,7 @@ object AnthropicCreateMessageStreamed extends ExampleBase[AnthropicService] {
service
.createMessageStreamed(
messages,
+ systemPrompt = None,
settings = AnthropicCreateMessageSettings(
model = NonOpenAIModelId.claude_3_haiku_20240307,
max_tokens = 4096
diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageWithImage.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageWithImage.scala
index 7e293af8..51c63a95 100644
--- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageWithImage.scala
+++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateMessageWithImage.scala
@@ -42,6 +42,7 @@ object AnthropicCreateMessageWithImage extends ExampleBase[AnthropicService] {
service
.createMessage(
messages,
+ systemPrompt = None,
settings = AnthropicCreateMessageSettings(
model = NonOpenAIModelId.claude_3_opus_20240229,
max_tokens = 4096