From 1eee9d437e4bbceb5d38aaaef77670e90e28babf Mon Sep 17 00:00:00 2001 From: Hadley Wickham Date: Wed, 18 Dec 2024 15:26:18 -0600 Subject: [PATCH] Improve docs with an eye to passing initial CRAN checks --- R/chat.R | 5 +++++ R/content.R | 30 +++++++++++++++++++----------- R/interpolate.R | 1 + R/provider-azure.R | 5 +++++ R/provider-bedrock.R | 5 +++++ R/provider-claude.R | 3 +++ R/provider-databricks.R | 5 +++++ R/provider-gemini.R | 5 +++++ R/provider-github.R | 6 ++++++ R/provider-groq.R | 20 +++++++++++++------- R/provider-ollama.R | 7 +++++++ R/provider-openai.R | 2 ++ R/provider-perplexity.R | 25 +++++++++++++++---------- R/provider-vllm.R | 5 +++++ R/provider.R | 3 +++ R/shiny.R | 6 ++++++ R/tokens.R | 3 +++ R/tools-def.R | 1 + R/turns.R | 3 +++ R/types.R | 4 ++++ R/utils-S7.R | 2 +- man/Chat.Rd | 9 +++++++++ man/Content.Rd | 33 ++++++++++++++++++++++++--------- man/Provider.Rd | 8 +++++++- man/Turn.Rd | 6 ++++++ man/Type.Rd | 9 ++++++++- man/chat_azure.Rd | 6 ++++++ man/chat_bedrock.Rd | 6 ++++++ man/chat_claude.Rd | 6 ++++++ man/chat_databricks.Rd | 6 ++++++ man/chat_gemini.Rd | 6 ++++++ man/chat_github.Rd | 6 ++++++ man/chat_groq.Rd | 9 +++++++++ man/chat_ollama.Rd | 9 +++++++++ man/chat_openai.Rd | 2 ++ man/chat_perplexity.Rd | 6 ++++++ man/chat_vllm.Rd | 15 ++++++++------- man/elmer-package.Rd | 4 ++-- man/interpolate.Rd | 3 +++ man/live_console.Rd | 7 +++++++ man/token_usage.Rd | 6 ++++++ man/tool.Rd | 3 +++ 42 files changed, 262 insertions(+), 49 deletions(-) diff --git a/R/chat.R b/R/chat.R index 10ab5d96..0e35eaaa 100644 --- a/R/chat.R +++ b/R/chat.R @@ -13,6 +13,11 @@ NULL #' #' You should generally not create this object yourself, #' but instead call [chat_openai()] or friends instead. +#' +#' @return A Chat object +#' @examplesIf elmer:::openai_key_exists() +#' chat <- chat_openai(echo = TRUE) +#' chat$chat("Tell me a funny joke") Chat <- R6::R6Class("Chat", public = list( #' @param provider A provider object. 
diff --git a/R/content.R b/R/content.R index bd830921..c9f32006 100644 --- a/R/content.R +++ b/R/content.R @@ -2,11 +2,11 @@ NULL #' Format contents into a textual representation -#' +#' #' @description #' These generic functions can be use to convert [Turn] contents or [Content] -#' objects into textual representations. -#' +#' objects into textual representations. +#' #' * `contents_text()` is the most minimal and only includes [ContentText] #' objects in the output. #' * `contents_markdown()` returns the text content (which it assumes to be @@ -15,7 +15,7 @@ NULL #' * `contents_html()` returns the text content, converted from markdown to #' HTML with [commonmark::markdown_html()], plus HTML representations of #' images and other content types. -#' +#' #' @examples #' turns <- list( #' Turn("user", contents = list( @@ -30,12 +30,12 @@ NULL #' if (rlang::is_installed("commonmark")) { #' contents_html(turns[[1]]) #' } -#' +#' #' @param content The [Turn] or [Content] object to be converted into text. #' `contents_markdown()` also accepts [Chat] instances to turn the entire #' conversation history into markdown text. #' @param ... Additional arguments passed to methods. -#' +#' #' @return A string of text, markdown or HTML. #' @export contents_text <- new_generic("contents_text", "content") @@ -52,12 +52,14 @@ contents_markdown <- new_generic("contents_markdown", "content") #' Content types received from and sent to a chatbot #' #' @description +#' Use these functions if you're writing a package that extends elmer and need +#' to customise methods for various types of content. For normal use, see +#' [content_image_url()] and friends. +#' #' elmer abstracts away differences in the way that different [Provider]s #' represent various types of content, allowing you to more easily write -#' code that works with any chatbot. 
-#' -#' This set of classes represents the various types of content that can be -#' sent to and received from a provider: +#' code that works with any chatbot. This set of classes represents types of +#' content that can be either sent to or received from a provider: #' #' * `ContentText`: simple text (often in markdown format). This is the only #' type of content that can be streamed live as it's received. @@ -70,6 +72,12 @@ contents_markdown <- new_generic("contents_markdown", "content") #' * `ContentToolResult`: the result of calling the tool (sent by the user). #' #' @export +#' @return S7 objects that all inherit from `Content` +#' @examples +#' Content() +#' ContentText("Tell me a joke") +#' ContentImageRemote("https://www.r-project.org/Rlogo.png") +#' ContentToolRequest(id = "abc", name = "mean", arguments = list(x = 1:5)) Content <- new_class("Content") method(contents_text, Content) <- function(content) { @@ -128,7 +136,7 @@ ContentImageRemote <- new_class( parent = Content, properties = list( url = prop_string(), - detail = prop_string() + detail = prop_string(default = "") ) ) method(format, ContentImageRemote) <- function(x, ...) { diff --git a/R/interpolate.R b/R/interpolate.R index 48160ca7..fb2fa21d 100644 --- a/R/interpolate.R +++ b/R/interpolate.R @@ -13,6 +13,7 @@ #' @param .envir Environment to evaluate `...` expressions in. Used when #' wrapping in another function. See `vignette("wrappers", package = "glue")` #' for more details. +#' @return A \{glue\} string. #' @export #' @examples #' joke <- "You're a cool dude who loves to make jokes. Tell me a joke about {{topic}}."
diff --git a/R/provider-azure.R b/R/provider-azure.R index 97bdc369..7b92b82b 100644 --- a/R/provider-azure.R +++ b/R/provider-azure.R @@ -24,6 +24,11 @@ NULL #' @inheritParams chat_openai #' @inherit chat_openai return #' @export +#' @examples +#' \dontrun{ +#' chat <- chat_azure(deployment_id = "gpt-4o-mini") +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_azure <- function(endpoint = azure_endpoint(), deployment_id, api_version = NULL, diff --git a/R/provider-bedrock.R b/R/provider-bedrock.R index 9a779c23..5f88bdf5 100644 --- a/R/provider-bedrock.R +++ b/R/provider-bedrock.R @@ -21,6 +21,11 @@ NULL #' @inherit chat_openai return #' @family chatbots #' @export +#' @examples +#' \dontrun{ +#' chat <- chat_bedrock() +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_bedrock <- function(system_prompt = NULL, turns = NULL, model = NULL, diff --git a/R/provider-claude.R b/R/provider-claude.R index 39a9697f..25ccd840 100644 --- a/R/provider-claude.R +++ b/R/provider-claude.R @@ -26,6 +26,9 @@ NULL #' @param max_tokens Maximum number of tokens to generate before stopping. 
#' @family chatbots #' @export +#' @examplesIf elmer:::anthropic_key_exists() +#' chat <- chat_claude() +#' chat$chat("Tell me three jokes about statisticians") chat_claude <- function(system_prompt = NULL, turns = NULL, max_tokens = 4096, diff --git a/R/provider-databricks.R b/R/provider-databricks.R index 4439c9bf..759ebcd7 100644 --- a/R/provider-databricks.R +++ b/R/provider-databricks.R @@ -40,6 +40,11 @@ #' @inheritParams chat_openai #' @inherit chat_openai return #' @export +#' @examples +#' \dontrun{ +#' chat <- chat_databricks() +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_databricks <- function(workspace = databricks_workspace(), system_prompt = NULL, turns = NULL, diff --git a/R/provider-gemini.R b/R/provider-gemini.R index b84a2412..a681a12e 100644 --- a/R/provider-gemini.R +++ b/R/provider-gemini.R @@ -18,6 +18,11 @@ NULL #' @inherit chat_openai return #' @family chatbots #' @export +#' @examples +#' \dontrun{ +#' chat <- chat_gemini() +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_gemini <- function(system_prompt = NULL, turns = NULL, base_url = "https://generativelanguage.googleapis.com/v1beta/", diff --git a/R/provider-github.R b/R/provider-github.R index 2e7f8a1c..ad2c473a 100644 --- a/R/provider-github.R +++ b/R/provider-github.R @@ -18,6 +18,11 @@ #' @export #' @inheritParams chat_openai #' @inherit chat_openai return +#' @examples +#' \dontrun{ +#' chat <- chat_github() +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_github <- function(system_prompt = NULL, turns = NULL, base_url = "https://models.inference.ai.azure.com/", @@ -30,6 +35,7 @@ chat_github <- function(system_prompt = NULL, check_installed("gitcreds") model <- set_default(model, "gpt-4o") + echo <- check_echo(echo) chat_openai( system_prompt = system_prompt, diff --git a/R/provider-groq.R b/R/provider-groq.R index 84b5b204..40fd15b3 100644 --- a/R/provider-groq.R +++ b/R/provider-groq.R @@ -14,14 +14,20 @@ NULL #' 
@export #' @family chatbots #' @inheritParams chat_openai +#' @inherit chat_openai return +#' @examples +#' \dontrun{ +#' chat <- chat_groq() +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_groq <- function(system_prompt = NULL, - turns = NULL, - base_url = "https://api.groq.com/openai/v1", - api_key = groq_key(), - model = NULL, - seed = NULL, - api_args = list(), - echo = NULL) { + turns = NULL, + base_url = "https://api.groq.com/openai/v1", + api_key = groq_key(), + model = NULL, + seed = NULL, + api_args = list(), + echo = NULL) { turns <- normalize_turns(turns, system_prompt) model <- set_default(model, "llama3-8b-8192") diff --git a/R/provider-ollama.R b/R/provider-ollama.R index 88471f04..2e77670e 100644 --- a/R/provider-ollama.R +++ b/R/provider-ollama.R @@ -16,8 +16,14 @@ #' tried it with. #' #' @inheritParams chat_openai +#' @inherit chat_openai return #' @family chatbots #' @export +#' @examples +#' \dontrun{ +#' chat <- chat_ollama(model = "llama3.2") +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_ollama <- function(system_prompt = NULL, turns = NULL, base_url = "http://localhost:11434", @@ -36,6 +42,7 @@ chat_ollama <- function(system_prompt = NULL, i = "Locally installed models: {.str {models}}." )) } + echo <- check_echo(echo) chat_openai( system_prompt = system_prompt, diff --git a/R/provider-openai.R b/R/provider-openai.R index f73f48a2..6cc3e853 100644 --- a/R/provider-openai.R +++ b/R/provider-openai.R @@ -49,6 +49,8 @@ NULL #' What is the difference between a tibble and a data frame? #' Answer with a bulleted list #' ") + +#' chat$chat("Tell me three funny jokes about statisticians") chat_openai <- function(system_prompt = NULL, turns = NULL, base_url = "https://api.openai.com/v1", diff --git a/R/provider-perplexity.R b/R/provider-perplexity.R index ce6a365a..ce3bd64a 100644 --- a/R/provider-perplexity.R +++ b/R/provider-perplexity.R @@ -3,9 +3,9 @@ #' @description #' Sign up at .
#' -#' Perplexity AI is a platform for running LLMs that are capable of -#' searching the web in real-time to help them answer questions with -#' information that may not have been available when the model was +#' Perplexity AI is a platform for running LLMs that are capable of +#' searching the web in real-time to help them answer questions with +#' information that may not have been available when the model was #' trained. #' #' This function is a lightweight wrapper around [chat_openai()] with @@ -18,14 +18,19 @@ #' variable. #' @inheritParams chat_openai #' @inherit chat_openai return +#' @examples +#' \dontrun{ +#' chat <- chat_perplexity() +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_perplexity <- function(system_prompt = NULL, - turns = NULL, - base_url = "https://api.perplexity.ai/", - api_key = perplexity_key(), - model = NULL, - seed = NULL, - api_args = list(), - echo = NULL) { + turns = NULL, + base_url = "https://api.perplexity.ai/", + api_key = perplexity_key(), + model = NULL, + seed = NULL, + api_args = list(), + echo = NULL) { model <- set_default(model, "llama-3.1-sonar-small-128k-online") diff --git a/R/provider-vllm.R b/R/provider-vllm.R index 1853a099..72327320 100644 --- a/R/provider-vllm.R +++ b/R/provider-vllm.R @@ -12,6 +12,11 @@ NULL #' @inheritParams chat_openai #' @inherit chat_openai return #' @export +#' @examples +#' \dontrun{ +#' chat <- chat_vllm("http://my-vllm.com") +#' chat$chat("Tell me three jokes about statisticians") +#' } chat_vllm <- function(base_url, system_prompt = NULL, turns = NULL, diff --git a/R/provider.R b/R/provider.R index 62b3eaa9..3b23dfd2 100644 --- a/R/provider.R +++ b/R/provider.R @@ -15,6 +15,9 @@ NULL #' @export #' @param base_url The base URL for the API. #' @param extra_args Arbitrary extra arguments to be included in the request body. +#' @return An S7 Provider object. 
+#' @examples +#' Provider(base_url = "https://cool-models.com") Provider <- new_class( "Provider", properties = list( diff --git a/R/shiny.R b/R/shiny.R index 0bd00df7..6fcc9c08 100644 --- a/R/shiny.R +++ b/R/shiny.R @@ -13,6 +13,12 @@ #' to use the console. #' @export #' @returns (Invisibly) The input `chat`. +#' @examples +#' \dontrun{ +#' chat <- chat_claude() +#' live_console(chat) +#' live_browser(chat) +#' } live_console <- function(chat, quiet = FALSE) { if (!is_interactive()) { cli::cli_abort("The chat console is only available in interactive mode.") diff --git a/R/tokens.R b/R/tokens.R index d07dbbb9..ec9fc3e4 100644 --- a/R/tokens.R +++ b/R/tokens.R @@ -22,6 +22,9 @@ tokens_reset <- function() { #' have sent and recieved in the current session. #' #' @export +#' @return A data frame +#' @examples +#' token_usage() token_usage <- function() { if (is.null(the$tokens)) { cli::cli_inform(c(x = "No recorded usage in this session")) diff --git a/R/tools-def.R b/R/tools-def.R index 10ff4ddf..876fea77 100644 --- a/R/tools-def.R +++ b/R/tools-def.R @@ -19,6 +19,7 @@ NULL #' function. Each element should be created by a [`type_*()`][type_boolean] #' function. #' @export +#' @return An S7 `ToolDef` object. #' @examplesIf elmer:::openai_key_exists() #' #' # First define the metadata that the model uses to figure out when to diff --git a/R/turns.R b/R/turns.R index f39d3c7c..90370b3c 100644 --- a/R/turns.R +++ b/R/turns.R @@ -27,6 +27,9 @@ NULL #' input and output tokens (respectively) used in this turn. Currently #' only recorded for assistant turns. 
#' @export +#' @return An S7 `Turn` object +#' @examples +#' Turn(role = "user", contents = list(ContentText("Hello, world!"))) Turn <- new_class( "Turn", properties = list( diff --git a/R/types.R b/R/types.R index 8710996b..9bc4a658 100644 --- a/R/types.R +++ b/R/types.R @@ -8,6 +8,10 @@ NULL #' #' @name Type #' @inheritParams type_boolean +#' @return S7 objects inheriting from `Type` +#' @examples +#' TypeBasic(type = "boolean") +#' TypeArray(items = TypeBasic(type = "boolean")) NULL Type <- new_class( diff --git a/R/utils-S7.R b/R/utils-S7.R index cb239196..ad683933 100644 --- a/R/utils-S7.R +++ b/R/utils-S7.R @@ -4,7 +4,7 @@ prop_string <- function(default = NULL, allow_null = FALSE, allow_na = FALSE) { new_property( class = if (allow_null) NULL | class_character else class_character, - default = default, + default = if (is.null(default) && !allow_null) quote(stop("Required")) else default, validator = function(value) { if (allow_null && is.null(value)) { return() diff --git a/man/Chat.Rd b/man/Chat.Rd index 0c745d20..de94a3e3 100644 --- a/man/Chat.Rd +++ b/man/Chat.Rd @@ -3,6 +3,9 @@ \name{Chat} \alias{Chat} \title{A chat} +\value{ +A Chat object +} \description{ A \code{Chat} is an sequence of sequence of user and assistant \link{Turn}s sent to a specific \link{Provider}. A \code{Chat} is a mutable R6 object that takes care of @@ -14,6 +17,12 @@ your behalf), it also takes care of the tool loop. You should generally not create this object yourself, but instead call \code{\link[=chat_openai]{chat_openai()}} or friends instead. 
} +\examples{ +\dontshow{if (elmer:::openai_key_exists()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +chat <- chat_openai(echo = TRUE) +chat$chat("Tell me a funny joke") +\dontshow{\}) # examplesIf} +} \section{Methods}{ \subsection{Public methods}{ \itemize{ diff --git a/man/Content.Rd b/man/Content.Rd index 886622af..9ad596ae 100644 --- a/man/Content.Rd +++ b/man/Content.Rd @@ -12,17 +12,21 @@ \usage{ Content() -ContentText(text = character(0)) +ContentText(text = stop("Required")) ContentImage() -ContentImageRemote(url = character(0), detail = character(0)) +ContentImageRemote(url = stop("Required"), detail = "") -ContentImageInline(type = character(0), data = NULL) +ContentImageInline(type = stop("Required"), data = NULL) -ContentToolRequest(id = character(0), name = character(0), arguments = list()) +ContentToolRequest( + id = stop("Required"), + name = stop("Required"), + arguments = list() +) -ContentToolResult(id = character(0), value = NULL, error = NULL) +ContentToolResult(id = stop("Required"), value = NULL, error = NULL) } \arguments{ \item{text}{A single string.} @@ -45,13 +49,18 @@ ContentToolResult(id = character(0), value = NULL, error = NULL) it succeeded, otherwise the error message, as a string. One of \code{value} and \code{error} will always be \code{NULL}.} } +\value{ +S7 objects that all inherit from \code{Content} +} \description{ +Use these functions if you're writing a package that extends elmer and need +to customise methods for various types of content. For normal use, see +\code{\link[=content_image_url]{content_image_url()}} and friends. + elmer abstracts away differences in the way that different \link{Provider}s represent various types of content, allowing you to more easily write -code that works with any chatbot. - -This set of classes represents the various types of content that can be -sent to and received from a provider: +code that works with any chatbot. 
This set of classes represents types of +content that can be either sent to or received from a provider: \itemize{ \item \code{ContentText}: simple text (often in markdown format). This is the only type of content that can be streamed live as it's received. @@ -64,3 +73,9 @@ assistant). \item \code{ContentToolResult}: the result of calling the tool (sent by the user). } } +\examples{ +Content() +ContentText("Tell me a joke") +ContentImageRemote("https://www.r-project.org/Rlogo.png") +ContentToolRequest(id = "abc", name = "mean", arguments = list(x = 1:5)) +} diff --git a/man/Provider.Rd b/man/Provider.Rd index 34e87f4a..7dfb63c0 100644 --- a/man/Provider.Rd +++ b/man/Provider.Rd @@ -4,13 +4,16 @@ \alias{Provider} \title{A chatbot provider} \usage{ -Provider(base_url = character(0), extra_args = list()) +Provider(base_url = stop("Required"), extra_args = list()) } \arguments{ \item{base_url}{The base URL for the API.} \item{extra_args}{Arbitrary extra arguments to be included in the request body.} } +\value{ +An S7 Provider object. +} \description{ A Provider captures the details of one chatbot service/API. This captures how the API works, not the details of the underlying large language model. @@ -22,3 +25,6 @@ To add support for a new backend, you will need to subclass \code{Provider} (adding any additional fields that your provider needs) and then implement the various generics that control the behavior of each provider. } +\examples{ +Provider(base_url = "https://cool-models.com") +} diff --git a/man/Turn.Rd b/man/Turn.Rd index d87ead79..a1a51adc 100644 --- a/man/Turn.Rd +++ b/man/Turn.Rd @@ -21,6 +21,9 @@ doesn't otherwise expose.} input and output tokens (respectively) used in this turn. Currently only recorded for assistant turns.} } +\value{ +An S7 \code{Turn} object +} \description{ Every conversation with a chatbot consists of pairs of user and assistant turns, corresponding to an HTTP request and response.
These turns are @@ -34,3 +37,6 @@ elmer will automatically handle the tool calling loop, which may result in any number of additional cycles. Learn more about tool calling in \code{vignette("tool-calling")}. } +\examples{ +Turn(role = "user", contents = list(ContentText("Hello, world!"))) +} diff --git a/man/Type.Rd b/man/Type.Rd index 182fc591..cd544de2 100644 --- a/man/Type.Rd +++ b/man/Type.Rd @@ -8,7 +8,7 @@ \alias{TypeObject} \title{Type definitions for function calling and structured data extraction.} \usage{ -TypeBasic(description = NULL, required = TRUE, type = character(0)) +TypeBasic(description = NULL, required = TRUE, type = stop("Required")) TypeEnum(description = NULL, required = TRUE, values = character(0)) @@ -45,7 +45,14 @@ Each element should be an S7 \code{Type} object.`} \item{additional_properties}{Can the object have arbitrary additional properties that are not explicitly listed? Only supported by Claude.} } +\value{ +S7 objects inheriting from \code{Type} +} \description{ These S7 classes are provided for use by package devlopers who are extending elmer. In every day use, use \code{\link[=type_boolean]{type_boolean()}} and friends. } +\examples{ +TypeBasic(type = "boolean") +TypeArray(items = TypeBasic(type = "boolean")) +} diff --git a/man/chat_azure.Rd b/man/chat_azure.Rd index 291629c9..8e1b8428 100644 --- a/man/chat_azure.Rd +++ b/man/chat_azure.Rd @@ -59,3 +59,9 @@ The \href{https://azure.microsoft.com/en-us/products/ai-services/openai-service} hosts a number of open source models as well as proprietary models from OpenAI. } +\examples{ +\dontrun{ +chat <- chat_azure(deployment_id = "gpt-4o-mini") +chat$chat("Tell me three jokes about statisticians") +} +} diff --git a/man/chat_bedrock.Rd b/man/chat_bedrock.Rd index 49c314a2..b47da0f1 100644 --- a/man/chat_bedrock.Rd +++ b/man/chat_bedrock.Rd @@ -48,6 +48,12 @@ does not work for you automatically, you'll need to follow the advice at \url{https://www.paws-r-sdk.com/#credentials}. 
In particular, if your org uses AWS SSO, you'll need to run \verb{aws sso login} at the terminal. } +\examples{ +\dontrun{ +chat <- chat_bedrock() +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_claude}()}, diff --git a/man/chat_claude.Rd b/man/chat_claude.Rd index 567f8959..1471cf0b 100644 --- a/man/chat_claude.Rd +++ b/man/chat_claude.Rd @@ -62,6 +62,12 @@ To authenticate, we recommend saving your the \code{ANTHROPIC_API_KEY} env var in your \code{.Renviron} (which you can easily edit by calling \code{usethis::edit_r_environ()}). } +\examples{ +\dontshow{if (elmer:::anthropic_key_exists()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +chat <- chat_claude() +chat$chat("Tell me three jokes about statisticians") +\dontshow{\}) # examplesIf} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_databricks.Rd b/man/chat_databricks.Rd index a115b7d6..aac44efa 100644 --- a/man/chat_databricks.Rd +++ b/man/chat_databricks.Rd @@ -76,6 +76,12 @@ model. Specifically, it supports: } } } +\examples{ +\dontrun{ +chat <- chat_databricks() +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_gemini.Rd b/man/chat_gemini.Rd index 9ed54925..612eb916 100644 --- a/man/chat_gemini.Rd +++ b/man/chat_gemini.Rd @@ -53,6 +53,12 @@ To authenticate, we recommend saving your the \code{GOOGLE_API_KEY} env var in your \code{.Renviron} (which you can easily edit by calling \code{usethis::edit_r_environ()}). } +\examples{ +\dontrun{ +chat <- chat_gemini() +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_github.Rd b/man/chat_github.Rd index b2d828da..cb219f4f 100644 --- a/man/chat_github.Rd +++ b/man/chat_github.Rd @@ -62,6 +62,12 @@ be accepted into the beta access program. 
See This function is a lightweight wrapper around \code{\link[=chat_openai]{chat_openai()}} with the defaults tweaked for the GitHub model marketplace. } +\examples{ +\dontrun{ +chat <- chat_github() +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_groq.Rd b/man/chat_groq.Rd index f4a41009..881d7012 100644 --- a/man/chat_groq.Rd +++ b/man/chat_groq.Rd @@ -48,6 +48,9 @@ the console). Note this only affects the \code{chat()} method.} } +\value{ +A \link{Chat} object. +} \description{ Sign up at \url{https://groq.com}. @@ -56,6 +59,12 @@ the defaults tweaked for groq. It does not currently support structured data extraction. } +\examples{ +\dontrun{ +chat <- chat_groq() +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_ollama.Rd b/man/chat_ollama.Rd index 5048aeab..93fa2328 100644 --- a/man/chat_ollama.Rd +++ b/man/chat_ollama.Rd @@ -43,6 +43,9 @@ the console). Note this only affects the \code{chat()} method.} } +\value{ +A \link{Chat} object. +} \description{ To use \code{chat_ollama()} first download and install \href{https://ollama.com}{Ollama}. Then install some models from the command line, @@ -59,6 +62,12 @@ tried it with. } } } +\examples{ +\dontrun{ +chat <- chat_ollama(model = "llama3.2") +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_openai.Rd b/man/chat_openai.Rd index 3fdcb1f6..524df5ce 100644 --- a/man/chat_openai.Rd +++ b/man/chat_openai.Rd @@ -70,6 +70,8 @@ chat$chat(" What is the difference between a tibble and a data frame? 
Answer with a bulleted list ") + +chat$chat("Tell me three funny jokes about statisticians") \dontshow{\}) # examplesIf} } \seealso{ diff --git a/man/chat_perplexity.Rd b/man/chat_perplexity.Rd index 70d250ba..49dfcd7e 100644 --- a/man/chat_perplexity.Rd +++ b/man/chat_perplexity.Rd @@ -62,6 +62,12 @@ trained. This function is a lightweight wrapper around \code{\link[=chat_openai]{chat_openai()}} with the defaults tweaked for Perplexity AI. } +\examples{ +\dontrun{ +chat <- chat_perplexity() +chat$chat("Tell me three jokes about statisticians") +} +} \seealso{ Other chatbots: \code{\link{chat_bedrock}()}, diff --git a/man/chat_vllm.Rd b/man/chat_vllm.Rd index 54b080ba..267b9c00 100644 --- a/man/chat_vllm.Rd +++ b/man/chat_vllm.Rd @@ -20,14 +20,9 @@ chat_vllm( \item{system_prompt}{A system prompt to set the behavior of the assistant.} -\item{turns}{A list of turns to start the chat with (i.e., continuing a +\item{turns}{A list of \link{Turn}s to start the chat with (i.e., continuing a previous conversation). If not provided, the conversation begins from -scratch. Do not provide non-\code{NULL} values for both \code{turns} and -\code{system_prompt}. - -Each message in the list should be a named list with at least \code{role} -(usually \code{system}, \code{user}, or \code{assistant}, but \code{tool} is also possible). -Normally there is also a \code{content} field, which is a string.} +scratch.} \item{model}{The model to use for the chat. The default, \code{NULL}, will pick a reasonable default, and tell you about. We strongly recommend explicitly @@ -61,3 +56,9 @@ A \link{Chat} object. provides an efficient and convenient LLMs model server. You can use \code{chat_vllm()} to connect to endpoints powered by vLLM.
} +\examples{ +\dontrun{ +chat <- chat_vllm("http://my-vllm.com") +chat$chat("Tell me three jokes about statisticians") +} +} diff --git a/man/elmer-package.Rd b/man/elmer-package.Rd index c1a90135..07ba977b 100644 --- a/man/elmer-package.Rd +++ b/man/elmer-package.Rd @@ -4,11 +4,11 @@ \name{elmer-package} \alias{elmer} \alias{elmer-package} -\title{elmer: Call LLM APIs from R} +\title{elmer: Call Large Language Model APIs} \description{ \if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}} -A consistent interface for calling LLM APIs. Includes support for streaming. +A consistent interface for calling LLM (large language model) APIs that works across different providers including 'Claude', 'OpenAI' 'Azure' 'Bedrock' and 'Google' 'Gemini'. Supports streaming, async, tool calling, and structured data extraction. } \seealso{ Useful links: diff --git a/man/interpolate.Rd b/man/interpolate.Rd index c3703c8c..7234d121 100644 --- a/man/interpolate.Rd +++ b/man/interpolate.Rd @@ -22,6 +22,9 @@ for more details.} \item{path}{A path to a prompt file (often a \code{.md}).} } +\value{ +A \{glue\} string. +} \description{ These functions are lightweight wrappers around \href{https://glue.tidyverse.org/}{glue} that make it easier to interpolate diff --git a/man/live_console.Rd b/man/live_console.Rd index 48f4e59d..667ed787 100644 --- a/man/live_console.Rd +++ b/man/live_console.Rd @@ -27,3 +27,10 @@ to use the console.} Note that these functions will mutate the input \code{chat} object as you chat because your turns will be appended to the history. 
} +\examples{ +\dontrun{ +chat <- chat_claude() +live_console(chat) +live_browser(chat) +} +} diff --git a/man/token_usage.Rd b/man/token_usage.Rd index 8c4e1f0d..457c13c9 100644 --- a/man/token_usage.Rd +++ b/man/token_usage.Rd @@ -6,7 +6,13 @@ \usage{ token_usage() } +\value{ +A data frame +} \description{ Call this function to find out the cumulative number of tokens that you have sent and recieved in the current session. } +\examples{ +token_usage() +} diff --git a/man/tool.Rd b/man/tool.Rd index 29e09861..394abb13 100644 --- a/man/tool.Rd +++ b/man/tool.Rd @@ -18,6 +18,9 @@ function.} \item{.name}{The name of the function.} } +\value{ +An S7 \code{ToolDef} object. +} \description{ Define an R function for use by a chatbot. The function will always be run in the current R instance.