From 2331b463e0606cd4ee49ecb353b89b163da06d9e Mon Sep 17 00:00:00 2001
From: Angelo D'Ambrosio <a.dambrosioMD@gmail.com>
Date: Thu, 18 Jul 2024 18:59:47 +0200
Subject: [PATCH] refactor: rely on the llmR package for LLM interaction

Removed the LLM-related functions, since they are now provided by the llmR
package.
---
 DESCRIPTION                         |   7 +-
 NAMESPACE                           |   2 -
 R/LLM_calls.R                       | 473 ----------------------------
 R/data_management.R                 |   2 +-
 R/summarization.R                   |  21 +-
 man/entity_extractor.Rd             |   3 +-
 man/infer_agenda_from_transcript.Rd |   2 +-
 man/process_messages.Rd             |  43 ---
 man/prompt_llm.Rd                   |  55 ----
 man/speech_to_summary_workflow.Rd   |   2 +-
 man/summarise_full_meeting.Rd       |   4 +-
 man/summarise_transcript.Rd         |   2 +-
 man/use_azure_llm.Rd                |  41 ---
 man/use_custom_llm.Rd               |  38 ---
 man/use_openai_llm.Rd               |  30 --
 15 files changed, 24 insertions(+), 701 deletions(-)
 delete mode 100644 R/LLM_calls.R
 delete mode 100644 man/process_messages.Rd
 delete mode 100644 man/prompt_llm.Rd
 delete mode 100644 man/use_azure_llm.Rd
 delete mode 100644 man/use_custom_llm.Rd
 delete mode 100644 man/use_openai_llm.Rd

diff --git a/DESCRIPTION b/DESCRIPTION
index 2198a07..e54aa4f 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: minutemaker
 Title: GenAI-based meeting and conference minutes generator
-Version: 0.10.0
+Version: 0.11.0
 Authors@R: 
     person("Angelo", "D'Ambrosio", , "a.dambrosioMD@gmail.com", role = c("aut", "cre"),
            comment = c(ORCID = "0000-0002-2045-5155"))
@@ -11,6 +11,7 @@ Imports:
     dplyr (>= 1.1.4),
     httr (>= 1.4.7),
     jsonlite (>= 1.8.8),
+    llmR (>= 1.1.0),
    lubridate (>= 1.9.3),
     purrr (>= 1.0.2),
     readr (>= 2.1.4),
@@ -19,10 +20,12 @@ Imports:
     stringr (>= 1.5.1),
     styler (>= 1.10.2),
     tools (>= 4.3.2),
     vctrs (>= 0.6.5)
+Remotes:
+    github::bakaburg1/llmR
 Config/testthat/edition: 3
 Encoding: UTF-8
 Roxygen: list(markdown = TRUE)
-RoxygenNote: 7.3.1
+RoxygenNote: 7.3.2
 Suggests:
     av (>= 0.9.0),
     devtools (>= 2.4.5),
diff --git a/NAMESPACE b/NAMESPACE
index 440d251..c72e775 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -13,7 +13,6 @@ export(infer_agenda_from_transcript)
 export(merge_transcripts)
 export(parse_transcript_json)
 export(perform_speech_to_text)
-export(prompt_llm)
 export(run_in_terminal)
 export(set_prompts)
 export(speech_to_summary_workflow)
@@ -23,5 +22,4 @@ export(summarise_transcript)
 export(validate_agenda)
 import(dplyr)
 importFrom(stats,setNames)
-importFrom(utils,hasName)
 importFrom(utils,tail)
diff --git a/R/LLM_calls.R b/R/LLM_calls.R
deleted file mode 100644
index 8745236..0000000
--- a/R/LLM_calls.R
+++ /dev/null
@@ -1,473 +0,0 @@
-#' Process chat messages into standard format
-#'
-#' This function takes one or more (a list of) chat messages and processes them
-#' into a standard list format with role and content for each message to be fed
-#' to a large language model.
-#'
-#' The standard format is a list of chat messages with the following structure:
-#' message: `c(role = "system", content = "Welcome to the chat!")`
-#' list of messages: \code{list(
-#'   c(role = "system", content = "You are a useful AI assistant."),
-#'   c(role = "user", content = "Hi there!")
-#'   )}
-#' list format: \code{list(
-#'   list(role = "system", content = "You are a useful AI assistant."),
-#'   list(role = "user", content = "Hi there!")
-#'   )}
-#' list of lists format: \code{list(
-#'   list(
-#'     list(role = "system", content = "You are a useful AI assistant."),
-#'     list(role = "user", content = "Hi there!")
-#'   ),
-#'   list(
-#'     list(role = "system", content = "You are a useful AI assistant."),
-#'     list(role = "user", content = "Hi there!")
-#'   )
-#' )}
-#'
-#' @param messages A character vector or list of chat messages. It can be a
-#'   vector, a specifically structured list, or a list of both if the goal is
-#'   to have the API process multiple messages at once.
-#'
-#' @return A list of chat messages in standard format.
-#'
-#' @importFrom utils hasName
-#'
-process_messages <- function(messages) {
-
-  if (missing(messages) || is.null(messages) || length(messages) == 0) {
-    stop("User messages are required.")
-  }
-
-  # Assume that a single message is from the user
-  if (length(messages) == 1 &&
-      is.character(messages) &&
-      is.null(names(messages))) {
-    messages <- c(user = messages)
-  }
-
-  # Convert vector to list format
-  vector_to_list <- function(msg_vec) {
-
-    # Check if vector is in named format
-    check <- all(
-      names(msg_vec) %in%
-        c("system", "user", "assistant", "function")
-      , na.rm = TRUE)
-
-    check <- check && !is.null(names(msg_vec))
-
-    if (check) {
-
-      # Convert from vector to list format
-      msg_vec <- purrr::imap(msg_vec, function(msg, nm) {
-        list(role = nm, content = msg)
-      }) |> setNames(NULL)
-
-    } else {
-      stop("Invalid format for 'messages' vector.")
-    }
-  }
-
-  # Validate list format
-  validate_list_format <- function(msg_list) {
-
-    # Check if the message is in correct list format
-    check <- !purrr::every(msg_list, function(msg) {
-      vctrs::obj_is_list(msg) &&
-        hasName(msg, "role") &&
-        hasName(msg, "content") &&
-        msg$role %in% c("system", "user", "assistant", "function")
-    })
-
-    return(!check)
-  }
-
-  # Check if message is in a valid vector format
-  if (is.character(messages)) {
-    return(vector_to_list(messages))
-  }
-
-
-  if (vctrs::obj_is_list(messages)) {
-
-    # Check if valid list format
-    if (validate_list_format(messages)) {
-      return(messages)
-    }
-
-    # It turned out the API doesn't really support batch calls of
-    # multiple prompts
-
-    # # Check if list of vectors
-    # if (purrr::every(messages, is.character)) {
-    #
-    #   # Convert each to list
-    #   return(purrr::map(messages, vector_to_list))
-    #
-    # }
-    #
-    # # Check if list of lists
-    # if (purrr::every(messages, validate_list_format)) {
-    #
-    #   return(messages)
-    #
-    # }
-
-  }
-
-  stop("Message is neither a valid vector nor a valid list.")
-
-}
-
-#' Interrogate a Language Model
-#'
-#' This function sends requests to a specified language model provider (OpenAI,
-#' Azure, or a locally running LLM server) and returns the response. It handles
-#' rate limiting and retries the request if necessary, and also processes errors
-#' in the response.
-#'
-#' Users can provide their own models by writing a function with the following
-#' name pattern: `use_<provider>_llm`. See the existing functions using the
-#' ::: operator for examples.
-#'
-#' @param messages Messages to be sent to the language model.
-#' @param provider The provider of the language model. Defaults to "openai".
-#'   Other options are "azure" and "local".
-#' @param params Additional parameters for the language model request. Defaults
-#'   to a list with `temperature = 0`.
-#' @param force_json A boolean to force the response in JSON format. Default is
-#'   FALSE. Works only for OpenAI and Azure endpoints.
-#' @param log_request A boolean to log the request time. Can be set up globally
-#'   using the `minutemaker_log_requests` option, which defaults to TRUE.
-#' @param ... Additional arguments passed to the language model provider
-#'   functions.
-#'
-#' @return Returns the content of the message from the language model response.
-#'
-#' @export
-#'
-#' @examples
-#' \dontrun{
-#' response <- prompt_llm(
-#'   messages = c(user = "Hello there!"),
-#'   provider = "openai")
-#' }
-#'
-prompt_llm <- function(
-    messages = NULL,
-    provider = getOption("minutemaker_llm_provider"),
-    params = list(
-      temperature = 0
-    ),
-    force_json = FALSE,
-    log_request = getOption("minutemaker_log_requests", TRUE),
-    ...) {
-
-  messages <- process_messages(messages)
-
-  if (is.null(provider)) {
-    stop("Language model provider is not set. ",
-         "You can use the following option to set it globally:\n",
-         "minutemaker_llm_provider.")
-  }
-
-  if (log_request) {
-    check_and_install_dependencies("tictoc")
-  }
-
-  # Prepare the body of the request and merge with default
-  body <- purrr::list_modify(list(
-    temperature = 0
-  ), !!!params)
-
-  body$messages <- messages
-
-  # Force the LLM to answer in JSON format (not all models support this)
-  if (force_json) {
-    body$response_format <- list("type" = "json_object")
-  }
-
-  # Map provider to specific function
-  llm_fun <- paste0("use_", provider, "_llm")
-
-  if (!exists(llm_fun, mode = "function")) {
-    stop("Unsupported LLM provider.
-         You can set it project-wide using the minutemaker_llm_provider option.")
-  }
-
-  llm_fun <- get(llm_fun)
-
-  # Try to send the request
-  retry <- FALSE
-
-  while(!exists("response", inherits = FALSE) || retry) {
-
-    #message("Sending message to Azure GPT API.")
-    retry <- FALSE
-
-    if (log_request) tictoc::tic()
-    response <- llm_fun(body, ...)
-    if (log_request) elapsed <- tictoc::toc()
-
-    if (httr::status_code(response) == 429) {
-      warning("Rate limit exceeded. Waiting before retrying.",
-              immediate. = TRUE, call. = FALSE)
-
-      to_wait <- as.numeric(httr::headers(response)$`retry-after`)
-      message("Waiting for ", to_wait, " seconds.\n...")
-      Sys.sleep(to_wait)
-      message("Retrying...")
-      retry <- TRUE
-    }
-  }
-
-
-  # Check for errors in response
-  if (httr::http_error(response)) {
-    err_obj <- httr::content(response)$error
-
-    err_message <- if (is.character(err_obj)) {
-      err_obj
-    } else if (is.character(err_obj$message)) {
-      err_obj$message
-    } else {
-      httr::content(response)
-    }
-
-    stop("Error in LLM request: ", err_message)
-  }
-
-  # Return the response
-  parsed <- httr::content(response, as = "parsed", encoding = "UTF-8")
-
-  if (log_request) {
-    with(parsed$usage,
-         paste(
-           "Prompt tokens:", prompt_tokens,
-           "\nResponse tokens:", completion_tokens,
-           "\nGeneration speed:", paste(
-             signif(completion_tokens/(elapsed$toc - elapsed$tic), 3), "t/s"),
-           "\nTotal tokens:", total_tokens
-         )
-    ) |> message()
-  }
-
-  # Return the response
-  purrr::imap_chr(parsed$choices, \(ans, i) {
-    ans_content <- ans$message$content
-
-    # Manage the case when the answer is cut off due to exceeding the
-    # output token limit
-    if (ans$finish_reason == "length") {
-      i <- if (length(parsed$choices) > 1) paste0(" ", i, " ") else " "
-
-      warning("Answer", i, "exhausted the context window!")
-
-      file_name <- paste0("output_", Sys.time(), ".txt")
-
-      warning(
-        "Answer", i, "exhausted the context window!\n",
-        "The answer has been saved to a file: ", file_name
-      )
-
-      readr::write_lines(ans_content, file_name)
-
-      choice <- utils::menu(
-        c(
-          "Try to complete the answer",
-          "Keep the incomplete answer",
-          "Stop the process"),
-        title = "How do you want to proceed?"
-      )
-
-      if (choice == 1) {
-        # Ask the model to continue the answer
-        messages_new <- c(
-          messages,
-          list(list(
-            role = "assistant",
-            content = ans_content
-          )),
-          list(list(
-            role = "user",
-            content = "continue"
-          ))
-        )
-
-        ans_new <- prompt_llm(
-          messages_new, provider = provider, params = params,
-          force_json = force_json,
-          log_request = log_request, ...
-        )
-
-        return(paste0(ans_content, ans_new))
-      } else if (choice == 2) {
-        return(ans_content)
-      } else {
-        stop("The process has been stopped.")
-      }
-    } else ans_content
-  })
-}
-
-#' Use OpenAI Language Model
-#'
-#' Sends a request to the OpenAI API using the parameters in the `body`
-#' argument. It requires an API key and model identifier set in the R options.
-#'
-#' @param body The body of the request.
-#' @param model Model identifier for the OpenAI API. Obtained from R options.
-#' @param api_key API key for the OpenAI service. Obtained from R options.
-#' @param log_request A boolean to log the request time. Can be set up globally
-#'   using the `minutemaker_log_requests` option, which defaults to TRUE.
-#'
-#' @return The function returns the response from the OpenAI API.
-#'
-use_openai_llm <- function(
-    body,
-    model = getOption("minutemaker_openai_model_gpt"),
-    api_key = getOption("minutemaker_openai_api_key"),
-    log_request = getOption("minutemaker_log_requests", TRUE)
-) {
-
-  if (is.null(api_key) || is.null(model)) {
-    stop("OpenAI GPT model or API key are not set. ",
", - "Use the following options to set them:\n", - "minutemaker_openai_model_gpt, ", - "minutemaker_open_api_key options.") - } - - if (log_request) { - message("Interrogating OpenAI: ", model, "...") - } - - body$model = model - - # Prepare the request - httr::POST( - url = "https://api.openai.com/v1/chat/completions", - httr::add_headers( - `Content-Type` = "application/json", - `Authorization` = paste0("Bearer ", api_key) - ), - body = jsonlite::toJSON(body, auto_unbox = TRUE), - encode = "json" - ) - -} - -#' Use Azure Language Model -#' -#' Sends a request to the Azure API for language model completions using the -#' parameters in the `body` argument. This function requires specific Azure -#' configurations (deployment ID, resource name, API key, and API version) set -#' in the R options. -#' -#' @param body The body of the request. -#' @param deployment_id Azure deployment ID for the language model. Obtained -#' from R options. -#' @param resource_name Azure resource name. Obtained from R options. -#' @param api_key API key for the Azure language model service. Obtained from R -#' options. -#' @param api_version API version for the Azure language model service. Obtained -#' from R options. -#' @param log_request A boolean to log the request time. Can be set up globally -#' using the `minutemaker_log_requests` option, which defaults to TRUE. -#' -#' @return The function returns the response from the Azure API. -use_azure_llm <- function( - body, - deployment_id = getOption("minutemaker_azure_deployment_gpt"), - resource_name = getOption("minutemaker_azure_resource_gpt"), - api_key = getOption("minutemaker_azure_api_key_gpt"), - api_version = getOption("minutemaker_azure_api_version"), - log_request = getOption("minutemaker_log_requests", TRUE) -) { - - if (is.null(resource_name) || is.null(deployment_id) || - is.null(api_key) || is.null(api_version)) { - stop("Azure GPT resource name, deployment name,", - ", API key, or API version are not set. ", - "Use the following options to set them:\n", - "minutemaker_azure_deployment_gpt, ", - "minutemaker_azure_resource_gpt, ", - "minutemaker_azure_api_key_gpt, ", - "minutemaker_azure_api_version." - ) - } - - if (log_request) { - message( - "Interrogating Azure OpenAI: ", resource_name, "/", deployment_id, - " (", api_version, ")...") - } - - # Prepare the request - httr::POST( - url = paste0( - "https://", - resource_name, - ".openai.azure.com/openai/deployments/", - deployment_id, - "/chat/completions?api-version=", - api_version), - httr::add_headers(`Content-Type` = "application/json", `api-key` = api_key), - body = jsonlite::toJSON(body, auto_unbox = TRUE) - ) - -} - -#' Use Custom Language Model -#' -#' Sends a request to a custom (local or remote) language model endpoint -#' compatible with the OpenAi API specification, using the parameters in the -#' `body` argument. The user can provide an API key if required. -#' -#' @param body The body of the request. -#' @param endpoint The local endpoint for the language model service. Can be -#' obtained from R options. -#' @param model Model identifier for the custom API, if needed (some API have -#' one model per endpoint, some multiple ones). Obtained from R options. -#' @param api_key Optional API key for the custom language model services that -#' require it. Obtained from R options. -#' @param log_request A boolean to log the request time. Can be set up globally -#' using the `minutemaker_log_requests` option, which defaults to TRUE. 
-#'
-#' @return The function returns the response from the local language model
-#'   endpoint.
-use_custom_llm <- function(
-    body,
-    endpoint = getOption("minutemaker_custom_endpoint_gpt"),
-    model = getOption("minutemaker_custom_model_gpt", NULL),
-    api_key = getOption("minutemaker_custom_api_key"),
-    log_request = getOption("minutemaker_log_requests", TRUE)
-) {
-
-  if (is.null(endpoint)) {
-    stop("Local endpoint is not set. ",
-         "Use the following option to set it:\n",
-         "minutemaker_custom_endpoint_gpt"
-    )
-  }
-
-  if (log_request) {
-    message("Interrogating custom LLM: ", endpoint, "/", model, "...")
-  }
-
-  if (!is.null(model)) {
-    body$model = model
-  }
-
-  # Prepare the request
-  httr::POST(
-    url = endpoint,
-    httr::add_headers(
-      `Content-Type` = "application/json",
-      if (!is.null(api_key)) {
-        .headers = c(Authorization = paste0("Bearer ", api_key))
-      }),
-    body = jsonlite::toJSON(body, auto_unbox = TRUE)
-  )
-
-}
diff --git a/R/data_management.R b/R/data_management.R
index a5248fc..c0f6ac7 100644
--- a/R/data_management.R
+++ b/R/data_management.R
@@ -1100,7 +1100,7 @@ add_chat_transcript <- function(
 #' @param llm_provider A string indicating the LLM provider to use for the
 #'   summarization. See `summarise_transcript` for more details.
 #' @param extra_summarise_args Additional arguments passed to the
-#'   `prompt_llm` function. See `summarise_transcript` for more details.
+#'   `llmR::prompt_llm` function. See `summarise_transcript` for more details.
 #' @param summarization_window_size The size of the summarization window in
 #'   minutes if the "rolling" method is used. See `summarise_transcript` for
 #'   more details.
diff --git a/R/summarization.R b/R/summarization.R
index cd42a1f..0eb05fc 100644
--- a/R/summarization.R
+++ b/R/summarization.R
@@ -111,7 +111,7 @@ generate_recording_details <- function(
 #'   get_prompts("output_rolling_aggregation") prompts depending on the task.
 #' @param prompt_only If TRUE, only the prompt is returned, the LLM is not
 #'   interrogated. Default is FALSE.
-#' @param ... Additional arguments passed to the `prompt_llm` function,
+#' @param ... Additional arguments passed to the `llmR::prompt_llm` function,
 #'   such as the LLM provider.
 #'
 #' @return A summary of the transcript.
@@ -252,7 +252,7 @@ summarise_transcript <- function(
   }
 
   # Interrogate the LLM
-  prompt_llm(
+  llmR::prompt_llm(
     c(
       system = get_prompts("persona"),
       user = prompt),
@@ -280,7 +280,7 @@ summarise_transcript <- function(
     args = args
   )
 
-  prompt_llm(
+  llmR::prompt_llm(
     c(
       system = get_prompts("persona"),
       user = aggregation_prompt),
@@ -328,8 +328,8 @@ summarise_transcript <- function(
 #'   `summarise_transcript` for more details and run `get_prompts()` to see the
 #'   defaults. See `summarise_transcript` for more details.
 #' @param overwrite Whether to overwrite existing summaries. Default is FALSE.
-#' @param ... Additional arguments passed to `prompt_llm` function, such as
-#'   the LLM provider.
+#' @param ... Additional arguments passed to the `llmR::prompt_llm` function,
+#'   such as the LLM provider.
 #'
 #' @return The result tree of the meeting summary. Also saves the results in the
 #'   output file as side effect.
@@ -503,7 +503,7 @@ summarise_full_meeting <- function(
 #'   LLM context.
 #' @param output_file An optional file to save the results to. Default is NULL,
 #'   i.e., the results are not saved to a file.
-#' @param ... Additional arguments passed to the `prompt_llm` function.
+#' @param ... Additional arguments passed to the `llmR::prompt_llm` function.
 #'   Keep in mind that this function needs LLMs that manage long
 #'   context and produce valid JSON outputs. The `force_json` argument is
 #'   used with OpenAI-based LLMs but it's not accepted by other LLMs; therefore
@@ -686,7 +686,7 @@ infer_agenda_from_transcript <- function(
   }
 
   # Attempt to interrogate the LLM
-  result_json <- try(prompt_llm(
+  result_json <- try(llmR::prompt_llm(
     prompt_set, ...,
     force_json = TRUE
@@ -834,7 +834,7 @@ infer_agenda_from_transcript <- function(
     user = prompt
   )
 
-  result_json <- prompt_llm(
+  result_json <- llmR::prompt_llm(
     prompt_set, ...,
     force_json = TRUE
   )
@@ -877,7 +877,8 @@ infer_agenda_from_transcript <- function(
 #'   them.
 #' @param prompt_only If TRUE, only the prompt is returned, the LLM is not
 #'   interrogated. Default is FALSE.
-#' @param ... Additional arguments passed to the `prompt_llm` function.
+#' @param ... Additional arguments passed to the `llmR::prompt_llm`
+#'   function.
 #'
 #' @return A vector with the entities found in the text.
 #'
@@ -936,7 +937,7 @@ entity_extractor <- function(
     return(task)
   }
 
-  prompt_llm(
+  llmR::prompt_llm(
     c("system" = get_prompts("persona"), "user" = task),
     force_json = TRUE, ...) |>
     jsonlite::fromJSON() |>
diff --git a/man/entity_extractor.Rd b/man/entity_extractor.Rd
index 07fc1be..3026ab9 100644
--- a/man/entity_extractor.Rd
+++ b/man/entity_extractor.Rd
@@ -21,7 +21,8 @@ them.}
 \item{prompt_only}{If TRUE, only the prompt is returned, the LLM is not
 interrogated. Default is FALSE.}
 
-\item{...}{Additional arguments passed to the \code{prompt_llm} function.}
+\item{...}{Additional arguments passed to the \code{llmR::prompt_llm}
+function.}
 }
 \value{
 A vector with the entities found in the text.
diff --git a/man/infer_agenda_from_transcript.Rd b/man/infer_agenda_from_transcript.Rd
index e7e70a2..58dd95b 100644
--- a/man/infer_agenda_from_transcript.Rd
+++ b/man/infer_agenda_from_transcript.Rd
@@ -49,7 +49,7 @@ LLM context.}
 \item{output_file}{An optional file to save the results to. Default is NULL,
 i.e., the results are not saved to a file.}
 
-\item{...}{Additional arguments passed to the \code{prompt_llm} function.
+\item{...}{Additional arguments passed to the \code{llmR::prompt_llm} function.
 Keep in mind that this function needs LLMs that manage long
 context and produce valid JSON outputs. The \code{force_json} argument is
 used with OpenAI-based LLMs but it's not accepted by other LLMs; therefore
diff --git a/man/process_messages.Rd b/man/process_messages.Rd
deleted file mode 100644
index 8478326..0000000
--- a/man/process_messages.Rd
+++ /dev/null
@@ -1,43 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/LLM_calls.R
-\name{process_messages}
-\alias{process_messages}
-\title{Process chat messages into standard format}
-\usage{
-process_messages(messages)
-}
-\arguments{
-\item{messages}{A character vector or list of chat messages. It can be a
-vector, a specifically structured list, or a list of both if the goal is to
-have the API process multiple messages at once.}
-}
-\value{
-A list of chat messages in standard format.
-}
-\description{
-This function takes one or more (a list of) chat messages and processes them
-into a standard list format with role and content for each message to be fed
-to a large language model.
-}
-\details{
-The standard format is a list of chat messages with the following structure:
-message: \code{c(role = "system", content = "Welcome to the chat!")}
-list of messages: \code{list(
-  c(role = "system", content = "You are a useful AI assistant."),
-  c(role = "user", content = "Hi there!")
-  )}
-list format: \code{list(
-  list(role = "system", content = "You are a useful AI assistant."),
-  list(role = "user", content = "Hi there!")
-  )}
-list of lists format: \code{list(
-  list(
-    list(role = "system", content = "You are a useful AI assistant."),
-    list(role = "user", content = "Hi there!")
-  ),
-  list(
-    list(role = "system", content = "You are a useful AI assistant."),
-    list(role = "user", content = "Hi there!")
-  )
-  )}
-}
diff --git a/man/prompt_llm.Rd b/man/prompt_llm.Rd
deleted file mode 100644
index 471cb35..0000000
--- a/man/prompt_llm.Rd
+++ /dev/null
@@ -1,55 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/LLM_calls.R
-\name{prompt_llm}
-\alias{prompt_llm}
-\title{Interrogate a Language Model}
-\usage{
-prompt_llm(
-  messages = NULL,
-  provider = getOption("minutemaker_llm_provider"),
-  params = list(temperature = 0),
-  force_json = FALSE,
-  log_request = getOption("minutemaker_log_requests", TRUE),
-  ...
-)
-}
-\arguments{
-\item{messages}{Messages to be sent to the language model.}
-
-\item{provider}{The provider of the language model. Defaults to "openai".
-Other options are "azure" and "local".}
-
-\item{params}{Additional parameters for the language model request. Defaults
-to a list with \code{temperature = 0}.}
-
-\item{force_json}{A boolean to force the response in JSON format. Default is
-FALSE. Works only for OpenAI and Azure endpoints.}
-
-\item{log_request}{A boolean to log the request time. Can be set up globally
-using the \code{minutemaker_log_requests} option, which defaults to TRUE.}
-
-\item{...}{Additional arguments passed to the language model provider
-functions.}
-}
-\value{
-Returns the content of the message from the language model response.
-}
-\description{
-This function sends requests to a specified language model provider (OpenAI,
-Azure, or a locally running LLM server) and returns the response. It handles
-rate limiting and retries the request if necessary, and also processes errors
-in the response.
-}
-\details{
-Users can provide their own models by writing a function with the following
-name pattern: \verb{use_<provider>_llm}. See the existing functions using the
-::: operator for examples.
-}
-\examples{
-\dontrun{
-response <- prompt_llm(
-  messages = c(user = "Hello there!"),
-  provider = "openai")
- }
-
-}
diff --git a/man/speech_to_summary_workflow.Rd b/man/speech_to_summary_workflow.Rd
index 2700ffd..1496d54 100644
--- a/man/speech_to_summary_workflow.Rd
+++ b/man/speech_to_summary_workflow.Rd
@@ -184,7 +184,7 @@ defaults. See \code{summarise_transcript} for more details.}
 summarization. See \code{summarise_transcript} for more details.}
 
 \item{extra_summarise_args}{Additional arguments passed to the
-\code{prompt_llm} function. See \code{summarise_transcript} for more details.}
+\code{llmR::prompt_llm} function. See \code{summarise_transcript} for more details.}
 
 \item{summarization_window_size}{The size of the summarization window in
 minutes if the "rolling" method is used. See \code{summarise_transcript} for
diff --git a/man/summarise_full_meeting.Rd b/man/summarise_full_meeting.Rd
index e2ce52c..59d1e2b 100644
--- a/man/summarise_full_meeting.Rd
+++ b/man/summarise_full_meeting.Rd
@@ -68,8 +68,8 @@ defaults. See \code{summarise_transcript} for more details.}
 
 \item{overwrite}{Whether to overwrite existing summaries. Default is FALSE.}
 
-\item{...}{Additional arguments passed to \code{prompt_llm} function, such as
-the LLM provider.}
+\item{...}{Additional arguments passed to the \code{llmR::prompt_llm} function,
+such as the LLM provider.}
 }
 \value{
 The result tree of the meeting summary. Also saves the results in the
diff --git a/man/summarise_transcript.Rd b/man/summarise_transcript.Rd
index f489344..0f23e2e 100644
--- a/man/summarise_transcript.Rd
+++ b/man/summarise_transcript.Rd
@@ -71,7 +71,7 @@ get_prompts("output_rolling_aggregation") prompts depending on the task.}
 \item{prompt_only}{If TRUE, only the prompt is returned, the LLM is not
 interrogated. Default is FALSE.}
 
-\item{...}{Additional arguments passed to the \code{prompt_llm} function,
+\item{...}{Additional arguments passed to the \code{llmR::prompt_llm} function,
 such as the LLM provider.}
 }
 \value{
diff --git a/man/use_azure_llm.Rd b/man/use_azure_llm.Rd
deleted file mode 100644
index 17a2207..0000000
--- a/man/use_azure_llm.Rd
+++ /dev/null
@@ -1,41 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/LLM_calls.R
-\name{use_azure_llm}
-\alias{use_azure_llm}
-\title{Use Azure Language Model}
-\usage{
-use_azure_llm(
-  body,
-  deployment_id = getOption("minutemaker_azure_deployment_gpt"),
-  resource_name = getOption("minutemaker_azure_resource_gpt"),
-  api_key = getOption("minutemaker_azure_api_key_gpt"),
-  api_version = getOption("minutemaker_azure_api_version"),
-  log_request = getOption("minutemaker_log_requests", TRUE)
-)
-}
-\arguments{
-\item{body}{The body of the request.}
-
-\item{deployment_id}{Azure deployment ID for the language model. Obtained
-from R options.}
-
-\item{resource_name}{Azure resource name. Obtained from R options.}
-
-\item{api_key}{API key for the Azure language model service. Obtained from R
-options.}
-
-\item{api_version}{API version for the Azure language model service. Obtained
-from R options.}
-
-\item{log_request}{A boolean to log the request time. Can be set up globally
-using the \code{minutemaker_log_requests} option, which defaults to TRUE.}
-}
-\value{
-The function returns the response from the Azure API.
-}
-\description{
-Sends a request to the Azure API for language model completions using the
-parameters in the \code{body} argument. This function requires specific Azure
-configurations (deployment ID, resource name, API key, and API version) set
-in the R options.
-}
diff --git a/man/use_custom_llm.Rd b/man/use_custom_llm.Rd
deleted file mode 100644
index 71845b7..0000000
--- a/man/use_custom_llm.Rd
+++ /dev/null
@@ -1,38 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/LLM_calls.R
-\name{use_custom_llm}
-\alias{use_custom_llm}
-\title{Use Custom Language Model}
-\usage{
-use_custom_llm(
-  body,
-  endpoint = getOption("minutemaker_custom_endpoint_gpt"),
-  model = getOption("minutemaker_custom_model_gpt", NULL),
-  api_key = getOption("minutemaker_custom_api_key"),
-  log_request = getOption("minutemaker_log_requests", TRUE)
-)
-}
-\arguments{
-\item{body}{The body of the request.}
-
-\item{endpoint}{The local endpoint for the language model service. Can be
-obtained from R options.}
-
-\item{model}{Model identifier for the custom API, if needed (some APIs have
-one model per endpoint, some multiple ones). Obtained from R options.}
-
-\item{api_key}{Optional API key for the custom language model services that
-require it. Obtained from R options.}
-
-\item{log_request}{A boolean to log the request time. Can be set up globally
-using the \code{minutemaker_log_requests} option, which defaults to TRUE.}
-}
-\value{
-The function returns the response from the local language model
-endpoint.
-}
-\description{
-Sends a request to a custom (local or remote) language model endpoint
-compatible with the OpenAI API specification, using the parameters in the
-\code{body} argument. The user can provide an API key if required.
-}
diff --git a/man/use_openai_llm.Rd b/man/use_openai_llm.Rd
deleted file mode 100644
index 2833e3b..0000000
--- a/man/use_openai_llm.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/LLM_calls.R
-\name{use_openai_llm}
-\alias{use_openai_llm}
-\title{Use OpenAI Language Model}
-\usage{
-use_openai_llm(
-  body,
-  model = getOption("minutemaker_openai_model_gpt"),
-  api_key = getOption("minutemaker_openai_api_key"),
-  log_request = getOption("minutemaker_log_requests", TRUE)
-)
-}
-\arguments{
-\item{body}{The body of the request.}
-
-\item{model}{Model identifier for the OpenAI API. Obtained from R options.}
-
-\item{api_key}{API key for the OpenAI service. Obtained from R options.}
-
-\item{log_request}{A boolean to log the request time. Can be set up globally
-using the \code{minutemaker_log_requests} option, which defaults to TRUE.}
-}
-\value{
-The function returns the response from the OpenAI API.
-}
-\description{
-Sends a request to the OpenAI API using the parameters in the \code{body}
-argument. It requires an API key and model identifier set in the R options.
-}
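
Note for reviewers: after this refactor every LLM call in minutemaker goes
through llmR::prompt_llm(), which was extracted from the prompt_llm()
removed above. Downstream code therefore keeps the same message format (a
named character vector with "system"/"user" entries) and the force_json
argument, as the summarization and agenda-inference hunks show. A minimal
usage sketch in R follows; the prompt strings are invented for illustration,
and provider/model setup is assumed to have been done beforehand following
the llmR documentation (https://github.com/bakaburg1/llmR):

    # Assumes llmR (>= 1.1.0) is installed via the new Remotes entry and an
    # LLM provider has already been configured as per llmR's docs.
    library(llmR)

    # Same named-vector message format the removed prompt_llm() accepted.
    summary_text <- prompt_llm(
      c(
        system = "You are an assistant that summarises meeting transcripts.",
        user = "Summarise: the team agreed to release v0.11.0 next week."
      )
    )

    # JSON-producing tasks (e.g. agenda inference) additionally pass
    # force_json = TRUE, mirroring the calls in R/summarization.R above.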