diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml
index 430ffe8..f5be800 100644
--- a/.github/workflows/R-CMD-check.yaml
+++ b/.github/workflows/R-CMD-check.yaml
@@ -2,9 +2,9 @@
 # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
 on:
   push:
-    branches: [main, master, dev, test]
+    branches: [main, master, dev, test, tawab]
   pull_request:
-    branches: [main, master, dev, test]
+    branches: [main, master, dev, test, tawab]
 
 name: R-CMD-check
 
diff --git a/DESCRIPTION b/DESCRIPTION
index 9bf56b9..d06837c 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -10,7 +10,7 @@ Description: An interface to easily run local language models with 'Ollama'
 = 3.0.0)
 Config/testthat/edition: 3
diff --git a/R/ollama.R b/R/ollama.R
index dd2936a..a53063a 100644
--- a/R/ollama.R
+++ b/R/ollama.R
@@ -97,6 +97,7 @@ list_models <- function(output = c("df", "resp", "jsonlist", "raw", "text"), end
 #' @param output The output format. Default is "resp". Other options are "jsonlist", "raw", "df", "text".
 #' @param stream Enable response streaming. Default is FALSE.
 #' @param endpoint The endpoint to chat with the model. Default is "/api/chat".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
 #'
 #' @return A response in the format specified in the output parameter.
 #' @export
@@ -120,9 +121,9 @@ list_models <- function(output = c("df", "resp", "jsonlist", "raw", "text"), end
 #'   list(role = "user", content = "List all the previous messages.")
 #' )
 #' chat("llama3", messages, stream = TRUE)
-chat <- function(model, messages, output = c("resp", "jsonlist", "raw", "df", "text"), stream = FALSE, endpoint = "/api/chat") {
+chat <- function(model, messages, output = c("resp", "jsonlist", "raw", "df", "text"), stream = FALSE, endpoint = "/api/chat", host = NULL) {
 
-  req <- create_request(endpoint)
+  req <- create_request(endpoint, host)
   req <- httr2::req_method(req, "POST")
 
   body_json <- list(model = model,
@@ -216,6 +217,7 @@ chat <- function(model, messages, output = c("resp", "jsonlist", "raw", "df", "t
 #' @param model A character string of the model name such as "llama3".
 #' @param stream Enable response streaming. Default is TRUE.
 #' @param endpoint The endpoint to pull the model. Default is "/api/pull".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
 #'
 #' @return A httr2 response object.
 #' @export
@@ -223,8 +225,8 @@ chat <- function(model, messages, output = c("resp", "jsonlist", "raw", "df", "t
 #' @examplesIf test_connection()$status_code == 200
 #' pull("llama3")
 #" pull("all-minilm", stream = FALSE)
-pull <- function(model, stream = TRUE, endpoint = "/api/pull") {
-  req <- create_request(endpoint)
+pull <- function(model, stream = TRUE, endpoint = "/api/pull", host = NULL) {
+  req <- create_request(endpoint, host)
   req <- httr2::req_method(req, "POST")
 
   body_json <- list(model = model, stream = stream)
@@ -275,6 +277,7 @@ pull <- function(model, stream = TRUE, endpoint = "/api/pull") {
 #'
 #' @param model A character string of the model name such as "llama3".
 #' @param endpoint The endpoint to delete the model. Default is "/api/delete".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
 #'
 #' @return A httr2 response object.
 #' @export
@@ -283,8 +286,8 @@ pull <- function(model, stream = TRUE, endpoint = "/api/pull") {
 #' \dontrun{
 #' delete("llama3")
 #' }
-delete <- function(model, endpoint = "/api/delete") {
-  req <- create_request(endpoint)
+delete <- function(model, endpoint = "/api/delete", host = NULL) {
+  req <- create_request(endpoint, host)
   req <- httr2::req_method(req, "DELETE")
   body_json <- list(model = model)
   req <- httr2::req_body_json(req, body_json)
@@ -313,6 +316,7 @@ normalize <- function(x) {
 #' @param normalize Normalize the vector to length 1. Default is TRUE.
 #' @param keep_alive The time to keep the connection alive. Default is "5m" (5 minutes).
 #' @param endpoint The endpoint to get the vector embedding. Default is "/api/embeddings".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
 #' @param ... Additional options to pass to the model.
 #'
 #' @return A numeric vector of the embedding.
@@ -322,8 +326,8 @@ normalize <- function(x) {
 #' embeddings("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")
 #' # pass model options to the model
 #' embeddings("nomic-embed-text:latest", "Hello!", temperature = 0.1, num_predict = 3)
-embeddings <- function(model, prompt, normalize = TRUE, keep_alive = "5m", endpoint = "/api/embeddings", ...) {
-  req <- create_request(endpoint)
+embeddings <- function(model, prompt, normalize = TRUE, keep_alive = "5m", endpoint = "/api/embeddings", host = NULL, ...) {
+  req <- create_request(endpoint, host)
   req <- httr2::req_method(req, "POST")
 
   opts <- list(...)
@@ -367,6 +371,8 @@ embeddings <- function(model, prompt, normalize = TRUE, keep_alive = "5m", endpo
 #' @param output A character vector of the output format. Default is "resp". Options are "resp", "jsonlist", "raw", "df", "text".
 #' @param stream Enable response streaming. Default is FALSE.
 #' @param endpoint The endpoint to generate the completion. Default is "/api/generate".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
+#'
 #'
 #' @return A response in the format specified in the output parameter.
 #' @export
@@ -375,9 +381,9 @@ embeddings <- function(model, prompt, normalize = TRUE, keep_alive = "5m", endpo
 #' generate("llama3", "The sky is...", stream = FALSE, output = "df")
 #' generate("llama3", "The sky is...", stream = TRUE, output = "df")
 #' generate("llama3", "The sky is...", stream = FALSE, output = "jsonlist")
-generate <- function(model, prompt, output = c("resp", "jsonlist", "raw", "df", "text"), stream = FALSE, endpoint = "/api/generate") {
+generate <- function(model, prompt, output = c("resp", "jsonlist", "raw", "df", "text"), stream = FALSE, endpoint = "/api/generate", host = NULL) {
 
-  req <- create_request(endpoint)
+  req <- create_request(endpoint, host)
   req <- httr2::req_method(req, "POST")
 
   body_json <- list(model = model,
diff --git a/man/chat.Rd b/man/chat.Rd
index 7892a07..c941ef5 100644
--- a/man/chat.Rd
+++ b/man/chat.Rd
@@ -9,7 +9,8 @@ chat(
   messages,
   output = c("resp", "jsonlist", "raw", "df", "text"),
   stream = FALSE,
-  endpoint = "/api/chat"
+  endpoint = "/api/chat",
+  host = NULL
 )
 }
 \arguments{
@@ -22,6 +23,8 @@ chat(
 \item{stream}{Enable response streaming. Default is FALSE.}
 
 \item{endpoint}{The endpoint to chat with the model. Default is "/api/chat".}
+
+\item{host}{The base URL to use. Default is NULL, which uses Ollama's default base URL.}
 }
 \value{
 A response in the format specified in the output parameter.
diff --git a/man/delete.Rd b/man/delete.Rd
index 687feb3..771d70f 100644
--- a/man/delete.Rd
+++ b/man/delete.Rd
@@ -4,12 +4,14 @@
 \alias{delete}
 \title{Delete a model}
 \usage{
-delete(model, endpoint = "/api/delete")
+delete(model, endpoint = "/api/delete", host = NULL)
 }
 \arguments{
 \item{model}{A character string of the model name such as "llama3".}
 
 \item{endpoint}{The endpoint to delete the model. Default is "/api/delete".}
+
+\item{host}{The base URL to use. Default is NULL, which uses Ollama's default base URL.}
 }
 \value{
 A httr2 response object.
diff --git a/man/embeddings.Rd b/man/embeddings.Rd
index e185a29..aa3eca1 100644
--- a/man/embeddings.Rd
+++ b/man/embeddings.Rd
@@ -10,6 +10,7 @@ embeddings(
   normalize = TRUE,
   keep_alive = "5m",
   endpoint = "/api/embeddings",
+  host = NULL,
   ...
 )
 }
@@ -24,6 +25,8 @@ embeddings(
 
 \item{endpoint}{The endpoint to get the vector embedding. Default is "/api/embeddings".}
 
+\item{host}{The base URL to use. Default is NULL, which uses Ollama's default base URL.}
+
 \item{...}{Additional options to pass to the model.}
 }
 \value{
diff --git a/man/generate.Rd b/man/generate.Rd
index d506404..907ad68 100644
--- a/man/generate.Rd
+++ b/man/generate.Rd
@@ -9,7 +9,8 @@ generate(
   prompt,
   output = c("resp", "jsonlist", "raw", "df", "text"),
   stream = FALSE,
-  endpoint = "/api/generate"
+  endpoint = "/api/generate",
+  host = NULL
 )
 }
 \arguments{
@@ -22,6 +23,8 @@ generate(
 \item{stream}{Enable response streaming. Default is FALSE.}
 
 \item{endpoint}{The endpoint to generate the completion. Default is "/api/generate".}
+
+\item{host}{The base URL to use. Default is NULL, which uses Ollama's default base URL.}
 }
 \value{
 A response in the format specified in the output parameter.
diff --git a/man/pull.Rd b/man/pull.Rd
index 06df3af..120050a 100644
--- a/man/pull.Rd
+++ b/man/pull.Rd
@@ -4,7 +4,7 @@
 \alias{pull}
 \title{Pull/download a model}
 \usage{
-pull(model, stream = TRUE, endpoint = "/api/pull")
+pull(model, stream = TRUE, endpoint = "/api/pull", host = NULL)
 }
 \arguments{
 \item{model}{A character string of the model name such as "llama3".}
@@ -12,6 +12,8 @@ pull(model, stream = TRUE, endpoint = "/api/pull")
 \item{stream}{Enable response streaming. Default is TRUE.}
 
 \item{endpoint}{The endpoint to pull the model. Default is "/api/pull".}
+
+\item{host}{The base URL to use. Default is NULL, which uses Ollama's default base URL.}
 }
 \value{
 A httr2 response object.
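
A minimal usage sketch of what this patch enables, for reviewers; it is not part of the diff. It assumes the package (ollamar) is installed from this branch, that an Ollama server is reachable, and the remote URL shown is a placeholder.

library(ollamar)

messages <- list(
  list(role = "user", content = "Why is the sky blue?")
)

# host = NULL (the default) keeps Ollama's default base URL, so existing code is unchanged
chat("llama3", messages, output = "text")

# the updated functions can now target a non-default Ollama server
# (placeholder URL; 11434 is Ollama's default port)
chat("llama3", messages, output = "text", host = "http://192.168.0.5:11434")
embeddings("nomic-embed-text:latest", "Hello!", host = "http://192.168.0.5:11434")
pull("llama3", stream = FALSE, host = "http://192.168.0.5:11434")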