Skip to content

Commit

Permalink
Merge pull request #60 from jcrodriguez1989/develop
Browse files Browse the repository at this point in the history
Release v0.2.4
  • Loading branch information
jcrodriguez1989 authored Dec 5, 2024
2 parents d22a39a + 2590e92 commit 16b43dd
Show file tree
Hide file tree
Showing 20 changed files with 238 additions and 31 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/code-review-gpt.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ jobs:
GITHUB_BASE_REF <- paste0("origin/", Sys.getenv("GITHUB_BASE_REF"))
GITHUB_HEAD_REF <- paste0("origin/", Sys.getenv("GITHUB_HEAD_REF"))
system("git fetch origin")
git_diff_cmnd <- paste("git diff", GITHUB_HEAD_REF, GITHUB_BASE_REF)
git_diff_cmnd <- paste("git diff", GITHUB_BASE_REF, GITHUB_HEAD_REF)
diff <- system(git_diff_cmnd, intern = TRUE)
prompt <- paste(
paste0(
Expand Down
5 changes: 3 additions & 2 deletions DESCRIPTION
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,13 @@ URL: https://github.com/jcrodriguez1989/chatgpt
BugReports: https://github.com/jcrodriguez1989/chatgpt/issues
Encoding: UTF-8
LazyData: true
RoxygenNote: 7.2.3
RoxygenNote: 7.3.1
Imports:
clipr,
httr,
jsonlite,
miniUI,
rstudioapi,
shiny,
utils
utils,
xfun
7 changes: 7 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,20 @@ export(create_variable_name)
export(document_code)
export(explain_code)
export(find_issues_in_code)
export(generate_image)
export(list_models)
export(optimize_code)
export(refactor_code)
export(reset_chat_session)
importFrom(clipr,read_clip)
importFrom(httr,GET)
importFrom(httr,POST)
importFrom(httr,add_headers)
importFrom(httr,content)
importFrom(httr,content_type_json)
importFrom(httr,stop_for_status)
importFrom(httr,use_proxy)
importFrom(jsonlite,fromJSON)
importFrom(jsonlite,toJSON)
importFrom(miniUI,gadgetTitleBar)
importFrom(miniUI,miniPage)
Expand All @@ -33,4 +38,6 @@ importFrom(shiny,stopApp)
importFrom(shiny,textAreaInput)
importFrom(shiny,updateTextAreaInput)
importFrom(shiny,wellPanel)
importFrom(utils,download.file)
importFrom(utils,getFromNamespace)
importFrom(xfun,base64_encode)
17 changes: 12 additions & 5 deletions R/ask_chatgpt.R
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,10 @@
#' Note: See also `reset_chat_session`.
#'
#' @param question The question to ask ChatGPT.
#' @param session_id The ID of the session to be used. We can have different conversations by using
#' different session IDs.
#' @param openai_api_key OpenAI's API key.
#' @param images A list of images to attach to the question. It could be a list of URLs or paths.
#'
#' @examples
#' \dontrun{
Expand All @@ -13,17 +17,20 @@
#'
#' @export
#'
ask_chatgpt <- function(question) {
ask_chatgpt <- function(question, session_id = "1", openai_api_key = Sys.getenv("OPENAI_API_KEY"),
images = NULL) {
# Get the existing chat session messages, and add the new message.
chat_session_messages <- append(get("chat_session_messages", envir = .state), list(
list(role = "user", content = question)
chat_session_messages <- append(get_chat_session(session_id), list(
list(role = "user", content = build_prompt_content(question, images))
))
# Send the query to ChatGPT.
chat_gpt_reply <- parse_response(gpt_get_completions(question, messages = chat_session_messages))
chat_gpt_reply <- parse_response(
gpt_get_completions(question, openai_api_key, chat_session_messages)
)
chat_session_messages <- append(chat_session_messages, list(
list(role = "assistant", content = chat_gpt_reply)
))
# Update the chat session messages with the new question and the reply.
assign("chat_session_messages", chat_session_messages, .state)
reset_chat_session(chat_session_messages, session_id)
chat_gpt_reply
}
21 changes: 21 additions & 0 deletions R/build_prompt_content.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#' Build Prompt Content
#'
#' @param question The question to ask ChatGPT.
#' @param images A list of images to attach to the question. It could be a list of URLs or paths.
#'
#' @importFrom xfun base64_encode
#'
build_prompt_content <- function(question, images) {
  # With no images, the chat API accepts a plain string as the message content.
  if (length(images) == 0) {
    return(question)
  }
  # Otherwise build a multi-part content list: one text part plus one
  # image_url part per attached image.
  prompt_content <- list(list(type = "text", text = question))
  append(prompt_content, lapply(images, function(image) {
    # If it's a local file, embed it as a base64 data URL; otherwise pass the "URL" through.
    image_url <- image
    if (file.exists(image)) {
      # Derive the MIME subtype from the file extension instead of always
      # claiming JPEG (a PNG sent as image/jpeg is a mislabeled payload).
      # Unknown extensions keep the previous "jpeg" behavior.
      ext <- tolower(sub(".*\\.", "", image))
      subtype <- switch(ext, png = "png", gif = "gif", webp = "webp", jpg = , jpeg = "jpeg", "jpeg")
      image_url <- paste0("data:image/", subtype, ";base64,", base64_encode(image))
    }
    list(type = "image_url", image_url = list(url = image_url))
  }))
}
6 changes: 1 addition & 5 deletions R/chatgpt-package.R
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,6 @@
.state <- new.env(parent = emptyenv())

# Empty chat session messages at startup.
assign(
"chat_session_messages",
list(list(role = "system", content = "You are a helpful assistant.")),
envir = .state
)
assign("chat_session_messages", list(), envir = .state)

api_url <- Sys.getenv("OPENAI_API_URL", "https://api.openai.com/v1")
27 changes: 27 additions & 0 deletions R/generate_image.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#' Generate an Image With DALL-E 3
#'
#' @param prompt The prompt for image generation.
#' @param out_file The path where to save the generated image.
#' @param openai_api_key OpenAI's API key.
#'
#' @importFrom httr add_headers content content_type_json POST stop_for_status
#' @importFrom jsonlite fromJSON toJSON
#' @importFrom utils download.file
#'
#' @export
#'
generate_image <- function(prompt, out_file = tempfile(fileext = ".png"),
openai_api_key = Sys.getenv("OPENAI_API_KEY")) {
post_res <- POST(
paste0(api_url, "/images/generations"),
add_headers("Authorization" = paste("Bearer", openai_api_key)),
content_type_json(),
body = toJSON(
list(model = "dall-e-3", prompt = prompt, n = 1, size = "1024x1024"),
auto_unbox = TRUE
)
)
stop_for_status(post_res)
download.file(fromJSON(content(post_res, as = "text", encoding = "UTF-8"))$data$url, out_file)
return(out_file)
}
16 changes: 16 additions & 0 deletions R/get_chat_session.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#' Get Chat Session
#'
#' @param session_id The ID of the session to be used. If `NULL`, it will return an empty session.
#'
get_chat_session <- function(session_id = "1") {
default_session <- list(list(role = "system", content = "You are a helpful assistant."))
if (is.null(session_id)) {
return(default_session)
}
session <- get("chat_session_messages", envir = .state)[[as.character(session_id)]]
# If the session was not found, then it's a new (default) session.
if (is.null(session)) {
session <- default_session
}
session
}
9 changes: 7 additions & 2 deletions R/gpt_get_completions.R
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,13 @@ gpt_get_completions <- function(prompt, openai_api_key = Sys.getenv("OPENAI_API_
}
# See https://platform.openai.com/docs/api-reference/chat .
params <- list(
model = Sys.getenv("OPENAI_MODEL", "gpt-3.5-turbo"),
model = Sys.getenv("OPENAI_MODEL", "gpt-4o-mini"),
max_tokens = as.numeric(Sys.getenv("OPENAI_MAX_TOKENS", 256)),
temperature = as.numeric(Sys.getenv("OPENAI_TEMPERATURE", 1)),
top_p = as.numeric(Sys.getenv("OPENAI_TOP_P", 1)),
frequency_penalty = as.numeric(Sys.getenv("OPENAI_FREQUENCY_PENALTY", 0)),
presence_penalty = as.numeric(Sys.getenv("OPENAI_PRESENCE_PENALTY", 0))
presence_penalty = as.numeric(Sys.getenv("OPENAI_PRESENCE_PENALTY", 0)),
logprobs = as.logical(Sys.getenv("OPENAI_LOGPROBS", FALSE))
)
if (get_verbosity()) {
message(paste0("\n*** ChatGPT input:\n\n", prompt, "\n"))
Expand Down Expand Up @@ -71,6 +72,10 @@ gpt_get_completions <- function(prompt, openai_api_key = Sys.getenv("OPENAI_API_
if (!post_res$status_code %in% 200:299) {
stop(content(post_res))
}
if (get_verbosity() > 1) {
# If verbose is over 1, show the ongoing GPT response.
message(content(post_res, as = "text", encoding = "UTF-8"))
}
post_res <- content(post_res)
final_res <- append(final_res, list(post_res))
# In the case the finish_reason is the length of the message, then we need to keep querying.
Expand Down
24 changes: 24 additions & 0 deletions R/list_models.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#' ChatGPT: List Models
#'
#' @param openai_api_key OpenAI's API key.
#'
#' @examples
#' \dontrun{
#' list_models()
#' }
#'
#' @importFrom httr add_headers content GET stop_for_status
#' @importFrom jsonlite fromJSON
#'
#' @return A data.frame with the available models to be used by OpenAI's API.
#'
#' @export
#'
list_models <- function(openai_api_key = Sys.getenv("OPENAI_API_KEY")) {
get_res <- GET(
paste0(api_url, "/models"),
add_headers("Authorization" = paste("Bearer", openai_api_key))
)
stop_for_status(get_res)
fromJSON(content(get_res, as = "text", encoding = "UTF-8"))$data
}
13 changes: 6 additions & 7 deletions R/parse_response.R
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,16 @@
#' @param raw_responses The raw response object returned by the OpenAI API.
#' @param verbosity The verbosity level for this function.
#'
#' @importFrom jsonlite toJSON
#'
#' @return Returns a character vector containing the text content of the response.
#'
parse_response <- function(raw_responses, verbosity = get_verbosity()) {
# If we provide a numeric value to `OPENAI_VERBOSE`, and it is `> 1` print return verbosity.
if (verbosity > 1) {
lapply(raw_responses, function(response) message(toJSON(response, pretty = TRUE)))
}
# Parse the message content of the list of raw_responses. Trim those messages, and paste them.
paste(trimws(sapply(raw_responses, function(response) {
parsed_response <- paste(trimws(sapply(raw_responses, function(response) {
sapply(response$choices, function(x) x$message$content)
})), collapse = "")
if (verbosity > 2) {
# If we are in 3-verbose mode, add the raw_responses as an attribute to the return object.
attr(parsed_response, "raw_responses") <- raw_responses
}
parsed_response
}
20 changes: 15 additions & 5 deletions R/reset_chat_session.R
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,22 @@
#' we want to start a new conversation, we must call `reset_chat_session`.
#'
#' @param system_role ChatGPT's role as an AI assistant.
#' @param session_id The ID of the session to be used. If `NULL`, this function will have no effect.
#'
#' @export
#'
reset_chat_session <- function(system_role = "You are a helpful assistant.") {
assign(
"chat_session_messages", list(list(role = "system", content = system_role)),
envir = .state
)
reset_chat_session <- function(system_role = "You are a helpful assistant.", session_id = "1") {
if (is.null(session_id)) {
return()
}
if (is.list(system_role)) {
# If `system_role` is a list, then it is a ChatGPT session object.
session <- system_role
} else {
# Otherwise, it's a string specifying ChatGPT's role.
session <- list(list(role = "system", content = system_role))
}
all_sessions <- get("chat_session_messages", envir = .state)
all_sessions[[as.character(session_id)]] <- session
assign("chat_session_messages", all_sessions, envir = .state)
}
3 changes: 2 additions & 1 deletion README.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -150,9 +150,10 @@ ChatGPT model parameters can be tweaked by using environment variables.

The following environment variables can be set to tweak the behavior, as documented in https://beta.openai.com/docs/api-reference/completions/create .

* `OPENAI_MODEL`; defaults to `"gpt-3.5-turbo"`
* `OPENAI_MODEL`; defaults to `"gpt-4o-mini"`
* `OPENAI_MAX_TOKENS`; defaults to `256`
* `OPENAI_TEMPERATURE`; defaults to `1`
* `OPENAI_TOP_P`; defaults to `1`
* `OPENAI_FREQUENCY_PENALTY`; defaults to `0`
* `OPENAI_PRESENCE_PENALTY`; defaults to `0`
* `OPENAI_LOGPROBS`; defaults to `FALSE`
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -355,9 +355,10 @@ The following environment variables can be set to tweak the behavior, as
documented in
<https://beta.openai.com/docs/api-reference/completions/create> .

- `OPENAI_MODEL`; defaults to `"gpt-3.5-turbo"`
- `OPENAI_MODEL`; defaults to `"gpt-4o-mini"`
- `OPENAI_MAX_TOKENS`; defaults to `256`
- `OPENAI_TEMPERATURE`; defaults to `1`
- `OPENAI_TOP_P`; defaults to `1`
- `OPENAI_FREQUENCY_PENALTY`; defaults to `0`
- `OPENAI_PRESENCE_PENALTY`; defaults to `0`
- `OPENAI_LOGPROBS`; defaults to `FALSE`
14 changes: 13 additions & 1 deletion man/ask_chatgpt.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

16 changes: 16 additions & 0 deletions man/build_prompt_content.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

22 changes: 22 additions & 0 deletions man/generate_image.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 14 additions & 0 deletions man/get_chat_session.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

23 changes: 23 additions & 0 deletions man/list_models.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 16b43dd

Please sign in to comment.