## ---------------------------------------------------------------------------
## Vignette code extracted via knitr::purl(). All example chunks were written
## with eval=FALSE, so they appear below as commented-out code; the only
## statement that executes when this file is sourced is the
## knitr::opts_chunk$set() call in the first chunk.
## ---------------------------------------------------------------------------

## ----include = FALSE----------------------------------------------------------
# Global chunk options for the vignette: collapse source and output into one
# block and prefix printed output with "#>".
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup, eval=FALSE---------------------------------------------------------
# library(openaiRtools)
#
# # Method 1: Set API key in environment (recommended)
# Sys.setenv(OPENAI_API_KEY = "your-api-key-here")
# client <- OpenAI$new()
#
# # Method 2: Pass API key directly
# client <- OpenAI$new(api_key = "your-api-key-here")

## ----chat, eval=FALSE----------------------------------------------------------
# # Basic chat completion
# response <- client$chat$completions$create(
#   messages = list(
#     list(role = "user", content = "Hello, how are you?")
#   ),
#   model = "gpt-4"
# )
#
# cat(response$choices[[1]]$message$content)
#
# # With parameters
# response <- client$chat$completions$create(
#   messages = list(
#     list(role = "system", content = "You are a helpful assistant."),
#     list(role = "user", content = "What is R?")
#   ),
#   model = "gpt-4",
#   temperature = 0.7,
#   max_tokens = 200
# )

## ----embeddings, eval=FALSE----------------------------------------------------
# # Create embeddings
# response <- client$embeddings$create(
#   input = "The quick brown fox jumps over the lazy dog",
#   model = "text-embedding-ada-002"
# )
#
# # Access embedding vector
# embedding <- response$data[[1]]$embedding
# cat("Embedding dimension:", length(embedding))

## ----images, eval=FALSE--------------------------------------------------------
# # Generate image
# response <- client$images$create(
#   prompt = "A cute baby sea otter in a spacesuit, digital art",
#   model = "dall-e-3",
#   size = "1024x1024",
#   quality = "hd"
# )
#
# # Get image URL
# cat("Image URL:", response$data[[1]]$url)

## ----audio, eval=FALSE---------------------------------------------------------
# # Transcribe audio
# transcription <- client$audio$transcriptions$create(
#   file = "recording.mp3",
#   model = "whisper-1"
# )
#
# cat("Transcription:", transcription$text)
#
# # Text-to-speech
# audio_data <- client$audio$speech$create(
#   input = "Hello, this is a test of text to speech.",
#   model = "tts-1",
#   voice = "alloy"
# )
#
# # Save to file
# writeBin(audio_data, "speech.mp3")

## ----errors, eval=FALSE--------------------------------------------------------
# tryCatch(
#   {
#     response <- client$chat$completions$create(
#       messages = list(list(role = "user", content = "Test")),
#       model = "gpt-4"
#     )
#   },
#   openai_api_error = function(e) {
#     cat("API Error:", e$message, "\n")
#     cat("Status Code:", e$status_code, "\n")
#   },
#   openai_connection_error = function(e) {
#     cat("Connection Error:", e$message, "\n")
#   },
#   error = function(e) {
#     cat("General Error:", e$message, "\n")
#   }
# )

## ----config, eval=FALSE--------------------------------------------------------
# client <- OpenAI$new(
#   api_key = "your-key",
#   base_url = "https://api.openai.com/v1",
#   organization = "org-123",
#   project = "proj-456",
#   timeout = 600
# )