Swift Examples - OpenAI
Service setup
Create an OpenAI service in the AIProxy dashboard
Follow the integration guide, selecting the OpenAI icon on the 'Create a New Service' form.
Get a non-streaming chat completion from OpenAI
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

// Build the request body up front, mirroring the streaming example below.
let requestBody = OpenAIChatCompletionRequestBody(
    model: "gpt-4o",
    messages: [.system(content: .text("hello world"))]
)

do {
    let response = try await openAIService.chatCompletionRequest(body: requestBody)
    print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create OpenAI chat completion: \(error.localizedDescription)")
}
Get a streaming chat completion from OpenAI
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

do {
    // Build the request inline; each streamed chunk carries a partial `delta`
    // rather than a complete message.
    let stream = try await openAIService.streamingChatCompletionRequest(
        body: OpenAIChatCompletionRequestBody(
            model: "gpt-4o-mini",
            messages: [.user(content: .text("hello world"))]
        )
    )
    for try await chunk in stream {
        print(chunk.choices.first?.delta.content ?? "")
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create OpenAI streaming chat completion: \(error.localizedDescription)")
}
Send a multi-modal chat completion request to OpenAI
On macOS, use NSImage(named:) in place of UIImage(named:)
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

// Load the image from the asset catalog (use NSImage(named:) on macOS).
guard let image = UIImage(named: "myImage") else {
    print("Could not find an image named 'myImage' in your app assets")
    return
}

// Encode the image as a URL in the format OpenAI accepts for image inputs.
guard let imageURL = AIProxy.encodeImageAsURL(image: image, compressionQuality: 0.8) else {
    print("Could not convert image to OpenAI's imageURL format")
    return
}

do {
    // A user message may mix text and image parts in a single request.
    let requestBody = OpenAIChatCompletionRequestBody(
        model: "gpt-4o",
        messages: [
            .system(content: .text("Tell me what you see")),
            .user(content: .parts([
                .text("What do you see?"),
                .imageURL(imageURL, detail: .auto)
            ]))
        ]
    )
    let response = try await openAIService.chatCompletionRequest(body: requestBody)
    print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create OpenAI multi-modal chat completion: \(error.localizedDescription)")
}
How to generate an image with DALLE
This snippet will print out the URL of an image generated with dall-e-3:
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

// Ask dall-e-3 for a single image and print the URL of the result.
let requestBody = OpenAICreateImageRequestBody(
    prompt: "a skier",
    model: "dall-e-3"
)

do {
    let response = try await openAIService.createImageRequest(body: requestBody)
    print(response.data.first?.url ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not generate an image with OpenAI's DALLE: \(error.localizedDescription)")
}
How to ensure OpenAI returns JSON as the chat message content
Use responseFormat and specify in the prompt that OpenAI should return JSON only:
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

do {
    // `responseFormat: .jsonObject` enables JSON mode; the system prompt
    // additionally tells the model to emit JSON only.
    let response = try await openAIService.chatCompletionRequest(body: .init(
        model: "gpt-4o",
        messages: [
            .system(content: .text("Return valid JSON only")),
            .user(content: .text("Return alice and bob in a list of names"))
        ],
        responseFormat: .jsonObject
    ))
    print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create OpenAI chat completion in JSON mode: \(error.localizedDescription)")
}
How to use OpenAI structured outputs (JSON schemas) in a chat response
This example prompts chatGPT to construct a color palette and conform to a strict JSON schema in its response:
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

do {
    // JSON schema the response must conform to: an object with a `colors`
    // array whose entries each carry a name and a hex code. Note that every
    // object level sets `additionalProperties: false` and lists all of its
    // properties as required, which OpenAI's strict mode expects.
    let schema: [String: AIProxyJSONValue] = [
        "type": "object",
        "properties": [
            "colors": [
                "type": "array",
                "items": [
                    "type": "object",
                    "properties": [
                        "name": [
                            "type": "string",
                            "description": "A descriptive name to give the color"
                        ],
                        "hex_code": [
                            "type": "string",
                            "description": "The hex code of the color"
                        ]
                    ],
                    "required": ["name", "hex_code"],
                    "additionalProperties": false
                ]
            ]
        ],
        "required": ["colors"],
        "additionalProperties": false
    ]

    let requestBody = OpenAIChatCompletionRequestBody(
        model: "gpt-4o",
        messages: [
            .system(content: .text("Return valid JSON only")),
            .user(content: .text("Return a peaches and cream color palette"))
        ],
        responseFormat: .jsonSchema(
            name: "palette_creator",
            // Fixed typo in the description string: "pallete" -> "palette".
            description: "A list of colors that make up a color palette",
            schema: schema,
            strict: true
        )
    )
    let response = try await openAIService.chatCompletionRequest(body: requestBody)
    print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create OpenAI chat completion with structured outputs: \(error.localizedDescription)")
}
How to use OpenAI structured outputs (JSON schemas) in a tool call
This example is taken from the structured outputs announcement: https://openai.com/index/introducing-structured-outputs-in-the-api/
It asks ChatGPT to call a function with the correct arguments to look up a business's unfulfilled orders:
import AIProxy
let openAIService = AIProxy.openAIService(
partialKey: "partial-key-from-your-developer-dashboard",
serviceURL: "service-url-from-your-developer-dashboard"
)
do {
// JSON schema for the arguments the model must supply when it calls the
// get_weather function declared below. Every property is listed in
// `required` and `additionalProperties` is false, matching strict mode.
let schema: [String: AIProxyJSONValue] = [
"type": "object",
"properties": [
"location": [
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
],
"unit": [
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The unit of temperature. If not specified in the prompt, always default to fahrenheit",
"default": "fahrenheit"
]
],
"required": ["location", "unit"],
"additionalProperties": false
]
// Register get_weather as a tool. `strict: true` asks the model to
// produce arguments that conform exactly to `schema`.
let requestBody = OpenAIChatCompletionRequestBody(
model: "gpt-4o-2024-08-06",
messages: [
.user(content: .text("How cold is it today in SF?"))
],
tools: [
.function(
name: "get_weather",
description: "Call this when the user wants the weather",
parameters: schema,
strict: true)
]
)
let response = try await openAIService.chatCompletionRequest(body: requestBody)
// When the model chooses to invoke the function, the tool call (name plus
// decoded arguments) appears on the first choice's message.
if let toolCall = response.choices.first?.message.toolCalls?.first {
let functionName = toolCall.function.name
// Arguments may be absent; fall back to an empty dictionary.
let arguments = toolCall.function.arguments ?? [:]
print("ChatGPT wants us to call function \(functionName) with arguments: \(arguments)")
} else {
print("Could not get function arguments")
}
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not make an OpenAI structured output tool call: \(error.localizedDescription)")
}
How to get word-level timestamps in an audio transcription
- Record an audio file in quicktime and save it as "helloworld.m4a"
- Add the audio file to your Xcode project. Make sure it's included in your target: select your audio file in the project tree, type cmd-opt-0 to open the inspector panel, and view Target Membership
- Run this snippet:
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

// Replaced the force-unwrap with a guard so a missing bundle resource
// produces an actionable message instead of a crash. The file must be a
// member of your app target (see the steps above).
guard let url = Bundle.main.url(forResource: "helloworld", withExtension: "m4a") else {
    print("Could not find 'helloworld.m4a' in your app bundle. Check its Target Membership.")
    return
}

do {
    let requestBody = OpenAICreateTranscriptionRequestBody(
        file: try Data(contentsOf: url),
        model: "whisper-1",
        // Word-level timestamps require the verbose_json response format
        // together with the .word granularity requested below.
        responseFormat: "verbose_json",
        timestampGranularities: [.word, .segment]
    )
    let response = try await openAIService.createTranscriptionRequest(body: requestBody)
    if let words = response.words {
        for word in words {
            print("\(word.word) from \(word.start) to \(word.end)")
        }
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not get word-level timestamps from OpenAI: \(error.localizedDescription)")
}
How to use OpenAI text-to-speech
import AIProxy

let openAIService = AIProxy.openAIService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

do {
    let requestBody = OpenAITextToSpeechRequestBody(
        input: "Hello world",
        voice: .nova
    )
    let mpegData = try await openAIService.createTextToSpeechRequest(body: requestBody)

    // Do not use a local `let` or `var` for AVAudioPlayer.
    // You need the lifecycle of the player to live beyond the scope of this function.
    // Instead, use file scope or set the player as a member of a reference type with long life.
    // For example, at the top of this file you may define:
    //
    //     fileprivate var audioPlayer: AVAudioPlayer? = nil
    //
    // And then use the code below to play the TTS result:
    audioPlayer = try AVAudioPlayer(data: mpegData)
    audioPlayer?.prepareToPlay()
    audioPlayer?.play()
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    // Fixed copy/paste error: this snippet uses OpenAI TTS, not ElevenLabs.
    print("Could not create OpenAI TTS audio: \(error.localizedDescription)")
}
Specify your own clientID to annotate requests
// Supply a clientID of your choosing to annotate requests from a particular
// client/device in your AIProxy dashboard.
// NOTE(review): the partialKey and serviceURL values here are hardcoded
// placeholders — replace them with your real dashboard values.
let openAIService = AIProxy.openAIService(
partialKey: "hardcode_partial_key_here",
serviceURL: "hardcode_service_url_here",
clientID: ""
)