We've added support for @MistralAI chat completions and streaming chat completions in AIProxySwift release 0.43.0.
Chat completions:
import AIProxy

let mistralService = AIProxy.mistralService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

do {
    let response = try await mistralService.chatCompletionRequest(body: .init(
        messages: [.user(content: "Hello world")],
        model: "mistral-small-latest"
    ))
    print(response.choices.first?.message.content ?? "")
    if let usage = response.usage {
        print(
            """
            Used:
             \(usage.promptTokens ?? 0) prompt tokens
             \(usage.completionTokens ?? 0) completion tokens
             \(usage.totalTokens ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
    print("Could not create mistral chat completion: \(error.localizedDescription)")
}
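If you are driving a UI with the response, one pattern is to wrap the request in an observable view model. This is a minimal sketch under our own assumptions: ChatViewModel, reply, and send(_:) are illustration names, not part of AIProxySwift.

import AIProxy
import SwiftUI

@MainActor
final class ChatViewModel: ObservableObject {
    // Hypothetical view model for illustration; not part of AIProxySwift.
    @Published var reply = ""

    private let mistralService = AIProxy.mistralService(
        partialKey: "partial-key-from-your-developer-dashboard",
        serviceURL: "service-url-from-your-developer-dashboard"
    )

    func send(_ prompt: String) async {
        do {
            let response = try await mistralService.chatCompletionRequest(body: .init(
                messages: [.user(content: prompt)],
                model: "mistral-small-latest"
            ))
            // Publishing on the main actor keeps SwiftUI updates safe.
            self.reply = response.choices.first?.message.content ?? ""
        } catch {
            self.reply = "Could not create mistral chat completion: \(error.localizedDescription)"
        }
    }
}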
Streaming chat completions:
import AIProxy

let mistralService = AIProxy.mistralService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

do {
    let stream = try await mistralService.streamingChatCompletionRequest(body: .init(
        messages: [.user(content: "Hello world")],
        model: "mistral-small-latest"
    ))
    for try await chunk in stream {
        print(chunk.choices.first?.delta.content ?? "")
        if let usage = chunk.usage {
            print(
                """
                Used:
                 \(usage.promptTokens ?? 0) prompt tokens
                 \(usage.completionTokens ?? 0) completion tokens
                 \(usage.totalTokens ?? 0) total tokens
                """
            )
        }
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
    print("Could not create mistral streaming chat completion: \(error.localizedDescription)")
}
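To render the streamed text as it arrives, you can accumulate the deltas into one string rather than printing each chunk on its own line. A minimal sketch using only the calls shown above; fullText is our own variable:

import AIProxy

let mistralService = AIProxy.mistralService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)

var fullText = ""
do {
    let stream = try await mistralService.streamingChatCompletionRequest(body: .init(
        messages: [.user(content: "Hello world")],
        model: "mistral-small-latest"
    ))
    for try await chunk in stream {
        // Append each delta as it arrives; update your UI from here if needed.
        fullText += chunk.choices.first?.delta.content ?? ""
    }
} catch {
    print("Could not create mistral streaming chat completion: \(error.localizedDescription)")
}
print(fullText)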