AI Agent Toolkit with fast and meaningful streaming response for iOS/macOS applications.
Add the following to your Package.swift:
dependencies: [
.package(url: "https://github.com/vivgrid/viv-swift.git", from: "1.0.0")
]

Or add it via Xcode: File → Add Package Dependencies → Enter the repository URL.
- iOS 15.0+ / macOS 12.0+ / tvOS 15.0+ / watchOS 8.0+
- Swift 5.9+
- Xcode 15.0+
import Viv
// Initialize client
let client = Viv(apiKey: "your-api-key")
// Streaming chat completion
let stream = try await client.chat.stream(ChatRequest(
messages: [.user("Hello, how are you?")]
))
// Option 1: Use AsyncSequence
for await chunk in stream {
if case .string(let text) = chunk.data {
print(text, terminator: "")
}
}
// Option 2: Use event handlers
stream
.onContent { delta in
print(delta, terminator: "")
}
.onEnd {
print("\nDone!")
}
.onError { error in
print("Error: \(error)")
}

let response = try await client.chat.create(ChatRequest(
messages: [
.system("You are a helpful assistant"),
.user("What is the capital of France?")
]
))
print(response.choices.first?.message.content ?? "")

// Define a function
let weatherFunction = Function.Builder(
name: "get_weather",
description: "Get the current weather for a city"
)
.addStringProperty("city", description: "The city name", required: true)
.build()
// Create request with function
let stream = try await client.chat.stream(ChatRequest(
messages: [.user("What's the weather in Tokyo?")],
functions: [weatherFunction]
))
stream
.onFunctionCall { name, arguments in
print("Function called: \(name)")
print("Arguments: \(arguments)")
}
.onFunctionCallResult { name, result in
print("Result: \(result)")
}
.onContent { delta in
print(delta, terminator: "")
}

import SwiftUI
import Viv
/// Minimal chat screen backed by `ChatCompletionViewModel`.
///
/// Streams chat completions from a `Viv` client and renders the accumulated
/// content, a loading indicator, and any error.
/// NOTE(review): `@State` for the view model assumes `ChatCompletionViewModel`
/// uses the `@Observable` macro (iOS 17+) — confirm; otherwise use
/// `@StateObject` with `ObservableObject`.
struct ChatView: View {
    @State private var viewModel: ChatCompletionViewModel
    @State private var input = ""

    init() {
        // Falls back to an empty key when VIV_API_KEY is unset; requests
        // will then fail with an authentication error at call time.
        let client = Viv(apiKey: ProcessInfo.processInfo.environment["VIV_API_KEY"] ?? "")
        _viewModel = State(initialValue: ChatCompletionViewModel(client: client))
    }

    var body: some View {
        VStack {
            ScrollView {
                Text(viewModel.content)
                    .padding()
            }
            if viewModel.isLoading {
                ProgressView()
            }
            if let error = viewModel.error {
                Text(error.localizedDescription)
                    .foregroundColor(.red)
            }
            HStack {
                TextField("Message", text: $input)
                    .textFieldStyle(.roundedBorder)
                Button("Send") {
                    // Capture and clear the field *before* awaiting: `send`
                    // runs for the duration of the stream, and clearing only
                    // afterwards would leave stale text in the field and wipe
                    // anything the user typed in the meantime.
                    let message = input
                    input = ""
                    Task {
                        await viewModel.send(message)
                    }
                }
                // Also prevent sending an empty message.
                .disabled(viewModel.isLoading || input.isEmpty)
            }
            .padding()
        }
    }
}

import Combine
let cancellable = client.chat.streamPublisher(request)
.receive(on: DispatchQueue.main)
.sink(
receiveCompletion: { completion in
switch completion {
case .finished:
print("Completed")
case .failure(let error):
print("Error: \(error)")
}
},
receiveValue: { chunk in
if case .string(let text) = chunk.data {
print(text, terminator: "")
}
}
)

let config = VivConfiguration(
apiKey: "your-api-key",
baseURL: URL(string: "https://api.vivgrid.com/v1")!,
maxRetries: 3, // Retry transient errors
retryDelay: 1000, // Base retry delay in ms
timeout: 600, // Request timeout in seconds
defaultHeaders: [:], // Custom headers
defaultQuery: [:] // Custom query parameters
)
let client = Viv(configuration: config)

do {
let stream = try await client.chat.stream(request)
// ...
} catch let error as VivError {
switch error {
case .authenticationError:
print("Check your API key")
case .rateLimitError(_, let retryAfter):
print("Rate limited. Retry after: \(retryAfter ?? 0)s")
case .serverError(let code, let message):
print("Server error \(code): \(message ?? "")")
case .networkError:
print("Check your network connection")
default:
print("Error: \(error.localizedDescription)")
}
}

| Event | Description |
|---|---|
| `onConnect` | Called when first content/reasoning is received |
| `onContent` | Called for each content delta |
| `onReasoning` | Called for each reasoning delta |
| `onModel` | Called when model identifier is received |
| `onFunctionCall` | Called when a function call is initiated |
| `onFunctionCallResult` | Called when a function result is received |
| `onUsage` | Called when token usage is received |
| `onEnd` | Called when stream completes normally |
| `onError` | Called when an error occurs |
| `onAbort` | Called when stream is cancelled |
The stream automatically accumulates state:
let stream = try await client.chat.stream(request)
// After streaming completes:
print(stream.content) // Full content
print(stream.reasoning) // Full reasoning
print(stream.functionCalls) // All function calls
print(stream.usage) // Token usage
print(stream.model) // Model identifier

Licensed under the Apache License 2.0.