Intermediate · 35 min · Module 2 of 3
Predictions & Error Handling
Make predictions with async/await, handle typed Swift errors, run concurrent generations, and control the prediction lifecycle.
Prediction lifecycle
Creating a prediction
Default — wait for completion
import Skytells

let client = SkytellsClient(apiKey: apiKey)

// The SDK polls until the prediction reaches a terminal state
// (succeeded or failed) before returning.
let prediction = try await client.predictions.create(
    model: "truefusion-pro",
    input: [
        "prompt": "A red fox in a snowy pine forest, cinematic, golden hour",
        "width": 1024,
        "height": 1024,
        "guidance_scale": 7.5,
        "num_inference_steps": 30,
    ]
)

// Bind the first output URL via optional chaining instead of the
// original force unwrap (`urls.first!`), which would crash if the
// model returned an empty output array.
guard let imageURL = prediction.output?.first else { return }
print("Image:", imageURL)

Non-blocking — return immediately
Use wait: false for long-running predictions where you want to track progress separately:
// Create the prediction but return as soon as the request is accepted;
// `wait: false` skips the SDK's internal polling.
let pending = try await client.predictions.create(
    model: "truefusion-video-pro",
    input: [
        "prompt": "Ocean waves at sunset, 4K, cinematic",
        "duration_seconds": 10,
        "aspect_ratio": "16:9",
    ],
    wait: false
)
print("Prediction ID:", pending.id, "Status:", pending.status)

Poll manually
// Re-fetch the prediction until it leaves the non-terminal states.
var prediction = pending
while prediction.status == .queued || prediction.status == .processing {
    try await Task.sleep(nanoseconds: 5_000_000_000) // wait 5 seconds between polls
    prediction = try await client.predictions.get(pending.id)
    print("Status:", prediction.status)
}
// Terminal state reached: report the output or the failure reason.
if prediction.status == .succeeded {
    print("Output:", prediction.output ?? [])
} else {
    print("Failed:", prediction.error ?? "Unknown error")
}

Concurrent predictions
Use Swift's structured concurrency to generate multiple images in parallel:
// Each `async let` starts its child task immediately, so the three
// generations run in parallel rather than one after another.
async let imageA = client.predictions.create(
    model: "truefusion-pro",
    input: ["prompt": "A mountain landscape, autumn", "seed": 1]
)
async let imageB = client.predictions.create(
    model: "truefusion-pro",
    input: ["prompt": "A mountain landscape, winter", "seed": 2]
)
async let imageC = client.predictions.create(
    model: "truefusion-pro",
    input: ["prompt": "A mountain landscape, spring", "seed": 3]
)
// Awaiting the tuple suspends until all three children finish; a thrown
// error from any child cancels the remaining ones (structured concurrency).
let (predA, predB, predC) = try await (imageA, imageB, imageC)
let urls = [predA, predB, predC].compactMap { $0.output?.first }

Running 3 predictions concurrently is ~3× faster than sequential.
Typed error handling
The SDK uses a typed SkytellsError enum that maps directly to API error codes:
import Skytells

do {
    let prediction = try await client.predictions.create(
        model: "truefusion-pro",
        input: ["prompt": "test"]
    )
    handleOutput(prediction)
} catch let error as SkytellsError {
    // Switch on the typed error to branch on the specific API failure mode.
    switch error {
    case .unauthorized:
        showAlert("Invalid API key. Check your configuration.")
    case .rateLimitExceeded(let retryAfter):
        // The API supplies the back-off interval; honor it instead of retrying immediately.
        scheduleRetry(after: retryAfter)
    case .invalidInput(let detail):
        showAlert("Invalid input: \(detail)")
    case .predictionFailed(let message):
        showAlert("Generation failed: \(message)")
    case .networkError(let underlying):
        showAlert("Network error: \(underlying.localizedDescription)")
    default:
        // NOTE(review): `default` also swallows any SkytellsError cases the
        // SDK adds in future versions — re-check this switch when upgrading.
        showAlert("An unexpected error occurred.")
    }
} catch {
    // Errors not thrown by the SDK (e.g. CancellationError from the task tree).
    print("Non-Skytells error:", error)
}

Cancellation
Support user-initiated cancellation with Swift's Task cancellation:
/// Drives a single image generation from the UI and supports
/// user-initiated cancellation of the in-flight request.
///
/// `@MainActor` isolation guarantees the `@Published` properties are only
/// mutated on the main thread — the original mutated them from inside an
/// unstructured `Task`, which is not guaranteed to run on the main actor.
@MainActor
class GenerationViewModel: ObservableObject {
    @Published var imageURL: URL?
    @Published var isGenerating = false

    // Handle to the in-flight generation so it can be cancelled.
    private var generationTask: Task<Void, Never>?

    /// Starts a new generation, cancelling any one already running.
    func generate(prompt: String) {
        generationTask?.cancel()
        generationTask = Task {
            isGenerating = true
            defer { isGenerating = false } // reset on every exit path
            do {
                // NOTE(review): `client` comes from the surrounding example
                // context — inject it as a dependency in real code.
                let prediction = try await client.predictions.create(
                    model: "truefusion-pro",
                    input: ["prompt": prompt, "width": 1024, "height": 1024]
                )
                // Drop results that arrive after the user cancelled.
                if !Task.isCancelled, let urlString = prediction.output?.first {
                    imageURL = URL(string: urlString)
                }
            } catch is CancellationError {
                // User cancelled — no action needed
            } catch {
                print("Generation error:", error)
            }
        }
    }

    /// Cancels the in-flight generation, if any.
    func cancel() {
        generationTask?.cancel()
    }
}

Webhooks from Swift
Register a webhook URL when you create the prediction:
// Register a webhook so the server notifies you when the prediction
// finishes — no client-side polling required.
let prediction = try await client.predictions.create(
    model: "truefusion-video-pro",
    input: [
        "prompt": "Timelapse of a city, 10 seconds",
        "duration_seconds": 10,
    ],
    webhook: "https://yourserver.com/api/webhooks/skytells",
    webhookEventsFilter: ["completed"], // deliver only the "completed" event
    wait: false // Don't block — webhook handles completion
)

Listing and managing predictions
// List the 20 most recent predictions for this account.
let recent = try await client.predictions.list(limit: 20)
for p in recent {
    print("\(p.id): \(p.status) — \(p.model)")
}

// Cancel a running prediction by ID.
try await client.predictions.cancel("pred_abc123")

// Delete a prediction and its outputs
try await client.predictions.delete("pred_abc123")

Summary
- Use `wait: true` (default) for simple synchronous-style code
- Use `wait: false` + manual polling for long-running predictions
- `async let` enables concurrent parallel generation
- `SkytellsError` is fully typed — switch on it for clean error handling
- `Task` cancellation integrates cleanly with `async`/`await`