Intermediate · 40 min · Module 3 of 3

Integrate in Your App

Build a production-ready image generation API in Python (FastAPI) and TypeScript (Next.js App Router) — with async processing, webhooks, and proper error handling.

What you'll be able to build after this module

A complete backend API endpoint that accepts a prompt, calls Skytells, and returns an image URL — in both Python and TypeScript, with proper error handling, input validation, and webhook support.


Architecture

Client → Your API: POST /api/generate {prompt} → Your API → Skytells: POST /v1/predictions → Skytells returns a prediction_id → Your API replies to the client with {prediction_id, status} → Skytells calls your webhook on completion → Your API updates the Database → Client polls GET /api/generate/:id → Your API returns the output URL.

Simple flow (< 15s generation):

  1. Client → Your API → Skytells → poll → return output URL

Production flow (video/long tasks):

  1. Client → Your API → Skytells → webhook → store output → notify client

Option A: Simple synchronous integration

Best for image models that complete in under 15 seconds.

# main.py
import os
import skytells
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field

app = FastAPI()
client = skytells.Client(api_key=os.environ["SKYTELLS_API_KEY"])

class GenerateRequest(BaseModel):
    # Request schema; Field constraints give automatic 422 responses
    # on invalid input before the handler runs.
    prompt: str = Field(..., min_length=3, max_length=1000)
    model: str = "truefusion-pro"
    width: int = Field(1024, ge=256, le=2048)
    height: int = Field(1024, ge=256, le=2048)

class GenerateResponse(BaseModel):
    output: list[str]   # URLs of the generated images
    prediction_id: str  # Skytells prediction id (useful for auditing/support)
    model: str          # model that was actually used

@app.post("/api/generate", response_model=GenerateResponse)
def generate(req: GenerateRequest) -> GenerateResponse:
    """Create a prediction synchronously and return its output URLs.

    NOTE: the Skytells SDK call below is blocking, so this handler is a
    plain ``def`` — FastAPI then runs it in a worker thread. Declaring it
    ``async def`` with a blocking call inside would stall the event loop
    for every concurrent request while generation runs.
    """
    try:
        prediction = client.predictions.create(
            model=req.model,
            input={
                "prompt": req.prompt,
                "width": req.width,
                "height": req.height,
                "num_inference_steps": 30,
                "negative_prompt": "blurry, watermark, low quality",
            },
        )
        return GenerateResponse(
            output=prediction.output,
            prediction_id=prediction.id,
            model=req.model,
        )
    except skytells.exceptions.InvalidInputError as e:
        # Input passed our schema but was rejected upstream (e.g. model-
        # specific constraints); surface the provider's message.
        raise HTTPException(status_code=422, detail=str(e))
    except skytells.exceptions.RateLimitError:
        raise HTTPException(status_code=429, detail="Service busy. Please retry.")
    except Exception:
        # Never leak provider internals to the client.
        raise HTTPException(status_code=500, detail="Generation failed.")

Option B: Async with webhook

Best for video, audio, or when generation takes 30 seconds or more. Returns immediately with a job ID, then delivers the result via webhook.

Create the prediction without waiting

// app/api/generate/route.ts (async version)
// Fragment of a route handler: `client`, `prompt`, `req`, and `db` are
// assumed to be in scope in the full file.
const prediction = await client.predictions.create({
  model: 'truefusion-video-pro',
  input: { prompt, duration_seconds: 10 },
  // Skytells will POST the finished prediction to this URL.
  webhook: `${process.env.BASE_URL}/api/webhooks/skytells`,
  webhookEventsFilter: ['completed'],
  wait: false, // Return immediately
});

// Save to DB — the webhook handler updates this row later, and the
// client polls it via GET /api/generate/:id.
// NOTE(review): `req.user.id` implies auth middleware populates `req.user`
// upstream — confirm in the full route file.
await db.jobs.create({
  data: {
    predictionId: prediction.id,
    userId: req.user.id,
    status: 'pending',
  },
});

// Respond right away with the job id; generation continues server-side.
return Response.json({ jobId: prediction.id, status: 'pending' });

Handle the webhook

// app/api/webhooks/skytells/route.ts
import crypto from 'crypto';

export async function POST(req: NextRequest) {
  const rawBody = await req.text();
  const sig = req.headers.get('x-skytells-signature') ?? '';

  // Verify signature
  const expected = crypto
    .createHmac('sha256', process.env.SKYTELLS_WEBHOOK_SECRET!)
    .update(rawBody)
    .digest('hex');
  const received = sig.replace('sha256=', '');

  try {
    if (!crypto.timingSafeEqual(Buffer.from(expected, 'hex'), Buffer.from(received, 'hex'))) {
      return Response.json({ error: 'Invalid signature' }, { status: 401 });
    }
  } catch { return Response.json({ error: 'Invalid signature' }, { status: 401 }); }

  const prediction = JSON.parse(rawBody);

  // Update DB
  await db.jobs.update({
    where: { predictionId: prediction.id },
    data: { status: prediction.status, outputUrl: prediction.output?.[0] },
  });

  return Response.json({ received: true });
}

Let the client poll for status

// app/api/generate/[id]/route.ts
/** Polling endpoint: report the stored status/output for one job. */
export async function GET(_req: NextRequest, { params }: { params: { id: string } }) {
  // Look up the job row created when the prediction was started.
  const record = await db.jobs.findUnique({ where: { predictionId: params.id } });

  // Guard clause: unknown prediction id.
  if (record == null) {
    return Response.json({ error: 'Not found' }, { status: 404 });
  }

  // Expose only what the client needs to render progress.
  const outputUrl = record.outputUrl ?? null;
  return Response.json({ status: record.status, outputUrl });
}

React hook — complete integration

A reusable hook that handles generation, polling, and error states:

// hooks/useImageGeneration.ts
'use client';
import { useState, useCallback } from 'react';

type Status = 'idle' | 'loading' | 'success' | 'error';

/**
 * Client-side wrapper around POST /api/generate.
 * Tracks the request lifecycle (idle → loading → success/error), the
 * generated image URLs, and a user-presentable error message.
 */
export function useImageGeneration() {
  const [status, setStatus] = useState<Status>('idle');
  const [images, setImages] = useState<string[]>([]);
  const [error, setError] = useState<string | null>(null);

  const generate = useCallback(async (prompt: string) => {
    // Reset state so stale results never show alongside a new request.
    setStatus('loading');
    setImages([]);
    setError(null);

    try {
      const res = await fetch('/api/generate', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ prompt }),
      });

      if (res.status === 429) {
        throw new Error('Service is busy. Please try again in a moment.');
      }

      if (!res.ok) {
        const body = await res.json().catch(() => ({}));
        // FastAPI's HTTPException puts the message under `detail`;
        // other backends may use `error` — accept either before
        // falling back to a generic status-code message.
        throw new Error(body.error ?? body.detail ?? `Error ${res.status}`);
      }

      const data = await res.json();
      setImages(data.output ?? []);
      setStatus('success');
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Generation failed');
      setStatus('error');
    }
  }, []);

  /** Clear results and return to the idle state. */
  const reset = useCallback(() => {
    setStatus('idle');
    setImages([]);
    setError(null);
  }, []);

  return { status, images, error, generate, reset };
}
// components/ImageGenerator.tsx
'use client';
import { useState } from 'react';
import { useImageGeneration } from '@/hooks/useImageGeneration';

/** Prompt box + generate button wired to the useImageGeneration hook. */
export function ImageGenerator() {
  const [prompt, setPrompt] = useState('');
  const { status, images, error, generate } = useImageGeneration();

  // Derived flags keep the JSX below free of inline logic.
  const isLoading = status === 'loading';
  const canSubmit = !isLoading && Boolean(prompt.trim());

  return (
    <div className="space-y-4">
      <textarea
        rows={3}
        className="w-full rounded border p-3 text-sm"
        placeholder="Describe your image..."
        value={prompt}
        onChange={e => setPrompt(e.target.value)}
      />

      <button
        className="rounded bg-black px-4 py-2 text-sm text-white disabled:opacity-50"
        disabled={!canSubmit}
        onClick={() => generate(prompt)}
      >
        {isLoading ? 'Generating...' : 'Generate'}
      </button>

      {error && <p className="text-sm text-red-500">{error}</p>}

      {images.map((url, i) => (
        <img key={i} src={url} alt={`Generated ${i + 1}`} className="rounded" />
      ))}
    </div>
  );
}

Input validation best practices

/**
 * Validate and lightly sanitize a user-supplied prompt.
 *
 * @param prompt - untrusted value from the request body.
 * @returns the trimmed prompt with `<` / `>` characters stripped and
 *          runs of 3+ newlines collapsed to a single blank line.
 * @throws Error when the value is not a string, or its trimmed length
 *         is below 3 or above 1000 characters.
 */
function validatePrompt(prompt: unknown): string {
  if (typeof prompt !== 'string') throw new Error('Prompt must be a string');

  const trimmed = prompt.trim();
  if (trimmed.length < 3) throw new Error('Prompt too short (min 3 characters)');
  if (trimmed.length > 1000) throw new Error('Prompt too long (max 1000 characters)');

  // Strip angle-bracket characters (defuses HTML tag syntax — this is
  // not a full HTML sanitizer) and collapse excessive blank lines.
  let sanitized = trimmed.replace(/[<>]/g, '');
  sanitized = sanitized.replace(/\n{3,}/g, '\n\n');

  return sanitized;
}

Summary

You now have:

  • A validated, error-handled API endpoint in FastAPI and Next.js
  • Async webhook-based flow for long-running generation (video, etc.)
  • A React hook for client-side integration
  • Input validation and user-friendly error handling

Next steps: add authentication and per-user rate limiting to /api/generate, and move finished outputs to your own object storage so URLs remain valid long-term.

On this page