From 30f894a9dc1f44309c97eb17ddef826c28bb5056 Mon Sep 17 00:00:00 2001 From: max4c Date: Sat, 14 Feb 2026 19:53:42 -0800 Subject: [PATCH 1/3] Add text/image/video inference examples for ML track --- 02_ml_inference/02_text_to_image/.env.example | 4 + 02_ml_inference/02_text_to_image/.flashignore | 43 ++++ 02_ml_inference/02_text_to_image/.gitignore | 27 +++ 02_ml_inference/02_text_to_image/__init__.py | 0 02_ml_inference/02_text_to_image/demo.py | 149 ++++++++++++ .../02_text_to_image/gpu_worker.py | 109 +++++++++ 02_ml_inference/02_text_to_image/main.py | 38 ++++ .../02_text_to_image/mothership.py | 7 + .../02_text_to_image/pyproject.toml | 10 + .../02_text_to_image/requirements.txt | 1 + .../03_image_to_image/.env.example | 4 + .../03_image_to_image/.flashignore | 43 ++++ 02_ml_inference/03_image_to_image/.gitignore | 28 +++ 02_ml_inference/03_image_to_image/README.md | 71 ++++++ 02_ml_inference/03_image_to_image/__init__.py | 1 + 02_ml_inference/03_image_to_image/demo.py | 79 +++++++ .../03_image_to_image/gpu_worker.py | 143 ++++++++++++ 02_ml_inference/03_image_to_image/main.py | 38 ++++ .../03_image_to_image/mothership.py | 7 + 02_ml_inference/03_image_to_image/poddy.jpg | Bin 0 -> 30015 bytes .../03_image_to_image/pyproject.toml | 10 + .../03_image_to_image/requirements.txt | 1 + 02_ml_inference/04_text_to_video/.env.example | 4 + 02_ml_inference/04_text_to_video/.flashignore | 43 ++++ 02_ml_inference/04_text_to_video/.gitignore | 28 +++ 02_ml_inference/04_text_to_video/README.md | 72 ++++++ 02_ml_inference/04_text_to_video/__init__.py | 1 + 02_ml_inference/04_text_to_video/demo.py | 69 ++++++ .../04_text_to_video/gpu_worker.py | 195 ++++++++++++++++ 02_ml_inference/04_text_to_video/main.py | 38 ++++ .../04_text_to_video/mothership.py | 7 + .../04_text_to_video/pyproject.toml | 10 + .../04_text_to_video/requirements.txt | 1 + .../05_image_to_video/.env.example | 4 + .../05_image_to_video/.flashignore | 43 ++++ 02_ml_inference/05_image_to_video/.gitignore | 28 +++ 02_ml_inference/05_image_to_video/README.md | 76 +++++++ 02_ml_inference/05_image_to_video/__init__.py | 1 + 02_ml_inference/05_image_to_video/demo.py | 71 ++++++ .../05_image_to_video/gpu_worker.py | 215 ++++++++++++++++++ 02_ml_inference/05_image_to_video/main.py | 38 ++++ .../05_image_to_video/mothership.py | 7 + 02_ml_inference/05_image_to_video/poddy.jpg | Bin 0 -> 30015 bytes .../05_image_to_video/pyproject.toml | 10 + .../05_image_to_video/requirements.txt | 1 + 02_ml_inference/README.md | 56 ++++- 02_ml_inference/poddy.jpg | Bin 0 -> 30015 bytes README.md | 10 +- uv.lock | 4 + 49 files changed, 1830 insertions(+), 15 deletions(-) create mode 100644 02_ml_inference/02_text_to_image/.env.example create mode 100644 02_ml_inference/02_text_to_image/.flashignore create mode 100644 02_ml_inference/02_text_to_image/.gitignore create mode 100644 02_ml_inference/02_text_to_image/__init__.py create mode 100755 02_ml_inference/02_text_to_image/demo.py create mode 100644 02_ml_inference/02_text_to_image/gpu_worker.py create mode 100644 02_ml_inference/02_text_to_image/main.py create mode 100644 02_ml_inference/02_text_to_image/mothership.py create mode 100644 02_ml_inference/02_text_to_image/pyproject.toml create mode 100644 02_ml_inference/02_text_to_image/requirements.txt create mode 100644 02_ml_inference/03_image_to_image/.env.example create mode 100644 02_ml_inference/03_image_to_image/.flashignore create mode 100644 02_ml_inference/03_image_to_image/.gitignore create mode 100644 02_ml_inference/03_image_to_image/README.md 
create mode 100644 02_ml_inference/03_image_to_image/__init__.py create mode 100644 02_ml_inference/03_image_to_image/demo.py create mode 100644 02_ml_inference/03_image_to_image/gpu_worker.py create mode 100644 02_ml_inference/03_image_to_image/main.py create mode 100644 02_ml_inference/03_image_to_image/mothership.py create mode 100644 02_ml_inference/03_image_to_image/poddy.jpg create mode 100644 02_ml_inference/03_image_to_image/pyproject.toml create mode 100644 02_ml_inference/03_image_to_image/requirements.txt create mode 100644 02_ml_inference/04_text_to_video/.env.example create mode 100644 02_ml_inference/04_text_to_video/.flashignore create mode 100644 02_ml_inference/04_text_to_video/.gitignore create mode 100644 02_ml_inference/04_text_to_video/README.md create mode 100644 02_ml_inference/04_text_to_video/__init__.py create mode 100644 02_ml_inference/04_text_to_video/demo.py create mode 100644 02_ml_inference/04_text_to_video/gpu_worker.py create mode 100644 02_ml_inference/04_text_to_video/main.py create mode 100644 02_ml_inference/04_text_to_video/mothership.py create mode 100644 02_ml_inference/04_text_to_video/pyproject.toml create mode 100644 02_ml_inference/04_text_to_video/requirements.txt create mode 100644 02_ml_inference/05_image_to_video/.env.example create mode 100644 02_ml_inference/05_image_to_video/.flashignore create mode 100644 02_ml_inference/05_image_to_video/.gitignore create mode 100644 02_ml_inference/05_image_to_video/README.md create mode 100644 02_ml_inference/05_image_to_video/__init__.py create mode 100644 02_ml_inference/05_image_to_video/demo.py create mode 100644 02_ml_inference/05_image_to_video/gpu_worker.py create mode 100644 02_ml_inference/05_image_to_video/main.py create mode 100644 02_ml_inference/05_image_to_video/mothership.py create mode 100644 02_ml_inference/05_image_to_video/poddy.jpg create mode 100644 02_ml_inference/05_image_to_video/pyproject.toml create mode 100644 02_ml_inference/05_image_to_video/requirements.txt create mode 100644 02_ml_inference/poddy.jpg diff --git a/02_ml_inference/02_text_to_image/.env.example b/02_ml_inference/02_text_to_image/.env.example new file mode 100644 index 0000000..91af5f2 --- /dev/null +++ b/02_ml_inference/02_text_to_image/.env.example @@ -0,0 +1,4 @@ +# RUNPOD_API_KEY=your_api_key_here +# FLASH_HOST=localhost +# FLASH_PORT=8888 +# LOG_LEVEL=INFO diff --git a/02_ml_inference/02_text_to_image/.flashignore b/02_ml_inference/02_text_to_image/.flashignore new file mode 100644 index 0000000..10ffb6d --- /dev/null +++ b/02_ml_inference/02_text_to_image/.flashignore @@ -0,0 +1,43 @@ +# Flash Build Ignore Patterns + +# Python cache +__pycache__/ +*.pyc + +# Virtual environments +venv/ +.venv/ +env/ + +# IDE +.vscode/ +.idea/ + +# Environment files +.env +.env.local + +# Git +.git/ +.gitignore + +# Build artifacts +dist/ +build/ +*.egg-info/ + +# Flash resources +.flash_resources.pkl + +# Tests +tests/ +test_*.py +*_test.py + +# Documentation +docs/ +*.md +!README.md + +# Demo output +generated.png diff --git a/02_ml_inference/02_text_to_image/.gitignore b/02_ml_inference/02_text_to_image/.gitignore new file mode 100644 index 0000000..4ea30c5 --- /dev/null +++ b/02_ml_inference/02_text_to_image/.gitignore @@ -0,0 +1,27 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.egg-info/ +dist/ +build/ + +# Virtual environments +.venv/ +venv/ +env/ + +# Environment +.env +.env.local + +# Flash +.flash_resources.pkl +.tetra_resources.pkl + +# IDE +.vscode/ +.idea/ + +# Demo output +generated.png diff --git 
a/02_ml_inference/02_text_to_image/__init__.py b/02_ml_inference/02_text_to_image/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/02_ml_inference/02_text_to_image/demo.py b/02_ml_inference/02_text_to_image/demo.py new file mode 100755 index 0000000..ede93a6 --- /dev/null +++ b/02_ml_inference/02_text_to_image/demo.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 +""" +Flash Demo — Generate an image with Flux and display it in your terminal. + +Usage: + 1. Start the server: cd 02_ml_inference/02_text_to_image && flash run + 2. Run this script: python demo.py + 3. Or with a prompt: python demo.py "a cat astronaut on mars" +""" + +import base64 +import io +import json +import os +import shutil +import subprocess +import sys +import time +import urllib.error +import urllib.request + +API_URL = "http://localhost:8888/gpu/generate" +DEFAULT_PROMPT = "a tiny astronaut floating above earth, watercolor style" +OUTPUT_FILE = "generated.png" + +# ── Terminal image rendering ───────────────────────────────────────── + + +def render_in_terminal(image_bytes: bytes, max_width: int | None = None): + """Render an image in the terminal using ANSI true-color half-blocks. + + Works in any terminal that supports 24-bit color (iTerm2, Kitty, + WezTerm, Windows Terminal, most modern terminals). + """ + from PIL import Image + + img = Image.open(io.BytesIO(image_bytes)).convert("RGB") + + # Fit to terminal width + term_width = max_width or min(shutil.get_terminal_size().columns, 80) + aspect = img.height / img.width + w = term_width + h = int(w * aspect) + if h % 2 != 0: + h += 1 + + img = img.resize((w, h), Image.LANCZOS) + px = img.load() + + lines = [] + for y in range(0, h, 2): + row = [] + for x in range(w): + r1, g1, b1 = px[x, y] + r2, g2, b2 = px[x, y + 1] if y + 1 < h else (0, 0, 0) + row.append(f"\033[38;2;{r1};{g1};{b1}m\033[48;2;{r2};{g2};{b2}m▀") + lines.append("".join(row) + "\033[0m") + + print("\n".join(lines)) + + +def try_imgcat(image_bytes: bytes) -> bool: + """Try to display via imgcat (iTerm2) or chafa.""" + for cmd in ("imgcat", "chafa", "viu"): + if shutil.which(cmd): + try: + proc = subprocess.run( + [cmd, "-"], + input=image_bytes, + timeout=5, + ) + return proc.returncode == 0 + except Exception: + continue + return False + + +def display_image(image_bytes: bytes): + """Display an image in the terminal with the best available method.""" + # Try native image tools first (high-res) + if try_imgcat(image_bytes): + return + + # Fall back to ANSI half-block rendering (works everywhere) + render_in_terminal(image_bytes) + + +# ── Main ───────────────────────────────────────────────────────────── + + +def main(): + prompt = " ".join(sys.argv[1:]) if len(sys.argv) > 1 else DEFAULT_PROMPT + + print() + print(" ⚡ Flash Demo — Flux Text-to-Image") + print(" ─────────────────────────────────────") + print(f' Prompt: "{prompt}"') + print(f" Server: {API_URL}") + print() + + # Build request + hf_token = os.environ.get("HF_TOKEN", "") + payload = json.dumps({"prompt": prompt, "hf_token": hf_token}).encode() + req = urllib.request.Request( + API_URL, + data=payload, + headers={"Content-Type": "application/json"}, + ) + + # Send request with timing + print(" Sending to RunPod GPU worker...", end="", flush=True) + t0 = time.time() + + try: + resp = urllib.request.urlopen(req, timeout=300) + except urllib.error.URLError as e: + print(f"\n\n Error: Could not connect to {API_URL}") + print(" Make sure the Flash server is running: flash run") + print(f" ({e})") + sys.exit(1) + + result = 
json.loads(resp.read()) + elapsed = time.time() - t0 + + if result.get("status") != "success": + print(f"\n\n Error from worker: {result}") + sys.exit(1) + + # Decode image + image_bytes = base64.b64decode(result["image_base64"]) + size_kb = len(image_bytes) / 1024 + + print(f" done! ({elapsed:.1f}s)") + print(f" Image: {result.get('width')}x{result.get('height')}px, {size_kb:.0f}KB") + print() + + # Save to disk + with open(OUTPUT_FILE, "wb") as f: + f.write(image_bytes) + print(f" Saved to {OUTPUT_FILE}") + print() + + # Display in terminal + display_image(image_bytes) + print() + + +if __name__ == "__main__": + main() diff --git a/02_ml_inference/02_text_to_image/gpu_worker.py b/02_ml_inference/02_text_to_image/gpu_worker.py new file mode 100644 index 0000000..af1ac9e --- /dev/null +++ b/02_ml_inference/02_text_to_image/gpu_worker.py @@ -0,0 +1,109 @@ +"""Flux Text-to-Image — GPU Worker + +One function. One decorator. Images from the cloud. +""" + +import os + +from fastapi import APIRouter +from pydantic import BaseModel, Field +from runpod_flash import GpuGroup, LiveServerless, remote + +# ── GPU Configuration ──────────────────────────────────────────────── +# FLUX.1-schnell is a fast distilled model, but its bf16 weights are still sizeable. +# AMPERE_80 requests an 80GB Ampere-class GPU (A100 80GB), so there is plenty of headroom. +gpu_config = LiveServerless( + name="02_02_flux_schnell", + gpus=[GpuGroup.AMPERE_80], + workersMin=1, + workersMax=3, + idleTimeout=5, +) + + +# ── The entire inference pipeline in one function ──────────────────── +@remote( + resource_config=gpu_config, + dependencies=[ + "diffusers", + "torch", + "transformers", + "accelerate", + "sentencepiece", + "protobuf", + ], +) +async def generate_image(input_data: dict) -> dict: + """Generate an image with FLUX.1-schnell on a remote GPU.""" + import base64 + import io + + import torch + from diffusers import FluxPipeline + from huggingface_hub import login + + hf_token = input_data.get("hf_token", "") + if hf_token: + login(token=hf_token) + + prompt = input_data.get("prompt", "a lightning flash above a datacenter") + width = input_data.get("width", 512) + height = input_data.get("height", 512) + num_steps = input_data.get("num_steps", 4) + + pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-schnell", + torch_dtype=torch.bfloat16, + ) + pipe.enable_model_cpu_offload() + + image = pipe( + prompt, + num_inference_steps=num_steps, + width=width, + height=height, + guidance_scale=0.0, + ).images[0] + + buf = io.BytesIO() + image.save(buf, format="PNG") + buf.seek(0) + + return { + "status": "success", + "image_base64": base64.b64encode(buf.read()).decode(), + "prompt": prompt, + "width": width, + "height": height, + } + + +# ── FastAPI Router ─────────────────────────────────────────────────── +gpu_router = APIRouter() + + +class ImageRequest(BaseModel): + prompt: str = Field( + default="a tiny astronaut floating in space, watercolor style", + description="Text prompt describing the image to generate", + ) + width: int = Field(default=512, description="Image width in pixels") + height: int = Field(default=512, description="Image height in pixels") + num_steps: int = Field(default=4, description="Number of diffusion steps (1-8)") + hf_token: str = Field( + default="", + description="Optional Hugging Face token.
Uses HF_TOKEN env var when omitted.", + ) + + +@gpu_router.post("/generate") +async def generate(request: ImageRequest): + """Generate an image from a text prompt using FLUX.1-schnell.""" + hf_token = request.hf_token.strip() or os.environ.get("HF_TOKEN", "") + return await generate_image({ + "prompt": request.prompt, + "width": request.width, + "height": request.height, + "num_steps": request.num_steps, + "hf_token": hf_token, + }) diff --git a/02_ml_inference/02_text_to_image/main.py b/02_ml_inference/02_text_to_image/main.py new file mode 100644 index 0000000..2cb1f50 --- /dev/null +++ b/02_ml_inference/02_text_to_image/main.py @@ -0,0 +1,38 @@ +import logging +import os + +from fastapi import FastAPI +from gpu_worker import gpu_router + +logger = logging.getLogger(__name__) + +app = FastAPI( + title="Flux Text-to-Image", + description="Generate images from text prompts with FLUX.1-schnell on RunPod serverless GPUs", + version="1.0.0", +) + +app.include_router(gpu_router, prefix="/gpu", tags=["Text-to-Image"]) + + +@app.get("/") +def home(): + return { + "message": "Flux Text-to-Image API", + "docs": "/docs", + "endpoints": {"generate": "/gpu/generate"}, + } + + +@app.get("/ping") +def ping(): + return {"status": "healthy"} + + +if __name__ == "__main__": + import uvicorn + + host = os.getenv("FLASH_HOST", "localhost") + port = int(os.getenv("FLASH_PORT", 8888)) + logger.info(f"Starting Flash server on {host}:{port}") + uvicorn.run(app, host=host, port=port) diff --git a/02_ml_inference/02_text_to_image/mothership.py b/02_ml_inference/02_text_to_image/mothership.py new file mode 100644 index 0000000..7cb8059 --- /dev/null +++ b/02_ml_inference/02_text_to_image/mothership.py @@ -0,0 +1,7 @@ +"""Mothership Endpoint Configuration""" + +from runpod_flash import CpuLiveLoadBalancer + +mothership = CpuLiveLoadBalancer( + name="02_02_text_to_image-mothership", +) diff --git a/02_ml_inference/02_text_to_image/pyproject.toml b/02_ml_inference/02_text_to_image/pyproject.toml new file mode 100644 index 0000000..beb9159 --- /dev/null +++ b/02_ml_inference/02_text_to_image/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "flash-flux-text-to-image" +version = "0.1.0" +description = "Generate images with FLUX.1-schnell via RunPod Flash" +requires-python = ">=3.10" +dependencies = [ + "runpod-flash", + "fastapi>=0.104.0", + "pillow>=10.0.0", +] diff --git a/02_ml_inference/02_text_to_image/requirements.txt b/02_ml_inference/02_text_to_image/requirements.txt new file mode 100644 index 0000000..a73ed1a --- /dev/null +++ b/02_ml_inference/02_text_to_image/requirements.txt @@ -0,0 +1 @@ +runpod-flash diff --git a/02_ml_inference/03_image_to_image/.env.example b/02_ml_inference/03_image_to_image/.env.example new file mode 100644 index 0000000..8360712 --- /dev/null +++ b/02_ml_inference/03_image_to_image/.env.example @@ -0,0 +1,4 @@ +# FLASH_HOST=localhost +# FLASH_PORT=8888 +# LOG_LEVEL=INFO +# RUNPOD_API_KEY=your_api_key_here diff --git a/02_ml_inference/03_image_to_image/.flashignore b/02_ml_inference/03_image_to_image/.flashignore new file mode 100644 index 0000000..6c8e627 --- /dev/null +++ b/02_ml_inference/03_image_to_image/.flashignore @@ -0,0 +1,43 @@ +# Flash Build Ignore Patterns + +# Python cache +__pycache__/ +*.pyc + +# Virtual environments +venv/ +.venv/ +env/ + +# IDE +.vscode/ +.idea/ + +# Environment files +.env +.env.local + +# Git +.git/ +.gitignore + +# Build artifacts +dist/ +build/ +*.egg-info/ + +# Flash resources +.runpod/ + +# Tests +tests/ +test_*.py +*_test.py + +# 
Documentation +docs/ +*.md +!README.md + +# Demo output +transformed.png diff --git a/02_ml_inference/03_image_to_image/.gitignore b/02_ml_inference/03_image_to_image/.gitignore new file mode 100644 index 0000000..cf5cbb3 --- /dev/null +++ b/02_ml_inference/03_image_to_image/.gitignore @@ -0,0 +1,28 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.egg-info/ +dist/ +build/ + +# Virtual environments +.venv/ +venv/ +env/ + +# Environment +.env +.env.local + +# Flash +.flash_resources.pkl +.tetra_resources.pkl +.runpod/ + +# IDE +.vscode/ +.idea/ + +# Demo output +transformed.png diff --git a/02_ml_inference/03_image_to_image/README.md b/02_ml_inference/03_image_to_image/README.md new file mode 100644 index 0000000..2b6a52e --- /dev/null +++ b/02_ml_inference/03_image_to_image/README.md @@ -0,0 +1,71 @@ +# Image-to-Image with Stable Diffusion + +Serverless image-to-image API built with Runpod Flash and Stable Diffusion v1.5. + +## What this example does + +- Accepts an input image as base64 +- Applies prompt-guided transformation with `StableDiffusionImg2ImgPipeline` +- Returns a transformed image as base64 PNG + +## Quick Start + +```bash +cd 02_ml_inference/03_image_to_image +pip install -r requirements.txt +cp .env.example .env +# Add RUNPOD_API_KEY in .env +flash run +``` + +Open docs at `http://localhost:8888/docs`. + +## Endpoint + +### POST `/gpu/transform` + +Request body: + +```json +{ + "image_base64": "", + "prompt": "turn this portrait into a cinematic oil painting", + "negative_prompt": "blurry, low quality", + "strength": 0.65, + "guidance_scale": 7.5, + "num_steps": 25, + "seed": 42 +} +``` + +Response: + +```json +{ + "status": "success", + "image_base64": "", + "model": "runwayml/stable-diffusion-v1-5", + "prompt": "...", + "negative_prompt": "...", + "strength": 0.65, + "guidance_scale": 7.5, + "num_steps": 25, + "seed": 42, + "timestamp": "2026-02-15T12:34:56.789123" +} +``` + +## Local Demo Script + +Run the demo client against your local endpoint: + +```bash +python demo.py "turn this into a watercolor painting" output.png +``` + +## Notes + +- First request can take longer because the worker and model need to warm up. +- Input images are resized to `512x512` before inference for stable memory usage. +- If `image_base64` is omitted, the endpoint uses `poddy.jpg` as the default input image. +- Quality is intentionally baseline for fast, reliable, and lower-cost demo runs; this is a starter configuration, not a max-quality preset. diff --git a/02_ml_inference/03_image_to_image/__init__.py b/02_ml_inference/03_image_to_image/__init__.py new file mode 100644 index 0000000..5d8d1d1 --- /dev/null +++ b/02_ml_inference/03_image_to_image/__init__.py @@ -0,0 +1 @@ +"""Image-to-image inference example package.""" diff --git a/02_ml_inference/03_image_to_image/demo.py b/02_ml_inference/03_image_to_image/demo.py new file mode 100644 index 0000000..cc1ef59 --- /dev/null +++ b/02_ml_inference/03_image_to_image/demo.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +""" +Send an input image to the local Flash img2img endpoint and save the output. 
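+If the first argument is a path to an existing file it is used as the input image; otherwise it is treated as the prompt and the bundled poddy.jpg is transformed. With no arguments, the default prompt, input image, and output path (transformed.png) are used.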
+ +Usage: + python demo.py "turn this into a watercolor painting" [output.png] + python demo.py input.png "turn this into a watercolor painting" [output.png] +""" + +import base64 +import json +import sys +import urllib.error +import urllib.request +from pathlib import Path + +API_URL = "http://localhost:8888/gpu/transform" +DEFAULT_IMAGE = Path(__file__).resolve().parent / "poddy.jpg" +DEFAULT_PROMPT = "turn this into a cinematic watercolor painting" +DEFAULT_OUTPUT = "transformed.png" + + +def main() -> None: + args = sys.argv[1:] + + if not args: + input_path = DEFAULT_IMAGE + prompt = DEFAULT_PROMPT + output_path = Path(DEFAULT_OUTPUT).resolve() + else: + first_arg_path = Path(args[0]).expanduser() + if first_arg_path.exists(): + input_path = first_arg_path.resolve() + prompt = args[1] if len(args) > 1 else DEFAULT_PROMPT + output_path = Path(args[2] if len(args) > 2 else DEFAULT_OUTPUT).resolve() + else: + input_path = DEFAULT_IMAGE + prompt = args[0] + output_path = Path(args[1] if len(args) > 1 else DEFAULT_OUTPUT).resolve() + + if not input_path.exists(): + print(f"Input image not found: {input_path}") + sys.exit(1) + + image_base64 = base64.b64encode(input_path.read_bytes()).decode("utf-8") + payload = { + "image_base64": image_base64, + "prompt": prompt, + "strength": 0.65, + "guidance_scale": 7.5, + "num_steps": 25, + } + + request = urllib.request.Request( + API_URL, + data=json.dumps(payload).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method="POST", + ) + + try: + with urllib.request.urlopen(request, timeout=300) as response: + result = json.loads(response.read().decode("utf-8")) + except urllib.error.URLError as exc: + print(f"Request failed: {exc}") + print("Make sure the server is running from this folder with: flash run") + sys.exit(1) + + if result.get("status") != "success": + print(f"Worker error: {result}") + sys.exit(1) + + output_bytes = base64.b64decode(result["image_base64"]) + output_path.write_bytes(output_bytes) + print(f"Saved transformed image to {output_path}") + + +if __name__ == "__main__": + main() diff --git a/02_ml_inference/03_image_to_image/gpu_worker.py b/02_ml_inference/03_image_to_image/gpu_worker.py new file mode 100644 index 0000000..1103f54 --- /dev/null +++ b/02_ml_inference/03_image_to_image/gpu_worker.py @@ -0,0 +1,143 @@ +import base64 +from pathlib import Path + +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field +from runpod_flash import GpuGroup, LiveServerless, remote + +DEFAULT_IMAGE_PATH = Path(__file__).resolve().parent / "poddy.jpg" + + +def load_default_image_base64() -> str: + return base64.b64encode(DEFAULT_IMAGE_PATH.read_bytes()).decode("utf-8") + + +gpu_config = LiveServerless( + name="02_03_image_to_image_gpu", + gpus=[GpuGroup.ADA_24], + workersMin=0, + workersMax=2, + idleTimeout=5, +) + + +@remote( + resource_config=gpu_config, + dependencies=[ + "diffusers", + "torch", + "transformers", + "accelerate", + "safetensors", + "pillow", + ], +) +class ImageToImageWorker: + def __init__(self): + import torch + from diffusers import StableDiffusionImg2ImgPipeline + + self._torch = torch + self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + torch_dtype=torch.float16, + safety_checker=None, + requires_safety_checker=False, + ) + self.pipe = self.pipe.to("cuda") + self.pipe.enable_attention_slicing() + + async def transform(self, input_data: dict) -> dict: + import base64 + import io + from datetime import datetime + + from PIL 
import Image + + image_base64 = input_data.get("image_base64", "") + prompt = input_data.get("prompt", "").strip() + negative_prompt = input_data.get("negative_prompt", "").strip() + strength = float(input_data.get("strength", 0.65)) + guidance_scale = float(input_data.get("guidance_scale", 7.5)) + num_steps = int(input_data.get("num_steps", 25)) + seed = input_data.get("seed") + + if not image_base64: + return {"status": "error", "error": "image_base64 is required"} + if not prompt: + return {"status": "error", "error": "prompt is required"} + + try: + image_bytes = base64.b64decode(image_base64) + input_image = Image.open(io.BytesIO(image_bytes)).convert("RGB").resize((512, 512)) + except Exception as exc: + return {"status": "error", "error": f"Invalid input image: {exc}"} + + generator = None + if seed is not None: + generator = self._torch.Generator(device="cuda").manual_seed(int(seed)) + + output_image = self.pipe( + prompt=prompt, + negative_prompt=negative_prompt if negative_prompt else None, + image=input_image, + strength=strength, + guidance_scale=guidance_scale, + num_inference_steps=num_steps, + generator=generator, + ).images[0] + + output_buffer = io.BytesIO() + output_image.save(output_buffer, format="PNG") + output_buffer.seek(0) + + return { + "status": "success", + "image_base64": base64.b64encode(output_buffer.read()).decode("utf-8"), + "model": "runwayml/stable-diffusion-v1-5", + "prompt": prompt, + "negative_prompt": negative_prompt or None, + "strength": strength, + "guidance_scale": guidance_scale, + "num_steps": num_steps, + "seed": seed, + "timestamp": datetime.now().isoformat(), + } + + +gpu_router = APIRouter() +worker: ImageToImageWorker | None = None + + +def get_worker() -> ImageToImageWorker: + global worker + if worker is None: + worker = ImageToImageWorker() + return worker + + +class ImageToImageRequest(BaseModel): + image_base64: str = Field( + default="", + description="Input image encoded as base64. 
If omitted, defaults to poddy.jpg.", + ) + prompt: str = Field(description="Prompt that describes how to transform the image") + negative_prompt: str = Field(default="", description="What to avoid in the output image") + strength: float = Field(default=0.65, ge=0.1, le=1.0) + guidance_scale: float = Field(default=7.5, ge=0.0, le=20.0) + num_steps: int = Field(default=25, ge=1, le=50) + seed: int | None = Field(default=None, ge=0) + + +@gpu_router.post("/transform") +async def transform(request: ImageToImageRequest): + payload = request.model_dump() + if not payload.get("image_base64"): + try: + payload["image_base64"] = load_default_image_base64() + except FileNotFoundError as exc: + raise HTTPException(status_code=500, detail=f"Default image not found: {exc}") from exc + result = await get_worker().transform(payload) + if result.get("status") != "success": + raise HTTPException(status_code=400, detail=result.get("error", "Image transformation failed")) + return result diff --git a/02_ml_inference/03_image_to_image/main.py b/02_ml_inference/03_image_to_image/main.py new file mode 100644 index 0000000..7a001cd --- /dev/null +++ b/02_ml_inference/03_image_to_image/main.py @@ -0,0 +1,38 @@ +import logging +import os + +from fastapi import FastAPI +from gpu_worker import gpu_router + +logger = logging.getLogger(__name__) + +app = FastAPI( + title="Image-to-Image API", + description="Transform images with Stable Diffusion on RunPod serverless GPUs", + version="1.0.0", +) + +app.include_router(gpu_router, prefix="/gpu", tags=["Image-to-Image"]) + + +@app.get("/") +def home(): + return { + "message": "Image-to-Image API", + "docs": "/docs", + "endpoints": {"transform": "/gpu/transform"}, + } + + +@app.get("/ping") +def ping(): + return {"status": "healthy"} + + +if __name__ == "__main__": + import uvicorn + + host = os.getenv("FLASH_HOST", "localhost") + port = int(os.getenv("FLASH_PORT", 8888)) + logger.info(f"Starting Flash server on {host}:{port}") + uvicorn.run(app, host=host, port=port) diff --git a/02_ml_inference/03_image_to_image/mothership.py b/02_ml_inference/03_image_to_image/mothership.py new file mode 100644 index 0000000..55eab2a --- /dev/null +++ b/02_ml_inference/03_image_to_image/mothership.py @@ -0,0 +1,7 @@ +"""Mothership endpoint configuration.""" + +from runpod_flash import CpuLiveLoadBalancer + +mothership = CpuLiveLoadBalancer( + name="02_03_image_to_image-mothership", +) diff --git a/02_ml_inference/03_image_to_image/poddy.jpg b/02_ml_inference/03_image_to_image/poddy.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74937103d5519407af4923cc790c6a4b8bf611eb GIT binary patch literal 30015 zcmeFYcT`i|w>BC&2BavVNe87WU6Bsb1O*|GAiYQj0V&dZla2x+(xinDI)q+BQ&Fj* zDi9E*3PE}ack}+vIo}y~j623R?%(gi;NFY1HhZl#=QE#KxR}1c(Gt}Coa{lM$B#im zAP|TWL{4-KL;_q90bd{@RuI`g*B}s#=*ItCTM!BS*BD|Di1_;d86WEeBK@!Nz%&18 zTK{$bpZ_URK_CjChg3{LLOhM=Ki5RWB%p%-?iUvmDBZ?cW{vcL)C6fq!@4-yQgO2malGe|O;D9r$+#{@sCpci`V0_;&~Xzjxqb z4ip6<`lk^S0d6=kF);}#3D8K%NG}^1IoUsr{J$E-KaKLTUHPAOIUSft3jDi5PDcJe z{r}e&7e4^M{?{_}?ay$s+zffu8oyaJ}8 z0}&IEkPwrSkOA)tj10aEC6LmSUE>y4A-}F~O~K>IAQ7JSfs$9Xwg+M`y2B@F^CIF3 zn30K@h4tnwegQ!tDe1fSWbVs8P*c~?)Ox55H8g_3jh~v>KC`oTaCCC^^7iq4>E|C1 z85JE98}};y^_%pJ%(q$ZvI~ofOG*)C$nv`S26SUnb4x3xx37O-@Z+b?W8)Lp$*Jj? 
zS=`d{&y`=lSJ&3@yMOog4-SuxPk`9Qf0hG$|Idp4*LvuI^$?Sil8{ndu7`;DCGa7k zCne(+C%>kmPhstOokt>^l0h}?Lv7C$UP*%;h|P;pFe9H7?k4_nN&l?q|87AM|G!rB zKMVSw^<2z?s7Z)`10$gaDS-$qtA+7|Y6VbXykMBVIzyO1VZ0TCzB&l_aDfF1v&b0q zwTR;dN{Q4No&X<%{~ibe27~nVCjy7$Ou-yL|A;* zN98>P*{ur7;j#Q0;5NQT!C5&ED+BUIfLzqIDAhuvWw> z48y{>5u^~A~YK`Un)mnj?sCm9B6kqIj!2jh0Ey<2|T^(AAnc?jfwO zA0U_0cvtKm$K?H#MJyUX1*|Pvvs6|NCVA`&5+w z*U8wVk}>SWFo|Q^? zaxLEDfgUB9Gm@bou69kA(%DLZOrE>akRHZODcGZ_0;TNNraQ5zgtC|kB1rd9++;E! zSy*{$9BD{I@s$lInG}w*(uGx7l>zL>k#PsRha}F!mKTx|U6r#!Vgkk^h)ji)l^J{) z&SMaugOX_%tubPH&PqAw`1~P;oxF%f;0Qng&_>o8y736`tQ0w%QV^#=rVH8>#65N{ zBLXGo))?XxoG`MpQlLm+%WcOh;)YAKcq)zPck2os=+a}+iH70gCgnF@F!Whqx#ITL zV5&Tk(F$mHysa z2_`}yqusS0#!AvX;AMfsFXKHx9NSkMd$*A}vB^N%Tg@XXT{+ShV{0lInSF(}fJ;OT z%Jiz}TN4o(I@mcJ)-n)4w!rX!-mzBWk=;BwQLdOeF8eE>nFLk=p$9U()IesgQ7tT`okG29D zDi*b4}OY&!MiXsJP1RzDy`IdtGj zKt9y`5Tw!q?9uYv+a~39X{5w?Oe2OE@oB>d*-ob7&to@MkTTu)0XT~rNg9G}cO)_+nuZ=^*YQeS zfJD42i_}UOjp)aLvv<8E3-&XadBPfQsG*6L*AN;>na$&^EK)|3!swGRx%Kf4ybO4j zO<~6oR(11t2+A&S_Dl*V#VH-2uUA*JEtJ@A3T zz1~K=#uW?f`l3Y3B&+64+_~5ssL#tH$;n%T@x$w{+-%f_;#K38#VgIsV~EerX)`9@ zy{odnS@(O~ty_*?6Zr$`Hwv(o_CeijZRBM~{pw=yvsV#@K=!ZczP>eCp`fW1vE>zR zDfrIr?37z3pYi)zy|PDdiG7CfWM>?nGlnid-2G zH!c7$`AdN5?w1k=_G6FMcgl45Q(hRu~aerZbE(T)$iG0{&VYYvAVa`KNBL@U4sX9Z zn~Yf*fM*!lW}X8isZoVTw*mbrppdPAoKZ%*)-10GQX-c`yiuLzN<#n+#&U9NT|U@o z;ZAN1Y_vC4@R2$oa>Js4c1Ra{^uI`t^1c8G&sN5*EeXw7YuaQbza2sqiaL9wK#Dzz zYITM<(s7(C=|K+a1OmeQ^8%rI!o~6#;d{3U#v S=?02)3S7d?AHMp9zP+8EjNV&QuOu&i z>8UFRYq%G#DUDGNitI3!jVGEcG}M2oK%?-4+RK#jJN{fp7!xyiz~myAoI=v^Lt#Ze{X9nfAz8?dN1(T@mAv1Fjg}n-RBKtx7v-z zZYsOo5qc5^?H<;if!^j*`yW}VJpp5(ER+ULIEsLroAQ8ZjL(XT;4G2nf(QV1Rc;br zOseb)oN+1DDj=RNeq+RRVNgsSR_uu^G~&%zV-gn+pb#LM3;{ipH0THnU&zP}ek{XZ zY5bq=c1P+k{sI8_&|*+M`YAO>B1(Y&L`z%So(ZWi0g}qAZ>P_$&2xZKA(vrT%ot_8 zSqA9~LG!;&&UmKbv?Oji5Uwo)^w1D6%r8us3a^3=$LKgjU_HfE~Y0yJk$ZX zf~$G-yvPFX1C4r^0Mp#>#wNxG{Rfi0HD`>%R4y0HlyYe&dKqkJwOxYU%)fMVzinZR@6;G~9lh&!8raq)jCKpxLQWl=PX zmO5a!t0>D=AkH=2`t*U&z91tUL`L0IL<~UCml-C+LLM>bWM=A1!Or7FEphA7JA|8v z2rw`*@)22Doq^6%&z|CapJCB9-dlWhj}q^D53nWQ7^@p|T^`+gMyEmtE^nt%%-GLS!g%#}Iyqf;_I|FuQ@=>jLmPc}i2PMazmJFb zyWd1}yyFT=;DZZLt)CqJMG3GlO2Cw-AQB~mp@ZSS~y zdI9?QEAtMO>XEXLa_pLAK^H}+bqamZ^PQ0?d8R&@4Fdw}t zH2I#Sqx)L8<9^y591G6wrcHCoBn!)kC?aLLptnZ|+p$|i|i?Ppbk zzjuID>#116%?Lw;JF#hQ5-Aqgj~krzhe-_|AMIeC&7JXC<$qYS_;o$8?&S4~+aNYH zXu&G@lMh;e+>W8tH#<10I=6$n6C-O zt?M(PP3e;}1pd)fLFG~&B&jjVTESU`v*1#T-N5Z?qs!mYzu};H{8) z<2ljZgJ3xbsXd@(s0bxYF+HH%A8fMX0Tqf6F^mveO=h+e=TJZ{$+mJsxZ(lR1?CZT z_Dimgi(vZi9npQ3dD>)-Y;bYHVRk{8!^EpQRh}3S7;x3 zV=_Zf3Fjs6LCqVC_c_YJhQNxdv2S&E2TL3&yH^G0SkcBRyA1M$a`3_+>78c;F}!+X zCTh2y`dm1PUVN$Z*Lix7SmYDghK<|~;UIbt!1d?i!PAptahxvFgqKgr$FEpX;s!(RtF$ z@wA$kMdKsw4Ll!-7kGEBm>Y#XQ^sXYy)&<3Tk51`(wpb|azzgWP6cRlc>s()T#fr7 zlGH~=Wj>F>+GHR#E-ExyUf|MlfB~3RR%6Ic<*(Fc$o5l~1q=xj57o3`IN$@zfq`y7 zyNu%mE-j1U9)wHJWHp*T6`)BE_5g2steDcJPzn^q#jqm>erJn}@)>94)dEO$iE#&+ z@s(u2b2FzaGzd4-kem1*K2{;(2k;1=QC70D!e(7SN^meW@101rHzr(-in9Qz=7+>^ zmKY~vr^yGHCiV~}-mATN73uKrx5nUhVv#k-u`7J5GW%QR)iLdmYQJ(kz$R#Bbo|8$ zORp(f237W}2!AmPv_WK^2kvp%wVe}&4yVFe{e8>HBwZ(EofR(2UK~+ zE|T{fzVygFJPNOn4QUu1l^#18OpftxR<*uKHVpO zI-^;r{EnAEmI%L59WoM-D6SMs;56059o>h;>hv8}DXn#EB&D5}zff@L;dguS=PAFx zV*hWpkJh$)yP;D$vB%Tr;+Ah5W}<{e6_ z;!fctEjX_;Fc^C4J0y|Q%i><3;`=I-$4j(V|FXAODsnyCZORe+Nz#ya9~5c!d9Bg+ zVdJb=%X+|@ed)O%?#9uDCA<9CeH-pNJR8*UZ{tGi^N|V>Il=33Ed#PspN8)tEefO6 zD)vTA7N&l}YidsfPl-duPJZ|5F`60&7gtb!7JJxz5&++HAMBc?Vb4?)*21=T{V=-= zx&Wy)&%UV&Z568vKR-%8s9r(f29RZbUVX11OUL)_?z(0uweYZ4b?q>chCoiGPFnru zaK(?sorvQ4`Jp|`zjvw~C`4U!hrNSgkFy@?d#89!h6v3D=k%%mWMi`}tG+Q6K*n;oG<2Ifvv z0l~c{mZ~G4bM*(tlt`c} 
z>s1E*aHh%{PrsNXDBP|Relh>M#G}S+30>GRAbftmk~GEY&)BBGQXQHKSI<&$YeGzR z^5zx+XDRy5=%F!nhsCnL^nmU?w#ojVJxR}MZgq0hwb7daASwwUCYcsPUOGTwA}%S6 z(38jx`Af%*i-$6BB3yaRfK*{!T~EcFzL=2*?1UrJ1zZ3S`z47{qdUL!aX(zus-1e| z7*1&raLH+Ec=wS;-q4?pq3?dQthWdD%=K4~5-#y7Ij{t-*sAedJl_ zw81v}Jqh`h^T)NYnp;XO#AlK0uRa}C|H>IoP-y?Y*s|d7+QD0Q7EHe~5Uri}mmkkG zZ~=JNs-gS_zL2f1Usc*O+V zZPhuo+L2NR(}#b4H6$OnB%n1cWu@Z%UJKkf3_NU$QM7#fYbZngv#IEjtzXYQ+oL_K zcOdGP_%MsAY=l^-5Drb&8Xa`XFFs6vO>J=z_2GN0LXEOgbf~lCj_18I`te1F&38C= z+K}=5x~k+LC0ww@;pq+IQEr<9U#7>`A3AwD{StSy$sZC^Xuc^wenaEBggZfEf5@`_ z(P-XxxU|;56KKqrZzXpd9171KwKV1rrX4Nls6eV;xmE7RVHG#syt_J6k~d@@wryAz z<=4aG$(t6B8y+tBG9fG?$GYxpl@ex-^r_8Q&P2-|l7j|B_Bj{05Bo8)DazP=&du88 z(9KYKO$W)Xhn>))sfoqfdu52T*$Ytq@{8T|cC?7E5~KWsmy2!e{;rMhpiHpE#4=y= z*c5vsnZpXs>2dhDREOjeQ}Vvt+qWilqawK4){IZDzdJK6-T1WRzL=`|;XxIc+^zYm zYse@}(YMI!$^|I%GoECN>o^p@h{lIvk^%y$<^X2lAI*#1d@l8yrTcspmUwQoY!+aj zdGAMGzfxKSs@X`dc5gVDAmrpZZL+X&ZAdDhTt|eyA^C61 zjK;IQ?c~e;GyYSv&o=bX(8@z8!|NX0YnZG#U+x3BJje9bF_toCZeCb42>!FZ`}^!b+fc^@YKa~JoY zxzM-XN^#(m>H}ANRJQ0V-h>cFy|<8sFw_ui5N2xmx^Sy@=4eCRjlXY-E-lPd`coY0 zM~K8s^+AqFdQb)8wbCqG2zptEF#t^XwKK$4`|k0*NsjFwO*q?;(pF!=3L~xrh_~vG z6T^s`kP~^a((;>J5yi%zR)y9FQ(A@eZ?LY#(3KFUMjb8{)0G{f3c;yrWIhil4PHIc z0K#lZ<*O-x8zyNk-jm!91ckXF-VZhm;?%tdDJ9~7sHg?D0EjEap-N?x1t_!gxc4iG z0E#bMczLU?1-JM=<>UKSmCI3rD(?tdh`4Ace>NZ%2CAH- z3yN_}Kx?HwcPoI!#k7jjuI0OSXyGT~r`BzsC@|sNd#O^cFKDnx6i=<|^@fH8XuP*& zd!u^K$_MhhtNwSyVq?Vp84Bmq&N;hwVwnLDa|NMn?k+;>@8FI`M(KmNWfBhTQpnwL zTkay2{$NhjkDw@c)Y02(;?>Q#RYDU>?v)Q-cZ_0g@Cd6RB z{z);yfZC0vb_D8IsX15DwBa z$>QGX*J1#t3QCWINq7LGLs6n#JAqLYE@x2TY1T7&XS+OBAw=yVq?|MLNFZB)j?^Se z)HzEN@E5>q(ZpkFM6@N^yb!=;D88~PZZ1|JL6^a$KU+!(FnQXsR$MH595G^eGj-X@+= zS$W>+_%O_1dpb;h%kTme?zYr?Ghwz>5A0lty8vCA-p~O;)8oSTTKBAvkUY)xUl1EI zLg{ja!UpZE^2<1~A0^4CrK@)ogFf-JC3SUCqjs0q@+Ui7p3NqGjG%x09lXoQ?3A#x zy|koGJLFtHgm^|x%VYM~CB-Tkm4DzeBjs1r5wkv1z@%a7U$iUP^!3LL(YJYXgg=4y zT?tOOVHq(=d#cbGF9MhX&yOQyJkOjp zIw-mTU13arl|yya%V&CHaOmYh^8^jVmL!?XzggbB@mE4p_5~>B0>oQh!cgQLB01Om zG!`9d2RN(FqFem5F@(E*hX>}^TZWMXCXXz*#>XjcooWiZy&P8Q9CL9)U4R}LPvMh< z{a@f`I~UHQLR~BwLk5)^-bRKxx>Jy4|HWj#wg}GDcozSMB5CW7TZ!PuhgKOAZ0sp@ zk1Ok)s?=q)->i4x60J6e_m(a|5p{%boI6;{9X-)_r7IQI@#e;kyUr&{xtKeM;p>8GeD1y3ed=7SArB;G33BAauEP9o_$Z;ET81OseuT zJun*ydO(qK1}*u7a4>F2o}W^`=gGX1TY63YWu0nCdvW8P#M76pw~Ryc-@dA{&bEeR z`+Hy#7#`+qB};=7-b{f(U;U2)pa}H==O10w&BpIa_XN-6GwkuVhhxle(i`_(r$%%m zzcslA%Gwgh%td?610M65#XN_*kVFnBbb;}os-An#WubLA_YYq4;elZEb#luIIAX(o zeyIOzsPzSiW2#M*>jG5ulH?0#8ea%xb+P<39~T-4oXnY??47Ze!?y#Dp-oc4ZuT@y zbo+bOV_nkw6mK5l<%TKeGOtxnm7TT1*C=BD@Z#Mu)~vZ>GrkAX$EWyvx_4!(XBn)7 zIX~_yy*ooqeJk?QD3|ke{U8~@wEO5cthWpfa!_<8;> zGV;)kfcqI@?yOZ%VQr%7gnU^MY#nK4b22LAdY}-qjnSMTF;hJf-A% zOK^+Yg`LpwE!@g>KnAHaR~)#N8^9Az6-(?+Pi_f45FQ{^!{Px@;;zcF1{H84VRW+K zb{^u(Py=9O7zF_H6jNPstHfBioLM5B8h=%b2U5tz-mlG5m9_Z!r=*%@4O0G%i7IRQ z1Hc5O7bfB^Z4mGvQt44PW#Z!rw`8NB3({EK1{3vY83*8_|9i=HbMyqx+b=Z~B?%epAv-LBvz8S9LY4V#HS z=Eu!9q|OXT&UTdetjtU{m1NwjlXDs$S@UsONS#XCfD3(daWR}s{m@e_f<^oMnCwIe ze4btPQ6=3H_-gzrMmXj10vZjt_POiM-vphW1tb&0W>!R!mEB`EY}d$m#)PVAbCX_uo_Lr3#0j$+uP##(cfrF9) z4u#4*fccb06xylW$P5KC?b<0bvE^D~M2YtG$qLGKyOf=(X6x<>z!*pjZoyzm&hKo# z`t;I}S+hT?U8<nR@`?dYEy!HQ=NepO@RENY7A3M|mIYBQ z_42{88yv_oERzSje!8@DBw8FX8PxEKT74?(6Mqxe`7}MWgPy0;V(077YQ6a?AKpP6 zpn+y{8P!Yf*LG7j&z6d>jc%QANF3f;3Po(6-3UH4J91e@Yzn*VFfKodKGWH0TzV`s z9YI>8k^5!#8H#gWoJZ2soy7eem8kO>+XW~R6%@EL*RRxgFDmHv;zm}LPZy1!{#KC6 zir30xPrrI&)lG8wY25YtTQ7M&M~BU$&+5+MQ&la^QpKMw*HS7Q<@IiCy5hFt*e(0qr$1Rp38AbE%P`~j7a+ZpEyTyIgYQGB zgb+l1?h4M+yelf`W+2jnr{aYKK8;l>B}Gl0EH_2nh_n_H_K>)%Vhc{Vs&$TXZC5|I 
z%{iENb)sSEyQRd!%-#9Ena!5_gVtV0Q(K6$>u(5GPtNnZY!LBNJ-m~$Q!9VMJ2ZSe))9j_uyJ_UTWW0~Ck5MeH55?a3CL_PC&;);n@@{;XM z;iHYIyCp->t5-vp(tfk(`p$S?9#6As?L+_48B=j)S{VsF0Vtu5gnuU5 zDg5zphHq#@*Q%%1N&Uf#$)qn{j;~+Tmx^25$9MHE*yrgt{dui)M{_M3xntSKIi1HB zp&>&Q9XLGbc1O5*o&Y(vQ(oTr)tQ%V*^iBy_R=d=QEtjO@1Cf5~Ca(5@7HA;F6l9=>(p09`nBjSVI{3u|lWO+EJRxQj0B2 zf4iKAes&PcS$_d~^jk-F~aVdcw0bj1`id8kE(G`>+T5s(tJ*aua6>U$0TF4gm%?vca_PFMCaF^)PxYuAdh59&6hd>!l<8x?+? z_W4^}^(PcnN{at#xgw2OuC(v?_1Dt#t+f+xYi=^;s_Cx{^P5%34GT+#*s@Bk=kYXG z1OE14OxeY?_RFO8G*FhaN$bKx*A0A-q3G=9Jl*&M2cQ5+5DO&c@*02?Kn8TOs{^NY zKpD1q8DWwEQfC?BBXC23^g?AILuQu<&f=FANN%-O8&#((H$-tLlwZc4qKI2@YK0Iz z?Q|}{{KT#q2+?4tsAY`fxK9kjK}aA__K&eBkmCX6M{C!f7)7Uyl^Soay=Cx#$^F%i;dUNcWEFnD!2AOvlt;cU(B3+-i0Gq>FW=9XgyFQ?2= zi*3T_Cx+NfjLUrz(7q3-B?|kY6-G`r{kpY*a96PcBja+r33x#_gf{S=}<7i=|p|!uVL9QI#Wjggf~9^_1rstR_Z}FUq|OMPoND7 z0ElHz3bPXs4s)rrSY`e7M&kQZXV##v?q2RfdwxGHa_pY!>QAKmJQG3u(s~Ya%(PqW zW(}RyenCSPDyZ6)aNlrv!(nmwj9u?E$d3twU$vww*l9|#4`-C>oRB(p-M-;Cur~kM zI|45MtfZY+sn?P@WHxlia$#=npnC_q5GD^@YL>r`3v92L)r{VHuM_`E>G-$L{p>*G zoAC6fg-HW2t(hw(grGuf-qN~)VxPj-zIHw_-W1`so^cj#ATG>hF<;i8;VjBPo!WQrOFGwI@M`$7a%+Fu3eL=Lpn{wpvyr_%R8dK z&#V}(nesaCgGAZ(4=TA~Q9L-+yR8dd`#)cgk3{9`pRh01PP|t3jI?~_Z1RPJXD6v@ zlS#i}s^=Fj>it`EU`lWPH4=ZajAeU?-uPb_TdXwSupZUS=8HVu9p9qUPanpw{+%0I zNnf-b*;Q(5Y<@Z3uJ1T!@a+co_M4QCHeG6OEOrXNR_V6+zAwXg6Q~?By6}sdI7{iH z4U4gAs($B!+O>GxpfyD1R9zvE_bqK=hIURoD;qovP4#nXZ!U1^`bT>s+a^%VGV=js z#4iOG(@`A+?Tl5ed+puq%{!zk@v(@_bHW>NBPrWO?^Z5(V~bU}YmfiV`HB^tOaTR_ zYc8%Kx7R|Q2D-oer0)@G50O51!D&XKo8Lrd$Ui#IQD}Yb+vb^g|E7D_7+&!WQP{zo zB)-k}CIkxjsX%BCr;_t9$87KN@U6Mt$%lU{ng)L>zPGW^TT&0O+|obgJ<~aIBfU2- zI$`=WyN~+VGcJw6GlK+34_2=oE-oZhB6d0&-`xl+WWXG(eGMu(HJo+|*t~AC&{uIJ z_wXmD^Qk+nyv(PX!iL6LoB(S~)j zz8Dy-Un`mNI@U7w?0*E#GEFC;a*(&TM4B^ zo<9z8&PIUsP{fMGx8Wjgs|_0~lmi`%*VmwwO5n{H$`TTp2U`Y|C@p0nYbLbM15$%{ zNwxc125~Z#^j857sLU$XR!UjOD2fJK%nhVvBU(R1uw-)|Al0OcxsaqJW3AlbBx0&U zDp#H206VglOiDdUf!5-xm)h1e z^rfM-Q>H&oCSQN2!45vSMdqlf1X;&zd}5c{V81sj7&11Gj^bBXoA;}>m&vfLglhs7 zhj%g*ob8^-8umyy>oad#u9^wt?Iy*k8ukF)K=H+s0`ID3eSud{pavrx3Iv#p6ol&- zi2Jw|oQ?Ffvorw=p8}JNglTR-DK9|ant$94^Ym4+kBmx~QsZ^Z;3{Bse5+>tJj*gB zWpgxyJo3;IgGcaZVk>2ZM4UJS!0EBJMNZ28LBt64+X4Osfc)5Kz!r@6QdjUIGi4iU z4L2Bt8}pdKD2Q&YQAu(Med3gxyclFs2#9XMt}lc`i=Ab?%qMpGHEtTl<+loXQf|XC z?@CKx$X-G*nT1AL7ob*gsK~8Kk&p~ojGoChB1({_u#XHnb;iq-)PDbNWD9Rgbo;6A z8*YiAm5<4q>OR{k>5I$p^Tx5xZT9LNm@f<79t(f+t_H=vVv}a`eQ5H=68y{ZVEc?J zCu`koBDa;3b2?-=pV77@P(5aHY(W0kpOZiH0t}(;d?LXSANWV$E)F~IagC9zUza*` zfA_s~mY6(Hsi??^wa-)et<{;t<{4=&n)EDK@xZsoWzOHuN+76Mam5J;LQ88cow zUx1W)&nbqVZ3KIs9xxL`W!-8tf-@*;D}KOCE44SaIt8&``&w39FF-*Tpr~DIictyS zZ`JROa@?5g&VO_sB0h1Kakr)ixH{Y8Eh=x$g%01a{Gp~;C-c3xX^8n`T*OBX8=Vrl z*H;%FdykZqdF$QE@f?9zbF6IctApcQ14?E&^ms#bd*vj?6CJuJAmk8#LeNvh&uS3< z<`AB)?Lk(An|zK!0k4TyhvhB(L*aMQa+B}o%I6Yk2Q+^YK;zbiNz`}6;;^-CY_)@r zYJaD68?6}(Or3iro`9>*%b0$mQ#eh&&dAkk^`YHMKQ2h_2tWU}t8lXy)2*erjkYyD zFFkqk`lGVK+xWw3YCmW%-l^0dOJa+9lCkWaqDL0{E2GeVJ}uB6_2X7Tsob#q7v28# zFaCP)*b#7uOBU^E@%H(AtXNVCwbC4D^nVPSnXxGHxk z*PPnmJAlW_`6l}dbZ45tNk^(siM-F2o~1R)5cV2P-sQ++KU5*P1W?2$iTJM7M&F{#C~c4y;6v z<%b8Pf%YCTC3LXY-4xCl)_YYSt(3pf1p>r>)Ht}@E)rmo#ELNcI3V864JmHi7i$7W zXg`tYlNN}aMUz5!x)p!`6(>&%eHPc20ad8;1OWc}WZr|siiUQSz;7aRrW9PFbzV&- z^_2iJ<8*;yr*&J;g`rS~9thmDUt!d1cSBreqLx8VEs2 zQR0%|wWfW0!+4Q+Kr4)90#92|>CTgLdqX{pQ!uNR{_7)De8)k6Uqpj36-~d-qD?ML zK3hJ1)EfLaA1J4lGExF(z}W##4@7$ks`ug{VfQH)vf7r26m&ex7Mpxk@GzhH#Du5=H zFl}o9>m&q4>>8cjfi0Z$a<#GCE&6H@JsZR2RW&TeWnTJQ`=*P{LrH& z^GPl>9=3+TZnFC$u~!bN*8L17z(bS-pTeAiWRXD6cD-_XgHpodX;m$ldZ~{cP2HTe~i_p5fExDGJT*`R*)`f-^;%`lY 
zScAcY{v?x0f1Zrq`zu)Wh$y|Ek~-d~paHx5ZMb=(7cp`Y=v^(^bVP;d_htX{r6vU@7&AD* z7Lsu$yd*k0q<~2BkqFR6^EcM&D(bzfY(t$j%CoFoRr0c3c{tw3!I!4K{K+TL4YmSj zzgut&ClHf)XDVF{dfvr2yumPXbvdNTt9feqZi9Dhii(KI+vsi8hdc<9b3xw#~J(KmTD$}ll#jniHN9g z!htNqb&+h(67r(3P5flQj1$s6!+pbJF7QBY{ggmt$GAlknB? z@DWH6Q}(IY*-P`6ZlCBOJlwL#5)JQe8>aC?mW*=!jUXO%12M*yaT@{?fh{^r+1q+vV;xGBYX9)Vo7?@Z@OEp50B`j)_qhn(=1-u#{pBd^j z(G{?;H`)wD*1t{`DlZRx4Y-w$Tu<%3#vrpVKdRec=sIjl_NT2OkU!kILGRV%8iJ_7 zA$n8X(m{lGtnziL5u$pc_5w48$!efrvL^OK`{_i*v; zm1`|WATHiS!y%%|p*E|xF9bFpi(BnM&e1w#-oYmkl8o~RV__{o*A-BW%-1UHDV^k(UVI`zbfp)I!&~996#l1~Q{|v-J7G}|E z99W^AX&{6`^0DfHfGP0D1^{QI7%Qgpt;({Z*!+^_uE)4MhVJjsy4`{DTTDHsPvvI7 zQ=na`Y}X8^pe7=GPmc8yxwrIoX-nLwpn8lMAp<0}@wdOqsF>P}&*t4$K$fY{3^1kX zGVZtE7F>8%^qo^M{J>ND_F#!O@%RX|^P|(>-U8#_(ar40pzt+wikYjN&!oKiL_&e; z>H9|ip^s3Yh$a-_z4Ln{RmgCI69=R~0pRf+FOa?lxeR$IT*9762zpMB#xV^%2#q^; zzC_wPnGgmOh&SJ> z>D8*BGm4JOcdjd=00SX$1((h|1~hT&0BXb@Vf;zj&W0FaJgj07Eh6XyndB0((^G-L z*o-pbJHW;&8H}7Jb+twr2@q=_bsiwK6K+!uL^#Zh`eQ0YWESDU^4!j3(y@~j#nPVK zs4G_kq-yNS3bTPB(w^3Ps4I{Fh8jDDN!FSgEHIMXGVi=BLTcbA*D+7;UW>8R zNG1m}j6J+rr<7CEURPS&D47Yasj+E*fpe{@s}SL`hWe`uohn{cr8;s zf9{20oaf*1$;)?o1+nE6JAwagL+E)T%u28yW>;TOR*&n=zz^allWWOeE$JjpE>ryt zfd8FJ>14I_5~3SeT-Z6`@U@LRs8zTC^>$qm^|qwf*wF}q?NwcX4k~x%KZs&J)W(zP zst4n7Go3fg*VZZ;MxRv$!hbVu_2S=Z5xxYIsQU;fdRl%WFykaedgE5rp+_7|nr|}~ z6>|2Ju5MS%-Wb$azY3>=ZGAjd@74bCRod#HdjBQu+tc#j6J_sQDA2l%`=vFU+V9*p zMiF7Vg34bQmr4MRRz4SK6pfa~vazZQ6meS}Afr26Z8uWIhH*gPQ9H0HL^yHYv$4#N!+Q}R|8)&%Uu=P~l_?Z4%f}V4R zn{W|hkT()GJX_~icmD1hm7a5|Ng!NXy`fuxWkSmJfGOEJ2eb;4f*n`b7)KF04PG_YB(s` zOb>Ro`0`tEa7%Og994WOSbWM|vfpzm36!fVjmZMV!$5}7*^@oA900_zlIiFth8Wpg z+d=dr0N_@ky$~A+;8g?ImuriIzW8w1?$Bk30%1M2uDqdKnAgYJ)!@vi&e4GTqwimssa33?vaZSOECr$gs1i z*3M+GGmHb*rTL<+&;i_y$7Q4>npNE>&L}D_t>4txbGD+wBBF!USakyYid^V(KP*!e zKt_0x4yB2@Isk^!Zq_Kls{!6MwnD)B0N9s6iK4p1LtA|z5t$7bBd2q7s!>!Fcn@jy zl>zK>We+6`+J?heYf?{vr;r=P>(G`M8$0jz!Wh}$B|4;%I+$#qjO{qrEgz=cMSnxN z0P3lYsXirgCzIjmihWVqy6@@?0^uW(-Rv89JP#L8Zn1Kn+-~RQvUE7o4iA70;O@Gn zb($S4b&28~qQqqqMZBm*HdlY_&Ut;tKzJ$Etw_|<;GK;`o37hku3MIBSjC&td8zS=U+#+Q zPsE=scUSSDQig^3spe(7R+=7s#kI>Ln^<_4d?K5xs}jl8Mfp# zE8~Yva@;kFuh%T~1+0!fv;HdV}WY_MbdOQ@zY6cv^OiQFPOCiIWINhPB(qp3u|KwD|&u+Z}FB z_YSkwyIbmP5$LvsdBq52^$-pZEt6yTA6mvn@p0*e@!`NPHoh9fTD7C@o_ybpM81?Z{Fy4b@}*( zwEh64<6!y;P;+2XZKgm8sn?p*7VP!#Y}44cipm0ZRHk3Oy57fb04r)TL$D*a5d|nJ zr!BLw0jAAw!2l#+W|o>J$Jxsv)%qoVSul3tQmq{(ln7Upr5CPV%OPj|{f34_W1 zPYLH9&*c06aUG;|KsD!6jxonH=Tpf!JZevm8Qm?05J5{jtY=U+>-5ecz8g_IO^e_vw1qX0NGpz)d2*>kAZ8-Nu`C7gZm9 zE;?oEnb$)?`55+SrUkdemit$IPF)N9tpJa(0T9#jk{o>z^5}x&P3vxd(#NBexLEM^ zCy4-44rZ$qe7ZU74Va;_tfKjWH~=Jp3Oc1OwAR9FE)eOpWC7S*+%W*x(s9=gl=2TG zNgAt#RLaqdCLOj=71*XYePXFvsg~a#%)6H&MgIJ-kJZxHsnB0mdZ#X~-1!aA!4cim zNEmw*7yW)85kRgC9Z53q4}mQ{iakDeD6^qe^L44oSFV$hcMEd(r(4Ikszmm6$N-VyoRULAS8vGw`B~cdgQYnTF5&K0Z=mz_w}L z`DU^4HFZe}>dcm~5AbfH=20_?|S{A=qE+-{czs?&X8`#Z7y zmDFQ?i^>#-FNY@2#(mD(WxQMk+Xr2AdH9#9W+2@u;Sjw4Vp#j?;gyXYB6QLs!MDV& z3AGk@i2aazixH;Pma#8e_RUQY{+H=#gyqb)+lz>Dr9Jrr=1*=n3sU6WxbjbE4tkz^EEnS=g^ZfBBuH?y5x;y*eQpM#hx z#zhbvcL4Z0XUkmgUt<5?GQf>H@4$n1G-RElm6%?DT+wm^c|4G40je2JLH;s$gEZ!aD?ck&OfSc6z1j z264o5kKIz;wD(Z2^W!a4ZB_4i zj+cvjXw?H65U(fs;;P+VLT8D)1j>7E6FazK@x^y94$^O5U9C!SufqmRMnEUw5w>5} zw6I~WRyXGqD4+4-7}7)WB9dd;FidRIGbt}Vr=S{k8GxMC=&&u3h*z7v2vRyHvrMr+ zfNsHUOKW%W#`Hmu60@upGu*)fF=c9jAm$WO2W0HumE8yKW)B#C0AJM96;cOf>=AiS z>SC*aC;Y!+8X^Una>C03SA5p2aTs6N?K%|of$tIaoa?|nH7dZ|bMD6Npb0zg8pj43 zM;e~tHsi5dA(F`>y5ub9)^*i?WF8B?qQZ^4tMe3fBI*&4I=*oZbO70Kn1Q}7SsF1F z(-Xc#{r%^oPTV7m*;PtUiW>=)u%sNY6dvAP!VNlv%wiI6kjM60dEr8Ah4Wm0T(#Xv z#1w-hCQv8x_UAPoaZC6JUwWX 
z6Jw}D{@t>?;5R1ZuA6;e*p=IzapcIDO{JNj{1W0u&mQY>^@dxQ{frAW!0L|$Zsu@& z=!MsUe{B!qzX|XcNLxaFPS#+SFe_c`eW;6X95-R5SuVh&Ijo|8@nF)=*onWLxQhm; z>-u%Ftp0r%+Ab>?pPN!6pP?x`bDqGyEamesuaAtXi)3P4L5I3nq}#MT=XuX2(hT19 zXZT&H=lJnW@~&Ibk!&5w)WN+z<^Bnm?kUaCB6#5Or;h%S6}K5ivQl)%lS0p7k>`Gv z?I&?vd@bs#S05t`-EWbjCF&yYuPfazJ1U2o4Gx10*W4&L>r;Ped&wX7*`8^0E^>ta z*Wq6U=6&*sMQopD>w&+!-kiVn61COWnCdt2&F|x2^l*-lDEfAOJSe>dV=OrP0LR&m zFXQ_G30-lPbc={pK(>xTPLRX6t4=zIqV%dWdJUizQBCvTH>7unebT?7?nGllli!z3 zjYZO>7^xexoj}p?O?%n(2i9_`sswU+YV_$HwiC$9dzM9#ngK4YnP0j2(!dKt>1~%} znl0WA=;Ww8?WH$IhMZK4+)%*k2kD&X;xng6caWXqW*?pJdyCl8g)u<_R=7Ym{tImK zw`;nY|1n2z3BC)r9h`moG!Vdw6yi*9OGS?ElVAcCb>UFK%yn7q*UO*u2BEW&kg(rw<4VmEzWng;uE zvb-nI%ve|NesIVz0QaI48RsBt%DNSAk{E|#Q-Uo`+~%2wzYywfU%tJ1<8}rs1KIi0 zl^zEh&#lo6;6JtGqO~l@^F$>P|-w(0ojOJ&jNXys(vfDB3QnueU0zB>Ph;He7+tO$FQi#Vj(9+09&P+2P z!Rd1!TfuB^q{SQ#BqgQ9Kpw)l)3E@~mq*&KzOyuv?$>2@F_QikG8NmRZYP#}Pl>`x z%-O*R5J=8xqmxZG4=z(y-Fy6CZp2;OtJNcBC%{@;C@!0PPjQxrtp@Nl7J{ONYgiy= zE0Q6pN}L)xh!%k&lAkz%IW)hf(^grWbLegL0_oZ!BH02@LVz)j^5i=wqB ztI%)p_O1Z_PRJZpQtUS=^*OZ3Dp~a70Zv%00^sOX$-V_O1LRYmOzNNBudlG_G zn0R!*0Y@eFAckvdk2b^t97&2f`i}+kHdZ>Mq64`-2o#rRMHg1x>eCEUfCCm|cY7gO&#Hobt1u9w~-IcQa;i7~B`-vDRA=rD@&)V59 ziXKyxjXiOw-#iOB#8^EwshQ8)fJoQvsHd9$Wm3aSFFSq9e-?FDb#5Fq{;s4YWVpml zVnCeZM}BKQB%AK43r@(j<0vFO9_{LXQbN^`qJ=>R8- z`0aGkveDTgkvdu@*Wv|XUI5fahX@z@lOh@DsBGA_Ni_a6#>wo|< zG>lO-Q8pO4>hnhIKc$y8k;}1R2uz++Kvw&OoFy?0YbDB3%n zwFikEkd}|6^$D7frkfl=j^1iY21JPVCl+8u|rMIinA?;YE6s|&EHm->DZP;USB zshkCVz%?V;c{Z5KSkfH4pmSlVMx4G5KVo7yMf+hKBvvn+aNZS?3moAxD41AlQt3>U zHz!$TKGEGSCu}$PeMJN$wFJ-FK?RDjNsXI#tc}ND;~r`dc=(qcAIoZ*Pm+kget2O* zJ^r&{(#?*UuIkRvFXZvOLu*cJzE@F0!3|*E% zC#Qro*b@tLRs)%*}LGJdx~}y7sJz29vFV`DsZG%q#7AC#02HEPzImeR@pV1TZ9j8*+YkUZy~qhV5ci-!$t)Z0`qV z%NNLPja|p1?6;d5`m^$UWwPwg(nPd(bl4ZnNl9o4FK+=hal+cF64_Q?VaYf0^`V1Nime(VZhg z>}(8)wvOJOfl9|ByiP}R0?=2vVUEOUt2Aun6fd0kV@pP1#fM%s^CE|&(=JAuyUfx+ zGs}U^*10cDeq^RoXMSO zU&~(Jh+nzQ2*a)TY7hPMk}VjIHnvw_a;hcYshpH&(f$s`c({ctzA@C^Ao@w-` z)y_V;qIWaZPO9rb+87cPi1k#W{7(U!dTL2`Oh#Dc$V})iSPf0$I+3Ip+)pAPh08Qx znw$C&O641^Z^x~q_+48w063=RU(Ys$_3!q&X~L|%qQvgQ<_}?=S)0i=p^Z7v%97=T z&Ag1Lee9d7tIUYSr0mOyuyA$xf!}6ZS8h#JoUTd+B^`iqfCq{B|i* zrQ33Pege(6*ljE_Ee%7tEyd#aVLeXuAz`xL5~s?|#I@}e?X7ylp~KIgqy5cpHH42n zR+hdv=p=pm+uxDHvXQd?x{+fy%QRserAwat0nQ0!U)TP5sCOIo42$>UMTggJ&M!In z)dt~j_l3}#ke*8}l8I#wiIetylqL;(e{f1|B+S%DB4Eu~P~^E-u%hBaq}AD5dln|$ z#DJ-KEB91zANEpgfNBwlAuGif5Z`awk~hdX!Z!aT3dd{d=$2}wuW!G4^CL7 zJS#PMIy8tR#8SSPxEt+9*i@jM7pwi4Qc3%U!83f|tQi)_Y^wqzm?tssa!Vvec{lJ+ z;!|zwnJB}qdP{*S1S$9V_o5v9%N}jE{BBR_26AX=y6l?#rP%emLG7kJuh@r{oxFo)!}`;IPo7WDY`>n(UFs0Ee|o4)1Vby z5=hX2q#;Oho-0?5%3P_>GJdz?B$(68w1f}As|tBh*=Y`-%e+7gmKe`k&ktD}{>b^( z)<_o!oUwioXuB_c2yLY<-Hvzl&Mf*j+zugOKm>EJ0oVm0=>#Y}B@xmt?(A9=77BLU z#c>0Q?HT_1q{6C)K7eM$$pdI<2&ouEAcavCJuCBa0qy9LHcom+Tp|O<=H-SGkGuVV z@FS3c%QxW>&;nB8f2_=5I{_gOn<0;*qE@cxjsSWu=aSNC-}A85qb$yR&exYR?-z?E z>b4V-fO1d*TW9G~*&&`M0lwewq!(0xJlx$}NkYu4)l=7hS&xYRE)*p*h=ykZD@fBmQ{r7qmD<2R~Y!`%22}G-M>t#73q~LHaha%vjXe^x6u>73ET8{6mFXfMd1qs=603flu*AmJY>E}<*V5aLOxKtv@@#1M z%u7t_Yx!xoww#Ak#-(~K?XX@+#|b5`0zVoZEC)0-e#T))cY%n)2<}x6M1d%nrD<7N z7MScU{VAjirYjp&dCLa~+6J zA9K#w#{3u^n{-!M<>7(rgHasQe(XleCbSpcIc}eL2xW&CPs&XmuBR!ndFhuI=@cAf z4F8e8v}DK95cIeY%z*u&Vkq?upq3g3~2 zmd&4$Kmqz5HWDImNe^A$x(%tUk9ZnJZrSC-1%>>Hc?R|wme^Hs&xM%1ZqZu^xYtZ9 zIiaj{UWeQH2>qoT9@kY{It=TPLZ@vM*R;R}%BS~XM^a<+Ud!)|M(P~- zp*xKNNLJ2MXX=De1=!fq^WICc4rRcO8D^x<4#x12ftdW?2uo%@0e3ELxI=-3D^2-f z-6c|7bcNefNH%|4Ue4QK)5@2U@b#PEXkX6B_Er}DRPtOtBZN0hy{xA#(!p3mnO z!e`Lbpy$7EeJ2~E`0ky)gd7%bF!tL({$BMsXPrHgG9Br$sq=C3rrb<8ti4rnWUy+T 
z=-OtdEe!7wuiXIu#?ZB5OV^VQoUh`fZl*~;-$T2*yJ|^Kn*VqzKG1(C(ExBp z6ak!BZyY$#oFdlzN}qkj*&`BRvO1zKT9ovuF!lo*8{_fT{w^d<{=JzraQhHSyi6;wI;9>uzh+hEZq-W4zM6;Qo@0V3L3uUx=q#~q zq!PD!$W}LY10RZ!sUV{gKEMC0{JzS*S;Z%~&(fuGqdpO|ag!>SFy_&?T_I;p2l4|C zqa<`gWUq>?>5j1_CFpTg0hpVqZ{P*uYKbL$ZjO$<8k-9*V=7j zy&mVB-aI6*=QzdyL|Jok%*a(MJ8yE-`V?g(hRQWbm#!X?E%+Z;WDBKjiMy4dwYH}U zfW%K05J|WLL=AW*z9PeO&?Dj35?1U`lLx!^32~X+$&f~9*_CEc-ICyIDwbrdW!k*MWyJ9noi`JNZOyhkml;X+#-#IP0$4p}5IM1I*DlfIl`tGX1W()M7=J^%-4F^9RHqiz!-U?;8ViW!iWDE1 z1^Pv5A$01{I_}zme@73G>zi3xkou1`f(X*J)b2B8LD2o8Q$Xu_*?wN2;qoR_i<2*t zQYYLXpaCn?tGKj7xb(IqHNb-DfUh)MM&+6ze3+~K5~*Fk!VL#kFJlg9Zmj;Yj3>#+ zJF?EM2kV-jAiFN}cC*0g_}~l|T!cN!{D8NE1wG*sJ6i~*+>DrRAQY@Z9yA0xr>fJlDQNgBM@vx@)0q@{9O z`MUyu07n?JqjBkjFU2674;h(84F@AANnbIbdE2hn-Fg3`$yWdjbvRWVb`YDWlB%Q* zhEtNp0VeGNwy1wb<$p#OU><-FI$-9;wE*TMy~a9c@alN`obMu&zV({di_U4!nZ{0_A+dBd z>f86V&Qjp37y_6YJ;a;gFRZlB<1f-=j0{Qh#7zt{%o69@=rC(LQ zKe&^E%sRO7Y#MrW)GLGJ`^-cCa2*TgeyPU}rz9kOwr4@qyht0%Yf^cDprn4F>0G52 z^H0Mx-E;SLl66aUed`EG7?n?8 z(X<)wT(n$vb_^sBhSeu-8_gNC;@?2{%N6C}PZ1^ItN9fSj;4 ztmUseUz}0RVYPR9)4>PG_(DHFwNm`D@(d&kn;1*$fb-W*)RoY!*_0toQKT{~OH zL2u4bW!J=_hIa|oA0^f1v6f`Y8JEc|b;6qL3$HfGUHwAOx(#hqG&8gN*)@|@;%22$ z^X_foHXX~Z+g@okm&DK<5V!iz=+(-5f-Zu*jC>rv>CCmuw;Jh$ z5_l28<2n5c$LCN6x;M!`k4ZZN5A1WQQxnfH3sS!>{~Y9XpgYTh5Ap!^vJ>2y8}I)q zm{8y#^Ts@ebZQhL^mVcklU=EElcqD~@nv?u`9SFJ*kLkB@?;aAE3$yJ2rjN8?7yl) z)t?GK^I)p(nnd+|9Pt0TFj7&$qb0RESdTLlP-|Ta>wg!Gfgj zZ&9-@#<}uAi)B7bUGpvHO(cI3G)J3g#XixU)~|fM!zJDf5(6D-mBO?lpl1sHAVVJQ z=sb<&)CkP4p4S!&`8eOK1eXjT1Z+y0Y<%}x=l!#FGhN!@O=p_4k-ou^v{5*0YUb`t zHR>9L_)e(5))iRUqfJ)IEi2A~D%g*0^Dg?3kHDX>)gQi4;)8IiPX;~pzHZ=*nt;8F zOtw}8&byfigRO+Ya`H>?OxSCiNj_eM7`T5-UF|iTDrzk(7^u1LV)nVi0xAIc=ub9J zc9&({M`ahYgNnGeacA4mVI=b`pN=!k-glV2I)|YtwhK0LXMVk6nY`TE#IXH%7uMV4 zbr>lg6}&-UfVUo9(fKwpyHNDeYa-uL$6d%o-gv?o{gXN(7uOTI^b4QeG8i z4}E{>becNkw1X{`^0kgH0742O;Xax$F18o{i6ZRFOnboRcD?awb8Jbx$t4|t4>)&Kwi literal 0 HcmV?d00001 diff --git a/02_ml_inference/03_image_to_image/pyproject.toml b/02_ml_inference/03_image_to_image/pyproject.toml new file mode 100644 index 0000000..b73c5a0 --- /dev/null +++ b/02_ml_inference/03_image_to_image/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "flash-image-to-image" +version = "0.1.0" +description = "Image-to-image transformations with Stable Diffusion on RunPod Flash" +requires-python = ">=3.10" +dependencies = [ + "runpod-flash", + "fastapi>=0.104.0", + "pillow>=10.0.0", +] diff --git a/02_ml_inference/03_image_to_image/requirements.txt b/02_ml_inference/03_image_to_image/requirements.txt new file mode 100644 index 0000000..a73ed1a --- /dev/null +++ b/02_ml_inference/03_image_to_image/requirements.txt @@ -0,0 +1 @@ +runpod-flash diff --git a/02_ml_inference/04_text_to_video/.env.example b/02_ml_inference/04_text_to_video/.env.example new file mode 100644 index 0000000..8360712 --- /dev/null +++ b/02_ml_inference/04_text_to_video/.env.example @@ -0,0 +1,4 @@ +# FLASH_HOST=localhost +# FLASH_PORT=8888 +# LOG_LEVEL=INFO +# RUNPOD_API_KEY=your_api_key_here diff --git a/02_ml_inference/04_text_to_video/.flashignore b/02_ml_inference/04_text_to_video/.flashignore new file mode 100644 index 0000000..2dfb6fb --- /dev/null +++ b/02_ml_inference/04_text_to_video/.flashignore @@ -0,0 +1,43 @@ +# Flash Build Ignore Patterns + +# Python cache +__pycache__/ +*.pyc + +# Virtual environments +venv/ +.venv/ +env/ + +# IDE +.vscode/ +.idea/ + +# Environment files +.env +.env.local + +# Git +.git/ +.gitignore + +# Build artifacts +dist/ +build/ +*.egg-info/ + +# Flash resources +.runpod/ + +# Tests +tests/ +test_*.py 
+*_test.py + +# Documentation +docs/ +*.md +!README.md + +# Demo output +text_to_video.gif diff --git a/02_ml_inference/04_text_to_video/.gitignore b/02_ml_inference/04_text_to_video/.gitignore new file mode 100644 index 0000000..2f377a5 --- /dev/null +++ b/02_ml_inference/04_text_to_video/.gitignore @@ -0,0 +1,28 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.egg-info/ +dist/ +build/ + +# Virtual environments +.venv/ +venv/ +env/ + +# Environment +.env +.env.local + +# Flash +.flash_resources.pkl +.tetra_resources.pkl +.runpod/ + +# IDE +.vscode/ +.idea/ + +# Demo output +text_to_video.gif diff --git a/02_ml_inference/04_text_to_video/README.md b/02_ml_inference/04_text_to_video/README.md new file mode 100644 index 0000000..296c563 --- /dev/null +++ b/02_ml_inference/04_text_to_video/README.md @@ -0,0 +1,72 @@ +# Text-to-Video with Diffusers + +Serverless text-to-video API built with Runpod Flash and Diffusers. + +## What this example does + +- Accepts a text prompt +- Generates a short video clip with a GPU `@remote` worker +- Returns the generated video as base64-encoded GIF + +## Quick Start + +```bash +cd 02_ml_inference/04_text_to_video +pip install -r requirements.txt +cp .env.example .env +# Add RUNPOD_API_KEY in .env +flash run +``` + +Open docs at `http://localhost:8888/docs`. + +## Endpoint + +### POST `/gpu/generate` + +Request body: + +```json +{ + "prompt": "a cinematic drone shot of snowy mountains at sunrise", + "negative_prompt": "blurry, noisy, low quality", + "num_frames": 12, + "num_steps": 18, + "guidance_scale": 7.0, + "fps": 8, + "width": 512, + "height": 288, + "seed": 42 +} +``` + +Response: + +```json +{ + "status": "success", + "video_base64": "", + "video_mime_type": "image/gif", + "preview_image_base64": "", + "preview_image_mime_type": "image/png", + "model": "damo-vilab/text-to-video-ms-1.7b", + "prompt": "...", + "num_frames": 16, + "fps": 8, + "timestamp": "2026-02-15T12:34:56.789123" +} +``` + +## Local Demo Script + +```bash +python demo.py "a cinematic drone shot of snowy mountains" output.gif +``` + +## Notes + +- First request can take longer because the worker and model need to warm up. +- This example returns GIF output for portability and simple local testing. +- GIF encoding is capped at 25 FPS; higher requested values are clamped and response `fps` reflects the encoded output. +- Quality is intentionally baseline for fast, reliable, and lower-cost demo runs; this is a starter configuration, not a max-quality preset. +- The default parameters are tuned for reliability on 24GB GPUs; increase frames/steps/resolution gradually if you want higher quality. diff --git a/02_ml_inference/04_text_to_video/__init__.py b/02_ml_inference/04_text_to_video/__init__.py new file mode 100644 index 0000000..ba611bf --- /dev/null +++ b/02_ml_inference/04_text_to_video/__init__.py @@ -0,0 +1 @@ +"""Text-to-video inference example package.""" diff --git a/02_ml_inference/04_text_to_video/demo.py b/02_ml_inference/04_text_to_video/demo.py new file mode 100644 index 0000000..5c84eba --- /dev/null +++ b/02_ml_inference/04_text_to_video/demo.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +""" +Generate a short GIF video from a text prompt. 
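+The script posts the request to the local Flash endpoint at /gpu/generate and writes the returned base64 GIF to text_to_video.gif unless an output path is given.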
+ +Usage: + python demo.py "a cinematic drone shot of snowy mountains" [output.gif] +""" + +import base64 +import json +import sys +import urllib.error +import urllib.request +from pathlib import Path + +API_URL = "http://localhost:8888/gpu/generate" + + +def main() -> None: + prompt = ( + sys.argv[1] + if len(sys.argv) > 1 + else "a cinematic drone shot of snowy mountains at sunrise" + ) + output_path = Path(sys.argv[2] if len(sys.argv) > 2 else "text_to_video.gif").resolve() + + payload = { + "prompt": prompt, + "num_frames": 12, + "num_steps": 18, + "guidance_scale": 7.0, + "fps": 8, + "width": 512, + "height": 288, + } + + request = urllib.request.Request( + API_URL, + data=json.dumps(payload).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method="POST", + ) + + try: + with urllib.request.urlopen(request, timeout=600) as response: + result = json.loads(response.read().decode("utf-8")) + except urllib.error.HTTPError as exc: + body = exc.read().decode("utf-8", errors="replace") + print(f"Request failed: HTTP {exc.code}") + if body: + print(f"Server detail: {body}") + print("Make sure the server is running from this folder with: flash run") + sys.exit(1) + except urllib.error.URLError as exc: + print(f"Request failed: {exc}") + print("Make sure the server is running from this folder with: flash run") + sys.exit(1) + + if result.get("status") != "success": + print(f"Worker error: {result}") + sys.exit(1) + + output_bytes = base64.b64decode(result["video_base64"]) + output_path.write_bytes(output_bytes) + print(f"Saved generated video GIF to {output_path}") + + +if __name__ == "__main__": + main() diff --git a/02_ml_inference/04_text_to_video/gpu_worker.py b/02_ml_inference/04_text_to_video/gpu_worker.py new file mode 100644 index 0000000..5daf485 --- /dev/null +++ b/02_ml_inference/04_text_to_video/gpu_worker.py @@ -0,0 +1,195 @@ +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field +from runpod_flash import GpuGroup, LiveServerless, remote + +gpu_config = LiveServerless( + name="02_04_text_to_video_gpu", + gpus=[GpuGroup.ADA_24], + workersMin=0, + workersMax=2, + idleTimeout=5, +) + + +@remote( + resource_config=gpu_config, + dependencies=[ + "diffusers", + "torch", + "transformers", + "accelerate", + "safetensors", + "pillow", + ], +) +class TextToVideoWorker: + def __init__(self): + import torch + from diffusers import DiffusionPipeline + + self._torch = torch + self.model = "damo-vilab/text-to-video-ms-1.7b" + self._using_cpu_offload = False + self.pipe = DiffusionPipeline.from_pretrained( + self.model, + torch_dtype=torch.float16, + ) + self.pipe.enable_attention_slicing() + if hasattr(self.pipe, "vae"): + if hasattr(self.pipe.vae, "enable_slicing"): + try: + self.pipe.vae.enable_slicing() + except NotImplementedError: + pass + except Exception: + pass + if hasattr(self.pipe.vae, "enable_tiling"): + try: + self.pipe.vae.enable_tiling() + except NotImplementedError: + pass + except Exception: + pass + + if torch.cuda.is_available(): + try: + # Prefer CPU offload for better reliability on 24GB GPUs. + self.pipe.enable_model_cpu_offload() + self._using_cpu_offload = True + except Exception: + # Fallback to full-GPU placement if offload is unavailable. 
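+ # Full-GPU placement keeps the whole fp16 pipeline resident in VRAM, so it leaves less headroom than offload on 24GB cards; the conservative frame/step/resolution limits below are sized with that in mind.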
+ self.pipe = self.pipe.to("cuda") + else: + self.pipe = self.pipe.to("cpu") + + async def generate(self, input_data: dict) -> dict: + import base64 + import io + from datetime import datetime + + prompt = input_data.get("prompt", "").strip() + negative_prompt = input_data.get("negative_prompt", "").strip() + num_frames = int(input_data.get("num_frames", 12)) + num_steps = int(input_data.get("num_steps", 18)) + guidance_scale = float(input_data.get("guidance_scale", 7.0)) + fps = int(input_data.get("fps", 8)) + width = int(input_data.get("width", 512)) + height = int(input_data.get("height", 288)) + seed = input_data.get("seed") + + if not prompt: + return {"status": "error", "error": "prompt is required"} + if width % 8 != 0 or height % 8 != 0: + return {"status": "error", "error": "width and height must be divisible by 8"} + + generator = None + if seed is not None: + generator_device = "cpu" if self._using_cpu_offload else "cuda" + if not self._torch.cuda.is_available(): + generator_device = "cpu" + generator = self._torch.Generator(device=generator_device).manual_seed(int(seed)) + + try: + with self._torch.inference_mode(): + result = self.pipe( + prompt=prompt, + negative_prompt=negative_prompt if negative_prompt else None, + num_frames=num_frames, + num_inference_steps=num_steps, + guidance_scale=guidance_scale, + width=width, + height=height, + generator=generator, + output_type="pil", + ) + frames = result.frames[0] + except Exception as exc: + return {"status": "error", "error": f"Video generation failed: {exc}"} + finally: + if self._torch.cuda.is_available(): + self._torch.cuda.empty_cache() + + if frames is None: + return {"status": "error", "error": "Model returned no frames"} + frames = list(frames) + if len(frames) == 0: + return {"status": "error", "error": "Model returned no frames"} + if not hasattr(frames[0], "save"): + from PIL import Image + + converted_frames = [] + for frame in frames: + arr = frame + if hasattr(arr, "dtype") and str(arr.dtype) != "uint8": + arr = (arr * 255).clip(0, 255).astype("uint8") + converted_frames.append(Image.fromarray(arr)) + frames = converted_frames + + # GIF timing is quantized in milliseconds; clamp to 25 FPS max and report actual output FPS. 
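+        # For example, a requested fps of 30 is clamped to 25 (duration_ms = 40), while the
+        # default fps of 8 encodes each frame with duration_ms = 125.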
+ effective_fps = min(max(fps, 1), 25) + duration_ms = int(1000 / effective_fps) + + gif_buffer = io.BytesIO() + frames[0].save( + gif_buffer, + format="GIF", + save_all=True, + append_images=frames[1:], + duration=duration_ms, + loop=0, + ) + gif_buffer.seek(0) + + preview_buffer = io.BytesIO() + frames[0].save(preview_buffer, format="PNG") + preview_buffer.seek(0) + + return { + "status": "success", + "video_base64": base64.b64encode(gif_buffer.read()).decode("utf-8"), + "video_mime_type": "image/gif", + "preview_image_base64": base64.b64encode(preview_buffer.read()).decode("utf-8"), + "preview_image_mime_type": "image/png", + "model": self.model, + "prompt": prompt, + "negative_prompt": negative_prompt or None, + "num_frames": len(frames), + "fps": effective_fps, + "num_steps": num_steps, + "guidance_scale": guidance_scale, + "width": width, + "height": height, + "seed": seed, + "timestamp": datetime.now().isoformat(), + } + + +gpu_router = APIRouter() +worker: TextToVideoWorker | None = None + + +def get_worker() -> TextToVideoWorker: + global worker + if worker is None: + worker = TextToVideoWorker() + return worker + + +class TextToVideoRequest(BaseModel): + prompt: str = Field(description="Prompt that describes the video to generate") + negative_prompt: str = Field(default="", description="What to avoid in the generated video") + num_frames: int = Field(default=12, ge=8, le=24) + num_steps: int = Field(default=18, ge=5, le=40) + guidance_scale: float = Field(default=7.0, ge=1.0, le=20.0) + fps: int = Field(default=8, ge=1, le=30) + width: int = Field(default=512, ge=256, le=768) + height: int = Field(default=288, ge=256, le=512) + seed: int | None = Field(default=None, ge=0) + + +@gpu_router.post("/generate") +async def generate(request: TextToVideoRequest): + result = await get_worker().generate(request.model_dump()) + if result.get("status") != "success": + raise HTTPException(status_code=400, detail=result.get("error", "Video generation failed")) + return result diff --git a/02_ml_inference/04_text_to_video/main.py b/02_ml_inference/04_text_to_video/main.py new file mode 100644 index 0000000..abe2d89 --- /dev/null +++ b/02_ml_inference/04_text_to_video/main.py @@ -0,0 +1,38 @@ +import logging +import os + +from fastapi import FastAPI +from gpu_worker import gpu_router + +logger = logging.getLogger(__name__) + +app = FastAPI( + title="Text-to-Video API", + description="Generate short videos from text prompts on RunPod serverless GPUs", + version="1.0.0", +) + +app.include_router(gpu_router, prefix="/gpu", tags=["Text-to-Video"]) + + +@app.get("/") +def home(): + return { + "message": "Text-to-Video API", + "docs": "/docs", + "endpoints": {"generate": "/gpu/generate"}, + } + + +@app.get("/ping") +def ping(): + return {"status": "healthy"} + + +if __name__ == "__main__": + import uvicorn + + host = os.getenv("FLASH_HOST", "localhost") + port = int(os.getenv("FLASH_PORT", 8888)) + logger.info(f"Starting Flash server on {host}:{port}") + uvicorn.run(app, host=host, port=port) diff --git a/02_ml_inference/04_text_to_video/mothership.py b/02_ml_inference/04_text_to_video/mothership.py new file mode 100644 index 0000000..a4de8a8 --- /dev/null +++ b/02_ml_inference/04_text_to_video/mothership.py @@ -0,0 +1,7 @@ +"""Mothership endpoint configuration.""" + +from runpod_flash import CpuLiveLoadBalancer + +mothership = CpuLiveLoadBalancer( + name="02_04_text_to_video-mothership", +) diff --git a/02_ml_inference/04_text_to_video/pyproject.toml b/02_ml_inference/04_text_to_video/pyproject.toml 
new file mode 100644 index 0000000..011ac7c --- /dev/null +++ b/02_ml_inference/04_text_to_video/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "flash-text-to-video" +version = "0.1.0" +description = "Text-to-video generation with Diffusers on RunPod Flash" +requires-python = ">=3.10" +dependencies = [ + "runpod-flash", + "fastapi>=0.104.0", + "pillow>=10.0.0", +] diff --git a/02_ml_inference/04_text_to_video/requirements.txt b/02_ml_inference/04_text_to_video/requirements.txt new file mode 100644 index 0000000..a73ed1a --- /dev/null +++ b/02_ml_inference/04_text_to_video/requirements.txt @@ -0,0 +1 @@ +runpod-flash diff --git a/02_ml_inference/05_image_to_video/.env.example b/02_ml_inference/05_image_to_video/.env.example new file mode 100644 index 0000000..8360712 --- /dev/null +++ b/02_ml_inference/05_image_to_video/.env.example @@ -0,0 +1,4 @@ +# FLASH_HOST=localhost +# FLASH_PORT=8888 +# LOG_LEVEL=INFO +# RUNPOD_API_KEY=your_api_key_here diff --git a/02_ml_inference/05_image_to_video/.flashignore b/02_ml_inference/05_image_to_video/.flashignore new file mode 100644 index 0000000..ac60074 --- /dev/null +++ b/02_ml_inference/05_image_to_video/.flashignore @@ -0,0 +1,43 @@ +# Flash Build Ignore Patterns + +# Python cache +__pycache__/ +*.pyc + +# Virtual environments +venv/ +.venv/ +env/ + +# IDE +.vscode/ +.idea/ + +# Environment files +.env +.env.local + +# Git +.git/ +.gitignore + +# Build artifacts +dist/ +build/ +*.egg-info/ + +# Flash resources +.runpod/ + +# Tests +tests/ +test_*.py +*_test.py + +# Documentation +docs/ +*.md +!README.md + +# Demo output +image_to_video.gif diff --git a/02_ml_inference/05_image_to_video/.gitignore b/02_ml_inference/05_image_to_video/.gitignore new file mode 100644 index 0000000..b551249 --- /dev/null +++ b/02_ml_inference/05_image_to_video/.gitignore @@ -0,0 +1,28 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.egg-info/ +dist/ +build/ + +# Virtual environments +.venv/ +venv/ +env/ + +# Environment +.env +.env.local + +# Flash +.flash_resources.pkl +.tetra_resources.pkl +.runpod/ + +# IDE +.vscode/ +.idea/ + +# Demo output +image_to_video.gif diff --git a/02_ml_inference/05_image_to_video/README.md b/02_ml_inference/05_image_to_video/README.md new file mode 100644 index 0000000..acf55de --- /dev/null +++ b/02_ml_inference/05_image_to_video/README.md @@ -0,0 +1,76 @@ +# Image-to-Video with Stable Video Diffusion + +Serverless image-to-video API built with Runpod Flash and Stable Video Diffusion. + +## What this example does + +- Accepts an input image as base64 +- Animates the image into a short clip with `StableVideoDiffusionPipeline` +- Returns the generated video as base64-encoded GIF + +## Quick Start + +```bash +cd 02_ml_inference/05_image_to_video +pip install -r requirements.txt +cp .env.example .env +# Add RUNPOD_API_KEY in .env +flash run +``` + +Open docs at `http://localhost:8888/docs`. 
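+
+If you want to call the API without the bundled demo script, the minimal sketch below
+mirrors what `demo.py` does: it base64-encodes an input image (the bundled `poddy.jpg`
+here), posts it to `/gpu/animate`, and decodes the returned GIF. It uses only the
+standard library and assumes the server is running locally via `flash run`.
+
+```python
+import base64
+import json
+import urllib.request
+
+# Encode the input image for the image_base64 field.
+image_b64 = base64.b64encode(open("poddy.jpg", "rb").read()).decode("utf-8")
+
+request = urllib.request.Request(
+    "http://localhost:8888/gpu/animate",
+    data=json.dumps({"image_base64": image_b64, "num_frames": 12}).encode("utf-8"),
+    headers={"Content-Type": "application/json"},
+    method="POST",
+)
+
+# The first call can take a while because the worker and model need to warm up.
+with urllib.request.urlopen(request, timeout=600) as response:
+    result = json.loads(response.read().decode("utf-8"))
+
+# The clip comes back base64-encoded; decode it before writing to disk.
+with open("image_to_video.gif", "wb") as f:
+    f.write(base64.b64decode(result["video_base64"]))
+```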
+ +## Endpoint + +### POST `/gpu/animate` + +Request body: + +```json +{ + "image_base64": "", + "motion_bucket_id": 127, + "noise_aug_strength": 0.02, + "num_frames": 12, + "num_steps": 18, + "fps": 7, + "seed": 42 +} +``` + +Response: + +```json +{ + "status": "success", + "video_base64": "", + "video_mime_type": "image/gif", + "preview_image_base64": "", + "preview_image_mime_type": "image/png", + "model": "stabilityai/stable-video-diffusion-img2vid-xt", + "input_width": 1920, + "input_height": 1080, + "render_width": 1024, + "render_height": 576, + "num_frames": 16, + "timestamp": "2026-02-15T12:34:56.789123" +} +``` + +## Local Demo Script + +```bash +python demo.py +# or explicitly: +python demo.py input.png output.gif +``` + +## Notes + +- First request can take longer because the worker and model need to warm up. +- Input images are resized to `1024x576` before animation for predictable memory usage. +- This example returns GIF output for portability and simple local testing. +- GIF encoding is capped at 25 FPS; higher requested values are clamped and response `fps` reflects the encoded output. +- If `image_base64` is omitted, the endpoint uses `poddy.jpg` as the default input image. +- Quality is intentionally baseline for fast, reliable, and lower-cost demo runs; this is a starter configuration, not a max-quality preset. +- The default parameters are tuned for reliability on 24GB GPUs; increase frames/steps gradually if you want higher quality. diff --git a/02_ml_inference/05_image_to_video/__init__.py b/02_ml_inference/05_image_to_video/__init__.py new file mode 100644 index 0000000..640a62b --- /dev/null +++ b/02_ml_inference/05_image_to_video/__init__.py @@ -0,0 +1 @@ +"""Image-to-video inference example package.""" diff --git a/02_ml_inference/05_image_to_video/demo.py b/02_ml_inference/05_image_to_video/demo.py new file mode 100644 index 0000000..dca1582 --- /dev/null +++ b/02_ml_inference/05_image_to_video/demo.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +""" +Animate an input image into a short GIF video. 
+ +Usage: + python demo.py [input.png] [output.gif] +""" + +import base64 +import json +import sys +import urllib.error +import urllib.request +from pathlib import Path + +API_URL = "http://localhost:8888/gpu/animate" +DEFAULT_IMAGE = Path(__file__).resolve().parent / "poddy.jpg" +DEFAULT_OUTPUT = "image_to_video.gif" + + +def main() -> None: + input_path = Path(sys.argv[1]).expanduser().resolve() if len(sys.argv) > 1 else DEFAULT_IMAGE + output_path = Path(sys.argv[2] if len(sys.argv) > 2 else DEFAULT_OUTPUT).resolve() + + if not input_path.exists(): + print(f"Input image not found: {input_path}") + sys.exit(1) + + image_base64 = base64.b64encode(input_path.read_bytes()).decode("utf-8") + payload = { + "image_base64": image_base64, + "motion_bucket_id": 127, + "noise_aug_strength": 0.02, + "num_frames": 12, + "num_steps": 18, + "fps": 7, + } + + request = urllib.request.Request( + API_URL, + data=json.dumps(payload).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method="POST", + ) + + try: + with urllib.request.urlopen(request, timeout=600) as response: + result = json.loads(response.read().decode("utf-8")) + except urllib.error.HTTPError as exc: + body = exc.read().decode("utf-8", errors="replace") + print(f"Request failed: HTTP {exc.code}") + if body: + print(f"Server detail: {body}") + print("Make sure the server is running from this folder with: flash run") + sys.exit(1) + except urllib.error.URLError as exc: + print(f"Request failed: {exc}") + print("Make sure the server is running from this folder with: flash run") + sys.exit(1) + + if result.get("status") != "success": + print(f"Worker error: {result}") + sys.exit(1) + + output_bytes = base64.b64decode(result["video_base64"]) + output_path.write_bytes(output_bytes) + print(f"Saved animated video GIF to {output_path}") + + +if __name__ == "__main__": + main() diff --git a/02_ml_inference/05_image_to_video/gpu_worker.py b/02_ml_inference/05_image_to_video/gpu_worker.py new file mode 100644 index 0000000..06f9b45 --- /dev/null +++ b/02_ml_inference/05_image_to_video/gpu_worker.py @@ -0,0 +1,215 @@ +import base64 +from pathlib import Path + +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field +from runpod_flash import GpuGroup, LiveServerless, remote + +DEFAULT_IMAGE_PATH = Path(__file__).resolve().parent / "poddy.jpg" + + +def load_default_image_base64() -> str: + return base64.b64encode(DEFAULT_IMAGE_PATH.read_bytes()).decode("utf-8") + + +gpu_config = LiveServerless( + name="02_05_image_to_video_gpu", + gpus=[GpuGroup.ADA_24], + workersMin=0, + workersMax=2, + idleTimeout=5, +) + + +@remote( + resource_config=gpu_config, + dependencies=[ + "diffusers", + "torch", + "transformers", + "accelerate", + "safetensors", + "pillow", + ], +) +class ImageToVideoWorker: + def __init__(self): + import torch + from diffusers import StableVideoDiffusionPipeline + + self._torch = torch + self.model = "stabilityai/stable-video-diffusion-img2vid-xt" + self._using_cpu_offload = False + self.pipe = StableVideoDiffusionPipeline.from_pretrained( + self.model, + torch_dtype=torch.float16, + variant="fp16", + ) + self.pipe.enable_attention_slicing() + if hasattr(self.pipe, "vae"): + if hasattr(self.pipe.vae, "enable_slicing"): + try: + self.pipe.vae.enable_slicing() + except NotImplementedError: + pass + except Exception: + pass + if hasattr(self.pipe.vae, "enable_tiling"): + try: + self.pipe.vae.enable_tiling() + except NotImplementedError: + pass + except Exception: + pass + + if 
torch.cuda.is_available(): + try: + self.pipe.enable_model_cpu_offload() + self._using_cpu_offload = True + except Exception: + self.pipe = self.pipe.to("cuda") + else: + self.pipe = self.pipe.to("cpu") + + async def animate(self, input_data: dict) -> dict: + import base64 + import io + from datetime import datetime + + from PIL import Image + + image_base64 = input_data.get("image_base64", "") + motion_bucket_id = int(input_data.get("motion_bucket_id", 127)) + noise_aug_strength = float(input_data.get("noise_aug_strength", 0.02)) + num_frames = int(input_data.get("num_frames", 12)) + num_steps = int(input_data.get("num_steps", 18)) + fps = int(input_data.get("fps", 7)) + seed = input_data.get("seed") + + if not image_base64: + return {"status": "error", "error": "image_base64 is required"} + + try: + image_bytes = base64.b64decode(image_base64) + input_image = Image.open(io.BytesIO(image_bytes)).convert("RGB") + except Exception as exc: + return {"status": "error", "error": f"Invalid input image: {exc}"} + + resized_image = input_image.resize((1024, 576)) + + generator = None + if seed is not None: + generator_device = "cpu" if self._using_cpu_offload else "cuda" + if not self._torch.cuda.is_available(): + generator_device = "cpu" + generator = self._torch.Generator(device=generator_device).manual_seed(int(seed)) + + try: + with self._torch.inference_mode(): + result = self.pipe( + image=resized_image, + decode_chunk_size=4, + motion_bucket_id=motion_bucket_id, + noise_aug_strength=noise_aug_strength, + num_frames=num_frames, + num_inference_steps=num_steps, + generator=generator, + output_type="pil", + ) + frames = result.frames[0] + except Exception as exc: + return {"status": "error", "error": f"Animation failed: {exc}"} + finally: + if self._torch.cuda.is_available(): + self._torch.cuda.empty_cache() + + if frames is None: + return {"status": "error", "error": "Model returned no frames"} + frames = list(frames) + if len(frames) == 0: + return {"status": "error", "error": "Model returned no frames"} + if not hasattr(frames[0], "save"): + converted_frames = [] + for frame in frames: + arr = frame + if hasattr(arr, "dtype") and str(arr.dtype) != "uint8": + arr = (arr * 255).clip(0, 255).astype("uint8") + converted_frames.append(Image.fromarray(arr)) + frames = converted_frames + + # GIF timing is quantized in milliseconds; clamp to 25 FPS max and report actual output FPS. 
+ effective_fps = min(max(fps, 1), 25) + duration_ms = int(1000 / effective_fps) + + gif_buffer = io.BytesIO() + frames[0].save( + gif_buffer, + format="GIF", + save_all=True, + append_images=frames[1:], + duration=duration_ms, + loop=0, + ) + gif_buffer.seek(0) + + preview_buffer = io.BytesIO() + frames[0].save(preview_buffer, format="PNG") + preview_buffer.seek(0) + + return { + "status": "success", + "video_base64": base64.b64encode(gif_buffer.read()).decode("utf-8"), + "video_mime_type": "image/gif", + "preview_image_base64": base64.b64encode(preview_buffer.read()).decode("utf-8"), + "preview_image_mime_type": "image/png", + "model": self.model, + "input_width": input_image.width, + "input_height": input_image.height, + "render_width": 1024, + "render_height": 576, + "num_frames": len(frames), + "num_steps": num_steps, + "motion_bucket_id": motion_bucket_id, + "noise_aug_strength": noise_aug_strength, + "fps": effective_fps, + "seed": seed, + "timestamp": datetime.now().isoformat(), + } + + +gpu_router = APIRouter() +worker: ImageToVideoWorker | None = None + + +def get_worker() -> ImageToVideoWorker: + global worker + if worker is None: + worker = ImageToVideoWorker() + return worker + + +class ImageToVideoRequest(BaseModel): + image_base64: str = Field( + default="", + description="Input image encoded as base64. If omitted, defaults to poddy.jpg.", + ) + motion_bucket_id: int = Field(default=127, ge=1, le=255) + noise_aug_strength: float = Field(default=0.02, ge=0.0, le=1.0) + num_frames: int = Field(default=12, ge=8, le=24) + num_steps: int = Field(default=18, ge=5, le=40) + fps: int = Field(default=7, ge=1, le=30) + seed: int | None = Field(default=None, ge=0) + + +@gpu_router.post("/animate") +async def animate(request: ImageToVideoRequest): + payload = request.model_dump() + if not payload.get("image_base64"): + try: + payload["image_base64"] = load_default_image_base64() + except FileNotFoundError as exc: + raise HTTPException(status_code=500, detail=f"Default image not found: {exc}") from exc + result = await get_worker().animate(payload) + if result.get("status") != "success": + raise HTTPException(status_code=400, detail=result.get("error", "Image animation failed")) + return result diff --git a/02_ml_inference/05_image_to_video/main.py b/02_ml_inference/05_image_to_video/main.py new file mode 100644 index 0000000..ba791ee --- /dev/null +++ b/02_ml_inference/05_image_to_video/main.py @@ -0,0 +1,38 @@ +import logging +import os + +from fastapi import FastAPI +from gpu_worker import gpu_router + +logger = logging.getLogger(__name__) + +app = FastAPI( + title="Image-to-Video API", + description="Animate still images on RunPod serverless GPUs", + version="1.0.0", +) + +app.include_router(gpu_router, prefix="/gpu", tags=["Image-to-Video"]) + + +@app.get("/") +def home(): + return { + "message": "Image-to-Video API", + "docs": "/docs", + "endpoints": {"animate": "/gpu/animate"}, + } + + +@app.get("/ping") +def ping(): + return {"status": "healthy"} + + +if __name__ == "__main__": + import uvicorn + + host = os.getenv("FLASH_HOST", "localhost") + port = int(os.getenv("FLASH_PORT", 8888)) + logger.info(f"Starting Flash server on {host}:{port}") + uvicorn.run(app, host=host, port=port) diff --git a/02_ml_inference/05_image_to_video/mothership.py b/02_ml_inference/05_image_to_video/mothership.py new file mode 100644 index 0000000..7a726d3 --- /dev/null +++ b/02_ml_inference/05_image_to_video/mothership.py @@ -0,0 +1,7 @@ +"""Mothership endpoint configuration.""" + +from runpod_flash 
import CpuLiveLoadBalancer + +mothership = CpuLiveLoadBalancer( + name="02_05_image_to_video-mothership", +) diff --git a/02_ml_inference/05_image_to_video/poddy.jpg b/02_ml_inference/05_image_to_video/poddy.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74937103d5519407af4923cc790c6a4b8bf611eb GIT binary patch literal 30015 zcmeFYcT`i|w>BC&2BavVNe87WU6Bsb1O*|GAiYQj0V&dZla2x+(xinDI)q+BQ&Fj* zDi9E*3PE}ack}+vIo}y~j623R?%(gi;NFY1HhZl#=QE#KxR}1c(Gt}Coa{lM$B#im zAP|TWL{4-KL;_q90bd{@RuI`g*B}s#=*ItCTM!BS*BD|Di1_;d86WEeBK@!Nz%&18 zTK{$bpZ_URK_CjChg3{LLOhM=Ki5RWB%p%-?iUvmDBZ?cW{vcL)C6fq!@4-yQgO2malGe|O;D9r$+#{@sCpci`V0_;&~Xzjxqb z4ip6<`lk^S0d6=kF);}#3D8K%NG}^1IoUsr{J$E-KaKLTUHPAOIUSft3jDi5PDcJe z{r}e&7e4^M{?{_}?ay$s+zffu8oyaJ}8 z0}&IEkPwrSkOA)tj10aEC6LmSUE>y4A-}F~O~K>IAQ7JSfs$9Xwg+M`y2B@F^CIF3 zn30K@h4tnwegQ!tDe1fSWbVs8P*c~?)Ox55H8g_3jh~v>KC`oTaCCC^^7iq4>E|C1 z85JE98}};y^_%pJ%(q$ZvI~ofOG*)C$nv`S26SUnb4x3xx37O-@Z+b?W8)Lp$*Jj? zS=`d{&y`=lSJ&3@yMOog4-SuxPk`9Qf0hG$|Idp4*LvuI^$?Sil8{ndu7`;DCGa7k zCne(+C%>kmPhstOokt>^l0h}?Lv7C$UP*%;h|P;pFe9H7?k4_nN&l?q|87AM|G!rB zKMVSw^<2z?s7Z)`10$gaDS-$qtA+7|Y6VbXykMBVIzyO1VZ0TCzB&l_aDfF1v&b0q zwTR;dN{Q4No&X<%{~ibe27~nVCjy7$Ou-yL|A;* zN98>P*{ur7;j#Q0;5NQT!C5&ED+BUIfLzqIDAhuvWw> z48y{>5u^~A~YK`Un)mnj?sCm9B6kqIj!2jh0Ey<2|T^(AAnc?jfwO zA0U_0cvtKm$K?H#MJyUX1*|Pvvs6|NCVA`&5+w z*U8wVk}>SWFo|Q^? zaxLEDfgUB9Gm@bou69kA(%DLZOrE>akRHZODcGZ_0;TNNraQ5zgtC|kB1rd9++;E! zSy*{$9BD{I@s$lInG}w*(uGx7l>zL>k#PsRha}F!mKTx|U6r#!Vgkk^h)ji)l^J{) z&SMaugOX_%tubPH&PqAw`1~P;oxF%f;0Qng&_>o8y736`tQ0w%QV^#=rVH8>#65N{ zBLXGo))?XxoG`MpQlLm+%WcOh;)YAKcq)zPck2os=+a}+iH70gCgnF@F!Whqx#ITL zV5&Tk(F$mHysa z2_`}yqusS0#!AvX;AMfsFXKHx9NSkMd$*A}vB^N%Tg@XXT{+ShV{0lInSF(}fJ;OT z%Jiz}TN4o(I@mcJ)-n)4w!rX!-mzBWk=;BwQLdOeF8eE>nFLk=p$9U()IesgQ7tT`okG29D zDi*b4}OY&!MiXsJP1RzDy`IdtGj zKt9y`5Tw!q?9uYv+a~39X{5w?Oe2OE@oB>d*-ob7&to@MkTTu)0XT~rNg9G}cO)_+nuZ=^*YQeS zfJD42i_}UOjp)aLvv<8E3-&XadBPfQsG*6L*AN;>na$&^EK)|3!swGRx%Kf4ybO4j zO<~6oR(11t2+A&S_Dl*V#VH-2uUA*JEtJ@A3T zz1~K=#uW?f`l3Y3B&+64+_~5ssL#tH$;n%T@x$w{+-%f_;#K38#VgIsV~EerX)`9@ zy{odnS@(O~ty_*?6Zr$`Hwv(o_CeijZRBM~{pw=yvsV#@K=!ZczP>eCp`fW1vE>zR zDfrIr?37z3pYi)zy|PDdiG7CfWM>?nGlnid-2G zH!c7$`AdN5?w1k=_G6FMcgl45Q(hRu~aerZbE(T)$iG0{&VYYvAVa`KNBL@U4sX9Z zn~Yf*fM*!lW}X8isZoVTw*mbrppdPAoKZ%*)-10GQX-c`yiuLzN<#n+#&U9NT|U@o z;ZAN1Y_vC4@R2$oa>Js4c1Ra{^uI`t^1c8G&sN5*EeXw7YuaQbza2sqiaL9wK#Dzz zYITM<(s7(C=|K+a1OmeQ^8%rI!o~6#;d{3U#v S=?02)3S7d?AHMp9zP+8EjNV&QuOu&i z>8UFRYq%G#DUDGNitI3!jVGEcG}M2oK%?-4+RK#jJN{fp7!xyiz~myAoI=v^Lt#Ze{X9nfAz8?dN1(T@mAv1Fjg}n-RBKtx7v-z zZYsOo5qc5^?H<;if!^j*`yW}VJpp5(ER+ULIEsLroAQ8ZjL(XT;4G2nf(QV1Rc;br zOseb)oN+1DDj=RNeq+RRVNgsSR_uu^G~&%zV-gn+pb#LM3;{ipH0THnU&zP}ek{XZ zY5bq=c1P+k{sI8_&|*+M`YAO>B1(Y&L`z%So(ZWi0g}qAZ>P_$&2xZKA(vrT%ot_8 zSqA9~LG!;&&UmKbv?Oji5Uwo)^w1D6%r8us3a^3=$LKgjU_HfE~Y0yJk$ZX zf~$G-yvPFX1C4r^0Mp#>#wNxG{Rfi0HD`>%R4y0HlyYe&dKqkJwOxYU%)fMVzinZR@6;G~9lh&!8raq)jCKpxLQWl=PX zmO5a!t0>D=AkH=2`t*U&z91tUL`L0IL<~UCml-C+LLM>bWM=A1!Or7FEphA7JA|8v z2rw`*@)22Doq^6%&z|CapJCB9-dlWhj}q^D53nWQ7^@p|T^`+gMyEmtE^nt%%-GLS!g%#}Iyqf;_I|FuQ@=>jLmPc}i2PMazmJFb zyWd1}yyFT=;DZZLt)CqJMG3GlO2Cw-AQB~mp@ZSS~y zdI9?QEAtMO>XEXLa_pLAK^H}+bqamZ^PQ0?d8R&@4Fdw}t zH2I#Sqx)L8<9^y591G6wrcHCoBn!)kC?aLLptnZ|+p$|i|i?Ppbk zzjuID>#116%?Lw;JF#hQ5-Aqgj~krzhe-_|AMIeC&7JXC<$qYS_;o$8?&S4~+aNYH zXu&G@lMh;e+>W8tH#<10I=6$n6C-O zt?M(PP3e;}1pd)fLFG~&B&jjVTESU`v*1#T-N5Z?qs!mYzu};H{8) z<2ljZgJ3xbsXd@(s0bxYF+HH%A8fMX0Tqf6F^mveO=h+e=TJZ{$+mJsxZ(lR1?CZT z_Dimgi(vZi9npQ3dD>)-Y;bYHVRk{8!^EpQRh}3S7;x3 zV=_Zf3Fjs6LCqVC_c_YJhQNxdv2S&E2TL3&yH^G0SkcBRyA1M$a`3_+>78c;F}!+X 
zCTh2y`dm1PUVN$Z*Lix7SmYDghK<|~;UIbt!1d?i!PAptahxvFgqKgr$FEpX;s!(RtF$ z@wA$kMdKsw4Ll!-7kGEBm>Y#XQ^sXYy)&<3Tk51`(wpb|azzgWP6cRlc>s()T#fr7 zlGH~=Wj>F>+GHR#E-ExyUf|MlfB~3RR%6Ic<*(Fc$o5l~1q=xj57o3`IN$@zfq`y7 zyNu%mE-j1U9)wHJWHp*T6`)BE_5g2steDcJPzn^q#jqm>erJn}@)>94)dEO$iE#&+ z@s(u2b2FzaGzd4-kem1*K2{;(2k;1=QC70D!e(7SN^meW@101rHzr(-in9Qz=7+>^ zmKY~vr^yGHCiV~}-mATN73uKrx5nUhVv#k-u`7J5GW%QR)iLdmYQJ(kz$R#Bbo|8$ zORp(f237W}2!AmPv_WK^2kvp%wVe}&4yVFe{e8>HBwZ(EofR(2UK~+ zE|T{fzVygFJPNOn4QUu1l^#18OpftxR<*uKHVpO zI-^;r{EnAEmI%L59WoM-D6SMs;56059o>h;>hv8}DXn#EB&D5}zff@L;dguS=PAFx zV*hWpkJh$)yP;D$vB%Tr;+Ah5W}<{e6_ z;!fctEjX_;Fc^C4J0y|Q%i><3;`=I-$4j(V|FXAODsnyCZORe+Nz#ya9~5c!d9Bg+ zVdJb=%X+|@ed)O%?#9uDCA<9CeH-pNJR8*UZ{tGi^N|V>Il=33Ed#PspN8)tEefO6 zD)vTA7N&l}YidsfPl-duPJZ|5F`60&7gtb!7JJxz5&++HAMBc?Vb4?)*21=T{V=-= zx&Wy)&%UV&Z568vKR-%8s9r(f29RZbUVX11OUL)_?z(0uweYZ4b?q>chCoiGPFnru zaK(?sorvQ4`Jp|`zjvw~C`4U!hrNSgkFy@?d#89!h6v3D=k%%mWMi`}tG+Q6K*n;oG<2Ifvv z0l~c{mZ~G4bM*(tlt`c} z>s1E*aHh%{PrsNXDBP|Relh>M#G}S+30>GRAbftmk~GEY&)BBGQXQHKSI<&$YeGzR z^5zx+XDRy5=%F!nhsCnL^nmU?w#ojVJxR}MZgq0hwb7daASwwUCYcsPUOGTwA}%S6 z(38jx`Af%*i-$6BB3yaRfK*{!T~EcFzL=2*?1UrJ1zZ3S`z47{qdUL!aX(zus-1e| z7*1&raLH+Ec=wS;-q4?pq3?dQthWdD%=K4~5-#y7Ij{t-*sAedJl_ zw81v}Jqh`h^T)NYnp;XO#AlK0uRa}C|H>IoP-y?Y*s|d7+QD0Q7EHe~5Uri}mmkkG zZ~=JNs-gS_zL2f1Usc*O+V zZPhuo+L2NR(}#b4H6$OnB%n1cWu@Z%UJKkf3_NU$QM7#fYbZngv#IEjtzXYQ+oL_K zcOdGP_%MsAY=l^-5Drb&8Xa`XFFs6vO>J=z_2GN0LXEOgbf~lCj_18I`te1F&38C= z+K}=5x~k+LC0ww@;pq+IQEr<9U#7>`A3AwD{StSy$sZC^Xuc^wenaEBggZfEf5@`_ z(P-XxxU|;56KKqrZzXpd9171KwKV1rrX4Nls6eV;xmE7RVHG#syt_J6k~d@@wryAz z<=4aG$(t6B8y+tBG9fG?$GYxpl@ex-^r_8Q&P2-|l7j|B_Bj{05Bo8)DazP=&du88 z(9KYKO$W)Xhn>))sfoqfdu52T*$Ytq@{8T|cC?7E5~KWsmy2!e{;rMhpiHpE#4=y= z*c5vsnZpXs>2dhDREOjeQ}Vvt+qWilqawK4){IZDzdJK6-T1WRzL=`|;XxIc+^zYm zYse@}(YMI!$^|I%GoECN>o^p@h{lIvk^%y$<^X2lAI*#1d@l8yrTcspmUwQoY!+aj zdGAMGzfxKSs@X`dc5gVDAmrpZZL+X&ZAdDhTt|eyA^C61 zjK;IQ?c~e;GyYSv&o=bX(8@z8!|NX0YnZG#U+x3BJje9bF_toCZeCb42>!FZ`}^!b+fc^@YKa~JoY zxzM-XN^#(m>H}ANRJQ0V-h>cFy|<8sFw_ui5N2xmx^Sy@=4eCRjlXY-E-lPd`coY0 zM~K8s^+AqFdQb)8wbCqG2zptEF#t^XwKK$4`|k0*NsjFwO*q?;(pF!=3L~xrh_~vG z6T^s`kP~^a((;>J5yi%zR)y9FQ(A@eZ?LY#(3KFUMjb8{)0G{f3c;yrWIhil4PHIc z0K#lZ<*O-x8zyNk-jm!91ckXF-VZhm;?%tdDJ9~7sHg?D0EjEap-N?x1t_!gxc4iG z0E#bMczLU?1-JM=<>UKSmCI3rD(?tdh`4Ace>NZ%2CAH- z3yN_}Kx?HwcPoI!#k7jjuI0OSXyGT~r`BzsC@|sNd#O^cFKDnx6i=<|^@fH8XuP*& zd!u^K$_MhhtNwSyVq?Vp84Bmq&N;hwVwnLDa|NMn?k+;>@8FI`M(KmNWfBhTQpnwL zTkay2{$NhjkDw@c)Y02(;?>Q#RYDU>?v)Q-cZ_0g@Cd6RB z{z);yfZC0vb_D8IsX15DwBa z$>QGX*J1#t3QCWINq7LGLs6n#JAqLYE@x2TY1T7&XS+OBAw=yVq?|MLNFZB)j?^Se z)HzEN@E5>q(ZpkFM6@N^yb!=;D88~PZZ1|JL6^a$KU+!(FnQXsR$MH595G^eGj-X@+= zS$W>+_%O_1dpb;h%kTme?zYr?Ghwz>5A0lty8vCA-p~O;)8oSTTKBAvkUY)xUl1EI zLg{ja!UpZE^2<1~A0^4CrK@)ogFf-JC3SUCqjs0q@+Ui7p3NqGjG%x09lXoQ?3A#x zy|koGJLFtHgm^|x%VYM~CB-Tkm4DzeBjs1r5wkv1z@%a7U$iUP^!3LL(YJYXgg=4y zT?tOOVHq(=d#cbGF9MhX&yOQyJkOjp zIw-mTU13arl|yya%V&CHaOmYh^8^jVmL!?XzggbB@mE4p_5~>B0>oQh!cgQLB01Om zG!`9d2RN(FqFem5F@(E*hX>}^TZWMXCXXz*#>XjcooWiZy&P8Q9CL9)U4R}LPvMh< z{a@f`I~UHQLR~BwLk5)^-bRKxx>Jy4|HWj#wg}GDcozSMB5CW7TZ!PuhgKOAZ0sp@ zk1Ok)s?=q)->i4x60J6e_m(a|5p{%boI6;{9X-)_r7IQI@#e;kyUr&{xtKeM;p>8GeD1y3ed=7SArB;G33BAauEP9o_$Z;ET81OseuT zJun*ydO(qK1}*u7a4>F2o}W^`=gGX1TY63YWu0nCdvW8P#M76pw~Ryc-@dA{&bEeR z`+Hy#7#`+qB};=7-b{f(U;U2)pa}H==O10w&BpIa_XN-6GwkuVhhxle(i`_(r$%%m zzcslA%Gwgh%td?610M65#XN_*kVFnBbb;}os-An#WubLA_YYq4;elZEb#luIIAX(o zeyIOzsPzSiW2#M*>jG5ulH?0#8ea%xb+P<39~T-4oXnY??47Ze!?y#Dp-oc4ZuT@y zbo+bOV_nkw6mK5l<%TKeGOtxnm7TT1*C=BD@Z#Mu)~vZ>GrkAX$EWyvx_4!(XBn)7 zIX~_yy*ooqeJk?QD3|ke{U8~@wEO5cthWpfa!_<8;> 
zGV;)kfcqI@?yOZ%VQr%7gnU^MY#nK4b22LAdY}-qjnSMTF;hJf-A% zOK^+Yg`LpwE!@g>KnAHaR~)#N8^9Az6-(?+Pi_f45FQ{^!{Px@;;zcF1{H84VRW+K zb{^u(Py=9O7zF_H6jNPstHfBioLM5B8h=%b2U5tz-mlG5m9_Z!r=*%@4O0G%i7IRQ z1Hc5O7bfB^Z4mGvQt44PW#Z!rw`8NB3({EK1{3vY83*8_|9i=HbMyqx+b=Z~B?%epAv-LBvz8S9LY4V#HS z=Eu!9q|OXT&UTdetjtU{m1NwjlXDs$S@UsONS#XCfD3(daWR}s{m@e_f<^oMnCwIe ze4btPQ6=3H_-gzrMmXj10vZjt_POiM-vphW1tb&0W>!R!mEB`EY}d$m#)PVAbCX_uo_Lr3#0j$+uP##(cfrF9) z4u#4*fccb06xylW$P5KC?b<0bvE^D~M2YtG$qLGKyOf=(X6x<>z!*pjZoyzm&hKo# z`t;I}S+hT?U8<nR@`?dYEy!HQ=NepO@RENY7A3M|mIYBQ z_42{88yv_oERzSje!8@DBw8FX8PxEKT74?(6Mqxe`7}MWgPy0;V(077YQ6a?AKpP6 zpn+y{8P!Yf*LG7j&z6d>jc%QANF3f;3Po(6-3UH4J91e@Yzn*VFfKodKGWH0TzV`s z9YI>8k^5!#8H#gWoJZ2soy7eem8kO>+XW~R6%@EL*RRxgFDmHv;zm}LPZy1!{#KC6 zir30xPrrI&)lG8wY25YtTQ7M&M~BU$&+5+MQ&la^QpKMw*HS7Q<@IiCy5hFt*e(0qr$1Rp38AbE%P`~j7a+ZpEyTyIgYQGB zgb+l1?h4M+yelf`W+2jnr{aYKK8;l>B}Gl0EH_2nh_n_H_K>)%Vhc{Vs&$TXZC5|I z%{iENb)sSEyQRd!%-#9Ena!5_gVtV0Q(K6$>u(5GPtNnZY!LBNJ-m~$Q!9VMJ2ZSe))9j_uyJ_UTWW0~Ck5MeH55?a3CL_PC&;);n@@{;XM z;iHYIyCp->t5-vp(tfk(`p$S?9#6As?L+_48B=j)S{VsF0Vtu5gnuU5 zDg5zphHq#@*Q%%1N&Uf#$)qn{j;~+Tmx^25$9MHE*yrgt{dui)M{_M3xntSKIi1HB zp&>&Q9XLGbc1O5*o&Y(vQ(oTr)tQ%V*^iBy_R=d=QEtjO@1Cf5~Ca(5@7HA;F6l9=>(p09`nBjSVI{3u|lWO+EJRxQj0B2 zf4iKAes&PcS$_d~^jk-F~aVdcw0bj1`id8kE(G`>+T5s(tJ*aua6>U$0TF4gm%?vca_PFMCaF^)PxYuAdh59&6hd>!l<8x?+? z_W4^}^(PcnN{at#xgw2OuC(v?_1Dt#t+f+xYi=^;s_Cx{^P5%34GT+#*s@Bk=kYXG z1OE14OxeY?_RFO8G*FhaN$bKx*A0A-q3G=9Jl*&M2cQ5+5DO&c@*02?Kn8TOs{^NY zKpD1q8DWwEQfC?BBXC23^g?AILuQu<&f=FANN%-O8&#((H$-tLlwZc4qKI2@YK0Iz z?Q|}{{KT#q2+?4tsAY`fxK9kjK}aA__K&eBkmCX6M{C!f7)7Uyl^Soay=Cx#$^F%i;dUNcWEFnD!2AOvlt;cU(B3+-i0Gq>FW=9XgyFQ?2= zi*3T_Cx+NfjLUrz(7q3-B?|kY6-G`r{kpY*a96PcBja+r33x#_gf{S=}<7i=|p|!uVL9QI#Wjggf~9^_1rstR_Z}FUq|OMPoND7 z0ElHz3bPXs4s)rrSY`e7M&kQZXV##v?q2RfdwxGHa_pY!>QAKmJQG3u(s~Ya%(PqW zW(}RyenCSPDyZ6)aNlrv!(nmwj9u?E$d3twU$vww*l9|#4`-C>oRB(p-M-;Cur~kM zI|45MtfZY+sn?P@WHxlia$#=npnC_q5GD^@YL>r`3v92L)r{VHuM_`E>G-$L{p>*G zoAC6fg-HW2t(hw(grGuf-qN~)VxPj-zIHw_-W1`so^cj#ATG>hF<;i8;VjBPo!WQrOFGwI@M`$7a%+Fu3eL=Lpn{wpvyr_%R8dK z&#V}(nesaCgGAZ(4=TA~Q9L-+yR8dd`#)cgk3{9`pRh01PP|t3jI?~_Z1RPJXD6v@ zlS#i}s^=Fj>it`EU`lWPH4=ZajAeU?-uPb_TdXwSupZUS=8HVu9p9qUPanpw{+%0I zNnf-b*;Q(5Y<@Z3uJ1T!@a+co_M4QCHeG6OEOrXNR_V6+zAwXg6Q~?By6}sdI7{iH z4U4gAs($B!+O>GxpfyD1R9zvE_bqK=hIURoD;qovP4#nXZ!U1^`bT>s+a^%VGV=js z#4iOG(@`A+?Tl5ed+puq%{!zk@v(@_bHW>NBPrWO?^Z5(V~bU}YmfiV`HB^tOaTR_ zYc8%Kx7R|Q2D-oer0)@G50O51!D&XKo8Lrd$Ui#IQD}Yb+vb^g|E7D_7+&!WQP{zo zB)-k}CIkxjsX%BCr;_t9$87KN@U6Mt$%lU{ng)L>zPGW^TT&0O+|obgJ<~aIBfU2- zI$`=WyN~+VGcJw6GlK+34_2=oE-oZhB6d0&-`xl+WWXG(eGMu(HJo+|*t~AC&{uIJ z_wXmD^Qk+nyv(PX!iL6LoB(S~)j zz8Dy-Un`mNI@U7w?0*E#GEFC;a*(&TM4B^ zo<9z8&PIUsP{fMGx8Wjgs|_0~lmi`%*VmwwO5n{H$`TTp2U`Y|C@p0nYbLbM15$%{ zNwxc125~Z#^j857sLU$XR!UjOD2fJK%nhVvBU(R1uw-)|Al0OcxsaqJW3AlbBx0&U zDp#H206VglOiDdUf!5-xm)h1e z^rfM-Q>H&oCSQN2!45vSMdqlf1X;&zd}5c{V81sj7&11Gj^bBXoA;}>m&vfLglhs7 zhj%g*ob8^-8umyy>oad#u9^wt?Iy*k8ukF)K=H+s0`ID3eSud{pavrx3Iv#p6ol&- zi2Jw|oQ?Ffvorw=p8}JNglTR-DK9|ant$94^Ym4+kBmx~QsZ^Z;3{Bse5+>tJj*gB zWpgxyJo3;IgGcaZVk>2ZM4UJS!0EBJMNZ28LBt64+X4Osfc)5Kz!r@6QdjUIGi4iU z4L2Bt8}pdKD2Q&YQAu(Med3gxyclFs2#9XMt}lc`i=Ab?%qMpGHEtTl<+loXQf|XC z?@CKx$X-G*nT1AL7ob*gsK~8Kk&p~ojGoChB1({_u#XHnb;iq-)PDbNWD9Rgbo;6A z8*YiAm5<4q>OR{k>5I$p^Tx5xZT9LNm@f<79t(f+t_H=vVv}a`eQ5H=68y{ZVEc?J zCu`koBDa;3b2?-=pV77@P(5aHY(W0kpOZiH0t}(;d?LXSANWV$E)F~IagC9zUza*` zfA_s~mY6(Hsi??^wa-)et<{;t<{4=&n)EDK@xZsoWzOHuN+76Mam5J;LQ88cow zUx1W)&nbqVZ3KIs9xxL`W!-8tf-@*;D}KOCE44SaIt8&``&w39FF-*Tpr~DIictyS zZ`JROa@?5g&VO_sB0h1Kakr)ixH{Y8Eh=x$g%01a{Gp~;C-c3xX^8n`T*OBX8=Vrl 
z*H;%FdykZqdF$QE@f?9zbF6IctApcQ14?E&^ms#bd*vj?6CJuJAmk8#LeNvh&uS3< z<`AB)?Lk(An|zK!0k4TyhvhB(L*aMQa+B}o%I6Yk2Q+^YK;zbiNz`}6;;^-CY_)@r zYJaD68?6}(Or3iro`9>*%b0$mQ#eh&&dAkk^`YHMKQ2h_2tWU}t8lXy)2*erjkYyD zFFkqk`lGVK+xWw3YCmW%-l^0dOJa+9lCkWaqDL0{E2GeVJ}uB6_2X7Tsob#q7v28# zFaCP)*b#7uOBU^E@%H(AtXNVCwbC4D^nVPSnXxGHxk z*PPnmJAlW_`6l}dbZ45tNk^(siM-F2o~1R)5cV2P-sQ++KU5*P1W?2$iTJM7M&F{#C~c4y;6v z<%b8Pf%YCTC3LXY-4xCl)_YYSt(3pf1p>r>)Ht}@E)rmo#ELNcI3V864JmHi7i$7W zXg`tYlNN}aMUz5!x)p!`6(>&%eHPc20ad8;1OWc}WZr|siiUQSz;7aRrW9PFbzV&- z^_2iJ<8*;yr*&J;g`rS~9thmDUt!d1cSBreqLx8VEs2 zQR0%|wWfW0!+4Q+Kr4)90#92|>CTgLdqX{pQ!uNR{_7)De8)k6Uqpj36-~d-qD?ML zK3hJ1)EfLaA1J4lGExF(z}W##4@7$ks`ug{VfQH)vf7r26m&ex7Mpxk@GzhH#Du5=H zFl}o9>m&q4>>8cjfi0Z$a<#GCE&6H@JsZR2RW&TeWnTJQ`=*P{LrH& z^GPl>9=3+TZnFC$u~!bN*8L17z(bS-pTeAiWRXD6cD-_XgHpodX;m$ldZ~{cP2HTe~i_p5fExDGJT*`R*)`f-^;%`lY zScAcY{v?x0f1Zrq`zu)Wh$y|Ek~-d~paHx5ZMb=(7cp`Y=v^(^bVP;d_htX{r6vU@7&AD* z7Lsu$yd*k0q<~2BkqFR6^EcM&D(bzfY(t$j%CoFoRr0c3c{tw3!I!4K{K+TL4YmSj zzgut&ClHf)XDVF{dfvr2yumPXbvdNTt9feqZi9Dhii(KI+vsi8hdc<9b3xw#~J(KmTD$}ll#jniHN9g z!htNqb&+h(67r(3P5flQj1$s6!+pbJF7QBY{ggmt$GAlknB? z@DWH6Q}(IY*-P`6ZlCBOJlwL#5)JQe8>aC?mW*=!jUXO%12M*yaT@{?fh{^r+1q+vV;xGBYX9)Vo7?@Z@OEp50B`j)_qhn(=1-u#{pBd^j z(G{?;H`)wD*1t{`DlZRx4Y-w$Tu<%3#vrpVKdRec=sIjl_NT2OkU!kILGRV%8iJ_7 zA$n8X(m{lGtnziL5u$pc_5w48$!efrvL^OK`{_i*v; zm1`|WATHiS!y%%|p*E|xF9bFpi(BnM&e1w#-oYmkl8o~RV__{o*A-BW%-1UHDV^k(UVI`zbfp)I!&~996#l1~Q{|v-J7G}|E z99W^AX&{6`^0DfHfGP0D1^{QI7%Qgpt;({Z*!+^_uE)4MhVJjsy4`{DTTDHsPvvI7 zQ=na`Y}X8^pe7=GPmc8yxwrIoX-nLwpn8lMAp<0}@wdOqsF>P}&*t4$K$fY{3^1kX zGVZtE7F>8%^qo^M{J>ND_F#!O@%RX|^P|(>-U8#_(ar40pzt+wikYjN&!oKiL_&e; z>H9|ip^s3Yh$a-_z4Ln{RmgCI69=R~0pRf+FOa?lxeR$IT*9762zpMB#xV^%2#q^; zzC_wPnGgmOh&SJ> z>D8*BGm4JOcdjd=00SX$1((h|1~hT&0BXb@Vf;zj&W0FaJgj07Eh6XyndB0((^G-L z*o-pbJHW;&8H}7Jb+twr2@q=_bsiwK6K+!uL^#Zh`eQ0YWESDU^4!j3(y@~j#nPVK zs4G_kq-yNS3bTPB(w^3Ps4I{Fh8jDDN!FSgEHIMXGVi=BLTcbA*D+7;UW>8R zNG1m}j6J+rr<7CEURPS&D47Yasj+E*fpe{@s}SL`hWe`uohn{cr8;s zf9{20oaf*1$;)?o1+nE6JAwagL+E)T%u28yW>;TOR*&n=zz^allWWOeE$JjpE>ryt zfd8FJ>14I_5~3SeT-Z6`@U@LRs8zTC^>$qm^|qwf*wF}q?NwcX4k~x%KZs&J)W(zP zst4n7Go3fg*VZZ;MxRv$!hbVu_2S=Z5xxYIsQU;fdRl%WFykaedgE5rp+_7|nr|}~ z6>|2Ju5MS%-Wb$azY3>=ZGAjd@74bCRod#HdjBQu+tc#j6J_sQDA2l%`=vFU+V9*p zMiF7Vg34bQmr4MRRz4SK6pfa~vazZQ6meS}Afr26Z8uWIhH*gPQ9H0HL^yHYv$4#N!+Q}R|8)&%Uu=P~l_?Z4%f}V4R zn{W|hkT()GJX_~icmD1hm7a5|Ng!NXy`fuxWkSmJfGOEJ2eb;4f*n`b7)KF04PG_YB(s` zOb>Ro`0`tEa7%Og994WOSbWM|vfpzm36!fVjmZMV!$5}7*^@oA900_zlIiFth8Wpg z+d=dr0N_@ky$~A+;8g?ImuriIzW8w1?$Bk30%1M2uDqdKnAgYJ)!@vi&e4GTqwimssa33?vaZSOECr$gs1i z*3M+GGmHb*rTL<+&;i_y$7Q4>npNE>&L}D_t>4txbGD+wBBF!USakyYid^V(KP*!e zKt_0x4yB2@Isk^!Zq_Kls{!6MwnD)B0N9s6iK4p1LtA|z5t$7bBd2q7s!>!Fcn@jy zl>zK>We+6`+J?heYf?{vr;r=P>(G`M8$0jz!Wh}$B|4;%I+$#qjO{qrEgz=cMSnxN z0P3lYsXirgCzIjmihWVqy6@@?0^uW(-Rv89JP#L8Zn1Kn+-~RQvUE7o4iA70;O@Gn zb($S4b&28~qQqqqMZBm*HdlY_&Ut;tKzJ$Etw_|<;GK;`o37hku3MIBSjC&td8zS=U+#+Q zPsE=scUSSDQig^3spe(7R+=7s#kI>Ln^<_4d?K5xs}jl8Mfp# zE8~Yva@;kFuh%T~1+0!fv;HdV}WY_MbdOQ@zY6cv^OiQFPOCiIWINhPB(qp3u|KwD|&u+Z}FB z_YSkwyIbmP5$LvsdBq52^$-pZEt6yTA6mvn@p0*e@!`NPHoh9fTD7C@o_ybpM81?Z{Fy4b@}*( zwEh64<6!y;P;+2XZKgm8sn?p*7VP!#Y}44cipm0ZRHk3Oy57fb04r)TL$D*a5d|nJ zr!BLw0jAAw!2l#+W|o>J$Jxsv)%qoVSul3tQmq{(ln7Upr5CPV%OPj|{f34_W1 zPYLH9&*c06aUG;|KsD!6jxonH=Tpf!JZevm8Qm?05J5{jtY=U+>-5ecz8g_IO^e_vw1qX0NGpz)d2*>kAZ8-Nu`C7gZm9 zE;?oEnb$)?`55+SrUkdemit$IPF)N9tpJa(0T9#jk{o>z^5}x&P3vxd(#NBexLEM^ zCy4-44rZ$qe7ZU74Va;_tfKjWH~=Jp3Oc1OwAR9FE)eOpWC7S*+%W*x(s9=gl=2TG zNgAt#RLaqdCLOj=71*XYePXFvsg~a#%)6H&MgIJ-kJZxHsnB0mdZ#X~-1!aA!4cim 
zNEmw*7yW)85kRgC9Z53q4}mQ{iakDeD6^qe^L44oSFV$hcMEd(r(4Ikszmm6$N-VyoRULAS8vGw`B~cdgQYnTF5&K0Z=mz_w}L z`DU^4HFZe}>dcm~5AbfH=20_?|S{A=qE+-{czs?&X8`#Z7y zmDFQ?i^>#-FNY@2#(mD(WxQMk+Xr2AdH9#9W+2@u;Sjw4Vp#j?;gyXYB6QLs!MDV& z3AGk@i2aazixH;Pma#8e_RUQY{+H=#gyqb)+lz>Dr9Jrr=1*=n3sU6WxbjbE4tkz^EEnS=g^ZfBBuH?y5x;y*eQpM#hx z#zhbvcL4Z0XUkmgUt<5?GQf>H@4$n1G-RElm6%?DT+wm^c|4G40je2JLH;s$gEZ!aD?ck&OfSc6z1j z264o5kKIz;wD(Z2^W!a4ZB_4i zj+cvjXw?H65U(fs;;P+VLT8D)1j>7E6FazK@x^y94$^O5U9C!SufqmRMnEUw5w>5} zw6I~WRyXGqD4+4-7}7)WB9dd;FidRIGbt}Vr=S{k8GxMC=&&u3h*z7v2vRyHvrMr+ zfNsHUOKW%W#`Hmu60@upGu*)fF=c9jAm$WO2W0HumE8yKW)B#C0AJM96;cOf>=AiS z>SC*aC;Y!+8X^Una>C03SA5p2aTs6N?K%|of$tIaoa?|nH7dZ|bMD6Npb0zg8pj43 zM;e~tHsi5dA(F`>y5ub9)^*i?WF8B?qQZ^4tMe3fBI*&4I=*oZbO70Kn1Q}7SsF1F z(-Xc#{r%^oPTV7m*;PtUiW>=)u%sNY6dvAP!VNlv%wiI6kjM60dEr8Ah4Wm0T(#Xv z#1w-hCQv8x_UAPoaZC6JUwWX z6Jw}D{@t>?;5R1ZuA6;e*p=IzapcIDO{JNj{1W0u&mQY>^@dxQ{frAW!0L|$Zsu@& z=!MsUe{B!qzX|XcNLxaFPS#+SFe_c`eW;6X95-R5SuVh&Ijo|8@nF)=*onWLxQhm; z>-u%Ftp0r%+Ab>?pPN!6pP?x`bDqGyEamesuaAtXi)3P4L5I3nq}#MT=XuX2(hT19 zXZT&H=lJnW@~&Ibk!&5w)WN+z<^Bnm?kUaCB6#5Or;h%S6}K5ivQl)%lS0p7k>`Gv z?I&?vd@bs#S05t`-EWbjCF&yYuPfazJ1U2o4Gx10*W4&L>r;Ped&wX7*`8^0E^>ta z*Wq6U=6&*sMQopD>w&+!-kiVn61COWnCdt2&F|x2^l*-lDEfAOJSe>dV=OrP0LR&m zFXQ_G30-lPbc={pK(>xTPLRX6t4=zIqV%dWdJUizQBCvTH>7unebT?7?nGllli!z3 zjYZO>7^xexoj}p?O?%n(2i9_`sswU+YV_$HwiC$9dzM9#ngK4YnP0j2(!dKt>1~%} znl0WA=;Ww8?WH$IhMZK4+)%*k2kD&X;xng6caWXqW*?pJdyCl8g)u<_R=7Ym{tImK zw`;nY|1n2z3BC)r9h`moG!Vdw6yi*9OGS?ElVAcCb>UFK%yn7q*UO*u2BEW&kg(rw<4VmEzWng;uE zvb-nI%ve|NesIVz0QaI48RsBt%DNSAk{E|#Q-Uo`+~%2wzYywfU%tJ1<8}rs1KIi0 zl^zEh&#lo6;6JtGqO~l@^F$>P|-w(0ojOJ&jNXys(vfDB3QnueU0zB>Ph;He7+tO$FQi#Vj(9+09&P+2P z!Rd1!TfuB^q{SQ#BqgQ9Kpw)l)3E@~mq*&KzOyuv?$>2@F_QikG8NmRZYP#}Pl>`x z%-O*R5J=8xqmxZG4=z(y-Fy6CZp2;OtJNcBC%{@;C@!0PPjQxrtp@Nl7J{ONYgiy= zE0Q6pN}L)xh!%k&lAkz%IW)hf(^grWbLegL0_oZ!BH02@LVz)j^5i=wqB ztI%)p_O1Z_PRJZpQtUS=^*OZ3Dp~a70Zv%00^sOX$-V_O1LRYmOzNNBudlG_G zn0R!*0Y@eFAckvdk2b^t97&2f`i}+kHdZ>Mq64`-2o#rRMHg1x>eCEUfCCm|cY7gO&#Hobt1u9w~-IcQa;i7~B`-vDRA=rD@&)V59 ziXKyxjXiOw-#iOB#8^EwshQ8)fJoQvsHd9$Wm3aSFFSq9e-?FDb#5Fq{;s4YWVpml zVnCeZM}BKQB%AK43r@(j<0vFO9_{LXQbN^`qJ=>R8- z`0aGkveDTgkvdu@*Wv|XUI5fahX@z@lOh@DsBGA_Ni_a6#>wo|< zG>lO-Q8pO4>hnhIKc$y8k;}1R2uz++Kvw&OoFy?0YbDB3%n zwFikEkd}|6^$D7frkfl=j^1iY21JPVCl+8u|rMIinA?;YE6s|&EHm->DZP;USB zshkCVz%?V;c{Z5KSkfH4pmSlVMx4G5KVo7yMf+hKBvvn+aNZS?3moAxD41AlQt3>U zHz!$TKGEGSCu}$PeMJN$wFJ-FK?RDjNsXI#tc}ND;~r`dc=(qcAIoZ*Pm+kget2O* zJ^r&{(#?*UuIkRvFXZvOLu*cJzE@F0!3|*E% zC#Qro*b@tLRs)%*}LGJdx~}y7sJz29vFV`DsZG%q#7AC#02HEPzImeR@pV1TZ9j8*+YkUZy~qhV5ci-!$t)Z0`qV z%NNLPja|p1?6;d5`m^$UWwPwg(nPd(bl4ZnNl9o4FK+=hal+cF64_Q?VaYf0^`V1Nime(VZhg z>}(8)wvOJOfl9|ByiP}R0?=2vVUEOUt2Aun6fd0kV@pP1#fM%s^CE|&(=JAuyUfx+ zGs}U^*10cDeq^RoXMSO zU&~(Jh+nzQ2*a)TY7hPMk}VjIHnvw_a;hcYshpH&(f$s`c({ctzA@C^Ao@w-` z)y_V;qIWaZPO9rb+87cPi1k#W{7(U!dTL2`Oh#Dc$V})iSPf0$I+3Ip+)pAPh08Qx znw$C&O641^Z^x~q_+48w063=RU(Ys$_3!q&X~L|%qQvgQ<_}?=S)0i=p^Z7v%97=T z&Ag1Lee9d7tIUYSr0mOyuyA$xf!}6ZS8h#JoUTd+B^`iqfCq{B|i* zrQ33Pege(6*ljE_Ee%7tEyd#aVLeXuAz`xL5~s?|#I@}e?X7ylp~KIgqy5cpHH42n zR+hdv=p=pm+uxDHvXQd?x{+fy%QRserAwat0nQ0!U)TP5sCOIo42$>UMTggJ&M!In z)dt~j_l3}#ke*8}l8I#wiIetylqL;(e{f1|B+S%DB4Eu~P~^E-u%hBaq}AD5dln|$ z#DJ-KEB91zANEpgfNBwlAuGif5Z`awk~hdX!Z!aT3dd{d=$2}wuW!G4^CL7 zJS#PMIy8tR#8SSPxEt+9*i@jM7pwi4Qc3%U!83f|tQi)_Y^wqzm?tssa!Vvec{lJ+ z;!|zwnJB}qdP{*S1S$9V_o5v9%N}jE{BBR_26AX=y6l?#rP%emLG7kJuh@r{oxFo)!}`;IPo7WDY`>n(UFs0Ee|o4)1Vby z5=hX2q#;Oho-0?5%3P_>GJdz?B$(68w1f}As|tBh*=Y`-%e+7gmKe`k&ktD}{>b^( z)<_o!oUwioXuB_c2yLY<-Hvzl&Mf*j+zugOKm>EJ0oVm0=>#Y}B@xmt?(A9=77BLU 
z#c>0Q?HT_1q{6C)K7eM$$pdI<2&ouEAcavCJuCBa0qy9LHcom+Tp|O<=H-SGkGuVV z@FS3c%QxW>&;nB8f2_=5I{_gOn<0;*qE@cxjsSWu=aSNC-}A85qb$yR&exYR?-z?E z>b4V-fO1d*TW9G~*&&`M0lwewq!(0xJlx$}NkYu4)l=7hS&xYRE)*p*h=ykZD@fBmQ{r7qmD<2R~Y!`%22}G-M>t#73q~LHaha%vjXe^x6u>73ET8{6mFXfMd1qs=603flu*AmJY>E}<*V5aLOxKtv@@#1M z%u7t_Yx!xoww#Ak#-(~K?XX@+#|b5`0zVoZEC)0-e#T))cY%n)2<}x6M1d%nrD<7N z7MScU{VAjirYjp&dCLa~+6J zA9K#w#{3u^n{-!M<>7(rgHasQe(XleCbSpcIc}eL2xW&CPs&XmuBR!ndFhuI=@cAf z4F8e8v}DK95cIeY%z*u&Vkq?upq3g3~2 zmd&4$Kmqz5HWDImNe^A$x(%tUk9ZnJZrSC-1%>>Hc?R|wme^Hs&xM%1ZqZu^xYtZ9 zIiaj{UWeQH2>qoT9@kY{It=TPLZ@vM*R;R}%BS~XM^a<+Ud!)|M(P~- zp*xKNNLJ2MXX=De1=!fq^WICc4rRcO8D^x<4#x12ftdW?2uo%@0e3ELxI=-3D^2-f z-6c|7bcNefNH%|4Ue4QK)5@2U@b#PEXkX6B_Er}DRPtOtBZN0hy{xA#(!p3mnO z!e`Lbpy$7EeJ2~E`0ky)gd7%bF!tL({$BMsXPrHgG9Br$sq=C3rrb<8ti4rnWUy+T z=-OtdEe!7wuiXIu#?ZB5OV^VQoUh`fZl*~;-$T2*yJ|^Kn*VqzKG1(C(ExBp z6ak!BZyY$#oFdlzN}qkj*&`BRvO1zKT9ovuF!lo*8{_fT{w^d<{=JzraQhHSyi6;wI;9>uzh+hEZq-W4zM6;Qo@0V3L3uUx=q#~q zq!PD!$W}LY10RZ!sUV{gKEMC0{JzS*S;Z%~&(fuGqdpO|ag!>SFy_&?T_I;p2l4|C zqa<`gWUq>?>5j1_CFpTg0hpVqZ{P*uYKbL$ZjO$<8k-9*V=7j zy&mVB-aI6*=QzdyL|Jok%*a(MJ8yE-`V?g(hRQWbm#!X?E%+Z;WDBKjiMy4dwYH}U zfW%K05J|WLL=AW*z9PeO&?Dj35?1U`lLx!^32~X+$&f~9*_CEc-ICyIDwbrdW!k*MWyJ9noi`JNZOyhkml;X+#-#IP0$4p}5IM1I*DlfIl`tGX1W()M7=J^%-4F^9RHqiz!-U?;8ViW!iWDE1 z1^Pv5A$01{I_}zme@73G>zi3xkou1`f(X*J)b2B8LD2o8Q$Xu_*?wN2;qoR_i<2*t zQYYLXpaCn?tGKj7xb(IqHNb-DfUh)MM&+6ze3+~K5~*Fk!VL#kFJlg9Zmj;Yj3>#+ zJF?EM2kV-jAiFN}cC*0g_}~l|T!cN!{D8NE1wG*sJ6i~*+>DrRAQY@Z9yA0xr>fJlDQNgBM@vx@)0q@{9O z`MUyu07n?JqjBkjFU2674;h(84F@AANnbIbdE2hn-Fg3`$yWdjbvRWVb`YDWlB%Q* zhEtNp0VeGNwy1wb<$p#OU><-FI$-9;wE*TMy~a9c@alN`obMu&zV({di_U4!nZ{0_A+dBd z>f86V&Qjp37y_6YJ;a;gFRZlB<1f-=j0{Qh#7zt{%o69@=rC(LQ zKe&^E%sRO7Y#MrW)GLGJ`^-cCa2*TgeyPU}rz9kOwr4@qyht0%Yf^cDprn4F>0G52 z^H0Mx-E;SLl66aUed`EG7?n?8 z(X<)wT(n$vb_^sBhSeu-8_gNC;@?2{%N6C}PZ1^ItN9fSj;4 ztmUseUz}0RVYPR9)4>PG_(DHFwNm`D@(d&kn;1*$fb-W*)RoY!*_0toQKT{~OH zL2u4bW!J=_hIa|oA0^f1v6f`Y8JEc|b;6qL3$HfGUHwAOx(#hqG&8gN*)@|@;%22$ z^X_foHXX~Z+g@okm&DK<5V!iz=+(-5f-Zu*jC>rv>CCmuw;Jh$ z5_l28<2n5c$LCN6x;M!`k4ZZN5A1WQQxnfH3sS!>{~Y9XpgYTh5Ap!^vJ>2y8}I)q zm{8y#^Ts@ebZQhL^mVcklU=EElcqD~@nv?u`9SFJ*kLkB@?;aAE3$yJ2rjN8?7yl) z)t?GK^I)p(nnd+|9Pt0TFj7&$qb0RESdTLlP-|Ta>wg!Gfgj zZ&9-@#<}uAi)B7bUGpvHO(cI3G)J3g#XixU)~|fM!zJDf5(6D-mBO?lpl1sHAVVJQ z=sb<&)CkP4p4S!&`8eOK1eXjT1Z+y0Y<%}x=l!#FGhN!@O=p_4k-ou^v{5*0YUb`t zHR>9L_)e(5))iRUqfJ)IEi2A~D%g*0^Dg?3kHDX>)gQi4;)8IiPX;~pzHZ=*nt;8F zOtw}8&byfigRO+Ya`H>?OxSCiNj_eM7`T5-UF|iTDrzk(7^u1LV)nVi0xAIc=ub9J zc9&({M`ahYgNnGeacA4mVI=b`pN=!k-glV2I)|YtwhK0LXMVk6nY`TE#IXH%7uMV4 zbr>lg6}&-UfVUo9(fKwpyHNDeYa-uL$6d%o-gv?o{gXN(7uOTI^b4QeG8i z4}E{>becNkw1X{`^0kgH0742O;Xax$F18o{i6ZRFOnboRcD?awb8Jbx$t4|t4>)&Kwi literal 0 HcmV?d00001 diff --git a/02_ml_inference/05_image_to_video/pyproject.toml b/02_ml_inference/05_image_to_video/pyproject.toml new file mode 100644 index 0000000..c87060f --- /dev/null +++ b/02_ml_inference/05_image_to_video/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "flash-image-to-video" +version = "0.1.0" +description = "Image-to-video generation with Diffusers on RunPod Flash" +requires-python = ">=3.10" +dependencies = [ + "runpod-flash", + "fastapi>=0.104.0", + "pillow>=10.0.0", +] diff --git a/02_ml_inference/05_image_to_video/requirements.txt b/02_ml_inference/05_image_to_video/requirements.txt new file mode 100644 index 0000000..a73ed1a --- /dev/null +++ b/02_ml_inference/05_image_to_video/requirements.txt @@ -0,0 +1 @@ +runpod-flash diff --git a/02_ml_inference/README.md b/02_ml_inference/README.md index cd41fed..598caa6 100644 
--- a/02_ml_inference/README.md +++ b/02_ml_inference/README.md @@ -18,21 +18,55 @@ LLM inference API with streaming support. - Mistral, Mixtral - Qwen, Phi, Gemma -### 02_image_generation _(coming soon)_ -Stable Diffusion image generation API. +### 02_text_to_image +Text-to-image generation API. **What you'll learn:** -- Loading Stable Diffusion models -- Optimizing inference with diffusers -- Handling image uploads and downloads -- Model caching strategies +- Building text-to-image endpoints with `@remote` GPU workers +- Running Diffusers pipelines on serverless GPUs +- Returning generated images as base64 payloads +- Tuning quality/speed tradeoffs with inference steps **Models covered:** -- Stable Diffusion 1.5, 2.1, XL -- SDXL Turbo -- ControlNet integration +- FLUX.1-schnell -### 03_embeddings _(coming soon)_ +### 03_image_to_image +Prompt-guided image transformation API with Stable Diffusion img2img. + +**What you'll learn:** +- Building image-to-image endpoints with `@remote` GPU workers +- Sending base64-encoded images through FastAPI +- Controlling style transfer intensity with `strength` and `guidance_scale` +- Returning transformed images from serverless workers + +**Models covered:** +- Stable Diffusion v1.5 img2img pipeline + +### 04_text_to_video +Prompt-guided text-to-video generation API. + +**What you'll learn:** +- Building text-to-video endpoints with `@remote` GPU workers +- Returning generated clips as portable GIF output +- Tuning temporal quality with frames, inference steps, and guidance +- Managing higher-memory multimodal inference workloads + +**Models covered:** +- damo-vilab/text-to-video-ms-1.7b + +### 05_image_to_video +Image animation API with Stable Video Diffusion. + +**What you'll learn:** +- Turning still images into short animated clips on serverless GPUs +- Sending and validating base64-encoded image inputs +- Controlling animation dynamics with motion and noise settings +- Returning generated clips with preview frames + +**Models covered:** +- stabilityai/stable-video-diffusion-img2vid-xt + +### 06_embeddings _(coming soon)_ Text embedding API for semantic search and RAG. **What you'll learn:** @@ -46,7 +80,7 @@ Text embedding API for semantic search and RAG. - OpenAI-compatible embeddings - Multilingual models -### 04_multimodal _(coming soon)_ +### 07_multimodal _(coming soon)_ Vision-language models (CLIP, LLaVA, etc.). **What you'll learn:** diff --git a/02_ml_inference/poddy.jpg b/02_ml_inference/poddy.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74937103d5519407af4923cc790c6a4b8bf611eb GIT binary patch literal 30015 zcmeFYcT`i|w>BC&2BavVNe87WU6Bsb1O*|GAiYQj0V&dZla2x+(xinDI)q+BQ&Fj* zDi9E*3PE}ack}+vIo}y~j623R?%(gi;NFY1HhZl#=QE#KxR}1c(Gt}Coa{lM$B#im zAP|TWL{4-KL;_q90bd{@RuI`g*B}s#=*ItCTM!BS*BD|Di1_;d86WEeBK@!Nz%&18 zTK{$bpZ_URK_CjChg3{LLOhM=Ki5RWB%p%-?iUvmDBZ?cW{vcL)C6fq!@4-yQgO2malGe|O;D9r$+#{@sCpci`V0_;&~Xzjxqb z4ip6<`lk^S0d6=kF);}#3D8K%NG}^1IoUsr{J$E-KaKLTUHPAOIUSft3jDi5PDcJe z{r}e&7e4^M{?{_}?ay$s+zffu8oyaJ}8 z0}&IEkPwrSkOA)tj10aEC6LmSUE>y4A-}F~O~K>IAQ7JSfs$9Xwg+M`y2B@F^CIF3 zn30K@h4tnwegQ!tDe1fSWbVs8P*c~?)Ox55H8g_3jh~v>KC`oTaCCC^^7iq4>E|C1 z85JE98}};y^_%pJ%(q$ZvI~ofOG*)C$nv`S26SUnb4x3xx37O-@Z+b?W8)Lp$*Jj? 
zS=`d{&y`=lSJ&3@yMOog4-SuxPk`9Qf0hG$|Idp4*LvuI^$?Sil8{ndu7`;DCGa7k zCne(+C%>kmPhstOokt>^l0h}?Lv7C$UP*%;h|P;pFe9H7?k4_nN&l?q|87AM|G!rB zKMVSw^<2z?s7Z)`10$gaDS-$qtA+7|Y6VbXykMBVIzyO1VZ0TCzB&l_aDfF1v&b0q zwTR;dN{Q4No&X<%{~ibe27~nVCjy7$Ou-yL|A;* zN98>P*{ur7;j#Q0;5NQT!C5&ED+BUIfLzqIDAhuvWw> z48y{>5u^~A~YK`Un)mnj?sCm9B6kqIj!2jh0Ey<2|T^(AAnc?jfwO zA0U_0cvtKm$K?H#MJyUX1*|Pvvs6|NCVA`&5+w z*U8wVk}>SWFo|Q^? zaxLEDfgUB9Gm@bou69kA(%DLZOrE>akRHZODcGZ_0;TNNraQ5zgtC|kB1rd9++;E! zSy*{$9BD{I@s$lInG}w*(uGx7l>zL>k#PsRha}F!mKTx|U6r#!Vgkk^h)ji)l^J{) z&SMaugOX_%tubPH&PqAw`1~P;oxF%f;0Qng&_>o8y736`tQ0w%QV^#=rVH8>#65N{ zBLXGo))?XxoG`MpQlLm+%WcOh;)YAKcq)zPck2os=+a}+iH70gCgnF@F!Whqx#ITL zV5&Tk(F$mHysa z2_`}yqusS0#!AvX;AMfsFXKHx9NSkMd$*A}vB^N%Tg@XXT{+ShV{0lInSF(}fJ;OT z%Jiz}TN4o(I@mcJ)-n)4w!rX!-mzBWk=;BwQLdOeF8eE>nFLk=p$9U()IesgQ7tT`okG29D zDi*b4}OY&!MiXsJP1RzDy`IdtGj zKt9y`5Tw!q?9uYv+a~39X{5w?Oe2OE@oB>d*-ob7&to@MkTTu)0XT~rNg9G}cO)_+nuZ=^*YQeS zfJD42i_}UOjp)aLvv<8E3-&XadBPfQsG*6L*AN;>na$&^EK)|3!swGRx%Kf4ybO4j zO<~6oR(11t2+A&S_Dl*V#VH-2uUA*JEtJ@A3T zz1~K=#uW?f`l3Y3B&+64+_~5ssL#tH$;n%T@x$w{+-%f_;#K38#VgIsV~EerX)`9@ zy{odnS@(O~ty_*?6Zr$`Hwv(o_CeijZRBM~{pw=yvsV#@K=!ZczP>eCp`fW1vE>zR zDfrIr?37z3pYi)zy|PDdiG7CfWM>?nGlnid-2G zH!c7$`AdN5?w1k=_G6FMcgl45Q(hRu~aerZbE(T)$iG0{&VYYvAVa`KNBL@U4sX9Z zn~Yf*fM*!lW}X8isZoVTw*mbrppdPAoKZ%*)-10GQX-c`yiuLzN<#n+#&U9NT|U@o z;ZAN1Y_vC4@R2$oa>Js4c1Ra{^uI`t^1c8G&sN5*EeXw7YuaQbza2sqiaL9wK#Dzz zYITM<(s7(C=|K+a1OmeQ^8%rI!o~6#;d{3U#v S=?02)3S7d?AHMp9zP+8EjNV&QuOu&i z>8UFRYq%G#DUDGNitI3!jVGEcG}M2oK%?-4+RK#jJN{fp7!xyiz~myAoI=v^Lt#Ze{X9nfAz8?dN1(T@mAv1Fjg}n-RBKtx7v-z zZYsOo5qc5^?H<;if!^j*`yW}VJpp5(ER+ULIEsLroAQ8ZjL(XT;4G2nf(QV1Rc;br zOseb)oN+1DDj=RNeq+RRVNgsSR_uu^G~&%zV-gn+pb#LM3;{ipH0THnU&zP}ek{XZ zY5bq=c1P+k{sI8_&|*+M`YAO>B1(Y&L`z%So(ZWi0g}qAZ>P_$&2xZKA(vrT%ot_8 zSqA9~LG!;&&UmKbv?Oji5Uwo)^w1D6%r8us3a^3=$LKgjU_HfE~Y0yJk$ZX zf~$G-yvPFX1C4r^0Mp#>#wNxG{Rfi0HD`>%R4y0HlyYe&dKqkJwOxYU%)fMVzinZR@6;G~9lh&!8raq)jCKpxLQWl=PX zmO5a!t0>D=AkH=2`t*U&z91tUL`L0IL<~UCml-C+LLM>bWM=A1!Or7FEphA7JA|8v z2rw`*@)22Doq^6%&z|CapJCB9-dlWhj}q^D53nWQ7^@p|T^`+gMyEmtE^nt%%-GLS!g%#}Iyqf;_I|FuQ@=>jLmPc}i2PMazmJFb zyWd1}yyFT=;DZZLt)CqJMG3GlO2Cw-AQB~mp@ZSS~y zdI9?QEAtMO>XEXLa_pLAK^H}+bqamZ^PQ0?d8R&@4Fdw}t zH2I#Sqx)L8<9^y591G6wrcHCoBn!)kC?aLLptnZ|+p$|i|i?Ppbk zzjuID>#116%?Lw;JF#hQ5-Aqgj~krzhe-_|AMIeC&7JXC<$qYS_;o$8?&S4~+aNYH zXu&G@lMh;e+>W8tH#<10I=6$n6C-O zt?M(PP3e;}1pd)fLFG~&B&jjVTESU`v*1#T-N5Z?qs!mYzu};H{8) z<2ljZgJ3xbsXd@(s0bxYF+HH%A8fMX0Tqf6F^mveO=h+e=TJZ{$+mJsxZ(lR1?CZT z_Dimgi(vZi9npQ3dD>)-Y;bYHVRk{8!^EpQRh}3S7;x3 zV=_Zf3Fjs6LCqVC_c_YJhQNxdv2S&E2TL3&yH^G0SkcBRyA1M$a`3_+>78c;F}!+X zCTh2y`dm1PUVN$Z*Lix7SmYDghK<|~;UIbt!1d?i!PAptahxvFgqKgr$FEpX;s!(RtF$ z@wA$kMdKsw4Ll!-7kGEBm>Y#XQ^sXYy)&<3Tk51`(wpb|azzgWP6cRlc>s()T#fr7 zlGH~=Wj>F>+GHR#E-ExyUf|MlfB~3RR%6Ic<*(Fc$o5l~1q=xj57o3`IN$@zfq`y7 zyNu%mE-j1U9)wHJWHp*T6`)BE_5g2steDcJPzn^q#jqm>erJn}@)>94)dEO$iE#&+ z@s(u2b2FzaGzd4-kem1*K2{;(2k;1=QC70D!e(7SN^meW@101rHzr(-in9Qz=7+>^ zmKY~vr^yGHCiV~}-mATN73uKrx5nUhVv#k-u`7J5GW%QR)iLdmYQJ(kz$R#Bbo|8$ zORp(f237W}2!AmPv_WK^2kvp%wVe}&4yVFe{e8>HBwZ(EofR(2UK~+ zE|T{fzVygFJPNOn4QUu1l^#18OpftxR<*uKHVpO zI-^;r{EnAEmI%L59WoM-D6SMs;56059o>h;>hv8}DXn#EB&D5}zff@L;dguS=PAFx zV*hWpkJh$)yP;D$vB%Tr;+Ah5W}<{e6_ z;!fctEjX_;Fc^C4J0y|Q%i><3;`=I-$4j(V|FXAODsnyCZORe+Nz#ya9~5c!d9Bg+ zVdJb=%X+|@ed)O%?#9uDCA<9CeH-pNJR8*UZ{tGi^N|V>Il=33Ed#PspN8)tEefO6 zD)vTA7N&l}YidsfPl-duPJZ|5F`60&7gtb!7JJxz5&++HAMBc?Vb4?)*21=T{V=-= zx&Wy)&%UV&Z568vKR-%8s9r(f29RZbUVX11OUL)_?z(0uweYZ4b?q>chCoiGPFnru zaK(?sorvQ4`Jp|`zjvw~C`4U!hrNSgkFy@?d#89!h6v3D=k%%mWMi`}tG+Q6K*n;oG<2Ifvv z0l~c{mZ~G4bM*(tlt`c} 
z>s1E*aHh%{PrsNXDBP|Relh>M#G}S+30>GRAbftmk~GEY&)BBGQXQHKSI<&$YeGzR z^5zx+XDRy5=%F!nhsCnL^nmU?w#ojVJxR}MZgq0hwb7daASwwUCYcsPUOGTwA}%S6 z(38jx`Af%*i-$6BB3yaRfK*{!T~EcFzL=2*?1UrJ1zZ3S`z47{qdUL!aX(zus-1e| z7*1&raLH+Ec=wS;-q4?pq3?dQthWdD%=K4~5-#y7Ij{t-*sAedJl_ zw81v}Jqh`h^T)NYnp;XO#AlK0uRa}C|H>IoP-y?Y*s|d7+QD0Q7EHe~5Uri}mmkkG zZ~=JNs-gS_zL2f1Usc*O+V zZPhuo+L2NR(}#b4H6$OnB%n1cWu@Z%UJKkf3_NU$QM7#fYbZngv#IEjtzXYQ+oL_K zcOdGP_%MsAY=l^-5Drb&8Xa`XFFs6vO>J=z_2GN0LXEOgbf~lCj_18I`te1F&38C= z+K}=5x~k+LC0ww@;pq+IQEr<9U#7>`A3AwD{StSy$sZC^Xuc^wenaEBggZfEf5@`_ z(P-XxxU|;56KKqrZzXpd9171KwKV1rrX4Nls6eV;xmE7RVHG#syt_J6k~d@@wryAz z<=4aG$(t6B8y+tBG9fG?$GYxpl@ex-^r_8Q&P2-|l7j|B_Bj{05Bo8)DazP=&du88 z(9KYKO$W)Xhn>))sfoqfdu52T*$Ytq@{8T|cC?7E5~KWsmy2!e{;rMhpiHpE#4=y= z*c5vsnZpXs>2dhDREOjeQ}Vvt+qWilqawK4){IZDzdJK6-T1WRzL=`|;XxIc+^zYm zYse@}(YMI!$^|I%GoECN>o^p@h{lIvk^%y$<^X2lAI*#1d@l8yrTcspmUwQoY!+aj zdGAMGzfxKSs@X`dc5gVDAmrpZZL+X&ZAdDhTt|eyA^C61 zjK;IQ?c~e;GyYSv&o=bX(8@z8!|NX0YnZG#U+x3BJje9bF_toCZeCb42>!FZ`}^!b+fc^@YKa~JoY zxzM-XN^#(m>H}ANRJQ0V-h>cFy|<8sFw_ui5N2xmx^Sy@=4eCRjlXY-E-lPd`coY0 zM~K8s^+AqFdQb)8wbCqG2zptEF#t^XwKK$4`|k0*NsjFwO*q?;(pF!=3L~xrh_~vG z6T^s`kP~^a((;>J5yi%zR)y9FQ(A@eZ?LY#(3KFUMjb8{)0G{f3c;yrWIhil4PHIc z0K#lZ<*O-x8zyNk-jm!91ckXF-VZhm;?%tdDJ9~7sHg?D0EjEap-N?x1t_!gxc4iG z0E#bMczLU?1-JM=<>UKSmCI3rD(?tdh`4Ace>NZ%2CAH- z3yN_}Kx?HwcPoI!#k7jjuI0OSXyGT~r`BzsC@|sNd#O^cFKDnx6i=<|^@fH8XuP*& zd!u^K$_MhhtNwSyVq?Vp84Bmq&N;hwVwnLDa|NMn?k+;>@8FI`M(KmNWfBhTQpnwL zTkay2{$NhjkDw@c)Y02(;?>Q#RYDU>?v)Q-cZ_0g@Cd6RB z{z);yfZC0vb_D8IsX15DwBa z$>QGX*J1#t3QCWINq7LGLs6n#JAqLYE@x2TY1T7&XS+OBAw=yVq?|MLNFZB)j?^Se z)HzEN@E5>q(ZpkFM6@N^yb!=;D88~PZZ1|JL6^a$KU+!(FnQXsR$MH595G^eGj-X@+= zS$W>+_%O_1dpb;h%kTme?zYr?Ghwz>5A0lty8vCA-p~O;)8oSTTKBAvkUY)xUl1EI zLg{ja!UpZE^2<1~A0^4CrK@)ogFf-JC3SUCqjs0q@+Ui7p3NqGjG%x09lXoQ?3A#x zy|koGJLFtHgm^|x%VYM~CB-Tkm4DzeBjs1r5wkv1z@%a7U$iUP^!3LL(YJYXgg=4y zT?tOOVHq(=d#cbGF9MhX&yOQyJkOjp zIw-mTU13arl|yya%V&CHaOmYh^8^jVmL!?XzggbB@mE4p_5~>B0>oQh!cgQLB01Om zG!`9d2RN(FqFem5F@(E*hX>}^TZWMXCXXz*#>XjcooWiZy&P8Q9CL9)U4R}LPvMh< z{a@f`I~UHQLR~BwLk5)^-bRKxx>Jy4|HWj#wg}GDcozSMB5CW7TZ!PuhgKOAZ0sp@ zk1Ok)s?=q)->i4x60J6e_m(a|5p{%boI6;{9X-)_r7IQI@#e;kyUr&{xtKeM;p>8GeD1y3ed=7SArB;G33BAauEP9o_$Z;ET81OseuT zJun*ydO(qK1}*u7a4>F2o}W^`=gGX1TY63YWu0nCdvW8P#M76pw~Ryc-@dA{&bEeR z`+Hy#7#`+qB};=7-b{f(U;U2)pa}H==O10w&BpIa_XN-6GwkuVhhxle(i`_(r$%%m zzcslA%Gwgh%td?610M65#XN_*kVFnBbb;}os-An#WubLA_YYq4;elZEb#luIIAX(o zeyIOzsPzSiW2#M*>jG5ulH?0#8ea%xb+P<39~T-4oXnY??47Ze!?y#Dp-oc4ZuT@y zbo+bOV_nkw6mK5l<%TKeGOtxnm7TT1*C=BD@Z#Mu)~vZ>GrkAX$EWyvx_4!(XBn)7 zIX~_yy*ooqeJk?QD3|ke{U8~@wEO5cthWpfa!_<8;> zGV;)kfcqI@?yOZ%VQr%7gnU^MY#nK4b22LAdY}-qjnSMTF;hJf-A% zOK^+Yg`LpwE!@g>KnAHaR~)#N8^9Az6-(?+Pi_f45FQ{^!{Px@;;zcF1{H84VRW+K zb{^u(Py=9O7zF_H6jNPstHfBioLM5B8h=%b2U5tz-mlG5m9_Z!r=*%@4O0G%i7IRQ z1Hc5O7bfB^Z4mGvQt44PW#Z!rw`8NB3({EK1{3vY83*8_|9i=HbMyqx+b=Z~B?%epAv-LBvz8S9LY4V#HS z=Eu!9q|OXT&UTdetjtU{m1NwjlXDs$S@UsONS#XCfD3(daWR}s{m@e_f<^oMnCwIe ze4btPQ6=3H_-gzrMmXj10vZjt_POiM-vphW1tb&0W>!R!mEB`EY}d$m#)PVAbCX_uo_Lr3#0j$+uP##(cfrF9) z4u#4*fccb06xylW$P5KC?b<0bvE^D~M2YtG$qLGKyOf=(X6x<>z!*pjZoyzm&hKo# z`t;I}S+hT?U8<nR@`?dYEy!HQ=NepO@RENY7A3M|mIYBQ z_42{88yv_oERzSje!8@DBw8FX8PxEKT74?(6Mqxe`7}MWgPy0;V(077YQ6a?AKpP6 zpn+y{8P!Yf*LG7j&z6d>jc%QANF3f;3Po(6-3UH4J91e@Yzn*VFfKodKGWH0TzV`s z9YI>8k^5!#8H#gWoJZ2soy7eem8kO>+XW~R6%@EL*RRxgFDmHv;zm}LPZy1!{#KC6 zir30xPrrI&)lG8wY25YtTQ7M&M~BU$&+5+MQ&la^QpKMw*HS7Q<@IiCy5hFt*e(0qr$1Rp38AbE%P`~j7a+ZpEyTyIgYQGB zgb+l1?h4M+yelf`W+2jnr{aYKK8;l>B}Gl0EH_2nh_n_H_K>)%Vhc{Vs&$TXZC5|I 
z%{iENb)sSEyQRd!%-#9Ena!5_gVtV0Q(K6$>u(5GPtNnZY!LBNJ-m~$Q!9VMJ2ZSe))9j_uyJ_UTWW0~Ck5MeH55?a3CL_PC&;);n@@{;XM z;iHYIyCp->t5-vp(tfk(`p$S?9#6As?L+_48B=j)S{VsF0Vtu5gnuU5 zDg5zphHq#@*Q%%1N&Uf#$)qn{j;~+Tmx^25$9MHE*yrgt{dui)M{_M3xntSKIi1HB zp&>&Q9XLGbc1O5*o&Y(vQ(oTr)tQ%V*^iBy_R=d=QEtjO@1Cf5~Ca(5@7HA;F6l9=>(p09`nBjSVI{3u|lWO+EJRxQj0B2 zf4iKAes&PcS$_d~^jk-F~aVdcw0bj1`id8kE(G`>+T5s(tJ*aua6>U$0TF4gm%?vca_PFMCaF^)PxYuAdh59&6hd>!l<8x?+? z_W4^}^(PcnN{at#xgw2OuC(v?_1Dt#t+f+xYi=^;s_Cx{^P5%34GT+#*s@Bk=kYXG z1OE14OxeY?_RFO8G*FhaN$bKx*A0A-q3G=9Jl*&M2cQ5+5DO&c@*02?Kn8TOs{^NY zKpD1q8DWwEQfC?BBXC23^g?AILuQu<&f=FANN%-O8&#((H$-tLlwZc4qKI2@YK0Iz z?Q|}{{KT#q2+?4tsAY`fxK9kjK}aA__K&eBkmCX6M{C!f7)7Uyl^Soay=Cx#$^F%i;dUNcWEFnD!2AOvlt;cU(B3+-i0Gq>FW=9XgyFQ?2= zi*3T_Cx+NfjLUrz(7q3-B?|kY6-G`r{kpY*a96PcBja+r33x#_gf{S=}<7i=|p|!uVL9QI#Wjggf~9^_1rstR_Z}FUq|OMPoND7 z0ElHz3bPXs4s)rrSY`e7M&kQZXV##v?q2RfdwxGHa_pY!>QAKmJQG3u(s~Ya%(PqW zW(}RyenCSPDyZ6)aNlrv!(nmwj9u?E$d3twU$vww*l9|#4`-C>oRB(p-M-;Cur~kM zI|45MtfZY+sn?P@WHxlia$#=npnC_q5GD^@YL>r`3v92L)r{VHuM_`E>G-$L{p>*G zoAC6fg-HW2t(hw(grGuf-qN~)VxPj-zIHw_-W1`so^cj#ATG>hF<;i8;VjBPo!WQrOFGwI@M`$7a%+Fu3eL=Lpn{wpvyr_%R8dK z&#V}(nesaCgGAZ(4=TA~Q9L-+yR8dd`#)cgk3{9`pRh01PP|t3jI?~_Z1RPJXD6v@ zlS#i}s^=Fj>it`EU`lWPH4=ZajAeU?-uPb_TdXwSupZUS=8HVu9p9qUPanpw{+%0I zNnf-b*;Q(5Y<@Z3uJ1T!@a+co_M4QCHeG6OEOrXNR_V6+zAwXg6Q~?By6}sdI7{iH z4U4gAs($B!+O>GxpfyD1R9zvE_bqK=hIURoD;qovP4#nXZ!U1^`bT>s+a^%VGV=js z#4iOG(@`A+?Tl5ed+puq%{!zk@v(@_bHW>NBPrWO?^Z5(V~bU}YmfiV`HB^tOaTR_ zYc8%Kx7R|Q2D-oer0)@G50O51!D&XKo8Lrd$Ui#IQD}Yb+vb^g|E7D_7+&!WQP{zo zB)-k}CIkxjsX%BCr;_t9$87KN@U6Mt$%lU{ng)L>zPGW^TT&0O+|obgJ<~aIBfU2- zI$`=WyN~+VGcJw6GlK+34_2=oE-oZhB6d0&-`xl+WWXG(eGMu(HJo+|*t~AC&{uIJ z_wXmD^Qk+nyv(PX!iL6LoB(S~)j zz8Dy-Un`mNI@U7w?0*E#GEFC;a*(&TM4B^ zo<9z8&PIUsP{fMGx8Wjgs|_0~lmi`%*VmwwO5n{H$`TTp2U`Y|C@p0nYbLbM15$%{ zNwxc125~Z#^j857sLU$XR!UjOD2fJK%nhVvBU(R1uw-)|Al0OcxsaqJW3AlbBx0&U zDp#H206VglOiDdUf!5-xm)h1e z^rfM-Q>H&oCSQN2!45vSMdqlf1X;&zd}5c{V81sj7&11Gj^bBXoA;}>m&vfLglhs7 zhj%g*ob8^-8umyy>oad#u9^wt?Iy*k8ukF)K=H+s0`ID3eSud{pavrx3Iv#p6ol&- zi2Jw|oQ?Ffvorw=p8}JNglTR-DK9|ant$94^Ym4+kBmx~QsZ^Z;3{Bse5+>tJj*gB zWpgxyJo3;IgGcaZVk>2ZM4UJS!0EBJMNZ28LBt64+X4Osfc)5Kz!r@6QdjUIGi4iU z4L2Bt8}pdKD2Q&YQAu(Med3gxyclFs2#9XMt}lc`i=Ab?%qMpGHEtTl<+loXQf|XC z?@CKx$X-G*nT1AL7ob*gsK~8Kk&p~ojGoChB1({_u#XHnb;iq-)PDbNWD9Rgbo;6A z8*YiAm5<4q>OR{k>5I$p^Tx5xZT9LNm@f<79t(f+t_H=vVv}a`eQ5H=68y{ZVEc?J zCu`koBDa;3b2?-=pV77@P(5aHY(W0kpOZiH0t}(;d?LXSANWV$E)F~IagC9zUza*` zfA_s~mY6(Hsi??^wa-)et<{;t<{4=&n)EDK@xZsoWzOHuN+76Mam5J;LQ88cow zUx1W)&nbqVZ3KIs9xxL`W!-8tf-@*;D}KOCE44SaIt8&``&w39FF-*Tpr~DIictyS zZ`JROa@?5g&VO_sB0h1Kakr)ixH{Y8Eh=x$g%01a{Gp~;C-c3xX^8n`T*OBX8=Vrl z*H;%FdykZqdF$QE@f?9zbF6IctApcQ14?E&^ms#bd*vj?6CJuJAmk8#LeNvh&uS3< z<`AB)?Lk(An|zK!0k4TyhvhB(L*aMQa+B}o%I6Yk2Q+^YK;zbiNz`}6;;^-CY_)@r zYJaD68?6}(Or3iro`9>*%b0$mQ#eh&&dAkk^`YHMKQ2h_2tWU}t8lXy)2*erjkYyD zFFkqk`lGVK+xWw3YCmW%-l^0dOJa+9lCkWaqDL0{E2GeVJ}uB6_2X7Tsob#q7v28# zFaCP)*b#7uOBU^E@%H(AtXNVCwbC4D^nVPSnXxGHxk z*PPnmJAlW_`6l}dbZ45tNk^(siM-F2o~1R)5cV2P-sQ++KU5*P1W?2$iTJM7M&F{#C~c4y;6v z<%b8Pf%YCTC3LXY-4xCl)_YYSt(3pf1p>r>)Ht}@E)rmo#ELNcI3V864JmHi7i$7W zXg`tYlNN}aMUz5!x)p!`6(>&%eHPc20ad8;1OWc}WZr|siiUQSz;7aRrW9PFbzV&- z^_2iJ<8*;yr*&J;g`rS~9thmDUt!d1cSBreqLx8VEs2 zQR0%|wWfW0!+4Q+Kr4)90#92|>CTgLdqX{pQ!uNR{_7)De8)k6Uqpj36-~d-qD?ML zK3hJ1)EfLaA1J4lGExF(z}W##4@7$ks`ug{VfQH)vf7r26m&ex7Mpxk@GzhH#Du5=H zFl}o9>m&q4>>8cjfi0Z$a<#GCE&6H@JsZR2RW&TeWnTJQ`=*P{LrH& z^GPl>9=3+TZnFC$u~!bN*8L17z(bS-pTeAiWRXD6cD-_XgHpodX;m$ldZ~{cP2HTe~i_p5fExDGJT*`R*)`f-^;%`lY 
zScAcY{v?x0f1Zrq`zu)Wh$y|Ek~-d~paHx5ZMb=(7cp`Y=v^(^bVP;d_htX{r6vU@7&AD* z7Lsu$yd*k0q<~2BkqFR6^EcM&D(bzfY(t$j%CoFoRr0c3c{tw3!I!4K{K+TL4YmSj zzgut&ClHf)XDVF{dfvr2yumPXbvdNTt9feqZi9Dhii(KI+vsi8hdc<9b3xw#~J(KmTD$}ll#jniHN9g z!htNqb&+h(67r(3P5flQj1$s6!+pbJF7QBY{ggmt$GAlknB? z@DWH6Q}(IY*-P`6ZlCBOJlwL#5)JQe8>aC?mW*=!jUXO%12M*yaT@{?fh{^r+1q+vV;xGBYX9)Vo7?@Z@OEp50B`j)_qhn(=1-u#{pBd^j z(G{?;H`)wD*1t{`DlZRx4Y-w$Tu<%3#vrpVKdRec=sIjl_NT2OkU!kILGRV%8iJ_7 zA$n8X(m{lGtnziL5u$pc_5w48$!efrvL^OK`{_i*v; zm1`|WATHiS!y%%|p*E|xF9bFpi(BnM&e1w#-oYmkl8o~RV__{o*A-BW%-1UHDV^k(UVI`zbfp)I!&~996#l1~Q{|v-J7G}|E z99W^AX&{6`^0DfHfGP0D1^{QI7%Qgpt;({Z*!+^_uE)4MhVJjsy4`{DTTDHsPvvI7 zQ=na`Y}X8^pe7=GPmc8yxwrIoX-nLwpn8lMAp<0}@wdOqsF>P}&*t4$K$fY{3^1kX zGVZtE7F>8%^qo^M{J>ND_F#!O@%RX|^P|(>-U8#_(ar40pzt+wikYjN&!oKiL_&e; z>H9|ip^s3Yh$a-_z4Ln{RmgCI69=R~0pRf+FOa?lxeR$IT*9762zpMB#xV^%2#q^; zzC_wPnGgmOh&SJ> z>D8*BGm4JOcdjd=00SX$1((h|1~hT&0BXb@Vf;zj&W0FaJgj07Eh6XyndB0((^G-L z*o-pbJHW;&8H}7Jb+twr2@q=_bsiwK6K+!uL^#Zh`eQ0YWESDU^4!j3(y@~j#nPVK zs4G_kq-yNS3bTPB(w^3Ps4I{Fh8jDDN!FSgEHIMXGVi=BLTcbA*D+7;UW>8R zNG1m}j6J+rr<7CEURPS&D47Yasj+E*fpe{@s}SL`hWe`uohn{cr8;s zf9{20oaf*1$;)?o1+nE6JAwagL+E)T%u28yW>;TOR*&n=zz^allWWOeE$JjpE>ryt zfd8FJ>14I_5~3SeT-Z6`@U@LRs8zTC^>$qm^|qwf*wF}q?NwcX4k~x%KZs&J)W(zP zst4n7Go3fg*VZZ;MxRv$!hbVu_2S=Z5xxYIsQU;fdRl%WFykaedgE5rp+_7|nr|}~ z6>|2Ju5MS%-Wb$azY3>=ZGAjd@74bCRod#HdjBQu+tc#j6J_sQDA2l%`=vFU+V9*p zMiF7Vg34bQmr4MRRz4SK6pfa~vazZQ6meS}Afr26Z8uWIhH*gPQ9H0HL^yHYv$4#N!+Q}R|8)&%Uu=P~l_?Z4%f}V4R zn{W|hkT()GJX_~icmD1hm7a5|Ng!NXy`fuxWkSmJfGOEJ2eb;4f*n`b7)KF04PG_YB(s` zOb>Ro`0`tEa7%Og994WOSbWM|vfpzm36!fVjmZMV!$5}7*^@oA900_zlIiFth8Wpg z+d=dr0N_@ky$~A+;8g?ImuriIzW8w1?$Bk30%1M2uDqdKnAgYJ)!@vi&e4GTqwimssa33?vaZSOECr$gs1i z*3M+GGmHb*rTL<+&;i_y$7Q4>npNE>&L}D_t>4txbGD+wBBF!USakyYid^V(KP*!e zKt_0x4yB2@Isk^!Zq_Kls{!6MwnD)B0N9s6iK4p1LtA|z5t$7bBd2q7s!>!Fcn@jy zl>zK>We+6`+J?heYf?{vr;r=P>(G`M8$0jz!Wh}$B|4;%I+$#qjO{qrEgz=cMSnxN z0P3lYsXirgCzIjmihWVqy6@@?0^uW(-Rv89JP#L8Zn1Kn+-~RQvUE7o4iA70;O@Gn zb($S4b&28~qQqqqMZBm*HdlY_&Ut;tKzJ$Etw_|<;GK;`o37hku3MIBSjC&td8zS=U+#+Q zPsE=scUSSDQig^3spe(7R+=7s#kI>Ln^<_4d?K5xs}jl8Mfp# zE8~Yva@;kFuh%T~1+0!fv;HdV}WY_MbdOQ@zY6cv^OiQFPOCiIWINhPB(qp3u|KwD|&u+Z}FB z_YSkwyIbmP5$LvsdBq52^$-pZEt6yTA6mvn@p0*e@!`NPHoh9fTD7C@o_ybpM81?Z{Fy4b@}*( zwEh64<6!y;P;+2XZKgm8sn?p*7VP!#Y}44cipm0ZRHk3Oy57fb04r)TL$D*a5d|nJ zr!BLw0jAAw!2l#+W|o>J$Jxsv)%qoVSul3tQmq{(ln7Upr5CPV%OPj|{f34_W1 zPYLH9&*c06aUG;|KsD!6jxonH=Tpf!JZevm8Qm?05J5{jtY=U+>-5ecz8g_IO^e_vw1qX0NGpz)d2*>kAZ8-Nu`C7gZm9 zE;?oEnb$)?`55+SrUkdemit$IPF)N9tpJa(0T9#jk{o>z^5}x&P3vxd(#NBexLEM^ zCy4-44rZ$qe7ZU74Va;_tfKjWH~=Jp3Oc1OwAR9FE)eOpWC7S*+%W*x(s9=gl=2TG zNgAt#RLaqdCLOj=71*XYePXFvsg~a#%)6H&MgIJ-kJZxHsnB0mdZ#X~-1!aA!4cim zNEmw*7yW)85kRgC9Z53q4}mQ{iakDeD6^qe^L44oSFV$hcMEd(r(4Ikszmm6$N-VyoRULAS8vGw`B~cdgQYnTF5&K0Z=mz_w}L z`DU^4HFZe}>dcm~5AbfH=20_?|S{A=qE+-{czs?&X8`#Z7y zmDFQ?i^>#-FNY@2#(mD(WxQMk+Xr2AdH9#9W+2@u;Sjw4Vp#j?;gyXYB6QLs!MDV& z3AGk@i2aazixH;Pma#8e_RUQY{+H=#gyqb)+lz>Dr9Jrr=1*=n3sU6WxbjbE4tkz^EEnS=g^ZfBBuH?y5x;y*eQpM#hx z#zhbvcL4Z0XUkmgUt<5?GQf>H@4$n1G-RElm6%?DT+wm^c|4G40je2JLH;s$gEZ!aD?ck&OfSc6z1j z264o5kKIz;wD(Z2^W!a4ZB_4i zj+cvjXw?H65U(fs;;P+VLT8D)1j>7E6FazK@x^y94$^O5U9C!SufqmRMnEUw5w>5} zw6I~WRyXGqD4+4-7}7)WB9dd;FidRIGbt}Vr=S{k8GxMC=&&u3h*z7v2vRyHvrMr+ zfNsHUOKW%W#`Hmu60@upGu*)fF=c9jAm$WO2W0HumE8yKW)B#C0AJM96;cOf>=AiS z>SC*aC;Y!+8X^Una>C03SA5p2aTs6N?K%|of$tIaoa?|nH7dZ|bMD6Npb0zg8pj43 zM;e~tHsi5dA(F`>y5ub9)^*i?WF8B?qQZ^4tMe3fBI*&4I=*oZbO70Kn1Q}7SsF1F z(-Xc#{r%^oPTV7m*;PtUiW>=)u%sNY6dvAP!VNlv%wiI6kjM60dEr8Ah4Wm0T(#Xv z#1w-hCQv8x_UAPoaZC6JUwWX 
z6Jw}D{@t>?;5R1ZuA6;e*p=IzapcIDO{JNj{1W0u&mQY>^@dxQ{frAW!0L|$Zsu@& z=!MsUe{B!qzX|XcNLxaFPS#+SFe_c`eW;6X95-R5SuVh&Ijo|8@nF)=*onWLxQhm; z>-u%Ftp0r%+Ab>?pPN!6pP?x`bDqGyEamesuaAtXi)3P4L5I3nq}#MT=XuX2(hT19 zXZT&H=lJnW@~&Ibk!&5w)WN+z<^Bnm?kUaCB6#5Or;h%S6}K5ivQl)%lS0p7k>`Gv z?I&?vd@bs#S05t`-EWbjCF&yYuPfazJ1U2o4Gx10*W4&L>r;Ped&wX7*`8^0E^>ta z*Wq6U=6&*sMQopD>w&+!-kiVn61COWnCdt2&F|x2^l*-lDEfAOJSe>dV=OrP0LR&m zFXQ_G30-lPbc={pK(>xTPLRX6t4=zIqV%dWdJUizQBCvTH>7unebT?7?nGllli!z3 zjYZO>7^xexoj}p?O?%n(2i9_`sswU+YV_$HwiC$9dzM9#ngK4YnP0j2(!dKt>1~%} znl0WA=;Ww8?WH$IhMZK4+)%*k2kD&X;xng6caWXqW*?pJdyCl8g)u<_R=7Ym{tImK zw`;nY|1n2z3BC)r9h`moG!Vdw6yi*9OGS?ElVAcCb>UFK%yn7q*UO*u2BEW&kg(rw<4VmEzWng;uE zvb-nI%ve|NesIVz0QaI48RsBt%DNSAk{E|#Q-Uo`+~%2wzYywfU%tJ1<8}rs1KIi0 zl^zEh&#lo6;6JtGqO~l@^F$>P|-w(0ojOJ&jNXys(vfDB3QnueU0zB>Ph;He7+tO$FQi#Vj(9+09&P+2P z!Rd1!TfuB^q{SQ#BqgQ9Kpw)l)3E@~mq*&KzOyuv?$>2@F_QikG8NmRZYP#}Pl>`x z%-O*R5J=8xqmxZG4=z(y-Fy6CZp2;OtJNcBC%{@;C@!0PPjQxrtp@Nl7J{ONYgiy= zE0Q6pN}L)xh!%k&lAkz%IW)hf(^grWbLegL0_oZ!BH02@LVz)j^5i=wqB ztI%)p_O1Z_PRJZpQtUS=^*OZ3Dp~a70Zv%00^sOX$-V_O1LRYmOzNNBudlG_G zn0R!*0Y@eFAckvdk2b^t97&2f`i}+kHdZ>Mq64`-2o#rRMHg1x>eCEUfCCm|cY7gO&#Hobt1u9w~-IcQa;i7~B`-vDRA=rD@&)V59 ziXKyxjXiOw-#iOB#8^EwshQ8)fJoQvsHd9$Wm3aSFFSq9e-?FDb#5Fq{;s4YWVpml zVnCeZM}BKQB%AK43r@(j<0vFO9_{LXQbN^`qJ=>R8- z`0aGkveDTgkvdu@*Wv|XUI5fahX@z@lOh@DsBGA_Ni_a6#>wo|< zG>lO-Q8pO4>hnhIKc$y8k;}1R2uz++Kvw&OoFy?0YbDB3%n zwFikEkd}|6^$D7frkfl=j^1iY21JPVCl+8u|rMIinA?;YE6s|&EHm->DZP;USB zshkCVz%?V;c{Z5KSkfH4pmSlVMx4G5KVo7yMf+hKBvvn+aNZS?3moAxD41AlQt3>U zHz!$TKGEGSCu}$PeMJN$wFJ-FK?RDjNsXI#tc}ND;~r`dc=(qcAIoZ*Pm+kget2O* zJ^r&{(#?*UuIkRvFXZvOLu*cJzE@F0!3|*E% zC#Qro*b@tLRs)%*}LGJdx~}y7sJz29vFV`DsZG%q#7AC#02HEPzImeR@pV1TZ9j8*+YkUZy~qhV5ci-!$t)Z0`qV z%NNLPja|p1?6;d5`m^$UWwPwg(nPd(bl4ZnNl9o4FK+=hal+cF64_Q?VaYf0^`V1Nime(VZhg z>}(8)wvOJOfl9|ByiP}R0?=2vVUEOUt2Aun6fd0kV@pP1#fM%s^CE|&(=JAuyUfx+ zGs}U^*10cDeq^RoXMSO zU&~(Jh+nzQ2*a)TY7hPMk}VjIHnvw_a;hcYshpH&(f$s`c({ctzA@C^Ao@w-` z)y_V;qIWaZPO9rb+87cPi1k#W{7(U!dTL2`Oh#Dc$V})iSPf0$I+3Ip+)pAPh08Qx znw$C&O641^Z^x~q_+48w063=RU(Ys$_3!q&X~L|%qQvgQ<_}?=S)0i=p^Z7v%97=T z&Ag1Lee9d7tIUYSr0mOyuyA$xf!}6ZS8h#JoUTd+B^`iqfCq{B|i* zrQ33Pege(6*ljE_Ee%7tEyd#aVLeXuAz`xL5~s?|#I@}e?X7ylp~KIgqy5cpHH42n zR+hdv=p=pm+uxDHvXQd?x{+fy%QRserAwat0nQ0!U)TP5sCOIo42$>UMTggJ&M!In z)dt~j_l3}#ke*8}l8I#wiIetylqL;(e{f1|B+S%DB4Eu~P~^E-u%hBaq}AD5dln|$ z#DJ-KEB91zANEpgfNBwlAuGif5Z`awk~hdX!Z!aT3dd{d=$2}wuW!G4^CL7 zJS#PMIy8tR#8SSPxEt+9*i@jM7pwi4Qc3%U!83f|tQi)_Y^wqzm?tssa!Vvec{lJ+ z;!|zwnJB}qdP{*S1S$9V_o5v9%N}jE{BBR_26AX=y6l?#rP%emLG7kJuh@r{oxFo)!}`;IPo7WDY`>n(UFs0Ee|o4)1Vby z5=hX2q#;Oho-0?5%3P_>GJdz?B$(68w1f}As|tBh*=Y`-%e+7gmKe`k&ktD}{>b^( z)<_o!oUwioXuB_c2yLY<-Hvzl&Mf*j+zugOKm>EJ0oVm0=>#Y}B@xmt?(A9=77BLU z#c>0Q?HT_1q{6C)K7eM$$pdI<2&ouEAcavCJuCBa0qy9LHcom+Tp|O<=H-SGkGuVV z@FS3c%QxW>&;nB8f2_=5I{_gOn<0;*qE@cxjsSWu=aSNC-}A85qb$yR&exYR?-z?E z>b4V-fO1d*TW9G~*&&`M0lwewq!(0xJlx$}NkYu4)l=7hS&xYRE)*p*h=ykZD@fBmQ{r7qmD<2R~Y!`%22}G-M>t#73q~LHaha%vjXe^x6u>73ET8{6mFXfMd1qs=603flu*AmJY>E}<*V5aLOxKtv@@#1M z%u7t_Yx!xoww#Ak#-(~K?XX@+#|b5`0zVoZEC)0-e#T))cY%n)2<}x6M1d%nrD<7N z7MScU{VAjirYjp&dCLa~+6J zA9K#w#{3u^n{-!M<>7(rgHasQe(XleCbSpcIc}eL2xW&CPs&XmuBR!ndFhuI=@cAf z4F8e8v}DK95cIeY%z*u&Vkq?upq3g3~2 zmd&4$Kmqz5HWDImNe^A$x(%tUk9ZnJZrSC-1%>>Hc?R|wme^Hs&xM%1ZqZu^xYtZ9 zIiaj{UWeQH2>qoT9@kY{It=TPLZ@vM*R;R}%BS~XM^a<+Ud!)|M(P~- zp*xKNNLJ2MXX=De1=!fq^WICc4rRcO8D^x<4#x12ftdW?2uo%@0e3ELxI=-3D^2-f z-6c|7bcNefNH%|4Ue4QK)5@2U@b#PEXkX6B_Er}DRPtOtBZN0hy{xA#(!p3mnO z!e`Lbpy$7EeJ2~E`0ky)gd7%bF!tL({$BMsXPrHgG9Br$sq=C3rrb<8ti4rnWUy+T 
z=-OtdEe!7wuiXIu#?ZB5OV^VQoUh`fZl*~;-$T2*yJ|^Kn*VqzKG1(C(ExBp z6ak!BZyY$#oFdlzN}qkj*&`BRvO1zKT9ovuF!lo*8{_fT{w^d<{=JzraQhHSyi6;wI;9>uzh+hEZq-W4zM6;Qo@0V3L3uUx=q#~q zq!PD!$W}LY10RZ!sUV{gKEMC0{JzS*S;Z%~&(fuGqdpO|ag!>SFy_&?T_I;p2l4|C zqa<`gWUq>?>5j1_CFpTg0hpVqZ{P*uYKbL$ZjO$<8k-9*V=7j zy&mVB-aI6*=QzdyL|Jok%*a(MJ8yE-`V?g(hRQWbm#!X?E%+Z;WDBKjiMy4dwYH}U zfW%K05J|WLL=AW*z9PeO&?Dj35?1U`lLx!^32~X+$&f~9*_CEc-ICyIDwbrdW!k*MWyJ9noi`JNZOyhkml;X+#-#IP0$4p}5IM1I*DlfIl`tGX1W()M7=J^%-4F^9RHqiz!-U?;8ViW!iWDE1 z1^Pv5A$01{I_}zme@73G>zi3xkou1`f(X*J)b2B8LD2o8Q$Xu_*?wN2;qoR_i<2*t zQYYLXpaCn?tGKj7xb(IqHNb-DfUh)MM&+6ze3+~K5~*Fk!VL#kFJlg9Zmj;Yj3>#+ zJF?EM2kV-jAiFN}cC*0g_}~l|T!cN!{D8NE1wG*sJ6i~*+>DrRAQY@Z9yA0xr>fJlDQNgBM@vx@)0q@{9O z`MUyu07n?JqjBkjFU2674;h(84F@AANnbIbdE2hn-Fg3`$yWdjbvRWVb`YDWlB%Q* zhEtNp0VeGNwy1wb<$p#OU><-FI$-9;wE*TMy~a9c@alN`obMu&zV({di_U4!nZ{0_A+dBd z>f86V&Qjp37y_6YJ;a;gFRZlB<1f-=j0{Qh#7zt{%o69@=rC(LQ zKe&^E%sRO7Y#MrW)GLGJ`^-cCa2*TgeyPU}rz9kOwr4@qyht0%Yf^cDprn4F>0G52 z^H0Mx-E;SLl66aUed`EG7?n?8 z(X<)wT(n$vb_^sBhSeu-8_gNC;@?2{%N6C}PZ1^ItN9fSj;4 ztmUseUz}0RVYPR9)4>PG_(DHFwNm`D@(d&kn;1*$fb-W*)RoY!*_0toQKT{~OH zL2u4bW!J=_hIa|oA0^f1v6f`Y8JEc|b;6qL3$HfGUHwAOx(#hqG&8gN*)@|@;%22$ z^X_foHXX~Z+g@okm&DK<5V!iz=+(-5f-Zu*jC>rv>CCmuw;Jh$ z5_l28<2n5c$LCN6x;M!`k4ZZN5A1WQQxnfH3sS!>{~Y9XpgYTh5Ap!^vJ>2y8}I)q zm{8y#^Ts@ebZQhL^mVcklU=EElcqD~@nv?u`9SFJ*kLkB@?;aAE3$yJ2rjN8?7yl) z)t?GK^I)p(nnd+|9Pt0TFj7&$qb0RESdTLlP-|Ta>wg!Gfgj zZ&9-@#<}uAi)B7bUGpvHO(cI3G)J3g#XixU)~|fM!zJDf5(6D-mBO?lpl1sHAVVJQ z=sb<&)CkP4p4S!&`8eOK1eXjT1Z+y0Y<%}x=l!#FGhN!@O=p_4k-ou^v{5*0YUb`t zHR>9L_)e(5))iRUqfJ)IEi2A~D%g*0^Dg?3kHDX>)gQi4;)8IiPX;~pzHZ=*nt;8F zOtw}8&byfigRO+Ya`H>?OxSCiNj_eM7`T5-UF|iTDrzk(7^u1LV)nVi0xAIc=ub9J zc9&({M`ahYgNnGeacA4mVI=b`pN=!k-glV2I)|YtwhK0LXMVk6nY`TE#IXH%7uMV4 zbr>lg6}&-UfVUo9(fKwpyHNDeYa-uL$6d%o-gv?o{gXN(7uOTI^b4QeG8i z4}E{>becNkw1X{`^0kgH0742O;Xax$F18o{i6ZRFOnboRcD?awb8Jbx$t4|t4>)&Kwi literal 0 HcmV?d00001 diff --git a/README.md b/README.md index 04b053c..9ef9564 100644 --- a/README.md +++ b/README.md @@ -136,9 +136,12 @@ Learn the fundamentals of Flash applications. Deploy machine learning models as APIs. - 01_text_generation - LLM inference (Llama, Mistral, etc.) _(coming soon)_ -- 02_image_generation - Stable Diffusion image generation _(coming soon)_ -- 03_embeddings - Text embeddings API _(coming soon)_ -- 04_multimodal - Vision-language models _(coming soon)_ +- **[02_text_to_image](./02_ml_inference/02_text_to_image/)** - Serverless text-to-image generation with FLUX.1-schnell +- **[03_image_to_image](./02_ml_inference/03_image_to_image/)** - Serverless image-to-image transformations with Stable Diffusion +- **[04_text_to_video](./02_ml_inference/04_text_to_video/)** - Serverless text-to-video generation with Diffusers +- **[05_image_to_video](./02_ml_inference/05_image_to_video/)** - Serverless image-to-video animation with Stable Video Diffusion +- 06_embeddings - Text embeddings API _(coming soon)_ +- 07_multimodal - Vision-language models _(coming soon)_ ### 03 - Advanced Workers Production-ready worker patterns. @@ -415,4 +418,3 @@ All examples are continuously tested against Python 3.10-3.14 to ensure compatib ## License MIT License - see [LICENSE](./LICENSE) for details. - diff --git a/uv.lock b/uv.lock index 5e44705..efc2fd5 100644 --- a/uv.lock +++ b/uv.lock @@ -2992,12 +2992,14 @@ name = "runpod-flash-examples" version = "1.0.0" source = { editable = "." 
} dependencies = [ + { name = "fastapi" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pillow" }, { name = "python-multipart" }, { name = "runpod-flash" }, { name = "structlog" }, + { name = "uvicorn" }, ] [package.dev-dependencies] @@ -3014,11 +3016,13 @@ dev = [ [package.metadata] requires-dist = [ + { name = "fastapi", specifier = ">=0.104.0" }, { name = "numpy", specifier = ">=2.0.2" }, { name = "pillow", specifier = ">=10.0.0" }, { name = "python-multipart", specifier = ">=0.0.6" }, { name = "runpod-flash" }, { name = "structlog", specifier = ">=23.0.0" }, + { name = "uvicorn", specifier = ">=0.24.0" }, ] [package.metadata.requires-dev] From 20eea9959d0a796ee5852cf0358f619fcf14d2be Mon Sep 17 00:00:00 2001 From: max4c Date: Sat, 14 Feb 2026 20:00:42 -0800 Subject: [PATCH 2/3] Apply ruff formatting for CI checks --- 02_ml_inference/02_text_to_image/gpu_worker.py | 16 +++++++++------- 02_ml_inference/03_image_to_image/gpu_worker.py | 4 +++- 02_ml_inference/04_text_to_video/demo.py | 4 +--- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/02_ml_inference/02_text_to_image/gpu_worker.py b/02_ml_inference/02_text_to_image/gpu_worker.py index af1ac9e..b776a0c 100644 --- a/02_ml_inference/02_text_to_image/gpu_worker.py +++ b/02_ml_inference/02_text_to_image/gpu_worker.py @@ -100,10 +100,12 @@ class ImageRequest(BaseModel): async def generate(request: ImageRequest): """Generate an image from a text prompt using FLUX.1-schnell.""" hf_token = request.hf_token.strip() or os.environ.get("HF_TOKEN", "") - return await generate_image({ - "prompt": request.prompt, - "width": request.width, - "height": request.height, - "num_steps": request.num_steps, - "hf_token": hf_token, - }) + return await generate_image( + { + "prompt": request.prompt, + "width": request.width, + "height": request.height, + "num_steps": request.num_steps, + "hf_token": hf_token, + } + ) diff --git a/02_ml_inference/03_image_to_image/gpu_worker.py b/02_ml_inference/03_image_to_image/gpu_worker.py index 1103f54..60a6261 100644 --- a/02_ml_inference/03_image_to_image/gpu_worker.py +++ b/02_ml_inference/03_image_to_image/gpu_worker.py @@ -139,5 +139,7 @@ async def transform(request: ImageToImageRequest): raise HTTPException(status_code=500, detail=f"Default image not found: {exc}") from exc result = await get_worker().transform(payload) if result.get("status") != "success": - raise HTTPException(status_code=400, detail=result.get("error", "Image transformation failed")) + raise HTTPException( + status_code=400, detail=result.get("error", "Image transformation failed") + ) return result diff --git a/02_ml_inference/04_text_to_video/demo.py b/02_ml_inference/04_text_to_video/demo.py index 5c84eba..c2f4caa 100644 --- a/02_ml_inference/04_text_to_video/demo.py +++ b/02_ml_inference/04_text_to_video/demo.py @@ -18,9 +18,7 @@ def main() -> None: prompt = ( - sys.argv[1] - if len(sys.argv) > 1 - else "a cinematic drone shot of snowy mountains at sunrise" + sys.argv[1] if len(sys.argv) > 1 else "a cinematic drone shot of snowy mountains at sunrise" ) output_path = Path(sys.argv[2] if len(sys.argv) > 2 else "text_to_video.gif").resolve() From 9ab902868112ff0fca14d8d176401cc804e789f8 Mon Sep 17 00:00:00 2001 From: max4c Date: Sat, 14 Feb 2026 20:04:05 -0800 Subject: [PATCH 3/3] Cache FLUX worker 
pipeline and target ADA_24 GPUs --- .../02_text_to_image/gpu_worker.py | 122 +++++++++++------- 1 file changed, 74 insertions(+), 48 deletions(-) diff --git a/02_ml_inference/02_text_to_image/gpu_worker.py b/02_ml_inference/02_text_to_image/gpu_worker.py index b776a0c..4cd283f 100644 --- a/02_ml_inference/02_text_to_image/gpu_worker.py +++ b/02_ml_inference/02_text_to_image/gpu_worker.py @@ -1,11 +1,11 @@ """Flux Text-to-Image — GPU Worker -One function. One decorator. Images from the cloud. +One warm worker. Cached FLUX pipeline. """ import os -from fastapi import APIRouter +from fastapi import APIRouter, HTTPException from pydantic import BaseModel, Field from runpod_flash import GpuGroup, LiveServerless, remote @@ -14,14 +14,13 @@ # ADA_24 gives us an RTX 4090-class GPU with 24GB — plenty of room. gpu_config = LiveServerless( name="02_02_flux_schnell", - gpus=[GpuGroup.AMPERE_80], + gpus=[GpuGroup.ADA_24], workersMin=1, workersMax=3, idleTimeout=5, ) -# ── The entire inference pipeline in one function ──────────────────── @remote( resource_config=gpu_config, dependencies=[ @@ -33,53 +32,77 @@ "protobuf", ], ) -async def generate_image(input_data: dict) -> dict: - """Generate an image with FLUX.1-schnell on a remote GPU.""" - import base64 - import io - - import torch - from diffusers import FluxPipeline - from huggingface_hub import login - - hf_token = input_data.get("hf_token", "") - if hf_token: - login(token=hf_token) - - prompt = input_data.get("prompt", "a lightning flash above a datacenter") - width = input_data.get("width", 512) - height = input_data.get("height", 512) - num_steps = input_data.get("num_steps", 4) - - pipe = FluxPipeline.from_pretrained( - "black-forest-labs/FLUX.1-schnell", - torch_dtype=torch.bfloat16, - ) - pipe.enable_model_cpu_offload() - - image = pipe( - prompt, - num_inference_steps=num_steps, - width=width, - height=height, - guidance_scale=0.0, - ).images[0] - - buf = io.BytesIO() - image.save(buf, format="PNG") - buf.seek(0) - - return { - "status": "success", - "image_base64": base64.b64encode(buf.read()).decode(), - "prompt": prompt, - "width": width, - "height": height, - } +class FluxWorker: + """Warm FLUX worker that caches the pipeline between requests.""" + + def __init__(self): + import torch + + self._torch = torch + self._model_name = "black-forest-labs/FLUX.1-schnell" + self._pipe = None + + def _ensure_pipeline(self, hf_token: str): + from diffusers import FluxPipeline + from huggingface_hub import login + + if self._pipe is not None: + return + + if hf_token: + login(token=hf_token) + + self._pipe = FluxPipeline.from_pretrained( + self._model_name, + torch_dtype=self._torch.bfloat16, + ) + self._pipe.enable_model_cpu_offload() + + async def generate(self, input_data: dict) -> dict: + import base64 + import io + + hf_token = input_data.get("hf_token", "") + prompt = input_data.get("prompt", "a lightning flash above a datacenter") + width = int(input_data.get("width", 512)) + height = int(input_data.get("height", 512)) + num_steps = int(input_data.get("num_steps", 4)) + + try: + self._ensure_pipeline(hf_token=hf_token) + image = self._pipe( + prompt, + num_inference_steps=num_steps, + width=width, + height=height, + guidance_scale=0.0, + ).images[0] + except Exception as exc: + return {"status": "error", "error": f"Image generation failed: {exc}"} + + buf = io.BytesIO() + image.save(buf, format="PNG") + buf.seek(0) + + return { + "status": "success", + "image_base64": base64.b64encode(buf.read()).decode(), + "prompt": prompt, + "width": 
width, + "height": height, + } # ── FastAPI Router ─────────────────────────────────────────────────── gpu_router = APIRouter() +worker: FluxWorker | None = None + + +def get_worker() -> FluxWorker: + global worker + if worker is None: + worker = FluxWorker() + return worker class ImageRequest(BaseModel): @@ -100,7 +123,7 @@ class ImageRequest(BaseModel): async def generate(request: ImageRequest): """Generate an image from a text prompt using FLUX.1-schnell.""" hf_token = request.hf_token.strip() or os.environ.get("HF_TOKEN", "") - return await generate_image( + result = await get_worker().generate( { "prompt": request.prompt, "width": request.width, @@ -109,3 +132,6 @@ async def generate(request: ImageRequest): "hf_token": hf_token, } ) + if result.get("status") != "success": + raise HTTPException(status_code=400, detail=result.get("error", "Image generation failed")) + return result
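
A minimal client sketch for the endpoint above, assuming the FastAPI app mounts gpu_router at /generate and is served locally on port 8000; the base URL, route path, output filename, and the requests dependency are illustrative assumptions, while the request fields and the status/image_base64 response keys follow the ImageRequest model and worker payload in this patch.

    # Hypothetical client for the cached FLUX worker endpoint.
    # Assumed: the app exposes gpu_router's generate route at
    # http://localhost:8000/generate (path and port are not specified in the patch).
    import base64

    import requests

    resp = requests.post(
        "http://localhost:8000/generate",
        json={
            "prompt": "a lightning flash above a datacenter",
            "width": 512,
            "height": 512,
            "num_steps": 4,
        },
        timeout=600,  # the first call may download FLUX.1-schnell weights, so allow a long timeout
    )
    resp.raise_for_status()  # worker errors surface as HTTP 400 with a detail message
    payload = resp.json()

    # On success the worker returns status, image_base64, prompt, width, and height.
    with open("flux_output.png", "wb") as f:
        f.write(base64.b64decode(payload["image_base64"]))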