$ cat node-template.py

I

Image to Video

# Animates a first-frame image into a video using the LTX 2.3 22B diffusion transformer guided by a text prompt.

Process
Video
template.py
import os
import sys
import json
import traceback
import random

from gais import Gais

INPUT_DIR = "/data/input"
OUTPUT_DIR = "/data/output"


def main():
    """Image-to-video node entry point.

    Reads an execution payload as JSON from stdin, animates the given
    first-frame image into a video via the profile-routed gais
    video-creation slot, writes the result under OUTPUT_DIR, and prints a
    JSON manifest ({"video", "aspect_ratio", "resolution"}) to stdout so
    downstream nodes can chain. On any failure, emits a JSON error object
    to stderr and exits with status 1.
    """
    try:
        input_json = sys.stdin.read()
        execution_input = json.loads(input_json)
        inputs = execution_input.get("inputs", {})

        prompt = inputs.get("prompt", "")
        negative_prompt = inputs.get("negative_prompt", "")
        # `or <default>` guards against explicit JSON null / "" values.
        aspect_ratio = float(inputs.get("aspect_ratio", 1.7778) or 1.7778)
        megapixel = float(inputs.get("megapixel", 1.0) or 1.0)
        resolution = (inputs.get("resolution") or "").strip()
        # Same null/"" guard as the other numeric inputs (a bare int(...)
        # crashed with TypeError/ValueError on explicit nulls), plus a
        # sanity check since num_frames is divided by fps below.
        num_frames = int(inputs.get("num_frames", 125) or 125)
        if num_frames <= 0:
            raise ValueError(f"num_frames must be > 0, got {num_frames}")
        # fps unset → let the active video-creation backend pick its default
        # (Wan 2.2 = 16, LTX 2.3 = 24). Only pass when the user explicitly set it.
        fps_in = inputs.get("fps")
        fps = int(fps_in) if fps_in not in (None, "") else None

        seed_mode = inputs.get("seed_mode", "random")
        # Seed 0 is a valid fixed seed, so the `or -1` guard used above would
        # be wrong here (0 or -1 == -1 would silently randomize a fixed 0).
        seed_raw = inputs.get("seed", -1)
        seed_input = int(seed_raw) if seed_raw not in (None, "") else -1
        if seed_mode == "fixed" and seed_input >= 0:
            seed_value = seed_input
        else:
            seed_value = random.randint(0, 2**31 - 1)

        if not prompt:
            raise ValueError("Prompt is required")

        image_name = inputs.get("image", "")
        if not image_name:
            raise ValueError("An image is required")
        image_path = os.path.join(INPUT_DIR, image_name)
        if not os.path.exists(image_path):
            raise FileNotFoundError(f"Input image not found: {image_path}")

        # Width/height: explicit `resolution` override wins; otherwise
        # derive from aspect_ratio + megapixel, rounded to a multiple of 8
        # (most video codecs / vendor backends prefer 8-divisible dims).
        if resolution:
            try:
                w_str, h_str = resolution.lower().split("x")
                width = int(w_str)
                height = int(h_str)
            except Exception as e:
                raise ValueError(
                    f"Invalid resolution {resolution!r}; expected 'WxH'"
                ) from e
            # int() happily parses "0" and "-100"; reject them before they
            # reach the backend.
            if width <= 0 or height <= 0:
                raise ValueError(
                    f"Invalid resolution {resolution!r}; dimensions must be > 0"
                )
        else:
            import math
            # Clamp megapixel to a sane [0.25, 8.0] window; clamp the
            # aspect ratio inside the sqrt to avoid division blow-ups.
            target_pixels = max(0.25, min(8.0, megapixel)) * 1_000_000.0
            h_raw = math.sqrt(target_pixels / max(0.1, aspect_ratio))
            height = max(8, int(round(h_raw / 8) * 8))
            width = max(8, int(round((height * aspect_ratio) / 8) * 8))
            print(
                f"[derived] aspect_ratio={aspect_ratio} megapixel={megapixel} "
                f"-> {width}x{height} (backend may snap further)",
                file=sys.stderr,
            )

        if fps is not None and fps <= 0:
            raise ValueError(f"fps must be > 0, got {fps}")
        # When the user provided fps, derive duration_s locally so the service
        # gets a precise duration. When fps is unset, leave duration_s out too
        # — the backend will compute it from num_frames at its own default fps.
        duration_s = (num_frames / fps) if fps else None

        os.makedirs(OUTPUT_DIR, exist_ok=True)

        fps_label = f"{fps}" if fps else "backend-default"
        dur_label = f"{duration_s:.2f}s" if duration_s else "backend-derived"
        print(
            f"Requesting i2v via gais-video-creation slot: "
            f"{width}x{height} @ {fps_label}fps, num_frames={num_frames} "
            f"({dur_label}), seed={seed_value}",
            file=sys.stderr,
        )

        # Profile-routed: backend (Wan22 / LTX / Helios / Luma / future) is
        # picked by HARDWARE_PROFILE via PROFILE_OVERRIDES in _registry.py.
        result = Gais.video.create_i2v(
            first_frame=image_path,
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            duration_s=duration_s,
            fps=fps,
            seed=seed_value,
            num_frames=num_frames,
        )

        out_filename = "generated_video.mp4"
        out_path = os.path.join(OUTPUT_DIR, out_filename)
        with open(out_path, "wb") as f:
            f.write(result.content)

        inference_time = result.metadata.get("inference_time_ms", "unknown")
        provider = result.metadata.get("provider_name", "local")
        print(
            f"video generated via {provider}: time={inference_time}ms, seed={seed_value}",
            file=sys.stderr,
        )

        # Probe output dimensions and emit aspect_ratio + resolution so
        # downstream nodes can chain. Best-effort: a probe failure must not
        # fail the node, so fall back to 0x0.
        try:
            import av
            with av.open(out_path) as _c:
                _s = next(s for s in _c.streams if s.type == "video")
                _w = int(_s.codec_context.width)
                _h = int(_s.codec_context.height)
        except Exception:
            _w, _h = 0, 0

        print(json.dumps({
            "video": out_filename,
            "aspect_ratio": round(_w / _h, 4) if _h else 0.0,
            "resolution": f"{_w}x{_h}",
        }, indent=2))

    except Exception as e:
        # Structured error contract: JSON on stderr, non-zero exit.
        err = {
            "error": str(e),
            "errorType": type(e).__name__,
            "traceback": traceback.format_exc(),
        }
        print(json.dumps(err), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

$ git log --oneline

v3.4.1 (HEAD)  2026-05-07
v3.0.0  2026-04-23
v1.7.0  2026-04-22
v1.2.1  2026-04-09
v1.3.0  2026-03-29
v1.2.0  2026-03-20