$ cat node-template.py

T

Text to Video

// Generates a video from a text prompt using the LTX 2.3 22B diffusion transformer. Text-to-video (t2v) only — no input image required.

Process
Video
template.py
import json
import math
import os
import random
import sys
import traceback

from gais import Gais

OUTPUT_DIR = "/data/output"

# Values treated as "not provided" for optional numeric inputs. Deliberately
# NOT plain falsiness: 0 is a meaningful value for e.g. a fixed seed.
_MISSING = (None, "")


def _to_int(value, default):
    """Coerce *value* to int, falling back to *default* when unset/empty.

    Unlike ``int(value or default)`` this preserves an explicit 0.
    """
    return int(value) if value not in _MISSING else default


def _parse_resolution(resolution):
    """Parse an explicit ``'WxH'`` string into positive ``(width, height)``.

    Raises ValueError (chained to the parse error) on malformed input,
    and ValueError on zero/negative dimensions.
    """
    try:
        w_str, h_str = resolution.lower().split("x")
        width = int(w_str)
        height = int(h_str)
    except Exception as e:
        raise ValueError(
            f"Invalid resolution {resolution!r}; expected 'WxH'"
        ) from e
    if width <= 0 or height <= 0:
        raise ValueError(f"Resolution dimensions must be > 0, got {resolution!r}")
    return width, height


def _derive_resolution(aspect_ratio, megapixel):
    """Derive ``(width, height)`` from aspect ratio + megapixel budget.

    The megapixel budget is clamped to [0.25, 8.0] and both dimensions are
    rounded to a multiple of 8 (most video codecs / vendor backends prefer
    8-divisible dims); the backend may snap further.
    """
    target_pixels = max(0.25, min(8.0, megapixel)) * 1_000_000.0
    h_raw = math.sqrt(target_pixels / max(0.1, aspect_ratio))
    height = max(8, int(round(h_raw / 8) * 8))
    width = max(8, int(round((height * aspect_ratio) / 8) * 8))
    return width, height


def _pick_seed(inputs):
    """Return the user's fixed seed when requested, else a random 31-bit seed.

    A fixed seed is honored only when ``seed_mode == "fixed"`` and the seed
    is non-negative; seed 0 is a valid fixed seed.
    """
    seed_input = _to_int(inputs.get("seed", -1), -1)
    if inputs.get("seed_mode", "random") == "fixed" and seed_input >= 0:
        return seed_input
    return random.randint(0, 2**31 - 1)


def _probe_dimensions(path):
    """Best-effort probe of the output video's ``(width, height)`` via PyAV.

    Returns (0, 0) when probing fails (PyAV unavailable, unreadable file,
    no video stream) — chaining metadata is optional, so never raise here.
    """
    try:
        import av
        with av.open(path) as container:
            stream = next(s for s in container.streams if s.type == "video")
            return int(stream.codec_context.width), int(stream.codec_context.height)
    except Exception:
        return 0, 0


def main():
    """Read node inputs from stdin, generate a t2v clip, emit result JSON.

    Logs go to stderr; the machine-readable result object goes to stdout.
    On any failure an error object is written to stderr and the process
    exits with status 1.
    """
    try:
        execution_input = json.loads(sys.stdin.read())
        inputs = execution_input.get("inputs", {})

        prompt = inputs.get("prompt", "")
        negative_prompt = inputs.get("negative_prompt", "")
        # Falsy fallback is deliberate for these two: 0 is never a valid
        # aspect ratio or pixel budget, so ""/0/None all mean "use default".
        aspect_ratio = float(inputs.get("aspect_ratio", 1.7778) or 1.7778)
        megapixel = float(inputs.get("megapixel", 1.0) or 1.0)
        resolution = (inputs.get("resolution") or "").strip()
        # Missing-aware coercion: an empty string must not crash int(), and
        # num_frames=0 must be rejected below rather than silently defaulted.
        num_frames = _to_int(inputs.get("num_frames", 125), 125)
        # fps unset → let the active video-creation backend pick its default
        # (Wan 2.2 = 16, LTX 2.3 = 24). Only pass when the user explicitly set it.
        fps_in = inputs.get("fps")
        fps = int(fps_in) if fps_in not in _MISSING else None

        seed_value = _pick_seed(inputs)

        if not prompt:
            raise ValueError("Prompt is required")
        if num_frames <= 0:
            raise ValueError(f"num_frames must be > 0, got {num_frames}")
        if fps is not None and fps <= 0:
            raise ValueError(f"fps must be > 0, got {fps}")

        # Width/height: explicit `resolution` override wins; otherwise
        # derive from aspect_ratio + megapixel.
        if resolution:
            width, height = _parse_resolution(resolution)
        else:
            width, height = _derive_resolution(aspect_ratio, megapixel)
            print(
                f"[derived] aspect_ratio={aspect_ratio} megapixel={megapixel} "
                f"-> {width}x{height} (backend may snap further)",
                file=sys.stderr,
            )

        # When the user provided fps, derive duration_s locally so the service
        # gets a precise duration. When fps is unset, leave duration_s out too
        # — the backend will compute it from num_frames at its own default fps.
        duration_s = (num_frames / fps) if fps else None

        os.makedirs(OUTPUT_DIR, exist_ok=True)

        fps_label = f"{fps}" if fps else "backend-default"
        dur_label = f"{duration_s:.2f}s" if duration_s else "backend-derived"
        print(
            f"Requesting t2v via gais-video-creation slot: "
            f"{width}x{height} @ {fps_label}fps, num_frames={num_frames} "
            f"({dur_label}), seed={seed_value}",
            file=sys.stderr,
        )

        # Profile-routed: backend (Wan22 / LTX / Helios / Luma / future) is
        # picked by HARDWARE_PROFILE via PROFILE_OVERRIDES in _registry.py.
        result = Gais.video.create_t2v(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            duration_s=duration_s,
            fps=fps,
            seed=seed_value,
            num_frames=num_frames,
        )

        out_filename = "generated_video.mp4"
        out_path = os.path.join(OUTPUT_DIR, out_filename)
        with open(out_path, "wb") as f:
            f.write(result.content)

        inference_time = result.metadata.get("inference_time_ms", "unknown")
        provider = result.metadata.get("provider_name", "local")
        print(
            f"video generated via {provider}: time={inference_time}ms, seed={seed_value}",
            file=sys.stderr,
        )

        # Probe output dimensions and emit aspect_ratio + resolution so
        # downstream nodes can chain.
        probed_w, probed_h = _probe_dimensions(out_path)

        print(json.dumps({
            "video": out_filename,
            "aspect_ratio": round(probed_w / probed_h, 4) if probed_h else 0.0,
            "resolution": f"{probed_w}x{probed_h}",
        }, indent=2))

    except Exception as e:
        err = {
            "error": str(e),
            "errorType": type(e).__name__,
            "traceback": traceback.format_exc(),
        }
        print(json.dumps(err), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

$ git log --oneline

v1.4.1
HEAD
2026-05-07
v1.0.0  2026-04-23