$ cat node-template.py

Video Creation Wan 2.2

// Generates a video from a starting image using the WAN 2.2 I2V 14B model via a native GPU service. Optionally guided by a text prompt. Outputs an MP4 video file.

Process
Video
template.py
import os
import sys
import json
import subprocess
import time
import traceback
import random

# Best-effort bootstrap: this template may run in a bare interpreter.
try:
    import requests
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "requests"])
    import requests

# Base URL of the GPU-backed WAN 2.2 I2V video-creation service (reachable on
# the "emblema" docker network once the container below is started).
NATIVE_VIDEO_CREATION_SERVICE_URL = os.getenv(
    "NATIVE_VIDEO_CREATION_SERVICE_URL", "http://native-video-creation-service:8105"
)
_EMBLEMA_VERSION = os.getenv("EMBLEMA_VERSION", "dev")
NATIVE_VIDEO_CREATION_SERVICE_IMAGE = os.getenv(
    "NATIVE_VIDEO_CREATION_SERVICE_IMAGE",
    f"emblema/native-video-creation-service:{_EMBLEMA_VERSION}",
)
# Host-side HuggingFace cache, bind-mounted into the container so model
# weights are not re-downloaded on every run.
HF_CACHE_HOST_PATH = os.getenv("HF_CACHE_HOST_PATH", "/root/.cache/huggingface")
CONTAINER_NAME = "native-video-creation-service"
INPUT_DIR = "/data/input"
OUTPUT_DIR = "/data/output"


def start_container():
    """Create and start native-video-creation-service, removing any stale container first.

    Blocks until the service's /health endpoint answers 200, or raises
    RuntimeError after 300s (loading the 14B model can take minutes).
    """
    # Best-effort removal of a leftover container from a previous run;
    # failure (no such container) is intentionally ignored.
    subprocess.run(
        ["docker", "rm", "-f", CONTAINER_NAME],
        capture_output=True, text=True
    )

    hf_token = os.getenv("HUGGINGFACE_TOKEN", "")
    print(f"Creating container {CONTAINER_NAME}...", file=sys.stderr)
    lora_strength = os.getenv("LORA_STRENGTH", "1.0")
    stage_split_step = os.getenv("STAGE_SPLIT_STEP", "2")
    run_cmd = [
        "docker", "run", "-d",
        "--name", CONTAINER_NAME,
        "--network", "emblema",
        "--gpus", "all",
        "-e", "PORT=8105",
        "-e", "DEVICE=cuda",
        "-e", f"HF_TOKEN={hf_token}",
        "-e", f"LORA_STRENGTH={lora_strength}",
        "-e", f"STAGE_SPLIT_STEP={stage_split_step}",
        "-v", f"{HF_CACHE_HOST_PATH}:/root/.cache/huggingface",
        NATIVE_VIDEO_CREATION_SERVICE_IMAGE,
    ]
    result = subprocess.run(run_cmd, capture_output=True, text=True)
    if result.returncode != 0:
        print(f"docker run failed (exit {result.returncode}): {result.stderr}", file=sys.stderr)
        raise RuntimeError(f"Failed to start container: {result.stderr}")

    # Poll health endpoint (300s timeout for large model loading)
    timeout = 300
    interval = 5
    elapsed = 0
    health_url = f"{NATIVE_VIDEO_CREATION_SERVICE_URL}/health"
    while elapsed < timeout:
        try:
            r = requests.get(health_url, timeout=5)
            if r.status_code == 200:
                print(f"Container healthy (waited {elapsed}s).", file=sys.stderr)
                return
        except requests.RequestException:
            # FIX: previously only requests.ConnectionError was caught, so a
            # read timeout while the model was still loading crashed the wait
            # loop instead of retrying. Any transport-level error here just
            # means "not ready yet".
            pass
        time.sleep(interval)
        elapsed += interval

    raise RuntimeError(f"Container did not become healthy within {timeout}s")


def stop_container():
    """Remove the container (best-effort; logs a warning on failure)."""
    try:
        subprocess.run(
            ["docker", "rm", "-f", CONTAINER_NAME],
            capture_output=True, text=True, timeout=30
        )
        print(f"Container {CONTAINER_NAME} removed.", file=sys.stderr)
    except Exception as e:
        # Never let cleanup failure mask the real result/error of the run.
        print(f"Warning: failed to remove container: {e}", file=sys.stderr)


def main():
    """Read execution input JSON from stdin, generate a video, print output JSON.

    Expected inputs: image (required, filename under /data/input), text,
    resolution, num_frames, num_inference_steps, guidance_scale,
    negative_prompt, seed_mode ("fixed"/"random"), seed.
    On success prints {"video": <filename>} to stdout; on any failure prints
    a JSON error object to stderr and exits with status 1.
    """
    try:
        input_json = sys.stdin.read()
        execution_input = json.loads(input_json)
        inputs = execution_input.get("inputs", {})

        image = inputs.get("image", "")
        if not image:
            raise ValueError("Input image is required")

        text = inputs.get("text", "")
        resolution = inputs.get("resolution", "1280x720")
        num_frames = int(inputs.get("num_frames", 129))
        num_inference_steps = int(inputs.get("num_inference_steps", 4))
        guidance_scale = float(inputs.get("guidance_scale", 1.0))
        negative_prompt = inputs.get("negative_prompt", "")

        # A fixed, non-negative seed is honored; anything else gets a fresh
        # random seed so repeated runs produce different videos.
        seed_mode = inputs.get("seed_mode", "random")
        seed_input = int(inputs.get("seed", -1))
        if seed_mode == "fixed" and seed_input >= 0:
            seed_value = seed_input
        else:
            seed_value = random.randint(0, 2**31 - 1)

        local_path = os.path.join(INPUT_DIR, image)
        # Security: "image" comes from untrusted execution input; refuse names
        # that resolve outside INPUT_DIR (e.g. "../../etc/passwd").
        input_root = os.path.realpath(INPUT_DIR)
        if os.path.commonpath([os.path.realpath(local_path), input_root]) != input_root:
            raise ValueError(f"Input image escapes input directory: {image}")
        if not os.path.exists(local_path):
            raise FileNotFoundError(f"Input image not found: {local_path}")

        os.makedirs(OUTPUT_DIR, exist_ok=True)

        # Start the container
        start_container()

        try:
            # Send image and parameters to service; generation can take a long
            # time on large frame counts, hence the 30-minute timeout.
            with open(local_path, "rb") as f:
                resp = requests.post(
                    f"{NATIVE_VIDEO_CREATION_SERVICE_URL}/generate",
                    files={"image": (os.path.basename(local_path), f, "image/png")},
                    data={
                        "text": text,
                        "resolution": resolution,
                        "num_frames": num_frames,
                        "num_inference_steps": num_inference_steps,
                        "guidance_scale": guidance_scale,
                        "seed": seed_value,
                        "negative_prompt": negative_prompt,
                    },
                    timeout=1800,
                )

            if resp.status_code != 200:
                # Prefer the structured error body; fall back to raw text.
                try:
                    error_detail = resp.json()
                except Exception:
                    error_detail = resp.text
                raise RuntimeError(
                    f"Video creation service returned {resp.status_code}: {error_detail}"
                )

            # Save result
            out_filename = "generated_video.mp4"
            out_path = os.path.join(OUTPUT_DIR, out_filename)
            with open(out_path, "wb") as f:
                f.write(resp.content)

            inference_time = resp.headers.get("X-Inference-Time-Ms", "unknown")
            result_resolution = resp.headers.get("X-Resolution", resolution)
            print(f"Video generated: time={inference_time}ms, resolution={result_resolution}", file=sys.stderr)

            output = {
                "video": out_filename,
            }
            print(json.dumps(output, indent=2))

        finally:
            # Always tear the GPU container down, even on generation failure.
            stop_container()

    except Exception as e:
        error_output = {
            "error": str(e),
            "errorType": type(e).__name__,
            "traceback": traceback.format_exc(),
        }
        print(json.dumps(error_output), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()