$ cat node-template.py

P

Pose Estimation

// Detects human body poses in images and video. Identifies body landmarks, shot types, and camera angles. Supports multi-person detection with configurable thresholds.

Process
#pose-estimation #mediapipe #shot-type #camera-angle #computer-vision #body-detection
template.py
import os
import sys
import json
import traceback
from collections import Counter

from gais import Gais

# Directory where the platform mounts this node's input files.
INPUT_DIR = "/data/input"


def compute_dominant(frames, field):
    """Return the most frequent truthy value of *field* across *frames*.

    Args:
        frames: Iterable of per-frame dicts produced by the pose detector.
        field: Key to tally, e.g. "dominant_shot_type".

    Returns:
        The most common value, or "unknown" when no frame carries one.
    """
    # Falsy values (None, "", 0) are excluded from the tally, matching the
    # original `if value:` guard.
    counts = Counter(
        value for frame in frames if (value := frame.get(field))
    )
    if counts:
        return counts.most_common(1)[0][0]
    return "unknown"


def _parse_inputs(execution_input):
    """Extract and validate node inputs from the execution payload.

    Conversion happens before the media checks so a malformed numeric input
    raises ValueError first, preserving the original error ordering.

    Args:
        execution_input: Parsed JSON payload read from stdin.

    Returns:
        Tuple of (media_path, params) where params holds the keyword
        arguments for Gais.detect.poses.

    Raises:
        ValueError: required "media" input missing, or a numeric input is
            not convertible.
        FileNotFoundError: the referenced media file does not exist.
    """
    inputs = execution_input.get("inputs", {})

    media = inputs.get("media", "")
    params = {
        "num_poses": int(inputs.get("num_poses", 4)),
        "min_detection_confidence": float(inputs.get("min_detection_confidence", 0.5)),
        "min_presence_confidence": float(inputs.get("min_presence_confidence", 0.5)),
        "video_sample_fps": float(inputs.get("video_sample_fps", 2.0)),
    }

    if not media:
        raise ValueError("Media input is required")

    media_path = os.path.join(INPUT_DIR, media)
    if not os.path.exists(media_path):
        raise FileNotFoundError(f"Input file not found: {media_path}")

    return media_path, params


def _build_output(result_data):
    """Aggregate detector results into the flat output dict.

    Keys match OUTPUT_SCHEMA; also logs a completion summary to stderr.

    Args:
        result_data: Dict parsed from the detector's JSON response.

    Returns:
        Flat dict ready for json.dumps; "pose_analysis" is itself a JSON
        string of the per-frame results.
    """
    frames = result_data.get("frames", [])
    total_persons = result_data.get("total_persons_detected", 0)
    total_frames = result_data.get("total_frames", 1)
    model_used = result_data.get("model_used", "pose_landmarker")
    input_type = result_data.get("input_type", "image")
    processing_time = result_data.get("processing_time_ms", 0)

    # Aggregate dominant shot type and camera angle across all frames
    dominant_shot_type = compute_dominant(frames, "dominant_shot_type")
    dominant_camera_angle = compute_dominant(frames, "dominant_camera_angle")

    print(
        f"Pose analysis complete: {total_persons} persons in {total_frames} frame(s), "
        f"shot={dominant_shot_type}, angle={dominant_camera_angle}, "
        f"model={model_used}, type={input_type}, "
        f"processing_time={processing_time}ms",
        file=sys.stderr,
    )

    return {
        "pose_analysis": json.dumps(frames),
        "total_persons": total_persons,
        "total_frames": total_frames,
        "dominant_shot_type": dominant_shot_type,
        "dominant_camera_angle": dominant_camera_angle,
        "model_used": model_used,
        "input_type": input_type,
    }


def main():
    """Read execution input from stdin, run pose detection, print JSON to stdout.

    On any failure, prints a structured error JSON to stderr and exits 1.
    """
    try:
        execution_input = json.loads(sys.stdin.read())
        media_path, params = _parse_inputs(execution_input)

        print(
            f"Analyzing poses: num_poses={params['num_poses']}, "
            f"detection_conf={params['min_detection_confidence']}, "
            f"presence_conf={params['min_presence_confidence']}, "
            f"video_fps={params['video_sample_fps']}",
            file=sys.stderr,
        )

        result = Gais.detect.poses(media=media_path, **params)
        result_data = json.loads(result.content)

        output = _build_output(result_data)
        print(json.dumps(output, indent=2))

    except Exception as e:
        # Boundary handler: surface a structured error for the platform runner.
        error_output = {
            "error": str(e),
            "errorType": type(e).__name__,
            "traceback": traceback.format_exc(),
        }
        print(json.dumps(error_output), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

$ git log --oneline

v1.5.0
HEAD
2026-05-07
v1.2.0 2026-04-09