$ cat node-template.py
Music Creation Legacy
// Generates a music track from a style description and lyrics. Supports configurable BPM, musical key, time signature, duration (10-300s), and lyrics language. Outputs an MP3 audio file.
Process
Audio
template.py
import copy
import os
import sys
import json
import time
import random
import traceback

# Best-effort bootstrap for the single third-party dependency.  This template
# runs inside a minimal execution container where `requests` may not be
# pre-installed.  NOTE(review): installing at import time requires network
# access and pip in the image — confirm against the runtime environment.
try:
    import requests
except ImportError:
    import subprocess
    subprocess.check_call([sys.executable, "-m", "pip", "install", "requests"])
    import requests

# ComfyUI HTTP API endpoint; overridable via environment for other deployments.
COMFYUI_API_URL = os.getenv("COMFYUI_API_URL", "http://192.168.1.39:8188")
# Directory where the generated MP3 is written (mounted by the node runtime).
OUTPUT_DIR = "/data/output"

# ---------- ACE-Step 1.5 music generation workflow ----------
# Node graph submitted verbatim to ComfyUI's POST /prompt endpoint.
# Key nodes (referenced by id in build_workflow):
#   "3"   KSampler                   — diffusion sampling; seed randomized per run
#   "94"  TextEncodeAceStepAudio1.5  — tags/lyrics/bpm/key/duration conditioning
#   "98"  EmptyAceStep1.5LatentAudio — latent length; must match node 94's duration
#   "107" SaveAudioMP3               — writes the MP3 into ComfyUI's output dir
#   "109"/"110" Text Multiline       — prompt (tags) and lyrics text inputs
WORKFLOW = {
    "3": {
        "inputs": {
            "seed": 31,
            "steps": 8,
            "cfg": 1,
            "sampler_name": "euler",
            "scheduler": "simple",
            "denoise": 1,
            "model": ["78", 0],
            "positive": ["94", 0],
            "negative": ["47", 0],
            "latent_image": ["98", 0],
        },
        "class_type": "KSampler",
        "_meta": {"title": "KSampler"},
    },
    "18": {
        "inputs": {
            "samples": ["3", 0],
            "vae": ["106", 0],
        },
        "class_type": "VAEDecodeAudio",
        "_meta": {"title": "VAE Decode Audio"},
    },
    "47": {
        "inputs": {
            "conditioning": ["94", 0],
        },
        "class_type": "ConditioningZeroOut",
        "_meta": {"title": "ConditioningZeroOut"},
    },
    "78": {
        "inputs": {
            "shift": 3,
            "model": ["104", 0],
        },
        "class_type": "ModelSamplingAuraFlow",
        "_meta": {"title": "ModelSamplingAuraFlow"},
    },
    "94": {
        "inputs": {
            "tags": ["109", 0],
            "lyrics": ["110", 0],
            "seed": 31,
            "bpm": 190,
            "duration": 90,
            "timesignature": "4",
            "language": "en",
            "keyscale": "E minor",
            "generate_audio_codes": True,
            "cfg_scale": 2,
            "temperature": 0.85,
            "top_p": 0.9,
            "top_k": 0,
            "min_p": 0.05,
            "clip": ["105", 0],
        },
        "class_type": "TextEncodeAceStepAudio1.5",
        "_meta": {"title": "TextEncodeAceStepAudio1.5"},
    },
    "98": {
        "inputs": {
            "seconds": 90,
            "batch_size": 1,
        },
        "class_type": "EmptyAceStep1.5LatentAudio",
        "_meta": {"title": "Empty Ace Step 1.5 Latent Audio"},
    },
    "104": {
        "inputs": {
            "unet_name": "acestep_v1.5_turbo.safetensors",
            "weight_dtype": "default",
        },
        "class_type": "UNETLoader",
        "_meta": {"title": "Load Diffusion Model"},
    },
    "105": {
        "inputs": {
            "clip_name1": "qwen_0.6b_ace15.safetensors",
            "clip_name2": "qwen_1.7b_ace15.safetensors",
            "type": "ace",
            "device": "default",
        },
        "class_type": "DualCLIPLoader",
        "_meta": {"title": "DualCLIPLoader"},
    },
    "106": {
        "inputs": {
            "vae_name": "ace_1.5_vae.safetensors",
        },
        "class_type": "VAELoader",
        "_meta": {"title": "Load VAE"},
    },
    "107": {
        "inputs": {
            "filename_prefix": "audio/ComfyUI",
            "quality": "V0",
            "audioUI": "",
            "audio": ["18", 0],
        },
        "class_type": "SaveAudioMP3",
        "_meta": {"title": "Save Audio (MP3)"},
    },
    "109": {
        "inputs": {
            "text": "",
        },
        "class_type": "Text Multiline",
        "_meta": {"title": "Text Multiline (Prompt)"},
    },
    "110": {
        "inputs": {
            "text": "",
        },
        "class_type": "Text Multiline",
        "_meta": {"title": "Text Multiline (Lyrics)"},
    },
}


def build_workflow(
    prompt: str,
    lyrics: str,
    duration: int,
    language: str,
    time_signature: str,
    bpm: int,
    keyscale: str,
) -> dict:
    """Build a music generation workflow with the given parameters.

    Deep-copies the WORKFLOW template (so the module-level dict is never
    mutated) and fills in the user-controlled fields.

    Args:
        prompt: Style/tags description fed to node 109.
        lyrics: Song lyrics fed to node 110.
        duration: Track length in seconds (applied to both nodes 94 and 98).
        language: Lyrics language code for node 94.
        time_signature: Time signature string for node 94 (e.g. "4").
        bpm: Beats per minute for node 94.
        keyscale: Musical key for node 94 (e.g. "E minor").

    Returns:
        A ready-to-submit ComfyUI workflow dict.
    """
    wf = copy.deepcopy(WORKFLOW)

    # Music description / tags (node 109)
    wf["109"]["inputs"]["text"] = prompt

    # Lyrics (node 110)
    wf["110"]["inputs"]["text"] = lyrics

    # ACE-Step encoder settings (node 94)
    wf["94"]["inputs"]["duration"] = duration
    wf["94"]["inputs"]["language"] = language
    wf["94"]["inputs"]["timesignature"] = time_signature
    wf["94"]["inputs"]["bpm"] = bpm
    wf["94"]["inputs"]["keyscale"] = keyscale

    # Latent audio duration (node 98) — keep in sync with node 94
    wf["98"]["inputs"]["seconds"] = duration

    # Randomize seeds (nodes 3 and 94) — the same seed is applied to both so
    # the sampler and the encoder stay consistent with each other.
    seed = random.randint(0, 2**31 - 1)
    wf["3"]["inputs"]["seed"] = seed
    wf["94"]["inputs"]["seed"] = seed

    # Output prefix (node 107)
    wf["107"]["inputs"]["filename_prefix"] = "audio/emblema-music"

    return wf


def submit_prompt(workflow: dict) -> str:
    """Submit workflow to ComfyUI and return prompt_id.

    Raises:
        RuntimeError: On a non-200 response, or when ComfyUI reports
            per-node validation errors (it returns 200 even for those).
    """
    resp = requests.post(
        f"{COMFYUI_API_URL}/prompt",
        json={"prompt": workflow},
        timeout=30,
    )
    if resp.status_code != 200:
        # Prefer the structured JSON error body; fall back to raw text.
        try:
            error_detail = resp.json()
        except Exception:
            error_detail = resp.text
        raise RuntimeError(
            f"ComfyUI /prompt returned {resp.status_code}: "
            f"{json.dumps(error_detail, indent=2) if isinstance(error_detail, dict) else error_detail}"
        )
    data = resp.json()

    # ComfyUI returns 200 even when nodes have validation errors
    node_errors = data.get("node_errors", {})
    if node_errors:
        raise RuntimeError(
            f"ComfyUI workflow has node errors: {json.dumps(node_errors, indent=2)}"
        )

    return data["prompt_id"]


def wait_for_result(prompt_id: str, timeout: int = 900, poll_interval: int = 3) -> dict:
    """Poll ComfyUI history until the prompt completes with outputs.

    Args:
        prompt_id: Id returned by submit_prompt().
        timeout: Overall deadline in seconds before TimeoutError.
        poll_interval: Seconds to sleep between /history polls.

    Returns:
        The history entry for the prompt, including its "outputs".

    Raises:
        RuntimeError: If ComfyUI reports an error status, or the prompt
            completes repeatedly with no outputs (silent node failure).
        TimeoutError: If the deadline elapses without completion.
    """
    deadline = time.time() + timeout
    empty_complete_retries = 0
    max_empty_retries = 3  # grace period for output serialization lag

    while time.time() < deadline:
        resp = requests.get(
            f"{COMFYUI_API_URL}/history/{prompt_id}",
            timeout=10,
        )
        resp.raise_for_status()
        history = resp.json()

        if prompt_id in history:
            prompt_data = history[prompt_id]
            status = prompt_data.get("status", {})

            if status.get("status_str") == "error":
                messages = status.get("messages", [])
                raise RuntimeError(
                    f"ComfyUI prompt failed: {json.dumps(messages, indent=2)}"
                )

            if status.get("completed", False):
                if prompt_data.get("outputs"):
                    return prompt_data

                # Completed but no outputs — retry briefly for race condition
                empty_complete_retries += 1
                if empty_complete_retries >= max_empty_retries:
                    raise RuntimeError(
                        f"ComfyUI prompt completed but produced no outputs. "
                        f"This usually means a node failed silently (missing custom node or model). "
                        f"Status: {json.dumps(status, indent=2)}"
                    )

        time.sleep(poll_interval)

    raise TimeoutError(f"ComfyUI prompt {prompt_id} did not complete within {timeout}s")


def download_output_audio(prompt_data: dict, output_dir: str) -> str:
    """Download the generated audio from ComfyUI.

    Scans the prompt's outputs for the first node that produced audio,
    fetches it via GET /view, and writes it into output_dir.

    Returns:
        The local filename (not the full path) of the saved audio.

    Raises:
        RuntimeError: If no node output contains audio.
    """
    outputs = prompt_data.get("outputs", {})
    for node_id, node_output in outputs.items():
        audio_list = node_output.get("audio") or []
        if audio_list:
            audio_info = audio_list[0]
            filename = audio_info["filename"]
            subfolder = audio_info.get("subfolder", "")
            audio_type = audio_info.get("type", "output")

            resp = requests.get(
                f"{COMFYUI_API_URL}/view",
                params={
                    "filename": filename,
                    "subfolder": subfolder,
                    "type": audio_type,
                },
                timeout=120,
            )
            resp.raise_for_status()

            # BUGFIX: this previously wrote f"generated_(unknown)" — an
            # f-string with no placeholders left over from a broken template
            # substitution, yielding an extensionless, meaningless filename.
            # Derive the name from ComfyUI's own filename so the .mp3
            # extension is preserved.
            out_filename = f"generated_{filename}"
            out_path = os.path.join(output_dir, out_filename)
            with open(out_path, "wb") as f:
                f.write(resp.content)

            return out_filename

    raise RuntimeError(
        f"No output audio found in ComfyUI response. Available outputs: {json.dumps(outputs, indent=2)}"
    )


def main():
    """Entry point: read execution input JSON on stdin, emit output JSON on stdout.

    On any failure, writes an error JSON (message, type, traceback) to
    stderr and exits with status 1.
    """
    try:
        input_json = sys.stdin.read()
        execution_input = json.loads(input_json)
        inputs = execution_input.get("inputs", {})

        prompt = inputs.get("prompt", "")
        lyrics = inputs.get("lyrics", "")
        duration = int(inputs.get("duration", 90))
        language = inputs.get("language", "en")
        time_signature = inputs.get("time_signature", "4")
        bpm = int(inputs.get("bpm", 190))
        keyscale = inputs.get("keyscale", "E minor")

        # Validate required inputs and documented ranges (duration 10-300s).
        if not prompt:
            raise ValueError("Prompt input is required")
        if not lyrics:
            raise ValueError("Lyrics input is required")
        if not (10 <= duration <= 300):
            raise ValueError(f"Duration must be between 10 and 300, got {duration}")
        if not (40 <= bpm <= 300):
            raise ValueError(f"BPM must be between 40 and 300, got {bpm}")

        os.makedirs(OUTPUT_DIR, exist_ok=True)

        # Build workflow, submit, wait, download
        workflow = build_workflow(prompt, lyrics, duration, language, time_signature, bpm, keyscale)
        prompt_id = submit_prompt(workflow)
        prompt_data = wait_for_result(prompt_id)
        out_filename = download_output_audio(prompt_data, OUTPUT_DIR)

        # Log metadata to stderr
        print(
            f"prompt_id={prompt_id}, duration={duration}s, bpm={bpm}, "
            f"key={keyscale}, time_sig={time_signature}, language={language}",
            file=sys.stderr,
        )

        # Flat output — keys match OUTPUT_SCHEMA
        output = {
            "audio": out_filename,
        }
        print(json.dumps(output, indent=2))

    except Exception as e:
        error_output = {
            "error": str(e),
            "errorType": type(e).__name__,
            "traceback": traceback.format_exc(),
        }
        print(json.dumps(error_output), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()