$ cat node-template.py
K
Knowledge Synthesizer
// Synthesizes an answer from multiple document chunks using an LLM. Connect one or more Document Chunk nodes to the context port.
Process
LLM
template.py
import sys
import json
import traceback
from gais import Gais


def main():
    """Synthesize an answer from document chunks using the configured LLM.

    Reads a JSON payload from stdin (``{"inputs": {...}}``), builds a
    citation-aware synthesis prompt from the connected context chunks, and
    prints the LLM answer as JSON (``{"text": ...}``) on stdout.

    On any failure a structured JSON error (message, type, traceback) is
    written to stderr and the process exits with status 1 so the host
    runtime can mark the node as failed.
    """
    try:
        payload = json.loads(sys.stdin.read())
        inputs = payload.get("inputs", {})

        query = inputs.get("query", "")
        chunks = inputs.get("context", [])
        model = inputs.get("llmModel", "qwen3.5-35b")
        language = inputs.get("language", "Italian")

        # A single upstream connection delivers a bare string; fan-in
        # delivers a list. Treat both uniformly as a list of chunks.
        chunks = [chunks] if isinstance(chunks, str) else chunks

        # Guard clauses: emit a user-facing message and stop when there is
        # nothing to answer, or nothing to answer from.
        if not query:
            print(json.dumps({"text": "No query provided."}))
            return
        if not chunks or all(not c for c in chunks):
            print(json.dumps({"text": "No context chunks available. Connect Document Chunk nodes to provide source material."}))
            return

        # Assemble the numbered source sections in one pass. Empty chunks
        # are skipped, but numbering still reflects the original position.
        context_text = "".join(
            f"--- Source {i} ---\n{chunk}\n\n"
            for i, chunk in enumerate(chunks, 1)
            if chunk
        )

        # Prompt aligned with the RAG agent's citation and grounding rules.
        # Only rule 2 interpolates a value; the rest are plain literals.
        system_prompt = (
            "You are a Knowledge Synthesis assistant. Your job: answer questions using ONLY the provided source materials.\n\n"
            "## Rules\n"
            "1. **Never use general knowledge** — Only use information from the sources below.\n"
            f"2. **Always respond in {language}** — Match the response language exactly.\n"
            "3. **Never fabricate** — If the sources don't contain enough information, say so clearly.\n"
            "4. **Cite every factual claim** using the format: [Source: <chunk_id>]\n"
            "   - The Chunk ID is in each source's header: [Document Path: ... | Chunk ID: <id> | Relevance: ...]\n"
            "   - Multiple sources: [Source: id1, id2]\n"
            "   - Place citation IMMEDIATELY after each claim.\n"
            "5. **Prioritize high-relevance sources** — Sources with higher Relevance scores are more likely to be relevant.\n"
            "6. **Structure your answer** — Use headings, bullet points, or numbered lists when appropriate for clarity.\n"
        )

        user_prompt = f"Question: {query}\n\nSource Materials:\n\n{context_text}"

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        reply = Gais.llm.chat(messages, model=model)
        print(json.dumps({"text": reply.text}))

    except Exception as exc:
        # Top-level boundary: surface the failure as structured JSON on
        # stderr for the host runtime, then fail the node.
        print(
            json.dumps(
                {
                    "error": str(exc),
                    "errorType": type(exc).__name__,
                    "traceback": traceback.format_exc(),
                }
            ),
            file=sys.stderr,
        )
        sys.exit(1)


if __name__ == "__main__":
    main()
v1.3.1
HEAD
2026-05-07
v1.1.0  2026-04-09
v1.0.0  2026-04-08