Intelligent Agent Router: LLM-powered natural language routing with suggestion UI
This commit is contained in:
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Agent Router
|
||||
LLM-powered router that reads a natural language request, examines the agent
|
||||
catalog and user's instances, and recommends the best agent to handle it.
|
||||
Supports: run_existing, create_and_run, configure, info, not_possible.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from shared import DASHBOARD_API, api_request
|
||||
from llm_client import complete as llm_complete
|
||||
|
||||
SYSTEM_PROMPT = """You are the Agent Router for the Agent Command Center — a personal automation platform.
|
||||
|
||||
Your job: Given a user's natural language request, decide which agent (or combination of agents) should handle it.
|
||||
|
||||
You have access to:
|
||||
1. The AGENT CATALOG — available agent types that can be enabled
|
||||
2. The user's EXISTING INSTANCES — agents they've already enabled and configured
|
||||
|
||||
ACTIONS you can recommend:
|
||||
- "run_existing" — Run one of the user's existing agent instances. Include the instance_id.
|
||||
- "create_and_run" — The user doesn't have this agent yet. Enable it from the catalog with suggested config, then run it. Include catalog_id and suggested config.
|
||||
- "configure" — Modify an existing instance's settings. Include instance_id and the config changes.
|
||||
- "info" — The user is asking a question, not requesting an action. Answer it directly.
|
||||
- "not_possible" — No agent can handle this request. Explain what's missing.
|
||||
|
||||
RULES:
|
||||
- Always prefer running an existing instance over creating a new one
|
||||
- Be specific about WHY you chose this agent
|
||||
- For "configure" actions, specify exactly what config fields to change
|
||||
- For "info" actions, answer the question directly in your reasoning
|
||||
- If the request is ambiguous, pick the most likely interpretation and explain your reasoning
|
||||
- Keep reasoning concise — 1-3 sentences
|
||||
|
||||
Respond with ONLY valid JSON (no markdown, no code fences):
|
||||
{
|
||||
"action": "run_existing|create_and_run|configure|info|not_possible",
|
||||
"instance_id": null or integer,
|
||||
"catalog_id": null or string,
|
||||
"instance_name": null or string (for create_and_run),
|
||||
"config": null or object,
|
||||
"reasoning": "string explaining the decision"
|
||||
}"""
|
||||
|
||||
|
||||
def build_context(catalog, instances):
    """Build the context string for the LLM prompt.

    Args:
        catalog: List of catalog-entry dicts; each must have 'name', 'id',
            'category', 'description', and may have 'requires_llm' /
            'is_sub_agent' flags.
        instances: List of the user's agent-instance dicts; each must have
            'name', 'id', 'catalog_id', and may have 'status' and 'config'.

    Returns:
        A formatted string: the agent catalog section followed by the
        existing-instances section (or a "No agents enabled yet." note).
    """
    # Accumulate fragments and join once at the end — avoids repeated
    # string concatenation while producing byte-identical output.
    parts = ["=== AGENT CATALOG (available agent types) ===\n"]
    for c in catalog:
        parts.append(f"\n**{c['name']}** (id: {c['id']}, category: {c['category']})")
        parts.append(f"\n {c['description']}")
        if c.get('requires_llm'):
            parts.append("\n [Requires LLM]")
        if c.get('is_sub_agent'):
            parts.append("\n [Sub-agent — called by other agents]")
        parts.append("\n")

    parts.append("\n=== YOUR EXISTING AGENT INSTANCES ===\n")
    if not instances:
        parts.append("\nNo agents enabled yet.\n")
    else:
        for i in instances:
            # Truncate serialized config so an oversized config blob
            # cannot bloat the prompt.
            config_summary = json.dumps(i.get('config', {}))[:200]
            parts.append(
                f"\n**{i['name']}** (instance_id: {i['id']}, "
                f"type: {i['catalog_id']}, status: {i.get('status', '?')})"
            )
            parts.append(f"\n Config: {config_summary}")
            parts.append("\n")

    return "".join(parts)
|
||||
|
||||
|
||||
def route(user_id, request_text):
    """Route a natural language request to the best agent.

    Fetches the agent catalog and the user's instances, asks the LLM for a
    recommendation, and normalizes its JSON reply.

    Args:
        user_id: Dashboard user ID
        request_text: The user's natural language request

    Returns:
        dict with: action, instance_id, catalog_id, config, reasoning —
        plus agent_name, model, tokens_in, tokens_out added here for display.
    """
    # Fetch catalog and user's instances
    catalog = api_request(f"{DASHBOARD_API}/api/catalog/all", retries=1)
    instances = api_request(f"{DASHBOARD_API}/api/instances/by-user/{user_id}", retries=1)

    # Build the prompt: catalog + instances context, then the raw request.
    context = build_context(catalog, instances)
    prompt = f"{context}\n=== USER REQUEST ===\n{request_text}"

    # Call LLM
    result = llm_complete(user_id, prompt, system=SYSTEM_PROMPT, max_tokens=500)
    response_text = result["text"].strip()

    # Parse the JSON response. The prompt forbids code fences, but strip
    # them defensively if the model emits them anyway.
    try:
        if response_text.startswith("```"):
            response_text = response_text.split("```")[1]
            if response_text.startswith("json"):
                response_text = response_text[4:]
            response_text = response_text.strip()
        recommendation = json.loads(response_text)
        if not isinstance(recommendation, dict):
            # A bare list/number/string is valid JSON but not a usable
            # recommendation — treat it like a parse failure.
            raise json.JSONDecodeError("expected a JSON object", response_text, 0)
    except json.JSONDecodeError:
        # Fall back to surfacing the raw reply as an informational answer;
        # include every schema field so downstream consumers see a
        # consistent shape.
        recommendation = {
            "action": "info",
            "reasoning": response_text,
            "instance_id": None,
            "catalog_id": None,
            "instance_name": None,
            "config": None,
        }

    # Attach usage metadata for display/accounting.
    recommendation["model"] = result.get("model", "")
    recommendation["tokens_in"] = result.get("input_tokens", 0)
    recommendation["tokens_out"] = result.get("output_tokens", 0)

    # Resolve agent name for display. Compare ids as strings so a model
    # that returns "3" instead of 3 still matches the instance.
    if recommendation.get("instance_id"):
        wanted = str(recommendation["instance_id"])
        for i in instances:
            if str(i["id"]) == wanted:
                recommendation["agent_name"] = i["name"]
                break
    elif recommendation.get("catalog_id"):
        for c in catalog:
            if c["id"] == recommendation["catalog_id"]:
                recommendation["agent_name"] = c["name"]
                break

    if "agent_name" not in recommendation:
        recommendation["agent_name"] = recommendation.get("catalog_id") or "Unknown"

    return recommendation
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: route a request typed on the command line and
    # pretty-print the recommendation as JSON.
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--user-id", type=int, required=True)
    cli.add_argument("request", nargs="+")
    opts = cli.parse_args()

    request_text = " ".join(opts.request)
    print(json.dumps(route(opts.user_id, request_text), indent=2))
|
||||
Reference in New Issue
Block a user