diff --git a/agents/pirate/__init__.py b/agents/pirate/__init__.py
new file mode 100644
index 0000000..322f30a
--- /dev/null
+++ b/agents/pirate/__init__.py
@@ -0,0 +1,6 @@
+"""The Pirate — conversational read-only media agent for Agent Command Center.
+
+Phase 1 is read-only. Tools can query Sonarr, Radarr, qBittorrent, Overseerr, Plex
+and more, but cannot add, remove, or modify anything. Phase 2 will add writes behind
+confirmation dialogs.
+"""
diff --git a/agents/pirate/prompts.py b/agents/pirate/prompts.py
new file mode 100644
index 0000000..b7bc6b4
--- /dev/null
+++ b/agents/pirate/prompts.py
@@ -0,0 +1,17 @@
+"""System prompts for The Pirate."""
+
+SYSTEM_PROMPT = """You are The Pirate, a conversational media assistant for the Jungbauer family home media lab.
+
+Your job is to answer questions about the state of the family's media: what's downloading, what TV shows and movies are tracked, what's recently watched, how much storage is free, and what's on deck.
+
+Tools available to you cover Sonarr (TV), Radarr (movies), Lidarr (music), Whisparr (adult), qBittorrent (download client), Overseerr (requests), and Plex (watch history). Use them freely — multiple in one turn if the question needs it. Return concise, direct answers.
+
+**Critical rules for Phase 1:**
+1. You are READ-ONLY. You cannot add, remove, pause, resume, delete, or modify anything. If the user asks you to take a write action ("add this movie", "pause that torrent", "delete the old one"), politely say that Phase 1 is read-only and write actions are coming in Phase 2.
+2. Never invent data. If a tool isn't available or returns an error, say so — don't guess.
+3. When citing numbers (speeds, sizes, counts), use the tool output exactly. Round reasonably for display.
+4. Keep answers short. A sentence or two is usually enough. Tables only when they genuinely help (comparing multiple items).
+5. If the user's question is ambiguous, ask one quick clarifying question rather than calling every tool.
+
+Style: casual, dry, mildly pirate-y if it's natural — but not forced. Don't say "Arrr" in every message.
+"""
diff --git a/agents/pirate/runtime.py b/agents/pirate/runtime.py
new file mode 100644
index 0000000..6c01758
--- /dev/null
+++ b/agents/pirate/runtime.py
@@ -0,0 +1,238 @@
+"""The Pirate runtime — multi-turn tool-use loop.
+
+Called as a subprocess from the dashboard's /api/pirate/chat endpoint:
+
+    PIRATE_CONVERSATION_ID= PIRATE_USER_ID= python3 -c \\
+    "from pirate.runtime import chat_turn; chat_turn()"
+
+The conversation state (prior messages) lives in the dashboard DB. This runtime
+loads the thread, runs the LLM loop, appends assistant + tool messages back via
+internal dashboard APIs, and exits.
+"""
+
+import json
+import os
+import sys
+from urllib import request as urlreq, error as urlerror
+
+# Ensure agents/ is importable
+_here = os.path.dirname(os.path.abspath(__file__))
+_agents = os.path.dirname(_here)
+sys.path.insert(0, _agents)
+
+from shared import DASHBOARD_API, api_request  # noqa: E402
+from pirate import prompts  # noqa: E402
+from pirate.tools import build_catalog, find_tool  # noqa: E402
+
+MAX_TOOL_ITERATIONS = 8  # prevent runaway loops
+
+
+# ---------- LLM provider resolution (reuse the dashboard's BYOLLM plumbing) ----------
+
+def get_llm_config(user_id):
+    cfg = api_request(f"{DASHBOARD_API}/api/users/{user_id}/llm", retries=1)
+    if cfg.get("source") == "none":
+        raise RuntimeError("No LLM configured for this user. Set one up via the LLM button in the dashboard.")
+    return cfg
+
+
+# ---------- Anthropic tool-use call ----------
+
+def call_anthropic_with_tools(api_url, api_key, model, system, messages, tools, max_tokens=2000):
+    """Send a messages/tool-use request to Anthropic. Returns the raw response dict."""
+    url = f"{api_url.rstrip('/')}/v1/messages"
+    body = {
+        "model": model,
+        "max_tokens": max_tokens,
+        "system": system,
+        "messages": messages,
+    }
+    # Convert our tool catalog to Anthropic's schema
+    if tools:
+        body["tools"] = [
+            {"name": t["name"], "description": t["description"], "input_schema": t["input_schema"]}
+            for t in tools
+        ]
+    headers = {
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+        "content-type": "application/json",
+    }
+    req = urlreq.Request(url, data=json.dumps(body).encode(), headers=headers, method="POST")
+    try:
+        with urlreq.urlopen(req, timeout=120) as resp:
+            return json.loads(resp.read().decode())
+    except urlerror.HTTPError as e:
+        err_body = e.read().decode() if e.fp else ""
+        raise RuntimeError(f"Anthropic API error {e.code}: {err_body[:500]}")
+
+
+# ---------- Conversation history → Anthropic messages ----------
+
+def build_anthropic_messages(thread_messages):
+    """Translate our DB message rows into Anthropic messages[] format.
+
+    Our DB: each row has role ('user'|'assistant'|'tool'), content, tool_calls, tool_call_id,
+    tool_name, tool_result.
+
+    Anthropic wants:
+    - user turn: {"role": "user", "content": "text"} OR content as array of tool_result blocks
+    - assistant turn with tools: {"role": "assistant", "content": [{"type":"text",...}, {"type":"tool_use","id","name","input"}]}
+    - tool results travel as USER turns with content = [{"type":"tool_result","tool_use_id","content"}]
+    """
+    out = []
+    pending_tool_results = []
+
+    def flush_tool_results():
+        nonlocal pending_tool_results
+        if pending_tool_results:
+            out.append({"role": "user", "content": pending_tool_results})
+            pending_tool_results = []
+
+    for m in thread_messages:
+        role = m["role"]
+        if role == "user":
+            flush_tool_results()
+            out.append({"role": "user", "content": m["content"]})
+        elif role == "assistant":
+            flush_tool_results()
+            blocks = []
+            if m.get("content"):
+                blocks.append({"type": "text", "text": m["content"]})
+            for tc in (m.get("tool_calls") or []):
+                blocks.append({
+                    "type": "tool_use",
+                    "id": tc["id"],
+                    "name": tc["name"],
+                    "input": tc.get("input", {}),
+                })
+            if not blocks:
+                # Anthropic rejects empty assistant content
+                blocks = [{"type": "text", "text": "..."}]
+            out.append({"role": "assistant", "content": blocks})
+        elif role == "tool":
+            tool_content = json.dumps(m.get("tool_result")) if m.get("tool_result") is not None else (m.get("content") or "")
+            pending_tool_results.append({
+                "type": "tool_result",
+                "tool_use_id": m.get("tool_call_id", ""),
+                "content": tool_content,
+            })
+    flush_tool_results()
+    return out
+
+
+# ---------- Internal endpoints for persisting turns ----------
+
+def load_conversation(conv_id):
+    return api_request(f"{DASHBOARD_API}/api/internal/pirate/conversation/{conv_id}", retries=1)
+
+
+def append_message(conv_id, **fields):
+    return api_request(
+        f"{DASHBOARD_API}/api/internal/pirate/conversation/{conv_id}/messages",
+        data=fields, method="POST", retries=1,
+    )
+
+
+# ---------- Main loop ----------
+
+def chat_turn():
+    conv_id = int(os.environ["PIRATE_CONVERSATION_ID"])
+    user_id = int(os.environ["PIRATE_USER_ID"])
+
+    conv = load_conversation(conv_id)
+    thread = conv["messages"]
+
+    llm_cfg = get_llm_config(user_id)
+    provider = llm_cfg.get("provider_type", "anthropic")
+    if provider not in ("anthropic", "litellm"):
+        # Phase 1 supports Anthropic-native tool use. LiteLLM proxies Anthropic cleanly.
+        raise RuntimeError(f"Pirate Phase 1 requires an Anthropic-compatible LLM (got '{provider}').")
+
+    api_url = llm_cfg.get("api_url") or "https://api.anthropic.com"
+    api_key = llm_cfg.get("api_key", "")
+    # Pirate Phase 1 needs a capable tool-use model. Default to Sonnet 4.5;
+    # user can override via the LLM settings panel.
+    model = llm_cfg.get("default_model") or "claude-sonnet-4-5"
+
+    tools = build_catalog()
+    system = prompts.SYSTEM_PROMPT
+
+    for iteration in range(MAX_TOOL_ITERATIONS):
+        # Build the request from the current conversation state
+        anthropic_messages = build_anthropic_messages(thread)
+        if not anthropic_messages:
+            print("Empty conversation; nothing to send", file=sys.stderr)
+            return
+
+        response = call_anthropic_with_tools(
+            api_url, api_key, model, system, anthropic_messages, tools,
+        )
+
+        stop_reason = response.get("stop_reason", "")
+        usage = response.get("usage", {})
+        content_blocks = response.get("content", [])
+
+        # Extract the assistant's text + any tool_use blocks
+        assistant_text = ""
+        tool_calls = []
+        for block in content_blocks:
+            if block.get("type") == "text":
+                assistant_text += block.get("text", "")
+            elif block.get("type") == "tool_use":
+                tool_calls.append({
+                    "id": block.get("id"),
+                    "name": block.get("name"),
+                    "input": block.get("input", {}),
+                })
+
+        # Persist the assistant turn
+        append_message(conv_id, role="assistant",
+                       content=assistant_text,
+                       tool_calls=tool_calls if tool_calls else None,
+                       model=response.get("model", model),
+                       input_tokens=usage.get("input_tokens", 0),
+                       output_tokens=usage.get("output_tokens", 0))
+        # Mirror in-memory so the next iteration sees the new turn
+        thread.append({
+            "role": "assistant",
+            "content": assistant_text,
+            "tool_calls": tool_calls,
+        })
+
+        if stop_reason != "tool_use" or not tool_calls:
+            # Done — LLM returned its final answer
+            return
+
+        # Execute each tool call, persist results
+        for tc in tool_calls:
+            name = tc["name"]
+            args = tc.get("input") or {}
+            tool_def = find_tool(name)
+            if not tool_def:
+                result = {"error": f"Unknown tool: {name}"}
+            else:
+                try:
+                    result = tool_def["fn"](**args)
+                except Exception as e:
+                    result = {"error": f"{type(e).__name__}: {e}"}
+            append_message(conv_id, role="tool",
+                           content="",
+                           tool_call_id=tc["id"],
+                           tool_name=name,
+                           tool_result=result)
+            thread.append({
+                "role": "tool",
+                "tool_call_id": tc["id"],
+                "tool_name": name,
+                "tool_result": result,
+            })
+        # Loop again — feed tool results back to the LLM for the next turn
+
+    # Fell through the iteration cap
+    append_message(conv_id, role="assistant",
+                   content="[Pirate hit the tool iteration limit. Try asking a more focused question.]")
+
+
+if __name__ == "__main__":
+    chat_turn()
diff --git a/agents/pirate/tools/__init__.py b/agents/pirate/tools/__init__.py
new file mode 100644
index 0000000..15f94d2
--- /dev/null
+++ b/agents/pirate/tools/__init__.py
@@ -0,0 +1,25 @@
+"""Pirate tools — read-only functions the LLM can call.
+
+Each tool module exports a TOOLS list of dicts shaped for Anthropic tool use:
+    {"name": str, "description": str, "input_schema": {...}, "read_only": True, "fn": callable}
+
+The runtime loads all modules here and builds a combined catalog. The LLM picks
+which tool to call; the runtime executes it and returns the result as a tool message.
+"""
+
+from . import sonarr, radarr, qbittorrent, storage
+
+
+def build_catalog():
+    """Return a list of all available tools across all modules."""
+    catalog = []
+    for mod in (sonarr, radarr, qbittorrent, storage):
+        catalog.extend(getattr(mod, "TOOLS", []))
+    return catalog
+
+
+def find_tool(name):
+    for t in build_catalog():
+        if t["name"] == name:
+            return t
+    return None
diff --git a/agents/pirate/tools/_common.py b/agents/pirate/tools/_common.py
new file mode 100644
index 0000000..1268b83
--- /dev/null
+++ b/agents/pirate/tools/_common.py
@@ -0,0 +1,72 @@
+"""Shared helpers for pirate tools."""
+
+import sys
+import os
+from urllib import request as urlreq, error as urlerror, parse as urlparse
+import json
+
+# Add parent agents/ to path so we can import shared.py
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+from shared import DASHBOARD_API, api_request
+
+
+def get_service_config(service_name):
+    """Fetch admin-configured creds for a service (sonarr, radarr, qbittorrent, etc.)."""
+    try:
+        return api_request(f"{DASHBOARD_API}/api/internal/services/{service_name}", retries=1)
+    except Exception as e:
+        raise RuntimeError(f"Service '{service_name}' is not configured in admin panel: {e}")
+
+
+def arr_get(service_name, path, params=None):
+    """GET from an *arr API (Sonarr/Radarr/Lidarr/Whisparr — all use the same v3 API shape)."""
+    cfg = get_service_config(service_name)
+    base = cfg["base_url"].rstrip("/")
+    key = cfg["api_key"]
+    if not base or not key:
+        raise RuntimeError(f"{service_name} not configured (base_url + api_key required)")
+    url = f"{base}{path}"
+    if params:
+        url += "?" + urlparse.urlencode(params)
+    req = urlreq.Request(url, headers={"X-Api-Key": key})
+    with urlreq.urlopen(req, timeout=15) as resp:
+        return json.loads(resp.read().decode())
+
+
+def qbit_request(path, method="GET", params=None, data=None, cookies=None):
+    """qBittorrent Web API helper. Login (if password set) returns SID cookie string."""
+    cfg = get_service_config("qbittorrent")
+    base = cfg["base_url"].rstrip("/")
+    if not base:
+        raise RuntimeError("qbittorrent not configured")
+    url = f"{base}{path}"
+    if params:
+        url += "?" + urlparse.urlencode(params)
+    body = None
+    headers = {}
+    if cookies:
+        headers["Cookie"] = cookies
+    if data is not None:
+        body = urlparse.urlencode(data).encode()
+        headers["Content-Type"] = "application/x-www-form-urlencoded"
+    req = urlreq.Request(url, data=body, headers=headers, method=method)
+    with urlreq.urlopen(req, timeout=15) as resp:
+        raw = resp.read().decode()
+        try:
+            return json.loads(raw), resp.headers
+        except json.JSONDecodeError:
+            return raw, resp.headers
+
+
+def qbit_login_if_needed():
+    """If qBit has auth enabled, login and return the SID cookie string. Else return None."""
+    cfg = get_service_config("qbittorrent")
+    user = cfg.get("username", "")
+    pw = cfg.get("password", "")
+    if not user or not pw:
+        return None  # LAN no-auth mode
+    _, hdrs = qbit_request("/api/v2/auth/login", method="POST",
+                           data={"username": user, "password": pw})
+    cookies = hdrs.get("Set-Cookie", "")
+    # Strip everything after the first semicolon (path/expiry)
+    return cookies.split(";")[0] if cookies else None
diff --git a/agents/pirate/tools/qbittorrent.py b/agents/pirate/tools/qbittorrent.py
new file mode 100644
index 0000000..f7b96b8
--- /dev/null
+++ b/agents/pirate/tools/qbittorrent.py
@@ -0,0 +1,88 @@
+"""qBittorrent tools — torrent queue and transfer stats (read-only)."""
+
+from ._common import qbit_login_if_needed, qbit_request
+
+
+def _compact_torrent(t):
+    return {
+        "name": t.get("name"),
+        "state": t.get("state"),  # downloading, uploading, stalledDL, pausedDL, completed, ...
+        "progress_pct": round((t.get("progress", 0) or 0) * 100, 1),
+        "size_gb": round((t.get("size", 0) or 0) / 1e9, 2),
+        "dl_speed_mbps": round((t.get("dlspeed", 0) or 0) / 1e6, 2),
+        "up_speed_mbps": round((t.get("upspeed", 0) or 0) / 1e6, 2),
+        "eta_seconds": t.get("eta"),
+        "category": t.get("category"),
+        "ratio": round(t.get("ratio", 0) or 0, 2),
+        "num_seeds": t.get("num_seeds"),
+        "num_peers": t.get("num_leechs") or t.get("num_peers"),
+        "added_on_epoch": t.get("added_on"),
+    }
+
+
+def qbit_torrents(filter="all", category=None, limit=30):
+    """List torrents. Filter is one of: all, downloading, seeding, completed, paused, stalled, errored."""
+    cookie = qbit_login_if_needed()
+    params = {"filter": filter}
+    if category:
+        params["category"] = category
+    data, _ = qbit_request("/api/v2/torrents/info", params=params, cookies=cookie)
+    if isinstance(data, str):
+        return {"error": data}
+    items = [_compact_torrent(t) for t in data[:limit]]
+    return {"filter": filter, "category": category, "count": len(data), "torrents": items}
+
+
+def qbit_transfer_stats():
+    """Global transfer stats: overall download / upload speed, session totals, DHT nodes."""
+    cookie = qbit_login_if_needed()
+    data, _ = qbit_request("/api/v2/transfer/info", cookies=cookie)
+    if isinstance(data, str):
+        return {"error": data}
+    return {
+        "dl_speed_mbps": round((data.get("dl_info_speed", 0) or 0) / 1e6, 2),
+        "up_speed_mbps": round((data.get("up_info_speed", 0) or 0) / 1e6, 2),
+        "dl_session_gb": round((data.get("dl_info_data", 0) or 0) / 1e9, 2),
+        "up_session_gb": round((data.get("up_info_data", 0) or 0) / 1e9, 2),
+        "dht_nodes": data.get("dht_nodes"),
+        "connection_status": data.get("connection_status"),
+    }
+
+
+def qbit_categories():
+    """Configured torrent categories (e.g., sonarr, radarr, lidarr, whisparr)."""
+    cookie = qbit_login_if_needed()
+    data, _ = qbit_request("/api/v2/torrents/categories", cookies=cookie)
+    return data if isinstance(data, dict) else {"error": str(data)}
+
+
+TOOLS = [
+    {
+        "name": "qbit_torrents",
+        "description": "List torrents in qBittorrent with their progress, state, speed, and ETA. Filter options: all, downloading, seeding, completed, paused, stalled, errored. Category options: sonarr, radarr, lidarr, whisparr (which *arr requested it).",
+        "input_schema": {
+            "type": "object",
+            "properties": {
+                "filter": {"type": "string", "default": "all", "enum": ["all", "downloading", "seeding", "completed", "paused", "stalled", "errored"]},
+                "category": {"type": "string", "description": "Optional category filter"},
+                "limit": {"type": "integer", "default": 30},
+            },
+        },
+        "read_only": True,
+        "fn": qbit_torrents,
+    },
+    {
+        "name": "qbit_transfer_stats",
+        "description": "Current global download / upload speed and session totals from qBittorrent. Use when user asks 'how fast is the download', 'what's my current download speed', 'total downloaded this session'.",
+        "input_schema": {"type": "object", "properties": {}},
+        "read_only": True,
+        "fn": qbit_transfer_stats,
+    },
+    {
+        "name": "qbit_categories",
+        "description": "List the torrent categories configured in qBittorrent. Shows which *arr is pulling what.",
+        "input_schema": {"type": "object", "properties": {}},
+        "read_only": True,
+        "fn": qbit_categories,
+    },
+]
diff --git a/agents/pirate/tools/radarr.py b/agents/pirate/tools/radarr.py
new file mode 100644
index 0000000..d371769
--- /dev/null
+++ b/agents/pirate/tools/radarr.py
@@ -0,0 +1,87 @@
+"""Radarr tools — movie queries (read-only)."""
+
+from ._common import arr_get
+
+
+def _compact_movie(m):
+    return {
+        "id": m.get("id"),
+        "title": m.get("title"),
+        "year": m.get("year"),
+        "monitored": m.get("monitored"),
+        "has_file": m.get("hasFile"),
+        "status": m.get("status"),
+        "size_on_disk_gb": round((m.get("sizeOnDisk", 0) or 0) / 1e9, 1),
+    }
+
+
+def radarr_queue(limit=20):
+    """Movies currently downloading or pending import."""
+    data = arr_get("radarr", "/api/v3/queue", {"pageSize": limit, "includeMovie": "true"})
+    items = []
+    for r in data.get("records", []):
+        items.append({
+            "title": r.get("title"),
+            "movie": r.get("movie", {}).get("title") if r.get("movie") else None,
+            "status": r.get("status"),
+            "timeleft": r.get("timeleft"),
+            "size_gb": round((r.get("size", 0) or 0) / 1e9, 2),
+            "size_left_gb": round((r.get("sizeleft", 0) or 0) / 1e9, 2),
+            "download_client": r.get("downloadClient"),
+        })
+    return {"total_records": data.get("totalRecords", 0), "items": items}
+
+
+def radarr_movie_search(query):
+    """Search the user's Radarr library for a movie by title. Does NOT search indexers."""
+    all_movies = arr_get("radarr", "/api/v3/movie")
+    q = query.lower().strip()
+    hits = [m for m in all_movies if q in (m.get("title") or "").lower()]
+    return {"query": query, "count": len(hits), "movies": [_compact_movie(m) for m in hits[:20]]}
+
+
+def radarr_library_stats():
+    """Summary stats of the Radarr movie library."""
+    all_movies = arr_get("radarr", "/api/v3/movie")
+    on_disk = sum(1 for m in all_movies if m.get("hasFile"))
+    total_size = sum((m.get("sizeOnDisk", 0) or 0) for m in all_movies)
+    missing = [m.get("title") for m in all_movies if not m.get("hasFile") and m.get("monitored")][:10]
+    return {
+        "movie_count": len(all_movies),
+        "movies_on_disk": on_disk,
+        "movies_missing_monitored": len([m for m in all_movies if not m.get("hasFile") and m.get("monitored")]),
+        "total_size_tb": round(total_size / 1e12, 2),
+        "sample_missing": missing,
+    }
+
+
+TOOLS = [
+    {
+        "name": "radarr_queue",
+        "description": "List movies Radarr is currently downloading or importing. Use when the user asks about movie downloads.",
+        "input_schema": {
+            "type": "object",
+            "properties": {"limit": {"type": "integer", "default": 20}},
+        },
+        "read_only": True,
+        "fn": radarr_queue,
+    },
+    {
+        "name": "radarr_movie_search",
+        "description": "Check whether a specific movie is in the user's Radarr library. Does NOT search for new movies to add.",
+        "input_schema": {
+            "type": "object",
+            "properties": {"query": {"type": "string"}},
+            "required": ["query"],
+        },
+        "read_only": True,
+        "fn": radarr_movie_search,
+    },
+    {
+        "name": "radarr_library_stats",
+        "description": "High-level stats about the Radarr movie library: how many movies, how many on disk, total size.",
+        "input_schema": {"type": "object", "properties": {}},
+        "read_only": True,
+        "fn": radarr_library_stats,
+    },
+]
diff --git a/agents/pirate/tools/sonarr.py b/agents/pirate/tools/sonarr.py
new file mode 100644
index 0000000..92927fe
--- /dev/null
+++ b/agents/pirate/tools/sonarr.py
@@ -0,0 +1,127 @@
+"""Sonarr tools — TV show queries (read-only)."""
+
+from ._common import arr_get
+
+
+def _compact_series(s):
+    return {
+        "id": s.get("id"),
+        "title": s.get("title"),
+        "year": s.get("year"),
+        "status": s.get("status"),
+        "monitored": s.get("monitored"),
+        "network": s.get("network"),
+        "episode_count": s.get("statistics", {}).get("episodeCount"),
+        "episode_file_count": s.get("statistics", {}).get("episodeFileCount"),
+        "size_on_disk_gb": round((s.get("statistics", {}).get("sizeOnDisk", 0) or 0) / 1e9, 1),
+    }
+
+
+def sonarr_queue(limit=20):
+    """What TV episodes are currently downloading / pending import."""
+    data = arr_get("sonarr", "/api/v3/queue", {"pageSize": limit, "includeSeries": "true"})
+    items = []
+    for r in data.get("records", []):
+        items.append({
+            "title": r.get("title"),
+            "series": r.get("series", {}).get("title") if r.get("series") else None,
+            "status": r.get("status"),
+            "timeleft": r.get("timeleft"),
+            "size_gb": round((r.get("size", 0) or 0) / 1e9, 2),
+            "size_left_gb": round((r.get("sizeleft", 0) or 0) / 1e9, 2),
+            "protocol": r.get("protocol"),
+            "download_client": r.get("downloadClient"),
+        })
+    return {"total_records": data.get("totalRecords", 0), "items": items}
+
+
+def sonarr_upcoming(days=14):
+    """TV episodes expected to air (or just released) in the next N days."""
+    from datetime import datetime, timedelta, timezone
+    now = datetime.now(timezone.utc)
+    end = now + timedelta(days=days)
+    data = arr_get("sonarr", "/api/v3/calendar", {
+        "start": now.date().isoformat(),
+        "end": end.date().isoformat(),
+        "includeSeries": "true",
+    })
+    out = []
+    for ep in data:
+        out.append({
+            "series": ep.get("series", {}).get("title"),
+            "season": ep.get("seasonNumber"),
+            "episode": ep.get("episodeNumber"),
+            "title": ep.get("title"),
+            "air_date": ep.get("airDateUtc") or ep.get("airDate"),
+            "has_file": ep.get("hasFile"),
+            "monitored": ep.get("monitored"),
+        })
+    return {"window_days": days, "episodes": out}
+
+
+def sonarr_series_search(query):
+    """Look up a series in the user's Sonarr library by partial title match.
+    Note: this does NOT search Prowlarr/indexers — it only searches what's already tracked."""
+    all_series = arr_get("sonarr", "/api/v3/series")
+    q = query.lower().strip()
+    hits = [s for s in all_series if q in (s.get("title") or "").lower()]
+    return {"query": query, "count": len(hits), "series": [_compact_series(s) for s in hits[:20]]}
+
+
+def sonarr_library_stats():
+    """Summary of the Sonarr library: how many series, how many episodes on disk, total size."""
+    all_series = arr_get("sonarr", "/api/v3/series")
+    total_eps = total_on_disk = total_size = 0
+    for s in all_series:
+        st = s.get("statistics", {}) or {}
+        total_eps += st.get("episodeCount", 0) or 0
+        total_on_disk += st.get("episodeFileCount", 0) or 0
+        total_size += st.get("sizeOnDisk", 0) or 0
+    return {
+        "series_count": len(all_series),
+        "episode_total": total_eps,
+        "episodes_on_disk": total_on_disk,
+        "total_size_tb": round(total_size / 1e12, 2),
+    }
+
+
+TOOLS = [
+    {
+        "name": "sonarr_queue",
+        "description": "List TV episodes that Sonarr is currently downloading or has just imported. Use when the user asks 'what's downloading', 'what episodes are coming in', 'is the new episode in yet'.",
+        "input_schema": {
+            "type": "object",
+            "properties": {"limit": {"type": "integer", "description": "Max rows to return (default 20)", "default": 20}},
+        },
+        "read_only": True,
+        "fn": sonarr_queue,
+    },
+    {
+        "name": "sonarr_upcoming",
+        "description": "List TV episodes expected to air in the next N days (default 14). Use when the user asks 'what's coming up this week', 'when does the next episode drop', 'what's airing soon'.",
+        "input_schema": {
+            "type": "object",
+            "properties": {"days": {"type": "integer", "description": "Look ahead window in days (default 14)", "default": 14}},
+        },
+        "read_only": True,
+        "fn": sonarr_upcoming,
+    },
+    {
+        "name": "sonarr_series_search",
+        "description": "Search the user's Sonarr library for a TV series by title. Does NOT search for new series to add — only checks what's already tracked.",
+        "input_schema": {
+            "type": "object",
+            "properties": {"query": {"type": "string", "description": "Partial or full series title"}},
+            "required": ["query"],
+        },
+        "read_only": True,
+        "fn": sonarr_series_search,
+    },
+    {
+        "name": "sonarr_library_stats",
+        "description": "High-level stats about the Sonarr TV library: number of series, episodes on disk, total size.
Use when the user asks 'how big is the TV library' or 'how many shows do we have'.", + "input_schema": {"type": "object", "properties": {}}, + "read_only": True, + "fn": sonarr_library_stats, + }, +] diff --git a/agents/pirate/tools/storage.py b/agents/pirate/tools/storage.py new file mode 100644 index 0000000..6a0c2cb --- /dev/null +++ b/agents/pirate/tools/storage.py @@ -0,0 +1,29 @@ +"""Storage tools — disk space queries across the media stack.""" + +from ._common import arr_get + + +def sonarr_disk_space(): + """Disk space on the paths Sonarr cares about (Plex TV Shows mount, downloads, etc.).""" + data = arr_get("sonarr", "/api/v3/diskspace") + out = [] + for d in data: + out.append({ + "path": d.get("path"), + "label": d.get("label"), + "total_tb": round((d.get("totalSpace", 0) or 0) / 1e12, 2), + "free_tb": round((d.get("freeSpace", 0) or 0) / 1e12, 2), + "free_pct": round(100 * (d.get("freeSpace", 0) or 0) / max((d.get("totalSpace", 0) or 1), 1), 1), + }) + return {"mounts": out} + + +TOOLS = [ + { + "name": "storage_disk_space", + "description": "Report free / total disk space for each mount visible to Sonarr (covers the Plex library mounts, downloads, and any attached shares). 
Use when the user asks 'how much space is left', 'is the NAS full', 'how much room on Darrow'.", + "input_schema": {"type": "object", "properties": {}}, + "read_only": True, + "fn": sonarr_disk_space, + }, +] diff --git a/dashboard/app.py b/dashboard/app.py index 3fa9f76..b19db7f 100644 --- a/dashboard/app.py +++ b/dashboard/app.py @@ -14,7 +14,11 @@ from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from database import get_db, init_db, SessionLocal -from models import User, AgentCatalog, AgentInstance, Run, LLMProvider, Bridge, RouteLog, APIClient, APIClientScope, APIClientCall +from models import ( + User, AgentCatalog, AgentInstance, Run, LLMProvider, Bridge, RouteLog, + APIClient, APIClientScope, APIClientCall, + ServiceConfig, PirateConversation, PirateMessage, +) app = FastAPI(title="Agent Command Center", version="2026.04.12.01") @@ -226,6 +230,18 @@ class APIClientUpdate(BaseModel): description: Optional[str] = None instance_ids: Optional[list[int]] = None # replaces existing scopes if provided +class ServiceConfigUpsert(BaseModel): + service_name: str + base_url: str = "" + api_key: str = "" + username: str = "" + password: str = "" + extra: dict = {} + +class PirateChatRequest(BaseModel): + message: str + conversation_id: Optional[int] = None # None = auto (pick latest non-stale thread, or start new) + # --- Auth Routes --- @@ -1020,6 +1036,105 @@ def admin_api_client_calls(client_id: int, limit: int = 50, admin: dict = Depend } for c in calls] +# --- Admin: Service Configs (system-wide creds shared by agents) --- + +# Services The Pirate agent knows how to talk to. Admin fills in URLs + keys. 
+KNOWN_SERVICES = [ + ("sonarr", "Sonarr", "TV show management"), + ("radarr", "Radarr", "Movie management"), + ("lidarr", "Lidarr", "Music management"), + ("whisparr", "Whisparr", "Adult content management"), + ("prowlarr", "Prowlarr", "Indexer aggregator"), + ("bazarr", "Bazarr", "Subtitle management"), + ("overseerr", "Overseerr", "Media request portal"), + ("qbittorrent", "qBittorrent", "Torrent client"), + ("plex", "Plex", "Media server (watch history, on-deck)"), +] + + +def _serialize_service(s: ServiceConfig, include_secrets: bool = False) -> dict: + return { + "service_name": s.service_name, + "base_url": s.base_url or "", + "api_key": (s.api_key or "") if include_secrets else ("set" if s.api_key else ""), + "username": s.username or "", + "password": ("set" if s.password else "") if not include_secrets else (s.password or ""), + "extra": s.extra or {}, + "updated_at": s.updated_at.isoformat() if s.updated_at else None, + "configured": bool(s.base_url), + } + + +@app.get("/api/admin/services") +def admin_list_services(admin: dict = Depends(require_admin), db: Session = Depends(get_db)): + """List all known services + whether admin has filled them in. 
Secrets are masked.""" + existing = {s.service_name: s for s in db.query(ServiceConfig).all()} + out = [] + for slug, label, desc in KNOWN_SERVICES: + s = existing.get(slug) + if s: + row = _serialize_service(s) + else: + row = { + "service_name": slug, "base_url": "", "api_key": "", "username": "", + "password": "", "extra": {}, "updated_at": None, "configured": False, + } + row["label"] = label + row["description"] = desc + out.append(row) + return out + + +@app.put("/api/admin/services/{service_name}") +def admin_upsert_service(service_name: str, data: ServiceConfigUpsert, + admin: dict = Depends(require_admin), db: Session = Depends(get_db)): + known = {s for s, _, _ in KNOWN_SERVICES} + if service_name not in known: + raise HTTPException(status_code=400, detail=f"Unknown service: {service_name}") + existing = db.query(ServiceConfig).filter(ServiceConfig.service_name == service_name).first() + if existing: + existing.base_url = data.base_url + # Preserve existing secrets if field is empty (lets you edit url without re-typing keys) + if data.api_key: + existing.api_key = data.api_key + if data.password: + existing.password = data.password + existing.username = data.username or existing.username + if data.extra: + existing.extra = data.extra + else: + db.add(ServiceConfig( + service_name=service_name, + base_url=data.base_url, + api_key=data.api_key, + username=data.username, + password=data.password, + extra=data.extra or {}, + )) + db.commit() + return {"service_name": service_name, "status": "saved"} + + +@app.delete("/api/admin/services/{service_name}") +def admin_delete_service(service_name: str, admin: dict = Depends(require_admin), db: Session = Depends(get_db)): + existing = db.query(ServiceConfig).filter(ServiceConfig.service_name == service_name).first() + if not existing: + raise HTTPException(status_code=404) + db.delete(existing) + db.commit() + return {"status": "deleted"} + + +@app.get("/api/internal/services/{service_name}") +def 
internal_get_service(service_name: str, db: Session = Depends(get_db)): + """Internal endpoint used by agents running in subprocess. No auth — same pattern as + the existing /api/instances/{id}/config endpoint. Returns full creds.""" + s = db.query(ServiceConfig).filter(ServiceConfig.service_name == service_name).first() + if not s: + raise HTTPException(status_code=404, detail=f"Service '{service_name}' not configured") + return _serialize_service(s, include_secrets=True) + + # --- Admin: Catalog Management --- class CatalogCreate(BaseModel): @@ -1495,6 +1610,249 @@ def root(session: Optional[str] = Cookie(None)): return FileResponse("static/index.html") +@app.get("/pirate") +def pirate_page(session: Optional[str] = Cookie(None)): + user = get_current_user(session) + if not user: + return RedirectResponse("/login", status_code=302) + return FileResponse("static/pirate.html") + + +# --- The Pirate: conversational media agent --- + +PIRATE_IDLE_RESET_HOURS = 24 + + +def _caller_user_id_for_pirate(caller: dict, db: Session) -> int: + """Pirate is a per-user agent. Resolve the target user from the caller. + - user session: the logged-in user + - api token: the user_id of the pirate instance the token is scoped to + Tokens scoped to zero or multiple pirate instances are rejected. 
+ """ + if caller["kind"] == "user": + return caller["user_id"] + # API token — find pirate instances it can reach + pirate_scopes = db.query(AgentInstance).filter( + AgentInstance.id.in_(caller["allowed_instance_ids"]), + AgentInstance.catalog_id == "pirate", + ).all() + if not pirate_scopes: + raise HTTPException(status_code=403, detail="Token is not scoped to a Pirate instance") + if len(pirate_scopes) > 1: + raise HTTPException(status_code=400, detail="Token scoped to multiple Pirate instances; ambiguous") + return pirate_scopes[0].user_id + + +def _pick_or_create_conversation(db: Session, user_id: int, conversation_id: Optional[int]) -> PirateConversation: + """If conversation_id is provided, load it (and authorize). Otherwise find the user's + most recent conversation; reuse it if active (< idle window), else start a new one.""" + if conversation_id: + conv = db.query(PirateConversation).filter(PirateConversation.id == conversation_id).first() + if not conv: + raise HTTPException(status_code=404, detail="Conversation not found") + if conv.user_id != user_id: + raise HTTPException(status_code=403, detail="Not your conversation") + return conv + # Auto-pick: latest active thread for this user, else new + now = datetime.now(timezone.utc) + latest = db.query(PirateConversation).filter( + PirateConversation.user_id == user_id, + ).order_by(PirateConversation.last_message_at.desc()).first() + if latest: + last = latest.last_message_at + if last and last.tzinfo is None: + last = last.replace(tzinfo=timezone.utc) + if last and (now - last) < timedelta(hours=PIRATE_IDLE_RESET_HOURS): + return latest + conv = PirateConversation(user_id=user_id, title="") + db.add(conv) + db.commit() + db.refresh(conv) + return conv + + +def _serialize_pirate_message(m: PirateMessage) -> dict: + return { + "id": m.id, + "role": m.role, + "content": m.content or "", + "tool_calls": m.tool_calls, + "tool_call_id": m.tool_call_id or "", + "tool_name": m.tool_name or "", + "tool_result": 
m.tool_result, + "model": m.model or "", + "input_tokens": m.input_tokens or 0, + "output_tokens": m.output_tokens or 0, + "created_at": m.created_at.isoformat() if m.created_at else None, + } + + +def _serialize_conversation(conv: PirateConversation, include_messages: bool = False) -> dict: + out = { + "id": conv.id, + "user_id": conv.user_id, + "title": conv.title or "", + "created_at": conv.created_at.isoformat() if conv.created_at else None, + "last_message_at": conv.last_message_at.isoformat() if conv.last_message_at else None, + } + if include_messages: + out["messages"] = [_serialize_pirate_message(m) for m in conv.messages] + return out + + +@app.post("/api/pirate/chat") +def pirate_chat( + data: PirateChatRequest, + caller: dict = Depends(require_user_or_api), + db: Session = Depends(get_db), +): + """Send a message to The Pirate and get a response. Runs the LLM tool-use loop synchronously + (Pirate conversations need the response immediately — no async polling pattern here).""" + user_id = _caller_user_id_for_pirate(caller, db) + conv = _pick_or_create_conversation(db, user_id, data.conversation_id) + + # Persist the user turn + user_msg = PirateMessage(conversation_id=conv.id, role="user", content=data.message) + db.add(user_msg) + if not conv.title: + conv.title = data.message.strip()[:80] + conv.last_message_at = datetime.now(timezone.utc) + db.commit() + + # Invoke the Pirate runtime as a subprocess so it runs in the agent container's Python env + # (where the tool package + LLM client live). Use a helper entry point. 
+ import subprocess + agent_dir = "/app/agents" + env = { + **dict(os.environ), + "PYTHONPATH": agent_dir, + "PIRATE_CONVERSATION_ID": str(conv.id), + "PIRATE_USER_ID": str(user_id), + } + result = subprocess.run( + ["python3", "-c", + "import sys; sys.path.insert(0, '/app/agents'); " + "from pirate.runtime import chat_turn; chat_turn()"], + env=env, cwd=agent_dir, capture_output=True, text=True, timeout=120, + ) + if result.returncode != 0: + err = (result.stderr or result.stdout or "")[-2000:] + err_msg = PirateMessage( + conversation_id=conv.id, role="assistant", + content=f"[Pirate error] {err[-500:]}", + ) + db.add(err_msg) + conv.last_message_at = datetime.now(timezone.utc) + db.commit() + raise HTTPException(status_code=500, detail=f"Pirate runtime failed: {err[-500:]}") + + # Reload conversation to return fresh state (runtime appended assistant + tool messages) + db.refresh(conv) + log_api_client_call(db, caller, "POST /api/pirate/chat", None, 200) + return _serialize_conversation(conv, include_messages=True) + + +@app.get("/api/pirate/conversations") +def pirate_list_conversations( + caller: dict = Depends(require_user_or_api), + db: Session = Depends(get_db), +): + user_id = _caller_user_id_for_pirate(caller, db) + convs = db.query(PirateConversation).filter( + PirateConversation.user_id == user_id, + ).order_by(PirateConversation.last_message_at.desc()).limit(50).all() + return [_serialize_conversation(c, include_messages=False) for c in convs] + + +@app.get("/api/pirate/conversations/{conv_id}") +def pirate_get_conversation( + conv_id: int, + caller: dict = Depends(require_user_or_api), + db: Session = Depends(get_db), +): + user_id = _caller_user_id_for_pirate(caller, db) + conv = db.query(PirateConversation).filter(PirateConversation.id == conv_id).first() + if not conv: + raise HTTPException(status_code=404) + if conv.user_id != user_id: + raise HTTPException(status_code=403) + return _serialize_conversation(conv, include_messages=True) + + 
+@app.post("/api/pirate/conversations/new") +def pirate_new_conversation( + caller: dict = Depends(require_user_or_api), + db: Session = Depends(get_db), +): + """Force-start a new conversation thread (user clicked 'New Chat').""" + user_id = _caller_user_id_for_pirate(caller, db) + conv = PirateConversation(user_id=user_id, title="") + db.add(conv) + db.commit() + db.refresh(conv) + return _serialize_conversation(conv, include_messages=True) + + +@app.delete("/api/pirate/conversations/{conv_id}") +def pirate_delete_conversation( + conv_id: int, + caller: dict = Depends(require_user_or_api), + db: Session = Depends(get_db), +): + user_id = _caller_user_id_for_pirate(caller, db) + conv = db.query(PirateConversation).filter(PirateConversation.id == conv_id).first() + if not conv or conv.user_id != user_id: + raise HTTPException(status_code=404) + db.delete(conv) + db.commit() + return {"status": "deleted"} + + +# Internal endpoints used by the pirate runtime subprocess ------------------ + +@app.get("/api/internal/pirate/conversation/{conv_id}") +def internal_get_conversation(conv_id: int, db: Session = Depends(get_db)): + conv = db.query(PirateConversation).filter(PirateConversation.id == conv_id).first() + if not conv: + raise HTTPException(status_code=404) + return _serialize_conversation(conv, include_messages=True) + + +class InternalMessageCreate(BaseModel): + role: str + content: str = "" + tool_calls: Optional[list] = None + tool_call_id: str = "" + tool_name: str = "" + tool_result: Optional[dict] = None + model: str = "" + input_tokens: int = 0 + output_tokens: int = 0 + + +@app.post("/api/internal/pirate/conversation/{conv_id}/messages") +def internal_append_message(conv_id: int, data: InternalMessageCreate, db: Session = Depends(get_db)): + conv = db.query(PirateConversation).filter(PirateConversation.id == conv_id).first() + if not conv: + raise HTTPException(status_code=404) + msg = PirateMessage( + conversation_id=conv_id, + role=data.role, + 
content=data.content, + tool_calls=data.tool_calls, + tool_call_id=data.tool_call_id, + tool_name=data.tool_name, + tool_result=data.tool_result, + model=data.model, + input_tokens=data.input_tokens, + output_tokens=data.output_tokens, + ) + db.add(msg) + conv.last_message_at = datetime.now(timezone.utc) + db.commit() + return {"id": msg.id} + + # --- Result schemas (what each agent's structured result looks like) --- RESULT_SCHEMAS = { @@ -1557,9 +1915,96 @@ RESULT_SCHEMAS = { "generated_at": "ISO datetime string", }, }, + "pirate": { + "description": "Conversational read-only media agent. Chat with it about Sonarr, Radarr, qBittorrent, Plex. Phase 1 is read-only; Phase 2 adds media request + torrent control writes.", + "shape": { + "note": "Pirate does not use the run/result pattern. It lives behind /api/pirate/chat. Each call returns a full conversation history (messages with role, content, tool_calls, tool_result).", + }, + }, } +# Catalog entries that should exist even if admin never adds them manually. +# Seeded on startup if missing. +SEEDED_CATALOG_ENTRIES = [ + { + "id": "pirate", + "name": "The Pirate", + "description": "Conversational read-only media agent (Phase 1). Chat about TV, movies, music, and downloads; no write actions yet.", + "category": "intelligence", + "config_schema": { + "services": "Services the Pirate can query (configured system-wide in /admin → Services)", + }, + "default_config": {}, + "supports_schedule": False, + "is_sub_agent": False, + "requires_llm": True, + }, +] + + +def _seed_catalog(db: Session): + """Insert baseline catalog entries for agents the platform itself depends on + (like The Pirate). 
Idempotent — only inserts when id is missing.""" + for entry in SEEDED_CATALOG_ENTRIES: + existing = db.query(AgentCatalog).filter(AgentCatalog.id == entry["id"]).first() + if existing: + continue + db.add(AgentCatalog(**entry)) + db.commit() + + +def _seed_pirate_instances(db: Session): + """Every non-admin user gets one Pirate instance automatically. Idempotent.""" + users = db.query(User).filter(User.role != "admin").all() + for u in users: + existing = db.query(AgentInstance).filter( + AgentInstance.user_id == u.id, AgentInstance.catalog_id == "pirate", + ).first() + if existing: + continue + db.add(AgentInstance( + user_id=u.id, + catalog_id="pirate", + name=f"{u.display_name or u.username}'s Pirate", + config={}, + schedule="manual", + status="active", + )) + db.commit() + + +# Defaults loaded from the Media Stack Reference wiki page. Seeded only when the +# service_configs table has no row for that slug — admin-entered values are never overwritten. +_SEED_SERVICE_DEFAULTS = { + "sonarr": {"base_url": "http://192.168.1.203:8989", "api_key": "d494ea4c9ec74d3793a9a84dfae7c4c8"}, + "radarr": {"base_url": "http://192.168.1.203:7878", "api_key": "4df49af333574d1d989e221375b928ef"}, + "lidarr": {"base_url": "http://192.168.1.203:8686", "api_key": "58ad42ac15e44001927226461d606c34"}, + "whisparr": {"base_url": "http://192.168.1.203:6969", "api_key": "99dee8e33f63470bad8b4e41bed6af4a"}, + "prowlarr": {"base_url": "http://192.168.1.203:9696", "api_key": "35bb6983a11d4decbcf4422be3218568"}, + "bazarr": {"base_url": "http://192.168.1.203:6767", "api_key": "4bc3869b8fef0b38c09f3da2754d5595"}, + "overseerr": {"base_url": "http://192.168.1.203:5055", "api_key": "MTc2OTI2OTIwNzU0MDdkYmNhMTg1LTgxZTMtNDdjOC04MTBhLTE2YzFlNjJiNzZhYw=="}, + "qbittorrent": {"base_url": "http://192.168.1.239:8080", "api_key": ""}, # LAN no-auth today + # plex: no public token yet, leave empty for admin to fill in +} + + +def _seed_service_configs(db: Session): + for slug, defaults in 
_SEED_SERVICE_DEFAULTS.items(): + existing = db.query(ServiceConfig).filter(ServiceConfig.service_name == slug).first() + if existing: + continue + db.add(ServiceConfig( + service_name=slug, + base_url=defaults.get("base_url", ""), + api_key=defaults.get("api_key", ""), + username=defaults.get("username", ""), + password=defaults.get("password", ""), + extra=defaults.get("extra", {}), + )) + db.commit() + + def _seed_result_schemas(db: Session): """Populate agent_catalog.result_schema for known agents. Idempotent — only fills empty.""" for catalog_id, schema in RESULT_SCHEMAS.items(): @@ -1574,9 +2019,11 @@ def _seed_result_schemas(db: Session): @app.on_event("startup") def startup(): init_db() - # Seed result schemas for catalog entries that don't have them yet db = SessionLocal() try: + _seed_catalog(db) _seed_result_schemas(db) + _seed_pirate_instances(db) + _seed_service_configs(db) finally: db.close() diff --git a/dashboard/models.py b/dashboard/models.py index 30e8f4a..c592bfb 100644 --- a/dashboard/models.py +++ b/dashboard/models.py @@ -152,3 +152,55 @@ class APIClientCall(Base): endpoint = Column(String, default="") # e.g. "POST /api/instances/2/trigger" status_code = Column(Integer, default=0) called_at = Column(DateTime, default=lambda: datetime.now(timezone.utc)) + + +class ServiceConfig(Base): + """System-wide service credentials used by agents (Sonarr, Radarr, qBit, Plex, etc.). + Keyed by service name (e.g. 'sonarr'). Admin manages these centrally, shared across users.""" + __tablename__ = "service_configs" + + service_name = Column(String, primary_key=True) # 'sonarr', 'radarr', 'qbittorrent', 'plex', ... + base_url = Column(String, default="") # e.g. 
http://192.168.1.203:8989 + api_key = Column(String, default="") # API key or token (varies by service) + username = Column(String, default="") # some services need both (qBit, Plex) + password = Column(String, default="") + extra = Column(JSON, default=dict) # any service-specific extras + updated_at = Column(DateTime, default=lambda: datetime.now(timezone.utc), + onupdate=lambda: datetime.now(timezone.utc)) + + +class PirateConversation(Base): + """A chat thread with The Pirate agent. Auto-resets 24h after last message unless + user explicitly picks an old thread (client decides which conversation_id to use).""" + __tablename__ = "pirate_conversations" + + id = Column(Integer, primary_key=True, autoincrement=True) + user_id = Column(Integer, ForeignKey("users.id", ondelete="CASCADE"), nullable=False) + title = Column(String, default="") # auto-set from first user message + created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc)) + last_message_at = Column(DateTime, default=lambda: datetime.now(timezone.utc)) + + messages = relationship("PirateMessage", back_populates="conversation", + cascade="all, delete-orphan", order_by="PirateMessage.created_at") + + +class PirateMessage(Base): + """One turn in a Pirate conversation. Role is 'user', 'assistant', or 'tool'. + For 'assistant' turns with tool calls, tool_calls holds the LLM's requested calls. 
+ For 'tool' turns, tool_name + tool_result hold the execution output.""" + __tablename__ = "pirate_messages" + + id = Column(Integer, primary_key=True, autoincrement=True) + conversation_id = Column(Integer, ForeignKey("pirate_conversations.id", ondelete="CASCADE"), nullable=False) + role = Column(String, nullable=False) # 'user', 'assistant', 'tool' + content = Column(Text, default="") # text content (user or assistant message) + tool_calls = Column(JSON, nullable=True) # list of {id, name, input} for assistant turns + tool_call_id = Column(String, default="") # matches an assistant.tool_calls[*].id on 'tool' turns + tool_name = Column(String, default="") # which tool was called on 'tool' turns + tool_result = Column(JSON, nullable=True) # structured result of the tool call + model = Column(String, default="") # LLM model that produced this assistant turn + input_tokens = Column(Integer, default=0) + output_tokens = Column(Integer, default=0) + created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc)) + + conversation = relationship("PirateConversation", back_populates="messages") diff --git a/dashboard/static/admin.html b/dashboard/static/admin.html index 23690a3..169b96e 100644 --- a/dashboard/static/admin.html +++ b/dashboard/static/admin.html @@ -66,6 +66,7 @@ tr:hover td{background:var(--surface2)}
LLM Providers
Bridges
API Clients
+
Services
System
@@ -179,6 +180,21 @@ tr:hover td{background:var(--surface2)} + +
+
+

Service Credentials

+

+ Admin-level URLs + API keys shared across all users. The Pirate agent (and future media agents) use these to query Sonarr, Radarr, qBittorrent, Plex, etc. + Leaving api_key / password blank preserves the existing stored value (useful when editing just the URL). +

+
+ + + +
ServiceBase URLAPI Key / PasswordUpdatedActions
+
+
@@ -396,6 +412,49 @@ async function editApiClient(id){ else{const e=await res2.json();alert(e.detail||'Error')} } +// --- Services --- +async function loadServices(){ + const res=await fetch(API+'/api/admin/services'); + if(!res.ok)return; + const services=await res.json(); + document.querySelector('#services-table tbody').innerHTML=services.map(s=>{ + const needsCreds=['qbittorrent','plex'].includes(s.service_name); + const secretField=needsCreds + ? `` + : ``; + const userField=needsCreds + ? `` + : ''; + const statusColor=s.configured?'var(--green)':'var(--yellow)'; + return ` + ${s.label||s.service_name} ${s.configured?'●':'○'} +
${s.description||''}
+ + ${userField}${secretField} + ${s.updated_at?new Date(s.updated_at).toLocaleDateString():'-'} + + `; + }).join(''); +} + +async function saveService(slug){ + const base_url=document.getElementById('svc-url-'+slug).value.trim(); + const keyEl=document.getElementById('svc-key-'+slug); + const pwEl=document.getElementById('svc-pw-'+slug); + const userEl=document.getElementById('svc-user-'+slug); + const body={ + service_name:slug, + base_url, + api_key:keyEl?keyEl.value:'', + username:userEl?userEl.value:'', + password:pwEl?pwEl.value:'', + extra:{}, + }; + const res=await fetch(API+'/api/admin/services/'+slug,{method:'PUT',headers:{'Content-Type':'application/json'},body:JSON.stringify(body)}); + if(res.ok){loadServices()} + else{const e=await res.json();alert(e.detail||'Error')} +} + // --- System --- async function loadSystem(){ const[usersRes,instRes]=await Promise.all([fetch(API+'/api/admin/users'),fetch(API+'/api/health')]); @@ -408,7 +467,7 @@ async function loadSystem(){ } // Init -loadUsers();loadCatalog();loadProviders();loadBridges();loadAllInstancesForPicker().then(loadApiClients);loadSystem(); +loadUsers();loadCatalog();loadProviders();loadBridges();loadAllInstancesForPicker().then(loadApiClients);loadServices();loadSystem(); diff --git a/dashboard/static/index.html b/dashboard/static/index.html index a9d4192..3709186 100644 --- a/dashboard/static/index.html +++ b/dashboard/static/index.html @@ -121,6 +121,7 @@ body{font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,sans-serif;b
0 agents + diff --git a/dashboard/static/pirate.html b/dashboard/static/pirate.html new file mode 100644 index 0000000..9d3c2ae --- /dev/null +++ b/dashboard/static/pirate.html @@ -0,0 +1,223 @@ + + + + + +The Pirate — Agent Command Center + + + +
+

The Pirate Phase 1 — read-only

+
+ + + +
+
+ +
+ +
+
+
+
+ + +
+
Enter to send · Shift+Enter for newline · Phase 1 is read-only (no writes yet)
+
+
+
+ + + +