The Pirate — Phase 1.a: conversational read-only media agent
Ships a chat-based agent at /pirate that LLM-routes user questions to media-stack tools and returns natural-language answers grounded in real data. Foundation built on top of the existing API-tokens + dual-auth infrastructure so other apps (Open WebUI, HA voice, Synap) can consume the same Pirate API. New subsystem (not the standard trigger/result pattern): - pirate_conversations + pirate_messages tables - service_configs table (admin-wide creds shared by media agents) - /api/pirate/chat + /api/pirate/conversations/* (dual-auth: user session OR Bearer token scoped to user's pirate instance) - /api/internal/pirate/* endpoints used by runtime subprocess - /api/admin/services + Services tab in admin.html for cred management - Auto-seeded service_configs on startup from Media Stack Reference defaults (never overwrite admin edits) - Auto-seeded pirate catalog entry + per-user pirate instance on startup Pirate package (agents/pirate/): - prompts.py: system prompt, enforces read-only in Phase 1 - runtime.py: Anthropic-native tool-use loop (max 8 iterations, persists every turn) - tools/_common.py: service_configs fetch + qBit session auth - tools/sonarr.py: queue, upcoming, series_search, library_stats - tools/radarr.py: queue, movie_search, library_stats - tools/qbittorrent.py: torrents, transfer_stats, categories - tools/storage.py: disk_space (via Sonarr diskspace API) - Default model: claude-sonnet-4-5 (Haiku fumbles multi-step chains) Dashboard: - static/pirate.html — full chat UI with conversation sidebar, suggestion chips, inline tool-call visualization, 24h idle reset + New Chat button - Pirate button added to main dashboard header Wiki reorg: Agents / Developer Guides / Plans parent docs, per-agent reference docs, The Pirate doc. API Clients + Calling Agents docs moved under Developer Guides. Working folder: PIRATE_PHASE_1A.md + NEXT_SESSION_PROMPT.md for fast bootstrap. 
Smoke tested end-to-end: real tool calls against qBittorrent (13 active torrents correctly reported) and Sonarr disk-space; multi-turn conversation state preserved across follow-up questions. On deck: Phase 1.b (Lidarr/Whisparr/Overseerr/Plex tools), then 1.d (OWUI pipeline), then 1.c (HA voice).
This commit is contained in:
@@ -0,0 +1,25 @@
|
||||
"""Pirate tools — read-only functions the LLM can call.
|
||||
|
||||
Each tool module exports a TOOLS list of dicts shaped for Anthropic tool use:
|
||||
{"name": str, "description": str, "input_schema": {...}, "read_only": True, "fn": callable}
|
||||
|
||||
The runtime loads all modules here and builds a combined catalog. The LLM picks
|
||||
which tool to call; the runtime executes it and returns the result as a tool message.
|
||||
"""
|
||||
|
||||
from . import sonarr, radarr, qbittorrent, storage
|
||||
|
||||
|
||||
def build_catalog():
    """Return a list of all available tools across all modules.

    Modules missing a TOOLS attribute contribute nothing rather than raising.
    """
    tool_modules = (sonarr, radarr, qbittorrent, storage)
    return [tool for mod in tool_modules for tool in getattr(mod, "TOOLS", [])]
|
||||
|
||||
|
||||
def find_tool(name):
    """Return the tool dict whose "name" matches, or None when unknown."""
    return next((tool for tool in build_catalog() if tool["name"] == name), None)
|
||||
@@ -0,0 +1,72 @@
|
||||
"""Shared helpers for pirate tools."""
|
||||
|
||||
import sys
|
||||
import os
|
||||
from urllib import request as urlreq, error as urlerror, parse as urlparse
|
||||
import json
|
||||
|
||||
# Add parent agents/ to path so we can import shared.py
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
||||
from shared import DASHBOARD_API, api_request
|
||||
|
||||
|
||||
def get_service_config(service_name):
    """Fetch admin-configured creds for a service (sonarr, radarr, qbittorrent, etc.).

    Returns the config dict served by the dashboard's internal services endpoint.
    Raises RuntimeError when the lookup fails, chained to the underlying
    exception so logs show the real HTTP/connection failure, not just the
    user-facing "not configured" message.
    """
    try:
        return api_request(f"{DASHBOARD_API}/api/internal/services/{service_name}", retries=1)
    except Exception as e:
        # `from e` preserves the original traceback for debugging.
        raise RuntimeError(
            f"Service '{service_name}' is not configured in admin panel: {e}"
        ) from e
|
||||
|
||||
|
||||
def arr_get(service_name, path, params=None):
    """GET from an *arr API (Sonarr/Radarr/Lidarr/Whisparr — all use the same v3 API shape).

    Args:
        service_name: key in service_configs (e.g. "sonarr").
        path: API path beginning with "/", e.g. "/api/v3/queue".
        params: optional dict of query-string parameters.

    Returns the decoded JSON response body. Raises RuntimeError when the
    service creds are incomplete; urllib errors propagate on HTTP/network
    failure.
    """
    cfg = get_service_config(service_name)
    # Use .get() so a partially-filled config reaches the friendly error
    # below instead of raising an opaque KeyError on the subscript.
    base = (cfg.get("base_url") or "").rstrip("/")
    key = cfg.get("api_key") or ""
    if not base or not key:
        raise RuntimeError(f"{service_name} not configured (base_url + api_key required)")
    url = f"{base}{path}"
    if params:
        url += "?" + urlparse.urlencode(params)
    req = urlreq.Request(url, headers={"X-Api-Key": key})
    with urlreq.urlopen(req, timeout=15) as resp:
        return json.loads(resp.read().decode())
|
||||
|
||||
|
||||
def qbit_request(path, method="GET", params=None, data=None, cookies=None):
    """qBittorrent Web API helper.

    Args:
        path: API path, e.g. "/api/v2/torrents/info".
        method: HTTP method ("GET" or "POST").
        params: optional dict appended as a query string.
        data: optional dict sent form-encoded as the request body.
        cookies: cookie string to attach (e.g. the SID from login).

    Returns (payload, headers): payload is parsed JSON when the response
    decodes as JSON, otherwise the raw response text. Headers are returned
    so callers can read Set-Cookie after login.
    """
    cfg = get_service_config("qbittorrent")
    # .get() keeps a half-filled config from raising KeyError before the
    # friendly "not configured" error below.
    base = (cfg.get("base_url") or "").rstrip("/")
    if not base:
        raise RuntimeError("qbittorrent not configured")
    url = f"{base}{path}"
    if params:
        url += "?" + urlparse.urlencode(params)
    body = None
    headers = {}
    if cookies:
        headers["Cookie"] = cookies
    if data is not None:
        body = urlparse.urlencode(data).encode()
        headers["Content-Type"] = "application/x-www-form-urlencoded"
    req = urlreq.Request(url, data=body, headers=headers, method=method)
    with urlreq.urlopen(req, timeout=15) as resp:
        raw = resp.read().decode()
        try:
            return json.loads(raw), resp.headers
        except json.JSONDecodeError:
            # Not JSON — hand the raw text back so callers can surface it.
            return raw, resp.headers
|
||||
|
||||
|
||||
def qbit_login_if_needed():
    """Log in to qBittorrent when auth is enabled.

    Returns the SID cookie string to pass on subsequent requests, or None
    when no username/password is configured (LAN no-auth mode) or when the
    login response carried no Set-Cookie header.
    """
    cfg = get_service_config("qbittorrent")
    username = cfg.get("username", "")
    password = cfg.get("password", "")
    if not (username and password):
        return None  # LAN no-auth mode
    _, headers = qbit_request(
        "/api/v2/auth/login",
        method="POST",
        data={"username": username, "password": password},
    )
    set_cookie = headers.get("Set-Cookie", "")
    if not set_cookie:
        return None
    # Keep only the cookie pair itself; drop path/expiry attributes.
    return set_cookie.split(";")[0]
|
||||
@@ -0,0 +1,88 @@
|
||||
"""qBittorrent tools — torrent queue and transfer stats (read-only)."""
|
||||
|
||||
from ._common import qbit_login_if_needed, qbit_request
|
||||
|
||||
|
||||
def _compact_torrent(t):
|
||||
return {
|
||||
"name": t.get("name"),
|
||||
"state": t.get("state"), # downloading, uploading, stalledDL, pausedDL, completed, ...
|
||||
"progress_pct": round((t.get("progress", 0) or 0) * 100, 1),
|
||||
"size_gb": round((t.get("size", 0) or 0) / 1e9, 2),
|
||||
"dl_speed_mbps": round((t.get("dlspeed", 0) or 0) / 1e6, 2),
|
||||
"up_speed_mbps": round((t.get("upspeed", 0) or 0) / 1e6, 2),
|
||||
"eta_seconds": t.get("eta"),
|
||||
"category": t.get("category"),
|
||||
"ratio": round(t.get("ratio", 0) or 0, 2),
|
||||
"num_seeds": t.get("num_seeds"),
|
||||
"num_peers": t.get("num_leechs") or t.get("num_peers"),
|
||||
"added_on_epoch": t.get("added_on"),
|
||||
}
|
||||
|
||||
|
||||
def qbit_torrents(filter="all", category=None, limit=30):
    """List torrents. Filter is one of: all, downloading, seeding, completed, paused, stalled, errored."""
    cookie = qbit_login_if_needed()
    query = {"filter": filter}
    if category:
        query["category"] = category
    data, _ = qbit_request("/api/v2/torrents/info", params=query, cookies=cookie)
    if isinstance(data, str):
        # Non-JSON reply (e.g. auth failure text) — surface it as an error.
        return {"error": data}
    return {
        "filter": filter,
        "category": category,
        "count": len(data),  # total matches, before the limit cut
        "torrents": [_compact_torrent(t) for t in data[:limit]],
    }
|
||||
|
||||
|
||||
def qbit_transfer_stats():
    """Global transfer stats: overall download / upload speed, session totals, DHT nodes."""
    cookie = qbit_login_if_needed()
    data, _ = qbit_request("/api/v2/transfer/info", cookies=cookie)
    if isinstance(data, str):
        return {"error": data}

    def _scaled(key, divisor):
        # Missing/None values count as 0; scale bytes to MB/GB-ish units.
        return round((data.get(key, 0) or 0) / divisor, 2)

    return {
        "dl_speed_mbps": _scaled("dl_info_speed", 1e6),
        "up_speed_mbps": _scaled("up_info_speed", 1e6),
        "dl_session_gb": _scaled("dl_info_data", 1e9),
        "up_session_gb": _scaled("up_info_data", 1e9),
        "dht_nodes": data.get("dht_nodes"),
        "connection_status": data.get("connection_status"),
    }
|
||||
|
||||
|
||||
def qbit_categories():
    """Configured torrent categories (e.g., sonarr, radarr, lidarr, whisparr)."""
    cookie = qbit_login_if_needed()
    data, _ = qbit_request("/api/v2/torrents/categories", cookies=cookie)
    if isinstance(data, dict):
        return data
    # Anything else (plain-text error reply) is wrapped as an error dict.
    return {"error": str(data)}
|
||||
|
||||
|
||||
# Anthropic tool-use catalog for this module. Each entry pairs the JSON schema
# sent to the model with the local "fn" the runtime invokes on a tool_use turn.
TOOLS = [
    {
        "name": "qbit_torrents",
        "description": "List torrents in qBittorrent with their progress, state, speed, and ETA. Filter options: all, downloading, seeding, completed, paused, stalled, errored. Category options: sonarr, radarr, lidarr, whisparr (which *arr requested it).",
        "input_schema": {
            "type": "object",
            "properties": {
                "filter": {"type": "string", "default": "all", "enum": ["all", "downloading", "seeding", "completed", "paused", "stalled", "errored"]},
                "category": {"type": "string", "description": "Optional category filter"},
                "limit": {"type": "integer", "default": 30},
            },
        },
        "read_only": True,  # Phase 1: no mutating tools are exposed
        "fn": qbit_torrents,
    },
    {
        "name": "qbit_transfer_stats",
        "description": "Current global download / upload speed and session totals from qBittorrent. Use when user asks 'how fast is the download', 'what's my current download speed', 'total downloaded this session'.",
        "input_schema": {"type": "object", "properties": {}},  # no arguments
        "read_only": True,
        "fn": qbit_transfer_stats,
    },
    {
        "name": "qbit_categories",
        "description": "List the torrent categories configured in qBittorrent. Shows which *arr is pulling what.",
        "input_schema": {"type": "object", "properties": {}},  # no arguments
        "read_only": True,
        "fn": qbit_categories,
    },
]
|
||||
@@ -0,0 +1,87 @@
|
||||
"""Radarr tools — movie queries (read-only)."""
|
||||
|
||||
from ._common import arr_get
|
||||
|
||||
|
||||
def _compact_movie(m):
|
||||
return {
|
||||
"id": m.get("id"),
|
||||
"title": m.get("title"),
|
||||
"year": m.get("year"),
|
||||
"monitored": m.get("monitored"),
|
||||
"has_file": m.get("hasFile"),
|
||||
"status": m.get("status"),
|
||||
"size_on_disk_gb": round((m.get("sizeOnDisk", 0) or 0) / 1e9, 1),
|
||||
}
|
||||
|
||||
|
||||
def radarr_queue(limit=20):
    """Movies currently downloading or pending import."""
    data = arr_get("radarr", "/api/v3/queue", {"pageSize": limit, "includeMovie": "true"})
    items = []
    for rec in data.get("records", []):
        movie = rec.get("movie")
        items.append({
            "title": rec.get("title"),
            # The movie's own title, when Radarr attached the movie record.
            "movie": movie.get("title") if movie else None,
            "status": rec.get("status"),
            "timeleft": rec.get("timeleft"),
            "size_gb": round((rec.get("size", 0) or 0) / 1e9, 2),
            "size_left_gb": round((rec.get("sizeleft", 0) or 0) / 1e9, 2),
            "download_client": rec.get("downloadClient"),
        })
    return {"total_records": data.get("totalRecords", 0), "items": items}
|
||||
|
||||
|
||||
def radarr_movie_search(query):
    """Search the user's Radarr library for a movie by title. Does NOT search indexers."""
    library = arr_get("radarr", "/api/v3/movie")
    needle = query.lower().strip()
    matches = [m for m in library if needle in (m.get("title") or "").lower()]
    return {
        "query": query,
        "count": len(matches),  # total hits, before the 20-row cap
        "movies": [_compact_movie(m) for m in matches[:20]],
    }
|
||||
|
||||
|
||||
def radarr_library_stats():
    """Summary stats of the Radarr movie library.

    Returns counts of movies (total / on disk / missing-but-monitored),
    total library size in TB, and a sample of up to 10 missing titles.
    """
    all_movies = arr_get("radarr", "/api/v3/movie")
    on_disk = sum(1 for m in all_movies if m.get("hasFile"))
    total_size = sum((m.get("sizeOnDisk", 0) or 0) for m in all_movies)
    # Compute the missing-monitored list once and reuse it for both the count
    # and the sample; the original filtered the whole library twice.
    missing_titles = [m.get("title") for m in all_movies
                      if not m.get("hasFile") and m.get("monitored")]
    return {
        "movie_count": len(all_movies),
        "movies_on_disk": on_disk,
        "movies_missing_monitored": len(missing_titles),
        "total_size_tb": round(total_size / 1e12, 2),
        "sample_missing": missing_titles[:10],
    }
|
||||
|
||||
|
||||
# Anthropic tool-use catalog for this module. Each entry pairs the JSON schema
# sent to the model with the local "fn" the runtime invokes on a tool_use turn.
TOOLS = [
    {
        "name": "radarr_queue",
        "description": "List movies Radarr is currently downloading or importing. Use when the user asks about movie downloads.",
        "input_schema": {
            "type": "object",
            "properties": {"limit": {"type": "integer", "default": 20}},
        },
        "read_only": True,  # Phase 1: no mutating tools are exposed
        "fn": radarr_queue,
    },
    {
        "name": "radarr_movie_search",
        "description": "Check whether a specific movie is in the user's Radarr library. Does NOT search for new movies to add.",
        "input_schema": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
        "read_only": True,
        "fn": radarr_movie_search,
    },
    {
        "name": "radarr_library_stats",
        "description": "High-level stats about the Radarr movie library: how many movies, how many on disk, total size.",
        "input_schema": {"type": "object", "properties": {}},  # no arguments
        "read_only": True,
        "fn": radarr_library_stats,
    },
]
|
||||
@@ -0,0 +1,127 @@
|
||||
"""Sonarr tools — TV show queries (read-only)."""
|
||||
|
||||
from ._common import arr_get
|
||||
|
||||
|
||||
def _compact_series(s):
|
||||
return {
|
||||
"id": s.get("id"),
|
||||
"title": s.get("title"),
|
||||
"year": s.get("year"),
|
||||
"status": s.get("status"),
|
||||
"monitored": s.get("monitored"),
|
||||
"network": s.get("network"),
|
||||
"episode_count": s.get("statistics", {}).get("episodeCount"),
|
||||
"episode_file_count": s.get("statistics", {}).get("episodeFileCount"),
|
||||
"size_on_disk_gb": round((s.get("statistics", {}).get("sizeOnDisk", 0) or 0) / 1e9, 1),
|
||||
}
|
||||
|
||||
|
||||
def sonarr_queue(limit=20):
    """What TV episodes are currently downloading / pending import."""
    data = arr_get("sonarr", "/api/v3/queue", {"pageSize": limit, "includeSeries": "true"})
    items = []
    for rec in data.get("records", []):
        series = rec.get("series")
        items.append({
            "title": rec.get("title"),
            # The parent series title, when Sonarr attached the series record.
            "series": series.get("title") if series else None,
            "status": rec.get("status"),
            "timeleft": rec.get("timeleft"),
            "size_gb": round((rec.get("size", 0) or 0) / 1e9, 2),
            "size_left_gb": round((rec.get("sizeleft", 0) or 0) / 1e9, 2),
            "protocol": rec.get("protocol"),
            "download_client": rec.get("downloadClient"),
        })
    return {"total_records": data.get("totalRecords", 0), "items": items}
|
||||
|
||||
|
||||
def sonarr_upcoming(days=14):
    """TV episodes expected to air (or just released) in the next N days."""
    from datetime import datetime, timedelta, timezone

    today = datetime.now(timezone.utc)
    window = {
        "start": today.date().isoformat(),
        "end": (today + timedelta(days=days)).date().isoformat(),
        "includeSeries": "true",
    }
    episodes = []
    for ep in arr_get("sonarr", "/api/v3/calendar", window):
        episodes.append({
            "series": ep.get("series", {}).get("title"),
            "season": ep.get("seasonNumber"),
            "episode": ep.get("episodeNumber"),
            "title": ep.get("title"),
            # Prefer the UTC timestamp; fall back to the bare date field.
            "air_date": ep.get("airDateUtc") or ep.get("airDate"),
            "has_file": ep.get("hasFile"),
            "monitored": ep.get("monitored"),
        })
    return {"window_days": days, "episodes": episodes}
|
||||
|
||||
|
||||
def sonarr_series_search(query):
    """Look up a series in the user's Sonarr library by partial title match.

    Note: this does NOT search Prowlarr/indexers — it only searches what's
    already tracked.
    """
    library = arr_get("sonarr", "/api/v3/series")
    needle = query.lower().strip()
    matches = [s for s in library if needle in (s.get("title") or "").lower()]
    return {
        "query": query,
        "count": len(matches),  # total hits, before the 20-row cap
        "series": [_compact_series(s) for s in matches[:20]],
    }
|
||||
|
||||
|
||||
def sonarr_library_stats():
    """Summary of the Sonarr library: how many series, how many episodes on disk, total size."""
    all_series = arr_get("sonarr", "/api/v3/series")
    # Normalize per-series statistics once; a None statistics field becomes {}.
    stats = [(s.get("statistics", {}) or {}) for s in all_series]
    return {
        "series_count": len(all_series),
        "episode_total": sum(st.get("episodeCount", 0) or 0 for st in stats),
        "episodes_on_disk": sum(st.get("episodeFileCount", 0) or 0 for st in stats),
        "total_size_tb": round(sum(st.get("sizeOnDisk", 0) or 0 for st in stats) / 1e12, 2),
    }
|
||||
|
||||
|
||||
# Anthropic tool-use catalog for this module. Each entry pairs the JSON schema
# sent to the model with the local "fn" the runtime invokes on a tool_use turn.
TOOLS = [
    {
        "name": "sonarr_queue",
        "description": "List TV episodes that Sonarr is currently downloading or has just imported. Use when the user asks 'what's downloading', 'what episodes are coming in', 'is the new episode in yet'.",
        "input_schema": {
            "type": "object",
            "properties": {"limit": {"type": "integer", "description": "Max rows to return (default 20)", "default": 20}},
        },
        "read_only": True,  # Phase 1: no mutating tools are exposed
        "fn": sonarr_queue,
    },
    {
        "name": "sonarr_upcoming",
        "description": "List TV episodes expected to air in the next N days (default 14). Use when the user asks 'what's coming up this week', 'when does the next episode drop', 'what's airing soon'.",
        "input_schema": {
            "type": "object",
            "properties": {"days": {"type": "integer", "description": "Look ahead window in days (default 14)", "default": 14}},
        },
        "read_only": True,
        "fn": sonarr_upcoming,
    },
    {
        "name": "sonarr_series_search",
        "description": "Search the user's Sonarr library for a TV series by title. Does NOT search for new series to add — only checks what's already tracked.",
        "input_schema": {
            "type": "object",
            "properties": {"query": {"type": "string", "description": "Partial or full series title"}},
            "required": ["query"],
        },
        "read_only": True,
        "fn": sonarr_series_search,
    },
    {
        "name": "sonarr_library_stats",
        "description": "High-level stats about the Sonarr TV library: number of series, episodes on disk, total size. Use when the user asks 'how big is the TV library' or 'how many shows do we have'.",
        "input_schema": {"type": "object", "properties": {}},  # no arguments
        "read_only": True,
        "fn": sonarr_library_stats,
    },
]
|
||||
@@ -0,0 +1,29 @@
|
||||
"""Storage tools — disk space queries across the media stack."""
|
||||
|
||||
from ._common import arr_get
|
||||
|
||||
|
||||
def sonarr_disk_space():
    """Disk space on the paths Sonarr cares about (Plex TV Shows mount, downloads, etc.)."""
    mounts = []
    for disk in arr_get("sonarr", "/api/v3/diskspace"):
        total = disk.get("totalSpace", 0) or 0
        free = disk.get("freeSpace", 0) or 0
        mounts.append({
            "path": disk.get("path"),
            "label": disk.get("label"),
            "total_tb": round(total / 1e12, 2),
            "free_tb": round(free / 1e12, 2),
            # Clamp the denominator to >= 1 so an unknown/zero total never
            # divides by zero.
            "free_pct": round(100 * free / max(total or 1, 1), 1),
        })
    return {"mounts": mounts}
|
||||
|
||||
|
||||
# Anthropic tool-use catalog for this module. The single storage tool reuses
# Sonarr's diskspace endpoint; "fn" is what the runtime invokes on tool_use.
TOOLS = [
    {
        "name": "storage_disk_space",
        "description": "Report free / total disk space for each mount visible to Sonarr (covers the Plex library mounts, downloads, and any attached shares). Use when the user asks 'how much space is left', 'is the NAS full', 'how much room on Darrow'.",
        "input_schema": {"type": "object", "properties": {}},  # no arguments
        "read_only": True,
        "fn": sonarr_disk_space,
    },
]
|
||||
Reference in New Issue
Block a user