From ad06fa2d700e428ec448c55ec9626d74b054ba8c Mon Sep 17 00:00:00 2001
From: Eric Jungbauer
Date: Mon, 13 Apr 2026 15:07:56 +0000
Subject: [PATCH] Accept full wiki URLs, URL slugs, or UUIDs in project monitor config

---
 agents/project_monitor.py | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/agents/project_monitor.py b/agents/project_monitor.py
index ed5ec9d..5bf8f2f 100644
--- a/agents/project_monitor.py
+++ b/agents/project_monitor.py
@@ -60,14 +60,34 @@ Things to consider — improvements, risks, opportunities.
 """
 
 
+def parse_wiki_id(raw_id):
+    """Extract a wiki doc/collection ID from a UUID, URL slug, or full URL.
+
+    Accepts:
+        ae11e785-b110-4a86-985d-804f35bf3d7c                (UUID)
+        bVLzs4hbbS                                          (URL slug)
+        https://wiki.jfamily.io/doc/title-bVLzs4hbbS        (full doc URL)
+        https://wiki.jfamily.io/collection/name-bVLzs4hbbS  (full collection URL)
+    """
+    raw_id = raw_id.strip()
+    if "/" in raw_id:
+        # Full URL: take the last path segment
+        last_segment = raw_id.rstrip("/").split("/")[-1]
+        # Segments look like "title-slug"; the slug is after the last dash
+        if "-" in last_segment:
+            return last_segment.split("-")[-1]
+        return last_segment
+    return raw_id
+
+
 def fetch_wiki_docs(collection_id=None, doc_ids=None):
-    """Fetch wiki documents. Returns combined text content."""
+    """Fetch wiki documents by ID or collection. Returns combined text content."""
     headers = wiki_headers()
     texts = []
 
     if doc_ids:
-        for doc_id in doc_ids:
-            doc_id = doc_id.strip()
+        for raw_id in doc_ids:
+            doc_id = parse_wiki_id(raw_id)
             if not doc_id:
                 continue
             try:
@@ -83,15 +103,15 @@ def fetch_wiki_docs(collection_id=None, doc_ids=None):
                 texts.append(f"### [Error fetching doc {doc_id}: {e}]")
 
     elif collection_id:
+        collection_id = parse_wiki_id(collection_id)
         try:
             result = api_request(
                 f"{WIKI_API}/documents.list",
-                data={"collectionId": collection_id, "limit": 25},
+                data={"collectionId": collection_id, "limit": 50},
                 headers=headers,
                 method="POST",
             )
-            for doc in result.get("data", [])[:15]:
-                # Fetch full content for each (up to limit)
+            for doc in result.get("data", [])[:25]:
                 try:
                     full = api_request(
                         f"{WIKI_API}/documents.info",
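
For reviewers: a minimal standalone sketch of how parse_wiki_id resolves each
documented input shape. The function body is copied from the hunk above; the
example values are the ones from its docstring.

    def parse_wiki_id(raw_id):
        raw_id = raw_id.strip()
        if "/" in raw_id:
            # Full URL: take the last path segment
            last_segment = raw_id.rstrip("/").split("/")[-1]
            # Segments look like "title-slug"; the slug is after the last dash
            if "-" in last_segment:
                return last_segment.split("-")[-1]
            return last_segment
        return raw_id

    # URL slugs and both full-URL forms resolve to the bare slug:
    assert parse_wiki_id("bVLzs4hbbS") == "bVLzs4hbbS"
    assert parse_wiki_id("https://wiki.jfamily.io/doc/title-bVLzs4hbbS") == "bVLzs4hbbS"
    assert parse_wiki_id("https://wiki.jfamily.io/collection/name-bVLzs4hbbS") == "bVLzs4hbbS"
    # A bare UUID contains dashes but no slash, so it passes through intact:
    assert parse_wiki_id("ae11e785-b110-4a86-985d-804f35bf3d7c") == "ae11e785-b110-4a86-985d-804f35bf3d7c"
    # Surrounding whitespace and trailing slashes are tolerated:
    assert parse_wiki_id(" https://wiki.jfamily.io/doc/title-bVLzs4hbbS/ ") == "bVLzs4hbbS"

One edge worth noting: a full URL whose last segment is a bare UUID (no
"title-" prefix) would be split on its internal dashes; callers holding a
UUID-style URL should pass the UUID by itself.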