Project Monitor agent: LLM-powered project status reports with wiki+Gitea integration
@@ -9,6 +9,7 @@ CONFIG = {
    "person": "Angela",
    "agent_id": "angela-daily-briefing",
    "instance_id": INSTANCE_ID,
    "user_id": 3,
    "wiki_parent_doc_id": "65966bd6-4ef8-4b79-9b79-e4aa62b94e96",
    "location": {
        "name": "Providence",
@@ -81,6 +81,35 @@ def collect_sections(config):
        print(f" Reading List failed: {e}", file=sys.stderr)
        sections.append(("Reading List", "## Reading List\n\n*Unavailable.*\n", f"error: {e}"))

    # --- Project Monitors (LLM-powered) ---
    instance_id = config.get("instance_id", 0)
    user_id = config.get("user_id", 0)
    if user_id:
        try:
            # Fetch this user's project-monitor instances that are set to include in briefing
            pm_instances = api_request(
                f"{DASHBOARD_API}/api/instances/by-user/{user_id}?catalog_id=project-monitor",
                retries=1,
            )
            project_sections = []
            for pm in pm_instances:
                pm_config = pm.get("config", {})
                if str(pm_config.get("include_in_briefing", "false")).lower() != "true":
                    continue
                try:
                    from project_monitor import run as pm_run
                    md, summary = pm_run(pm_config, user_id=user_id, instance_id=pm.get("id"))
                    project_sections.append(md)
                    print(f" Project [{pm_config.get('project_name', '?')}]: {summary[:80]}")
                except Exception as e:
                    print(f" Project [{pm_config.get('project_name', '?')}] failed: {e}", file=sys.stderr)

            if project_sections:
                combined = "## Projects\n\n" + "\n\n".join(project_sections)
                sections.append(("Projects", combined, f"{len(project_sections)} project(s)"))
        except Exception as e:
            print(f" Project monitors skipped: {e}", file=sys.stderr)

    return sections
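For context on the loop above: each record from /api/instances/by-user/{user_id}?catalog_id=project-monitor is assumed to carry its per-instance settings under a "config" key, with include_in_briefing stored as a string. A minimal sketch of a record the filter would accept (values illustrative, not from this commit):

    pm = {
        "id": 12,
        "catalog_id": "project-monitor",
        "config": {
            "project_name": "Dashboard",
            "include_in_briefing": "true",  # stored as a string, hence the str(...).lower() check
        },
    }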
@@ -10,6 +10,7 @@ CONFIG = {
    "person": "Eric",
    "agent_id": "eric-daily-briefing",
    "instance_id": INSTANCE_ID,
    "user_id": 2,
    "wiki_parent_doc_id": "2a891fe8-579b-450b-a663-de93915896b7",
    "location": {
        "name": "Providence",
llm_client.py (new file)
@@ -0,0 +1,166 @@
#!/usr/bin/env python3
"""
LLM Client
Unified interface for calling Anthropic, OpenAI, and LiteLLM/compatible APIs.
Resolves the user's LLM config from the dashboard and routes accordingly.
"""

import json
import sys
from urllib import request, error as urlerror
from shared import DASHBOARD_API, api_request


# Default models per provider
DEFAULT_MODELS = {
    "anthropic": "claude-sonnet-4-5-20250514",
    "openai": "gpt-4o-mini",
    "litellm": "anthropic/claude-sonnet-4-5-20250514",
    "ollama": "llama3",
}

# Default API URLs per provider
DEFAULT_URLS = {
    "anthropic": "https://api.anthropic.com",
    "openai": "https://api.openai.com",
    "litellm": "http://localhost:4000",
    "ollama": "http://localhost:11434",
}


def get_llm_config(user_id):
    """Get the resolved LLM config for a user from the dashboard API."""
    config = api_request(f"{DASHBOARD_API}/api/users/{user_id}/llm", retries=1)
    if config.get("source") == "none":
        raise RuntimeError("No LLM configured. Set one up via the LLM button in the dashboard.")
    return config
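# complete() below reads only a handful of keys from this payload. A plausible
# resolved response, inferred from those reads (any shape beyond these keys is
# an assumption about the dashboard API):
#
#     {
#         "source": "user",               # anything other than "none"
#         "provider_type": "anthropic",   # or "openai", "litellm", "ollama"
#         "api_url": "",                  # empty -> falls back to DEFAULT_URLS
#         "api_key": "sk-ant-...",
#         "default_model": "",            # empty -> falls back to DEFAULT_MODELS
#     }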
def complete(user_id, prompt, system=None, max_tokens=4096):
    """Send a completion request using the user's configured LLM.

    Args:
        user_id: Dashboard user ID (for config resolution)
        prompt: The user message / prompt text
        system: Optional system message
        max_tokens: Max response tokens

    Returns:
        dict with keys: text (str), model (str), input_tokens (int), output_tokens (int)
    """
    config = get_llm_config(user_id)
    provider = config.get("provider_type", "anthropic")
    api_url = config.get("api_url") or DEFAULT_URLS.get(provider, "")
    api_key = config.get("api_key", "")
    model = config.get("default_model") or DEFAULT_MODELS.get(provider, "")

    if provider == "anthropic":
        return _call_anthropic(api_url, api_key, model, prompt, system, max_tokens)
    elif provider in ("openai", "litellm"):
        return _call_openai_compatible(api_url, api_key, model, prompt, system, max_tokens)
    elif provider == "ollama":
        return _call_openai_compatible(api_url, api_key, model, prompt, system, max_tokens, is_ollama=True)
    else:
        raise RuntimeError(f"Unknown LLM provider: {provider}")
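# Usage sketch (illustrative values; the __main__ block at the bottom of this
# file exercises the same path from the CLI, minus the system message):
#
#     result = complete(user_id=2, prompt="One-line project status.", system="Be terse.", max_tokens=200)
#     print(result["text"], f"({result['input_tokens']}+{result['output_tokens']} tokens)")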
def _call_anthropic(api_url, api_key, model, prompt, system, max_tokens):
    """Call the Anthropic Messages API."""
    url = f"{api_url.rstrip('/')}/v1/messages"
    body = {
        "model": model,
        "max_tokens": max_tokens,
        "messages": [{"role": "user", "content": prompt}],
    }
    if system:
        body["system"] = system

    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json",
    }

    data = json.dumps(body).encode()
    req = request.Request(url, data=data, headers=headers, method="POST")

    try:
        with request.urlopen(req, timeout=120) as resp:
            result = json.loads(resp.read().decode())
    except urlerror.HTTPError as e:
        err_body = e.read().decode() if e.fp else ""
        raise RuntimeError(f"Anthropic API error {e.code}: {err_body[:500]}")

    text = ""
    for block in result.get("content", []):
        if block.get("type") == "text":
            text += block["text"]

    usage = result.get("usage", {})
    return {
        "text": text,
        "model": result.get("model", model),
        "input_tokens": usage.get("input_tokens", 0),
        "output_tokens": usage.get("output_tokens", 0),
    }
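# The parsing above expects the standard Messages API response shape, abridged
# here to the fields actually read:
#
#     {
#         "model": "claude-sonnet-4-5-20250514",
#         "content": [{"type": "text", "text": "..."}],
#         "usage": {"input_tokens": 1234, "output_tokens": 567}
#     }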
def _call_openai_compatible(api_url, api_key, model, prompt, system, max_tokens, is_ollama=False):
    """Call an OpenAI-compatible API (works with OpenAI, LiteLLM, Ollama)."""
    if is_ollama:
        url = f"{api_url.rstrip('/')}/api/chat"
    else:
        url = f"{api_url.rstrip('/')}/v1/chat/completions"

    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": prompt})

    body = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
    }
    if is_ollama:
        # Ollama's /api/chat streams by default and ignores OpenAI's max_tokens;
        # request a single JSON response and map the cap to num_predict.
        body["stream"] = False
        body["options"] = {"num_predict": max_tokens}

    headers = {"content-type": "application/json"}
    if api_key:
        headers["authorization"] = f"Bearer {api_key}"

    data = json.dumps(body).encode()
    req = request.Request(url, data=data, headers=headers, method="POST")

    try:
        with request.urlopen(req, timeout=120) as resp:
            result = json.loads(resp.read().decode())
    except urlerror.HTTPError as e:
        err_body = e.read().decode() if e.fp else ""
        raise RuntimeError(f"API error {e.code}: {err_body[:500]}")

    if is_ollama:
        text = result.get("message", {}).get("content", "")
        return {"text": text, "model": model, "input_tokens": 0, "output_tokens": 0}

    choice = result.get("choices", [{}])[0]
    text = choice.get("message", {}).get("content", "")
    usage = result.get("usage", {})
    return {
        "text": text,
        "model": result.get("model", model),
        "input_tokens": usage.get("prompt_tokens", 0),
        "output_tokens": usage.get("completion_tokens", 0),
    }
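# The two return paths mirror the two response envelopes, abridged to the
# fields read above:
#
#     Ollama /api/chat (non-streaming):
#         {"message": {"role": "assistant", "content": "..."}}
#
#     OpenAI-style /v1/chat/completions:
#         {"choices": [{"message": {"content": "..."}}],
#          "usage": {"prompt_tokens": 1234, "completion_tokens": 567}}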
if __name__ == "__main__":
    # Quick test
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--user-id", type=int, required=True)
    parser.add_argument("--prompt", default="Say hello in one sentence.")
    args = parser.parse_args()

    result = complete(args.user_id, args.prompt)
    print(f"Model: {result['model']}")
    print(f"Tokens: {result['input_tokens']} in, {result['output_tokens']} out")
    print(f"Response: {result['text']}")
project_monitor.py (new file)
@@ -0,0 +1,336 @@
#!/usr/bin/env python3
"""
Project Monitor Agent
Reads project data from the wiki, Gitea, and custom URLs.
Uses an LLM to generate status reports with analysis and next steps.
Posts the full report to the wiki; returns a summary for the daily briefing.
"""

import json
import sys
from datetime import datetime
from urllib import request as urlreq, error as urlerror
from shared import (
    MT, DASHBOARD_API, WIKI_API, WIKI_TOKEN,
    api_request, log_run, wiki_headers,
)
from llm_client import complete as llm_complete

AGENT_ID = "project-monitor"

GITEA_API = "http://192.168.1.204:3000/api/v1"
GITEA_TOKEN = "a03bb836c58010c4de35ac9c1f242292108c9776"

SYSTEM_PROMPT = """You are a project analyst for a home lab and software development environment.
You review project documentation, code activity, and issues to produce clear, actionable status reports.
Be specific — reference actual document names, commit messages, and issue titles.
Be concise but thorough. Prioritize what matters most."""

ANALYSIS_PROMPT = """Review the current state of this project and generate a status report.

Project: {project_name}

{custom_notes}

=== PROJECT DOCUMENTATION ===
{wiki_content}

=== CODE ACTIVITY (last 7 days) ===
{gitea_content}

=== ADDITIONAL CONTEXT ===
{url_content}

Generate a report with these sections:

## Status Summary
2-3 sentences on overall health, momentum, and any blockers.

## Recent Activity
What changed recently — commits, doc updates, new issues.

## Open Issues / Blockers
Anything stalled, broken, or needing attention.

## Recommended Next Steps
3-5 prioritized, actionable items.

## Ideas & Opportunities
Things to consider — improvements, risks, opportunities.
"""
def fetch_wiki_docs(collection_id=None, doc_ids=None):
    """Fetch wiki documents. Returns combined text content."""
    headers = wiki_headers()
    texts = []

    if doc_ids:
        for doc_id in doc_ids:
            doc_id = doc_id.strip()
            if not doc_id:
                continue
            try:
                result = api_request(
                    f"{WIKI_API}/documents.info",
                    data={"id": doc_id},
                    headers=headers,
                    method="POST",
                )
                doc = result.get("data", {})
                texts.append(f"### {doc.get('title', 'Untitled')}\n{doc.get('text', '')[:3000]}")
            except Exception as e:
                texts.append(f"### [Error fetching doc {doc_id}: {e}]")

    elif collection_id:
        try:
            result = api_request(
                f"{WIKI_API}/documents.list",
                data={"collectionId": collection_id, "limit": 25},
                headers=headers,
                method="POST",
            )
            for doc in result.get("data", [])[:15]:
                # Fetch full content for each (up to limit)
                try:
                    full = api_request(
                        f"{WIKI_API}/documents.info",
                        data={"id": doc["id"]},
                        headers=headers,
                        method="POST",
                    )
                    d = full.get("data", {})
                    texts.append(f"### {d.get('title', 'Untitled')}\n{d.get('text', '')[:2000]}")
                except Exception:
                    texts.append(f"### {doc.get('title', 'Untitled')}\n[Content not available]")
        except Exception as e:
            texts.append(f"[Error fetching collection: {e}]")

    return "\n\n".join(texts) if texts else "No wiki documentation configured."
def fetch_gitea_activity(repo):
    """Fetch recent commits and open issues from a Gitea repo."""
    if not repo:
        return "No Gitea repo configured."

    parts = []
    # Authenticate via header rather than a token query parameter, so the
    # token does not end up in access logs.
    headers = {"Authorization": f"token {GITEA_TOKEN}"}

    # Recent commits
    try:
        url = f"{GITEA_API}/repos/{repo}/commits?limit=15"
        req = urlreq.Request(url, headers=headers)
        with urlreq.urlopen(req, timeout=15) as resp:
            commits = json.loads(resp.read().decode())
        if commits:
            parts.append("Recent commits:")
            for c in commits[:10]:
                msg = c.get("commit", {}).get("message", "").split("\n")[0][:80]
                date = c.get("commit", {}).get("author", {}).get("date", "")[:10]
                parts.append(f" - [{date}] {msg}")
    except Exception as e:
        parts.append(f"[Could not fetch commits: {e}]")

    # Open issues
    try:
        url = f"{GITEA_API}/repos/{repo}/issues?state=open&limit=20"
        req = urlreq.Request(url, headers=headers)
        with urlreq.urlopen(req, timeout=15) as resp:
            issues = json.loads(resp.read().decode())
        if issues:
            parts.append(f"\nOpen issues ({len(issues)}):")
            for i in issues[:10]:
                labels = ", ".join(l.get("name", "") for l in i.get("labels", []))
                parts.append(f" - #{i['number']}: {i['title']}" + (f" [{labels}]" if labels else ""))
    except Exception as e:
        parts.append(f"[Could not fetch issues: {e}]")

    return "\n".join(parts) if parts else "No recent Gitea activity."
def fetch_url_content(urls_text):
    """Fetch text content from custom URLs."""
    if not urls_text:
        return "No additional URLs configured."

    urls = [u.strip() for u in urls_text.strip().split("\n") if u.strip()]
    if not urls:
        return "No additional URLs configured."

    parts = []
    for url in urls[:5]:
        try:
            req = urlreq.Request(url, headers={"User-Agent": "AgentCommandCenter/1.0"})
            with urlreq.urlopen(req, timeout=15) as resp:
                content = resp.read().decode("utf-8", errors="replace")[:2000]
            parts.append(f"### {url}\n{content}")
        except Exception as e:
            parts.append(f"### {url}\n[Error: {e}]")

    return "\n\n".join(parts)
def post_report_to_wiki(report_md, project_name, collection_id):
    """Post the full report to a wiki collection, updating today's report if one exists."""
    if not collection_id:
        return None

    now = datetime.now(MT)
    title = f"Project Status — {project_name} — {now.strftime('%Y-%m-%d')}"
    headers = wiki_headers()

    # Check for an existing report today; update it instead of creating a duplicate
    try:
        search = api_request(
            f"{WIKI_API}/documents.search",
            data={"query": title, "collectionId": collection_id},
            headers=headers,
            method="POST",
        )
        for doc in search.get("data", []):
            if doc.get("document", {}).get("title") == title:
                api_request(
                    f"{WIKI_API}/documents.update",
                    data={"id": doc["document"]["id"], "text": report_md, "publish": True},
                    headers=headers,
                    method="POST",
                )
                return doc["document"]["id"]
    except Exception:
        pass

    # Create new
    try:
        result = api_request(
            f"{WIKI_API}/documents.create",
            data={"title": title, "text": report_md, "collectionId": collection_id, "publish": True},
            headers=headers,
            method="POST",
        )
        return result["data"]["id"]
    except Exception as e:
        print(f" Warning: could not post report to wiki: {e}", file=sys.stderr)
        return None
def run(config, user_id=None, instance_id=None):
    """Run the project monitor agent.

    Config keys:
        project_name (str): Display name
        wiki_collection_id (str): Outline collection to read
        wiki_doc_ids (str): Comma-separated specific doc IDs
        gitea_repo (str): owner/repo
        custom_urls (str): Newline-separated URLs
        custom_notes (str): Free-text context
        report_collection_id (str): Where to post the full report
        include_in_briefing (str): "true"/"false"

    Returns:
        (markdown_section, summary) for daily briefing integration
    """
    project_name = config.get("project_name", "Unknown Project")

    if not user_id:
        return f"## {project_name}\n\n*No user context for LLM.*\n", "error: no user_id"

    print(f" Collecting data for {project_name}...")

    # Collect data
    wiki_collection = config.get("wiki_collection_id", "")
    wiki_docs = config.get("wiki_doc_ids", "")
    doc_ids = [d.strip() for d in wiki_docs.split(",") if d.strip()] if wiki_docs else None

    wiki_content = fetch_wiki_docs(
        collection_id=wiki_collection if not doc_ids else None,
        doc_ids=doc_ids,
    )
    gitea_content = fetch_gitea_activity(config.get("gitea_repo", ""))
    url_content = fetch_url_content(config.get("custom_urls", ""))
    custom_notes = config.get("custom_notes", "")
    if custom_notes:
        custom_notes = f"Project context/goals:\n{custom_notes}"

    # Build prompt, truncating each source to keep the total context modest
    prompt = ANALYSIS_PROMPT.format(
        project_name=project_name,
        custom_notes=custom_notes,
        wiki_content=wiki_content[:8000],
        gitea_content=gitea_content[:3000],
        url_content=url_content[:3000],
    )

    print(" Calling LLM for analysis...")
    try:
        result = llm_complete(user_id, prompt, system=SYSTEM_PROMPT, max_tokens=2000)
    except RuntimeError as e:
        err = str(e)
        log_run(AGENT_ID, "failed", err=err, instance_id=instance_id)
        return f"## {project_name}\n\n*LLM error: {err}*\n", f"error: {err}"

    report_md = result["text"]
    model = result["model"]
    tokens_in = result["input_tokens"]
    tokens_out = result["output_tokens"]

    print(f" LLM: {model}, {tokens_in}+{tokens_out} tokens")

    # Post full report to wiki
    report_collection = config.get("report_collection_id", "") or config.get("wiki_collection_id", "")
    doc_id = None
    if report_collection:
        now = datetime.now(MT)
        full_report = f"# Project Status — {project_name}\n\n"
        full_report += f"**{now.strftime('%A, %B %d, %Y')}** | Generated by Project Monitor Agent\n\n"
        full_report += f"---\n\n{report_md}\n\n"
        full_report += f"---\n*Model: {model} | Tokens: {tokens_in} in, {tokens_out} out*\n"
        doc_id = post_report_to_wiki(full_report, project_name, report_collection)
        if doc_id:
            print(f" Wiki report posted: {doc_id}")

    # Extract just the "## Status Summary" section of the report for the briefing
    summary_lines = []
    in_summary = False
    for line in report_md.split("\n"):
        if "## Status Summary" in line:
            in_summary = True
            continue
        if in_summary and line.startswith("## "):
            break
        if in_summary and line.strip():
            summary_lines.append(line.strip())
    summary = " ".join(summary_lines)[:200] if summary_lines else report_md[:200]

    # Briefing section
    section = f"### {project_name}\n\n{summary}\n"

    log_run(AGENT_ID, "success", output=f"{project_name}: {summary[:100]}", instance_id=instance_id, metadata={
        "project": project_name,
        "model": model,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
        "wiki_report_id": doc_id,
    })

    return section, summary
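# Putting it together: a direct call with a fuller config than the CLI below
# builds (every ID, repo, and name here is illustrative, not from this commit):
#
#     config = {
#         "project_name": "Home Dashboard",
#         "wiki_collection_id": "col-docs",       # Outline collection to read
#         "gitea_repo": "eric/dashboard",
#         "custom_notes": "Goal: ship the agent marketplace this quarter.",
#         "report_collection_id": "col-reports",  # where the full report lands
#         "include_in_briefing": "true",
#     }
#     section, summary = run(config, user_id=2, instance_id=42)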
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--user-id", type=int, required=True)
    parser.add_argument("--instance-id", type=int, default=0)
    parser.add_argument("--project-name", required=True)
    parser.add_argument("--wiki-collection", default="")
    parser.add_argument("--gitea-repo", default="")
    args = parser.parse_args()

    config = {
        "project_name": args.project_name,
        "wiki_collection_id": args.wiki_collection,
        "gitea_repo": args.gitea_repo,
    }
    section, summary = run(config, user_id=args.user_id, instance_id=args.instance_id)
    print(section)
    print(f"\nSummary: {summary}")