Project Monitor agent: LLM-powered project status reports with wiki+Gitea integration
This commit is contained in:
@@ -9,6 +9,7 @@ CONFIG = {
|
|||||||
"person": "Angela",
|
"person": "Angela",
|
||||||
"agent_id": "angela-daily-briefing",
|
"agent_id": "angela-daily-briefing",
|
||||||
"instance_id": INSTANCE_ID,
|
"instance_id": INSTANCE_ID,
|
||||||
|
"user_id": 3,
|
||||||
"wiki_parent_doc_id": "65966bd6-4ef8-4b79-9b79-e4aa62b94e96",
|
"wiki_parent_doc_id": "65966bd6-4ef8-4b79-9b79-e4aa62b94e96",
|
||||||
"location": {
|
"location": {
|
||||||
"name": "Providence",
|
"name": "Providence",
|
||||||
|
|||||||
@@ -81,6 +81,35 @@ def collect_sections(config):
|
|||||||
print(f" Reading List failed: {e}", file=sys.stderr)
|
print(f" Reading List failed: {e}", file=sys.stderr)
|
||||||
sections.append(("Reading List", "## Reading List\n\n*Unavailable.*\n", f"error: {e}"))
|
sections.append(("Reading List", "## Reading List\n\n*Unavailable.*\n", f"error: {e}"))
|
||||||
|
|
||||||
|
# --- Project Monitors (LLM-powered) ---
|
||||||
|
instance_id = config.get("instance_id", 0)
|
||||||
|
user_id = config.get("user_id", 0)
|
||||||
|
if user_id:
|
||||||
|
try:
|
||||||
|
# Fetch this user's project-monitor instances that are set to include in briefing
|
||||||
|
pm_instances = api_request(
|
||||||
|
f"{DASHBOARD_API}/api/instances/by-user/{user_id}?catalog_id=project-monitor",
|
||||||
|
retries=1,
|
||||||
|
)
|
||||||
|
project_sections = []
|
||||||
|
for pm in pm_instances:
|
||||||
|
pm_config = pm.get("config", {})
|
||||||
|
if str(pm_config.get("include_in_briefing", "false")).lower() != "true":
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
from project_monitor import run as pm_run
|
||||||
|
md, summary = pm_run(pm_config, user_id=user_id, instance_id=pm.get("id"))
|
||||||
|
project_sections.append(md)
|
||||||
|
print(f" Project [{pm_config.get('project_name', '?')}]: {summary[:80]}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f" Project [{pm_config.get('project_name', '?')}] failed: {e}", file=sys.stderr)
|
||||||
|
|
||||||
|
if project_sections:
|
||||||
|
combined = "## Projects\n\n" + "\n\n".join(project_sections)
|
||||||
|
sections.append(("Projects", combined, f"{len(project_sections)} project(s)"))
|
||||||
|
except Exception as e:
|
||||||
|
print(f" Project monitors skipped: {e}", file=sys.stderr)
|
||||||
|
|
||||||
return sections
|
return sections
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ CONFIG = {
|
|||||||
"person": "Eric",
|
"person": "Eric",
|
||||||
"agent_id": "eric-daily-briefing",
|
"agent_id": "eric-daily-briefing",
|
||||||
"instance_id": INSTANCE_ID,
|
"instance_id": INSTANCE_ID,
|
||||||
|
"user_id": 2,
|
||||||
"wiki_parent_doc_id": "2a891fe8-579b-450b-a663-de93915896b7",
|
"wiki_parent_doc_id": "2a891fe8-579b-450b-a663-de93915896b7",
|
||||||
"location": {
|
"location": {
|
||||||
"name": "Providence",
|
"name": "Providence",
|
||||||
|
|||||||
@@ -0,0 +1,166 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
LLM Client
|
||||||
|
Unified interface for calling Anthropic, OpenAI, and LiteLLM/compatible APIs.
|
||||||
|
Resolves the user's LLM config from the dashboard and routes accordingly.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
from urllib import request, error as urlerror
|
||||||
|
from shared import DASHBOARD_API, api_request
|
||||||
|
|
||||||
|
|
||||||
|
# Default models per provider, used when the user's dashboard config does not
# name a model explicitly (see complete()).
DEFAULT_MODELS = {
    "anthropic": "claude-sonnet-4-5-20250514",
    "openai": "gpt-4o-mini",
    # LiteLLM proxies use provider-prefixed model names.
    "litellm": "anthropic/claude-sonnet-4-5-20250514",
    "ollama": "llama3",
}

# Default API URLs per provider, used when the user's config has no api_url.
# litellm/ollama defaults assume a locally-hosted proxy/daemon.
DEFAULT_URLS = {
    "anthropic": "https://api.anthropic.com",
    "openai": "https://api.openai.com",
    "litellm": "http://localhost:4000",
    "ollama": "http://localhost:11434",
}
||||||
|
|
||||||
|
|
||||||
|
def get_llm_config(user_id):
    """Fetch the user's resolved LLM configuration from the dashboard API.

    Raises:
        RuntimeError: when the dashboard reports no provider is configured.
    """
    cfg = api_request(f"{DASHBOARD_API}/api/users/{user_id}/llm", retries=1)
    if cfg.get("source") != "none":
        return cfg
    raise RuntimeError("No LLM configured. Set one up via the LLM button in the dashboard.")
||||||
|
|
||||||
|
|
||||||
|
def complete(user_id, prompt, system=None, max_tokens=4096):
    """Send a completion request using the user's configured LLM.

    Args:
        user_id: Dashboard user ID (for config resolution)
        prompt: The user message / prompt text
        system: Optional system message
        max_tokens: Max response tokens

    Returns:
        dict with keys: text (str), model (str), input_tokens (int), output_tokens (int)

    Raises:
        RuntimeError: for an unknown provider, or from the underlying call.
    """
    cfg = get_llm_config(user_id)
    provider = cfg.get("provider_type", "anthropic")
    # Fall back to per-provider defaults when the user config leaves these blank.
    base_url = cfg.get("api_url") or DEFAULT_URLS.get(provider, "")
    key = cfg.get("api_key", "")
    model_name = cfg.get("default_model") or DEFAULT_MODELS.get(provider, "")

    if provider == "anthropic":
        return _call_anthropic(base_url, key, model_name, prompt, system, max_tokens)
    if provider in ("openai", "litellm"):
        return _call_openai_compatible(base_url, key, model_name, prompt, system, max_tokens)
    if provider == "ollama":
        # Ollama speaks a near-OpenAI dialect; the flag switches endpoint/parsing.
        return _call_openai_compatible(base_url, key, model_name, prompt, system, max_tokens, is_ollama=True)
    raise RuntimeError(f"Unknown LLM provider: {provider}")
||||||
|
|
||||||
|
|
||||||
|
def _call_anthropic(api_url, api_key, model, prompt, system, max_tokens):
    """Call the Anthropic Messages API.

    Args:
        api_url: Base URL (path /v1/messages is appended).
        api_key: Anthropic API key, sent via x-api-key.
        model: Model identifier.
        prompt: User message text.
        system: Optional system prompt.
        max_tokens: Response token cap.

    Returns:
        dict with keys: text, model, input_tokens, output_tokens.

    Raises:
        RuntimeError: on a non-2xx HTTP response (chained to the HTTPError).
    """
    url = f"{api_url.rstrip('/')}/v1/messages"
    body = {
        "model": model,
        "max_tokens": max_tokens,
        "messages": [{"role": "user", "content": prompt}],
    }
    if system:
        body["system"] = system

    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json",
    }

    data = json.dumps(body).encode()
    req = request.Request(url, data=data, headers=headers, method="POST")

    try:
        with request.urlopen(req, timeout=120) as resp:
            result = json.loads(resp.read().decode())
    except urlerror.HTTPError as e:
        err_body = e.read().decode() if e.fp else ""
        # Chain the HTTPError so the root cause survives in tracebacks.
        raise RuntimeError(f"Anthropic API error {e.code}: {err_body[:500]}") from e

    # Responses may contain multiple content blocks; keep only text blocks.
    text = "".join(
        block["text"] for block in result.get("content", []) if block.get("type") == "text"
    )

    usage = result.get("usage", {})
    return {
        "text": text,
        "model": result.get("model", model),
        "input_tokens": usage.get("input_tokens", 0),
        "output_tokens": usage.get("output_tokens", 0),
    }
||||||
|
|
||||||
|
|
||||||
|
def _call_openai_compatible(api_url, api_key, model, prompt, system, max_tokens, is_ollama=False):
    """Call an OpenAI-compatible API (works with OpenAI, LiteLLM, Ollama).

    Args:
        api_url: Base URL; endpoint path depends on is_ollama.
        api_key: Bearer token; header omitted when empty (e.g. local Ollama).
        model: Model identifier.
        prompt: User message text.
        system: Optional system prompt.
        max_tokens: Response token cap.
        is_ollama: Use Ollama's native /api/chat endpoint and response shape.

    Returns:
        dict with keys: text, model, input_tokens, output_tokens
        (token counts are 0 for Ollama, which does not report usage here).

    Raises:
        RuntimeError: on a non-2xx HTTP response (chained to the HTTPError).
    """
    if is_ollama:
        url = f"{api_url.rstrip('/')}/api/chat"
    else:
        url = f"{api_url.rstrip('/')}/v1/chat/completions"

    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": prompt})

    body = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
    }

    headers = {"content-type": "application/json"}
    if api_key:
        headers["authorization"] = f"Bearer {api_key}"

    data = json.dumps(body).encode()
    req = request.Request(url, data=data, headers=headers, method="POST")

    try:
        with request.urlopen(req, timeout=120) as resp:
            result = json.loads(resp.read().decode())
    except urlerror.HTTPError as e:
        err_body = e.read().decode() if e.fp else ""
        # Chain the HTTPError so the root cause survives in tracebacks.
        raise RuntimeError(f"API error {e.code}: {err_body[:500]}") from e

    if is_ollama:
        # Ollama returns {"message": {"content": ...}} with no usage block.
        text = result.get("message", {}).get("content", "")
        return {"text": text, "model": model, "input_tokens": 0, "output_tokens": 0}

    choice = result.get("choices", [{}])[0]
    text = choice.get("message", {}).get("content", "")
    usage = result.get("usage", {})
    return {
        "text": text,
        "model": result.get("model", model),
        "input_tokens": usage.get("prompt_tokens", 0),
        "output_tokens": usage.get("completion_tokens", 0),
    }
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Quick smoke test from the command line.
    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("--user-id", type=int, required=True)
    ap.add_argument("--prompt", default="Say hello in one sentence.")
    opts = ap.parse_args()

    res = complete(opts.user_id, opts.prompt)
    print(f"Model: {res['model']}")
    print(f"Tokens: {res['input_tokens']} in, {res['output_tokens']} out")
    print(f"Response: {res['text']}")
||||||
@@ -0,0 +1,336 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Project Monitor Agent
|
||||||
|
Reads project data from wiki, Gitea, and custom URLs.
|
||||||
|
Uses LLM to generate intelligent status reports with analysis and next steps.
|
||||||
|
Posts full report to wiki, returns summary for daily briefing.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
import os
import sys
from datetime import datetime
from urllib import request as urlreq, error as urlerror

from shared import (
    MT, DASHBOARD_API, WIKI_API, WIKI_TOKEN,
    api_request, log_run, wiki_headers,
)
from llm_client import complete as llm_complete
|
||||||
|
AGENT_ID = "project-monitor"

GITEA_API = "http://192.168.1.204:3000/api/v1"
# SECURITY: this token was previously committed in plain text. Prefer the
# GITEA_TOKEN environment variable; the literal remains only as a
# backward-compatible fallback for existing deployments and should be rotated.
GITEA_TOKEN = os.environ.get("GITEA_TOKEN", "a03bb836c58010c4de35ac9c1f242292108c9776")
||||||
|
SYSTEM_PROMPT = """You are a project analyst for a home lab and software development environment.
|
||||||
|
You review project documentation, code activity, and issues to produce clear, actionable status reports.
|
||||||
|
Be specific — reference actual document names, commit messages, and issue titles.
|
||||||
|
Be concise but thorough. Prioritize what matters most."""
|
||||||
|
|
||||||
|
ANALYSIS_PROMPT = """Review the current state of this project and generate a status report.
|
||||||
|
|
||||||
|
Project: {project_name}
|
||||||
|
|
||||||
|
{custom_notes}
|
||||||
|
|
||||||
|
=== PROJECT DOCUMENTATION ===
|
||||||
|
{wiki_content}
|
||||||
|
|
||||||
|
=== CODE ACTIVITY (last 7 days) ===
|
||||||
|
{gitea_content}
|
||||||
|
|
||||||
|
=== ADDITIONAL CONTEXT ===
|
||||||
|
{url_content}
|
||||||
|
|
||||||
|
Generate a report with these sections:
|
||||||
|
|
||||||
|
## Status Summary
|
||||||
|
2-3 sentences on overall health, momentum, and any blockers.
|
||||||
|
|
||||||
|
## Recent Activity
|
||||||
|
What changed recently — commits, doc updates, new issues.
|
||||||
|
|
||||||
|
## Open Issues / Blockers
|
||||||
|
Anything stalled, broken, or needing attention.
|
||||||
|
|
||||||
|
## Recommended Next Steps
|
||||||
|
3-5 prioritized, actionable items.
|
||||||
|
|
||||||
|
## Ideas & Opportunities
|
||||||
|
Things to consider — improvements, risks, opportunities.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def fetch_wiki_docs(collection_id=None, doc_ids=None):
    """Fetch wiki documents. Returns combined text content.

    Args:
        collection_id: Outline collection ID; only consulted when doc_ids is falsy.
        doc_ids: Iterable of specific document IDs (takes precedence over
            collection_id).

    Returns:
        str: one "### <title>" section per document joined by blank lines, or a
        placeholder string when nothing is configured. Per-document errors are
        embedded inline rather than raised, so one bad doc never aborts the run.
    """
    headers = wiki_headers()
    texts = []

    if doc_ids:
        for doc_id in doc_ids:
            doc_id = doc_id.strip()
            if not doc_id:
                continue
            try:
                result = api_request(
                    f"{WIKI_API}/documents.info",
                    data={"id": doc_id},
                    headers=headers,
                    method="POST",
                )
                doc = result.get("data", {})
                # Cap each document at 3000 chars to keep the LLM prompt bounded.
                texts.append(f"### {doc.get('title', 'Untitled')}\n{doc.get('text', '')[:3000]}")
            except Exception as e:
                texts.append(f"### [Error fetching doc {doc_id}: {e}]")

    elif collection_id:
        try:
            result = api_request(
                f"{WIKI_API}/documents.list",
                data={"collectionId": collection_id, "limit": 25},
                headers=headers,
                method="POST",
            )
            # Only hydrate the first 15 even though up to 25 are listed.
            for doc in result.get("data", [])[:15]:
                # Fetch full content for each (up to limit)
                try:
                    full = api_request(
                        f"{WIKI_API}/documents.info",
                        data={"id": doc["id"]},
                        headers=headers,
                        method="POST",
                    )
                    d = full.get("data", {})
                    # Tighter 2000-char cap here since many docs may be included.
                    texts.append(f"### {d.get('title', 'Untitled')}\n{d.get('text', '')[:2000]}")
                except Exception:
                    # Fall back to the list entry's title when the full fetch fails.
                    texts.append(f"### {doc.get('title', 'Untitled')}\n[Content not available]")
        except Exception as e:
            texts.append(f"[Error fetching collection: {e}]")

    return "\n\n".join(texts) if texts else "No wiki documentation configured."
||||||
|
|
||||||
|
|
||||||
|
def fetch_gitea_activity(repo):
    """Fetch recent commits and open issues from a Gitea repo.

    Args:
        repo: "owner/repo" string; falsy means no repo is configured.

    Returns:
        str: plain-text activity summary for the LLM prompt. Fetch failures
        are embedded inline so a Gitea outage never aborts the report.
    """
    if not repo:
        return "No Gitea repo configured."

    parts = []
    # FIX: authenticate via the Authorization header. Previously this header
    # dict was built but never attached to the requests, and the token was
    # leaked into URL query strings (which end up in access logs).
    headers = {"Authorization": f"token {GITEA_TOKEN}"}

    # Recent commits
    try:
        url = f"{GITEA_API}/repos/{repo}/commits?limit=15"
        req = urlreq.Request(url, headers=headers)
        with urlreq.urlopen(req, timeout=15) as resp:
            commits = json.loads(resp.read().decode())
        if commits:
            parts.append("Recent commits:")
            for c in commits[:10]:
                # First line of the commit message, truncated for the prompt.
                msg = c.get("commit", {}).get("message", "").split("\n")[0][:80]
                date = c.get("commit", {}).get("author", {}).get("date", "")[:10]
                parts.append(f"  - [{date}] {msg}")
    except Exception as e:
        parts.append(f"[Could not fetch commits: {e}]")

    # Open issues
    try:
        url = f"{GITEA_API}/repos/{repo}/issues?state=open&limit=20"
        req = urlreq.Request(url, headers=headers)
        with urlreq.urlopen(req, timeout=15) as resp:
            issues = json.loads(resp.read().decode())
        if issues:
            parts.append(f"\nOpen issues ({len(issues)}):")
            for i in issues[:10]:
                labels = ", ".join(l.get("name", "") for l in i.get("labels", []))
                parts.append(f"  - #{i['number']}: {i['title']}" + (f" [{labels}]" if labels else ""))
    except Exception as e:
        parts.append(f"[Could not fetch issues: {e}]")

    return "\n".join(parts) if parts else "No recent Gitea activity."
||||||
|
|
||||||
|
|
||||||
|
def fetch_url_content(urls_text):
    """Fetch text content from custom URLs.

    Args:
        urls_text: Newline-separated URL list; blank lines ignored.

    Returns:
        str: one "### <url>" section per URL (first 5 only), with fetch
        errors embedded inline, or a placeholder when nothing is configured.
    """
    if not urls_text:
        return "No additional URLs configured."

    stripped = (line.strip() for line in urls_text.strip().split("\n"))
    targets = [u for u in stripped if u]
    if not targets:
        return "No additional URLs configured."

    sections = []
    # Cap at 5 URLs to bound run time and prompt size.
    for target in targets[:5]:
        try:
            req = urlreq.Request(target, headers={"User-Agent": "AgentCommandCenter/1.0"})
            with urlreq.urlopen(req, timeout=15) as resp:
                snippet = resp.read().decode("utf-8", errors="replace")[:2000]
            sections.append(f"### {target}\n{snippet}")
        except Exception as e:
            sections.append(f"### {target}\n[Error: {e}]")

    return "\n\n".join(sections)
||||||
|
|
||||||
|
|
||||||
|
def post_report_to_wiki(report_md, project_name, collection_id):
    """Post the full report to a wiki collection.

    Upserts by title: if a report with today's date-stamped title already
    exists in the collection it is updated in place, otherwise a new
    document is created.

    Args:
        report_md: Full markdown report body.
        project_name: Used in the document title.
        collection_id: Target collection; falsy skips posting entirely.

    Returns:
        The wiki document ID (str) on success, or None.
    """
    if not collection_id:
        return None

    now = datetime.now(MT)
    # One document per project per day — the date in the title is the key.
    title = f"Project Status — {project_name} — {now.strftime('%Y-%m-%d')}"
    headers = wiki_headers()

    # Check for existing report today
    try:
        search = api_request(
            f"{WIKI_API}/documents.search",
            data={"query": title, "collectionId": collection_id},
            headers=headers,
            method="POST",
        )
        for doc in search.get("data", []):
            # Search is fuzzy; require an exact title match before updating.
            if doc.get("document", {}).get("title") == title:
                api_request(
                    f"{WIKI_API}/documents.update",
                    data={"id": doc["document"]["id"], "text": report_md, "publish": True},
                    headers=headers,
                    method="POST",
                )
                return doc["document"]["id"]
    except Exception:
        # Best-effort: on any search/update failure, fall through to create.
        pass

    # Create new
    try:
        result = api_request(
            f"{WIKI_API}/documents.create",
            data={"title": title, "text": report_md, "collectionId": collection_id, "publish": True},
            headers=headers,
            method="POST",
        )
        return result["data"]["id"]
    except Exception as e:
        print(f"  Warning: could not post report to wiki: {e}", file=sys.stderr)
        return None
||||||
|
|
||||||
|
|
||||||
|
def run(config, user_id=None, instance_id=None):
    """Run the project monitor agent.

    Collects wiki/Gitea/URL context, asks the user's LLM for a status
    report, posts the full report to the wiki, and returns a short section
    for the daily briefing.

    Config keys:
        project_name (str): Display name
        wiki_collection_id (str): Outline collection to read
        wiki_doc_ids (str): Comma-separated specific doc IDs
        gitea_repo (str): owner/repo
        custom_urls (str): Newline-separated URLs
        custom_notes (str): Free text context
        report_collection_id (str): Where to post the full report
        include_in_briefing (str): "true"/"false"

    Returns:
        (markdown_section, summary) for daily briefing integration
    """
    project_name = config.get("project_name", "Unknown Project")

    if not user_id:
        return f"## {project_name}\n\n*No user context for LLM.*\n", "error: no user_id"

    print(f"  Collecting data for {project_name}...")

    # Collect data — explicit doc IDs take precedence over a collection scan.
    wiki_collection = config.get("wiki_collection_id", "")
    wiki_docs = config.get("wiki_doc_ids", "")
    doc_ids = [d.strip() for d in wiki_docs.split(",") if d.strip()] if wiki_docs else None

    wiki_content = fetch_wiki_docs(
        collection_id=wiki_collection if not doc_ids else None,
        doc_ids=doc_ids,
    )
    gitea_content = fetch_gitea_activity(config.get("gitea_repo", ""))
    url_content = fetch_url_content(config.get("custom_urls", ""))
    custom_notes = config.get("custom_notes", "")
    if custom_notes:
        custom_notes = f"Project context/goals:\n{custom_notes}"

    # Build prompt — each input is truncated to bound total token usage.
    prompt = ANALYSIS_PROMPT.format(
        project_name=project_name,
        custom_notes=custom_notes,
        wiki_content=wiki_content[:8000],
        gitea_content=gitea_content[:3000],
        url_content=url_content[:3000],
    )

    print("  Calling LLM for analysis...")
    try:
        result = llm_complete(user_id, prompt, system=SYSTEM_PROMPT, max_tokens=2000)
    except RuntimeError as e:
        err = str(e)
        log_run(AGENT_ID, "failed", err=err, instance_id=instance_id)
        return f"## {project_name}\n\n*LLM error: {err}*\n", f"error: {err}"

    report_md = result["text"]
    model = result["model"]
    tokens_in = result["input_tokens"]
    tokens_out = result["output_tokens"]

    print(f"  LLM: {model}, {tokens_in}+{tokens_out} tokens")

    # Post full report to wiki; fall back to the source collection if no
    # dedicated report collection is configured.
    report_collection = config.get("report_collection_id", "") or config.get("wiki_collection_id", "")
    doc_id = None
    if report_collection:
        full_report = _format_full_report(report_md, project_name, model, tokens_in, tokens_out)
        doc_id = post_report_to_wiki(full_report, project_name, report_collection)
        if doc_id:
            print(f"  Wiki report posted: {doc_id}")

    summary = _extract_summary(report_md)

    # Briefing section
    section = f"### {project_name}\n\n{summary}\n"

    log_run(AGENT_ID, "success", output=f"{project_name}: {summary[:100]}", instance_id=instance_id, metadata={
        "project": project_name,
        "model": model,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
        "wiki_report_id": doc_id,
    })

    return section, summary


def _format_full_report(report_md, project_name, model, tokens_in, tokens_out):
    """Wrap the raw LLM report with a title, date header, and model/token footer."""
    now = datetime.now(MT)
    report = f"# Project Status — {project_name}\n\n"
    report += f"**{now.strftime('%A, %B %d, %Y')}** | Generated by Project Monitor Agent\n\n"
    report += f"---\n\n{report_md}\n\n"
    report += f"---\n*Model: {model} | Tokens: {tokens_in} in, {tokens_out} out*\n"
    return report


def _extract_summary(report_md):
    """Extract the '## Status Summary' section of the report for the briefing.

    Falls back to the first 200 chars of the whole report when the section
    is missing; the result is always capped at 200 chars.
    """
    summary_lines = []
    in_summary = False
    for line in report_md.split("\n"):
        if "## Status Summary" in line:
            in_summary = True
            continue
        if in_summary and line.startswith("## "):
            break
        if in_summary and line.strip():
            summary_lines.append(line.strip())
    return " ".join(summary_lines)[:200] if summary_lines else report_md[:200]
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Manual CLI entry point for running a single project monitor.
    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("--user-id", type=int, required=True)
    ap.add_argument("--instance-id", type=int, default=0)
    ap.add_argument("--project-name", required=True)
    ap.add_argument("--wiki-collection", default="")
    ap.add_argument("--gitea-repo", default="")
    opts = ap.parse_args()

    cfg = {
        "project_name": opts.project_name,
        "wiki_collection_id": opts.wiki_collection,
        "gitea_repo": opts.gitea_repo,
    }
    briefing_section, brief = run(cfg, user_id=opts.user_id, instance_id=opts.instance_id)
    print(briefing_section)
    print(f"\nSummary: {brief}")
||||||
@@ -328,6 +328,7 @@ def list_catalog(user: dict = Depends(require_auth), db: Session = Depends(get_d
|
|||||||
"category": e.category, "config_schema": e.config_schema or {},
|
"category": e.category, "config_schema": e.config_schema or {},
|
||||||
"default_config": e.default_config or {},
|
"default_config": e.default_config or {},
|
||||||
"supports_schedule": e.supports_schedule, "is_sub_agent": e.is_sub_agent,
|
"supports_schedule": e.supports_schedule, "is_sub_agent": e.is_sub_agent,
|
||||||
|
"requires_llm": e.requires_llm,
|
||||||
"enabled": e.id in user_instance_ids,
|
"enabled": e.id in user_instance_ids,
|
||||||
} for e in entries]
|
} for e in entries]
|
||||||
|
|
||||||
@@ -381,6 +382,14 @@ def create_instance(data: InstanceCreate, user: dict = Depends(require_auth), db
|
|||||||
catalog = db.query(AgentCatalog).filter(AgentCatalog.id == data.catalog_id).first()
|
catalog = db.query(AgentCatalog).filter(AgentCatalog.id == data.catalog_id).first()
|
||||||
if not catalog:
|
if not catalog:
|
||||||
raise HTTPException(status_code=404, detail="Agent type not found in catalog")
|
raise HTTPException(status_code=404, detail="Agent type not found in catalog")
|
||||||
|
# Enforce LLM requirement
|
||||||
|
if catalog.requires_llm:
|
||||||
|
u = db.query(User).filter(User.id == user["user_id"]).first()
|
||||||
|
user_llm = u.llm_config or {} if u else {}
|
||||||
|
has_user_llm = bool(user_llm.get("api_key"))
|
||||||
|
has_system_llm = db.query(LLMProvider).filter(LLMProvider.is_default == True).first() is not None
|
||||||
|
if not has_user_llm and not has_system_llm:
|
||||||
|
raise HTTPException(status_code=400, detail="This agent requires an LLM provider. Configure one via the LLM button in the header.")
|
||||||
config = {**(catalog.default_config or {}), **data.config}
|
config = {**(catalog.default_config or {}), **data.config}
|
||||||
inst = AgentInstance(
|
inst = AgentInstance(
|
||||||
user_id=user["user_id"],
|
user_id=user["user_id"],
|
||||||
@@ -482,11 +491,29 @@ def trigger_instance(instance_id: int, user: dict = Depends(require_auth), db: S
|
|||||||
subprocess.Popen(cmd, env=env, cwd=agent_dir)
|
subprocess.Popen(cmd, env=env, cwd=agent_dir)
|
||||||
return {"status": "triggered", "message": f"Running {catalog_id} for {u.display_name}"}
|
return {"status": "triggered", "message": f"Running {catalog_id} for {u.display_name}"}
|
||||||
|
|
||||||
|
if catalog_id == "project-monitor":
|
||||||
|
config_json = json.dumps(inst.config or {}).replace("'", "\\'")
|
||||||
|
cmd = ["python3", "-c",
|
||||||
|
f"import sys, json; sys.path.insert(0, '{agent_dir}'); "
|
||||||
|
f"from project_monitor import run; "
|
||||||
|
f"run(json.loads('{config_json}'), user_id={user['user_id']}, instance_id={instance_id})"]
|
||||||
|
subprocess.Popen(cmd, env=env, cwd=agent_dir)
|
||||||
|
return {"status": "triggered", "message": f"Running project monitor: {inst.name}"}
|
||||||
|
|
||||||
return {"status": "error", "message": f"Manual trigger not yet supported for {catalog_id}"}
|
return {"status": "error", "message": f"Manual trigger not yet supported for {catalog_id}"}
|
||||||
|
|
||||||
|
|
||||||
# --- Internal endpoints (no auth, for agent scripts) ---
|
# --- Internal endpoints (no auth, for agent scripts) ---
|
||||||
|
|
||||||
|
@app.get("/api/instances/by-user/{user_id}")
def get_user_instances(user_id: int, catalog_id: str = None, db: Session = Depends(get_db)):
    """Internal: get a user's instances, optionally filtered by catalog type."""
    q = db.query(AgentInstance).filter(
        AgentInstance.user_id == user_id,
        AgentInstance.status == "active",
    )
    if catalog_id:
        q = q.filter(AgentInstance.catalog_id == catalog_id)
    return [
        {"id": i.id, "catalog_id": i.catalog_id, "name": i.name, "config": i.config or {}}
        for i in q.all()
    ]
||||||
|
|
||||||
@app.get("/api/instances/{instance_id}/config")
|
@app.get("/api/instances/{instance_id}/config")
|
||||||
def get_instance_config(instance_id: int, db: Session = Depends(get_db)):
|
def get_instance_config(instance_id: int, db: Session = Depends(get_db)):
|
||||||
inst = db.query(AgentInstance).filter(AgentInstance.id == instance_id).first()
|
inst = db.query(AgentInstance).filter(AgentInstance.id == instance_id).first()
|
||||||
|
|||||||
@@ -30,6 +30,7 @@ class AgentCatalog(Base):
|
|||||||
default_config = Column(JSON, default=dict)
|
default_config = Column(JSON, default=dict)
|
||||||
supports_schedule = Column(Boolean, default=True)
|
supports_schedule = Column(Boolean, default=True)
|
||||||
is_sub_agent = Column(Boolean, default=False)
|
is_sub_agent = Column(Boolean, default=False)
|
||||||
|
requires_llm = Column(Boolean, default=False)
|
||||||
|
|
||||||
instances = relationship("AgentInstance", back_populates="catalog_entry")
|
instances = relationship("AgentInstance", back_populates="catalog_entry")
|
||||||
|
|
||||||
|
|||||||
@@ -61,8 +61,9 @@ body{font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,sans-serif;b
|
|||||||
.config-section h3{font-size:.95rem;margin-bottom:1rem}
|
.config-section h3{font-size:.95rem;margin-bottom:1rem}
|
||||||
.config-grid{display:grid;grid-template-columns:1fr 1fr;gap:.75rem}
|
.config-grid{display:grid;grid-template-columns:1fr 1fr;gap:.75rem}
|
||||||
.config-field label{display:block;font-size:.75rem;font-weight:500;color:var(--text-dim);text-transform:uppercase;letter-spacing:.04em;margin-bottom:.3rem}
|
.config-field label{display:block;font-size:.75rem;font-weight:500;color:var(--text-dim);text-transform:uppercase;letter-spacing:.04em;margin-bottom:.3rem}
|
||||||
.config-field input,.config-field select{width:100%;padding:.5rem .65rem;background:var(--surface);border:1px solid var(--border);border-radius:6px;color:var(--text);font-size:.85rem;outline:none}
|
.config-field input,.config-field select,.config-field textarea{width:100%;padding:.5rem .65rem;background:var(--surface);border:1px solid var(--border);border-radius:6px;color:var(--text);font-size:.85rem;outline:none;font-family:inherit}
|
||||||
.config-field input:focus,.config-field select:focus{border-color:var(--accent)}
|
.config-field input:focus,.config-field select:focus,.config-field textarea:focus{border-color:var(--accent)}
|
||||||
|
.config-field textarea{min-height:80px;resize:vertical}
|
||||||
.config-actions{display:flex;gap:.5rem;margin-top:1rem;align-items:center}
|
.config-actions{display:flex;gap:.5rem;margin-top:1rem;align-items:center}
|
||||||
.btn-save{padding:.5rem 1.25rem;background:var(--accent);color:#fff;border:none;border-radius:6px;font-size:.85rem;cursor:pointer}
|
.btn-save{padding:.5rem 1.25rem;background:var(--accent);color:#fff;border:none;border-radius:6px;font-size:.85rem;cursor:pointer}
|
||||||
.btn-save:hover{background:var(--accent-hover)}
|
.btn-save:hover{background:var(--accent-hover)}
|
||||||
@@ -187,9 +188,13 @@ function buildConfigForm(inst){
|
|||||||
if(f.type==='select'){
|
if(f.type==='select'){
|
||||||
html+=`<div class="config-field"><label>${f.label}</label><select id="cfg-${f.key.replace(/\./g,'-')}">${(f.options||[]).map(o=>`<option value="${o}" ${val===o?'selected':''}>${o}</option>`).join('')}</select></div>`;
|
html+=`<div class="config-field"><label>${f.label}</label><select id="cfg-${f.key.replace(/\./g,'-')}">${(f.options||[]).map(o=>`<option value="${o}" ${val===o?'selected':''}>${o}</option>`).join('')}</select></div>`;
|
||||||
} else {
|
} else {
|
||||||
const inputType=f.type==='number'?'number':f.type==='password'?'password':'text';
|
if(f.type==='textarea'){
|
||||||
const step=f.step?`step="${f.step}"`:'';
|
html+=`<div class="config-field" style="grid-column:1/-1"><label>${f.label}</label><textarea id="cfg-${f.key.replace(/\./g,'-')}" placeholder="${f.placeholder||''}">${val}</textarea></div>`;
|
||||||
html+=`<div class="config-field"><label>${f.label}</label><input type="${inputType}" ${step} id="cfg-${f.key.replace(/\./g,'-')}" value="${val}" placeholder="${f.placeholder||''}"></div>`;
|
} else {
|
||||||
|
const inputType=f.type==='number'?'number':f.type==='password'?'password':'text';
|
||||||
|
const step=f.step?`step="${f.step}"`:'';
|
||||||
|
html+=`<div class="config-field"><label>${f.label}</label><input type="${inputType}" ${step} id="cfg-${f.key.replace(/\./g,'-')}" value="${val}" placeholder="${f.placeholder||''}"></div>`;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -276,7 +281,7 @@ async function saveInstanceConfig(id){
|
|||||||
}
|
}
|
||||||
const el=document.getElementById('cfg-'+f.key.replace(/\./g,'-'));
|
const el=document.getElementById('cfg-'+f.key.replace(/\./g,'-'));
|
||||||
if(!el)continue;
|
if(!el)continue;
|
||||||
let val=el.value;
|
let val=el.tagName==='TEXTAREA'?el.value:el.value;
|
||||||
if(f.type==='number')val=parseFloat(val)||0;
|
if(f.type==='number')val=parseFloat(val)||0;
|
||||||
setNestedVal(config,f.key,val);
|
setNestedVal(config,f.key,val);
|
||||||
}
|
}
|
||||||
@@ -340,7 +345,7 @@ async function showCatalog(){
|
|||||||
<div class="catalog-grid">
|
<div class="catalog-grid">
|
||||||
${catalog.map(c=>`<div class="catalog-card">
|
${catalog.map(c=>`<div class="catalog-card">
|
||||||
<div style="display:flex;justify-content:space-between;align-items:start;margin-bottom:.3rem"><h4>${c.name}</h4><span class="badge ${c.category}">${c.category}</span></div>
|
<div style="display:flex;justify-content:space-between;align-items:start;margin-bottom:.3rem"><h4>${c.name}</h4><span class="badge ${c.category}">${c.category}</span></div>
|
||||||
<div class="cat-desc">${c.description}${c.is_sub_agent?' (sub-agent)':''}</div>
|
<div class="cat-desc">${c.description}${c.is_sub_agent?' (sub-agent)':''}${c.requires_llm?' <span class="badge" style="background:rgba(108,92,231,.15);color:var(--accent)">requires LLM</span>':''}</div>
|
||||||
<div class="cat-footer">
|
<div class="cat-footer">
|
||||||
<span style="font-size:.75rem;color:var(--text-dim)">${c.supports_schedule?'Schedulable':'Manual/sub-agent'}</span>
|
<span style="font-size:.75rem;color:var(--text-dim)">${c.supports_schedule?'Schedulable':'Manual/sub-agent'}</span>
|
||||||
${c.enabled?'<button class="btn-enable enabled">Enabled</button>':`<button class="btn-enable" onclick="enableAgent('${c.id}','${c.name}')">Enable</button>`}
|
${c.enabled?'<button class="btn-enable enabled">Enabled</button>':`<button class="btn-enable" onclick="enableAgent('${c.id}','${c.name}')">Enable</button>`}
|
||||||
@@ -353,6 +358,7 @@ async function showCatalog(){
|
|||||||
async function enableAgent(catalogId,name){
|
async function enableAgent(catalogId,name){
|
||||||
const res=await fetch(API+'/api/instances',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({catalog_id:catalogId,name:name})});
|
const res=await fetch(API+'/api/instances',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({catalog_id:catalogId,name:name})});
|
||||||
if(res.ok){closeModal();refresh()}
|
if(res.ok){closeModal();refresh()}
|
||||||
|
else{const err=await res.json();alert(err.detail||'Failed to enable agent')}
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- LLM Settings ---
|
// --- LLM Settings ---
|
||||||
|
|||||||
Reference in New Issue
Block a user