Macha is now a standalone NixOS flake that can be imported into other systems.

This provides:
- Independent versioning
- Easier reusability
- Cleaner separation of concerns
- Better development workflow

Includes:
- Complete autonomous system code
- NixOS module with full configuration options
- Queue-based architecture with priority system
- Chunked map-reduce for large outputs
- ChromaDB knowledge base
- Tool calling system
- Multi-host SSH management
- Gotify notification integration

All capabilities from DESIGN.md are preserved.
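A minimal sketch of how a consumer flake might import it. The input URL, the module attribute (`nixosModules.default`), and the option path (`services.macha.enable`) are assumptions for illustration, not taken from the repo; check the flake's actual outputs.

{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.macha.url = "git+https://example.com/macha.git";  # hypothetical URL

  outputs = { self, nixpkgs, macha, ... }: {
    nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        macha.nixosModules.default          # assumed module attribute name
        { services.macha.enable = true; }   # assumed option path
      ];
    };
  };
}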
#!/usr/bin/env python3
"""
Conversational Interface - Allows questioning Macha about decisions and system state
"""

import json
import requests
from typing import Dict, List, Any, Optional
from pathlib import Path

from agent import MachaAgent

class MachaConversation:
    """Conversational interface for Macha"""

    def __init__(
        self,
        ollama_host: str = "http://localhost:11434",
        model: str = "gpt-oss:latest",
        state_dir: Path = Path("/var/lib/macha")
    ):
        self.ollama_host = ollama_host
        self.model = model
        self.state_dir = state_dir
        self.decision_log = self.state_dir / "decisions.jsonl"
        self.approval_queue = self.state_dir / "approval_queue.json"
        self.orchestrator_log = self.state_dir / "orchestrator.log"

        # Initialize agent with tool support and queue
        self.agent = MachaAgent(
            ollama_host=ollama_host,
            model=model,
            state_dir=state_dir,
            enable_tools=True,
            use_queue=True,
            priority="INTERACTIVE"
        )

    def ask(self, question: str, include_context: bool = True) -> str:
        """Ask Macha a question with optional system context"""

        context = ""
        if include_context:
            context = self._gather_context()

        # Build messages for tool-aware chat
        content = self._create_conversational_prompt(question, context)
        messages = [{"role": "user", "content": content}]

        return self.agent._query_ollama_with_tools(messages)

    def discuss_action(self, action_index: int) -> str:
        """Discuss a specific queued action by its queue position (0-based index)"""

        action = self._get_action_from_queue(action_index)
        if not action:
            return (
                f"No action found at queue position {action_index}. "
                "Use 'macha-approve list' to see available actions."
            )

        context = self._gather_context()
        action_context = json.dumps(action, indent=2)

        content = f"""TASK: DISCUSS PROPOSED ACTION
================================================================================

A user is asking about a proposed action in your approval queue.

QUEUED ACTION (Queue Position #{action_index}):
{action_context}

RECENT SYSTEM CONTEXT:
{context}

The user wants to discuss this action. Explain:
1. Why you proposed this action
2. What problem it solves
3. The risks involved
4. What could go wrong
5. Alternative approaches if any

Be conversational, helpful, and honest about uncertainties.
"""

        messages = [{"role": "user", "content": content}]
        return self.agent._query_ollama_with_tools(messages)

    def _gather_context(self) -> str:
        """Gather relevant system context for the conversation"""

        context_parts = []

        # System infrastructure from ChromaDB
        try:
            from context_db import ContextDatabase
            db = ContextDatabase()
            systems = db.get_all_systems()

            if systems:
                context_parts.append("INFRASTRUCTURE:")
                for system in systems:
                    context_parts.append(f"  - {system['hostname']} ({system.get('type', 'unknown')})")
                    if system.get('config_repo'):
                        context_parts.append(f"    Config Repo: {system['config_repo']}")
                        context_parts.append(f"    Branch: {system.get('config_branch', 'unknown')}")
                    if system.get('capabilities'):
                        context_parts.append(f"    Capabilities: {', '.join(system['capabilities'])}")
        except Exception:
            # ChromaDB not available, skip
            pass

        # Recent decisions
        recent_decisions = self._get_recent_decisions(5)
        if recent_decisions:
            context_parts.append("\nRECENT DECISIONS:")
            for i, dec in enumerate(recent_decisions, 1):
                timestamp = dec.get("timestamp", "unknown")
                analysis = dec.get("analysis", {})
                status = analysis.get("status", "unknown")
                context_parts.append(f"{i}. [{timestamp}] Status: {status}")
                if "issues" in analysis:
                    for issue in analysis.get("issues", [])[:3]:
                        context_parts.append(f"   - {issue.get('description', 'N/A')}")

        # Pending approvals
        pending = self._get_pending_approvals()
        if pending:
            context_parts.append(f"\nPENDING APPROVALS: {len(pending)} action(s) awaiting approval")

        # Recent log excerpts (last 10 lines)
        recent_logs = self._get_recent_logs(10)
        if recent_logs:
            context_parts.append("\nRECENT LOG ENTRIES:")
            context_parts.extend(recent_logs)

        return "\n".join(context_parts)

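    # Illustrative _gather_context() output (shape only; the hostname and
    # values below are hypothetical, not from a real system):
    #
    #   INFRASTRUCTURE:
    #     - web01 (nixos)
    #       Config Repo: <repo-url>
    #       Branch: main
    #
    #   RECENT DECISIONS:
    #   1. [2024-01-01T00:00:00] Status: degraded
    #      - disk usage above threshold
    #
    #   PENDING APPROVALS: 1 action(s) awaiting approval
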
    def _create_conversational_prompt(self, question: str, context: str) -> str:
        """Create a conversational prompt"""

        return f"""{MachaAgent.SYSTEM_PROMPT}

TASK: ANSWER QUESTION
================================================================================

You monitor system health, analyze issues using AI, and propose fixes. Be helpful,
honest about what you know and don't know, and reference the context provided below.

SYSTEM CONTEXT:
{context if context else "No recent activity"}

USER QUESTION:
{question}

Respond conversationally and helpfully. If the question is about your recent decisions
or actions, reference the context above. If you don't have enough information, say so.
Keep responses concise but informative.
"""

    def _query_ollama(self, prompt: str, temperature: float = 0.7) -> str:
        """Query Ollama API"""
        try:
            response = requests.post(
                f"{self.ollama_host}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": False,
                    # Generation parameters belong under "options" in Ollama's API;
                    # a top-level "temperature" key is ignored.
                    "options": {"temperature": temperature},
                },
                timeout=60
            )
            response.raise_for_status()
            return response.json().get("response", "")
        except requests.exceptions.HTTPError as e:
            error_detail = ""
            try:
                error_detail = f" - {e.response.text}"
            except Exception:
                pass
            return f"Error: Ollama returned HTTP {e.response.status_code}{error_detail}"
        except Exception as e:
            return f"Error querying Ollama: {str(e)}"

    def _get_recent_decisions(self, count: int = 5) -> List[Dict[str, Any]]:
        """Get recent decisions from log"""
        if not self.decision_log.exists():
            return []

        decisions = []
        try:
            with open(self.decision_log, 'r') as f:
                for line in f:
                    if line.strip():
                        try:
                            decisions.append(json.loads(line))
                        except json.JSONDecodeError:
                            # Skip malformed lines
                            pass
        except OSError:
            pass

        return decisions[-count:]

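    # Sketch of one decisions.jsonl record, inferred from the fields read in
    # _gather_context() above (not an authoritative schema):
    #
    #   {"timestamp": "...", "analysis": {"status": "...",
    #    "issues": [{"description": "..."}]}}
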
    def _get_pending_approvals(self) -> List[Dict[str, Any]]:
        """Get pending approvals from queue"""
        if not self.approval_queue.exists():
            return []

        try:
            with open(self.approval_queue, 'r') as f:
                data = json.load(f)
            # Queue is a JSON array, not an object with "pending" key
            if isinstance(data, list):
                return data
            return data.get("pending", [])
        except (OSError, json.JSONDecodeError):
            return []

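    # Sketch of one queue entry, inferred from the fields read by the
    # follow-up handler under __main__ below (not an authoritative schema):
    #
    #   {"proposal": {"diagnosis": "...", "proposed_action": "...",
    #    "action_type": "...", "risk_level": "...",
    #    "commands": ["..."], "reasoning": "..."}}
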
    def _get_action_from_queue(self, action_index: int) -> Optional[Dict[str, Any]]:
        """Get a specific action from the queue by index"""
        pending = self._get_pending_approvals()
        if 0 <= action_index < len(pending):
            return pending[action_index]
        return None

    def _get_recent_logs(self, count: int = 10) -> List[str]:
        """Get recent orchestrator log lines"""
        if not self.orchestrator_log.exists():
            return []

        try:
            with open(self.orchestrator_log, 'r') as f:
                lines = f.readlines()
            return [line.strip() for line in lines[-count:] if line.strip()]
        except OSError:
            return []

if __name__ == "__main__":
    import sys
    import argparse

    parser = argparse.ArgumentParser(description="Ask Macha a question or discuss an action")
    parser.add_argument("--discuss", type=int, metavar="INDEX",
                        help="Discuss a queued action by queue position (0-based)")
    parser.add_argument("--follow-up", type=str, metavar="QUESTION",
                        help="Follow-up question about the action")
    parser.add_argument("question", nargs="*", help="Your question for Macha")
    parser.add_argument("--no-context", action="store_true", help="Don't include system context")

    args = parser.parse_args()

    # Load config if available
    config_file = Path("/etc/macha-autonomous/config.json")
    ollama_host = "http://localhost:11434"
    model = "gpt-oss:latest"

    if config_file.exists():
        try:
            with open(config_file, 'r') as f:
                config = json.load(f)
            ollama_host = config.get("ollama_host", ollama_host)
            model = config.get("model", model)
        except (OSError, json.JSONDecodeError):
            pass

    conversation = MachaConversation(
        ollama_host=ollama_host,
        model=model
    )

    if args.discuss is not None:
        if args.follow_up:
            # Follow-up question about a specific action
            action = conversation._get_action_from_queue(args.discuss)
            if not action:
                print(f"No action found at queue position {args.discuss}. "
                      "Use 'macha-approve list' to see available actions.")
                sys.exit(1)

            # Build context with the action details
            proposal = action.get('proposal', {})
            action_context = f"""
QUEUED ACTION #{args.discuss}:
Diagnosis: {proposal.get('diagnosis', 'N/A')}
Proposed Action: {proposal.get('proposed_action', 'N/A')}
Action Type: {proposal.get('action_type', 'N/A')}
Risk Level: {proposal.get('risk_level', 'N/A')}
Commands: {json.dumps(proposal.get('commands', []), indent=2)}
Reasoning: {proposal.get('reasoning', 'N/A')}

FOLLOW-UP QUESTION:
{args.follow_up}
"""

            # Query the AI with the action context
            response = conversation._query_ollama(f"""{MachaAgent.SYSTEM_PROMPT}

TASK: ANSWER FOLLOW-UP QUESTION ABOUT QUEUED ACTION
================================================================================

You are answering a follow-up question about a proposed fix that is awaiting approval.
Be helpful and answer directly. If the user is concerned about risks, explain them clearly.
If they ask about alternatives, suggest them.

{action_context}

RESPOND CONCISELY AND DIRECTLY.
""")

        else:
            # Initial discussion about the action
            response = conversation.discuss_action(args.discuss)
    elif args.question:
        # Ask a general question
        question = " ".join(args.question)
        response = conversation.ask(question, include_context=not args.no_context)
    else:
        parser.print_help()
        sys.exit(1)

    # Only print formatted output for initial discussion, not for follow-ups
    if args.follow_up:
        print(response)
    else:
        print("\n" + "=" * 60)
        print("MACHA:")
        print("=" * 60)
        print(response)
        print("=" * 60 + "\n")
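
Example invocations (a sketch; the script's installed name is not shown in this listing, so conversation.py is assumed):

    python3 conversation.py "why is disk usage growing on the build host?"
    python3 conversation.py --discuss 0
    python3 conversation.py --discuss 0 --follow-up "what happens if the restart fails?"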