Initial commit: add .gitignore and README

defiQUG
2026-02-09 21:51:42 -08:00
commit c052b07662
3146 changed files with 808305 additions and 0 deletions

fusionagi/agents/__init__.py Normal file
View File

@@ -0,0 +1,21 @@
"""Agents: base, planner, reasoner, executor, critic, adversarial reviewer, head, witness. See fusionagi.multi_agent for Supervisor, Coordinator, Pool."""
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.agents.planner import PlannerAgent
from fusionagi.agents.reasoner import ReasonerAgent
from fusionagi.agents.executor import ExecutorAgent
from fusionagi.agents.critic import CriticAgent
from fusionagi.agents.adversarial_reviewer import AdversarialReviewerAgent
from fusionagi.agents.head_agent import HeadAgent
from fusionagi.agents.witness_agent import WitnessAgent
__all__ = [
"BaseAgent",
"PlannerAgent",
"ReasonerAgent",
"ExecutorAgent",
"CriticAgent",
"AdversarialReviewerAgent",
"HeadAgent",
"WitnessAgent",
]

fusionagi/agents/adversarial_reviewer.py Normal file
View File

@@ -0,0 +1,15 @@
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.schemas.messages import AgentMessageEnvelope
from fusionagi._logger import logger
import json
class AdversarialReviewerAgent(BaseAgent):
    """Adversarial reviewer: probes outputs for errors and risks; emits review_ready."""

    def __init__(self, identity: str = "adversarial_reviewer", adapter=None) -> None:
        super().__init__(identity=identity, role="Adversarial Reviewer", objective="Find errors and risks", memory_access=True, tool_permissions=[])
        self._adapter = adapter

    def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
        """On review_request, return review_ready; without an adapter, echo any provided issues."""
        if envelope.message.intent != "review_request":
            return None
        payload = envelope.message.payload
        issues = payload.get("issues", ["Enable LLM for detailed review"])
        return envelope.create_response("review_ready", payload={"issues": issues, "risk_level": "medium"})

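A minimal usage sketch for the reviewer above, assuming AgentMessage and AgentMessageEnvelope accept the keyword arguments the other agents in this commit use (sender, recipient, intent, payload; task_id on the envelope) and that create_response returns a response envelope, per the handle_message annotation. The sender id, task id, and sample issue are illustrative only.

from fusionagi.agents.adversarial_reviewer import AdversarialReviewerAgent
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope

reviewer = AdversarialReviewerAgent()
request = AgentMessageEnvelope(
    message=AgentMessage(
        sender="supervisor",          # hypothetical sender id
        recipient=reviewer.identity,
        intent="review_request",
        payload={"issues": ["Unvalidated user input in step_2"]},  # illustrative issue
    ),
    task_id="task-001",               # hypothetical task id
)
response = reviewer.handle_message(request)
# Without an adapter the agent echoes the provided issues with risk_level "medium".
print(response.message.intent, response.message.payload)
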
fusionagi/agents/base_agent.py Normal file
View File

@@ -0,0 +1,29 @@
"""Base agent interface: identity, role, objective, memory/tool scope, handle_message."""
from abc import ABC, abstractmethod
from typing import Any
from fusionagi.schemas.messages import AgentMessageEnvelope
class BaseAgent(ABC):
"""Abstract base agent: identity, role, objective, memory access, tool permissions."""
def __init__(
self,
identity: str,
role: str,
objective: str,
memory_access: bool | str = True,
tool_permissions: list[str] | str | None = None,
) -> None:
self.identity = identity
self.role = role
self.objective = objective
self.memory_access = memory_access
self.tool_permissions = tool_permissions if tool_permissions is not None else []
@abstractmethod
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""Process an incoming message; return response envelope or None."""
...

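The abstract interface above is small; a sketch of a custom subclass (a hypothetical EchoAgent, not part of this commit) shows the expected contract: filter on intent, return None for unrelated messages, and answer via create_response as the built-in agents do.

from fusionagi.agents.base_agent import BaseAgent
from fusionagi.schemas.messages import AgentMessageEnvelope

class EchoAgent(BaseAgent):
    """Hypothetical example agent: replies to echo_request with the same payload."""

    def __init__(self) -> None:
        super().__init__(
            identity="echo",
            role="Echo",
            objective="Demonstrate the BaseAgent contract",
            memory_access=False,
            tool_permissions=[],
        )

    def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
        if envelope.message.intent != "echo_request":
            return None  # ignore unrelated intents, as the built-in agents do
        return envelope.create_response("echo_reply", payload=envelope.message.payload)
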
fusionagi/agents/critic.py Normal file
View File

@@ -0,0 +1,95 @@
"""Critic / Evaluator agent: evaluates task outcome, error analysis, suggested improvements."""
import json
from typing import Any
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.adapters.base import LLMAdapter
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi._logger import logger
class CriticAgent(BaseAgent):
"""Evaluates task outcome and execution trace; emits evaluation_ready."""
def __init__(
self,
identity: str = "critic",
adapter: LLMAdapter | None = None,
) -> None:
super().__init__(
identity=identity,
role="Critic",
objective="Evaluate outcomes and suggest improvements",
memory_access=True,
tool_permissions=[],
)
self._adapter = adapter
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""On evaluate_request, return evaluation_ready with score, analysis, suggestions."""
if envelope.message.intent != "evaluate_request":
return None
logger.info(
"Critic handle_message",
extra={"recipient": self.identity, "intent": envelope.message.intent},
)
payload = envelope.message.payload
task_id = envelope.task_id
outcome = payload.get("outcome", "unknown")
trace = payload.get("trace", [])
plan = payload.get("plan")
if self._adapter:
evaluation = self._evaluate_with_llm(outcome, trace, plan)
else:
evaluation = {
"success": outcome == "completed",
"score": 1.0 if outcome == "completed" else 0.0,
"error_analysis": [],
"suggestions": ["Enable LLM for detailed evaluation"],
}
logger.info(
"Critic response",
extra={"recipient": self.identity, "response_intent": "evaluation_ready"},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="evaluation_ready",
payload={"evaluation": evaluation},
),
task_id=task_id,
correlation_id=envelope.correlation_id,
)
def _evaluate_with_llm(
self,
outcome: str,
trace: list[dict[str, Any]],
plan: dict[str, Any] | None,
) -> dict[str, Any]:
"""Use adapter to produce evaluation (score, error_analysis, suggestions)."""
context = f"Outcome: {outcome}\nTrace (last 5): {json.dumps(trace[-5:], default=str)}\n"
if plan:
context += f"Plan: {json.dumps(plan.get('steps', [])[:5], default=str)}"
messages = [
{"role": "system", "content": "You evaluate task execution. Output JSON: {\"success\": bool, \"score\": 0-1, \"error_analysis\": [], \"suggestions\": []}. Output only JSON."},
{"role": "user", "content": context},
]
try:
raw = self._adapter.complete(messages)
for start in ("```json", "```"):
if raw.strip().startswith(start):
raw = raw.strip()[len(start):].strip()
if raw.endswith("```"):
raw = raw[:-3].strip()
return json.loads(raw)
except Exception:
logger.exception("Critic evaluation parse failed, using fallback")
return {
"success": outcome == "completed",
"score": 0.5,
"error_analysis": ["Evaluation parse failed"],
"suggestions": [],
}

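A sketch of driving the critic's no-adapter fallback path. The payload keys (outcome, trace, plan) come from handle_message above; the sample trace entry, sender id, and task id are illustrative.

from fusionagi.agents.critic import CriticAgent
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope

critic = CriticAgent()  # no adapter: heuristic evaluation path
request = AgentMessageEnvelope(
    message=AgentMessage(
        sender="coordinator",            # hypothetical sender id
        recipient=critic.identity,
        intent="evaluate_request",
        payload={
            "outcome": "completed",
            "trace": [{"tool": "search", "error": None}],  # illustrative trace entry
            "plan": {"steps": [{"id": "step_1", "description": "Analyze goal"}]},
        },
    ),
    task_id="task-001",
)
reply = critic.handle_message(request)
# Expected without an adapter: intent "evaluation_ready" and score 1.0 for a "completed" outcome.
print(reply.message.intent, reply.message.payload["evaluation"])
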
fusionagi/agents/executor.py Normal file
View File

@@ -0,0 +1,236 @@
"""Executor agent: receives execute_step, invokes tool via safe runner, returns step_done/step_failed."""
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi.schemas.plan import Plan
from fusionagi.planning import get_step
from fusionagi.tools.registry import ToolRegistry
from fusionagi.tools.runner import run_tool
from fusionagi._logger import logger
if TYPE_CHECKING:
from fusionagi.core.state_manager import StateManager
from fusionagi.governance.guardrails import Guardrails
from fusionagi.governance.rate_limiter import RateLimiter
from fusionagi.governance.access_control import AccessControl
from fusionagi.governance.override import OverrideHooks
from fusionagi.memory.episodic import EpisodicMemory
class ExecutorAgent(BaseAgent):
"""
Executes steps: maps step to tool call, runs via safe runner, emits step_done/step_failed.
Supports full governance integration:
- Guardrails: Pre/post checks for tool invocations
- RateLimiter: Limits tool invocation rate per agent/tool
- AccessControl: Policy-based tool access control
- OverrideHooks: Human-in-the-loop for high-risk operations
- EpisodicMemory: Records step outcomes for learning
"""
def __init__(
self,
identity: str = "executor",
registry: ToolRegistry | None = None,
state_manager: StateManager | None = None,
guardrails: Guardrails | None = None,
rate_limiter: RateLimiter | None = None,
access_control: AccessControl | None = None,
override_hooks: OverrideHooks | None = None,
episodic_memory: EpisodicMemory | None = None,
) -> None:
"""
Initialize the executor agent.
Args:
identity: Agent identifier.
registry: Tool registry for tool lookup.
state_manager: State manager for trace storage.
guardrails: Guardrails for pre/post checks.
rate_limiter: Rate limiter for tool invocation throttling.
access_control: Access control for policy-based tool access.
override_hooks: Override hooks for human-in-the-loop.
episodic_memory: Episodic memory for recording step outcomes.
"""
super().__init__(
identity=identity,
role="Executor",
objective="Execute plan steps via tools",
memory_access=False,
tool_permissions=["*"],
)
self._registry = registry or ToolRegistry()
self._state = state_manager
self._guardrails = guardrails
self._rate_limiter = rate_limiter
self._access_control = access_control
self._override_hooks = override_hooks
self._episodic_memory = episodic_memory
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""On execute_step, run tool and return step_done or step_failed."""
if envelope.message.intent != "execute_step":
return None
logger.info(
"Executor handle_message",
extra={"recipient": self.identity, "intent": envelope.message.intent},
)
payload = envelope.message.payload
task_id = envelope.task_id
step_id = payload.get("step_id")
plan_dict = payload.get("plan")
if not step_id or not plan_dict:
return self._fail(task_id, envelope.message.sender, step_id or "?", "missing step_id or plan")
plan = Plan.from_dict(plan_dict)
step = get_step(plan, step_id)
if not step:
return self._fail(task_id, envelope.message.sender, step_id, "step not found")
tool_name = step.tool_name or payload.get("tool_name")
tool_args = step.tool_args or payload.get("tool_args", {})
if not tool_name:
return self._fail(task_id, envelope.message.sender, step_id, "no tool_name")
tool = self._registry.get(tool_name)
if not tool:
return self._fail(task_id, envelope.message.sender, step_id, f"tool not found: {tool_name}")
# Check tool registry permissions
if not self._registry.allowed_for(tool_name, self.tool_permissions):
return self._fail(task_id, envelope.message.sender, step_id, "permission denied")
# Check access control policy
if self._access_control is not None:
if not self._access_control.allowed(self.identity, tool_name, task_id):
logger.info(
"Executor access_control denied",
extra={"tool_name": tool_name, "agent_id": self.identity, "task_id": task_id},
)
return self._fail(task_id, envelope.message.sender, step_id, "access control denied")
# Check rate limiter
if self._rate_limiter is not None:
rate_key = f"{self.identity}:{tool_name}"
allowed, reason = self._rate_limiter.allow(rate_key)
if not allowed:
logger.info(
"Executor rate_limiter denied",
extra={"tool_name": tool_name, "key": rate_key, "reason": reason},
)
return self._fail(task_id, envelope.message.sender, step_id, reason)
# Check guardrails pre-check
if self._guardrails is not None:
pre_result = self._guardrails.pre_check(tool_name, tool_args)
logger.info(
"Executor guardrail pre_check",
extra={"tool_name": tool_name, "allowed": pre_result.allowed},
)
if not pre_result.allowed:
return self._fail(
task_id, envelope.message.sender, step_id,
pre_result.error_message or "Guardrails pre-check failed",
)
if pre_result.sanitized_args is not None:
tool_args = pre_result.sanitized_args
# Check override hooks for high-risk operations
if self._override_hooks is not None and tool.manufacturing:
proceed = self._override_hooks.fire(
"tool_execution",
{"tool_name": tool_name, "args": tool_args, "task_id": task_id, "step_id": step_id},
)
if not proceed:
logger.info(
"Executor override_hooks blocked",
extra={"tool_name": tool_name, "step_id": step_id},
)
return self._fail(
task_id, envelope.message.sender, step_id,
"Override hook blocked execution",
)
# Execute the tool
result, log_entry = run_tool(tool, tool_args)
logger.info(
"Executor tool run",
extra={"tool_name": tool_name, "step_id": step_id, "error": log_entry.get("error")},
)
# Check guardrails post-check
if self._guardrails is not None and not log_entry.get("error"):
post_ok, post_reason = self._guardrails.post_check(tool_name, result)
if not post_ok:
log_entry["error"] = f"Post-check failed: {post_reason}"
log_entry["post_check_failed"] = True
logger.info(
"Executor guardrail post_check failed",
extra={"tool_name": tool_name, "reason": post_reason},
)
# Record trace in state manager
if self._state:
self._state.append_trace(task_id or "", log_entry)
# Record in episodic memory
if self._episodic_memory:
self._episodic_memory.append(
task_id=task_id or "",
event={
"type": "step_execution",
"step_id": step_id,
"tool_name": tool_name,
"success": not log_entry.get("error"),
"duration_seconds": log_entry.get("duration_seconds"),
},
)
if log_entry.get("error"):
return self._fail(
task_id, envelope.message.sender, step_id,
log_entry["error"],
log_entry=log_entry,
)
logger.info(
"Executor response",
extra={"recipient": self.identity, "response_intent": "step_done"},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="step_done",
payload={
"step_id": step_id,
"result": result,
"log_entry": log_entry,
},
),
task_id=task_id,
correlation_id=envelope.correlation_id,
)
def _fail(
self,
task_id: str | None,
recipient: str,
step_id: str,
error: str,
log_entry: dict[str, Any] | None = None,
) -> AgentMessageEnvelope:
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=recipient,
intent="step_failed",
payload={
"step_id": step_id,
"error": error,
"log_entry": log_entry or {},
},
),
task_id=task_id,
)

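A sketch of the execute_step message shape the executor expects, assuming Plan.from_dict accepts the planner's step format (id, description, dependencies) and that an empty ToolRegistry simply has no "search" tool registered. The run should therefore exercise the step_failed path rather than invoke anything; all ids and arguments are illustrative.

from fusionagi.agents.executor import ExecutorAgent
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi.tools.registry import ToolRegistry

executor = ExecutorAgent(registry=ToolRegistry())  # empty registry, no governance hooks
plan_dict = {
    "steps": [{"id": "step_1", "description": "Search the docs", "dependencies": []}],
    "fallback_paths": [],
}
request = AgentMessageEnvelope(
    message=AgentMessage(
        sender="coordinator",            # hypothetical sender id
        recipient=executor.identity,
        intent="execute_step",
        # tool_name/tool_args ride along in the payload; handle_message falls back to
        # them when the plan step itself carries no tool binding.
        payload={
            "step_id": "step_1",
            "plan": plan_dict,
            "tool_name": "search",
            "tool_args": {"query": "fusionagi"},
        },
    ),
    task_id="task-001",
)
reply = executor.handle_message(request)
# With no registered "search" tool this is expected to come back as step_failed.
print(reply.message.intent, reply.message.payload.get("error"))
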
fusionagi/agents/head_agent.py Normal file
View File

@@ -0,0 +1,232 @@
"""Dvādaśa head agent base: structured output via LLM or native reasoning."""
from typing import Any, Protocol, runtime_checkable
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.adapters.base import LLMAdapter
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi.schemas.head import HeadId, HeadOutput, HeadClaim, HeadRisk
from fusionagi.schemas.grounding import Citation
from fusionagi._logger import logger
@runtime_checkable
class ReasoningProvider(Protocol):
"""Protocol for native reasoning: produce HeadOutput without external APIs."""
def produce_head_output(self, head_id: HeadId, prompt: str) -> HeadOutput:
"""Produce structured HeadOutput for the given head and prompt."""
...
def _head_output_json_schema() -> dict[str, Any]:
"""JSON schema for HeadOutput for LLM structured generation."""
return {
"type": "object",
"required": ["head_id", "summary"],
"properties": {
"head_id": {
"type": "string",
"enum": [h.value for h in HeadId if h != HeadId.WITNESS],
},
"summary": {"type": "string"},
"claims": {
"type": "array",
"items": {
"type": "object",
"properties": {
"claim_text": {"type": "string"},
"confidence": {"type": "number", "minimum": 0, "maximum": 1},
"evidence": {
"type": "array",
"items": {
"type": "object",
"properties": {
"source_id": {"type": "string"},
"excerpt": {"type": "string"},
"confidence": {"type": "number"},
},
},
},
"assumptions": {"type": "array", "items": {"type": "string"}},
},
},
},
"risks": {
"type": "array",
"items": {
"type": "object",
"properties": {
"description": {"type": "string"},
"severity": {"type": "string"},
},
},
},
"questions": {"type": "array", "items": {"type": "string"}},
"recommended_actions": {"type": "array", "items": {"type": "string"}},
"tone_guidance": {"type": "string"},
},
}
class HeadAgent(BaseAgent):
"""
Dvādaśa head agent: produces structured HeadOutput from user prompt.
Uses LLMAdapter.complete_structured with JSON schema.
"""
def __init__(
self,
head_id: HeadId,
role: str,
objective: str,
system_prompt: str,
adapter: LLMAdapter | None = None,
tool_permissions: list[str] | None = None,
reasoning_provider: "ReasoningProvider | None" = None,
) -> None:
if head_id == HeadId.WITNESS:
raise ValueError("HeadAgent is for content heads only; use WitnessAgent for Witness")
super().__init__(
identity=head_id.value,
role=role,
objective=objective,
memory_access=True,
tool_permissions=tool_permissions or [],
)
self._head_id = head_id
self._system_prompt = system_prompt
self._adapter = adapter
self._reasoning_provider = reasoning_provider
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""On head_request, produce HeadOutput and return head_output envelope."""
if envelope.message.intent != "head_request":
return None
payload = envelope.message.payload or {}
user_prompt = payload.get("prompt", "")
logger.info(
"HeadAgent handle_message",
extra={"head_id": self._head_id.value, "intent": envelope.message.intent},
)
output = self._produce_output(user_prompt)
if output is None:
return envelope.create_response(
"head_failed",
payload={"error": "Failed to produce head output", "head_id": self._head_id.value},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="head_output",
payload={"head_output": output.model_dump()},
),
task_id=envelope.task_id,
correlation_id=envelope.correlation_id,
)
def _produce_output(self, user_prompt: str) -> HeadOutput | None:
"""Produce HeadOutput via native reasoning or LLM adapter."""
# Prefer native reasoning when available (no external APIs)
if self._reasoning_provider is not None:
try:
return self._reasoning_provider.produce_head_output(
self._head_id, user_prompt or "(No prompt provided)"
)
except Exception as e:
logger.warning(
"Native reasoning failed, falling back",
extra={"head_id": self._head_id.value, "error": str(e)},
)
if not self._adapter:
return self._fallback_output(user_prompt)
messages = [
{"role": "system", "content": self._system_prompt},
{"role": "user", "content": user_prompt or "(No prompt provided)"},
]
raw = self._adapter.complete_structured(
messages,
schema=_head_output_json_schema(),
temperature=0.3,
)
if not isinstance(raw, dict):
logger.warning(
"HeadAgent structured output invalid",
extra={"head_id": self._head_id.value, "raw_type": type(raw).__name__},
)
return self._fallback_output(user_prompt)
return self._parse_output(raw)
def _parse_output(self, raw: dict[str, Any]) -> HeadOutput | None:
"""Parse raw dict into HeadOutput."""
try:
claims = []
for c in raw.get("claims", []):
evidence = [
Citation(
source_id=e.get("source_id", ""),
excerpt=e.get("excerpt", ""),
confidence=e.get("confidence", 1.0),
)
for e in c.get("evidence", [])
]
claims.append(
HeadClaim(
claim_text=c.get("claim_text", ""),
confidence=float(c.get("confidence", 0.5)),
evidence=evidence,
assumptions=c.get("assumptions", []),
)
)
risks = [
HeadRisk(
description=r.get("description", ""),
severity=r.get("severity", "medium"),
)
for r in raw.get("risks", [])
]
return HeadOutput(
head_id=self._head_id,
summary=raw.get("summary", "No summary"),
claims=claims,
risks=risks,
questions=raw.get("questions", []),
recommended_actions=raw.get("recommended_actions", []),
tone_guidance=raw.get("tone_guidance", ""),
)
except Exception as e:
logger.exception(
"HeadAgent parse_output failed",
extra={"head_id": self._head_id.value, "error": str(e)},
)
return None
def _fallback_output(self, user_prompt: str) -> HeadOutput:
"""Fallback when both reasoning provider and adapter fail or are absent."""
return HeadOutput(
head_id=self._head_id,
summary=f"{self.role} head: Unable to produce structured analysis for this prompt.",
claims=[
HeadClaim(
claim_text="Analysis requires reasoning provider or LLM adapter.",
confidence=0.0,
evidence=[],
assumptions=[],
),
],
risks=[HeadRisk(description="No reasoning provider or adapter configured", severity="high")],
questions=[],
recommended_actions=["Configure NativeReasoningProvider or an LLM adapter for this head"],
tone_guidance="",
)

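Because ReasoningProvider is a runtime-checkable Protocol, any object with a matching produce_head_output can drive a head without an LLM. A minimal sketch, assuming HeadId.LOGIC exists (it appears in the role map in the next file) and reusing only the HeadOutput and HeadClaim fields shown in _fallback_output; the stub summary and system prompt are placeholders, and a head_request envelope built as in the earlier examples would then yield a head_output reply.

from fusionagi.agents.head_agent import HeadAgent
from fusionagi.schemas.head import HeadId, HeadOutput, HeadClaim

class StubReasoningProvider:
    """Hypothetical provider: satisfies the ReasoningProvider protocol without an LLM."""

    def produce_head_output(self, head_id: HeadId, prompt: str) -> HeadOutput:
        return HeadOutput(
            head_id=head_id,
            summary=f"Stub analysis of: {prompt[:60]}",  # illustrative summary
            claims=[HeadClaim(claim_text="Prompt received.", confidence=0.4, evidence=[], assumptions=[])],
            risks=[],
            questions=[],
            recommended_actions=[],
            tone_guidance="",
        )

logic_head = HeadAgent(
    head_id=HeadId.LOGIC,
    role="Logic",
    objective="Correctness, contradictions, formal checks",
    system_prompt="You are the Logic head.",  # placeholder; real prompts come from fusionagi.prompts.heads
    reasoning_provider=StubReasoningProvider(),
)
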
fusionagi/agents/heads.py Normal file
View File

@@ -0,0 +1,104 @@
"""Dvādaśa content head agents: Logic, Research, Systems, Strategy, etc."""
from typing import Any
from fusionagi.agents.head_agent import HeadAgent
from fusionagi.adapters.base import LLMAdapter
from fusionagi.reasoning.native import NativeReasoningProvider
from fusionagi.schemas.head import HeadId
from fusionagi.prompts.heads import get_head_prompt
def create_head_agent(
head_id: HeadId,
adapter: LLMAdapter | None = None,
tool_permissions: list[str] | None = None,
reasoning_provider: NativeReasoningProvider | None = None,
use_native_reasoning: bool = True,
) -> HeadAgent:
"""Create a HeadAgent for the given head_id.
When adapter is None and use_native_reasoning is True, uses NativeReasoningProvider
for independent symbolic reasoning (no external LLM calls).
"""
if head_id == HeadId.WITNESS:
raise ValueError("Use WitnessAgent for Witness; HeadAgent is for content heads only")
# Use native reasoning when no adapter and use_native_reasoning, or when explicitly provided
provider = reasoning_provider
if provider is None and use_native_reasoning and adapter is None:
provider = NativeReasoningProvider()
role_map = {
HeadId.LOGIC: ("Logic", "Correctness, contradictions, formal checks"),
HeadId.RESEARCH: ("Research", "Retrieval, source quality, citations"),
HeadId.SYSTEMS: ("Systems", "Architecture, dependencies, scalability"),
HeadId.STRATEGY: ("Strategy", "Roadmap, prioritization, tradeoffs"),
HeadId.PRODUCT: ("Product/UX", "Interaction design, user flows"),
HeadId.SECURITY: ("Security", "Threats, auth, secrets, abuse vectors"),
HeadId.SAFETY: ("Safety/Ethics", "Policy alignment, harmful content prevention"),
HeadId.RELIABILITY: ("Reliability", "SLOs, failover, load testing, observability"),
HeadId.COST: ("Cost/Performance", "Token budgets, caching, model routing"),
HeadId.DATA: ("Data/Memory", "Schemas, privacy, retention, personalization"),
HeadId.DEVEX: ("DevEx", "CI/CD, testing strategy, local tooling"),
}
role, objective = role_map.get(
head_id,
(head_id.value.title(), "Provide analysis from your perspective."),
)
return HeadAgent(
head_id=head_id,
role=role,
objective=objective,
system_prompt=get_head_prompt(head_id),
adapter=adapter,
tool_permissions=tool_permissions,
reasoning_provider=provider,
)
# Heads that may call tools (Research, Systems, Security, Data)
TOOL_ENABLED_HEADS: list[HeadId] = [
HeadId.RESEARCH,
HeadId.SYSTEMS,
HeadId.SECURITY,
HeadId.DATA,
]
DEFAULT_TOOL_PERMISSIONS: dict[HeadId, list[str]] = {
HeadId.RESEARCH: ["search", "docs"],
HeadId.SYSTEMS: ["search", "db"],
HeadId.SECURITY: ["search", "code_runner"],
HeadId.DATA: ["db", "docs"],
}
def create_all_content_heads(
adapter: LLMAdapter | None = None,
tool_permissions_by_head: dict[HeadId, list[str]] | None = None,
reasoning_provider: NativeReasoningProvider | None = None,
use_native_reasoning: bool = True,
) -> dict[HeadId, HeadAgent]:
"""Create all 11 content head agents. Tool-enabled heads get default permissions.
When adapter is None, uses native reasoning by default (no external LLM).
"""
content_heads = [h for h in HeadId if h != HeadId.WITNESS]
perms = tool_permissions_by_head or {}
return {
hid: create_head_agent(
hid,
adapter,
perms.get(hid) or DEFAULT_TOOL_PERMISSIONS.get(hid),
reasoning_provider=reasoning_provider,
use_native_reasoning=use_native_reasoning,
)
for hid in content_heads
}
__all__ = [
"create_head_agent",
"create_all_content_heads",
]

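A sketch of the factory in use, assuming this module lives at fusionagi.agents.heads. With no adapter each head falls back to its own NativeReasoningProvider, so the call should not make external LLM requests; the printed fields come from BaseAgent and the default permission table above.

from fusionagi.agents.heads import create_all_content_heads
from fusionagi.schemas.head import HeadId

heads = create_all_content_heads()               # adapter=None -> native reasoning per head
print(sorted(h.value for h in heads))            # 11 content heads; Witness is excluded
security = heads[HeadId.SECURITY]
print(security.role, security.tool_permissions)  # "Security", ["search", "code_runner"]
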
112
fusionagi/agents/planner.py Normal file
View File

@@ -0,0 +1,112 @@
"""Planner agent: decomposes goals into plan graph; uses LLM adapter when provided."""
import json
import re
from typing import Any
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.adapters.base import LLMAdapter
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi._logger import logger
PLAN_REQUEST_SYSTEM = """You are a planner. Given a goal and optional constraints, output a JSON object with this exact structure:
{"steps": [{"id": "step_1", "description": "...", "dependencies": []}, ...], "fallback_paths": []}
Each step has: id (string), description (string), dependencies (list of step ids that must complete first).
Output only valid JSON, no markdown or extra text."""
class PlannerAgent(BaseAgent):
"""Planner: responds to plan_request with a plan; uses adapter if set, else fixed plan."""
DEFAULT_PLAN = {
"steps": [
{"id": "step_1", "description": "Analyze goal", "dependencies": []},
{"id": "step_2", "description": "Execute primary action", "dependencies": ["step_1"]},
{"id": "step_3", "description": "Verify result", "dependencies": ["step_2"]},
],
"fallback_paths": [],
}
def __init__(
self,
identity: str = "planner",
adapter: LLMAdapter | None = None,
) -> None:
super().__init__(
identity=identity,
role="Planner",
objective="Decompose goals into executable steps",
memory_access=True,
tool_permissions=[],
)
self._adapter = adapter
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""On plan_request, return plan_ready with plan from adapter or default."""
if envelope.message.intent != "plan_request":
return None
logger.info(
"Planner handle_message",
extra={"recipient": self.identity, "intent": envelope.message.intent},
)
goal = envelope.message.payload.get("goal", "")
constraints = envelope.message.payload.get("constraints", [])
plan_dict = self._get_plan(goal, constraints)
logger.info(
"Planner response",
extra={"recipient": self.identity, "response_intent": "plan_ready"},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="plan_ready",
payload={"plan": plan_dict},
),
task_id=envelope.task_id,
correlation_id=envelope.correlation_id,
)
def _get_plan(self, goal: str, constraints: list[str]) -> dict[str, Any]:
"""Produce plan dict: use adapter if available and parsing succeeds, else default."""
if not self._adapter or not goal:
return self.DEFAULT_PLAN
user_content = f"Goal: {goal}\n"
if constraints:
user_content += "Constraints: " + ", ".join(constraints) + "\n"
user_content += "Output the plan as JSON only."
messages = [
{"role": "system", "content": PLAN_REQUEST_SYSTEM},
{"role": "user", "content": user_content},
]
try:
raw = self._adapter.complete(messages)
plan_dict = self._parse_plan_response(raw)
if plan_dict and plan_dict.get("steps"):
return plan_dict
except Exception:
logger.exception(
"Planner adapter or parse failed, using default plan",
extra={"intent": "plan_request"},
)
return self.DEFAULT_PLAN
def _parse_plan_response(self, raw: str) -> dict[str, Any] | None:
"""Extract JSON plan from raw response (handle code blocks)."""
raw = raw.strip()
for start in ("```json", "```"):
if raw.startswith(start):
raw = raw[len(start) :].strip()
if raw.endswith("```"):
raw = raw[:-3].strip()
match = re.search(r"\{[\s\S]*\}", raw)
if match:
try:
return json.loads(match.group())
except json.JSONDecodeError as e:
logger.debug("Planner JSON parse failed (match)", extra={"error": str(e)})
try:
return json.loads(raw)
except json.JSONDecodeError as e:
logger.debug("Planner JSON parse failed (raw)", extra={"error": str(e)})
return None

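A sketch exercising the planner's no-adapter path, which returns DEFAULT_PLAN regardless of the goal; the goal, constraint, sender id, and task id are illustrative.

from fusionagi.agents.planner import PlannerAgent
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope

planner = PlannerAgent()  # no adapter: returns DEFAULT_PLAN
request = AgentMessageEnvelope(
    message=AgentMessage(
        sender="supervisor",             # hypothetical sender id
        recipient=planner.identity,
        intent="plan_request",
        payload={"goal": "Summarize the quarterly report", "constraints": ["read-only tools"]},
    ),
    task_id="task-002",
)
reply = planner.handle_message(request)
print(reply.message.intent)                                        # plan_ready
print([s["id"] for s in reply.message.payload["plan"]["steps"]])   # step_1, step_2, step_3
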
fusionagi/agents/reasoner.py Normal file
View File

@@ -0,0 +1,226 @@
"""Reasoner agent: reasons over step/subgoal + context; outputs recommendation via CoT.
The Reasoner agent:
- Processes reason_request messages
- Uses Chain-of-Thought or Tree-of-Thought reasoning
- Integrates with WorkingMemory for context
- Records reasoning traces to EpisodicMemory
"""
from __future__ import annotations
import json
from typing import Any, TYPE_CHECKING
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.adapters.base import LLMAdapter
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi.reasoning import run_chain_of_thought
from fusionagi._logger import logger
if TYPE_CHECKING:
from fusionagi.memory.working import WorkingMemory
from fusionagi.memory.episodic import EpisodicMemory
class ReasonerAgent(BaseAgent):
"""
Reasoner agent: runs Chain-of-Thought reasoning and returns recommendations.
Features:
- LLM-powered reasoning via CoT
- WorkingMemory integration for context enrichment
- EpisodicMemory integration for trace recording
- Confidence scoring
"""
def __init__(
self,
identity: str = "reasoner",
adapter: LLMAdapter | None = None,
working_memory: WorkingMemory | None = None,
episodic_memory: EpisodicMemory | None = None,
) -> None:
"""
Initialize the Reasoner agent.
Args:
identity: Agent identifier.
adapter: LLM adapter for reasoning.
working_memory: Working memory for context retrieval.
episodic_memory: Episodic memory for trace recording.
"""
super().__init__(
identity=identity,
role="Reasoner",
objective="Reason over steps and recommend next actions",
memory_access=True,
tool_permissions=[],
)
self._adapter = adapter
self._working_memory = working_memory
self._episodic_memory = episodic_memory
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""On reason_request, run CoT and return recommendation_ready."""
if envelope.message.intent != "reason_request":
return None
logger.info(
"Reasoner handle_message",
extra={"recipient": self.identity, "intent": envelope.message.intent},
)
payload = envelope.message.payload
task_id = envelope.task_id or ""
step_id = payload.get("step_id")
subgoal = payload.get("subgoal", "")
context = payload.get("context", "")
# Enrich context with working memory if available
enriched_context = self._enrich_context(task_id, context)
query = subgoal or f"Consider step: {step_id}. What should we do next?"
if not self._adapter:
return self._respond_without_llm(envelope, step_id)
# Run chain-of-thought reasoning
response, trace = run_chain_of_thought(
self._adapter,
query,
context=enriched_context or None,
)
# Calculate confidence based on trace quality
confidence = self._calculate_confidence(trace)
# Store reasoning in working memory
if self._working_memory and task_id:
self._working_memory.append(
task_id,
"reasoning_history",
{
"step_id": step_id,
"query": query,
"response": response[:500] if response else "",
"confidence": confidence,
},
)
# Record to episodic memory
if self._episodic_memory and task_id:
self._episodic_memory.append(
task_id=task_id,
event={
"type": "reasoning",
"step_id": step_id,
"query": query,
"response_length": len(response) if response else 0,
"trace_length": len(trace),
"confidence": confidence,
},
event_type="reasoning_complete",
)
logger.info(
"Reasoner response",
extra={
"recipient": self.identity,
"response_intent": "recommendation_ready",
"confidence": confidence,
},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="recommendation_ready",
payload={
"step_id": step_id,
"recommendation": response,
"trace": trace,
"confidence": confidence,
},
confidence=confidence,
),
task_id=task_id,
correlation_id=envelope.correlation_id,
)
def _enrich_context(self, task_id: str, base_context: str) -> str:
"""Enrich context with working memory data."""
if not self._working_memory or not task_id:
return base_context
# Get context summary from working memory
context_summary = self._working_memory.get_context_summary(task_id, max_items=5)
if not context_summary:
return base_context
# Get recent reasoning history
reasoning_history = self._working_memory.get_list(task_id, "reasoning_history")
recent_reasoning = reasoning_history[-3:] if reasoning_history else []
enriched_parts = [base_context] if base_context else []
if context_summary:
enriched_parts.append(f"\nWorking memory context: {json.dumps(context_summary, default=str)[:500]}")
if recent_reasoning:
recent_summaries = [
f"- Step {r.get('step_id', '?')}: {r.get('response', '')[:100]}"
for r in recent_reasoning
]
enriched_parts.append(f"\nRecent reasoning:\n" + "\n".join(recent_summaries))
return "\n".join(enriched_parts)
    def _calculate_confidence(self, trace: list[dict[str, Any]]) -> float:
        """Calculate confidence score based on reasoning trace."""
        if not trace:
            return 0.5  # Default confidence without trace
        # Simple heuristic: more reasoning steps = more thorough = higher confidence,
        # but with diminishing returns after a point.
        step_count = len(trace)
        if step_count == 1:
            return 0.5
        elif step_count == 2:
            return 0.7
        elif step_count <= 4:
            return 0.8
        return 0.9
def _respond_without_llm(
self,
envelope: AgentMessageEnvelope,
step_id: str | None,
) -> AgentMessageEnvelope:
"""Generate response when no LLM is available."""
logger.info(
"Reasoner response (no adapter)",
extra={"recipient": self.identity, "response_intent": "recommendation_ready"},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="recommendation_ready",
payload={
"step_id": step_id,
"recommendation": "Proceed with execution (no LLM available for reasoning).",
"trace": [],
"confidence": 0.5,
},
confidence=0.5,
),
task_id=envelope.task_id,
correlation_id=envelope.correlation_id,
)

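A sketch of the reasoner's no-adapter path; with an adapter and memories attached, the same envelope would instead trigger chain-of-thought plus working/episodic memory writes. The step id, subgoal, and ids are illustrative.

from fusionagi.agents.reasoner import ReasonerAgent
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope

reasoner = ReasonerAgent()  # no adapter, no memories attached
request = AgentMessageEnvelope(
    message=AgentMessage(
        sender="coordinator",            # hypothetical sender id
        recipient=reasoner.identity,
        intent="reason_request",
        payload={"step_id": "step_2", "subgoal": "Decide which data source to query"},
    ),
    task_id="task-003",
)
reply = reasoner.handle_message(request)
print(reply.message.intent, reply.message.payload["confidence"])  # recommendation_ready 0.5
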
fusionagi/agents/witness_agent.py Normal file
View File

@@ -0,0 +1,219 @@
"""Witness agent: meta-controller that arbitrates head outputs and produces final response."""
from typing import Any
from fusionagi.agents.base_agent import BaseAgent
from fusionagi.adapters.base import LLMAdapter
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope
from fusionagi.schemas.head import HeadId, HeadOutput
from fusionagi.schemas.witness import (
    AgreementMap,
    TransparencyReport,
    FinalResponse,
)
from fusionagi.multi_agent.consensus_engine import run_consensus
from fusionagi._logger import logger

# Approx 4 chars/token; limit context to ~6k tokens (~24k chars) to avoid overflow
DEFAULT_MAX_CONTEXT_CHARS = 24_000
WITNESS_COMPOSE_SYSTEM = """You are the Witness meta-controller in a 12-headed multi-agent system.
You receive structured outputs from specialist heads (Logic, Research, Strategy, Security, etc.).
Your job: produce a clear, coherent final answer that synthesizes the head contributions.
Use the agreed claims. Acknowledge disputes if any. Be concise and actionable.
Output only the final narrative text, no JSON or meta-commentary."""
class WitnessAgent(BaseAgent):
"""
Witness: consumes HeadOutput from content heads, runs consensus, composes FinalResponse.
"""
def __init__(
self,
adapter: LLMAdapter | None = None,
max_context_chars: int = DEFAULT_MAX_CONTEXT_CHARS,
) -> None:
super().__init__(
identity=HeadId.WITNESS.value,
role="Witness",
objective="Arbitrate head outputs, resolve conflicts, produce final narrative",
memory_access=True,
tool_permissions=[],
)
self._adapter = adapter
self._max_context_chars = max_context_chars
def handle_message(self, envelope: AgentMessageEnvelope) -> AgentMessageEnvelope | None:
"""On witness_request, produce FinalResponse from head outputs."""
if envelope.message.intent != "witness_request":
return None
payload = envelope.message.payload or {}
head_outputs_data = payload.get("head_outputs", [])
user_prompt = payload.get("prompt", "")
head_outputs: list[HeadOutput] = []
for h in head_outputs_data:
if isinstance(h, dict):
try:
head_outputs.append(HeadOutput.model_validate(h))
except Exception as e:
logger.warning("Witness: skip invalid HeadOutput", extra={"error": str(e)})
elif isinstance(h, HeadOutput):
head_outputs.append(h)
logger.info(
"Witness handle_message",
extra={"head_count": len(head_outputs), "intent": envelope.message.intent},
)
response = self._produce_response(head_outputs, user_prompt)
if response is None:
return envelope.create_response(
"witness_failed",
payload={"error": "Failed to produce final response"},
)
return AgentMessageEnvelope(
message=AgentMessage(
sender=self.identity,
recipient=envelope.message.sender,
intent="witness_output",
payload={"final_response": response.model_dump()},
),
task_id=envelope.task_id,
correlation_id=envelope.correlation_id,
)
def _produce_response(
self,
head_outputs: list[HeadOutput],
user_prompt: str,
) -> FinalResponse | None:
"""Run consensus and compose final answer."""
agreement_map = run_consensus(head_outputs)
head_contributions: list[dict[str, Any]] = []
for out in head_outputs:
key_claims = [c.claim_text[:80] + "..." if len(c.claim_text) > 80 else c.claim_text for c in out.claims[:3]]
head_contributions.append({
"head_id": out.head_id.value,
"summary": out.summary,
"key_claims": key_claims,
})
safety_report = self._build_safety_report(head_outputs)
transparency = TransparencyReport(
head_contributions=head_contributions,
agreement_map=agreement_map,
safety_report=safety_report,
confidence_score=agreement_map.confidence_score,
)
final_answer = self._compose_final_answer(
head_outputs=head_outputs,
agreement_map=agreement_map,
user_prompt=user_prompt,
)
return FinalResponse(
final_answer=final_answer,
transparency_report=transparency,
head_contributions=head_contributions,
confidence_score=agreement_map.confidence_score,
)
def _build_safety_report(self, head_outputs: list[HeadOutput]) -> str:
"""Summarize safety-relevant findings from Safety head and risks."""
safety_summaries = []
all_risks: list[str] = []
for out in head_outputs:
if out.head_id == HeadId.SAFETY and out.summary:
safety_summaries.append(out.summary)
for r in out.risks:
if r.severity in ("high", "critical"):
all_risks.append(f"[{out.head_id.value}] {r.description}")
if safety_summaries:
return " ".join(safety_summaries)
if all_risks:
return "Risks identified: " + "; ".join(all_risks[:5])
return "No significant safety concerns raised."
def _truncate(self, text: str, max_len: int) -> str:
"""Truncate text with ellipsis if over max_len."""
if len(text) <= max_len:
return text
return text[: max_len - 3] + "..."
def _build_compose_context(
self,
head_outputs: list[HeadOutput],
agreement_map: AgreementMap,
user_prompt: str,
) -> str:
"""Build truncated context for LLM to avoid token overflow."""
max_chars = self._max_context_chars
prompt_limit = min(800, max_chars // 4)
summary_limit = min(300, max_chars // (len(head_outputs) * 2) if head_outputs else 300)
claim_limit = min(150, max_chars // 20)
user_trunc = self._truncate(user_prompt, prompt_limit)
context = f"User asked: {user_trunc}\n\n"
context += "Head summaries:\n"
for out in head_outputs:
s = self._truncate(out.summary or "", summary_limit)
context += f"- {out.head_id.value}: {s}\n"
context += "\nAgreed claims:\n"
for c in agreement_map.agreed_claims[:10]:
claim = self._truncate(c.get("claim_text", ""), claim_limit)
context += f"- {claim} (confidence: {c.get('confidence', 0)})\n"
if agreement_map.disputed_claims:
context += "\nDisputed:\n"
for c in agreement_map.disputed_claims[:5]:
claim = self._truncate(c.get("claim_text", ""), claim_limit)
context += f"- {claim}\n"
if len(context) > max_chars:
context = context[: max_chars - 20] + "\n...[truncated]"
return context
def _compose_final_answer(
self,
head_outputs: list[HeadOutput],
agreement_map: AgreementMap,
user_prompt: str,
) -> str:
"""Compose narrative from head outputs and consensus."""
if not self._adapter:
return self._fallback_compose(head_outputs, agreement_map)
context = self._build_compose_context(head_outputs, agreement_map, user_prompt)
messages = [
{"role": "system", "content": WITNESS_COMPOSE_SYSTEM},
{"role": "user", "content": context},
]
try:
result = self._adapter.complete(messages, temperature=0.3)
return result.strip() if result else self._fallback_compose(head_outputs, agreement_map)
except Exception as e:
logger.exception("Witness compose failed", extra={"error": str(e)})
return self._fallback_compose(head_outputs, agreement_map)
def _fallback_compose(
self,
head_outputs: list[HeadOutput],
agreement_map: AgreementMap,
) -> str:
"""Simple concatenation when no adapter."""
parts = []
for out in head_outputs[:5]:
parts.append(f"[{out.head_id.value}] {out.summary}")
if agreement_map.agreed_claims:
parts.append("Key points: " + "; ".join(
c.get("claim_text", "")[:60] for c in agreement_map.agreed_claims[:5]
))
return "\n\n".join(parts) if parts else "No head outputs available."
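A sketch of the Witness consuming two head outputs without an adapter, so the final answer comes from _fallback_compose. It reuses only HeadOutput fields shown earlier and assumes run_consensus tolerates heads that carry no claims; the summaries, prompt, and ids are illustrative.

from fusionagi.agents.witness_agent import WitnessAgent
from fusionagi.schemas.head import HeadId, HeadOutput
from fusionagi.schemas.messages import AgentMessage, AgentMessageEnvelope

witness = WitnessAgent()  # no adapter: fallback composition
outputs = [
    HeadOutput(head_id=HeadId.LOGIC, summary="The argument is internally consistent.",
               claims=[], risks=[], questions=[], recommended_actions=[], tone_guidance=""),
    HeadOutput(head_id=HeadId.SECURITY, summary="No credentials are exposed in the plan.",
               claims=[], risks=[], questions=[], recommended_actions=[], tone_guidance=""),
]
request = AgentMessageEnvelope(
    message=AgentMessage(
        sender="coordinator",            # hypothetical sender id
        recipient=witness.identity,
        intent="witness_request",
        payload={"head_outputs": [o.model_dump() for o in outputs], "prompt": "Review the rollout plan"},
    ),
    task_id="task-004",
)
reply = witness.handle_message(request)
print(reply.message.intent)                                      # witness_output
print(reply.message.payload["final_response"]["final_answer"])   # concatenated head summaries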