"""Dvādaśa content head agents: Logic, Research, Systems, Strategy, etc."""
|
|
|
|
from typing import Any
|
|
|
|
from fusionagi.agents.head_agent import HeadAgent
|
|
from fusionagi.adapters.base import LLMAdapter
|
|
from fusionagi.reasoning.native import NativeReasoningProvider
|
|
from fusionagi.schemas.head import HeadId
|
|
from fusionagi.prompts.heads import get_head_prompt
|
|
|
|
|
|
def create_head_agent(
    head_id: HeadId,
    adapter: LLMAdapter | None = None,
    tool_permissions: list[str] | None = None,
    reasoning_provider: NativeReasoningProvider | None = None,
    use_native_reasoning: bool = True,
) -> HeadAgent:
    """Build the HeadAgent for a single content head.

    When adapter is None and use_native_reasoning is True, uses NativeReasoningProvider
    for independent symbolic reasoning (no external LLM calls).

    Args:
        head_id: Which content head to construct (must not be WITNESS).
        adapter: Optional LLM backend; None means no external model.
        tool_permissions: Tool names this head may invoke, or None for none.
        reasoning_provider: Explicit reasoning provider; overrides the
            native-reasoning fallback when given.
        use_native_reasoning: Allow falling back to NativeReasoningProvider
            when no adapter and no explicit provider are supplied.

    Raises:
        ValueError: If head_id is HeadId.WITNESS.
    """
    # Witness is a distinct agent type; refuse it early with a clear message.
    if head_id == HeadId.WITNESS:
        raise ValueError("Use WitnessAgent for Witness; HeadAgent is for content heads only")

    # Fall back to native symbolic reasoning only when the caller supplied
    # neither an explicit provider nor an LLM adapter (and hasn't opted out).
    provider = reasoning_provider
    if provider is None and adapter is None and use_native_reasoning:
        provider = NativeReasoningProvider()

    # (role, objective) persona pairs for each known content head.
    personas: dict[HeadId, tuple[str, str]] = {
        HeadId.LOGIC: ("Logic", "Correctness, contradictions, formal checks"),
        HeadId.RESEARCH: ("Research", "Retrieval, source quality, citations"),
        HeadId.SYSTEMS: ("Systems", "Architecture, dependencies, scalability"),
        HeadId.STRATEGY: ("Strategy", "Roadmap, prioritization, tradeoffs"),
        HeadId.PRODUCT: ("Product/UX", "Interaction design, user flows"),
        HeadId.SECURITY: ("Security", "Threats, auth, secrets, abuse vectors"),
        HeadId.SAFETY: ("Safety/Ethics", "Policy alignment, harmful content prevention"),
        HeadId.RELIABILITY: ("Reliability", "SLOs, failover, load testing, observability"),
        HeadId.COST: ("Cost/Performance", "Token budgets, caching, model routing"),
        HeadId.DATA: ("Data/Memory", "Schemas, privacy, retention, personalization"),
        HeadId.DEVEX: ("DevEx", "CI/CD, testing strategy, local tooling"),
    }
    try:
        role, objective = personas[head_id]
    except KeyError:
        # Unknown (future) head ids still get a sensible generic persona.
        role, objective = head_id.value.title(), "Provide analysis from your perspective."

    return HeadAgent(
        head_id=head_id,
        role=role,
        objective=objective,
        system_prompt=get_head_prompt(head_id),
        adapter=adapter,
        tool_permissions=tool_permissions,
        reasoning_provider=provider,
    )
|
|
|
|
|
|
# Heads that may call tools (Research, Systems, Security, Data).
# Keep in sync with DEFAULT_TOOL_PERMISSIONS, which maps each of these
# heads to its default tool names.
TOOL_ENABLED_HEADS: list[HeadId] = [
    HeadId.RESEARCH,
    HeadId.SYSTEMS,
    HeadId.SECURITY,
    HeadId.DATA,
]
|
|
|
|
# Default tool permissions granted to each tool-enabled head when the
# caller does not supply explicit permissions (see create_all_content_heads).
# Heads absent from this mapping get no tools by default.
DEFAULT_TOOL_PERMISSIONS: dict[HeadId, list[str]] = {
    HeadId.RESEARCH: ["search", "docs"],
    HeadId.SYSTEMS: ["search", "db"],
    HeadId.SECURITY: ["search", "code_runner"],
    HeadId.DATA: ["db", "docs"],
}
|
|
|
|
|
|
def create_all_content_heads(
    adapter: LLMAdapter | None = None,
    tool_permissions_by_head: dict[HeadId, list[str]] | None = None,
    reasoning_provider: NativeReasoningProvider | None = None,
    use_native_reasoning: bool = True,
) -> dict[HeadId, HeadAgent]:
    """Create all 11 content head agents. Tool-enabled heads get default permissions.

    When adapter is None, uses native reasoning by default (no external LLM).

    Args:
        adapter: Optional LLM backend shared by all heads.
        tool_permissions_by_head: Per-head permission overrides. An entry
            present in this mapping always wins — including an explicit
            empty list, which means "this head gets no tools".
        reasoning_provider: Explicit reasoning provider passed through to
            each head.
        use_native_reasoning: Passed through to create_head_agent.

    Returns:
        Mapping from every non-Witness HeadId to its constructed HeadAgent.
    """
    content_heads = [h for h in HeadId if h != HeadId.WITNESS]
    overrides = tool_permissions_by_head if tool_permissions_by_head is not None else {}

    def _permissions_for(hid: HeadId) -> list[str] | None:
        # Honor an explicit override by key membership. The previous
        # `overrides.get(hid) or DEFAULT_TOOL_PERMISSIONS.get(hid)` chain
        # treated an explicit empty list as falsy and silently replaced it
        # with the defaults, making "no tools" impossible to request.
        if hid in overrides:
            return overrides[hid]
        return DEFAULT_TOOL_PERMISSIONS.get(hid)

    return {
        hid: create_head_agent(
            hid,
            adapter,
            _permissions_for(hid),
            reasoning_provider=reasoning_provider,
            use_native_reasoning=use_native_reasoning,
        )
        for hid in content_heads
    }
|
|
|
|
|
|
# Public API of this module: the two factory functions. The permission
# constants above are intentionally not re-exported.
__all__ = [
    "create_head_agent",
    "create_all_content_heads",
]
|