Initial commit: add .gitignore and README
Some checks failed
Tests / test (3.10) (push) Has been cancelled
Tests / test (3.11) (push) Has been cancelled
Tests / test (3.12) (push) Has been cancelled
Tests / lint (push) Has been cancelled
Tests / docker (push) Has been cancelled

This commit is contained in:
defiQUG
2026-02-09 21:51:42 -08:00
commit c052b07662
3146 changed files with 808305 additions and 0 deletions

View File

@@ -0,0 +1,18 @@
"""LLM adapters: abstract interface and provider implementations.
NativeAdapter: Uses FusionAGI's internal reasoning—no external API calls.
OpenAIAdapter is None when the openai package is not installed (pip install fusionagi[openai]).
Use: from fusionagi.adapters import OpenAIAdapter; if OpenAIAdapter is not None: ...
"""
from fusionagi.adapters.base import LLMAdapter
from fusionagi.adapters.stub_adapter import StubAdapter
from fusionagi.adapters.cache import CachedAdapter
from fusionagi.adapters.native_adapter import NativeAdapter
try:
from fusionagi.adapters.openai_adapter import OpenAIAdapter
except ImportError:
OpenAIAdapter = None # type: ignore[misc, assignment]
__all__ = ["LLMAdapter", "StubAdapter", "CachedAdapter", "NativeAdapter", "OpenAIAdapter"]

View File

@@ -0,0 +1,55 @@
"""Abstract LLM adapter interface; model-agnostic for orchestrator and agents."""
from abc import ABC, abstractmethod
from typing import Any
class LLMAdapter(ABC):
"""
Abstract adapter for LLM completion.
Implementations should handle:
- openai/ - OpenAI API (GPT-4, etc.)
- anthropic/ - Anthropic API (Claude, etc.)
- local/ - Local models (Ollama, etc.)
"""
@abstractmethod
def complete(
self,
messages: list[dict[str, str]],
**kwargs: Any,
) -> str:
"""
Return completion text for the given messages.
Args:
messages: List of message dicts with 'role' and 'content' keys.
**kwargs: Provider-specific options (e.g., temperature, max_tokens).
Returns:
The model's response text.
"""
...
def complete_structured(
self,
messages: list[dict[str, str]],
schema: dict[str, Any] | None = None,
**kwargs: Any,
) -> Any:
"""
Return structured (JSON) output.
Default implementation returns None; subclasses may override to use
provider-specific JSON modes (e.g., OpenAI's response_format).
Args:
messages: List of message dicts with 'role' and 'content' keys.
schema: Optional JSON schema for response validation.
**kwargs: Provider-specific options.
Returns:
Parsed JSON response or None if not supported/parsing fails.
"""
return None

115
fusionagi/adapters/cache.py Normal file
View File

@@ -0,0 +1,115 @@
"""Optional response cache for LLM adapter."""
import hashlib
import json
from collections import OrderedDict
from typing import Any
from fusionagi.adapters.base import LLMAdapter
class CachedAdapter(LLMAdapter):
    """
    Adapter decorator that memoizes responses keyed by a hash of the request.

    - Both complete() and complete_structured() results are cached.
    - LRU eviction once a cache reaches capacity (recently used entries kept).
    - Text and structured responses live in separate caches.
    - Hit/miss counters are exposed for monitoring via get_stats().
    """

    def __init__(self, adapter: LLMAdapter, max_entries: int = 100) -> None:
        """
        Wrap *adapter* with response caching.

        Args:
            adapter: The underlying LLM adapter delegated to on cache misses.
            max_entries: Capacity of each cache before LRU eviction kicks in.
        """
        self._adapter = adapter
        self._cache: OrderedDict[str, str] = OrderedDict()
        self._structured_cache: OrderedDict[str, Any] = OrderedDict()
        self._max_entries = max_entries
        self._hits = 0
        self._misses = 0

    def _key(self, messages: list[dict[str, str]], kwargs: dict[str, Any], prefix: str = "") -> str:
        """Derive a deterministic cache key from the request parameters."""
        serialized = json.dumps(
            {"prefix": prefix, "messages": messages, "kwargs": kwargs},
            sort_keys=True,
            default=str,
        )
        return hashlib.sha256(serialized.encode()).hexdigest()

    def _evict_if_needed(self, cache: OrderedDict[str, Any]) -> None:
        """Drop least-recently-used entries until there is room for one more."""
        while cache and len(cache) >= self._max_entries:
            cache.popitem(last=False)

    def _get_and_touch(self, cache: OrderedDict[str, Any], key: str) -> Any:
        """Mark *key* as most recently used and return its value."""
        cache.move_to_end(key)
        return cache[key]

    def complete(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
        """Return a cached completion, delegating to the wrapped adapter on miss."""
        cache_key = self._key(messages, kwargs, prefix="complete")
        if cache_key in self._cache:
            self._hits += 1
            return self._get_and_touch(self._cache, cache_key)
        self._misses += 1
        text = self._adapter.complete(messages, **kwargs)
        self._evict_if_needed(self._cache)
        self._cache[cache_key] = text
        return text

    def complete_structured(
        self,
        messages: list[dict[str, str]],
        schema: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """
        Return a cached structured completion.

        Structured results are stored separately from text results; None
        results (unsupported provider / failed parse) are never cached.
        """
        cache_key = self._key(messages, {**kwargs, "_schema": schema}, prefix="structured")
        if cache_key in self._structured_cache:
            self._hits += 1
            return self._get_and_touch(self._structured_cache, cache_key)
        self._misses += 1
        result = self._adapter.complete_structured(messages, schema=schema, **kwargs)
        if result is not None:
            self._evict_if_needed(self._structured_cache)
            self._structured_cache[cache_key] = result
        return result

    def get_stats(self) -> dict[str, Any]:
        """Return hit/miss counters, hit rate, and current cache sizes."""
        total = self._hits + self._misses
        return {
            "hits": self._hits,
            "misses": self._misses,
            "hit_rate": (self._hits / total) if total > 0 else 0.0,
            "text_cache_size": len(self._cache),
            "structured_cache_size": len(self._structured_cache),
            "max_entries": self._max_entries,
        }

    def clear_cache(self) -> None:
        """Empty both caches and reset the statistics counters."""
        self._cache.clear()
        self._structured_cache.clear()
        self._hits = 0
        self._misses = 0

View File

@@ -0,0 +1,101 @@
"""
Native adapter: implements LLMAdapter using FusionAGI's internal reasoning.
No external API calls. Used for synthesis (e.g. Witness compose) when operating
in fully native AGI mode.
"""
from typing import Any
from fusionagi.adapters.base import LLMAdapter
def _synthesize_from_messages(messages: list[dict[str, Any]]) -> str:
"""
Synthesize narrative from message content using native logic only.
Extracts head summaries and agreed claims, produces coherent narrative.
"""
if not messages:
return ""
content_parts: list[str] = []
for msg in messages:
content = msg.get("content", "")
if isinstance(content, str) and content.strip():
content_parts.append(content)
if not content_parts:
return ""
full_content = "\n".join(content_parts)
# Extract "User asked:" for context
user_prompt = ""
if "User asked:" in full_content:
idx = full_content.index("User asked:") + len("User asked:")
end = full_content.find("\n\n", idx)
user_prompt = full_content[idx:end if end > 0 else None].strip()
narrative_parts: list[str] = []
if user_prompt:
truncated = user_prompt[:120] + ("..." if len(user_prompt) > 120 else "")
narrative_parts.append(f"Regarding your question: {truncated}")
# Extract head summaries
if "Head summaries:" in full_content:
start = full_content.index("Head summaries:") + len("Head summaries:")
end = full_content.find("\n\nAgreed claims:", start)
if end < 0:
end = full_content.find("Agreed claims:", start)
if end < 0:
end = len(full_content)
summaries = full_content[start:end].strip()
for line in summaries.split("\n"):
line = line.strip()
if line.startswith("-") and ":" in line:
narrative_parts.append(line[1:].strip())
# Extract agreed claims as key points
if "Agreed claims:" in full_content:
start = full_content.index("Agreed claims:") + len("Agreed claims:")
rest = full_content[start:].strip()
claims_section = rest.split("\n\nDisputed:")[0].split("\n\n")[0]
claim_lines = [ln.strip()[1:].strip() for ln in claims_section.split("\n") if ln.strip().startswith("-")]
for c in claim_lines[:5]:
if " (confidence:" in c:
c = c.split(" (confidence:")[0].strip()
if c:
narrative_parts.append(c)
if not narrative_parts:
paragraphs = [p.strip() for p in full_content.split("\n\n") if len(p.strip()) > 20]
narrative_parts = paragraphs[:5] if paragraphs else [full_content[:500]]
return "\n\n".join(narrative_parts)
class NativeAdapter(LLMAdapter):
    """
    Adapter backed entirely by FusionAGI's native synthesis — no external LLM.

    complete() builds a narrative from the supplied message content;
    complete_structured() is unsupported and always returns None (use
    NativeReasoningProvider for structured HeadOutput).
    """

    def complete(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
        """Synthesize a response from the message content using native logic."""
        return _synthesize_from_messages(messages)

    def complete_structured(
        self,
        messages: list[dict[str, str]],
        schema: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Structured output is not supported here; always returns None."""
        return None

View File

@@ -0,0 +1,261 @@
"""OpenAI LLM adapter with error handling and retry logic."""
import time
from typing import Any
from fusionagi.adapters.base import LLMAdapter
from fusionagi._logger import logger
class OpenAIAdapterError(Exception):
    """Base exception for all OpenAI adapter failures."""
class OpenAIRateLimitError(OpenAIAdapterError):
    """Raised when the OpenAI API reports a rate limit."""
class OpenAIAuthenticationError(OpenAIAdapterError):
    """Raised when authentication with the OpenAI API fails."""
class OpenAIAdapter(LLMAdapter):
    """
    OpenAI API adapter with retry logic and error handling.
    Requires openai package and OPENAI_API_KEY.
    Features:
    - Automatic retry with exponential backoff for transient errors
    - Proper error classification (rate limits, auth errors, etc.)
    - Structured output support via complete_structured()
    """
    def __init__(
        self,
        model: str = "gpt-4o-mini",
        api_key: str | None = None,
        max_retries: int = 3,
        retry_delay: float = 1.0,
        retry_multiplier: float = 2.0,
        max_retry_delay: float = 30.0,
        **client_kwargs: Any,
    ) -> None:
        """
        Initialize the OpenAI adapter.
        Args:
            model: Default model to use (e.g., "gpt-4o-mini", "gpt-4o").
            api_key: OpenAI API key. If None, uses OPENAI_API_KEY env var.
            max_retries: Maximum number of retry attempts for transient errors.
            retry_delay: Initial delay between retries in seconds.
            retry_multiplier: Multiplier for exponential backoff.
            max_retry_delay: Maximum delay between retries.
            **client_kwargs: Additional arguments passed to OpenAI client.
        """
        self._model = model
        self._api_key = api_key
        self._max_retries = max_retries
        self._retry_delay = retry_delay
        self._retry_multiplier = retry_multiplier
        self._max_retry_delay = max_retry_delay
        self._client_kwargs = client_kwargs
        # Client and module are created lazily in _get_client(), so importing
        # this module never requires the optional `openai` package.
        self._client: Any = None
        self._openai_module: Any = None
    def _get_client(self) -> Any:
        """Lazily import openai and construct the client on first use."""
        if self._client is None:
            try:
                import openai
                # Keep a module handle so error-classification helpers can
                # isinstance-check against openai's exception types.
                self._openai_module = openai
                self._client = openai.OpenAI(api_key=self._api_key, **self._client_kwargs)
            except ImportError as e:
                raise ImportError("Install with: pip install fusionagi[openai]") from e
        return self._client
    def _is_retryable_error(self, error: Exception) -> bool:
        """Check if an error is retryable (transient)."""
        # hasattr guards make this robust across openai SDK versions that may
        # lack some exception classes.
        if self._openai_module is None:
            return False
        # Rate limit errors are retryable
        if hasattr(self._openai_module, "RateLimitError"):
            if isinstance(error, self._openai_module.RateLimitError):
                return True
        # API connection errors are retryable
        if hasattr(self._openai_module, "APIConnectionError"):
            if isinstance(error, self._openai_module.APIConnectionError):
                return True
        # Internal server errors are retryable
        if hasattr(self._openai_module, "InternalServerError"):
            if isinstance(error, self._openai_module.InternalServerError):
                return True
        # Timeout errors are retryable
        if hasattr(self._openai_module, "APITimeoutError"):
            if isinstance(error, self._openai_module.APITimeoutError):
                return True
        return False
    def _classify_error(self, error: Exception) -> Exception:
        """Convert OpenAI exceptions to adapter exceptions."""
        if self._openai_module is None:
            return OpenAIAdapterError(str(error))
        if hasattr(self._openai_module, "RateLimitError"):
            if isinstance(error, self._openai_module.RateLimitError):
                return OpenAIRateLimitError(str(error))
        if hasattr(self._openai_module, "AuthenticationError"):
            if isinstance(error, self._openai_module.AuthenticationError):
                return OpenAIAuthenticationError(str(error))
        # Anything unrecognized maps to the generic adapter error.
        return OpenAIAdapterError(str(error))
    def complete(
        self,
        messages: list[dict[str, str]],
        **kwargs: Any,
    ) -> str:
        """
        Call OpenAI chat completion with retry logic.
        Args:
            messages: List of message dicts with 'role' and 'content'.
            **kwargs: Additional arguments for the API call (e.g., temperature).
        Returns:
            The assistant's response content.
        Raises:
            OpenAIAuthenticationError: If authentication fails.
            OpenAIRateLimitError: If rate limited after all retries.
            OpenAIAdapterError: For other API errors after all retries.
        """
        # Validate messages format
        if not messages:
            logger.warning("OpenAI complete called with empty messages")
            return ""
        for i, msg in enumerate(messages):
            if not isinstance(msg, dict):
                raise ValueError(f"Message {i} must be a dict, got {type(msg).__name__}")
            if "role" not in msg:
                raise ValueError(f"Message {i} missing 'role' key")
            if "content" not in msg:
                raise ValueError(f"Message {i} missing 'content' key")
        client = self._get_client()
        # An explicit kwargs["model"] overrides the adapter's default model.
        model = kwargs.get("model", self._model)
        call_kwargs = {**kwargs, "model": model}
        last_error: Exception | None = None
        delay = self._retry_delay
        # max_retries + 1 total attempts: the first try plus the retries.
        for attempt in range(self._max_retries + 1):
            try:
                resp = client.chat.completions.create(
                    messages=messages,
                    **call_kwargs,
                )
                choice = resp.choices[0] if resp.choices else None
                if choice and choice.message and choice.message.content:
                    return choice.message.content
                # Empty choices/content is treated as an empty (non-error) reply.
                logger.debug("OpenAI empty response", extra={"model": model, "attempt": attempt})
                return ""
            except Exception as e:
                last_error = e
                # Don't retry authentication errors
                if self._openai_module and hasattr(self._openai_module, "AuthenticationError"):
                    if isinstance(e, self._openai_module.AuthenticationError):
                        logger.error("OpenAI authentication failed", extra={"error": str(e)})
                        raise OpenAIAuthenticationError(str(e)) from e
                # Check if retryable
                if not self._is_retryable_error(e):
                    logger.error(
                        "OpenAI non-retryable error",
                        extra={"error": str(e), "error_type": type(e).__name__},
                    )
                    raise self._classify_error(e) from e
                # Log retry attempt
                if attempt < self._max_retries:
                    logger.warning(
                        "OpenAI retryable error, retrying",
                        extra={
                            "error": str(e),
                            "attempt": attempt + 1,
                            "max_retries": self._max_retries,
                            "delay": delay,
                        },
                    )
                    # Exponential backoff, capped at max_retry_delay; no sleep
                    # after the final attempt (the loop exits instead).
                    time.sleep(delay)
                    delay = min(delay * self._retry_multiplier, self._max_retry_delay)
        # All retries exhausted
        logger.error(
            "OpenAI all retries exhausted",
            extra={"error": str(last_error), "attempts": self._max_retries + 1},
        )
        raise self._classify_error(last_error) from last_error
    def complete_structured(
        self,
        messages: list[dict[str, str]],
        schema: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """
        Call OpenAI with JSON mode for structured output.
        Args:
            messages: List of message dicts with 'role' and 'content'.
            schema: Optional JSON schema for response validation (informational).
            **kwargs: Additional arguments for the API call.
        Returns:
            Parsed JSON response or None if parsing fails.
        """
        import json
        # Enable JSON mode (note: overrides any caller-supplied response_format)
        call_kwargs = {**kwargs, "response_format": {"type": "json_object"}}
        # Add schema hint to system message if provided
        if schema and messages:
            schema_hint = f"\n\nRespond with JSON matching this schema: {json.dumps(schema)}"
            if messages[0].get("role") == "system":
                # Append the hint to the existing system message (a new list is
                # built, so the caller's messages are never mutated).
                messages = [
                    {**messages[0], "content": messages[0]["content"] + schema_hint},
                    *messages[1:],
                ]
            else:
                messages = [
                    {"role": "system", "content": f"You must respond with valid JSON.{schema_hint}"},
                    *messages,
                ]
        raw = self.complete(messages, **call_kwargs)
        if not raw:
            return None
        try:
            return json.loads(raw)
        except json.JSONDecodeError as e:
            logger.warning(
                "OpenAI JSON parse failed",
                extra={"error": str(e), "raw_response": raw[:200]},
            )
            return None

View File

@@ -0,0 +1,67 @@
"""Stub LLM adapter for tests; returns fixed responses."""
import json
from typing import Any
from fusionagi.adapters.base import LLMAdapter
class StubAdapter(LLMAdapter):
    """
    Test double that returns canned responses without making any API calls.

    Both plain-text and structured (JSON) responses are configurable, and can
    be swapped mid-test via set_response() / set_structured_response().
    """

    def __init__(
        self,
        response: str = "Stub response",
        structured_response: dict[str, Any] | list[Any] | None = None,
    ) -> None:
        """
        Configure the canned responses.

        Args:
            response: Text returned by complete().
            structured_response: Value returned by complete_structured().
        """
        self._response = response
        self._structured_response = structured_response

    def complete(
        self,
        messages: list[dict[str, str]],
        **kwargs: Any,
    ) -> str:
        """Return the canned text response, ignoring the input entirely."""
        return self._response

    def complete_structured(
        self,
        messages: list[dict[str, str]],
        schema: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """
        Return the canned structured response.

        When none was configured, the text response is parsed as JSON;
        unparseable text yields None.
        """
        if self._structured_response is not None:
            return self._structured_response
        try:
            return json.loads(self._response)
        except json.JSONDecodeError:
            return None

    def set_response(self, response: str) -> None:
        """Replace the canned text response (handy mid-test)."""
        self._response = response

    def set_structured_response(self, response: dict[str, Any] | list[Any] | None) -> None:
        """Replace the canned structured response (handy mid-test)."""
        self._structured_response = response