"""Translators for OpenAI API request/response format to FusionAGI."""
|
|
|
|
import time
|
|
from typing import Any
|
|
|
|
from fusionagi.schemas.witness import FinalResponse
|
|
|
|
|
|
def _extract_content(msg: dict[str, Any]) -> str:
|
|
"""Extract text content from a message. Handles string or array content parts."""
|
|
content = msg.get("content")
|
|
if content is None:
|
|
return ""
|
|
if isinstance(content, str):
|
|
return content
|
|
if isinstance(content, list):
|
|
parts: list[str] = []
|
|
for part in content:
|
|
if isinstance(part, dict) and part.get("type") == "text":
|
|
parts.append(part.get("text", "") or "")
|
|
elif isinstance(part, str):
|
|
parts.append(part)
|
|
return "\n".join(parts)
|
|
return str(content)
|
|
|
|
|
|
def messages_to_prompt(messages: list[dict[str, Any]]) -> str:
    """
    Translate an OpenAI messages array into a single prompt string for Dvādaśa.

    Format:
        [System]: {system_content}
        [User]: {user_msg_1}
        [Assistant]: {assistant_msg_1}
        [User]: {user_msg_2}
        ...
        [User]: {last_user_message} <- primary goal for submit_task

    Tool result messages (role "tool") are rendered as
    "[Tool {name}] (id={tool_call_id}) returned: {content}".

    The system message (the last one seen, if several) is emitted exactly once,
    as a prefix before the first rendered turn regardless of that turn's role.
    (Previously a conversation whose first rendered turn was a tool message
    silently dropped the system header.)

    If no turn is rendered at all (empty input, or only system messages), the
    raw system content — or "" — is returned.

    Args:
        messages: List of message dicts with 'role' and 'content'.

    Returns:
        Single prompt string for orch.submit_task / run_dvadasa.
    """
    if not messages:
        return ""

    parts: list[str] = []
    system_content = ""
    last_user_content = ""

    def emit(line: str) -> None:
        # Lazily prepend the system header exactly once, before the first
        # rendered turn — whatever role that turn has.
        if system_content and not parts:
            parts.append(f"[System]: {system_content}")
        parts.append(line)

    for msg in messages:
        # Missing role defaults to "user", matching permissive client payloads.
        role = (msg.get("role") or "user").lower()
        content = _extract_content(msg)

        if role == "system":
            system_content = content
        elif role == "user":
            last_user_content = content
            emit(f"[User]: {content}")
        elif role == "assistant":
            emit(f"[Assistant]: {content}")
        elif role == "tool":
            name = msg.get("name", "unknown")
            tool_id = msg.get("tool_call_id", "")
            suffix = f" (id={tool_id})" if tool_id else ""
            emit(f"[Tool {name}]{suffix} returned: {content}")
        # Unknown roles are intentionally ignored.

    if not parts:
        # Nothing was rendered: only system (or unknown-role) messages.
        return last_user_content or system_content

    return "\n\n".join(parts)
|
|
|
|
|
|
def estimate_usage(
    messages: list[dict[str, Any]],
    completion_text: str,
    chars_per_token: int = 4,
) -> dict[str, int]:
    """
    Rough token accounting from character counts (OpenAI-like heuristic).

    Args:
        messages: Request messages; their extracted text drives prompt_tokens.
        completion_text: Response text; drives completion_tokens.
        chars_per_token: Approximate characters per token (default 4).

    Returns:
        Dict with prompt_tokens, completion_tokens, total_tokens.
        Prompt and completion counts are each floored at 1.
    """
    total_prompt_chars = 0
    for message in messages:
        total_prompt_chars += len(_extract_content(message))

    # Integer division, but never report zero tokens for either side.
    prompt_tokens = max(1, total_prompt_chars // chars_per_token)
    completion_tokens = max(1, len(completion_text) // chars_per_token)

    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }
|
|
|
|
|
|
def final_response_to_openai(
    final: FinalResponse,
    task_id: str,
    request_model: str | None = None,
    messages: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """
    Map a FusionAGI FinalResponse to the OpenAI Chat Completion format.

    Args:
        final: FinalResponse from run_dvadasa; only ``final_answer`` is read.
        task_id: Task ID used to build the response id (truncated to 24 chars).
        request_model: Model ID from the request, or default fusionagi-dvadasa.
        messages: Original request messages for usage estimation.

    Returns:
        OpenAI-compatible chat completion dict with a single "stop" choice
        and character-count-based usage figures.
    """
    model = request_model or "fusionagi-dvadasa"
    usage = estimate_usage(messages or [], final.final_answer)

    return {
        # Slicing is safe on short strings, so no length check is needed
        # (the previous conditional produced the same value on both branches).
        "id": f"chatcmpl-{task_id[:24]}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": final.final_answer,
                    # This adapter never surfaces tool calls in the response.
                    "tool_calls": None,
                },
                "finish_reason": "stop",
            }
        ],
        "usage": usage,
    }
|