# tyndale-ai-service/app/llm/adapter.py
from abc import ABC, abstractmethod
import httpx
from app.config import settings
class LLMAdapter(ABC):
    """Interface every LLM backend must implement."""

    @abstractmethod
    async def generate(self, conversation_id: str, message: str) -> str:
        """Produce a reply to *message* within the given conversation.

        Args:
            conversation_id: The conversation identifier
            message: The user's message

        Returns:
            The generated response string
        """
        ...
class LocalAdapter(LLMAdapter):
    """Development/testing backend that never calls a real model."""

    async def generate(self, conversation_id: str, message: str) -> str:
        """Echo a truncated copy of the user's message as a canned stub reply.

        Placeholder until a real local model is wired in; the message is
        clipped to 100 characters with a trailing ellipsis when longer.
        """
        preview = message[:100]
        ellipsis = "..." if len(message) > 100 else ""
        return (
            f"[LOCAL STUB MODE] Acknowledged your message. "
            f"You said: \"{preview}{ellipsis}\". "
            f"This is a stub response - local model not yet implemented."
        )
class RemoteAdapter(LLMAdapter):
    """Adapter that delegates generation to an external LLM service over HTTP."""

    def __init__(self, url: str, token: str | None = None, timeout: float = 30.0):
        """Initialize the remote adapter.

        Args:
            url: The remote LLM service URL
            token: Optional bearer token for authentication
            timeout: Request timeout in seconds
        """
        self.url = url
        self.token = token
        self.timeout = timeout

    async def generate(self, conversation_id: str, message: str) -> str:
        """POST the message to the remote service and return its reply.

        Network and protocol failures are reported as "[ERROR] ..." strings
        instead of raised, so callers always receive displayable text.
        """
        request_headers = {"Content-Type": "application/json"}
        if self.token:
            request_headers["Authorization"] = f"Bearer {self.token}"

        body = {
            "conversation_id": conversation_id,
            "message": message,
        }

        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                resp = await client.post(self.url, json=body, headers=request_headers)
                # Anything other than a plain 200 is surfaced with a short
                # excerpt of the body to aid debugging.
                if resp.status_code != 200:
                    detail = resp.text[:200] if resp.text else "No response body"
                    return (
                        f"[ERROR] Remote LLM returned status {resp.status_code}: "
                        f"{detail}"
                    )
                try:
                    parsed = resp.json()
                except ValueError:
                    return "[ERROR] Remote LLM returned invalid JSON response"
                if "response" not in parsed:
                    return "[ERROR] Remote LLM response missing 'response' field"
                return parsed["response"]
        # Most-specific httpx errors first; RequestError is the catch-all base.
        except httpx.TimeoutException:
            return f"[ERROR] Remote LLM request timed out after {self.timeout} seconds"
        except httpx.ConnectError:
            return f"[ERROR] Could not connect to remote LLM at {self.url}"
        except httpx.RequestError as e:
            return f"[ERROR] Remote LLM request failed: {str(e)}"
def get_adapter() -> LLMAdapter:
    """Build the adapter selected by configuration.

    Returns:
        A RemoteAdapter when LLM_MODE is 'remote', otherwise a LocalAdapter.

    Raises:
        ValueError: if remote mode is selected but LLM_REMOTE_URL is unset.
    """
    if settings.llm_mode != "remote":
        return LocalAdapter()

    remote_url = settings.llm_remote_url
    if not remote_url:
        raise ValueError("LLM_REMOTE_URL must be set when LLM_MODE is 'remote'")
    # Empty-string tokens are normalized to None so no Authorization
    # header is sent for them.
    return RemoteAdapter(
        url=remote_url,
        token=settings.llm_remote_token or None,
    )