Files
grimlock/backend/api/chat.py
JA d9a7c016b1 Backend MVP: FastAPI + Claude integration
Core components:
- FastAPI server with health endpoints
- AI client (Anthropic Claude integration)
- Context manager (loads company knowledge)
- Chat API (non-streaming and streaming)
- Requirements and environment setup

Ready to run: python backend/main.py
2026-02-12 21:16:38 +00:00

138 lines
3.8 KiB
Python

"""
Chat API - Main endpoint for interacting with Grimlock
"""
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List, Optional
import logging
from core.context_manager import ContextManager
from core.ai_client import AIClient
import main
# Module-level logger, named after this module for filterable log output.
logger = logging.getLogger(__name__)
# Router holding the chat endpoints; presumably mounted by the app in main.py
# (the mounting itself is not visible in this file).
router = APIRouter()
class Message(BaseModel):
    """A single chat turn exchanged with the model."""

    role: str     # message author; the endpoints explicitly check for "user"
    content: str  # message text
class ChatRequest(BaseModel):
    """Request body for the chat endpoints."""

    messages: List[Message]     # conversation history; endpoints scan it in
                                # reverse to find the latest "user" message
    role: Optional[str] = None  # optional role hint forwarded to the context
                                # manager and system-prompt builder
    stream: bool = False        # NOTE(review): unused in this file — streaming
                                # is selected by calling /stream instead; confirm
                                # whether this flag is read elsewhere
class ChatResponse(BaseModel):
    """Response body for the non-streaming chat endpoint."""

    response: str       # the AI's reply text
    context_used: bool  # True when company context was injected into the
                        # system prompt for this request
@router.post("/", response_model=ChatResponse)
async def chat(
    request: ChatRequest,
    context_manager: ContextManager = Depends(main.get_context_manager),
    ai_client: AIClient = Depends(main.get_ai_client)
):
    """
    Chat with Grimlock (non-streaming).

    Args:
        request: Chat request with messages and optional role.
        context_manager: Injected provider of company context and system prompts.
        ai_client: Injected AI backend client.

    Returns:
        ChatResponse with the AI's reply and a flag indicating whether
        company context was injected into the system prompt.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    try:
        # The most recent user message drives the context lookup.
        user_message = next(
            (msg.content for msg in reversed(request.messages) if msg.role == "user"),
            None,
        )

        # Fetch company context relevant to that message, if there was one.
        context = ""
        if user_message:
            context = context_manager.get_context_for_query(
                user_message,
                role=request.role,
            )

        # Build the system prompt, appending context when available.
        system_prompt = context_manager.get_system_prompt(role=request.role)
        if context:
            system_prompt += f"\n\n# Company Context\n{context}"

        # Convert pydantic models to the plain-dict shape the AI client takes.
        api_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in request.messages
        ]

        response = await ai_client.chat(
            messages=api_messages,
            system_prompt=system_prompt,
        )

        return ChatResponse(
            response=response,
            context_used=bool(context),
        )
    except Exception as e:
        # logger.exception preserves the traceback (logger.error with an
        # f-string did not); lazy %-args avoid formatting on the hot path.
        logger.exception("Error in chat endpoint: %s", e)
        # Chain the cause so the original exception is visible in debugging.
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.post("/stream")
async def chat_stream(
    request: ChatRequest,
    context_manager: ContextManager = Depends(main.get_context_manager),
    ai_client: AIClient = Depends(main.get_ai_client)
):
    """
    Stream a chat response from Grimlock as plain text chunks.

    Args:
        request: Chat request with messages and optional role.
        context_manager: Injected provider of company context and system prompts.
        ai_client: Injected AI backend client.

    Returns:
        StreamingResponse yielding text chunks from the AI client.

    Raises:
        HTTPException: 500 if setup fails before streaming starts.
    """
    try:
        # The most recent user message drives the context lookup.
        user_message = next(
            (msg.content for msg in reversed(request.messages) if msg.role == "user"),
            None,
        )

        context = ""
        if user_message:
            context = context_manager.get_context_for_query(
                user_message,
                role=request.role,
            )

        # Build the system prompt, appending context when available.
        system_prompt = context_manager.get_system_prompt(role=request.role)
        if context:
            system_prompt += f"\n\n# Company Context\n{context}"

        # Convert pydantic models to the plain-dict shape the AI client takes.
        api_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in request.messages
        ]

        async def generate():
            # NOTE(review): exceptions raised here fire AFTER the response has
            # started, so the surrounding try/except cannot convert them to an
            # HTTP 500 — the client just sees a truncated stream. Consider
            # handling errors inside this generator if that matters.
            async for chunk in ai_client.chat_stream(
                messages=api_messages,
                system_prompt=system_prompt,
            ):
                yield chunk

        return StreamingResponse(generate(), media_type="text/plain")
    except Exception as e:
        # logger.exception preserves the traceback (logger.error with an
        # f-string did not); lazy %-args avoid formatting on the hot path.
        logger.exception("Error in chat stream: %s", e)
        # Chain the cause so the original exception is visible in debugging.
        raise HTTPException(status_code=500, detail=str(e)) from e