Backend MVP: FastAPI + Claude integration

Core components:
- FastAPI server with health endpoints
- AI client (Anthropic Claude integration)
- Context manager (loads company knowledge)
- Chat API (non-streaming and streaming)
- Requirements and environment setup

Ready to run: python backend/main.py
This commit is contained in:
JA
2026-02-12 21:16:38 +00:00
parent ee273be9c5
commit d9a7c016b1
7 changed files with 883 additions and 0 deletions

96
backend/core/ai_client.py Normal file
View File

@@ -0,0 +1,96 @@
"""
AI Client - Anthropic Claude Integration
"""
import anthropic
import os
import logging
from typing import List, Dict, Optional
logger = logging.getLogger(__name__)
class AIClient:
    """Client for interacting with Anthropic Claude API.

    Model and sampling settings are read from environment variables at
    construction time (AI_MODEL, AI_MAX_TOKENS, AI_TEMPERATURE), with
    defaults applied when unset.
    """

    def __init__(self, api_key: str):
        """
        Args:
            api_key: Anthropic API key used to authenticate requests.
        """
        # Use the async client: chat()/chat_stream() below are coroutines, and
        # the synchronous Anthropic client would block the event loop for the
        # full duration of every API call, stalling the FastAPI server.
        self.client = anthropic.AsyncAnthropic(api_key=api_key)
        # NOTE(review): confirm this default model id is still current.
        self.model = os.getenv("AI_MODEL", "claude-sonnet-4-5-20250514")
        self.max_tokens = int(os.getenv("AI_MAX_TOKENS", "4096"))
        self.temperature = float(os.getenv("AI_TEMPERATURE", "0.7"))
        logger.info(f"AIClient initialized with model: {self.model}")

    def _build_kwargs(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str],
        max_tokens: Optional[int],
    ) -> Dict:
        """Assemble the request kwargs shared by chat() and chat_stream()."""
        kwargs: Dict = {
            "model": self.model,
            "max_tokens": max_tokens or self.max_tokens,
            "temperature": self.temperature,
            "messages": messages,
        }
        # The API rejects system=None, so only include it when provided.
        if system_prompt:
            kwargs["system"] = system_prompt
        return kwargs

    async def chat(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None
    ) -> str:
        """
        Send chat messages to Claude and get response

        Args:
            messages: List of message dicts with 'role' and 'content'
            system_prompt: Optional system prompt
            max_tokens: Optional max tokens override

        Returns:
            Response text from Claude

        Raises:
            Exception: re-raised after logging if the API call fails.
        """
        try:
            response = await self.client.messages.create(
                **self._build_kwargs(messages, system_prompt, max_tokens)
            )
            # Extract text from the first content block, if any was returned.
            if response.content and len(response.content) > 0:
                return response.content[0].text
            return "No response generated"
        except Exception as e:
            logger.error(f"Error calling Claude API: {e}")
            raise

    async def chat_stream(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None
    ):
        """
        Stream chat response from Claude

        Args:
            messages: List of message dicts
            system_prompt: Optional system prompt
            max_tokens: Optional max tokens override

        Yields:
            Text chunks from Claude

        Raises:
            Exception: re-raised after logging if streaming fails.
        """
        try:
            async with self.client.messages.stream(
                **self._build_kwargs(messages, system_prompt, max_tokens)
            ) as stream:
                async for text in stream.text_stream:
                    yield text
        except Exception as e:
            logger.error(f"Error streaming from Claude API: {e}")
            raise

backend/core/context_manager.py Normal file
View File

@@ -0,0 +1,171 @@
"""
Context Manager - Loads and manages company context
"""
import os
import logging
from pathlib import Path
from typing import Dict, List, Optional
import yaml
logger = logging.getLogger(__name__)
class ContextManager:
    """Manages loading and accessing company context.

    Context lives on disk as markdown files under one subdirectory per
    category and is cached in memory as ``{category: {name: content}}``.
    """

    # Category order matters: it drives directory creation, loading, and
    # the summary dict, matching the on-disk layout.
    _CATEGORIES = ("projects", "patterns", "anti_patterns", "cost_models", "repos")

    def __init__(self, context_path: str):
        self.context_path = Path(context_path)
        # One empty bucket per category; populated by load_all_context().
        self.context: Dict[str, Dict[str, str]] = {
            name: {} for name in self._CATEGORIES
        }
        self._loaded = False

    def load_all_context(self):
        """Load all context from disk, creating missing directories first."""
        logger.info(f"Loading context from {self.context_path}")
        self.context_path.mkdir(parents=True, exist_ok=True)
        for category in self.context:
            (self.context_path / category).mkdir(exist_ok=True)
        # "repos" is intentionally not bulk-loaded here.
        for category in ("projects", "patterns", "anti_patterns", "cost_models"):
            self._load_directory(category)
        self._loaded = True
        logger.info(f"Context loaded: {self.get_summary()}")

    def _load_directory(self, directory: str):
        """Load all markdown files from one category directory into the cache."""
        folder = self.context_path / directory
        if not folder.exists():
            logger.warning(f"Context directory not found: {folder}")
            return
        bucket = self.context[directory]
        for md_file in folder.glob("*.md"):
            try:
                # Keyed by filename stem, e.g. patterns/etl.md -> "etl".
                bucket[md_file.stem] = md_file.read_text(encoding='utf-8')
                logger.debug(f"Loaded {directory}/{md_file.name}")
            except Exception as e:
                logger.error(f"Error loading {md_file}: {e}")

    def get_context_for_query(self, query: str, role: Optional[str] = None) -> str:
        """
        Get relevant context for a query

        Args:
            query: User query
            role: User role (engineer, bd, admin, exec)

        Returns:
            Formatted context string
        """
        # Simple implementation: include all context
        # TODO: Implement smarter context selection based on query relevance
        sections: List[str] = []

        def emit(bucket: Dict[str, str], header: str):
            # Render one category as a header plus an H2 entry per item.
            sections.append(header)
            for title, body in bucket.items():
                sections.append(f"## {title}\n{body}\n")

        if self.context["projects"]:
            emit(self.context["projects"], "# Vector Zulu Projects\n")
        # Patterns are most relevant for engineers (or when role is unknown).
        if self.context["patterns"] and role in (None, "engineer"):
            emit(self.context["patterns"], "\n# Reference Architectures & Patterns\n")
        if self.context["anti_patterns"]:
            emit(self.context["anti_patterns"], "\n# Anti-Patterns (Things to Avoid)\n")
        # Cost models only matter for estimate-style queries.
        lowered = query.lower()
        if ("cost" in lowered or "price" in lowered) and self.context["cost_models"]:
            emit(self.context["cost_models"], "\n# Cost Models\n")
        return "\n".join(sections)

    def get_system_prompt(self, role: Optional[str] = None) -> str:
        """
        Generate system prompt based on role

        Args:
            role: User role

        Returns:
            System prompt for Claude
        """
        base_prompt = """You are Grimlock, Vector Zulu's AI assistant. You help team members by:
- Answering questions about projects, patterns, and internal systems
- Generating documents, spreadsheets, code, and other artifacts
- Providing technical guidance based on Vector Zulu's proven patterns
- Keeping responses concise and actionable
You have access to Vector Zulu's internal context including:
- Project summaries (UTILEN, Vector Zulu platform, blockchain)
- Reference architectures and patterns
- Anti-patterns to avoid
- Cost models and estimates
"""
        role_prompts = {
            "engineer": "\nYou're speaking with an engineer. Provide technical depth, code examples, and architecture details.",
            "bd": "\nYou're speaking with business development. Focus on capabilities, timelines, costs, and client-facing information.",
            "admin": "\nYou're speaking with admin/operations. Focus on processes, reports, schedules, and organizational information.",
            "exec": "\nYou're speaking with an executive. Provide high-level summaries, key metrics, and strategic insights."
        }
        # Unknown or missing roles fall through to the base prompt unchanged.
        suffix = role_prompts.get(role) if role else None
        return base_prompt + suffix if suffix else base_prompt

    def is_loaded(self) -> bool:
        """Check if context is loaded"""
        return self._loaded

    def get_summary(self) -> Dict:
        """Get summary of loaded context (item count per category)."""
        return {category: len(items) for category, items in self.context.items()}

    def add_context(self, category: str, name: str, content: str):
        """Add or update context (in memory only; see save_context)."""
        if category not in self.context:
            # Unknown categories are silently ignored, matching prior behavior.
            return
        self.context[category][name] = content
        logger.info(f"Added/updated {category}/{name}")

    def save_context(self, category: str, name: str):
        """Save one cached context entry to its markdown file on disk."""
        bucket = self.context.get(category)
        if bucket is None or name not in bucket:
            logger.error(f"Context not found: {category}/{name}")
            return
        file_path = self.context_path / category / f"{name}.md"
        try:
            file_path.write_text(bucket[name], encoding='utf-8')
            logger.info(f"Saved {category}/{name} to disk")
        except Exception as e:
            logger.error(f"Error saving {file_path}: {e}")