- FastAPI backend with SQLAlchemy ORM and SQLite
- AI chatbot with OpenAI-compatible LLM integration (SSE streaming)
- Admin panel for content management, LLM config, token management
- Anonymous access with 3-question limit, token-based access control
- Recruiter intent detection with admin notification
- Resume generator (JD-based, Markdown to Word export)
- Chinese localized public interface

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
163 lines
5.8 KiB
Python
from datetime import datetime
|
||
from typing import AsyncGenerator
|
||
|
||
from fastapi import UploadFile
|
||
from sqlalchemy.orm import Session
|
||
|
||
from app.models import Profile, Skill, Education, WorkExperience, ChatHistory, LLMConfig
|
||
from app.services import llm_service
|
||
from app.services.file_parser import parse_file
|
||
|
||
|
||
def build_system_prompt(db: Session) -> str:
    """Compose the chatbot system prompt from content stored in the database.

    Pulls the candidate profile, skills, education, and work experience
    (each ordered by its ``sort_order`` column), renders them as labelled
    sections, appends the fixed answering rules, and finally tacks on any
    operator-supplied extra prompt from the active LLM configuration.
    """
    profile = db.query(Profile).first()
    skills = db.query(Skill).order_by(Skill.sort_order).all()
    educations = db.query(Education).order_by(Education.sort_order).all()
    experiences = db.query(WorkExperience).order_by(WorkExperience.sort_order).all()

    # The active LLM config may carry an admin-defined prompt addendum.
    config = db.query(LLMConfig).filter(LLMConfig.is_active == True).first()
    custom_prompt = config.system_prompt if config and config.system_prompt else ""

    # Fall back to a generic placeholder name when no profile row exists yet.
    name = profile.name if profile else "候选人"

    lines: list[str] = [
        f"你是{name}的个人AI职业助手,专门帮助招聘者了解{name}的职业背景、技能和工作经验。",
        "",
    ]

    if profile:
        lines.extend([
            "=== 个人基本信息 ===",
            f"姓名: {profile.name}",
            f"学历: {profile.education_level}",
            f"所在地: {profile.location}",
            f"邮箱: {profile.email}",
        ])
        if profile.self_summary:
            lines.append(f"个人总结: {profile.self_summary}")
        lines.append("")

    if skills:
        lines.append("=== 技能特长 ===")
        lines.extend(f"- {item.category}: {item.content}" for item in skills)
        lines.append("")

    if educations:
        lines.append("=== 教育经历 ===")
        for edu in educations:
            lines.append(f"{edu.start_date} - {edu.end_date} | {edu.school} | {edu.major} | {edu.degree}")
            if edu.details:
                lines.append(f" 详情: {edu.details}")
        lines.append("")

    if experiences:
        lines.append("=== 工作经历 ===")
        for job in experiences:
            lines.append(f"{job.start_date} - {job.end_date} | {job.company} | {job.position}")
            if job.company_intro:
                lines.append(f" 公司简介: {job.company_intro}")
            if job.responsibilities:
                lines.append(f" 工作职责: {job.responsibilities}")
            if job.achievements:
                lines.append(f" 工作成就: {job.achievements}")
        lines.append("")

    # Fixed answering rules — always present regardless of DB content.
    lines.extend([
        "=== 回答规则 ===",
        "1. 只回答与招聘、职业、工作能力、技术背景相关的问题",
        "2. 对于无关问题,礼貌拒绝并引导回职业话题",
        "3. 严格基于以上信息回答,不编造经历",
        "4. 使用专业、友好的语气",
        "5. 如果招聘者上传了职位描述(JD),详细分析岗位匹配度,突出亮点、优势和相关经历,同时客观指出可能的不足",
        "6. 根据招聘者使用的语言选择中文或英文回答",
    ])

    if custom_prompt:
        lines.extend(["", "=== 额外指令 ===", custom_prompt])

    return "\n".join(lines)
|
||
|
||
|
||
async def _extract_file_context(file: UploadFile | None) -> tuple[str, dict | None]:
    """Parse an optional upload into (prompt context text, image payload or None).

    Text uploads (JDs) become an analysis instruction appended to the user
    message; parse errors become a bracketed notice; images become a dict
    with base64 data plus a MIME type (defaulting to image/png).
    """
    if not (file and file.filename):
        return "", None
    parsed = await parse_file(file)
    file_context = ""
    if parsed.get("error"):
        file_context = f"\n[文件解析提示: {parsed['error']}]"
    elif parsed.get("text"):
        file_context = f"\n\n招聘者上传的职位描述(JD)内容如下:\n{parsed['text']}\n\n请分析此岗位与我的匹配度,包括匹配的方面、亮点优势和可能的不足。"
    image_data = None
    if parsed.get("base64_image"):
        image_data = {
            "base64": parsed["base64_image"],
            "mime_type": parsed.get("mime_type", "image/png"),
        }
    return file_context, image_data


def _recent_history(db: Session, session_id: str) -> list[dict]:
    """Return up to the last 20 turns of this session in chronological order."""
    rows = (
        db.query(ChatHistory)
        .filter(ChatHistory.session_id == session_id)
        .order_by(ChatHistory.created_at.desc())
        .limit(20)
        .all()
    )
    # Query fetched newest-first to apply the limit; LLM wants oldest-first.
    rows.reverse()
    return [{"role": row.role, "content": row.content} for row in rows]


def _format_user_message(text: str, image_data: dict | None) -> dict:
    """Build the user message dict, OpenAI multimodal style when an image is attached."""
    if not image_data:
        return {"role": "user", "content": text}
    return {
        "role": "user",
        "content": [
            # Vision models require non-empty text alongside the image.
            {"type": "text", "text": text or "请分析这个职位描述图片与我的匹配度。"},
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:{image_data['mime_type']};base64,{image_data['base64']}"
                },
            },
        ],
    }


def _save_turn(db: Session, session_id: str, role: str, content: str) -> None:
    """Persist one chat turn and commit immediately."""
    # NOTE(review): datetime.utcnow() is naive and deprecated since Python 3.12;
    # kept as-is so new rows sort consistently with existing naive timestamps —
    # migrating to datetime.now(timezone.utc) needs a data migration first.
    db.add(ChatHistory(
        session_id=session_id,
        role=role,
        content=content,
        created_at=datetime.utcnow(),
    ))
    db.commit()


async def process_message(
    session_id: str,
    user_message: str,
    db: Session,
    file: UploadFile | None = None,
) -> AsyncGenerator[str, None]:
    """Process a chat message and stream the response.

    Args:
        session_id: Conversation identifier used to load/store history.
        user_message: Raw text typed by the visitor.
        db: SQLAlchemy session for history and configuration access.
        file: Optional uploaded JD document or image.

    Yields:
        Response text chunks as they arrive from the LLM stream.
    """
    file_context, image_data = await _extract_file_context(file)
    full_user_message = user_message + file_context

    # Assemble the conversation: system prompt, prior turns, new user message.
    messages = [{"role": "system", "content": build_system_prompt(db)}]
    messages.extend(_recent_history(db, session_id))
    messages.append(_format_user_message(full_user_message, image_data))

    # Persist the user turn before streaming so it survives an LLM failure.
    _save_turn(db, session_id, "user", full_user_message)

    chunks: list[str] = []
    async for chunk in llm_service.chat_completion_stream(messages, db):
        chunks.append(chunk)
        yield chunk

    # Skip persisting upstream-error placeholders so history stays clean.
    assistant_content = "".join(chunks)
    if assistant_content and "AI服务调用失败" not in assistant_content:
        _save_turn(db, session_id, "assistant", assistant_content)