"""OpenAI-compatible chat-completions proxy in front of the OwnThink bot API."""
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
import requests
import json
import time
import uuid

app = FastAPI()

# Allow cross-origin requests from any origin (public demo service).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Upstream bot endpoint and its API key (was inlined in an f-string URL).
OWNTHINK_URL = "https://api.ownthink.com/bot"
OWNTHINK_APPID = "9ffcb5785ad9617bf4e64178ac64f7b1"

# Bound the upstream wait so a hung service cannot pin a worker forever.
UPSTREAM_TIMEOUT_SECONDS = 10


def generate_openai_response(generated_text: str) -> dict:
    """Wrap *generated_text* in an OpenAI ``chat.completion`` response envelope.

    Token counts are zeroed because the upstream service does not report them.
    """
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": "gpt-3.5-turbo",
        "choices": [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": generated_text,
            },
            "finish_reason": "stop",
        }],
        "usage": {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0,
        },
    }


@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    """OpenAI-compatible endpoint: forward the last user message upstream.

    Raises:
        HTTPException: 400 for missing/empty input or bad JSON, 502 for an
            upstream non-200, 503 when the upstream is unreachable, 500 for
            an empty model answer or any unexpected error.
    """
    try:
        data = await request.json()

        # Only the most recent message is forwarded; earlier history is ignored.
        messages = data.get("messages", [])
        if not messages:
            raise HTTPException(status_code=400, detail="No messages provided")

        prompt = messages[-1].get("content", "")
        if not prompt:
            raise HTTPException(status_code=400, detail="Empty prompt")

        # Let `requests` URL-encode the prompt via `params` — the original
        # f-string URL broke on prompts containing spaces, '&', '#', etc.
        response = requests.get(
            OWNTHINK_URL,
            params={"appid": OWNTHINK_APPID, "spoken": prompt},
            timeout=UPSTREAM_TIMEOUT_SECONDS,
        )
        if response.status_code != 200:
            raise HTTPException(status_code=502, detail="Upstream service error")

        payload = response.json()
        generated_text = payload.get('data', {}).get('info', {}).get('text', "")
        if not generated_text:
            raise HTTPException(status_code=500, detail="Empty response from model")

        # Translate the plain-text answer into the OpenAI response schema.
        return generate_openai_response(generated_text)

    except HTTPException:
        # Bug fix: without this re-raise, the generic handler below rewrote
        # every deliberate 400/502/500 above into an opaque 500 error.
        raise
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON format")
    except requests.RequestException:
        raise HTTPException(status_code=503, detail="Service unavailable")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)