# mockserver_openaiapi/mock_openai_server.py
# (source listing metadata: 2025-08-20 09:32:35 +02:00, 82 lines, 1.9 KiB, Python)
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from datetime import datetime
import json
import os
app = FastAPI()
# Directory where each incoming request is logged as a standalone markdown file;
# created eagerly at import time so request handlers can write without checking.
LOG_DIR = "request_logs"
os.makedirs(LOG_DIR, exist_ok=True)
def format_markdown_log(headers, body, response):
    """Render one request/response pair as a markdown log entry.

    Args:
        headers: dict of HTTP request headers.
        body: parsed JSON request body; ``messages``, if present, is assumed
            to be a list of ``{"role": ..., "content": ...}`` dicts
            (OpenAI chat format).
        response: JSON-serializable response payload that was returned.

    Returns:
        A markdown string with fenced ``json``/``text`` blocks for the
        headers, message contents, full body, and response, terminated by
        a horizontal rule so consecutive entries stay visually separated.
    """
    timestamp = datetime.now().isoformat()
    # Flatten chat messages into "role: content" lines for quick scanning.
    messages = body.get("messages", [])
    messages_text = "\n".join(
        f"{m.get('role', 'unknown')}: {m.get('content', '')}" for m in messages
    )
    # NOTE: fences must be plain ``` — the original "\```" is an invalid
    # escape sequence (SyntaxWarning on modern Python) and emitted a literal
    # backslash that broke the markdown code-fence rendering.
    return f"""## 📥 Request - {timestamp}
**Timestamp:** {timestamp}
### Headers
```json
{json.dumps(headers, indent=4)}
```
### Message Contents
```text
{messages_text or "No messages"}
```
### Full Body
```json
{json.dumps(body, indent=4)}
```
### 📤 Response
```json
{json.dumps(response, indent=4)}
```
---
"""
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    """Mock OpenAI ``/v1/chat/completions`` endpoint.

    Logs the incoming request (headers + parsed JSON body) together with the
    canned response to a timestamped markdown file under ``LOG_DIR``, then
    returns a fixed OpenAI-shaped chat completion payload.

    Raises:
        json.JSONDecodeError (surfaced as a 500 by FastAPI) when the request
        body is not valid JSON — acceptable for a test mock.
    """
    headers = dict(request.headers)
    body = await request.json()
    # Canned response mimicking the OpenAI chat-completion schema; echoes the
    # requested model back (defaulting to "gpt-4" when the client omits it).
    dummy_response = {
        "id": "chatcmpl-mock123",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": body.get("model", "gpt-4"),
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "This is a mock response for testing purposes.",
                },
                "finish_reason": "stop",
            }
        ],
        "usage": {
            "prompt_tokens": 10,
            "completion_tokens": 12,
            "total_tokens": 22,
        },
    }
    # One markdown file per request; the microsecond suffix keeps filenames
    # unique for any realistic test load.
    markdown_log = format_markdown_log(headers, body, dummy_response)
    timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    log_file = os.path.join(LOG_DIR, f"request_{timestamp_str}.md")
    with open(log_file, "w", encoding="utf-8") as f:
        f.write(markdown_log)
    return JSONResponse(content=dummy_response)