# lokiai / main.py
import os
import re
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Request, Depends, Security, Query
from fastapi.responses import StreamingResponse, HTMLResponse, JSONResponse, FileResponse, PlainTextResponse
from fastapi.security import APIKeyHeader
from pydantic import BaseModel
import httpx
from functools import lru_cache
from pathlib import Path
import json
import datetime
import time
import asyncio
from starlette.status import HTTP_403_FORBIDDEN
import cloudscraper
from concurrent.futures import ThreadPoolExecutor
import uvloop
from fastapi.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
import contextlib
import requests
from typing import List, Dict, Any, Optional, Union
from fastapi import APIRouter
import hashlib
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
executor = ThreadPoolExecutor(max_workers=16)
load_dotenv()
api_key_header = APIKeyHeader(name="Authorization", auto_error=False)
from usage_tracker import UsageTracker
usage_tracker = UsageTracker()
# Disable the basic docs because we are better than that
app = FastAPI(docs_url=None, redoc_url=None)
app.add_middleware(GZipMiddleware, minimum_size=1000)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@lru_cache(maxsize=1)
def get_env_vars():
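    """Read upstream endpoints and API keys from the environment.

    Cached with lru_cache, so the environment is read once per process;
    restart the server after changing .env values.
    """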
return {
'api_keys': os.getenv('API_KEYS', '').split(','),
'secret_api_endpoint': os.getenv('SECRET_API_ENDPOINT'),
'secret_api_endpoint_2': os.getenv('SECRET_API_ENDPOINT_2'),
'secret_api_endpoint_3': os.getenv('SECRET_API_ENDPOINT_3'),
'secret_api_endpoint_4': os.getenv('SECRET_API_ENDPOINT_4', "https://enter.pollinations.ai/api/generate"),
'pollinations_key': os.getenv('POLLINATIONS_KEY'),
'secret_api_endpoint_5': os.getenv('SECRET_API_ENDPOINT_5'),
'secret_api_endpoint_6': os.getenv('SECRET_API_ENDPOINT_6'),
        'secret_api_endpoint_7': os.getenv('SECRET_API_ENDPOINT_7'),  # Bytez proxy endpoint
'mistral_api': os.getenv('MISTRAL_API', "https://api.mistral.ai"),
'mistral_key': os.getenv('MISTRAL_KEY'),
'gemini_key': os.getenv('GEMINI_KEY'),
        'bytez_key': os.getenv('BYTEZ_KEY'),  # Bytez API key
'endpoint_origin': os.getenv('ENDPOINT_ORIGIN'),
'new_img': os.getenv('NEW_IMG')
}
mistral_models = {
"mistral-large-latest", "pixtral-large-latest", "mistral-moderation-latest",
"ministral-3b-latest", "ministral-8b-latest", "open-mistral-nemo",
"mistral-small-latest", "mistral-saba-latest", "codestral-latest"
}
pollinations_models = {
"openai", "openai-fast", "openai-large", "openai-reasoning",
"qwen-coder", "mistral", "naughty", "deepseek", "openai-audio",
"claude", "gemini", "gemini-search", "unity", "midijourney",
"evil", "chickytutor", "perplexity-fast", "perplexity-reasoning"
}
alternate_models = {
"o1", "llama-4-scout", "o4-mini", "sonar", "sonar-pro", "sonar-reasoning",
"sonar-reasoning-pro", "grok-3", "grok-3-fast", "r1-1776", "o3"
}
claude_3_models = {
"claude-3-7-sonnet", "claude-3-7-sonnet-thinking", "claude 3.5 haiku",
"claude 3.5 sonnet", "claude 3.5 haiku", "o3-mini-medium", "o3-mini-high",
"grok-3", "grok-3-thinking", "grok 2"
}
bytez_models = {
"openai/gpt-4.1-nano",
"openai/gpt-4o",
"openai/gpt-4.1",
"openai/gpt-4.1-mini",
"openai/gpt-4o-mini",
"openai/gpt-3.5-turbo",
"openai/gpt-3.5-turbo-1106",
"openai/o3",
"openai/o1",
"openai/gpt-4",
"openai/gpt-5-nano",
"openai/gpt-5-mini",
"openai/gpt-5"
}
gemini_models = {
    # Gemini 1.5 Series
    "gemini-1.5-pro",                # Alias for the latest stable 1.5 Pro
    "gemini-1.5-pro-002",            # Latest 1.5 Pro stable
    "gemini-1.5-flash",              # Alias for the latest stable 1.5 Flash
    "gemini-1.5-flash-002",          # Latest 1.5 Flash stable
    "gemini-1.5-flash-8b",           # 1.5 Flash 8B variant
    # Gemini 2.0 Series
    "gemini-2.0-flash-lite",         # Cost-efficient model
    "gemini-2.0-flash-lite-preview", # Preview version
    "gemini-2.0-flash",              # Default model as of Jan 2025
    "gemini-2.0-flash-exp",          # Experimental version
    "gemini-2.0-flash-thinking",     # Exposes model reasoning
    "gemini-2.0-flash-thinking-exp-01-21",        # Experimental thinking
    "gemini-2.0-flash-preview-image-generation",  # Image generation
    "gemini-2.0-pro-exp-02-05",      # 2.0 Pro Experimental
    # Gemini 2.5 Series
    "gemini-2.5-flash",              # Default model as of May 2025
    "gemini-2.5-flash-preview-05-20",                     # 2.5 Flash preview
    "gemini-2.5-flash-preview-native-audio-dialog",       # Native audio output
    "gemini-2.5-flash-exp-native-audio-thinking-dialog",  # Audio thinking
    "gemini-2.5-pro",                # 2.5 Pro (most advanced, active)
    "gemini-2.5-pro-preview-06-05",  # Latest 2.5 Pro preview
    "gemini-2.5-pro-preview-03-25",  # 2.5 Pro preview
    "gemini-2.5-pro-exp-03-25",      # 2.5 Pro experimental
    "gemini-2.5-pro-preview-tts",    # Speech generation
    "gemini-2.5-flash-preview-tts",  # Speech generation
    # Experimental and Special Models
    "gemini-exp-1206",               # Experimental 2.0 Pro
    "gemini-embedding-exp-03-07",    # Experimental embeddings
}
supported_image_models = {
"Flux Pro Ultra", "grok-2-aurora", "Flux Pro", "Flux Pro Ultra Raw",
"Flux Dev", "Flux Schnell", "stable-diffusion-3-large-turbo",
"Flux Realism", "stable-diffusion-ultra", "dall-e-3", "sdxl-lightning-4step"
}
class Payload(BaseModel):
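    """OpenAI-style chat completion request body, including optional tool-calling fields."""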
model: str
messages: list
stream: bool = False
# Add optional fields for tools and tool_choice
tools: Optional[List[Dict[str, Any]]] = None
tool_choice: Optional[Union[str, Dict[str, Any]]] = None
class ImageGenerationPayload(BaseModel):
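    """Image generation request body (not referenced by any route defined in this module)."""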
model: str
prompt: str
size: str = "1024x1024"
number: int = 1
server_status = True
available_model_ids: list[str] = []
@lru_cache(maxsize=1)
def get_async_client():
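    """Return a shared httpx.AsyncClient with a 60 s timeout and pooled connections."""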
return httpx.AsyncClient(
timeout=60.0,
limits=httpx.Limits(max_keepalive_connections=50, max_connections=200)
)
scraper_pool = []
MAX_SCRAPERS = 20
def get_scraper():
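    """Return a cloudscraper instance from the pool, filling the pool lazily on first use.

    Selection is pseudo round-robin based on the current millisecond timestamp.
    """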
if not scraper_pool:
for _ in range(MAX_SCRAPERS):
scraper_pool.append(cloudscraper.create_scraper())
return scraper_pool[int(time.time() * 1000) % MAX_SCRAPERS]
async def verify_api_key(request: Request, api_key: str = None):
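    """Placeholder auth dependency: currently accepts every request without checking a key."""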
return True
@lru_cache(maxsize=1)
def load_models_data():
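    """Load the model catalogue from models.json next to this file (cached); return [] on failure."""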
try:
file_path = Path(__file__).parent / 'models.json'
with open(file_path, 'r') as f:
return json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
print(f"Error loading models.json: {str(e)}")
return []
@app.get("/api/v1/models")
@app.get("/models")
async def get_models():
models_data = load_models_data()
if not models_data:
raise HTTPException(status_code=500, detail="Error loading available models")
return models_data
async def generate_search_async(query: str, systemprompt: str | None = None, stream: bool = True):
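    """Proxy a search-style chat request to the SECRET_API_ENDPOINT_3 service.

    With stream=True, returns an asyncio.Queue that a background task fills with
    SSE-formatted chunks (or an error dict, then None as a sentinel).
    With stream=False, returns a dict containing the full response text.
    """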
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}
system_message = systemprompt or "Be Helpful and Friendly"
prompt_messages = [{"role": "user", "content": query}]
prompt_messages.insert(0, {"content": system_message, "role": "system"})
payload = {
"is_vscode_extension": True,
"message_history": prompt_messages,
"requested_model": "searchgpt",
"user_input": prompt_messages[-1]["content"],
}
secret_api_endpoint_3 = get_env_vars()['secret_api_endpoint_3']
if not secret_api_endpoint_3:
raise HTTPException(status_code=500, detail="Search API endpoint not configured")
client = get_async_client()
if stream:
queue = asyncio.Queue()
async def _fetch_search_data_stream():
try:
async with client.stream("POST", secret_api_endpoint_3, json=payload, headers=headers) as response:
if response.status_code != 200:
error_detail = await response.text()
await queue.put({"error": f"Search API returned status code {response.status_code}: {error_detail}"})
return
async for line in response.aiter_lines():
if line.startswith("data: "):
try:
json_data = json.loads(line[6:])
content = json_data.get("choices", [{}])[0].get("delta", {}).get("content", "")
if content.strip():
cleaned_response = {
"created": json_data.get("created"),
"id": json_data.get("id"),
"model": "searchgpt",
"object": "chat.completion",
"choices": [
{
"message": {
"content": content
}
}
]
}
await queue.put({"data": f"data: {json.dumps(cleaned_response)}\n\n", "text": content})
except json.JSONDecodeError:
                                if line[6:].strip() == "[DONE]":
                                    continue
print(f"Warning: Could not decode JSON from search API stream: {line}")
await queue.put({"error": f"Invalid JSON from search API: {line}"})
break
await queue.put(None)
except Exception as e:
print(f"Error in _fetch_search_data_stream: {e}")
await queue.put({"error": str(e)})
await queue.put(None)
asyncio.create_task(_fetch_search_data_stream())
return queue
else:
try:
response = await client.post(secret_api_endpoint_3, json=payload, headers=headers)
response.raise_for_status()
json_data = response.json()
content = json_data.get("choices", [{}])[0].get("message", {}).get("content", "")
return {"response": content}
except httpx.HTTPStatusError as e:
raise HTTPException(status_code=e.response.status_code, detail=f"Search API returned status {e.response.status_code}: {e.response.text}")
except httpx.RequestError as e:
raise HTTPException(status_code=502, detail=f"Failed to connect to search API: {str(e)}")
except Exception as e:
raise HTTPException(status_code=500, detail=f"An unexpected error occurred during search: {str(e)}")
@lru_cache(maxsize=10)
def read_html_file(file_path):
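    """Return the contents of a static HTML file (cached), or None if it does not exist."""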
try:
with open(file_path, "r") as file:
return file.read()
except FileNotFoundError:
return None
@app.get("/favicon.ico")
async def favicon():
favicon_path = Path(__file__).parent / "favicon.ico"
return FileResponse(favicon_path, media_type="image/x-icon")
@app.get("/banner.jpg")
async def banner():
banner_path = Path(__file__).parent / "banner.jpg"
return FileResponse(banner_path, media_type="image/jpeg")
@app.get("/ping")
async def ping():
return {"message": "pong", "response_time": "0.000000 seconds"}
@app.get("/", response_class=HTMLResponse)
async def root():
html_content = read_html_file("index.html")
if html_content is None:
raise HTTPException(status_code=404, detail="index.html not found")
return HTMLResponse(content=html_content)
@app.get("/scraper", response_class=PlainTextResponse)
async def scrape_site(url: str = Query(..., description="URL to scrape")):
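    """Fetch the given URL through cloudscraper in the thread pool and return the page text."""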
try:
loop = asyncio.get_running_loop()
response_text = await loop.run_in_executor(
executor,
lambda: get_scraper().get(url).text
)
if response_text and len(response_text.strip()) > 0:
return PlainTextResponse(response_text)
else:
raise HTTPException(status_code=500, detail="Scraping returned empty content.")
except Exception as e:
print(f"Cloudscraper failed: {e}")
raise HTTPException(status_code=500, detail=f"Cloudscraper failed: {e}")
@app.get("/playground", response_class=HTMLResponse)
async def playground():
html_content = read_html_file("playground.html")
if html_content is None:
raise HTTPException(status_code=404, detail="playground.html not found")
return HTMLResponse(content=html_content)
@app.get("/image-playground", response_class=HTMLResponse)
async def image_playground():
html_content = read_html_file("image-playground.html")
if html_content is None:
raise HTTPException(status_code=404, detail="image-playground.html not found")
return HTMLResponse(content=html_content)
GITHUB_BASE = "https://raw.githubusercontent.com/Parthsadaria/Vetra/main"
FILES = {
"html": "index.html",
"css": "style.css",
"js": "script.js"
}
@app.get("/docs", include_in_schema=False)
async def custom_documentation():
return HTMLResponse("""
<!DOCTYPE html>
<html>
<head>
<title>Loki API - The Good Stuff</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700&display=swap" rel="stylesheet">
<style>
body {
margin: 0;
background: #0f0f13; /* Deep dark background */
}
/* Custom Scrollbar because default is cringe */
::-webkit-scrollbar { width: 8px; height: 8px; }
::-webkit-scrollbar-track { background: #0f0f13; }
::-webkit-scrollbar-thumb { background: #3b82f6; border-radius: 4px; }
::-webkit-scrollbar-thumb:hover { background: #8b5cf6; }
/* Overriding Scalar variables for that LOKI aesthetic */
:root {
--scalar-color-1: #3b82f6; /* Blue */
--scalar-color-2: #8b5cf6; /* Purple */
--scalar-color-3: #10b981; /* Green */
--scalar-color-accent: #8b5cf6;
--scalar-background-1: #0b0b0e;
--scalar-background-2: #141419;
--scalar-background-3: #1f1f26;
--scalar-border-color: #2d2d3b;
--scalar-font: 'Inter', sans-serif;
}
/* Smooth fade in for the whole page */
body {
animation: fadeIn 0.8s ease-in-out;
}
@keyframes fadeIn {
from { opacity: 0; transform: translateY(10px); }
to { opacity: 1; transform: translateY(0); }
}
/* Let's hide the 'Made with Scalar' footer because we're mysterious */
.scalar-footer { display: none !important; }
</style>
</head>
<body>
<script
id="api-reference"
data-url="/openapi.json"
data-proxy-url="https://proxy.scalar.com"
data-layout="modern"
data-theme="deepSpace"
data-show-sidebar="true"
></script>
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
</body>
</html>
""")
header_url = os.getenv('HEADER_URL')
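# Example request against the chat completions proxy (the app listens on port 7860
# when run via __main__ below; the model must appear in /models or the hardcoded sets):
#   curl -X POST http://localhost:7860/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "mistral-small-latest", "messages": [{"role": "user", "content": "Hi"}], "stream": false}'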
@app.post("/chat/completions")
@app.post("/api/v1/chat/completions")
async def get_completion(payload: Payload, request: Request, authenticated: bool = Depends(verify_api_key)):
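    """OpenAI-compatible chat completions proxy.

    Chooses the upstream endpoint and auth headers based on which model set the
    requested model falls into (Mistral, Bytez, Pollinations, alternate, Claude,
    Gemini, or the default endpoint), forwards the payload, and either streams
    the upstream SSE response or returns the full JSON body.
    """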
if not server_status:
raise HTTPException(
status_code=503,
detail="Server is under maintenance. Please try again later."
)
model_to_use = payload.model or "mistral-small-latest"
if available_model_ids and model_to_use not in set(available_model_ids):
raise HTTPException(
status_code=400,
detail=f"Model '{model_to_use}' is not available. Check /models for the available model list."
)
usage_tracker.record_request(request=request, model=model_to_use, endpoint="/chat/completions")
    payload_dict = payload.dict(exclude_none=True)  # drop keys whose value is None
    # exclude_none already omits unset tools/tool_choice; the checks below are kept
    # for explicit clarity and to validate tool_choice's type before forwarding it.
if payload.tools is not None:
payload_dict["tools"] = payload.tools
# Handle the tool_choice more robustly
if payload.tool_choice is not None:
# Check if the value is valid before passing it on
if isinstance(payload.tool_choice, (str, dict)):
payload_dict["tool_choice"] = payload.tool_choice
else:
print(f"Warning: tool_choice received with invalid type: {type(payload.tool_choice)}. Skipping.")
stream_enabled = payload_dict.get("stream", True)
env_vars = get_env_vars()
endpoint = None
custom_headers = {}
target_url_path = "/v1/chat/completions"
if model_to_use in mistral_models:
endpoint = env_vars['mistral_api']
custom_headers = {
"Authorization": f"Bearer {env_vars['mistral_key']}"
}
elif model_to_use in bytez_models:
endpoint = env_vars['secret_api_endpoint_7']
if not endpoint:
raise HTTPException(status_code=500, detail="SECRET_API_ENDPOINT_7 not configured for Bytez models.")
if not env_vars['bytez_key']:
raise HTTPException(status_code=500, detail="BYTEZ_KEY not configured for Bytez models.")
custom_headers = {
"Authorization": f"Bearer {env_vars['bytez_key']}"
}
elif model_to_use in pollinations_models:
endpoint = env_vars['secret_api_endpoint_4']
custom_headers = {
'Authorization': f"Bearer {env_vars['pollinations_key']}"
}
elif model_to_use in alternate_models:
endpoint = env_vars['secret_api_endpoint_2']
custom_headers = {}
elif model_to_use in claude_3_models:
endpoint = env_vars['secret_api_endpoint_5']
custom_headers = {}
elif model_to_use in gemini_models:
endpoint = env_vars['secret_api_endpoint_6']
if not endpoint:
raise HTTPException(status_code=500, detail="Gemini API endpoint (SECRET_API_ENDPOINT_6) not configured.")
if not env_vars['gemini_key']:
raise HTTPException(status_code=500, detail="GEMINI_KEY not configured for Gemini models.")
custom_headers = {
"Authorization": f"Bearer {env_vars['gemini_key']}"
}
target_url_path = "/chat/completions"
else:
endpoint = env_vars['secret_api_endpoint']
custom_headers = {
"Origin": header_url,
"Priority": "u=1, i",
"Referer": header_url
}
if not endpoint:
raise HTTPException(status_code=500, detail=f"No API endpoint configured for model: {model_to_use}")
print(f"Proxying request for model '{model_to_use}' to endpoint: {endpoint}{target_url_path}")
client = get_async_client()
if stream_enabled:
async def real_time_stream_generator():
try:
async with client.stream("POST", f"{endpoint}{target_url_path}", json=payload_dict, headers=custom_headers) as response:
if response.status_code >= 400:
error_messages = {
400: "Bad request. Verify input data.",
401: "Unauthorized. Invalid API key for upstream service.",
403: "Forbidden. You do not have access to this resource on upstream.",
404: "The requested resource was not found on upstream.",
422: "Unprocessable entity. Check your payload for upstream API.",
500: "Internal server error from upstream API."
}
detail_message = error_messages.get(response.status_code, f"Upstream error code: {response.status_code}")
try:
error_body = await response.aread()
error_json = json.loads(error_body.decode('utf-8'))
if 'error' in error_json and 'message' in error_json['error']:
detail_message += f" - Upstream detail: {error_json['error']['message']}"
elif 'detail' in error_json:
detail_message += f" - Upstream detail: {error_json['detail']}"
else:
detail_message += f" - Upstream raw: {error_body.decode('utf-8')[:200]}..."
except (json.JSONDecodeError, UnicodeDecodeError):
detail_message += f" - Upstream raw: {error_body.decode('utf-8', errors='ignore')[:200]}"
raise HTTPException(status_code=response.status_code, detail=detail_message)
async for line in response.aiter_lines():
if line:
yield line + "\n"
except httpx.TimeoutException:
raise HTTPException(status_code=504, detail="Request to upstream AI service timed out.")
except httpx.RequestError as e:
raise HTTPException(status_code=502, detail=f"Failed to connect to upstream AI service: {str(e)}")
except Exception as e:
if isinstance(e, HTTPException):
raise e
print(f"An unexpected error occurred during chat completion proxy: {e}")
raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}")
return StreamingResponse(
real_time_stream_generator(),
media_type="text/event-stream",
headers={
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no"
}
)
else:
try:
response = await client.post(f"{endpoint}{target_url_path}", json=payload_dict, headers=custom_headers)
response.raise_for_status()
return JSONResponse(content=response.json())
except httpx.TimeoutException:
raise HTTPException(status_code=504, detail="Request to upstream AI service timed out.")
except httpx.RequestError as e:
raise HTTPException(status_code=502, detail=f"Failed to connect to upstream AI service: {str(e)}")
except httpx.HTTPStatusError as e:
error_messages = {
400: "Bad request. Verify input data.",
401: "Unauthorized. Invalid API key for upstream service.",
403: "Forbidden. You do not have access to this resource on upstream.",
404: "The requested resource was not found on upstream.",
422: "Unprocessable entity. Check your payload for upstream API.",
500: "Internal server error from upstream API."
}
detail_message = error_messages.get(e.response.status_code, f"Upstream error code: {e.response.status_code}")
try:
error_body = e.response.json()
if 'error' in error_body and 'message' in error_body['error']:
detail_message += f" - Upstream detail: {error_body['error']['message']}"
elif 'detail' in error_body:
detail_message += f" - Upstream detail: {error_body['detail']}"
except json.JSONDecodeError:
detail_message += f" - Upstream raw: {e.response.text[:200]}"
raise HTTPException(status_code=e.response.status_code, detail=detail_message)
except Exception as e:
print(f"An unexpected error occurred during non-streaming chat completion proxy: {e}")
raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}")
@lru_cache(maxsize=256)
def cached_url(url: str):
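    """lru_cache'd identity helper: memoizes the URL string only, not the fetched image."""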
return url
@app.get("/images/{prompt:path}")
async def create_image(
prompt: str,
request: Request,
authenticated: bool = Depends(verify_api_key)
):
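    """Proxy an image request for the given prompt to image.pollinations.ai and return the bytes."""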
if not server_status:
raise HTTPException(status_code=503, detail="Server is under maintenance.")
# Build base URL safely
base = "https://image.pollinations.ai/prompt/"
final_url = f"{base}{prompt}?nologo=true"
# If user provided custom query params, append them
if request.url.query:
final_url += f"&{request.url.query}"
    # cached_url is an lru_cache'd identity helper; it memoizes the URL string, not the image
final_url = cached_url(final_url)
try:
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.get(final_url)
            if resp.status_code != 200:
                raise HTTPException(status_code=resp.status_code, detail="Image generation failed.")
            # Read the body before the client closes; iterating resp.aiter_bytes()
            # after this block exits would stream from an already-closed client.
            image_bytes = resp.content
        return StreamingResponse(iter([image_bytes]), media_type="image/jpeg")
    except httpx.TimeoutException:
        raise HTTPException(status_code=504, detail="Image generation timeout.")
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
@app.get("/usage")
async def get_usage_json(days: int = 7):
return usage_tracker.get_usage_summary(days)
@app.get("/usage/page", response_class=HTMLResponse)
async def get_usage_page(days: int = Query(7, description="Number of days to include in the usage summary")):
usage_data = usage_tracker.get_usage_summary(days)
html_content = generate_usage_html(usage_data, days)
return HTMLResponse(content=html_content)
def generate_usage_html(usage_data: dict, days: int = 7):
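    """Render the usage analytics dashboard (Tailwind + Chart.js) from the tracker summary."""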
model_labels = list(usage_data['model_usage_period'].keys())
model_counts = list(usage_data['model_usage_period'].values())
endpoint_labels = list(usage_data['endpoint_usage_period'].keys())
endpoint_counts = list(usage_data['endpoint_usage_period'].values())
daily_dates = list(usage_data['daily_usage_period'].keys())
daily_requests = [data['requests'] for data in usage_data['daily_usage_period'].values()]
daily_unique_ips = [data['unique_ips_count'] for data in usage_data['daily_usage_period'].values()]
daily_usage_table_rows = "\n".join([
f"""
<tr class="hover:bg-slate-700/20 transition-colors duration-200">
<td class="px-6 py-4 whitespace-nowrap text-sm font-medium text-slate-200">{date}</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-300">
<span class="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-blue-500/20 text-blue-300">
{data['requests']:,}
</span>
</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-300">
<span class="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-emerald-500/20 text-emerald-300">
{data['unique_ips_count']:,}
</span>
</td>
</tr>
""" for date, data in usage_data['daily_usage_period'].items()
])
model_usage_all_time_rows = "\n".join([
f"""
<tr class="hover:bg-slate-700/20 transition-colors duration-200">
<td class="px-6 py-4 whitespace-nowrap">
<div class="flex items-center">
<div class="w-2 h-2 bg-purple-400 rounded-full mr-3"></div>
<span class="text-sm font-medium text-slate-200">{model}</span>
</div>
</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-300">
<span class="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-purple-500/20 text-purple-300">
{stats['total_requests']:,}
</span>
</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-400">{datetime.datetime.fromisoformat(stats['first_used']).strftime("%Y-%m-%d %H:%M")}</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-400">{datetime.datetime.fromisoformat(stats['last_used']).strftime("%Y-%m-%d %H:%M")}</td>
</tr>
""" for model, stats in usage_data['all_time_model_usage'].items()
])
api_usage_all_time_rows = "\n".join([
f"""
<tr class="hover:bg-slate-700/20 transition-colors duration-200">
<td class="px-6 py-4 whitespace-nowrap">
<div class="flex items-center">
<div class="w-2 h-2 bg-emerald-400 rounded-full mr-3"></div>
<span class="text-sm font-medium text-slate-200">{endpoint}</span>
</div>
</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-300">
<span class="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-emerald-500/20 text-emerald-300">
{stats['total_requests']:,}
</span>
</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-400">{datetime.datetime.fromisoformat(stats['first_used']).strftime("%Y-%m-%d %H:%M")}</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-400">{datetime.datetime.fromisoformat(stats['last_used']).strftime("%Y-%m-%d %H:%M")}</td>
</tr>
""" for endpoint, stats in usage_data['all_time_endpoint_usage'].items()
])
recent_requests_rows = "\n".join([
f"""
<tr class="hover:bg-slate-700/20 transition-colors duration-200">
<td class="px-6 py-4 whitespace-nowrap text-sm font-mono text-slate-300">{datetime.datetime.fromisoformat(req['timestamp']).strftime("%Y-%m-%d %H:%M:%S")}</td>
<td class="px-6 py-4 whitespace-nowrap">
<span class="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-indigo-500/20 text-indigo-300">
{req['model']}
</span>
</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-300">{req['endpoint']}</td>
<td class="px-6 py-4 whitespace-nowrap text-sm font-mono text-slate-400">{req['ip_address']}</td>
<td class="px-6 py-4 whitespace-nowrap text-sm text-slate-500 truncate max-w-xs">{req['user_agent'][:50]}...</td>
</tr>
""" for req in usage_data['recent_requests']
])
html_content = f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Lokiai AI - Usage Analytics Dashboard</title>
<script src="https://cdn.tailwindcss.com"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
        <script src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js" defer></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
<script>
tailwind.config = {{
theme: {{
extend: {{
animation: {{
'fade-in': 'fadeIn 0.5s ease-in-out',
'slide-up': 'slideUp 0.6s ease-out',
'pulse-slow': 'pulse 3s infinite',
'bounce-gentle': 'bounceGentle 2s infinite',
}},
keyframes: {{
fadeIn: {{
'0%': {{ opacity: '0', transform: 'translateY(10px)' }},
'100%': {{ opacity: '1', transform: 'translateY(0)' }},
}},
slideUp: {{
'0%': {{ opacity: '0', transform: 'translateY(30px)' }},
'100%': {{ opacity: '1', transform: 'translateY(0)' }},
}},
bounceGentle: {{
'0%, 100%': {{ transform: 'translateY(-2px)' }},
'50%': {{ transform: 'translateY(2px)' }},
}}
}}
}}
}}
}}
</script>
</head>
<body class="bg-gradient-to-br from-slate-950 via-slate-900 to-indigo-950 text-white min-h-screen">
<!-- Navigation Header -->
<nav class="bg-slate-900/80 backdrop-blur-md border-b border-slate-700/50 sticky top-0 z-50">
<div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8">
<div class="flex justify-between items-center h-16">
<div class="flex items-center space-x-4">
<div class="w-10 h-10 bg-gradient-to-br from-blue-500 to-indigo-600 rounded-xl flex items-center justify-center shadow-lg">
<i class="fas fa-robot text-white text-lg"></i>
</div>
<div>
<h1 class="text-xl font-bold bg-gradient-to-r from-blue-400 to-indigo-400 bg-clip-text text-transparent">
Lokiai AI
</h1>
<p class="text-xs text-slate-400">Usage Analytics Dashboard</p>
</div>
</div>
<div class="flex items-center space-x-2">
<div class="w-3 h-3 bg-green-400 rounded-full animate-pulse"></div>
<span class="text-sm text-slate-300">Live</span>
</div>
</div>
</div>
</nav>
<div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8 py-8 space-y-8">
<!-- Hero Stats Section -->
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-6 mb-8" x-data="{{}}">
<div class="bg-gradient-to-br from-blue-500/10 to-blue-600/5 backdrop-blur-sm border border-blue-500/20 rounded-2xl p-6 hover:border-blue-400/30 transition-all duration-300 animate-fade-in">
<div class="flex items-center justify-between">
<div>
<p class="text-blue-300 text-sm font-medium">Total Requests</p>
<p class="text-3xl font-bold text-white mt-2">{usage_data['total_requests']:,}</p>
<p class="text-green-400 text-xs mt-1">
<i class="fas fa-arrow-up mr-1"></i>All Time
</p>
</div>
<div class="w-12 h-12 bg-blue-500/20 rounded-xl flex items-center justify-center">
<i class="fas fa-chart-line text-blue-400 text-xl"></i>
</div>
</div>
</div>
<div class="bg-gradient-to-br from-emerald-500/10 to-emerald-600/5 backdrop-blur-sm border border-emerald-500/20 rounded-2xl p-6 hover:border-emerald-400/30 transition-all duration-300 animate-fade-in" style="animation-delay: 0.1s">
<div class="flex items-center justify-between">
<div>
<p class="text-emerald-300 text-sm font-medium">Unique Users</p>
<p class="text-3xl font-bold text-white mt-2">{usage_data['unique_ips_total_count']:,}</p>
<p class="text-green-400 text-xs mt-1">
<i class="fas fa-users mr-1"></i>All Time
</p>
</div>
<div class="w-12 h-12 bg-emerald-500/20 rounded-xl flex items-center justify-center">
<i class="fas fa-users text-emerald-400 text-xl"></i>
</div>
</div>
</div>
<div class="bg-gradient-to-br from-purple-500/10 to-purple-600/5 backdrop-blur-sm border border-purple-500/20 rounded-2xl p-6 hover:border-purple-400/30 transition-all duration-300 animate-fade-in" style="animation-delay: 0.2s">
<div class="flex items-center justify-between">
<div>
<p class="text-purple-300 text-sm font-medium">Active Models</p>
<p class="text-3xl font-bold text-white mt-2">{len(usage_data['model_usage_period'])}</p>
<p class="text-blue-400 text-xs mt-1">
<i class="fas fa-clock mr-1"></i>Last {days} Days
</p>
</div>
<div class="w-12 h-12 bg-purple-500/20 rounded-xl flex items-center justify-center">
<i class="fas fa-brain text-purple-400 text-xl"></i>
</div>
</div>
</div>
<div class="bg-gradient-to-br from-amber-500/10 to-amber-600/5 backdrop-blur-sm border border-amber-500/20 rounded-2xl p-6 hover:border-amber-400/30 transition-all duration-300 animate-fade-in" style="animation-delay: 0.3s">
<div class="flex items-center justify-between">
<div>
<p class="text-amber-300 text-sm font-medium">API Endpoints</p>
<p class="text-3xl font-bold text-white mt-2">{len(usage_data['endpoint_usage_period'])}</p>
<p class="text-blue-400 text-xs mt-1">
<i class="fas fa-clock mr-1"></i>Last {days} Days
</p>
</div>
<div class="w-12 h-12 bg-amber-500/20 rounded-xl flex items-center justify-center">
<i class="fas fa-plug text-amber-400 text-xl"></i>
</div>
</div>
</div>
</div>
<!-- Charts Section -->
<div class="grid grid-cols-1 lg:grid-cols-2 gap-8">
<!-- Daily Usage Chart -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl p-6 animate-slide-up">
<div class="flex items-center justify-between mb-6">
<div>
<h3 class="text-xl font-semibold text-white">Daily Usage Trends</h3>
<p class="text-slate-400 text-sm">Last {days} days performance</p>
</div>
<div class="flex items-center space-x-2">
<div class="w-3 h-3 bg-blue-400 rounded-full"></div>
<span class="text-xs text-slate-300">Requests</span>
<div class="w-3 h-3 bg-amber-400 rounded-full ml-4"></div>
<span class="text-xs text-slate-300">Unique IPs</span>
</div>
</div>
<div class="h-64">
<canvas id="dailyRequestsChart" class="w-full h-full"></canvas>
</div>
</div>
<!-- Model Usage Chart -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl p-6 animate-slide-up" style="animation-delay: 0.2s">
<div class="flex items-center justify-between mb-6">
<div>
<h3 class="text-xl font-semibold text-white">Model Distribution</h3>
<p class="text-slate-400 text-sm">Usage by AI models</p>
</div>
</div>
<div class="h-64">
<canvas id="modelUsageChart" class="w-full h-full"></canvas>
</div>
</div>
</div>
<!-- Endpoint Usage Chart -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl p-6 animate-slide-up" style="animation-delay: 0.4s">
<div class="flex items-center justify-between mb-6">
<div>
<h2 class="text-2xl font-bold text-white">API Endpoint Analytics</h2>
<p class="text-slate-400">Distribution of requests across different endpoints</p>
</div>
</div>
<div class="h-80">
<canvas id="endpointUsageChart" class="w-full h-full"></canvas>
</div>
</div>
<!-- Data Tables Section -->
<div class="grid grid-cols-1 xl:grid-cols-2 gap-8">
<!-- Daily Usage Table -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl animate-slide-up" style="animation-delay: 0.6s">
<div class="p-6 border-b border-slate-700/50">
<h3 class="text-xl font-semibold text-white flex items-center">
<i class="fas fa-calendar-alt mr-3 text-blue-400"></i>
Daily Breakdown
</h3>
<p class="text-slate-400 text-sm mt-1">Last {days} days detailed view</p>
</div>
<div class="overflow-x-auto max-h-96">
<table class="w-full">
<thead class="bg-slate-700/30">
<tr>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Date</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Requests</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Unique IPs</th>
</tr>
</thead>
<tbody class="divide-y divide-slate-700/30">
{daily_usage_table_rows}
</tbody>
</table>
</div>
</div>
<!-- Model Usage Table -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl animate-slide-up" style="animation-delay: 0.8s">
<div class="p-6 border-b border-slate-700/50">
<h3 class="text-xl font-semibold text-white flex items-center">
<i class="fas fa-robot mr-3 text-purple-400"></i>
Model Statistics
</h3>
<p class="text-slate-400 text-sm mt-1">All-time model usage data</p>
</div>
<div class="overflow-x-auto max-h-96">
<table class="w-full">
<thead class="bg-slate-700/30">
<tr>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Model</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Requests</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">First Used</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Last Used</th>
</tr>
</thead>
<tbody class="divide-y divide-slate-700/30">
{model_usage_all_time_rows}
</tbody>
</table>
</div>
</div>
</div>
<!-- API Endpoints Table -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl animate-slide-up" style="animation-delay: 1s">
<div class="p-6 border-b border-slate-700/50">
<h3 class="text-xl font-semibold text-white flex items-center">
<i class="fas fa-plug mr-3 text-emerald-400"></i>
API Endpoint Details
</h3>
<p class="text-slate-400 text-sm mt-1">Complete endpoint usage statistics</p>
</div>
<div class="overflow-x-auto">
<table class="w-full">
<thead class="bg-slate-700/30">
<tr>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Endpoint</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Total Requests</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">First Used</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Last Used</th>
</tr>
</thead>
<tbody class="divide-y divide-slate-700/30">
{api_usage_all_time_rows}
</tbody>
</table>
</div>
</div>
<!-- Recent Requests -->
<div class="bg-slate-800/40 backdrop-blur-sm border border-slate-700/50 rounded-2xl animate-slide-up" style="animation-delay: 1.2s">
<div class="p-6 border-b border-slate-700/50">
<h3 class="text-xl font-semibold text-white flex items-center">
<i class="fas fa-clock mr-3 text-amber-400"></i>
Recent Activity
</h3>
<p class="text-slate-400 text-sm mt-1">Last 20 requests in real-time</p>
</div>
<div class="overflow-x-auto">
<table class="w-full">
<thead class="bg-slate-700/30">
<tr>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Timestamp</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Model</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">Endpoint</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">IP Address</th>
<th class="px-6 py-4 text-left text-xs font-medium text-slate-300 uppercase tracking-wider">User Agent</th>
</tr>
</thead>
<tbody class="divide-y divide-slate-700/30">
{recent_requests_rows}
</tbody>
</table>
</div>
</div>
</div>
<!-- Footer -->
<footer class="border-t border-slate-700/50 mt-16">
<div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8 py-8">
<div class="flex flex-col md:flex-row justify-between items-center space-y-4 md:space-y-0">
<div class="flex items-center space-x-4">
<div class="w-8 h-8 bg-gradient-to-br from-blue-500 to-indigo-600 rounded-lg flex items-center justify-center">
<i class="fas fa-robot text-white text-sm"></i>
</div>
<div>
<p class="text-slate-300 font-medium">Lokiai AI Dashboard</p>
<p class="text-slate-500 text-xs">Advanced Analytics & Monitoring</p>
</div>
</div>
<div class="text-slate-400 text-sm">
Last updated: <span class="text-slate-300" id="currentTime"></span>
</div>
</div>
</div>
</footer>
<script>
// Update current time
document.getElementById('currentTime').textContent = new Date().toLocaleString();
// Chart data
const modelLabels = {json.dumps(model_labels)};
const modelCounts = {json.dumps(model_counts)};
const endpointLabels = {json.dumps(endpoint_labels)};
const endpointCounts = {json.dumps(endpoint_counts)};
const dailyDates = {json.dumps(daily_dates)};
const dailyRequests = {json.dumps(daily_requests)};
const dailyUniqueIps = {json.dumps(daily_unique_ips)};
// Chart options
const chartOptions = {{
responsive: true,
maintainAspectRatio: false,
plugins: {{
legend: {{
labels: {{
color: '#e2e8f0',
padding: 20,
font: {{
size: 12,
weight: 500
}}
}}
}}
}},
scales: {{
x: {{
ticks: {{
color: '#94a3b8',
font: {{
size: 11
}}
}},
grid: {{
color: 'rgba(148, 163, 184, 0.1)',
drawBorder: false
}}
}},
y: {{
beginAtZero: true,
ticks: {{
color: '#94a3b8',
font: {{
size: 11
}}
}},
grid: {{
color: 'rgba(148, 163, 184, 0.1)',
drawBorder: false
}}
}}
}}
}};
// Daily Requests Chart
new Chart(document.getElementById('dailyRequestsChart'), {{
type: 'line',
data: {{
labels: dailyDates,
datasets: [
{{
label: 'Total Requests',
data: dailyRequests,
borderColor: '#3b82f6',
backgroundColor: 'rgba(59, 130, 246, 0.1)',
fill: true,
tension: 0.4,
borderWidth: 3,
pointBackgroundColor: '#3b82f6',
pointBorderColor: '#1e40af',
pointBorderWidth: 2,
pointRadius: 4,
pointHoverRadius: 6
}},
{{
label: 'Unique IPs',
data: dailyUniqueIps,
borderColor: '#f59e0b',
backgroundColor: 'rgba(245, 158, 11, 0.1)',
fill: true,
tension: 0.4,
borderWidth: 3,
pointBackgroundColor: '#f59e0b',
pointBorderColor: '#d97706',
pointBorderWidth: 2,
pointRadius: 4,
pointHoverRadius: 6
}}
]
}},
options: chartOptions
}});
// Model Usage Chart
new Chart(document.getElementById('modelUsageChart'), {{
type: 'doughnut',
data: {{
labels: modelLabels,
datasets: [{{
data: modelCounts,
backgroundColor: [
'#3b82f6', '#8b5cf6', '#06b6d4', '#10b981', '#f59e0b',
'#ef4444', '#ec4899', '#84cc16', '#f97316', '#6366f1'
],
borderWidth: 0,
hoverOffset: 8
}}]
}},
options: {{
responsive: true,
maintainAspectRatio: false,
plugins: {{
legend: {{
position: 'bottom',
labels: {{
color: '#e2e8f0',
padding: 15,
usePointStyle: true,
font: {{
size: 12
}}
}}
}}
}},
cutout: '60%'
}}
}});
// Endpoint Usage Chart
new Chart(document.getElementById('endpointUsageChart'), {{
type: 'bar',
data: {{
labels: endpointLabels,
datasets: [{{
label: 'Requests',
data: endpointCounts,
backgroundColor: 'rgba(59, 130, 246, 0.8)',
borderColor: '#3b82f6',
borderWidth: 0,
borderRadius: 8,
borderSkipped: false,
}}]
}},
options: {{
...chartOptions,
plugins: {{
legend: {{
display: false
}}
}}
}}
}});
// Add hover effects to table rows
document.querySelectorAll('tbody tr').forEach(row => {{
row.addEventListener('mouseenter', function() {{
this.classList.add('bg-slate-700/20');
}});
row.addEventListener('mouseleave', function() {{
this.classList.remove('bg-slate-700/20');
}});
}});
</script>
</body>
</html>"""
return html_content
@app.on_event("startup")
async def startup_event():
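    """Merge models.json with the hardcoded model sets, pre-warm the scraper pool, and warn about missing env vars."""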
global available_model_ids
models_data = load_models_data()
available_model_ids = [m['id'] for m in models_data if isinstance(m, dict) and 'id' in m]
# Add all hardcoded model sets
available_model_ids.extend(list(pollinations_models))
available_model_ids.extend(list(alternate_models))
available_model_ids.extend(list(mistral_models))
available_model_ids.extend(list(claude_3_models))
available_model_ids.extend(list(gemini_models))
available_model_ids.extend(list(bytez_models))
# Remove duplicates
available_model_ids = list(set(available_model_ids))
print(f"Total unique available models after merging: {len(available_model_ids)}")
for _ in range(MAX_SCRAPERS):
scraper_pool.append(cloudscraper.create_scraper())
print(f"Initialized Cloudscraper pool with {MAX_SCRAPERS} instances.")
env_vars = get_env_vars()
missing_vars = []
if not env_vars['api_keys'] or env_vars['api_keys'] == ['']:
missing_vars.append('API_KEYS')
if not env_vars['secret_api_endpoint']:
missing_vars.append('SECRET_API_ENDPOINT')
if not env_vars['secret_api_endpoint_2']:
missing_vars.append('SECRET_API_ENDPOINT_2')
if not env_vars['secret_api_endpoint_3']:
missing_vars.append('SECRET_API_ENDPOINT_3')
if not env_vars['secret_api_endpoint_4'] and any(model in pollinations_models for model in available_model_ids):
missing_vars.append('SECRET_API_ENDPOINT_4 (Pollinations.ai)')
if not env_vars['secret_api_endpoint_5'] and any(model in claude_3_models for model in available_model_ids):
missing_vars.append('SECRET_API_ENDPOINT_5 (Claude 3.x)')
if not env_vars['secret_api_endpoint_6'] and any(model in gemini_models for model in available_model_ids):
missing_vars.append('SECRET_API_ENDPOINT_6 (Gemini)')
if not env_vars['mistral_api'] and any(model in mistral_models for model in available_model_ids):
missing_vars.append('MISTRAL_API')
if not env_vars['mistral_key'] and any(model in mistral_models for model in available_model_ids):
missing_vars.append('MISTRAL_KEY')
    if not env_vars['gemini_key'] and any(model in gemini_models for model in available_model_ids):
        missing_vars.append('GEMINI_KEY')
    if not env_vars['secret_api_endpoint_7'] and any(model in bytez_models for model in available_model_ids):
        missing_vars.append('SECRET_API_ENDPOINT_7 (Bytez)')
    if not env_vars['bytez_key'] and any(model in bytez_models for model in available_model_ids):
        missing_vars.append('BYTEZ_KEY (Bytez)')
    if not env_vars['new_img'] and len(supported_image_models) > 0:
        missing_vars.append('NEW_IMG (Image Generation)')
if missing_vars:
print(f"WARNING: The following critical environment variables are missing or empty: {', '.join(missing_vars)}")
print("Some server functionality (e.g., specific AI models, image generation) may be limited or unavailable.")
else:
print("All critical environment variables appear to be configured.")
print("Server started successfully!")
@app.on_event("shutdown")
async def shutdown_event():
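    """Close the shared HTTP client, clear the scraper pool, and persist usage data."""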
client = get_async_client()
await client.aclose()
scraper_pool.clear()
usage_tracker.save_data()
print("Server shutdown complete!")
@app.get("/health")
async def health_check():
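    """Report overall status plus any environment variables still missing for the configured models."""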
env_vars = get_env_vars()
missing_critical_vars = []
if not env_vars['api_keys'] or env_vars['api_keys'] == ['']:
missing_critical_vars.append('API_KEYS')
if not env_vars['secret_api_endpoint']:
missing_critical_vars.append('SECRET_API_ENDPOINT')
if not env_vars['secret_api_endpoint_2']:
missing_critical_vars.append('SECRET_API_ENDPOINT_2')
if not env_vars['secret_api_endpoint_3']:
missing_critical_vars.append('SECRET_API_ENDPOINT_3')
if not env_vars['secret_api_endpoint_4'] and any(model in pollinations_models for model in available_model_ids):
missing_critical_vars.append('SECRET_API_ENDPOINT_4 (Pollinations.ai)')
if not env_vars['secret_api_endpoint_5'] and any(model in claude_3_models for model in available_model_ids):
missing_critical_vars.append('SECRET_API_ENDPOINT_5 (Claude 3.x)')
if not env_vars['secret_api_endpoint_6'] and any(model in gemini_models for model in available_model_ids):
missing_critical_vars.append('SECRET_API_ENDPOINT_6 (Gemini)')
if not env_vars['mistral_api'] and any(model in mistral_models for model in available_model_ids):
missing_critical_vars.append('MISTRAL_API')
if not env_vars['mistral_key'] and any(model in mistral_models for model in available_model_ids):
missing_critical_vars.append('MISTRAL_KEY')
if not env_vars['gemini_key'] and any(model in gemini_models for model in available_model_ids):
missing_critical_vars.append('GEMINI_KEY')
if not env_vars['new_img'] and len(supported_image_models) > 0:
missing_critical_vars.append('NEW_IMG (Image Generation)')
if not env_vars['secret_api_endpoint_7'] and any(model in bytez_models for model in available_model_ids):
missing_critical_vars.append('SECRET_API_ENDPOINT_7 (Bytez)')
if not env_vars['bytez_key'] and any(model in bytez_models for model in available_model_ids):
missing_critical_vars.append('BYTEZ_KEY (Bytez)')
health_status = {
"status": "healthy" if not missing_critical_vars else "unhealthy",
"missing_env_vars": missing_critical_vars,
"server_status": server_status,
"message": "Everything's lit! πŸš€" if not missing_critical_vars else "Uh oh, some env vars are missing. 😬"
}
return JSONResponse(content=health_status)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=7860)