Commit
·
f841fdd
1
Parent(s):
d8a02d4
Update design and generation logic
Browse files- api/endpoints.py +58 -3
- api/models.py +2 -2
- main.py +116 -24
- requirements.txt +14 -3
- templates/docs.html +187 -390
- utils/generation.py +228 -37
api/endpoints.py
CHANGED
|
@@ -15,8 +15,10 @@ MODEL_NAME = os.getenv("MODEL_NAME", "openai/gpt-oss-20b:fireworks-ai")
|
|
| 15 |
def model_info():
|
| 16 |
return {
|
| 17 |
"model_name": MODEL_NAME,
|
| 18 |
-
"secondary_model": os.getenv("SECONDARY_MODEL_NAME", "
|
| 19 |
"tertiary_model": os.getenv("TERTIARY_MODEL_NAME", "mistralai/Mixtral-8x7B-Instruct-v0.1"),
|
|
|
|
|
|
|
| 20 |
"api_base": API_ENDPOINT,
|
| 21 |
"status": "online"
|
| 22 |
}
|
|
@@ -46,6 +48,42 @@ async def chat_endpoint(req: QueryRequest):
|
|
| 46 |
response = "".join(list(stream))
|
| 47 |
return {"response": response}
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
@router.post("/api/code")
|
| 50 |
async def code_endpoint(req: dict):
|
| 51 |
framework = req.get("framework")
|
|
@@ -57,7 +95,7 @@ async def code_endpoint(req: dict):
|
|
| 57 |
api_key=HF_TOKEN,
|
| 58 |
api_base=api_endpoint,
|
| 59 |
message=prompt,
|
| 60 |
-
system_prompt="You are a coding expert.",
|
| 61 |
model_name=model_name,
|
| 62 |
temperature=0.7,
|
| 63 |
max_new_tokens=128000,
|
|
@@ -72,13 +110,30 @@ async def analysis_endpoint(req: dict):
|
|
| 72 |
api_key=HF_TOKEN,
|
| 73 |
api_base=api_endpoint,
|
| 74 |
message=message,
|
| 75 |
-
system_prompt="You are an expert analyst. Provide detailed analysis with step-by-step reasoning.",
|
| 76 |
model_name=model_name,
|
| 77 |
temperature=0.7,
|
| 78 |
max_new_tokens=128000,
|
| 79 |
)))
|
| 80 |
return {"analysis": response}
|
| 81 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
@router.get("/api/test-model")
|
| 83 |
async def test_model(model: str = MODEL_NAME, endpoint: str = API_ENDPOINT):
|
| 84 |
try:
|
|
|
|
| 15 |
def model_info():
|
| 16 |
return {
|
| 17 |
"model_name": MODEL_NAME,
|
| 18 |
+
"secondary_model": os.getenv("SECONDARY_MODEL_NAME", "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"),
|
| 19 |
"tertiary_model": os.getenv("TERTIARY_MODEL_NAME", "mistralai/Mixtral-8x7B-Instruct-v0.1"),
|
| 20 |
+
"clip_base_model": os.getenv("CLIP_BASE_MODEL", "openai/clip-vit-base-patch32"),
|
| 21 |
+
"clip_large_model": os.getenv("CLIP_LARGE_MODEL", "openai/clip-vit-large-patch14"),
|
| 22 |
"api_base": API_ENDPOINT,
|
| 23 |
"status": "online"
|
| 24 |
}
|
|
|
|
| 48 |
response = "".join(list(stream))
|
| 49 |
return {"response": response}
|
| 50 |
|
| 51 |
+
|
| 52 |
+
# في api/endpoints.py
|
| 53 |
+
@router.post("/api/audio-transcription")
|
| 54 |
+
async def audio_transcription_endpoint(file: UploadFile = File(...)):
|
| 55 |
+
model_name, api_endpoint = select_model("transcribe audio", input_type="audio")
|
| 56 |
+
audio_data = await file.read()
|
| 57 |
+
response = "".join(list(request_generation(
|
| 58 |
+
api_key=HF_TOKEN,
|
| 59 |
+
api_base=api_endpoint,
|
| 60 |
+
message="Transcribe audio",
|
| 61 |
+
system_prompt="Transcribe the provided audio using Whisper.",
|
| 62 |
+
model_name=model_name,
|
| 63 |
+
temperature=0.7,
|
| 64 |
+
max_new_tokens=128000,
|
| 65 |
+
input_type="audio",
|
| 66 |
+
audio_data=audio_data,
|
| 67 |
+
)))
|
| 68 |
+
return {"transcription": response}
|
| 69 |
+
|
| 70 |
+
@router.post("/api/text-to-speech")
|
| 71 |
+
async def text_to_speech_endpoint(req: dict):
|
| 72 |
+
text = req.get("text", "")
|
| 73 |
+
model_name, api_endpoint = select_model("text to speech", input_type="text")
|
| 74 |
+
response = request_generation(
|
| 75 |
+
api_key=HF_TOKEN,
|
| 76 |
+
api_base=api_endpoint,
|
| 77 |
+
message=text,
|
| 78 |
+
system_prompt="Convert the provided text to speech using Parler-TTS.",
|
| 79 |
+
model_name=model_name,
|
| 80 |
+
temperature=0.7,
|
| 81 |
+
max_new_tokens=128000,
|
| 82 |
+
input_type="text",
|
| 83 |
+
)
|
| 84 |
+
audio_data = b"".join(list(response))
|
| 85 |
+
return StreamingResponse(io.BytesIO(audio_data), media_type="audio/wav")
|
| 86 |
+
|
| 87 |
@router.post("/api/code")
|
| 88 |
async def code_endpoint(req: dict):
|
| 89 |
framework = req.get("framework")
|
|
|
|
| 95 |
api_key=HF_TOKEN,
|
| 96 |
api_base=api_endpoint,
|
| 97 |
message=prompt,
|
| 98 |
+
system_prompt="You are a coding expert. Provide detailed, well-commented code with examples and explanations.",
|
| 99 |
model_name=model_name,
|
| 100 |
temperature=0.7,
|
| 101 |
max_new_tokens=128000,
|
|
|
|
| 110 |
api_key=HF_TOKEN,
|
| 111 |
api_base=api_endpoint,
|
| 112 |
message=message,
|
| 113 |
+
system_prompt="You are an expert analyst. Provide detailed analysis with step-by-step reasoning and examples.",
|
| 114 |
model_name=model_name,
|
| 115 |
temperature=0.7,
|
| 116 |
max_new_tokens=128000,
|
| 117 |
)))
|
| 118 |
return {"analysis": response}
|
| 119 |
|
| 120 |
+
@router.post("/api/image-analysis")
|
| 121 |
+
async def image_analysis_endpoint(req: dict):
|
| 122 |
+
image_url = req.get("image_url", "")
|
| 123 |
+
task = req.get("task", "describe")
|
| 124 |
+
prompt = f"Perform the following task on the image at {image_url}: {task}"
|
| 125 |
+
model_name, api_endpoint = select_model(prompt)
|
| 126 |
+
response = "".join(list(request_generation(
|
| 127 |
+
api_key=HF_TOKEN,
|
| 128 |
+
api_base=api_endpoint,
|
| 129 |
+
message=prompt,
|
| 130 |
+
system_prompt="You are an expert in image analysis. Provide detailed descriptions or classifications based on the query.",
|
| 131 |
+
model_name=model_name,
|
| 132 |
+
temperature=0.7,
|
| 133 |
+
max_new_tokens=128000,
|
| 134 |
+
)))
|
| 135 |
+
return {"image_analysis": response}
|
| 136 |
+
|
| 137 |
@router.get("/api/test-model")
|
| 138 |
async def test_model(model: str = MODEL_NAME, endpoint: str = API_ENDPOINT):
|
| 139 |
try:
|
api/models.py
CHANGED
|
@@ -3,8 +3,8 @@ from typing import List, Optional
|
|
| 3 |
|
| 4 |
class QueryRequest(BaseModel):
|
| 5 |
message: str
|
| 6 |
-
system_prompt: str = "You are
|
| 7 |
history: Optional[List[dict]] = None
|
| 8 |
-
temperature: float = 0.
|
| 9 |
max_new_tokens: int = 128000
|
| 10 |
enable_browsing: bool = False
|
|
|
|
| 3 |
|
| 4 |
class QueryRequest(BaseModel):
|
| 5 |
message: str
|
| 6 |
+
system_prompt: str = "You are an expert assistant providing detailed, comprehensive, and well-structured responses. For code, include comments, examples, and complete implementations. For image-related queries, provide detailed analysis or descriptions. For general queries, provide in-depth explanations with examples and additional context where applicable. Continue generating content until the query is fully addressed, leveraging the full capacity of the model."
|
| 7 |
history: Optional[List[dict]] = None
|
| 8 |
+
temperature: float = 0.7
|
| 9 |
max_new_tokens: int = 128000
|
| 10 |
enable_browsing: bool = False
|
main.py
CHANGED
|
@@ -5,9 +5,11 @@ from fastapi.responses import HTMLResponse, RedirectResponse
|
|
| 5 |
from fastapi.staticfiles import StaticFiles
|
| 6 |
from fastapi.templating import Jinja2Templates
|
| 7 |
from starlette.middleware.base import BaseHTTPMiddleware
|
|
|
|
| 8 |
import gradio as gr
|
| 9 |
from api.endpoints import router as api_router
|
| 10 |
from utils.generation import generate, LATEX_DELIMS
|
|
|
|
| 11 |
|
| 12 |
# إعداد التسجيل
|
| 13 |
logging.basicConfig(level=logging.INFO)
|
|
@@ -18,6 +20,7 @@ logger.info("Files in /app/: %s", os.listdir("/app"))
|
|
| 18 |
|
| 19 |
# إعداد العميل لـ Hugging Face Inference API
|
| 20 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
|
| 21 |
if not HF_TOKEN:
|
| 22 |
logger.error("HF_TOKEN is not set in environment variables.")
|
| 23 |
raise ValueError("HF_TOKEN is required for Inference API.")
|
|
@@ -28,43 +31,129 @@ CONCURRENCY_LIMIT = int(os.getenv("CONCURRENCY_LIMIT", 20))
|
|
| 28 |
|
| 29 |
# إعداد CSS
|
| 30 |
css = """
|
| 31 |
-
.gradio-container { max-width:
|
| 32 |
-
.chatbot { border: 1px solid #ccc; border-radius: 10px; }
|
| 33 |
-
.input-textbox { font-size: 16px; }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
"""
|
| 35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
# إعداد واجهة Gradio
|
| 37 |
chatbot_ui = gr.ChatInterface(
|
| 38 |
-
fn=
|
| 39 |
type="messages",
|
| 40 |
chatbot=gr.Chatbot(
|
| 41 |
label="MGZon Chatbot",
|
| 42 |
type="messages",
|
| 43 |
-
height=
|
| 44 |
latex_delimiters=LATEX_DELIMS,
|
| 45 |
),
|
| 46 |
additional_inputs_accordion=gr.Accordion("⚙️ Settings", open=True),
|
| 47 |
additional_inputs=[
|
| 48 |
-
gr.Textbox(
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
gr.Radio(label="Reasoning Effort", choices=["low", "medium", "high"], value="medium"),
|
| 51 |
gr.Checkbox(label="Enable DeepSearch (web browsing)", value=True),
|
| 52 |
-
gr.Slider(label="Max New Tokens", minimum=50, maximum=128000, step=50, value=
|
|
|
|
|
|
|
| 53 |
],
|
|
|
|
| 54 |
stop_btn="Stop",
|
| 55 |
examples=[
|
| 56 |
-
["Explain the difference between supervised and unsupervised learning."],
|
| 57 |
-
["Generate a React component for a login form."],
|
| 58 |
-
["
|
| 59 |
-
["
|
| 60 |
-
["
|
| 61 |
-
["
|
| 62 |
-
["What are the latest trends in AI?"],
|
| 63 |
-
["Provide guidelines for publishing a technical blog post."],
|
| 64 |
-
["Who is the founder of MGZon?"],
|
| 65 |
],
|
| 66 |
title="MGZon Chatbot",
|
| 67 |
-
description="A versatile chatbot powered by
|
| 68 |
theme="gradio/soft",
|
| 69 |
css=css,
|
| 70 |
)
|
|
@@ -79,20 +168,18 @@ app = gr.mount_gradio_app(app, chatbot_ui, path="/gradio")
|
|
| 79 |
app.mount("/static", StaticFiles(directory="static"), name="static")
|
| 80 |
templates = Jinja2Templates(directory="templates")
|
| 81 |
|
| 82 |
-
# تضمين API endpoints
|
| 83 |
-
app.include_router(api_router)
|
| 84 |
-
|
| 85 |
# Middleware لمعالجة 404
|
| 86 |
class NotFoundMiddleware(BaseHTTPMiddleware):
|
| 87 |
async def dispatch(self, request: Request, call_next):
|
| 88 |
try:
|
| 89 |
response = await call_next(request)
|
| 90 |
if response.status_code == 404:
|
|
|
|
| 91 |
return templates.TemplateResponse("404.html", {"request": request}, status_code=404)
|
| 92 |
return response
|
| 93 |
except Exception as e:
|
| 94 |
-
logger.exception(f"Error processing request: {e}")
|
| 95 |
-
return templates.TemplateResponse("
|
| 96 |
|
| 97 |
app.add_middleware(NotFoundMiddleware)
|
| 98 |
|
|
@@ -106,10 +193,15 @@ async def root(request: Request):
|
|
| 106 |
async def docs(request: Request):
|
| 107 |
return templates.TemplateResponse("docs.html", {"request": request})
|
| 108 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
# Redirect لـ /gradio
|
| 110 |
@app.get("/launch-chatbot", response_class=RedirectResponse)
|
| 111 |
async def launch_chatbot():
|
| 112 |
-
return RedirectResponse(url="/gradio")
|
| 113 |
|
| 114 |
# تشغيل الخادم
|
| 115 |
if __name__ == "__main__":
|
|
|
|
| 5 |
from fastapi.staticfiles import StaticFiles
|
| 6 |
from fastapi.templating import Jinja2Templates
|
| 7 |
from starlette.middleware.base import BaseHTTPMiddleware
|
| 8 |
+
from fastapi.openapi.docs import get_swagger_ui_html
|
| 9 |
import gradio as gr
|
| 10 |
from api.endpoints import router as api_router
|
| 11 |
from utils.generation import generate, LATEX_DELIMS
|
| 12 |
+
import io
|
| 13 |
|
| 14 |
# إعداد التسجيل
|
| 15 |
logging.basicConfig(level=logging.INFO)
|
|
|
|
| 20 |
|
| 21 |
# إعداد العميل لـ Hugging Face Inference API
|
| 22 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 23 |
+
BACKUP_HF_TOKEN = os.getenv("BACKUP_HF_TOKEN") # إضافة التوكن الاحتياطي
|
| 24 |
if not HF_TOKEN:
|
| 25 |
logger.error("HF_TOKEN is not set in environment variables.")
|
| 26 |
raise ValueError("HF_TOKEN is required for Inference API.")
|
|
|
|
| 31 |
|
| 32 |
# إعداد CSS
|
| 33 |
css = """
|
| 34 |
+
.gradio-container { max-width: 1200px; margin: auto; }
|
| 35 |
+
.chatbot { border: 1px solid #ccc; border-radius: 10px; padding: 15px; background-color: #f9f9f9; }
|
| 36 |
+
.input-textbox { font-size: 16px; padding: 10px; }
|
| 37 |
+
.upload-button::before {
|
| 38 |
+
content: '📷';
|
| 39 |
+
margin-right: 8px;
|
| 40 |
+
font-size: 22px;
|
| 41 |
+
}
|
| 42 |
+
.audio-input::before {
|
| 43 |
+
content: '🎤';
|
| 44 |
+
margin-right: 8px;
|
| 45 |
+
font-size: 22px;
|
| 46 |
+
}
|
| 47 |
+
.audio-output::before {
|
| 48 |
+
content: '🔊';
|
| 49 |
+
margin-right: 8px;
|
| 50 |
+
font-size: 22px;
|
| 51 |
+
}
|
| 52 |
+
.loading::after {
|
| 53 |
+
content: '';
|
| 54 |
+
display: inline-block;
|
| 55 |
+
width: 16px;
|
| 56 |
+
height: 16px;
|
| 57 |
+
border: 2px solid #333;
|
| 58 |
+
border-top-color: transparent;
|
| 59 |
+
border-radius: 50%;
|
| 60 |
+
animation: spin 1s linear infinite;
|
| 61 |
+
margin-left: 8px;
|
| 62 |
+
}
|
| 63 |
+
@keyframes spin {
|
| 64 |
+
to { transform: rotate(360deg); }
|
| 65 |
+
}
|
| 66 |
+
.output-container {
|
| 67 |
+
margin-top: 20px;
|
| 68 |
+
padding: 10px;
|
| 69 |
+
border: 1px solid #ddd;
|
| 70 |
+
border-radius: 8px;
|
| 71 |
+
}
|
| 72 |
+
.audio-output-container {
|
| 73 |
+
display: flex;
|
| 74 |
+
align-items: center;
|
| 75 |
+
gap: 10px;
|
| 76 |
+
margin-top: 10px;
|
| 77 |
+
}
|
| 78 |
"""
|
| 79 |
|
| 80 |
+
# دالة لمعالجة الإدخال (نص، صوت، صور، ملفات)
|
| 81 |
+
def process_input(message, audio_input=None, file_input=None, history=None, system_prompt=None, temperature=0.7, reasoning_effort="medium", enable_browsing=True, max_new_tokens=128000):
|
| 82 |
+
input_type = "text"
|
| 83 |
+
audio_data = None
|
| 84 |
+
image_data = None
|
| 85 |
+
if audio_input:
|
| 86 |
+
input_type = "audio"
|
| 87 |
+
with open(audio_input, "rb") as f:
|
| 88 |
+
audio_data = f.read()
|
| 89 |
+
message = "Transcribe this audio"
|
| 90 |
+
elif file_input:
|
| 91 |
+
input_type = "file"
|
| 92 |
+
if file_input.endswith(('.png', '.jpg', '.jpeg')):
|
| 93 |
+
input_type = "image"
|
| 94 |
+
with open(file_input, "rb") as f:
|
| 95 |
+
image_data = f.read()
|
| 96 |
+
message = f"Analyze image: {file_input}"
|
| 97 |
+
else:
|
| 98 |
+
message = f"Analyze file: {file_input}"
|
| 99 |
+
|
| 100 |
+
response_text = ""
|
| 101 |
+
audio_response = None
|
| 102 |
+
for chunk in generate(
|
| 103 |
+
message=message,
|
| 104 |
+
history=history,
|
| 105 |
+
system_prompt=system_prompt,
|
| 106 |
+
temperature=temperature,
|
| 107 |
+
reasoning_effort=reasoning_effort,
|
| 108 |
+
enable_browsing=enable_browsing,
|
| 109 |
+
max_new_tokens=max_new_tokens,
|
| 110 |
+
input_type=input_type,
|
| 111 |
+
audio_data=audio_data,
|
| 112 |
+
image_data=image_data
|
| 113 |
+
):
|
| 114 |
+
if isinstance(chunk, bytes):
|
| 115 |
+
audio_response = io.BytesIO(chunk)
|
| 116 |
+
audio_response.name = "response.wav"
|
| 117 |
+
else:
|
| 118 |
+
response_text += chunk
|
| 119 |
+
yield response_text, audio_response
|
| 120 |
+
|
| 121 |
# إعداد واجهة Gradio
|
| 122 |
chatbot_ui = gr.ChatInterface(
|
| 123 |
+
fn=process_input,
|
| 124 |
type="messages",
|
| 125 |
chatbot=gr.Chatbot(
|
| 126 |
label="MGZon Chatbot",
|
| 127 |
type="messages",
|
| 128 |
+
height=800,
|
| 129 |
latex_delimiters=LATEX_DELIMS,
|
| 130 |
),
|
| 131 |
additional_inputs_accordion=gr.Accordion("⚙️ Settings", open=True),
|
| 132 |
additional_inputs=[
|
| 133 |
+
gr.Textbox(
|
| 134 |
+
label="System Prompt",
|
| 135 |
+
value="You are an expert assistant providing detailed, comprehensive, and well-structured responses. Support text, audio, image, and file inputs. For audio, transcribe using Whisper. For text-to-speech, use Parler-TTS. For images and files, analyze content appropriately. Continue generating content until the query is fully addressed, leveraging the full capacity of the model.",
|
| 136 |
+
lines=4
|
| 137 |
+
),
|
| 138 |
+
gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, step=0.1, value=0.7),
|
| 139 |
gr.Radio(label="Reasoning Effort", choices=["low", "medium", "high"], value="medium"),
|
| 140 |
gr.Checkbox(label="Enable DeepSearch (web browsing)", value=True),
|
| 141 |
+
gr.Slider(label="Max New Tokens", minimum=50, maximum=128000, step=50, value=128000),
|
| 142 |
+
gr.Audio(label="Voice Input", type="filepath", elem_classes="audio-input"),
|
| 143 |
+
gr.File(label="Upload Image/File", file_types=["image", ".pdf", ".txt"], elem_classes="upload-button"),
|
| 144 |
],
|
| 145 |
+
additional_outputs=[gr.Audio(label="Voice Output", type="filepath", elem_classes="audio-output", autoplay=True)],
|
| 146 |
stop_btn="Stop",
|
| 147 |
examples=[
|
| 148 |
+
["Explain the difference between supervised and unsupervised learning in detail with examples."],
|
| 149 |
+
["Generate a complete React component for a login form with form validation and error handling."],
|
| 150 |
+
["Describe this image: https://example.com/image.jpg"],
|
| 151 |
+
["Transcribe this audio: [upload audio file]."],
|
| 152 |
+
["Convert this text to speech: Hello, welcome to MGZon!"],
|
| 153 |
+
["Analyze this file: [upload PDF or text file]."],
|
|
|
|
|
|
|
|
|
|
| 154 |
],
|
| 155 |
title="MGZon Chatbot",
|
| 156 |
+
description="A versatile chatbot powered by DeepSeek, CLIP, Whisper, and Parler-TTS for text, image, audio, and file queries. Supports long responses, voice input/output, file uploads with custom icons, and backup token switching. Licensed under Apache 2.0.",
|
| 157 |
theme="gradio/soft",
|
| 158 |
css=css,
|
| 159 |
)
|
|
|
|
| 168 |
app.mount("/static", StaticFiles(directory="static"), name="static")
|
| 169 |
templates = Jinja2Templates(directory="templates")
|
| 170 |
|
|
|
|
|
|
|
|
|
|
| 171 |
# Middleware لمعالجة 404
|
| 172 |
class NotFoundMiddleware(BaseHTTPMiddleware):
|
| 173 |
async def dispatch(self, request: Request, call_next):
|
| 174 |
try:
|
| 175 |
response = await call_next(request)
|
| 176 |
if response.status_code == 404:
|
| 177 |
+
logger.warning(f"404 Not Found: {request.url}")
|
| 178 |
return templates.TemplateResponse("404.html", {"request": request}, status_code=404)
|
| 179 |
return response
|
| 180 |
except Exception as e:
|
| 181 |
+
logger.exception(f"Error processing request {request.url}: {e}")
|
| 182 |
+
return templates.TemplateResponse("500.html", {"request": request, "error": str(e)}, status_code=500)
|
| 183 |
|
| 184 |
app.add_middleware(NotFoundMiddleware)
|
| 185 |
|
|
|
|
| 193 |
async def docs(request: Request):
|
| 194 |
return templates.TemplateResponse("docs.html", {"request": request})
|
| 195 |
|
| 196 |
+
# Swagger UI endpoint
|
| 197 |
+
@app.get("/swagger", response_class=HTMLResponse)
|
| 198 |
+
async def swagger_ui():
|
| 199 |
+
return get_swagger_ui_html(openapi_url="/openapi.json", title="MGZon API Documentation")
|
| 200 |
+
|
| 201 |
# Redirect لـ /gradio
|
| 202 |
@app.get("/launch-chatbot", response_class=RedirectResponse)
|
| 203 |
async def launch_chatbot():
|
| 204 |
+
return RedirectResponse(url="/gradio", status_code=302)
|
| 205 |
|
| 206 |
# تشغيل الخادم
|
| 207 |
if __name__ == "__main__":
|
requirements.txt
CHANGED
|
@@ -1,13 +1,24 @@
|
|
| 1 |
-
fastapi==0.115.
|
| 2 |
uvicorn==0.30.6
|
| 3 |
-
gradio==4.
|
| 4 |
openai==1.42.0
|
| 5 |
httpx==0.27.0
|
| 6 |
python-dotenv==1.0.1
|
| 7 |
-
pydocstyle==6.
|
| 8 |
requests==2.32.3
|
| 9 |
beautifulsoup4==4.12.3
|
| 10 |
tenacity==8.5.0
|
| 11 |
selenium==4.25.0
|
|
|
|
| 12 |
jinja2==3.1.4
|
| 13 |
cachetools==5.5.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.115.2
|
| 2 |
uvicorn==0.30.6
|
| 3 |
+
gradio==4.48.0
|
| 4 |
openai==1.42.0
|
| 5 |
httpx==0.27.0
|
| 6 |
python-dotenv==1.0.1
|
| 7 |
+
pydocstyle==6.4.0
|
| 8 |
requests==2.32.3
|
| 9 |
beautifulsoup4==4.12.3
|
| 10 |
tenacity==8.5.0
|
| 11 |
selenium==4.25.0
|
| 12 |
+
webdriver-manager==4.0.2
|
| 13 |
jinja2==3.1.4
|
| 14 |
cachetools==5.5.0
|
| 15 |
+
pydub==0.25.1
|
| 16 |
+
ffmpeg-python==0.2.0
|
| 17 |
+
numpy==1.26.4
|
| 18 |
+
parler-tts==0.2.0
|
| 19 |
+
torch==2.4.1
|
| 20 |
+
torchaudio==2.4.1
|
| 21 |
+
transformers==4.45.1
|
| 22 |
+
webrtcvad==2.0.10
|
| 23 |
+
Pillow==10.4.0
|
| 24 |
+
|
templates/docs.html
CHANGED
|
@@ -1,176 +1,72 @@
|
|
| 1 |
<!DOCTYPE html>
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
background-size: 400% 400%;
|
| 24 |
-
animation: gradientShift 15s ease infinite;
|
| 25 |
-
font-family: system-ui, sans-serif;
|
| 26 |
-
}
|
| 27 |
-
/* Glass-morphism helpers */
|
| 28 |
-
.glass {
|
| 29 |
-
background: rgba(255, 255, 255, 0.07);
|
| 30 |
-
border-radius: 1rem;
|
| 31 |
-
border: 1px solid rgba(255, 255, 255, 0.12);
|
| 32 |
-
backdrop-filter: blur(12px);
|
| 33 |
-
-webkit-backdrop-filter: blur(12px);
|
| 34 |
-
}
|
| 35 |
-
/* Sidebar transition */
|
| 36 |
-
.sidebar {
|
| 37 |
-
transition: transform 0.3s ease-in-out;
|
| 38 |
-
}
|
| 39 |
-
.sidebar.collapsed .logo {
|
| 40 |
-
opacity: 0;
|
| 41 |
-
transition: opacity 0.2s ease;
|
| 42 |
-
}
|
| 43 |
-
/* Prevent sidebar from covering footer */
|
| 44 |
-
.main-content {
|
| 45 |
-
min-height: calc(100vh - 4rem);
|
| 46 |
-
}
|
| 47 |
-
/* Hover effects for cards and tabs */
|
| 48 |
-
.glass:hover {
|
| 49 |
-
transform: scale(1.05);
|
| 50 |
-
box-shadow: 0 8px 16px rgba(0, 0, 0, 0.3);
|
| 51 |
-
background: rgba(255, 255, 255, 0.15);
|
| 52 |
-
}
|
| 53 |
-
/* Tab content transition */
|
| 54 |
-
.tab-content {
|
| 55 |
-
display: none;
|
| 56 |
-
transition: opacity 0.3s ease;
|
| 57 |
-
}
|
| 58 |
-
.tab-content.active {
|
| 59 |
-
display: block;
|
| 60 |
-
opacity: 1;
|
| 61 |
-
}
|
| 62 |
-
/* Responsive sidebar */
|
| 63 |
-
@media (max-width: 768px) {
|
| 64 |
-
.sidebar {
|
| 65 |
-
transform: translateX(-100%);
|
| 66 |
-
}
|
| 67 |
-
.sidebar.active {
|
| 68 |
-
transform: translateX(0);
|
| 69 |
-
}
|
| 70 |
-
}
|
| 71 |
-
/* Copy button feedback */
|
| 72 |
-
.copy-btn.copied::after {
|
| 73 |
-
content: 'Copied!';
|
| 74 |
-
position: absolute;
|
| 75 |
-
top: -2rem;
|
| 76 |
-
right: 1rem;
|
| 77 |
-
background: #10b981;
|
| 78 |
-
color: white;
|
| 79 |
-
padding: 0.5rem;
|
| 80 |
-
border-radius: 0.5rem;
|
| 81 |
-
font-size: 0.875rem;
|
| 82 |
-
}
|
| 83 |
-
</style>
|
| 84 |
</head>
|
| 85 |
-
<body class="text-white flex flex-col
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
href="/"
|
| 126 |
-
class="inline-block bg-gray-700/80 text-white px-4 py-2 rounded-lg mb-4 hover:bg-emerald-600 transition"
|
| 127 |
-
>
|
| 128 |
-
Back to Home
|
| 129 |
-
</a>
|
| 130 |
-
<div class="text-center py-12">
|
| 131 |
-
<img
|
| 132 |
-
src="https://raw.githubusercontent.com/Mark-Lasfar/MGZon/main/public/icons/mg.svg"
|
| 133 |
-
alt="MGZon Logo"
|
| 134 |
-
class="w-32 h-32 mx-auto mb-6 animate-bounce"
|
| 135 |
-
/>
|
| 136 |
-
<h1 class="text-5xl font-bold mb-4 bg-clip-text text-transparent bg-gradient-to-r from-teal-300 to-emerald-400">
|
| 137 |
-
MGZon API Documentation 📚
|
| 138 |
-
</h1>
|
| 139 |
-
<p class="text-lg max-w-2xl mx-auto mb-8">
|
| 140 |
-
Integrate MGZon Chatbot into your projects with our API. Supports Python (Django, Flask, FastAPI), JavaScript (React, Node.js, Express), Ruby (Rails), PHP (Laravel), and more.
|
| 141 |
-
</p>
|
| 142 |
-
<a
|
| 143 |
-
href="/swagger"
|
| 144 |
-
class="inline-block bg-emerald-500 text-white px-4 py-2 rounded-lg mt-4 hover:bg-emerald-600 transition"
|
| 145 |
-
>
|
| 146 |
-
View Interactive API Docs
|
| 147 |
-
</a>
|
| 148 |
-
</div>
|
| 149 |
-
<div class="docs my-12">
|
| 150 |
-
<h2 class="text-3xl font-bold text-center mb-8">Endpoints</h2>
|
| 151 |
-
<div class="flex flex-wrap gap-4 mb-8">
|
| 152 |
-
<button class="tab-btn glass px-4 py-2 rounded-lg text-white bg-emerald-500" data-tab="chat">Chat</button>
|
| 153 |
-
<button class="tab-btn glass px-4 py-2 rounded-lg text-white" data-tab="code">Code</button>
|
| 154 |
-
<button class="tab-btn glass px-4 py-2 rounded-lg text-white" data-tab="analysis">Analysis</button>
|
| 155 |
-
<button class="tab-btn glass px-4 py-2 rounded-lg text-white" data-tab="oauth">OAuth</button>
|
| 156 |
-
</div>
|
| 157 |
-
<div id="chat" class="tab-content glass p-6 active">
|
| 158 |
-
<div class="code-block glass p-6 mb-6 relative">
|
| 159 |
-
<h3 class="text-xl font-semibold text-emerald-300 mb-2">POST /api/chat</h3>
|
| 160 |
-
<p>Send a chat message to the MGZon Chatbot and get a response.</p>
|
| 161 |
-
<pre><code class="language-json">{
|
| 162 |
-
"message": "Your query here",
|
| 163 |
-
"system_prompt": "You are a helpful assistant",
|
| 164 |
-
"history": [{"role": "user", "content": "Previous message"}, {"role": "assistant", "content": "Previous response"}],
|
| 165 |
-
"temperature": 0.7,
|
| 166 |
-
"max_new_tokens": 4096,
|
| 167 |
-
"enable_browsing": false
|
| 168 |
}</code></pre>
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
|
| 175 |
response = requests.post(
|
| 176 |
"https://hager-zon.vercel.app/api/chat",
|
|
@@ -182,11 +78,11 @@ response = requests.post(
|
|
| 182 |
}
|
| 183 |
)
|
| 184 |
print(response.json())</code></pre>
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
from django.http import JsonResponse
|
| 191 |
from django.views import View
|
| 192 |
|
|
@@ -198,11 +94,11 @@ class ChatView(View):
|
|
| 198 |
json={"message": data, "system_prompt": "You are a coding expert", "temperature": 0.7}
|
| 199 |
)
|
| 200 |
return JsonResponse(response.json())</code></pre>
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
import requests
|
| 207 |
|
| 208 |
app = Flask(__name__)
|
|
@@ -215,11 +111,11 @@ def chat():
|
|
| 215 |
json={"message": data, "system_prompt": "You are a coding expert", "temperature": 0.7}
|
| 216 |
)
|
| 217 |
return jsonify(response.json())</code></pre>
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
|
| 224 |
function ChatComponent() {
|
| 225 |
const [message, setMessage] = useState('');
|
|
@@ -250,11 +146,11 @@ function ChatComponent() {
|
|
| 250 |
}
|
| 251 |
|
| 252 |
export default ChatComponent;</code></pre>
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
const fetch = require('node-fetch');
|
| 259 |
const app = express();
|
| 260 |
|
|
@@ -277,11 +173,11 @@ app.post('/chat', async (req, res) => {
|
|
| 277 |
});
|
| 278 |
|
| 279 |
app.listen(3000, () => console.log('Server running on port 3000'));</code></pre>
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
|
| 286 |
class ChatController < ApplicationController
|
| 287 |
def create
|
|
@@ -298,11 +194,11 @@ class ChatController < ApplicationController
|
|
| 298 |
render json: response.parsed_response
|
| 299 |
end
|
| 300 |
end</code></pre>
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
|
| 307 |
class ChatController extends Controller
|
| 308 |
{
|
|
@@ -317,206 +213,107 @@ class ChatController extends Controller
|
|
| 317 |
return $response->json();
|
| 318 |
}
|
| 319 |
}</code></pre>
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-H "Content-Type: application/json" \
|
| 326 |
-d '{"message":"Generate a React component","system_prompt":"You are a coding expert","temperature":0.7,"max_new_tokens":4096}'</code></pre>
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
}</code></pre>
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
}</code></pre>
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-H "Content-Type: application/json" \
|
| 358 |
-d '{"client_id": "YOUR_CLIENT_ID", "client_secret": "YOUR_CLIENT_SECRET", "grant_type": "client_credentials"}'</code></pre>
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
</main>
|
| 365 |
-
|
| 366 |
-
<!-- Footer -->
|
| 367 |
-
<footer class="bg-gradient-to-r from-teal-900 to-emerald-900 py-12 mt-8">
|
| 368 |
-
<div class="container max-w-6xl mx-auto text-center">
|
| 369 |
-
<img
|
| 370 |
-
src="https://raw.githubusercontent.com/Mark-Lasfar/MGZon/main/public/icons/mg.svg"
|
| 371 |
-
alt="MGZon Logo"
|
| 372 |
-
class="w-24 h-24 mx-auto mb-6 animate-pulse"
|
| 373 |
-
/>
|
| 374 |
-
<p class="mb-4">
|
| 375 |
-
Developed by
|
| 376 |
-
<a href="https://mark-elasfar.web.app/" target="_blank" class="text-emerald-300 hover:underline">Mark Al-Asfar</a>
|
| 377 |
-
| Powered by
|
| 378 |
-
<a href="https://hager-zon.vercel.app/" target="_blank" class="text-emerald-300 hover:underline">MGZon AI</a>
|
| 379 |
-
</p>
|
| 380 |
-
<div class="grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 gap-6">
|
| 381 |
-
<div class="glass p-4 cursor-pointer" onclick="showCardDetails('email')">
|
| 382 |
-
<i class="bx bx-mail-send text-3xl text-emerald-300 mb-2"></i>
|
| 383 |
-
<h4 class="font-semibold mb-1">Email Us</h4>
|
| 384 |
-
<p>
|
| 385 |
-
<a href="mailto:[email protected]" class="text-emerald-300 hover:underline">[email protected]</a>
|
| 386 |
-
</p>
|
| 387 |
-
<div id="email-details" class="hidden mt-4 p-4 bg-gray-700/80 rounded-lg">
|
| 388 |
-
<p>Reach out to our support team for any inquiries or assistance.</p>
|
| 389 |
-
<button onclick="closeCardDetails('email')" class="bg-emerald-500 text-white px-4 py-2 rounded-lg mt-2">Close</button>
|
| 390 |
-
</div>
|
| 391 |
</div>
|
| 392 |
-
<div class="glass p-4 cursor-pointer" onclick="showCardDetails('phone')">
|
| 393 |
-
<i class="bx bx-phone text-3xl text-emerald-300 mb-2"></i>
|
| 394 |
-
<h4 class="font-semibold mb-1">Phone Support</h4>
|
| 395 |
-
<p>+1-800-123-4567</p>
|
| 396 |
-
<div id="phone-details" class="hidden mt-4 p-4 bg-gray-700/80 rounded-lg">
|
| 397 |
-
<p>Contact our support team via phone for immediate assistance.</p>
|
| 398 |
-
<button onclick="closeCardDetails('phone')" class="bg-emerald-500 text-white px-4 py-2 rounded-lg mt-2">Close</button>
|
| 399 |
-
</div>
|
| 400 |
-
</div>
|
| 401 |
-
<div class="glass p-4 cursor-pointer" onclick="showCardDetails('community')">
|
| 402 |
-
<i class="bx bx-group text-3xl text-emerald-300 mb-2"></i>
|
| 403 |
-
<h4 class="font-semibold mb-1">Community</h4>
|
| 404 |
-
<p>
|
| 405 |
-
<a href="https://hager-zon.vercel.app/community" class="text-emerald-300 hover:underline">Join us</a>
|
| 406 |
-
</p>
|
| 407 |
-
<div id="community-details" class="hidden mt-4 p-4 bg-gray-700/80 rounded-lg">
|
| 408 |
-
<p>Join our vibrant community to share ideas and collaborate.</p>
|
| 409 |
-
<button onclick="closeCardDetails('community')" class="bg-emerald-500 text-white px-4 py-2 rounded-lg mt-2">Close</button>
|
| 410 |
-
</div>
|
| 411 |
-
</div>
|
| 412 |
-
<div class="glass p-4 cursor-pointer" onclick="showCardDetails('api-docs')">
|
| 413 |
-
<i class="bx bx-code-alt text-3xl text-emerald-300 mb-2"></i>
|
| 414 |
-
<h4 class="font-semibold mb-1">API Docs</h4>
|
| 415 |
-
<p>
|
| 416 |
-
<a href="/docs" class="text-emerald-300 hover:underline">Explore Docs</a>
|
| 417 |
-
</p>
|
| 418 |
-
<div id="api-docs-details" class="hidden mt-4 p-4 bg-gray-700/80 rounded-lg">
|
| 419 |
-
<p>Explore our API documentation for seamless integration.</p>
|
| 420 |
-
<button onclick="closeCardDetails('api-docs')" class="bg-emerald-500 text-white px-4 py-2 rounded-lg mt-2">Close</button>
|
| 421 |
-
</div>
|
| 422 |
-
</div>
|
| 423 |
-
<div class="glass p-4 cursor-pointer" onclick="showCardDetails('faq')">
|
| 424 |
-
<i class="bx bx-help-circle text-3xl text-emerald-300 mb-2"></i>
|
| 425 |
-
<h4 class="font-semibold mb-1">FAQ</h4>
|
| 426 |
-
<p>
|
| 427 |
-
<a href="https://hager-zon.vercel.app/faq" target="_blank" class="text-emerald-300 hover:underline">Read FAQ</a>
|
| 428 |
-
</p>
|
| 429 |
-
<div id="faq-details" class="hidden mt-4 p-4 bg-gray-700/80 rounded-lg">
|
| 430 |
-
<p>Find answers to common questions in our FAQ section.</p>
|
| 431 |
-
<button onclick="closeCardDetails('faq')" class="bg-emerald-500 text-white px-4 py-2 rounded-lg mt-2">Close</button>
|
| 432 |
-
</div>
|
| 433 |
-
</div>
|
| 434 |
-
<div class="glass p-4 cursor-pointer" onclick="showCardDetails('docs')">
|
| 435 |
-
<i class="bx bx-book text-3xl text-emerald-300 mb-2"></i>
|
| 436 |
-
<h4 class="font-semibold mb-1">Documentation</h4>
|
| 437 |
-
<p>
|
| 438 |
-
<a href="/docs" class="text-emerald-300 hover:underline">Full Docs</a>
|
| 439 |
-
</p>
|
| 440 |
-
<div id="docs-details" class="hidden mt-4 p-4 bg-gray-700/80 rounded-lg">
|
| 441 |
-
<p>Access comprehensive documentation for MGZon Chatbot.</p>
|
| 442 |
-
<button onclick="closeCardDetails('docs')" class="bg-emerald-500 text-white px-4 py-2 rounded-lg mt-2">Close</button>
|
| 443 |
-
</div>
|
| 444 |
-
</div>
|
| 445 |
-
</div>
|
| 446 |
-
<div class="flex justify-center gap-6 mt-6">
|
| 447 |
-
<a href="https://github.com/Mark-Lasfar/MGZon" class="text-2xl text-white hover:text-emerald-300 transition">
|
| 448 |
-
<i class="bx bxl-github"></i>
|
| 449 |
-
</a>
|
| 450 |
-
<a href="https://x.com/MGZon" class="text-2xl text-white hover:text-emerald-300 transition">
|
| 451 |
-
<i class="bx bxl-twitter"></i>
|
| 452 |
-
</a>
|
| 453 |
-
<a href="https://www.facebook.com/people/Mark-Al-Asfar/pfbid02GMisUQ8AqWkNZjoKtWFHH1tbdHuVscN1cjcFnZWy9HkRaAsmanBfT6mhySAyqpg4l/" class="text-2xl text-white hover:text-emerald-300 transition">
|
| 454 |
-
<i class="bx bxl-facebook"></i>
|
| 455 |
-
</a>
|
| 456 |
-
</div>
|
| 457 |
-
<p class="mt-6">© 2025 Mark Al-Asfar & MGZon AI. All rights reserved.</p>
|
| 458 |
</div>
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
|
| 482 |
-
|
| 483 |
-
|
| 484 |
-
|
| 485 |
-
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 509 |
});
|
| 510 |
-
|
| 511 |
-
});
|
| 512 |
-
|
| 513 |
-
// Card details toggle
|
| 514 |
-
function showCardDetails(cardId) {
|
| 515 |
-
document.getElementById(`${cardId}-details`).classList.remove('hidden');
|
| 516 |
-
}
|
| 517 |
-
function closeCardDetails(cardId) {
|
| 518 |
-
document.getElementById(`${cardId}-details`).classList.add('hidden');
|
| 519 |
-
}
|
| 520 |
-
</script>
|
| 521 |
</body>
|
| 522 |
</html>
|
|
|
|
| 1 |
<!DOCTYPE html>
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>MGZon API Documentation</title>
|
| 7 |
+
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/tailwind.min.css" rel="stylesheet">
|
| 8 |
+
<link href="https://cdn.jsdelivr.net/npm/[email protected]/themes/prism.min.css" rel="stylesheet">
|
| 9 |
+
<link href="https://cdn.jsdelivr.net/npm/[email protected]/css/boxicons.min.css" rel="stylesheet">
|
| 10 |
+
<style>
|
| 11 |
+
@keyframes slideIn {
|
| 12 |
+
from { transform: translateX(-100%); }
|
| 13 |
+
to { transform: translateX(0); }
|
| 14 |
+
}
|
| 15 |
+
.sidebar.active { transform: translateX(0); }
|
| 16 |
+
@media (max-width: 768px) {
|
| 17 |
+
.sidebar { transform: translateX(-100%); }
|
| 18 |
+
.sidebar.active { transform: translateX(0); animation: slideIn 0.3s ease; }
|
| 19 |
+
}
|
| 20 |
+
.tab-content { display: none; }
|
| 21 |
+
.tab-content.active { display: block; }
|
| 22 |
+
</style>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
</head>
|
| 24 |
+
<body class="bg-gray-900 text-white min-h-screen flex flex-col">
|
| 25 |
+
<button class="md:hidden fixed top-4 left-4 z-50 text-white text-2xl sidebar-toggle">☰</button>
|
| 26 |
+
<div class="sidebar fixed top-0 left-0 h-full w-64 bg-gradient-to-b from-blue-800 to-orange-500 text-white p-6 flex flex-col md:translate-x-0 transition-transform duration-300">
|
| 27 |
+
<img src="https://raw.githubusercontent.com/Mark-Lasfar/MGZon/main/public/icons/mg.svg" alt="MGZon Logo" class="w-20 h-20 mb-6 animate-pulse">
|
| 28 |
+
<nav class="flex flex-col gap-4">
|
| 29 |
+
<a href="/" class="px-4 py-2 rounded-lg hover:bg-orange-600 transition">Home</a>
|
| 30 |
+
<a href="/docs" class="px-4 py-2 rounded-lg bg-orange-600">API Documentation</a>
|
| 31 |
+
<a href="https://hager-zon.vercel.app/about" class="px-4 py-2 rounded-lg hover:bg-orange-600 transition">About MGZon</a>
|
| 32 |
+
<a href="https://hager-zon.vercel.app/community" class="px-4 py-2 rounded-lg hover:bg-orange-600 transition">Community</a>
|
| 33 |
+
<a href="https://mark-elasfar.web.app/" target="_blank" class="px-4 py-2 rounded-lg hover:bg-orange-600 transition">Mark Al-Asfar</a>
|
| 34 |
+
</nav>
|
| 35 |
+
</div>
|
| 36 |
+
<div class="flex-1 md:ml-64 p-6">
|
| 37 |
+
<div class="container max-w-6xl mx-auto">
|
| 38 |
+
<div class="text-center py-12">
|
| 39 |
+
<img src="https://raw.githubusercontent.com/Mark-Lasfar/MGZon/main/public/icons/mg.svg" alt="MGZon Logo" class="w-32 h-32 mx-auto mb-6 animate-bounce">
|
| 40 |
+
<h1 class="text-5xl font-bold mb-4 animate-fade-in">MGZon API Documentation 📚</h1>
|
| 41 |
+
<p class="text-lg mb-8">
|
| 42 |
+
Integrate MGZon Chatbot into your projects with our API. Supports Python (Django, Flask, FastAPI), JavaScript (React, Node.js, Express), Ruby (Rails), PHP (Laravel), and more.
|
| 43 |
+
</p>
|
| 44 |
+
</div>
|
| 45 |
+
<div class="docs my-12">
|
| 46 |
+
<h2 class="text-3xl font-bold text-center mb-8">Endpoints</h2>
|
| 47 |
+
<div class="flex flex-wrap gap-4 mb-8">
|
| 48 |
+
<button class="tab-btn bg-orange-500 text-white px-4 py-2 rounded-lg" data-tab="chat">Chat</button>
|
| 49 |
+
<button class="tab-btn bg-gray-700 text-white px-4 py-2 rounded-lg" data-tab="code">Code</button>
|
| 50 |
+
<button class="tab-btn bg-gray-700 text-white px-4 py-2 rounded-lg" data-tab="analysis">Analysis</button>
|
| 51 |
+
<button class="tab-btn bg-gray-700 text-white px-4 py-2 rounded-lg" data-tab="oauth">OAuth</button>
|
| 52 |
+
</div>
|
| 53 |
+
<div id="chat" class="tab-content active">
|
| 54 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 55 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">POST /api/chat</h3>
|
| 56 |
+
<p>Send a chat message to the MGZon Chatbot and get a response.</p>
|
| 57 |
+
<pre><code class="language-json">{
|
| 58 |
+
"message": "Your query here",
|
| 59 |
+
"system_prompt": "You are a helpful assistant",
|
| 60 |
+
"history": [{"role": "user", "content": "Previous message"}, {"role": "assistant", "content": "Previous response"}],
|
| 61 |
+
"temperature": 0.7,
|
| 62 |
+
"max_new_tokens": 4096,
|
| 63 |
+
"enable_browsing": false
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
}</code></pre>
|
| 65 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 66 |
+
</div>
|
| 67 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 68 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">Python (requests)</h3>
|
| 69 |
+
<pre><code class="language-python">import requests
|
| 70 |
|
| 71 |
response = requests.post(
|
| 72 |
"https://hager-zon.vercel.app/api/chat",
|
|
|
|
| 78 |
}
|
| 79 |
)
|
| 80 |
print(response.json())</code></pre>
|
| 81 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 82 |
+
</div>
|
| 83 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 84 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">Python (Django)</h3>
|
| 85 |
+
<pre><code class="language-python">import requests
|
| 86 |
from django.http import JsonResponse
|
| 87 |
from django.views import View
|
| 88 |
|
|
|
|
| 94 |
json={"message": data, "system_prompt": "You are a coding expert", "temperature": 0.7}
|
| 95 |
)
|
| 96 |
return JsonResponse(response.json())</code></pre>
|
| 97 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 98 |
+
</div>
|
| 99 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 100 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">Python (Flask)</h3>
|
| 101 |
+
<pre><code class="language-python">from flask import Flask, request, jsonify
|
| 102 |
import requests
|
| 103 |
|
| 104 |
app = Flask(__name__)
|
|
|
|
| 111 |
json={"message": data, "system_prompt": "You are a coding expert", "temperature": 0.7}
|
| 112 |
)
|
| 113 |
return jsonify(response.json())</code></pre>
|
| 114 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 115 |
+
</div>
|
| 116 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 117 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">JavaScript (React)</h3>
|
| 118 |
+
<pre><code class="language-javascript">import React, { useState } from 'react';
|
| 119 |
|
| 120 |
function ChatComponent() {
|
| 121 |
const [message, setMessage] = useState('');
|
|
|
|
| 146 |
}
|
| 147 |
|
| 148 |
export default ChatComponent;</code></pre>
|
| 149 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 150 |
+
</div>
|
| 151 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 152 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">JavaScript (Node.js/Express)</h3>
|
| 153 |
+
<pre><code class="language-javascript">const express = require('express');
|
| 154 |
const fetch = require('node-fetch');
|
| 155 |
const app = express();
|
| 156 |
|
|
|
|
| 173 |
});
|
| 174 |
|
| 175 |
app.listen(3000, () => console.log('Server running on port 3000'));</code></pre>
|
| 176 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 177 |
+
</div>
|
| 178 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 179 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">Ruby (Rails)</h3>
|
| 180 |
+
<pre><code class="language-ruby">require 'httparty'
|
| 181 |
|
| 182 |
class ChatController < ApplicationController
|
| 183 |
def create
|
|
|
|
| 194 |
render json: response.parsed_response
|
| 195 |
end
|
| 196 |
end</code></pre>
|
| 197 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 198 |
+
</div>
|
| 199 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 200 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">PHP (Laravel)</h3>
|
| 201 |
+
<pre><code class="language-php">use Illuminate\Support\Facades\Http;
|
| 202 |
|
| 203 |
class ChatController extends Controller
|
| 204 |
{
|
|
|
|
| 213 |
return $response->json();
|
| 214 |
}
|
| 215 |
}</code></pre>
|
| 216 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 217 |
+
</div>
|
| 218 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 219 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">Bash (cURL)</h3>
|
| 220 |
+
<pre><code class="language-bash">curl -X POST https://hager-zon.vercel.app/api/chat \
|
| 221 |
-H "Content-Type: application/json" \
|
| 222 |
-d '{"message":"Generate a React component","system_prompt":"You are a coding expert","temperature":0.7,"max_new_tokens":4096}'</code></pre>
|
| 223 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 224 |
+
</div>
|
| 225 |
+
</div>
|
| 226 |
+
<div id="code" class="tab-content">
|
| 227 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 228 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">POST /api/code</h3>
|
| 229 |
+
<p>Generate or modify code for various frameworks.</p>
|
| 230 |
+
<pre><code class="language-json">{
|
| 231 |
+
"framework": "React",
|
| 232 |
+
"task": "Create a login form component",
|
| 233 |
+
"code": "// Existing code (optional)"
|
| 234 |
}</code></pre>
|
| 235 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 236 |
+
</div>
|
| 237 |
+
</div>
|
| 238 |
+
<div id="analysis" class="tab-content">
|
| 239 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 240 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">POST /api/analysis</h3>
|
| 241 |
+
<p>Analyze code or data with detailed insights.</p>
|
| 242 |
+
<pre><code class="language-json">{
|
| 243 |
+
"text": "Analyze this Python code: print('Hello World')"
|
| 244 |
}</code></pre>
|
| 245 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 246 |
+
</div>
|
| 247 |
+
</div>
|
| 248 |
+
<div id="oauth" class="tab-content">
|
| 249 |
+
<div class="code-block bg-gray-800 p-6 rounded-lg shadow-lg mb-6">
|
| 250 |
+
<h3 class="text-xl font-semibold text-orange-500 mb-2">OAuth 2.0 Authentication</h3>
|
| 251 |
+
<p>Authenticate with MGZon using OAuth 2.0.</p>
|
| 252 |
+
<pre><code class="language-bash">curl -X POST https://hager-zon.vercel.app/oauth/token \
|
| 253 |
-H "Content-Type: application/json" \
|
| 254 |
-d '{"client_id": "YOUR_CLIENT_ID", "client_secret": "YOUR_CLIENT_SECRET", "grant_type": "client_credentials"}'</code></pre>
|
| 255 |
+
<button class="copy-btn bg-orange-500 text-white px-4 py-2 rounded-lg mt-4">Copy</button>
|
| 256 |
+
</div>
|
| 257 |
+
</div>
|
| 258 |
+
<a href="/" class="inline-block bg-gradient-to-r from-orange-500 to-red-500 text-white px-8 py-4 rounded-full text-lg font-semibold hover:scale-105 transition transform mt-8">Back to Home</a>
|
| 259 |
+
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 260 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 261 |
</div>
|
| 262 |
+
<footer class="bg-gradient-to-r from-blue-900 to-gray-900 py-12 mt-auto">
|
| 263 |
+
<div class="container max-w-6xl mx-auto text-center">
|
| 264 |
+
<img src="https://raw.githubusercontent.com/Mark-Lasfar/MGZon/main/public/icons/mg.svg" alt="MGZon Logo" class="w-24 h-24 mx-auto mb-6 animate-pulse">
|
| 265 |
+
<p class="mb-4">Developed by <a href="https://mark-elasfar.web.app/" target="_blank" class="text-orange-500 hover:underline">Mark Al-Asfar</a> | Powered by <a href="https://hager-zon.vercel.app/" target="_blank" class="text-orange-500 hover:underline">MGZon AI</a></p>
|
| 266 |
+
<div class="grid grid-cols-2 md:grid-cols-3 gap-6">
|
| 267 |
+
<div class="bg-gray-800 p-6 rounded-lg shadow-lg">
|
| 268 |
+
<i class="bx bx-mail-send text-3xl text-orange-500 mb-4"></i>
|
| 269 |
+
<h3 class="text-lg font-semibold mb-2">Email Us</h3>
|
| 270 |
+
<p>Contact: <a href="mailto:[email protected]" class="text-orange-500 hover:underline">[email protected]</a></p>
|
| 271 |
+
</div>
|
| 272 |
+
<div class="bg-gray-800 p-6 rounded-lg shadow-lg">
|
| 273 |
+
<i class="bx bx-phone text-3xl text-orange-500 mb-4"></i>
|
| 274 |
+
<h3 class="text-lg font-semibold mb-2">Phone Support</h3>
|
| 275 |
+
<p>Call: +1-800-123-4567</p>
|
| 276 |
+
</div>
|
| 277 |
+
<div class="bg-gray-800 p-6 rounded-lg shadow-lg">
|
| 278 |
+
<i class="bx bx-group text-3xl text-orange-500 mb-4"></i>
|
| 279 |
+
<h3 class="text-lg font-semibold mb-2">Community</h3>
|
| 280 |
+
<p>Join: <a href="https://hager-zon.vercel.app/community" class="text-orange-500 hover:underline">mgzon.com/community</a></p>
|
| 281 |
+
</div>
|
| 282 |
+
<div class="bg-gray-800 p-6 rounded-lg shadow-lg">
|
| 283 |
+
<i class="bx bx-code-alt text-3xl text-orange-500 mb-4"></i>
|
| 284 |
+
<h3 class="text-lg font-semibold mb-2">API Docs</h3>
|
| 285 |
+
<p>Explore: <a href="/docs" class="text-orange-500 hover:underline">mgzon.com/docs</a></p>
|
| 286 |
+
</div>
|
| 287 |
+
<div class="bg-gray-800 p-6 rounded-lg shadow-lg">
|
| 288 |
+
<i class="bx bx-help-circle text-3xl text-orange-500 mb-4"></i>
|
| 289 |
+
<h3 class="text-lg font-semibold mb-2">FAQ</h3>
|
| 290 |
+
<p>Read: <a href="https://hager-zon.vercel.app/faq" class="text-orange-500 hover:underline">mgzon.com/faq</a></p>
|
| 291 |
+
</div>
|
| 292 |
+
<div class="bg-gray-800 p-6 rounded-lg shadow-lg">
|
| 293 |
+
<i class="bx bx-book text-3xl text-orange-500 mb-4"></i>
|
| 294 |
+
<h3 class="text-lg font-semibold mb-2">Documentation</h3>
|
| 295 |
+
<p>Learn: <a href="/docs" class="text-orange-500 hover:underline">mgzon.com/docs</a></p>
|
| 296 |
+
</div>
|
| 297 |
+
</div>
|
| 298 |
+
<div class="flex justify-center gap-6 mt-6">
|
| 299 |
+
<a href="https://github.com/Mark-Lasfar/MGZon" class="text-2xl text-white hover:text-orange-500 transition"><i class="bx bxl-github"></i></a>
|
| 300 |
+
<a href="https://x.com/MGZon" class="text-2xl text-white hover:text-orange-500 transition"><i class="bx bxl-twitter"></i></a>
|
| 301 |
+
<a href="https://www.facebook.com/people/Mark-Al-Asfar/pfbid02GMisUQ8AqWkNZjoKtWFHH1tbdHuVscN1cjcFnZWy9HkRaAsmanBfT6mhySAyqpg4l/" class="text-2xl text-white hover:text-orange-500 transition"><i class="bx bxl-facebook"></i></a>
|
| 302 |
+
</div>
|
| 303 |
+
<p class="mt-6">© 2025 Mark Al-Asfar & MGZon AI. All rights reserved.</p>
|
| 304 |
+
</div>
|
| 305 |
+
</footer>
|
| 306 |
+
<script src="/static/js/scripts.js"></script>
|
| 307 |
+
<script src="https://cdn.jsdelivr.net/npm/[email protected]/prism.min.js"></script>
|
| 308 |
+
<script>
|
| 309 |
+
document.querySelectorAll('.tab-btn').forEach(btn => {
|
| 310 |
+
btn.addEventListener('click', () => {
|
| 311 |
+
document.querySelectorAll('.tab-btn').forEach(b => b.classList.replace('bg-orange-500', 'bg-gray-700'));
|
| 312 |
+
btn.classList.replace('bg-gray-700', 'bg-orange-500');
|
| 313 |
+
document.querySelectorAll('.tab-content').forEach(tab => tab.classList.remove('active'));
|
| 314 |
+
document.getElementById(btn.dataset.tab).classList.add('active');
|
| 315 |
+
});
|
| 316 |
});
|
| 317 |
+
</script>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 318 |
</body>
|
| 319 |
</html>
|
utils/generation.py
CHANGED
|
@@ -8,6 +8,12 @@ from tenacity import retry, stop_after_attempt, wait_exponential
|
|
| 8 |
import logging
|
| 9 |
from cachetools import TTLCache
|
| 10 |
import hashlib
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
logger = logging.getLogger(__name__)
|
| 13 |
|
|
@@ -24,14 +30,59 @@ LATEX_DELIMS = [
|
|
| 24 |
|
| 25 |
# إعداد العميل لـ Hugging Face Inference API
|
| 26 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
|
| 27 |
API_ENDPOINT = os.getenv("API_ENDPOINT", "https://router.huggingface.co/v1")
|
| 28 |
FALLBACK_API_ENDPOINT = "https://api-inference.huggingface.co/v1"
|
| 29 |
MODEL_NAME = os.getenv("MODEL_NAME", "openai/gpt-oss-20b:fireworks-ai")
|
| 30 |
-
SECONDARY_MODEL_NAME = os.getenv("SECONDARY_MODEL_NAME", "
|
| 31 |
TERTIARY_MODEL_NAME = os.getenv("TERTIARY_MODEL_NAME", "mistralai/Mixtral-8x7B-Instruct-v0.1")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
query_lower = query.lower()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
mgzon_patterns = [
|
| 36 |
r"\bmgzon\b", r"\bmgzon\s+(products|services|platform|features|mission|technology|solutions|oauth)\b",
|
| 37 |
r"\bميزات\s+mgzon\b", r"\bخدمات\s+mgzon\b", r"\boauth\b"
|
|
@@ -40,10 +91,11 @@ def select_model(query: str) -> tuple[str, str]:
|
|
| 40 |
if re.search(pattern, query_lower, re.IGNORECASE):
|
| 41 |
logger.info(f"Selected {SECONDARY_MODEL_NAME} with endpoint {FALLBACK_API_ENDPOINT} for MGZon-related query: {query}")
|
| 42 |
return SECONDARY_MODEL_NAME, FALLBACK_API_ENDPOINT
|
|
|
|
| 43 |
logger.info(f"Selected {MODEL_NAME} with endpoint {API_ENDPOINT} for general query: {query}")
|
| 44 |
return MODEL_NAME, API_ENDPOINT
|
| 45 |
|
| 46 |
-
@retry(stop=stop_after_attempt(
|
| 47 |
def request_generation(
|
| 48 |
api_key: str,
|
| 49 |
api_base: str,
|
|
@@ -51,16 +103,25 @@ def request_generation(
|
|
| 51 |
system_prompt: str,
|
| 52 |
model_name: str,
|
| 53 |
chat_history: Optional[List[dict]] = None,
|
| 54 |
-
temperature: float = 0.
|
| 55 |
max_new_tokens: int = 128000,
|
| 56 |
reasoning_effort: str = "off",
|
| 57 |
tools: Optional[List[dict]] = None,
|
| 58 |
tool_choice: Optional[str] = None,
|
| 59 |
deep_search: bool = False,
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
| 61 |
from utils.web_search import web_search # تأخير الاستيراد
|
| 62 |
|
| 63 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
cache_key = hashlib.md5(json.dumps({
|
| 65 |
"message": message,
|
| 66 |
"system_prompt": system_prompt,
|
|
@@ -76,22 +137,93 @@ def request_generation(
|
|
| 76 |
yield chunk
|
| 77 |
return
|
| 78 |
|
| 79 |
-
client = OpenAI(api_key=
|
| 80 |
task_type = "general"
|
| 81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
task_type = "code"
|
| 83 |
-
enhanced_system_prompt = f"{system_prompt}\nYou are an expert programmer. Provide accurate, well-commented code with examples and explanations. Support frameworks like React, Django, Flask, and others
|
| 84 |
elif any(keyword in message.lower() for keyword in ["analyze", "analysis", "تحليل"]):
|
| 85 |
task_type = "analysis"
|
| 86 |
-
enhanced_system_prompt = f"{system_prompt}\nProvide detailed analysis with step-by-step reasoning, examples, and data-driven insights."
|
| 87 |
elif any(keyword in message.lower() for keyword in ["review", "مراجعة"]):
|
| 88 |
task_type = "review"
|
| 89 |
-
enhanced_system_prompt = f"{system_prompt}\nReview the provided content thoroughly, identify issues, and suggest improvements with detailed explanations."
|
| 90 |
elif any(keyword in message.lower() for keyword in ["publish", "نشر"]):
|
| 91 |
task_type = "publish"
|
| 92 |
-
enhanced_system_prompt = f"{system_prompt}\nPrepare content for publishing, ensuring clarity, professionalism, and adherence to best practices."
|
| 93 |
else:
|
| 94 |
-
enhanced_system_prompt = system_prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
|
| 96 |
logger.info(f"Task type detected: {task_type}")
|
| 97 |
input_messages: List[dict] = [{"role": "system", "content": enhanced_system_prompt}]
|
|
@@ -111,8 +243,8 @@ def request_generation(
|
|
| 111 |
else:
|
| 112 |
input_messages.append({"role": "user", "content": message})
|
| 113 |
|
| 114 |
-
tools = tools if tools and
|
| 115 |
-
tool_choice = tool_choice if tool_choice in ["auto", "none", "any", "required"] and
|
| 116 |
|
| 117 |
cached_chunks = []
|
| 118 |
try:
|
|
@@ -152,13 +284,13 @@ def request_generation(
|
|
| 152 |
saw_visible_output = True
|
| 153 |
buffer += content
|
| 154 |
|
| 155 |
-
if "\n" in buffer or len(buffer) >
|
| 156 |
cached_chunks.append(buffer)
|
| 157 |
yield buffer
|
| 158 |
buffer = ""
|
| 159 |
continue
|
| 160 |
|
| 161 |
-
if chunk.choices[0].delta.tool_calls and
|
| 162 |
tool_call = chunk.choices[0].delta.tool_calls[0]
|
| 163 |
name = getattr(tool_call, "function", {}).get("name", None)
|
| 164 |
args = getattr(tool_call, "function", {}).get("arguments", None)
|
|
@@ -168,7 +300,7 @@ def request_generation(
|
|
| 168 |
last_tool_args = args
|
| 169 |
continue
|
| 170 |
|
| 171 |
-
if chunk.choices[0].finish_reason in ("stop", "tool_calls", "error"):
|
| 172 |
if buffer:
|
| 173 |
cached_chunks.append(buffer)
|
| 174 |
yield buffer
|
|
@@ -193,22 +325,50 @@ def request_generation(
|
|
| 193 |
if chunk.choices[0].finish_reason == "error":
|
| 194 |
cached_chunks.append(f"Error: Unknown error")
|
| 195 |
yield f"Error: Unknown error"
|
|
|
|
|
|
|
|
|
|
| 196 |
break
|
| 197 |
|
| 198 |
if buffer:
|
| 199 |
cached_chunks.append(buffer)
|
| 200 |
yield buffer
|
| 201 |
|
| 202 |
-
cache[cache_key] = cached_chunks
|
| 203 |
|
| 204 |
except Exception as e:
|
| 205 |
logger.exception(f"[Gateway] Streaming failed for model {model_name}: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 206 |
if model_name == MODEL_NAME:
|
| 207 |
fallback_model = SECONDARY_MODEL_NAME
|
| 208 |
fallback_endpoint = FALLBACK_API_ENDPOINT
|
| 209 |
logger.info(f"Retrying with fallback model: {fallback_model} on {fallback_endpoint}")
|
| 210 |
try:
|
| 211 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 212 |
stream = client.chat.completions.create(
|
| 213 |
model=fallback_model,
|
| 214 |
messages=input_messages,
|
|
@@ -237,13 +397,13 @@ def request_generation(
|
|
| 237 |
saw_visible_output = True
|
| 238 |
buffer += content
|
| 239 |
|
| 240 |
-
if "\n" in buffer or len(buffer) >
|
| 241 |
cached_chunks.append(buffer)
|
| 242 |
yield buffer
|
| 243 |
buffer = ""
|
| 244 |
continue
|
| 245 |
|
| 246 |
-
if chunk.choices[0].finish_reason in ("stop", "error"):
|
| 247 |
if buffer:
|
| 248 |
cached_chunks.append(buffer)
|
| 249 |
yield buffer
|
|
@@ -260,6 +420,9 @@ def request_generation(
|
|
| 260 |
if chunk.choices[0].finish_reason == "error":
|
| 261 |
cached_chunks.append(f"Error: Unknown error with fallback model {fallback_model}")
|
| 262 |
yield f"Error: Unknown error with fallback model {fallback_model}"
|
|
|
|
|
|
|
|
|
|
| 263 |
break
|
| 264 |
|
| 265 |
if buffer:
|
|
@@ -270,10 +433,12 @@ def request_generation(
|
|
| 270 |
|
| 271 |
except Exception as e2:
|
| 272 |
logger.exception(f"[Gateway] Streaming failed for fallback model {fallback_model}: {e2}")
|
| 273 |
-
cached_chunks.append(f"Error: Failed to load both models ({model_name} and {fallback_model}): {e2}")
|
| 274 |
-
yield f"Error: Failed to load both models ({model_name} and {fallback_model}): {e2}"
|
| 275 |
try:
|
| 276 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 277 |
stream = client.chat.completions.create(
|
| 278 |
model=TERTIARY_MODEL_NAME,
|
| 279 |
messages=input_messages,
|
|
@@ -288,12 +453,12 @@ def request_generation(
|
|
| 288 |
content = chunk.choices[0].delta.content
|
| 289 |
saw_visible_output = True
|
| 290 |
buffer += content
|
| 291 |
-
if "\n" in buffer or len(buffer) >
|
| 292 |
cached_chunks.append(buffer)
|
| 293 |
yield buffer
|
| 294 |
buffer = ""
|
| 295 |
continue
|
| 296 |
-
if chunk.choices[0].finish_reason in ("stop", "error"):
|
| 297 |
if buffer:
|
| 298 |
cached_chunks.append(buffer)
|
| 299 |
yield buffer
|
|
@@ -304,6 +469,9 @@ def request_generation(
|
|
| 304 |
if chunk.choices[0].finish_reason == "error":
|
| 305 |
cached_chunks.append(f"Error: Unknown error with tertiary model {TERTIARY_MODEL_NAME}")
|
| 306 |
yield f"Error: Unknown error with tertiary model {TERTIARY_MODEL_NAME}"
|
|
|
|
|
|
|
|
|
|
| 307 |
break
|
| 308 |
if buffer:
|
| 309 |
cached_chunks.append(buffer)
|
|
@@ -311,30 +479,32 @@ def request_generation(
|
|
| 311 |
cache[cache_key] = cached_chunks
|
| 312 |
except Exception as e3:
|
| 313 |
logger.exception(f"[Gateway] Streaming failed for tertiary model {TERTIARY_MODEL_NAME}: {e3}")
|
| 314 |
-
|
| 315 |
-
|
| 316 |
else:
|
| 317 |
-
cached_chunks.append(f"Error: Failed to load model {model_name}: {e}")
|
| 318 |
yield f"Error: Failed to load model {model_name}: {e}"
|
|
|
|
| 319 |
|
| 320 |
def format_final(analysis_text: str, visible_text: str) -> str:
|
| 321 |
reasoning_safe = html.escape((analysis_text or "").strip())
|
| 322 |
response = (visible_text or "").strip()
|
|
|
|
|
|
|
| 323 |
return (
|
| 324 |
"<details><summary><strong>🤔 Analysis</strong></summary>\n"
|
| 325 |
"<pre style='white-space:pre-wrap;'>"
|
| 326 |
f"{reasoning_safe}"
|
| 327 |
"</pre>\n</details>\n\n"
|
| 328 |
"**💬 Response:**\n\n"
|
| 329 |
-
f"{response}"
|
| 330 |
)
|
| 331 |
|
| 332 |
-
def generate(message, history, system_prompt, temperature, reasoning_effort, enable_browsing, max_new_tokens):
|
| 333 |
-
if not message.strip():
|
| 334 |
-
yield "Please enter a prompt."
|
| 335 |
return
|
| 336 |
|
| 337 |
-
model_name, api_endpoint = select_model(message)
|
| 338 |
chat_history = []
|
| 339 |
for h in history:
|
| 340 |
if isinstance(h, dict):
|
|
@@ -389,9 +559,24 @@ def generate(message, history, system_prompt, temperature, reasoning_effort, ena
|
|
| 389 |
"required": ["code", "language"],
|
| 390 |
},
|
| 391 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 392 |
}
|
| 393 |
-
] if
|
| 394 |
-
tool_choice = "auto" if
|
| 395 |
|
| 396 |
in_analysis = False
|
| 397 |
in_visible = False
|
|
@@ -423,9 +608,15 @@ def generate(message, history, system_prompt, temperature, reasoning_effort, ena
|
|
| 423 |
tools=tools,
|
| 424 |
tool_choice=tool_choice,
|
| 425 |
deep_search=enable_browsing,
|
|
|
|
|
|
|
|
|
|
| 426 |
)
|
| 427 |
|
| 428 |
for chunk in stream:
|
|
|
|
|
|
|
|
|
|
| 429 |
if chunk == "analysis":
|
| 430 |
in_analysis, in_visible = True, False
|
| 431 |
if not raw_started:
|
|
|
|
| 8 |
import logging
|
| 9 |
from cachetools import TTLCache
|
| 10 |
import hashlib
|
| 11 |
+
import requests
|
| 12 |
+
import pydub
|
| 13 |
+
import io
|
| 14 |
+
import torchaudio
|
| 15 |
+
from PIL import Image
|
| 16 |
+
from transformers import CLIPModel, CLIPProcessor, AutoProcessor, ParlerTTSForConditionalGeneration
|
| 17 |
|
| 18 |
logger = logging.getLogger(__name__)
|
| 19 |
|
|
|
|
| 30 |
|
| 31 |
# إعداد العميل لـ Hugging Face Inference API
|
| 32 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 33 |
+
BACKUP_HF_TOKEN = os.getenv("BACKUP_HF_TOKEN") # توكن احتياطي
|
| 34 |
API_ENDPOINT = os.getenv("API_ENDPOINT", "https://router.huggingface.co/v1")
|
| 35 |
FALLBACK_API_ENDPOINT = "https://api-inference.huggingface.co/v1"
|
| 36 |
MODEL_NAME = os.getenv("MODEL_NAME", "openai/gpt-oss-20b:fireworks-ai")
|
| 37 |
+
SECONDARY_MODEL_NAME = os.getenv("SECONDARY_MODEL_NAME", "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
|
| 38 |
TERTIARY_MODEL_NAME = os.getenv("TERTIARY_MODEL_NAME", "mistralai/Mixtral-8x7B-Instruct-v0.1")
|
| 39 |
+
CLIP_BASE_MODEL = os.getenv("CLIP_BASE_MODEL", "openai/clip-vit-base-patch32")
|
| 40 |
+
CLIP_LARGE_MODEL = os.getenv("CLIP_LARGE_MODEL", "openai/clip-vit-large-patch14")
|
| 41 |
+
ASR_MODEL = os.getenv("ASR_MODEL", "openai/whisper-large-v3-turbo")
|
| 42 |
+
TTS_MODEL = os.getenv("TTS_MODEL", "parler-tts/parler-tts-mini-v1")
|
| 43 |
|
| 44 |
+
def check_model_availability(model_name: str, api_base: str, api_key: str) -> tuple[bool, str]:
|
| 45 |
+
"""التحقق من توفر النموذج عبر API مع دعم التوكن الاحتياطي"""
|
| 46 |
+
try:
|
| 47 |
+
response = requests.get(
|
| 48 |
+
f"{api_base}/models/{model_name}",
|
| 49 |
+
headers={"Authorization": f"Bearer {api_key}"},
|
| 50 |
+
timeout=10
|
| 51 |
+
)
|
| 52 |
+
if response.status_code == 200:
|
| 53 |
+
return True, api_key
|
| 54 |
+
elif response.status_code == 429 and BACKUP_HF_TOKEN and api_key != BACKUP_HF_TOKEN:
|
| 55 |
+
logger.warning(f"Rate limit reached for token {api_key}. Switching to backup token.")
|
| 56 |
+
return check_model_availability(model_name, api_base, BACKUP_HF_TOKEN)
|
| 57 |
+
logger.error(f"Model {model_name} not available: {response.status_code}")
|
| 58 |
+
return False, api_key
|
| 59 |
+
except Exception as e:
|
| 60 |
+
logger.error(f"Failed to check model availability for {model_name}: {e}")
|
| 61 |
+
if BACKUP_HF_TOKEN and api_key != BACKUP_HF_TOKEN:
|
| 62 |
+
logger.warning(f"Retrying with backup token for {model_name}")
|
| 63 |
+
return check_model_availability(model_name, api_base, BACKUP_HF_TOKEN)
|
| 64 |
+
return False, api_key
|
| 65 |
+
|
| 66 |
+
def select_model(query: str, input_type: str = "text") -> tuple[str, str]:
|
| 67 |
query_lower = query.lower()
|
| 68 |
+
# دعم الصوت
|
| 69 |
+
if input_type == "audio" or any(keyword in query_lower for keyword in ["voice", "audio", "speech", "صوت", "تحويل صوت"]):
|
| 70 |
+
logger.info(f"Selected {ASR_MODEL} with endpoint {FALLBACK_API_ENDPOINT} for audio input")
|
| 71 |
+
return ASR_MODEL, FALLBACK_API_ENDPOINT
|
| 72 |
+
# دعم تحويل النص إلى صوت
|
| 73 |
+
if any(keyword in query_lower for keyword in ["text-to-speech", "tts", "تحويل نص إلى صوت"]):
|
| 74 |
+
logger.info(f"Selected {TTS_MODEL} with endpoint {FALLBACK_API_ENDPOINT} for text-to-speech")
|
| 75 |
+
return TTS_MODEL, FALLBACK_API_ENDPOINT
|
| 76 |
+
# نماذج CLIP للاستعلامات المتعلقة بالصور
|
| 77 |
+
image_patterns = [
|
| 78 |
+
r"\bimage\b", r"\bpicture\b", r"\bphoto\b", r"\bvisual\b", r"\bصورة\b", r"\bتحليل\s+صورة\b",
|
| 79 |
+
r"\bimage\s+analysis\b", r"\bimage\s+classification\b", r"\bimage\s+description\b"
|
| 80 |
+
]
|
| 81 |
+
for pattern in image_patterns:
|
| 82 |
+
if re.search(pattern, query_lower, re.IGNORECASE):
|
| 83 |
+
logger.info(f"Selected {CLIP_BASE_MODEL} with endpoint {FALLBACK_API_ENDPOINT} for image-related query: {query}")
|
| 84 |
+
return CLIP_BASE_MODEL, FALLBACK_API_ENDPOINT
|
| 85 |
+
# نموذج DeepSeek للاستعلامات المتعلقة بـ MGZon
|
| 86 |
mgzon_patterns = [
|
| 87 |
r"\bmgzon\b", r"\bmgzon\s+(products|services|platform|features|mission|technology|solutions|oauth)\b",
|
| 88 |
r"\bميزات\s+mgzon\b", r"\bخدمات\s+mgzon\b", r"\boauth\b"
|
|
|
|
| 91 |
if re.search(pattern, query_lower, re.IGNORECASE):
|
| 92 |
logger.info(f"Selected {SECONDARY_MODEL_NAME} with endpoint {FALLBACK_API_ENDPOINT} for MGZon-related query: {query}")
|
| 93 |
return SECONDARY_MODEL_NAME, FALLBACK_API_ENDPOINT
|
| 94 |
+
# النموذج الافتراضي للاستعلامات العامة
|
| 95 |
logger.info(f"Selected {MODEL_NAME} with endpoint {API_ENDPOINT} for general query: {query}")
|
| 96 |
return MODEL_NAME, API_ENDPOINT
|
| 97 |
|
| 98 |
+
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=2, min=4, max=60))
|
| 99 |
def request_generation(
|
| 100 |
api_key: str,
|
| 101 |
api_base: str,
|
|
|
|
| 103 |
system_prompt: str,
|
| 104 |
model_name: str,
|
| 105 |
chat_history: Optional[List[dict]] = None,
|
| 106 |
+
temperature: float = 0.7,
|
| 107 |
max_new_tokens: int = 128000,
|
| 108 |
reasoning_effort: str = "off",
|
| 109 |
tools: Optional[List[dict]] = None,
|
| 110 |
tool_choice: Optional[str] = None,
|
| 111 |
deep_search: bool = False,
|
| 112 |
+
input_type: str = "text",
|
| 113 |
+
audio_data: Optional[bytes] = None,
|
| 114 |
+
image_data: Optional[bytes] = None,
|
| 115 |
+
) -> Generator[bytes | str, None, None]:
|
| 116 |
from utils.web_search import web_search # تأخير الاستيراد
|
| 117 |
|
| 118 |
+
# التحقق من توفر النموذج مع دعم التوكن الاحتياطي
|
| 119 |
+
is_available, selected_api_key = check_model_availability(model_name, api_base, api_key)
|
| 120 |
+
if not is_available:
|
| 121 |
+
yield f"Error: Model {model_name} is not available. Please check the model endpoint or token."
|
| 122 |
+
return
|
| 123 |
+
|
| 124 |
+
# إنشاء مفتاح للـ cache
|
| 125 |
cache_key = hashlib.md5(json.dumps({
|
| 126 |
"message": message,
|
| 127 |
"system_prompt": system_prompt,
|
|
|
|
| 137 |
yield chunk
|
| 138 |
return
|
| 139 |
|
| 140 |
+
client = OpenAI(api_key=selected_api_key, base_url=api_base, timeout=120.0)
|
| 141 |
task_type = "general"
|
| 142 |
+
enhanced_system_prompt = system_prompt
|
| 143 |
+
|
| 144 |
+
# معالجة الصوت (ASR)
|
| 145 |
+
if model_name == ASR_MODEL and audio_data:
|
| 146 |
+
task_type = "audio_transcription"
|
| 147 |
+
try:
|
| 148 |
+
audio_file = io.BytesIO(audio_data)
|
| 149 |
+
audio = pydub.AudioSegment.from_file(audio_file)
|
| 150 |
+
audio = audio.set_channels(1).set_frame_rate(16000)
|
| 151 |
+
audio_file = io.BytesIO()
|
| 152 |
+
audio.export(audio_file, format="wav")
|
| 153 |
+
audio_file.name = "audio.wav"
|
| 154 |
+
transcription = client.audio.transcriptions.create(
|
| 155 |
+
model=model_name,
|
| 156 |
+
file=audio_file,
|
| 157 |
+
response_format="text"
|
| 158 |
+
)
|
| 159 |
+
yield transcription
|
| 160 |
+
cache[cache_key] = [transcription]
|
| 161 |
+
return
|
| 162 |
+
except Exception as e:
|
| 163 |
+
logger.error(f"Audio transcription failed: {e}")
|
| 164 |
+
yield f"Error: Audio transcription failed: {e}"
|
| 165 |
+
return
|
| 166 |
+
|
| 167 |
+
# معالجة تحويل النص إلى صوت (TTS)
|
| 168 |
+
if model_name == TTS_MODEL:
|
| 169 |
+
task_type = "text_to_speech"
|
| 170 |
+
try:
|
| 171 |
+
model = ParlerTTSForConditionalGeneration.from_pretrained(model_name)
|
| 172 |
+
processor = AutoProcessor.from_pretrained(model_name)
|
| 173 |
+
inputs = processor(text=message, return_tensors="pt")
|
| 174 |
+
audio = model.generate(**inputs)
|
| 175 |
+
audio_file = io.BytesIO()
|
| 176 |
+
torchaudio.save(audio_file, audio[0], sample_rate=22050, format="wav")
|
| 177 |
+
audio_file.seek(0)
|
| 178 |
+
yield audio_file.read()
|
| 179 |
+
cache[cache_key] = [audio_file.read()]
|
| 180 |
+
return
|
| 181 |
+
except Exception as e:
|
| 182 |
+
logger.error(f"Text-to-speech failed: {e}")
|
| 183 |
+
yield f"Error: Text-to-speech failed: {e}"
|
| 184 |
+
return
|
| 185 |
+
|
| 186 |
+
# معالجة الصور
|
| 187 |
+
if model_name in [CLIP_BASE_MODEL, CLIP_LARGE_MODEL] and image_data:
|
| 188 |
+
task_type = "image_analysis"
|
| 189 |
+
try:
|
| 190 |
+
model = CLIPModel.from_pretrained(model_name)
|
| 191 |
+
processor = CLIPProcessor.from_pretrained(model_name)
|
| 192 |
+
image = Image.open(io.BytesIO(image_data)).convert("RGB")
|
| 193 |
+
inputs = processor(text=message, images=image, return_tensors="pt", padding=True)
|
| 194 |
+
outputs = model(**inputs)
|
| 195 |
+
logits_per_image = outputs.logits_per_image
|
| 196 |
+
probs = logits_per_image.softmax(dim=1)
|
| 197 |
+
yield f"Image analysis result: {probs.tolist()}"
|
| 198 |
+
cache[cache_key] = [f"Image analysis result: {probs.tolist()}"]
|
| 199 |
+
return
|
| 200 |
+
except Exception as e:
|
| 201 |
+
logger.error(f"Image analysis failed: {e}")
|
| 202 |
+
yield f"Error: Image analysis failed: {e}"
|
| 203 |
+
return
|
| 204 |
+
|
| 205 |
+
# تحسين system_prompt بناءً على نوع المهمة
|
| 206 |
+
if model_name in [CLIP_BASE_MODEL, CLIP_LARGE_MODEL]:
|
| 207 |
+
task_type = "image"
|
| 208 |
+
enhanced_system_prompt = f"{system_prompt}\nYou are an expert in image analysis and description. Provide detailed descriptions, classifications, or analysis of images based on the query. Continue until the query is fully addressed."
|
| 209 |
+
elif any(keyword in message.lower() for keyword in ["code", "programming", "python", "javascript", "react", "django", "flask"]):
|
| 210 |
task_type = "code"
|
| 211 |
+
enhanced_system_prompt = f"{system_prompt}\nYou are an expert programmer. Provide accurate, well-commented code with comprehensive examples and detailed explanations. Support frameworks like React, Django, Flask, and others. Format code with triple backticks (```) and specify the language. Continue until the task is fully addressed."
|
| 212 |
elif any(keyword in message.lower() for keyword in ["analyze", "analysis", "تحليل"]):
|
| 213 |
task_type = "analysis"
|
| 214 |
+
enhanced_system_prompt = f"{system_prompt}\nProvide detailed analysis with step-by-step reasoning, examples, and data-driven insights. Continue until all aspects of the query are thoroughly covered."
|
| 215 |
elif any(keyword in message.lower() for keyword in ["review", "مراجعة"]):
|
| 216 |
task_type = "review"
|
| 217 |
+
enhanced_system_prompt = f"{system_prompt}\nReview the provided content thoroughly, identify issues, and suggest improvements with detailed explanations. Ensure the response is complete and detailed."
|
| 218 |
elif any(keyword in message.lower() for keyword in ["publish", "نشر"]):
|
| 219 |
task_type = "publish"
|
| 220 |
+
enhanced_system_prompt = f"{system_prompt}\nPrepare content for publishing, ensuring clarity, professionalism, and adherence to best practices. Provide a complete and detailed response."
|
| 221 |
else:
|
| 222 |
+
enhanced_system_prompt = f"{system_prompt}\nFor general queries, provide comprehensive, detailed responses with examples and explanations where applicable. Continue generating content until the query is fully answered, leveraging the full capacity of the model."
|
| 223 |
+
|
| 224 |
+
# إذا كان الاستعلام قصيرًا، شجع على التفصيل
|
| 225 |
+
if len(message.split()) < 5:
|
| 226 |
+
enhanced_system_prompt += "\nEven for short or general queries, provide a detailed, in-depth response with examples, explanations, and additional context to ensure completeness."
|
| 227 |
|
| 228 |
logger.info(f"Task type detected: {task_type}")
|
| 229 |
input_messages: List[dict] = [{"role": "system", "content": enhanced_system_prompt}]
|
|
|
|
| 243 |
else:
|
| 244 |
input_messages.append({"role": "user", "content": message})
|
| 245 |
|
| 246 |
+
tools = tools if tools and model_name in [MODEL_NAME, SECONDARY_MODEL_NAME, TERTIARY_MODEL_NAME] else []
|
| 247 |
+
tool_choice = tool_choice if tool_choice in ["auto", "none", "any", "required"] and model_name in [MODEL_NAME, SECONDARY_MODEL_NAME, TERTIARY_MODEL_NAME] else "none"
|
| 248 |
|
| 249 |
cached_chunks = []
|
| 250 |
try:
|
|
|
|
| 284 |
saw_visible_output = True
|
| 285 |
buffer += content
|
| 286 |
|
| 287 |
+
if "\n" in buffer or len(buffer) > 5000:
|
| 288 |
cached_chunks.append(buffer)
|
| 289 |
yield buffer
|
| 290 |
buffer = ""
|
| 291 |
continue
|
| 292 |
|
| 293 |
+
if chunk.choices[0].delta.tool_calls and model_name in [MODEL_NAME, SECONDARY_MODEL_NAME, TERTIARY_MODEL_NAME]:
|
| 294 |
tool_call = chunk.choices[0].delta.tool_calls[0]
|
| 295 |
name = getattr(tool_call, "function", {}).get("name", None)
|
| 296 |
args = getattr(tool_call, "function", {}).get("arguments", None)
|
|
|
|
| 300 |
last_tool_args = args
|
| 301 |
continue
|
| 302 |
|
| 303 |
+
if chunk.choices[0].finish_reason in ("stop", "tool_calls", "error", "length"):
|
| 304 |
if buffer:
|
| 305 |
cached_chunks.append(buffer)
|
| 306 |
yield buffer
|
|
|
|
| 325 |
if chunk.choices[0].finish_reason == "error":
|
| 326 |
cached_chunks.append(f"Error: Unknown error")
|
| 327 |
yield f"Error: Unknown error"
|
| 328 |
+
elif chunk.choices[0].finish_reason == "length":
|
| 329 |
+
cached_chunks.append("Response truncated due to token limit. Please refine your query or request continuation.")
|
| 330 |
+
yield "Response truncated due to token limit. Please refine your query or request continuation."
|
| 331 |
break
|
| 332 |
|
| 333 |
if buffer:
|
| 334 |
cached_chunks.append(buffer)
|
| 335 |
yield buffer
|
| 336 |
|
| 337 |
+
cache[cache_key] = cached_chunks
|
| 338 |
|
| 339 |
except Exception as e:
|
| 340 |
logger.exception(f"[Gateway] Streaming failed for model {model_name}: {e}")
|
| 341 |
+
if selected_api_key != BACKUP_HF_TOKEN and BACKUP_HF_TOKEN:
|
| 342 |
+
logger.warning(f"Retrying with backup token for model {model_name}")
|
| 343 |
+
for chunk in request_generation(
|
| 344 |
+
api_key=BACKUP_HF_TOKEN,
|
| 345 |
+
api_base=api_base,
|
| 346 |
+
message=message,
|
| 347 |
+
system_prompt=system_prompt,
|
| 348 |
+
model_name=model_name,
|
| 349 |
+
chat_history=chat_history,
|
| 350 |
+
temperature=temperature,
|
| 351 |
+
max_new_tokens=max_new_tokens,
|
| 352 |
+
reasoning_effort=reasoning_effort,
|
| 353 |
+
tools=tools,
|
| 354 |
+
tool_choice=tool_choice,
|
| 355 |
+
deep_search=deep_search,
|
| 356 |
+
input_type=input_type,
|
| 357 |
+
audio_data=audio_data,
|
| 358 |
+
image_data=image_data,
|
| 359 |
+
):
|
| 360 |
+
yield chunk
|
| 361 |
+
return
|
| 362 |
if model_name == MODEL_NAME:
|
| 363 |
fallback_model = SECONDARY_MODEL_NAME
|
| 364 |
fallback_endpoint = FALLBACK_API_ENDPOINT
|
| 365 |
logger.info(f"Retrying with fallback model: {fallback_model} on {fallback_endpoint}")
|
| 366 |
try:
|
| 367 |
+
is_available, selected_api_key = check_model_availability(fallback_model, fallback_endpoint, selected_api_key)
|
| 368 |
+
if not is_available:
|
| 369 |
+
yield f"Error: Fallback model {fallback_model} is not available."
|
| 370 |
+
return
|
| 371 |
+
client = OpenAI(api_key=selected_api_key, base_url=fallback_endpoint, timeout=120.0)
|
| 372 |
stream = client.chat.completions.create(
|
| 373 |
model=fallback_model,
|
| 374 |
messages=input_messages,
|
|
|
|
| 397 |
saw_visible_output = True
|
| 398 |
buffer += content
|
| 399 |
|
| 400 |
+
if "\n" in buffer or len(buffer) > 5000:
|
| 401 |
cached_chunks.append(buffer)
|
| 402 |
yield buffer
|
| 403 |
buffer = ""
|
| 404 |
continue
|
| 405 |
|
| 406 |
+
if chunk.choices[0].finish_reason in ("stop", "error", "length"):
|
| 407 |
if buffer:
|
| 408 |
cached_chunks.append(buffer)
|
| 409 |
yield buffer
|
|
|
|
| 420 |
if chunk.choices[0].finish_reason == "error":
|
| 421 |
cached_chunks.append(f"Error: Unknown error with fallback model {fallback_model}")
|
| 422 |
yield f"Error: Unknown error with fallback model {fallback_model}"
|
| 423 |
+
elif chunk.choices[0].finish_reason == "length":
|
| 424 |
+
cached_chunks.append("Response truncated due to token limit. Please refine your query or request continuation.")
|
| 425 |
+
yield "Response truncated due to token limit. Please refine your query or request continuation."
|
| 426 |
break
|
| 427 |
|
| 428 |
if buffer:
|
|
|
|
| 433 |
|
| 434 |
except Exception as e2:
|
| 435 |
logger.exception(f"[Gateway] Streaming failed for fallback model {fallback_model}: {e2}")
|
|
|
|
|
|
|
| 436 |
try:
|
| 437 |
+
is_available, selected_api_key = check_model_availability(TERTIARY_MODEL_NAME, FALLBACK_API_ENDPOINT, selected_api_key)
|
| 438 |
+
if not is_available:
|
| 439 |
+
yield f"Error: Tertiary model {TERTIARY_MODEL_NAME} is not available."
|
| 440 |
+
return
|
| 441 |
+
client = OpenAI(api_key=selected_api_key, base_url=FALLBACK_API_ENDPOINT, timeout=120.0)
|
| 442 |
stream = client.chat.completions.create(
|
| 443 |
model=TERTIARY_MODEL_NAME,
|
| 444 |
messages=input_messages,
|
|
|
|
| 453 |
content = chunk.choices[0].delta.content
|
| 454 |
saw_visible_output = True
|
| 455 |
buffer += content
|
| 456 |
+
if "\n" in buffer or len(buffer) > 5000:
|
| 457 |
cached_chunks.append(buffer)
|
| 458 |
yield buffer
|
| 459 |
buffer = ""
|
| 460 |
continue
|
| 461 |
+
if chunk.choices[0].finish_reason in ("stop", "error", "length"):
|
| 462 |
if buffer:
|
| 463 |
cached_chunks.append(buffer)
|
| 464 |
yield buffer
|
|
|
|
| 469 |
if chunk.choices[0].finish_reason == "error":
|
| 470 |
cached_chunks.append(f"Error: Unknown error with tertiary model {TERTIARY_MODEL_NAME}")
|
| 471 |
yield f"Error: Unknown error with tertiary model {TERTIARY_MODEL_NAME}"
|
| 472 |
+
elif chunk.choices[0].finish_reason == "length":
|
| 473 |
+
cached_chunks.append("Response truncated due to token limit. Please refine your query or request continuation.")
|
| 474 |
+
yield "Response truncated due to token limit. Please refine your query or request continuation."
|
| 475 |
break
|
| 476 |
if buffer:
|
| 477 |
cached_chunks.append(buffer)
|
|
|
|
| 479 |
cache[cache_key] = cached_chunks
|
| 480 |
except Exception as e3:
|
| 481 |
logger.exception(f"[Gateway] Streaming failed for tertiary model {TERTIARY_MODEL_NAME}: {e3}")
|
| 482 |
+
yield f"Error: Failed to load all models: Primary ({model_name}), Secondary ({fallback_model}), Tertiary ({TERTIARY_MODEL_NAME}). Please check your model configurations."
|
| 483 |
+
return
|
| 484 |
else:
|
|
|
|
| 485 |
yield f"Error: Failed to load model {model_name}: {e}"
|
| 486 |
+
return
|
| 487 |
|
| 488 |
def format_final(analysis_text: str, visible_text: str) -> str:
    """Format the model's reasoning and final answer into one display string.

    The reasoning is HTML-escaped and wrapped in a collapsible <details>
    section; the visible response follows under a bold "Response" heading.

    Args:
        analysis_text: Raw chain-of-thought / analysis text (may be None or empty).
        visible_text: The user-facing response text (may be None or empty).

    Returns:
        A Markdown/HTML string combining both sections, or
        "No response generated." when both inputs are empty.
    """
    reasoning_safe = html.escape((analysis_text or "").strip())
    response = (visible_text or "").strip()
    if not reasoning_safe and not response:
        return "No response generated."
    # BUG FIX: the original wrote `f"{response}" if response else "..."` as the
    # last element of an implicit string-concatenation chain. Python's
    # conditional expression binds looser than adjacent-literal concatenation,
    # so the ENTIRE concatenated string became the true branch — an empty
    # response discarded the analysis block as well. Parenthesize the
    # conditional so only the response slot is substituted.
    return (
        "<details><summary><strong>🤔 Analysis</strong></summary>\n"
        "<pre style='white-space:pre-wrap;'>"
        f"{reasoning_safe}"
        "</pre>\n</details>\n\n"
        "**💬 Response:**\n\n"
        + (response if response else "No final response available.")
    )
|
| 501 |
|
| 502 |
+
def generate(message, history, system_prompt, temperature, reasoning_effort, enable_browsing, max_new_tokens, input_type="text", audio_data=None, image_data=None):
|
| 503 |
+
if not message.strip() and not audio_data and not image_data:
|
| 504 |
+
yield "Please enter a prompt or upload a file."
|
| 505 |
return
|
| 506 |
|
| 507 |
+
model_name, api_endpoint = select_model(message, input_type=input_type)
|
| 508 |
chat_history = []
|
| 509 |
for h in history:
|
| 510 |
if isinstance(h, dict):
|
|
|
|
| 559 |
"required": ["code", "language"],
|
| 560 |
},
|
| 561 |
},
|
| 562 |
+
},
|
| 563 |
+
{
|
| 564 |
+
"type": "function",
|
| 565 |
+
"function": {
|
| 566 |
+
"name": "image_analysis",
|
| 567 |
+
"description": "Analyze or describe an image based on the provided query",
|
| 568 |
+
"parameters": {
|
| 569 |
+
"type": "object",
|
| 570 |
+
"properties": {
|
| 571 |
+
"image_url": {"type": "string", "description": "URL of the image to analyze"},
|
| 572 |
+
"task": {"type": "string", "description": "Task description (e.g., describe, classify)"},
|
| 573 |
+
},
|
| 574 |
+
"required": ["task"],
|
| 575 |
+
},
|
| 576 |
+
},
|
| 577 |
}
|
| 578 |
+
] if model_name in [MODEL_NAME, SECONDARY_MODEL_NAME, TERTIARY_MODEL_NAME] else []
|
| 579 |
+
tool_choice = "auto" if model_name in [MODEL_NAME, SECONDARY_MODEL_NAME, TERTIARY_MODEL_NAME] else "none"
|
| 580 |
|
| 581 |
in_analysis = False
|
| 582 |
in_visible = False
|
|
|
|
| 608 |
tools=tools,
|
| 609 |
tool_choice=tool_choice,
|
| 610 |
deep_search=enable_browsing,
|
| 611 |
+
input_type=input_type,
|
| 612 |
+
audio_data=audio_data,
|
| 613 |
+
image_data=image_data,
|
| 614 |
)
|
| 615 |
|
| 616 |
for chunk in stream:
|
| 617 |
+
if isinstance(chunk, bytes):
|
| 618 |
+
yield chunk
|
| 619 |
+
continue
|
| 620 |
if chunk == "analysis":
|
| 621 |
in_analysis, in_visible = True, False
|
| 622 |
if not raw_started:
|