ibrahimlasfar committed
Commit 379310a · Parent(s): d8b507b

Shorten short_description to meet Hugging Face metadata requirements
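Context for the message above: Hugging Face Spaces validate the README.md front matter, and the metadata check rejects a short_description that is too long (the commonly cited cap is 60 characters). The sketch below is a hypothetical pre-push check, not part of this commit, illustrating the constraint.

# Hypothetical helper (not in this repo): fail fast if README.md's
# short_description exceeds the assumed Hub limit of 60 characters.
import pathlib
import re

MAX_LEN = 60  # assumed Spaces limit for short_description

readme = pathlib.Path("README.md").read_text(encoding="utf-8")
match = re.search(r"^short_description:\s*(.+)$", readme, re.MULTILINE)
if match:
    desc = match.group(1).strip().strip('"')
    if len(desc) > MAX_LEN:
        raise SystemExit(f"short_description is {len(desc)} chars; max is {MAX_LEN}")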

Files changed (1): main.py (+34, -121)
main.py CHANGED
@@ -1,4 +1,5 @@
import os
+ import io
import logging
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, RedirectResponse
@@ -7,80 +8,34 @@ from fastapi.templating import Jinja2Templates
from starlette.middleware.base import BaseHTTPMiddleware
from fastapi.openapi.docs import get_swagger_ui_html
import gradio as gr
- from api.endpoints import router as api_router
- from utils.generation import generate, LATEX_DELIMS

+ from api.endpoints import router as api_router
from utils.generation import generate, LATEX_DELIMS

- # Logging setup
+ # ================= Logging setup =================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

- # Check the files in /app/
logger.info("Files in /app/: %s", os.listdir("/app"))

- # Set up the client for the Hugging Face Inference API
+ # ================= HuggingFace keys =================
HF_TOKEN = os.getenv("HF_TOKEN")
- BACKUP_HF_TOKEN = os.getenv("BACKUP_HF_TOKEN")  # add the backup token
+ BACKUP_HF_TOKEN = os.getenv("BACKUP_HF_TOKEN")
if not HF_TOKEN:
    logger.error("HF_TOKEN is not set in environment variables.")
    raise ValueError("HF_TOKEN is required for Inference API.")

- # Queue settings
+ # ================= Queue setup =================
QUEUE_SIZE = int(os.getenv("QUEUE_SIZE", 80))
CONCURRENCY_LIMIT = int(os.getenv("CONCURRENCY_LIMIT", 20))

- # CSS setup
+ # ================= CSS =================
css = """
- .gradio-container { max-width: 1200px; margin: auto; }
- .chatbot { border: 1px solid #ccc; border-radius: 10px; padding: 15px; background-color: #f9f9f9; }
- .input-textbox { font-size: 16px; padding: 10px; }
- .upload-button::before {
-     content: '📷';
-     margin-right: 8px;
-     font-size: 22px;
- }
- .audio-input::before {
-     content: '🎤';
-     margin-right: 8px;
-     font-size: 22px;
- }
- .audio-output::before {
-     content: '🔊';
-     margin-right: 8px;
-     font-size: 22px;
- }
- .loading::after {
-     content: '';
-     display: inline-block;
-     width: 16px;
-     height: 16px;
-     border: 2px solid #333;
-     border-top-color: transparent;
-     border-radius: 50%;
-     animation: spin 1s linear infinite;
-     margin-left: 8px;
- }
- @keyframes spin {
-     to { transform: rotate(360deg); }
- }
- .output-container {
-     margin-top: 20px;
-     padding: 10px;
-     border: 1px solid #ddd;
-     border-radius: 8px;
- }
- .audio-output-container {
-     display: flex;
-     align-items: center;
-     gap: 10px;
-     margin-top: 10px;
- }
+ #input-row {display:flex; gap:6px; align-items:center;}
+ #msg-box {flex:1;}
"""

- # Function to process input (text, audio, images, files)
-
- # === Processing function ===
+ # ================= Processing function =================
def process_input(message, history, audio_input=None, file_input=None):
    input_type = "text"
    audio_data, image_data = None, None
@@ -102,31 +57,30 @@ def process_input(message, history, audio_input=None, file_input=None):
        message = f"Analyze file: {file_input}"

    response_text, audio_response = "", None
-     for chunk in generate(message=message, history=history,
-                           input_type=input_type,
-                           audio_data=audio_data,
-                           image_data=image_data):
-         if isinstance(chunk, bytes):
+     for chunk in generate(
+         message=message,
+         history=history,
+         input_type=input_type,
+         audio_data=audio_data,
+         image_data=image_data
+     ):
+         if isinstance(chunk, bytes):  # 🔊 audio
            audio_response = io.BytesIO(chunk)
            audio_response.name = "reply.wav"
-         else:
+         else:  # 📝 text
            response_text += chunk

        yield response_text, audio_response

- # === Gradio UI ===
- with gr.Blocks(css="""
- #input-row {display:flex; gap:6px; align-items:center;}
- #msg-box {flex:1;}
- """) as demo:
-     chatbot = gr.Chatbot(label="MGZon Chatbot", height=700,
-                          latex_delimiters=LATEX_DELIMS)
+ # ================= Gradio UI =================
+ with gr.Blocks(css=css, theme="gradio/soft") as chatbot_ui:
+     chatbot = gr.Chatbot(label="MGZon Chatbot", height=700, latex_delimiters=LATEX_DELIMS)
    state = gr.State([])

    with gr.Row(elem_id="input-row"):
        msg = gr.Textbox(placeholder="Type your message...", elem_id="msg-box")
-         mic = gr.Audio(sources=["microphone"], type="filepath", label="", elem_classes="audio-input")
-         file = gr.File(file_types=["image", ".pdf", ".txt"], label="", elem_classes="upload-button")
+         mic = gr.Audio(sources=["microphone"], type="filepath", label="🎤", elem_classes="audio-input")
+         file = gr.File(file_types=["image", ".pdf", ".txt"], label="📎", elem_classes="upload-button")
        send_btn = gr.Button("Send")

    voice_reply = gr.Audio(label="🔊 Voice Reply", type="filepath", autoplay=True)
@@ -142,59 +96,17 @@ with gr.Blocks(css="""
        process_input, [msg, state, mic, file], [chatbot, voice_reply]
    )

-     demo.launch()
-
-
-
- # Set up the Gradio interface
- chatbot_ui = gr.ChatInterface(
-     fn=process_input,
-     chatbot=gr.Chatbot(
-         label="MGZon Chatbot",
-         height=800,
-         latex_delimiters=LATEX_DELIMS,
-     ),
-     additional_inputs_accordion=gr.Accordion("⚙️ Settings", open=True),
-     additional_inputs=[
-         gr.Textbox(
-             label="System Prompt",
-             value="You are an expert assistant providing detailed, comprehensive, and well-structured responses. Support text, audio, image, and file inputs. For audio, transcribe using Whisper. For text-to-speech, use Parler-TTS. For images and files, analyze content appropriately. Continue generating content until the query is fully addressed, leveraging the full capacity of the model.",
-             lines=4
-         ),
-         gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, step=0.1, value=0.7),
-         gr.Radio(label="Reasoning Effort", choices=["low", "medium", "high"], value="medium"),
-         gr.Checkbox(label="Enable DeepSearch (web browsing)", value=True),
-         gr.Slider(label="Max New Tokens", minimum=50, maximum=128000, step=50, value=128000),
-         gr.Audio(label="Voice Input", type="filepath", elem_classes="audio-input"),
-         gr.File(label="Upload Image/File", file_types=["image", ".pdf", ".txt"], elem_classes="upload-button"),
-     ],
-     additional_outputs=[gr.Audio(label="Voice Output", type="filepath", elem_classes="audio-output", autoplay=True)],
-     stop_btn="Stop",
-     examples=[
-         ["Explain the difference between supervised and unsupervised learning in detail with examples."],
-         ["Generate a complete React component for a login form with form validation and error handling."],
-         ["Describe this image: https://example.com/image.jpg"],
-         ["Transcribe this audio: [upload audio file]."],
-         ["Convert this text to speech: Hello, welcome to MGZon!"],
-         ["Analyze this file: [upload PDF or text file]."],
-     ],
-     title="MGZon Chatbot",
-     description="A versatile chatbot powered by DeepSeek, CLIP, Whisper, and Parler-TTS for text, image, audio, and file queries. Supports long responses, voice input/output, file uploads with custom icons, and backup token switching. Licensed under Apache 2.0.",
-     theme="gradio/soft",
-     css=css,
- )
-
- # FastAPI setup
+ # ================= FastAPI =================
app = FastAPI(title="MGZon Chatbot API")

- # Mount Gradio with FastAPI
+ # Mount Gradio inside FastAPI
app = gr.mount_gradio_app(app, chatbot_ui, path="/gradio")

- # Mount static files and templates
+ # Static files + templates
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

- # Middleware to handle 404
+ # Middleware 404
class NotFoundMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        try:
@@ -209,27 +121,28 @@ class NotFoundMiddleware(BaseHTTPMiddleware):

app.add_middleware(NotFoundMiddleware)

- # Root endpoint
+ # Root
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

- # Docs endpoint
+ # Docs
@app.get("/docs", response_class=HTMLResponse)
async def docs(request: Request):
    return templates.TemplateResponse("docs.html", {"request": request})

- # Swagger UI endpoint
+ # Swagger
@app.get("/swagger", response_class=HTMLResponse)
async def swagger_ui():
    return get_swagger_ui_html(openapi_url="/openapi.json", title="MGZon API Documentation")

- # Redirect to /gradio
+ # Redirect
@app.get("/launch-chatbot", response_class=RedirectResponse)
async def launch_chatbot():
    return RedirectResponse(url="/gradio", status_code=302)

- # Run the server
+ # Run
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 7860)))
+
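After this change the Gradio UI is mounted inside FastAPI rather than launched on its own, so a quick way to verify a deployment is to hit the routes defined in main.py. A minimal smoke-test sketch, assuming the server is running locally on the default PORT (7860) and that the requests package is available:

# Smoke-test sketch, not part of the repo: checks the routes main.py exposes.
import requests

BASE = "http://localhost:7860"  # assumed local address; adjust for your deployment

for path in ("/", "/docs", "/swagger", "/gradio"):
    r = requests.get(BASE + path, timeout=10)
    print(path, r.status_code)

# /launch-chatbot should answer with a 302 redirect to /gradio
r = requests.get(BASE + "/launch-chatbot", allow_redirects=False, timeout=10)
print("/launch-chatbot", r.status_code, "->", r.headers.get("location"))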