Commit f5c6ef3 · Parent(s): 1c08614
Fix Gradio _js error, add flash-attention, update Chatbot type, and set MODEL_NAME to together
Files changed:
- main.py +31 -9
- requirements.txt +1 -1
main.py
CHANGED
@@ -78,10 +78,10 @@ css = """
     transition: color 0.2s;
 }
 .input-icon:hover {
-    color: #
+    color: #25D366;
 }
 .submit-btn {
-    background: #
+    background: #25D366;
     color: white;
     border-radius: 50%;
     width: 36px;
@@ -94,7 +94,7 @@ css = """
     box-shadow: 0 1px 3px rgba(0,0,0,0.2);
 }
 .submit-btn:hover {
-    background: #
+    background: #20B858;
 }
 .output-container {
     margin: 15px 0;
@@ -126,6 +126,7 @@ css = """
     transform: scale(1.05);
 }
 """
+
 # Function to process input
 def process_input(message, audio_input=None, image_input=None, history=None, system_prompt=None, temperature=0.7, reasoning_effort="medium", enable_browsing=True, max_new_tokens=128000, output_format="text"):
     input_type = "text"
@@ -176,6 +177,10 @@ def process_input(message, audio_input=None, image_input=None, history=None, sys
         logger.error(f"Generation failed: {e}")
         yield f"Error: Generation failed: {e}", None
 
+# Function to clear inputs after submission
+def clear_inputs(response_text, audio_response, chatbot, message):
+    return response_text, audio_response, [], ""
+
 # Function to handle the audio submit button
 def submit_audio(audio_input, output_format):
     if not audio_input:
@@ -206,7 +211,6 @@ def submit_image(image_input, output_format):
         logger.error(f"Image submission failed: {e}")
         return f"Error: Image processing failed: {e}", None
 
-# Set up the Gradio interface
 # Set up the Gradio interface
 with gr.Blocks(css=css, theme="gradio/soft") as chatbot_ui:
     gr.Markdown(
@@ -217,7 +221,13 @@ with gr.Blocks(css=css, theme="gradio/soft") as chatbot_ui:
     )
     with gr.Row():
         with gr.Column(scale=3):
-            chatbot = gr.Chatbot(
+            chatbot = gr.Chatbot(
+                label="Chat",
+                height=600,
+                latex_delimiters=LATEX_DELIMS,
+                elem_classes="chatbot",
+                type="messages"
+            )
         with gr.Column(scale=1):
             with gr.Accordion("⚙️ Settings", open=False, elem_classes="settings-accordion"):
                 system_prompt = gr.Textbox(
@@ -270,25 +280,37 @@ with gr.Blocks(css=css, theme="gradio/soft") as chatbot_ui:
         fn=process_input,
         inputs=[message, audio_input, file_input, chatbot, system_prompt, temperature, reasoning_effort, enable_browsing, max_new_tokens, output_format],
         outputs=[output_text, output_audio, chatbot, message],
-
+    ).then(
+        fn=clear_inputs,
+        inputs=[output_text, output_audio, chatbot, message],
+        outputs=[output_text, output_audio, chatbot, message]
     )
     message.submit(
         fn=process_input,
         inputs=[message, audio_input, file_input, chatbot, system_prompt, temperature, reasoning_effort, enable_browsing, max_new_tokens, output_format],
         outputs=[output_text, output_audio, chatbot, message],
-
+    ).then(
+        fn=clear_inputs,
+        inputs=[output_text, output_audio, chatbot, message],
+        outputs=[output_text, output_audio, chatbot, message]
    )
     file_input.change(
         fn=submit_image,
         inputs=[file_input, output_format],
         outputs=[output_text, output_audio, chatbot, message],
-
+    ).then(
+        fn=clear_inputs,
+        inputs=[output_text, output_audio, chatbot, message],
+        outputs=[output_text, output_audio, chatbot, message]
     )
     audio_input.change(
         fn=submit_audio,
         inputs=[audio_input, output_format],
         outputs=[output_text, output_audio, chatbot, message],
-
+    ).then(
+        fn=clear_inputs,
+        inputs=[output_text, output_audio, chatbot, message],
+        outputs=[output_text, output_audio, chatbot, message]
     )
 
 # Set up FastAPI
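For reference, a minimal, self-contained sketch (not from this repo) of the pattern the main.py changes adopt: a gr.Chatbot with type="messages" and a follow-up step chained with .then() instead of a JS callback (the commit message mentions a Gradio _js error). The respond and clear_message helpers are hypothetical stand-ins for process_input and clear_inputs, and it assumes a Gradio release recent enough to support type="messages".

import gradio as gr

# Hypothetical stand-in for process_input: appends the exchange to a
# "messages"-style history (a list of {"role": ..., "content": ...} dicts).
def respond(message, history):
    history = (history or []) + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": f"Echo: {message}"},
    ]
    return history, message

# Hypothetical stand-in for clear_inputs: runs after respond() via .then()
# and clears the textbox without any custom JavaScript.
def clear_message(history, message):
    return history, ""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Chat", height=600, type="messages")
    message = gr.Textbox(placeholder="Type a message")
    message.submit(
        fn=respond,
        inputs=[message, chatbot],
        outputs=[chatbot, message],
    ).then(
        fn=clear_message,
        inputs=[chatbot, message],
        outputs=[chatbot, message],
    )

if __name__ == "__main__":
    demo.launch()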
requirements.txt
CHANGED
@@ -16,7 +16,7 @@ pydub==0.25.1
 ffmpeg-python==0.2.0
 numpy==1.26.4
 parler-tts @ git+https://github.com/huggingface/parler-tts.git@5d0aca9753ab74ded179732f5bd797f7a8c6f8ee
-torch
+torch>=2.0.0
 torchaudio==2.4.1
 transformers==4.43.3
 webrtcvad==2.0.10
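A small illustrative check (not part of the repo) that an installed environment satisfies the loosened torch floor; it assumes the packaging module is available, which pip installations normally provide. In practice pip will likely still resolve torch to 2.4.1, since the pinned torchaudio==2.4.1 is built against a matching torch release.

# Illustrative environment check: torch is now a floor (>= 2.0.0) while
# torchaudio stays pinned at 2.4.1 in requirements.txt.
from packaging.version import Version

import torch
import torchaudio

torch_version = Version(torch.__version__.split("+")[0])  # drop local tags like "+cu121"
print("torch:", torch_version, "| torchaudio:", torchaudio.__version__)
assert torch_version >= Version("2.0.0"), "requirements.txt expects torch >= 2.0.0"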