Update app.py
app.py CHANGED
@@ -24,7 +24,16 @@ with st.sidebar:
     # ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
     # RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
 
+whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
 
+
+def transcribe(wav_path):
+
+    return whisper_client.predict(
+        wav_path,  # str (filepath or URL to file) in 'inputs' Audio component
+        "transcribe",  # str in 'Task' Radio component
+        api_name="/predict"
+    )
 
 # Prediction function
 def predict(message, system_prompt='', temperature=0.7, max_new_tokens=4096,Topp=0.5,Repetitionpenalty=1.2):
@@ -52,8 +61,18 @@ st.write(DESCRIPTION)
 textinput = st.chat_input("Ask LLama-2-70b anything...")
 wav_audio_data = st_audiorec()
 
-
+if wav_audio_data != None:
+    # save audio
+    with open("audio.wav", "wb") as f:
+        f.write(wav_audio_data)
+    # transcribe audio
+    response = transcribe("audio.wav")
 
+    with st.chat_message("assistant", avatar='🦙'):
+        st.markdown(response)
+    # Add assistant response to chat history
+    st.session_state.messages.append({"role": "assistant", "content": response})
+
 
 
 if "messages" not in st.session_state:
@@ -76,8 +95,4 @@ if prompt := textinput:
     with st.chat_message("assistant", avatar='🦙'):
         st.markdown(response)
     # Add assistant response to chat history
-    st.session_state.messages.append({"role": "assistant", "content": response})
-
-
-
-
+    st.session_state.messages.append({"role": "assistant", "content": response})
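
The diff above adds a Whisper transcription path on top of the existing Llama-2 chat UI, but it does not show the imports it relies on. Below is a minimal, self-contained sketch of how the new pieces are assumed to fit together; it is not part of the commit. The gradio_client Client import, the st_audiorec recorder import, and the temporary file in place of the hard-coded "audio.wav" are assumptions or illustrative choices.

# Minimal sketch (not part of the commit) of how the transcription flow added above
# is assumed to hang together. Assumptions: gradio_client and streamlit-audiorec are
# installed, the referenced Whisper Space is reachable, and anything not shown in the
# diff (imports, temp-file handling) is illustrative.
import tempfile

import streamlit as st
from gradio_client import Client       # Client(src).predict(..., api_name=...) calls the Space
from st_audiorec import st_audiorec    # in-browser audio recorder widget

whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")


def transcribe(wav_path):
    # Send the recorded WAV to the hosted Whisper Space and return the transcript string.
    return whisper_client.predict(
        wav_path,        # filepath for the Space's 'inputs' Audio component
        "transcribe",    # 'Task' radio choice
        api_name="/predict",
    )


wav_audio_data = st_audiorec()  # WAV bytes once recording stops, otherwise None

if wav_audio_data is not None:
    # A per-session temporary file avoids concurrent sessions clobbering a shared
    # "audio.wav" (an illustrative choice; the commit writes a fixed filename).
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        f.write(wav_audio_data)
        wav_path = f.name

    transcript = transcribe(wav_path)

    with st.chat_message("assistant", avatar="🦙"):
        st.markdown(transcript)
    if "messages" not in st.session_state:
        st.session_state.messages = []
    st.session_state.messages.append({"role": "assistant", "content": transcript})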