# from pydantic import NoneStr
import os
import mimetypes
# import validators
import requests
import tempfile
import gradio as gr
from openai import AzureOpenAI
import re
import json
from transformers import pipeline
import matplotlib.pyplot as plt
import plotly.express as px
import pandas as pd
import plotly.graph_objects as go

client = AzureOpenAI()
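
# AzureOpenAI() with no arguments reads its configuration from environment variables
# (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION). A minimal sketch
# of the explicit equivalent, with placeholder endpoint and version values, would be:
#
# client = AzureOpenAI(
#     api_key=os.environ["AZURE_OPENAI_API_KEY"],
#     azure_endpoint="https://<your-resource>.openai.azure.com",
#     api_version="2024-02-01",
# )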


class SentimentAnalyzer:
    def __init__(self):
        pass

    def emotion_analysis(self, text):
        # Build the conversation for the Azure OpenAI chat API
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": f"""Your task is to identify the emotions in this conversation.
            Conversation: {text}
            Labels: [Sadness, Happiness, Fear, Anger]
            Provide an emotion score for each label for the given conversation.
            Return the answer in valid JSON format only.
            """}
        ]
        # Call the configured Azure OpenAI chat deployment
        chat_completion = client.chat.completions.create(
            model="GPT-4o",  # Azure deployment name
            messages=conversation,
            max_tokens=500,
            temperature=0
        )
        response = chat_completion.choices[0].message.content
        print("emotion_analysis", response)
        return response
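
    # The plotting helpers below assume the model returns a flat JSON object,
    # for example (illustrative values only):
    #   {"Sadness": 0.7, "Happiness": 0.1, "Fear": 0.4, "Anger": 0.2}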

    def analyze_sentiment_for_graph(self, text):
        # Build the conversation for the Azure OpenAI chat API
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": f"""Your task is to analyse the conversation and provide a sentiment analysis.
            ```Conversation: {text}```
            ```Labels: [positive, negative, neutral]```
            Provide a sentiment score for each label for the given conversation. Return the answer in valid JSON format only.
            """}
        ]
        # Call the configured Azure OpenAI chat deployment
        chat_completion = client.chat.completions.create(
            model="GPT-3",  # Azure deployment name
            messages=conversation,
            max_tokens=500,
            temperature=0
        )
        response = chat_completion.choices[0].message.content
        print("analyze_sentiment_for_graph", response)
        return response


class Summarizer:
    def __init__(self):
        # openai.api_key=os.getenv("OPENAI_API_KEY")
        pass

    def generate_summary(self, text):
        # Build the conversation for the Azure OpenAI chat API
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": f"""Summarize the following conversation, delimited by triple backticks, in no more than 60 words. ```{text}``` """}
        ]
        # Call the configured Azure OpenAI chat deployment
        chat_completion = client.chat.completions.create(
            model="GPT-4o",  # Azure deployment name
            messages=conversation,
            max_tokens=500,
            temperature=0
        )
        response = chat_completion.choices[0].message.content
        return response


history_state = gr.State()
summarizer = Summarizer()
sentiment = SentimentAnalyzer()


class LangChain_Document_QA:
    def __init__(self):
        # openai.api_key=os.getenv("OPENAI_API_KEY")
        pass

    def _display_history(self):
        # Note: relies on a _chat_history() helper that is not defined in this file.
        formatted_history = self._chat_history()
        # formatted_history = _suggested_answer()
        summary = summarizer.generate_summary(formatted_history)
        return summary

    def _display_graph(self, json_string):
        # Parse the JSON string into a dictionary of label -> score
        json_data = json.loads(json_string)
        sentiments = list(json_data.keys())
        scores = list(json_data.values())
        fig = go.Figure(data=[go.Bar(x=sentiments, y=scores, marker_color=['green', 'red', 'blue'])])
        fig.update_layout(
            title='Sentiment Analysis Scores',
            xaxis=dict(title='Sentiment'),
            yaxis=dict(title='Score'),
        )
        return fig
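
    # Illustrative call, assuming the model returned exactly the flat JSON asked for
    # (the scores are made up):
    #   self._display_graph('{"positive": 0.6, "negative": 0.1, "neutral": 0.3}')
    # If the model wraps its answer in markdown fences or nests the scores,
    # json.loads will fail or the bars will not plot; the prompt is the only guard.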

    def _display_graph_emotion(self, json_string):
        # Parse the JSON string into a dictionary of label -> score
        json_data = json.loads(json_string)
        emotions = list(json_data.keys())
        scores = list(json_data.values())
        # Four colours, one per emotion label (Sadness, Happiness, Fear, Anger)
        fig = go.Figure(data=[go.Bar(x=emotions, y=scores, marker_color=['green', 'red', 'blue', 'orange'])])
        fig.update_layout(
            title='Emotion Analysis Scores',
            xaxis=dict(title='Emotions'),
            yaxis=dict(title='Score'),
        )
        return fig

    def _suggested_answer(self, text, chat_history):
        try:
            file_path = "patient_details.json"
            with open(file_path) as file:
                patient_details = json.load(file)
        except (FileNotFoundError, json.JSONDecodeError):
            patient_details = None  # optional context; not used in the prompt below
        # Build the conversation for the Azure OpenAI chat API
        conversation = [
            {"role": "system", "content": "You are a Mental Healthcare Assistant."},
            {"role": "user", "content": f"""You are a Mental Healthcare Assistant.
            Ask more about the patient's problems step by step.
            Then give a short mental healthcare solution for the patient's problems.
            ```Chat History: {chat_history}```
            Patient Query: {text}.
            Mental Healthcare Chatbot:
            """}
        ]
        # Call the configured Azure OpenAI chat deployment
        chat_completion = client.chat.completions.create(
            model="GPT-4o",  # Azure deployment name
            messages=conversation,
            max_tokens=300,
            temperature=0
        )
        response = chat_completion.choices[0].message.content
        chat_history.append((text, response))
        return "", chat_history

    def _on_sentiment_btn_click(self, history):
        # client=self._history_of_chat()
        customer_emotion = sentiment.emotion_analysis(history)
        customer_sentiment_score = sentiment.analyze_sentiment_for_graph(history)
        sentiment_graph = self._display_graph(customer_sentiment_score)
        emotion_graph = self._display_graph_emotion(customer_emotion)
        return sentiment_graph, emotion_graph

    def gradio_interface(self):
        with gr.Blocks(css="style.css", theme='JohnSmith9982/small_and_pretty') as demo:
            with gr.Row():
                gr.HTML("""<center></center>""")
            with gr.Row():
                gr.HTML("""<center><h1>AI Mental Healthcare ChatBot</h1></center>""")
            with gr.Row():
                with gr.Column(scale=1):
                    with gr.Row():
                        chatbot = gr.Chatbot()
                    with gr.Row():
                        with gr.Column(scale=0.90):
                            txt = gr.Textbox(show_label=False, placeholder="Patient")
                        with gr.Column(scale=0.10):
                            emptyBtn = gr.ClearButton([txt, chatbot])
                            # emptyBtn = gr.Button()
            with gr.Accordion("Conversational AI Analytics", open=False):
                with gr.Row():
                    with gr.Column(scale=1.0):
                        txt4 = gr.Textbox(
                            show_label=False,
                            lines=4,
                            placeholder="Summary")
                with gr.Row():
                    with gr.Column(scale=0.50, min_width=0):
                        end_btn = gr.Button(value="End")
                    with gr.Column(scale=0.50, min_width=0):
                        Sentiment_btn = gr.Button(value="π")
                with gr.Row():
                    gr.HTML("""<center><h1>Sentiment and Emotion Score Graph</h1></center>""")
                with gr.Row():
                    with gr.Column(scale=1, min_width=0):
                        plot = gr.Plot(label="Patient")
                with gr.Row():
                    with gr.Column(scale=1, min_width=0):
                        plot_3 = gr.Plot(label="Patient_Emotion")

            # txt_msg = txt.submit(self._add_text, [chatbot, txt], [chatbot, txt]).then(
            #     self._suggested_answer, [chatbot, txt], chatbot)
            # txt_msg.then(lambda: gr.update(interactive=True), None, [txt])
            # txt.submit(self._suggested_answer, [chatbot, txt], chatbot)
            # button.click(self._agent_text, [chatbot, txt3], chatbot)
            txt.submit(self._suggested_answer, [txt, chatbot], [txt, chatbot])
            print("chatbot", chatbot)
            end_btn.click(summarizer.generate_summary, chatbot, txt4)
            # emptyBtn.click(self.clear_func, [], [])
            # emptyBtn.click(lambda: None, None, chatbot, queue=False)
            Sentiment_btn.click(self._on_sentiment_btn_click, chatbot, [plot, plot_3])

        demo.title = "AI Mental Healthcare ChatBot"
        demo.launch(debug=True)


document_qa = LangChain_Document_QA()
document_qa.gradio_interface()