elismasilva commited on
Commit
f7824b9
·
1 Parent(s): 65384af

adjusted structure

Browse files
.env.example ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ DATABASE_URL="postgres..."
2
+ AGENT_API_URL=https://run.blaxel.ai...
3
+ BLAXEL_API_KEY=...
4
+ MCP_SERVER_URL=https://mcp-1st-birthday-....hf.space/
5
+ GOOGLE_API_KEY=...
6
+ USE_SERVER_KEYS=True #if True, use server keys; if False, user input keys
.gitignore ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .eggs/
2
+ dist/
3
+ /bk/
4
+ .vscode/
5
+ *.pyc
6
+ __pycache__/
7
+ *.py[cod]
8
+ *$py.class
9
+ __tmp/*
10
+ *.pyi
11
+ .mypycache
12
+ .ruff_cache
13
+ node_modules
14
+ backend/**/templates/
15
+ README_TEMPLATE.md
16
+ .env
17
+ *.pem
18
+ post.md
Dockerfile ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Python 3.10 base image
2
+ FROM python:3.10-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies
8
+ RUN apt-get update && apt-get install -y \
9
+ git \
10
+ git-lfs \
11
+ ffmpeg \
12
+ libsm6 \
13
+ libxext6 \
14
+ cmake \
15
+ rsync \
16
+ libgl1 \
17
+ && rm -rf /var/lib/apt/lists/* \
18
+ && git lfs install
19
+
20
+ # Copy requirements first (for better caching)
21
+ COPY requirements.txt .
22
+
23
+ # Fresh install of Gradio 6 as recommended by Gradio team
24
+ # Install in a single command to avoid conflicts
25
+ RUN pip install --no-cache-dir --upgrade pip && \
26
+ pip install --no-cache-dir "gradio[mcp,oauth]==5.50.0" && \
27
+ pip install --no-cache-dir -r requirements.txt
28
+
29
+ # Copy application code
30
+ COPY . .
31
+
32
+ # Expose port
33
+ EXPOSE 7860
34
+
35
+ # Set environment variables
36
+ ENV GRADIO_SERVER_NAME="0.0.0.0"
37
+ ENV GRADIO_SERVER_PORT=7860
38
+
39
+ # Run the application
40
+ CMD ["python", "app.py"]
Makefile ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ check_dirs := config
2
+
3
+ check:
4
+ ruff check $(check_dirs) app.py
5
+ ruff format --check $(check_dirs) app.py
6
+
7
+ format:
8
+ ruff check $(check_dirs) app.py --fix
9
+ ruff format $(check_dirs) app.py
10
+
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Gitrepo Inspector Dashboard
3
  emoji: 💻
4
- colorFrom: red
5
- colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
  short_description: GitRepo Inspector Application Dashboard
 
1
  ---
2
+ title: GitRepo Inspector Dashboard
3
  emoji: 💻
4
+ colorFrom: gray
5
+ colorTo: indigo
6
  sdk: docker
7
  pinned: false
8
  short_description: GitRepo Inspector Application Dashboard
app.py ADDED
@@ -0,0 +1,1200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import time
4
+ import gradio as gr
5
+ import markdown
6
+ import pandas as pd
7
+ import requests
8
+ import services.db as db
9
+ from dotenv import load_dotenv
10
+ from gradio_client import Client
11
+ from gradio_htmlplus import HTMLPlus
12
+ from gradio_bottombar import BottomBar
13
+ from gradio_buttonplus import ButtonPlus
14
+ from services import charts
15
+ from services.table_renderer import generate_issues_html
16
+ from services.chat_utils import stream_to_gradio
17
+ from services.agent_chat import create_dashboard_agent
18
+ from config.constants import AVAILABLE_MODELS_BY_PROVIDER
19
+
20
+ load_dotenv()
21
+ ACTIVE_SYNCS = set()
22
+ DEFAULT_SORT = {"col": "updated_at", "asc": False}
23
+ DEFAULT_REPO = "https://github.com/gradio-app/gradio"
24
+ BLAXEL_ICON_URL = "https://huggingface.co/datasets/DEVAIEXP/assets/resolve/main/blaxel_logo.png"
25
+
26
+ # Configuration
27
+ MCP_SERVER_URL = os.getenv("MCP_SERVER_URL", "https://mcp-1st-birthday-gitrepo-inspector-mcp.hf.space/")
28
+ AGENT_API_URL = os.getenv("AGENT_API_URL", "https://run.blaxel.ai/devaiexp/agents/agent")
29
+ BLAXEL_API_KEY = os.getenv("BLAXEL_API_KEY")
30
+
31
+ # LOGIC LAYER
32
+ def _validate_api_keys(llm_api_key, request):
33
+ USE_SERVER_KEYS = os.getenv("USE_SERVER_KEYS", "false").lower() in (
34
+ "True",
35
+ "true",
36
+ "1",
37
+ "yes",
38
+ )
39
+ if not USE_SERVER_KEYS and request and request.headers.get("referer"):
40
+ if not llm_api_key or not llm_api_key.strip():
41
+ raise gr.Error("⚠️ LLM API Key Required! Please enter your own API Key to use this tool in the demo UI.")
42
+
43
def _get_custom_header():
    """Build the dashboard hero header as a self-contained HTML string.

    Returns inline <style> rules plus the header markup. The Blaxel logo URL
    is injected via str.replace because the CSS braces make an f-string
    impractical here.
    """
    html = """
    <style>
    .header-container {
        background: linear-gradient(314deg, #64748b 0%, #373f4a 100%);
        padding: 30px 20px;
        border-radius: 16px;
        color: white !important;
        text-align: center;
        box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
        margin-bottom: 25px;
        font-family: 'Inter', -apple-system, sans-serif;
    }

    .header-content {
        max-width: 800px;
        margin: 0 auto;
    }

    .header-title {
        color: white !important;
        font-size: 2.5rem;
        font-weight: 800;
        margin: 0;
        display: flex;
        align-items: center;
        justify-content: center;
        gap: 15px;
        letter-spacing: -0.02em;
        text-shadow: 0 2px 4px rgba(0,0,0,0.1);
    }

    .header-icon {
        font-size: 3rem;
        filter: drop-shadow(0 2px 4px rgba(0,0,0,0.1));
    }

    .header-subtitle {
        color: var(--neutral-200) !important;
        font-size: 1.1rem;
        font-weight: 400;
        margin-top: 8px;
        opacity: 0.9;
        letter-spacing: 0.01em;
    }

    .header-footer {
        color: var(--neutral-200) !important;
        margin-top: 25px;
        padding-top: 15px;
        border-top: 1px solid rgba(255, 255, 255, 0.2);
        font-size: 0.85rem;
        font-weight: 500;
        opacity: 0.85;
        display: flex;
        justify-content: center;
        flex-wrap: wrap;
        gap: 15px;
    }

    .header-tag {
        color: var(--neutral-200) !important;
        display: flex;
        align-items: center;
        gap: 6px;
    }

    .separator {
        color: var(--neutral-200) !important;
        opacity: 0.4;
    }
    .header-img-icon {
        width: 18px;
        height: 18px;
        vertical-align: text-bottom;
        opacity: 0.9;
    }
    </style>

    <div class="header-container">
        <div class="header-content">
            <div class="header-title">
                <span class="header-icon">🕵️‍♂️</span> GitRepo Inspector
            </div>
            <div class="header-subtitle">
                Autonomous AI Agent for GitHub Issue Triage & Management
            </div>

            <div class="header-footer">
                <span class="header-tag">Powered by Gradio 🚀</span>
                <span class="separator">|</span>
                <span class="header-tag">
                    Blaxel Agents
                    <img src="{BLAXEL_ICON_URL}" class="header-img-icon" alt="icon">
                </span>
                <span class="separator">|</span>
                <span class="header-tag">MCP Protocol 🔌</span>
                <span class="separator">|</span>
                <span class="header-tag">Gemini • SambaNova • OpenAI • Nebius</span>
            </div>
        </div>
    </div>
    """
    # Inject the module-level logo URL into the {BLAXEL_ICON_URL} placeholder.
    return html.replace("{BLAXEL_ICON_URL}", BLAXEL_ICON_URL)
147
+
148
+ def _format_logs_to_html(logs_df):
149
+ """
150
+ Converts logs to HTML list with Icons and Client-side Timezone conversion.
151
+ """
152
+ if logs_df.empty:
153
+ return "<div style='color: #666; padding: 10px;'>Waiting for agent activity...</div>"
154
+
155
+ html_content = """
156
+ <style>
157
+ .log-entry {
158
+ display: flex;
159
+ align-items: center;
160
+ padding: 4px 0;
161
+ border-bottom: 1px solid rgba(128,128,128, 0.1);
162
+ font-family: 'Consolas', monospace;
163
+ font-size: 12px;
164
+ color: var(--body-text-color);
165
+ }
166
+ .log-icon-img {
167
+ width: 20px;
168
+ height: 20px;
169
+ margin-right: 8px;
170
+ border-radius: 4px;
171
+ }
172
+ .log-time {
173
+ color: #6B7280;
174
+ margin-right: 10px;
175
+ min-width: 70px;
176
+ font-weight: bold;
177
+ }
178
+ .log-type { margin-right: 8px; }
179
+ </style>
180
+ <div id="log-container">
181
+ """
182
+
183
+ for _, row in logs_df.iterrows():
184
+ # ISO timestamp for JS to read (e.g., 2023-11-22T10:00:00Z)
185
+ iso_time = row['created_at'].isoformat()
186
+
187
+ etype = str(row['event_type']).lower()
188
+ icon_char = "🧠" if 'thought' in etype else "🔌" if 'tool' in etype else "❌" if 'error' in etype else "✅" if 'success' in etype else "ℹ️"
189
+
190
+ issue_tag = f"<span style='color: #3B82F6;'>[#{int(row['issue_number'])}]</span> " if pd.notna(row['issue_number']) and row['issue_number'] != "None" else ""
191
+
192
+ # Build HTML line
193
+ html_content += f"""
194
+ <div class="log-entry">
195
+ <img src="{BLAXEL_ICON_URL}" width=32 class="log-icon-img" alt="icon">
196
+ <!-- JS will fill this span with local time -->
197
+ <span class="log-time local-time" data-timestamp="{iso_time}">--:--:--</span>
198
+ <span class="log-type">{icon_char}</span>
199
+ <span>{issue_tag}{row['message']}</span>
200
+ </div>
201
+ """
202
+
203
+ html_content += "</div>"
204
+
205
+ return html_content
206
+
207
def fetch_fresh_data_to_cache(log_limit=50, repo_url=None):
    """
    Fetch every dashboard artifact from the DB in one pass.

    Returns a 5-tuple: (donut_fig, funnel_fig, timeline_fig, full_df, logs_text).
    """
    # Raw data first (all reads are independent of each other).
    verdict_stats = db.fetch_dashboard_stats(repo_url)
    open_count = db.get_total_open_issues_count(repo_url)
    recent_logs = db.fetch_agent_logs(limit=log_limit)
    all_issues = db.fetch_issues_dataframe("all", repo_url)

    # Derived artifacts.
    donut_fig = charts.create_verdict_donut(verdict_stats)
    funnel_fig = charts.create_efficiency_funnel(verdict_stats, open_count)
    timeline_fig = charts.create_timeline_chart(all_issues)
    logs_text = _format_logs_to_html(recent_logs)

    return donut_fig, funnel_fig, timeline_fig, all_issues, logs_text
223
+
224
def render_from_cache(df_cache, view_filter, sort_config, page=1):
    """Render one 50-row page of the cached issues DataFrame as HTML.

    Args:
        df_cache: full issues DataFrame (may be None/empty).
        view_filter: "Action Required" filters to pending proposals; anything
            else shows all rows.
        sort_config: dict with 'col' and 'asc' keys.
        page: requested 1-based page; clamped into the valid range.

    Returns:
        (html_table, page_label_markdown, clamped_page).
    """
    PAGE_SIZE = 50

    if df_cache is None or df_cache.empty:
        return generate_issues_html(pd.DataFrame()), "Page 1 / 1", 1

    if "Action" in view_filter:
        # Only issues awaiting approval that actually carry a proposal.
        df = df_cache[
            (df_cache['status'] == 'pending_approval') &
            (df_cache['proposed_action'].notnull())
        ]
    else:
        df = df_cache

    if sort_config and sort_config['col'] in df.columns:
        df = df.sort_values(
            by=sort_config['col'],
            ascending=sort_config['asc'],
            na_position='last'
        )

    total_rows = len(df)
    # Ceiling division. The old "(total_rows // 50) + 1" produced a trailing
    # empty page whenever total_rows was an exact multiple of 50.
    total_pages = max(1, -(-total_rows // PAGE_SIZE))

    page = max(1, min(page, total_pages))

    start = (page - 1) * PAGE_SIZE
    df_page = df.iloc[start:start + PAGE_SIZE]

    html = generate_issues_html(df_page, sort_config.get('col'), sort_config.get('asc'))
    page_label = f"Page **{page}** of {total_pages} ({total_rows} items)"

    return html, page_label, page
257
+
258
def refresh_dashboard_data(view_filter, sort_config=None, log_limit=50, repo_url=None):
    """Reload all dashboard widgets from the database, reset to page 1.

    Returns the 7 outputs wired to the UI:
    (donut_fig, timeline_fig, html_table, logs_text, full_df, page_label, page).
    """
    if sort_config is None:
        sort_config = DEFAULT_SORT

    # fetch_fresh_data_to_cache returns FIVE values; the funnel figure is not
    # displayed by this refresh path. The previous 4-name unpacking raised
    # "too many values to unpack" at runtime.
    donut_fig, _funnel_fig, timeline_fig, full_df, logs_text = fetch_fresh_data_to_cache(log_limit, repo_url)

    html_table, page_label, page = render_from_cache(full_df, view_filter, sort_config, 1)

    return donut_fig, timeline_fig, html_table, logs_text, full_df, page_label, page
267
+
268
def load_issue_details(issue_number, repo_url):
    """
    Loads details for an issue.
    Strategy: Check Agent Cache -> Fallback to DB.

    Returns a 9-tuple feeding, in order: detail HTML, proposed-comment
    textbox update, exec-status text, approval-box visibility update,
    re-analyze button update, summary markdown, thought text,
    analysis-accordion update, thought-accordion update.
    """
    # No selection (0/None): leave every output untouched.
    if not issue_number or issue_number == 0:
        return gr.skip(), gr.skip(), gr.skip(), gr.skip(), gr.skip(), gr.skip(), gr.skip(), gr.skip(), gr.skip()

    body = None
    action_json = None
    status = "pending_approval"
    thought_content = ""
    wrapped_html = ""

    body, action_json, status, thought_process = db.fetch_issue_details_by_id(issue_number, repo_url)
    raw_body = body if body else "No content."

    # Bodies already rendered by the agent contain "<div ... Analyzed by";
    # pass those through (minus stray code fences). Everything else is
    # treated as Markdown and converted to HTML with a report heading.
    if "<div" in raw_body and "Analyzed by" in raw_body:
        final_html = raw_body.replace("```html", "").replace("```", "")
    else:
        final_html = markdown.markdown(raw_body, extensions=['tables', 'fenced_code'])
        final_html = f"<h3 style='color:#4B5563; border-bottom:1px solid #eee; padding-bottom:5px;'>Analysis Report</h3>{final_html}"

    # Scrollable, theme-aware container around the rendered report.
    wrapped_html = f"""
    <div style="
        font-family: -apple-system, system-ui, sans-serif;
        line-height: 1.6;
        color: var(--body-text-color);
        padding: 20px;
        background: var(--background-fill-secondary);
        border-radius: 8px;
        border: 1px solid var(--border-color-primary);
        max-height: 600px;
        overflow-y: auto;
    ">
        {final_html}
    </div>
    """

    def get_summary_text(st, act):
        # Short status headline for the Action Console panel.
        if st == 'pending_approval':
            return f"### ⚠️ Action Required\n**Issue #{issue_number}** is marked as **{act.get('reason', 'resolved') if act else 'needs review'}**."
        elif st == 'executed':
            return f"### ✅ Action Executed\n**Issue #{issue_number}** was closed."
        else:
            return f"### ℹ️ Status: {st}"

    has_thought = thought_process is not None and len(thought_process.strip()) > 0
    thought_content = thought_process if has_thought else "No thought process available."

    summary_text = get_summary_text(status, action_json)

    # Issue known but never analyzed: show a placeholder and offer manual
    # analysis instead of the report/approval console.
    if not body:
        placeholder_html = f"""
        <div style="
            font-family: -apple-system, system-ui, sans-serif;
            color: #64748B;
            padding: 40px;
            text-align: center;
            background: var(--background-fill-secondary);
            border-radius: 8px;
            border: 1px dashed var(--border-color-primary);
        ">
            <h3>Waiting for Analysis ⏳</h3>
            <p>This issue has not been processed by the Agent yet.</p>
            <p>Click <b>'Analyze Now'</b> to start manual analysis.</p>
        </div>
        """

        return (
            placeholder_html,
            gr.update(value="", interactive=False, label="No Proposal"),
            "",
            gr.update(visible=False),
            gr.update(visible=True, interactive=True, value="▶️ Analyze Now"),
            summary_text,
            "",  # Empty thought
            gr.update(visible=True, open=True),
            gr.update(visible=False, open=False)
        )

    is_executed = (status == 'executed')
    is_pending = (status == 'pending_approval')

    comment_val = action_json.get('comment', '') if action_json else ""

    # The proposed comment stays editable only while approval is pending.
    comment_update = gr.update(
        value=comment_val,
        interactive=is_pending,
        label="Executed Comment" if is_executed else "Proposed Comment"
    )

    # Approve button is only shown for pending proposals.
    auth_vis = gr.update(visible=is_pending)

    # Re-analysis makes no sense once the action has been executed.
    reanalyze_vis = gr.update(visible=not is_executed, interactive=not is_executed, value="🔄 Re-Analyze Issue")

    return wrapped_html, comment_update, "", auth_vis, reanalyze_vis, summary_text, thought_content, gr.update(visible=True, open=True), gr.update(visible=has_thought, open=False)
366
+
367
def trigger_sync_action(repo_url, user_token):
    """
    Calls the MCP sync tool for the selected repository, streaming progress.

    Yields status strings for the sync_status Markdown component. A
    module-level ACTIVE_SYNCS set guards against concurrent syncs of the
    same repository.
    """
    # NOTE: this function is a generator (it yields below), so every early
    # exit must *yield* its message — the old "return error" silently
    # discarded it and only the gr.Info popup was visible.
    if not repo_url:
        error = "Please select or type a repository URL."
        gr.Info(f"⚠️ {error}")
        yield error
        return

    clean_url = repo_url.strip().rstrip("/")

    if not clean_url.startswith("https://github.com/"):
        error = "Invalid URL. Must start with https://github.com/"
        gr.Info(f"⚠️ {error}")
        yield error
        return

    if clean_url in ACTIVE_SYNCS:
        # The f-prefix was missing here, so the literal "{clean_url}" was shown.
        error = f"Sync already in progress for {clean_url}. Please wait..."
        gr.Info(f"⚠️ {error}")
        yield error
        return

    if not user_token:
        error = "GitHub Token Required!"
        gr.Info(f"⚠️ {error}")
        yield error
        return

    ACTIVE_SYNCS.add(clean_url)
    try:
        client = Client(MCP_SERVER_URL)

        job = client.submit(
            repo_url,
            user_token,
            api_name="/sync_repository"
        )

        # Relay each progress message from the MCP job as it arrives.
        for update in job:
            yield f"{update}"

    except Exception as e:
        yield f"❌ Sync Failed: {str(e)}"
    finally:
        # discard() is a no-op if the URL was already removed.
        ACTIVE_SYNCS.discard(clean_url)
411
+
412
def trigger_manual_reanalysis(issue_number, repo_url, provider, model, github_token, api_key, request: gr.Request = None):
    """
    Calls the Blaxel Agent to force a re-analysis of a single issue.

    Blocks until the agent's response stream is drained, then returns a
    status string for the UI. Raises gr.Error (via _validate_api_keys)
    when a user-supplied LLM key is required but missing.
    """
    if not issue_number:
        error = "Select an issue first."
        gr.Info(f"⚠️ {error}")
        return error

    _validate_api_keys(api_key, request)

    if not AGENT_API_URL:
        error = "Agent URL not configured."
        gr.Info(f"⚠️ {error}")
        return error

    payload = {
        "repo_url": repo_url,
        "provider": provider,
        "model": model,
        "specific_issue": int(issue_number),
        "github_token": github_token if github_token else None
    }
    gr.Info(f"⏳ Starting analysis for #{issue_number}. Please wait...")
    try:
        headers = {"Authorization": f"Bearer {BLAXEL_API_KEY}", "Content-Type": "application/json"}
        # timeout=(connect, read): previously absent, so a stalled agent
        # could hang this handler (and the UI) forever.
        with requests.post(AGENT_API_URL, json=payload, headers=headers, stream=True, timeout=(10, 600)) as resp:
            # Surface HTTP failures (401/500/...) instead of reporting success.
            resp.raise_for_status()
            # Drain the stream; progress is persisted server-side (Trace Log).
            for _ in resp.iter_lines():
                pass
        return f"✅ Analysis completed for #{issue_number}. Check the Trace Log tab for details."
    except Exception as e:
        return f"❌ Agent Trigger Failed: {e}"
444
+
445
def execute_approval_workflow(issue_number, repo_url, user_token, final_comment):
    """Execute the agent's proposed action (reply and optionally close) on GitHub.

    Falls back to sensible defaults when no proposal exists, then mirrors the
    execution into the local DB. Returns a status string for the UI; re-raises
    gr.Error so validation popups still appear.
    """
    if not user_token:
        error = "GitHub Token Required! Check Sidebar."
        gr.Info(f"⚠️ {error}")
        return error

    try:
        proposal = db.get_proposed_action_payload(issue_number, repo_url)
        # Flexible Logic:
        # If proposal exists, use its 'close' flag.
        # If NO proposal (user manual action), assume they want to close.
        should_close = True
        if proposal:
            should_close = proposal.get('action') == 'close'

        # If user didn't type comment, use proposal or default
        if not final_comment:
            if proposal:
                final_comment = proposal.get('comment', "Closing via GitRepo Inspector.")
            else:
                final_comment = "Closing via GitRepo Inspector (Manual Action)."

        # Delegate the actual GitHub mutation to the MCP server tool.
        client = Client(MCP_SERVER_URL)
        result = client.predict(
            repo_url,
            int(issue_number),
            final_comment,
            should_close,
            user_token,
            api_name="/reply_and_close_issue"
        )

        # Record the executed action locally so the dashboard reflects it.
        db.update_issue_status(issue_number, repo_url, "executed", final_comment)

        return f"✅ Success: {result}"

    except Exception as e:
        # If it's a gr.Error, let it raise to show popup
        if isinstance(e, gr.Error):
            raise e
        # If other error, format it
        return f"❌ Error: {str(e)}"
487
+
488
def generate_priority_report(repo_url, provider, model, api_key, request: gr.Request = None):
    """
    Generates the priority (strategy) report using Sidebar configs.

    Streams HTML chunks from the MCP "/prioritize_open_issues" tool.
    Handles tuple results (html, thought) by yielding only the HTML part.
    Raises gr.Error (via _validate_api_keys) when a user key is required.
    """
    gr.Info(f"🧠 Generating Strategy Report for {repo_url}... Please wait.")
    # Validate, then pass the user-supplied key itself. The old code used the
    # validator's return value (None) as the key, so the MCP call never
    # received the user's key.
    _validate_api_keys(api_key, request)
    llm_api_key = api_key

    try:
        client = Client(MCP_SERVER_URL)
        job = client.submit(
            repo_url,
            provider,
            model,
            llm_api_key,
            api_name="/prioritize_open_issues"
        )

        for tuple_result in job:
            if isinstance(tuple_result, (list, tuple)) and len(tuple_result) > 0:
                yield tuple_result[0]
            else:
                yield tuple_result

    except Exception as e:
        # This function is a generator: "return value" in the except block
        # was silently discarded, so errors never reached the UI. Yield it.
        yield f"❌ Error: {str(e)}"
515
+
516
def get_repo_choices():
    """Return known repository URLs, guaranteeing DEFAULT_REPO is offered first."""
    choices = db.fetch_distinct_repos()
    if DEFAULT_REPO not in choices:
        choices = [DEFAULT_REPO] + choices
    return choices
522
+
523
# UI LAYOUT
# Optional project stylesheet; a missing file simply means no extra CSS.
css_code = ""
try:
    with open("./style.css", "r", encoding="utf-8") as f:
        css_code += f.read() + "\n"
except FileNotFoundError:
    pass
530
+
531
# JS
# Injected into the page <head>: convert_timestamps() localizes the ISO
# timestamps written by _format_logs_to_html; print_report() opens the
# priority report in a printable popup window.
APP_HEAD = """
<script>
window.convert_timestamps = function() {
    console.log("🕒 Converting timestamps...");
    const spans = document.querySelectorAll('.local-time');
    spans.forEach(span => {
        const ts = span.getAttribute('data-timestamp');
        if (ts) {
            const date = new Date(ts);
            span.innerText = date.toLocaleTimeString([], { hour12: false });
        }
    });
}
window.print_report = function() {
    const report = document.getElementById('prio-report');
    if (!report) return;

    // Get inner HTML content (within shadow DOM or Gradio div)
    // Gradio 5 encapsulates, so we need to find real content
    const content = report.querySelector('.prose') || report;

    const printWindow = window.open('', '', 'height=600,width=800');
    printWindow.document.write('<html><head><title>Priority Report</title>');
    printWindow.document.write('<style>body{font-family:sans-serif; padding:20px;} table{width:100%; border-collapse:collapse;} th,td{border:1px solid #ddd; padding:8px;}</style>');
    printWindow.document.write('</head><body>');
    printWindow.document.write(content.innerHTML);
    printWindow.document.write('</body></html>');
    printWindow.document.close();
    printWindow.print();
}
</script>
"""
564
+
565
# App-wide Gradio theme: Default base with blue/teal hues and explicit
# light/dark overrides for body, blocks, panels, inputs, and buttons.
theme = gr.themes.Default(
    primary_hue='blue',
    secondary_hue='teal',
    neutral_hue='neutral'
).set(
    body_background_fill='*neutral_100',
    body_background_fill_dark='*neutral_900',
    body_text_color='*neutral_700',
    body_text_color_dark='*neutral_200',
    body_text_weight='400',
    link_text_color='*primary_500',
    link_text_color_dark='*primary_400',
    code_background_fill='*neutral_100',
    code_background_fill_dark='*neutral_800',
    shadow_drop='0 1px 3px rgba(0,0,0,0.1)',
    shadow_inset='inset 0 2px 4px rgba(0,0,0,0.05)',
    block_background_fill='*neutral_50',
    block_background_fill_dark='*neutral_700',
    block_border_color='*neutral_200',
    block_border_color_dark='*neutral_600',
    block_border_width='1px',
    block_border_width_dark='1px',
    block_label_background_fill='*primary_50',
    block_label_background_fill_dark='*primary_600',
    block_label_text_color='*primary_600',
    block_label_text_color_dark='*primary_50',
    panel_background_fill='white',
    panel_background_fill_dark='*neutral_800',
    panel_border_color='*neutral_200',
    panel_border_color_dark='*neutral_700',
    panel_border_width='1px',
    panel_border_width_dark='1px',
    input_background_fill='white',
    input_background_fill_dark='*neutral_800',
    input_border_color='*neutral_300',
    input_border_color_dark='*neutral_700',
    slider_color='*primary_500',
    slider_color_dark='*primary_400',
    button_primary_background_fill='*primary_600',
    button_primary_background_fill_dark='*primary_500',
    button_primary_background_fill_hover='*primary_700',
    button_primary_background_fill_hover_dark='*primary_400',
    button_primary_border_color='transparent',
    button_primary_border_color_dark='transparent',
    button_primary_text_color='white',
    button_primary_text_color_dark='white',
    button_secondary_background_fill='*neutral_200',
    button_secondary_background_fill_dark='*neutral_600',
    button_secondary_background_fill_hover='*neutral_300',
    button_secondary_background_fill_hover_dark='*neutral_500',
    button_secondary_border_color='transparent',
    button_secondary_border_color_dark='transparent',
    button_secondary_text_color='*neutral_700',
    button_secondary_text_color_dark='*neutral_200'
)
620
+
621
+ with gr.Blocks(title="GitRepo Inspector", theme=theme, css=css_code, head=APP_HEAD) as app:
622
+
623
+ # STATES
624
+ current_sort = gr.State(DEFAULT_SORT)
625
+ issues_cache = gr.State(pd.DataFrame())
626
+ current_page = gr.State(1)
627
+ sel_issue_state = gr.State()
628
+ sel_repo_state = gr.State()
629
+ agent_state = gr.State(value=None)
630
+
631
+ with gr.Sidebar(position="left", open=True, label="Settings"):
632
+ gr.Markdown("### ⚙️ Configuration")
633
+
634
+ # Repository Selection
635
+ repo_choices = get_repo_choices()
636
+ global_repo_url = gr.Dropdown(
637
+ elem_classes="custom-dropdown",
638
+ choices=repo_choices,
639
+ value=DEFAULT_REPO,
640
+ label="Repository",
641
+ allow_custom_value=True,
642
+ interactive=True,
643
+ info="Select existing or type new URL"
644
+ )
645
+ with gr.Row():
646
+ sync_repo_btn = ButtonPlus("🔄 Sync This Repo", help="Update Repository Issues Cache. This may take a while.")
647
+ sync_status = gr.Markdown(visible=True, value="Ready to sync.")
648
+
649
+ global_github_token = gr.Textbox(
650
+ label="GitHub Personal Token",
651
+ type="password",
652
+ placeholder="ghp_...",
653
+ info="Required for 'Approve' and 'Prioritize'"
654
+ )
655
+
656
+ gr.Markdown("---")
657
+ gr.Markdown("### 🧠 AI Model Configuration")
658
+ gr.Markdown("(For Manual Actions)")
659
+
660
+ default_provider = "gemini"
661
+ global_provider = gr.Dropdown(
662
+ choices=list(AVAILABLE_MODELS_BY_PROVIDER.keys()),
663
+ value=default_provider,
664
+ label="LLM Provider"
665
+ )
666
+
667
+ # Use default provider list
668
+ global_model = gr.Dropdown(
669
+ choices=AVAILABLE_MODELS_BY_PROVIDER[default_provider],
670
+ value=AVAILABLE_MODELS_BY_PROVIDER[default_provider][0],
671
+ label="Model Name",
672
+ allow_custom_value=True,
673
+ interactive=True
674
+ )
675
+
676
+ global_llm_key = gr.Textbox(
677
+ label="LLM API Key",
678
+ type="password",
679
+ placeholder="your API Key...",
680
+ info="Required for approval and reanalysis actions"
681
+ )
682
+
683
+ gr.Markdown("---")
684
+ log_limit_slider = gr.Slider(
685
+ minimum=10,
686
+ maximum=200,
687
+ value=50,
688
+ step=10,
689
+ label="Log Lines to Fetch"
690
+ )
691
+
692
+ gr.HTML(_get_custom_header())
693
+
694
+ # Control Bar
695
+ with gr.Row():
696
+ with gr.Column(scale=1):
697
+ pass
698
+ with gr.Column(scale=0, min_width=150):
699
+ refresh_global = gr.Button("🔄 Refresh Data", variant="secondary")
700
+
701
+ with gr.Sidebar(position="right", open=False, width=400):
702
+ with gr.Column(scale=1, elem_classes="column-container", min_width=400):
703
+ gr.Markdown("### 🤖 AI Assistant")
704
+ with gr.Column(scale=2):
705
+ chatbot = gr.Chatbot(
706
+ elem_classes="chatbot-container",
707
+ type="messages",
708
+ height=500,
709
+ show_label=False,
710
+ show_copy_button=True,
711
+ avatar_images=(
712
+ "https://huggingface.co/datasets/DEVAIEXP/assets/resolve/main/user.png",
713
+ "https://huggingface.co/datasets/DEVAIEXP/assets/resolve/main/gemini-icon.png"
714
+ )
715
+ )
716
+ with gr.Row():
717
+ msg = gr.Textbox(label="Input Message", max_lines=2, scale=4, placeholder="Ask: 'What's up with issue #123?'")
718
+ send_btn = gr.Button("Send", variant="primary", scale=1)
719
+ with gr.Row():
720
+ clear_chat_btn = gr.Button("🧹 Clear History")
721
+
722
+ chat_trigger_issue = gr.Number(visible=False)
723
+ def clear_chat():
724
+ return []
725
+
726
+ clear_chat_btn.click(
727
+ fn=clear_chat,
728
+ inputs=None,
729
+ outputs=[chatbot]
730
+ )
731
+ def init_agent():
732
+ return create_dashboard_agent()
733
+
734
+ def interact(agent, prompt, history, current_repo, provider, model, token):
735
+ agent_state = agent
736
+ if agent_state is None:
737
+ agent_state = init_agent()
738
+ if agent_state is None:
739
+ history.append(gr.ChatMessage(role="user", content=prompt, metadata={"status": "done"}))
740
+ history.append(gr.ChatMessage(
741
+ role="assistant",
742
+ content="❌ Failed to initialize agent",
743
+ metadata={"status": "done"}
744
+ ))
745
+ yield history, "", gr.skip(), agent_state
746
+ return
747
+ time.sleep(2)
748
+
749
+ context_prompt = f"""
750
+ User is looking at repository: '{current_repo}'
751
+ Selected analysis provider: '{provider}'
752
+ Selected model: '{model}'
753
+ GitHub Token provided: {'Yes' if token else 'No'}
754
+
755
+ User Query: {prompt}
756
+ """
757
+ history.append(gr.ChatMessage(role="user", content=prompt))
758
+ yield history, "", gr.skip(), agent_state
759
+
760
+ full_response = ""
761
+ detected_issue = gr.skip(), agent_state
762
+
763
+ if agent_state:
764
+ try:
765
+ for chunk in stream_to_gradio(agent_state, context_prompt):
766
+ if isinstance(chunk, gr.ChatMessage):
767
+ history.append(chunk)
768
+ elif isinstance(chunk, str):
769
+ if history and history[-1].role == "assistant":
770
+ history[-1].content += chunk
771
+ else:
772
+ history.append(gr.ChatMessage(role="assistant", content=chunk))
773
+
774
+ content = chunk.content if isinstance(chunk, gr.ChatMessage) else chunk
775
+ full_response += str(content)
776
+ yield history, "", gr.skip(), agent_state
777
+
778
+ # Check Trigger
779
+ triggers = re.findall(r"\[VIEW:#(\d+)\]", full_response)
780
+ detected_issue = None
781
+ if triggers:
782
+ # Get the last mentioned (or first, your choice)
783
+ # Usually the last is the conclusion focus
784
+ detected_issue = int(triggers[-1])
785
+
786
+ # Remove ALL [VIEW:#...] tags from final message
787
+ last_msg = history[-1]
788
+
789
+ if isinstance(last_msg, gr.ChatMessage):
790
+ clean_content = re.sub(r"\[VIEW:#\d+\]", "", last_msg.content).strip()
791
+ last_msg.content = clean_content
792
+ elif isinstance(last_msg, dict):
793
+ last_msg['content'] = re.sub(r"\[VIEW:#\d+\]", "", last_msg['content']).strip()
794
+
795
+ yield history, "", detected_issue, agent_state
796
+
797
+ except Exception as e:
798
+ history.append(gr.ChatMessage(role="assistant", content=f"Error: {e}"))
799
+ yield history, "", gr.skip(), agent_state
800
+ else:
801
+ history.append(gr.ChatMessage(role="assistant", content="Agent failed to initialize."))
802
+ yield history, "", gr.skip(), agent_state
803
+
804
+ submit_event = gr.on(
805
+ triggers=[send_btn.click, msg.submit],
806
+ fn=interact,
807
+ inputs=[
808
+ agent_state,
809
+ msg,
810
+ chatbot,
811
+ global_repo_url,
812
+ global_provider,
813
+ global_model,
814
+ global_github_token
815
+ ],
816
+ outputs=[chatbot, msg, chat_trigger_issue, agent_state],
817
+ show_progress="hidden"
818
+ )
819
+
820
+ with gr.Row():
821
+ with gr.Column(scale=3, elem_classes="column-container"):
822
+ # MAIN CONTENT
823
+ with gr.Tabs():
824
+ with gr.TabItem("📋 Issues Overview"):
825
+ with gr.Row():
826
+ # Left: Table
827
+ with gr.Column():
828
+ view_filter = gr.Radio(
829
+ ["Action Required", "All Issues"],
830
+ value="Action Required",
831
+ label="View Filter",
832
+ show_label=False,
833
+ container=False
834
+ )
835
+ html_table_output = HTMLPlus(
836
+ label="Issue List",
837
+ selectable_elements=["th", "tr"]
838
+ )
839
+ with gr.Row(elem_id="pagination-row"):
840
+ btn_prev = gr.Button("◀", elem_classes="pagination-btn", size="sm")
841
+ page_display = gr.Markdown("Page 1 of 1", elem_id="page_label")
842
+ btn_next = gr.Button("▶", elem_classes="pagination-btn", size="sm")
843
+
844
+ # Details & Action
845
+ with gr.Group(visible=True):
846
+ with gr.Accordion("📄 Issue Analysis Detail", open=False, visible=False) as issue_analysis_accordion:
847
+ with gr.Row():
848
+ with gr.Column(scale=3):
849
+ detail_view = gr.HTML(label="Full Report")
850
+
851
+ with gr.Column(scale=1, elem_classes="action-console-col"):
852
+ with gr.Group() as action_group:
853
+ gr.Markdown("#### ⚡ Action Console")
854
+ action_summary = gr.Markdown("Select an issue.")
855
+ action_comment_input = gr.Textbox(label="Proposed Comment", lines=3, interactive=True)
856
+
857
+ reanalyze_btn = gr.Button("🔄 Re-Analyze Issue")
858
+
859
+ with gr.Column(visible=False) as auth_box:
860
+ approve_btn = gr.Button("✅ Approve Action", variant="primary")
861
+ exec_status = gr.Markdown()
862
+ with gr.Accordion("🧠 Agent Thought Process", open=False, visible=False) as thought_accordion:
863
+ thought_view = gr.Markdown("Select an issue to see the reasoning.")
864
+ gr.Markdown("---")
865
+ # Charts
866
+ gr.Markdown("### 📊 Analytics Overview")
867
+ with gr.Row():
868
+ with gr.Column():
869
+ stats_plot = gr.Plot(label="Distribution", show_label=False)
870
+ with gr.Column():
871
+ funnel_plot = gr.Plot(label="Efficiency", show_label=False)
872
+ with gr.Column():
873
+ timeline_plot = gr.Plot(label="Timeline", show_label=False)
874
+
875
+ with gr.TabItem("🥇 Prioritize"):
876
+ gr.Markdown("### Strategic Backlog Prioritization")
877
+ with gr.Row():
878
+ prio_btn = gr.Button("Generate Strategy Report 🧠", variant="primary")
879
+ print_btn = gr.Button("🖨️ Print Report", variant="secondary")
880
+ with gr.Group():
881
+ prio_out = gr.HTML(label="AI Strategy Report", elem_id="prio-report")
882
+
883
+ # BOTTOM BAR
884
+ with BottomBar(label="🤖 AI Assistant", bring_to_front=False, height=320, open=False, rounded_borders=True):
885
+ gr.Markdown("### 📊 Workflow Agent Activity Logs", elem_id="logs-header")
886
+ with gr.Row():
887
+ auto_refresh_timer = gr.Timer(value=30, active=True)
888
+ with gr.Column(scale=1):
889
+ refresh_interval = gr.Slider(
890
+ info="Auto-refresh interval",
891
+ minimum=30,
892
+ maximum=300,
893
+ value=30,
894
+ step=30,
895
+ label="Auto-Refresh (seconds)",
896
+ interactive=True,
897
+ )
898
+ with gr.Column(scale=6):
899
+ activity_log = gr.HTML(label="Trace Log", elem_id="trace-log")
900
+
901
+ # EVENTS
902
+ trigger_outputs = [
903
+ detail_view,
904
+ action_comment_input,
905
+ exec_status,
906
+ auth_box,
907
+ reanalyze_btn,
908
+ action_summary,
909
+ thought_view,
910
+ issue_analysis_accordion,
911
+ thought_accordion,
912
+ chat_trigger_issue
913
+ ]
914
def handle_chat_view_trigger(issue_number, repo_url):
    """React to the chat agent requesting an issue view via [VIEW:#N].

    When no issue number is set, every wired output is left untouched.
    """
    if not issue_number:
        # One gr.skip() per wired output component.
        return tuple(gr.skip() for _ in range(len(trigger_outputs)))

    # Brief wait so the freshly written analysis is visible in the DB
    # before we read it back. NOTE(review): fixed 3s delay — confirm the
    # write path really needs this long.
    time.sleep(3)

    loaded = load_issue_details(issue_number, repo_url)
    # Trailing None resets the chat_trigger_issue state component.
    return loaded + (None,)
924
+
925
+ chat_trigger_issue.change(
926
+ fn=handle_chat_view_trigger,
927
+ inputs=[chat_trigger_issue, global_repo_url],
928
+ outputs=trigger_outputs
929
+ )
930
+
931
+ # Refresh Logic
932
def hard_refresh(filter_val, sort_val, log_lim, repo_url=None):
    """Refetch all dashboard data and reset the detail/action panels.

    Returns a 15-tuple in the exact order of `common_outputs`:
    stats plot, funnel plot, timeline plot, table HTML, activity log,
    issues cache, page label, page number, then reset values for the
    detail view, action summary, auth box, exec status, thought
    accordion, issue-analysis accordion and thought view.
    """
    repo = repo_url if repo_url else "https://github.com/gradio-app/gradio"
    donut_fig, funnel_fig, timeline_fig, full_df, logs = fetch_fresh_data_to_cache(log_lim, repo)
    html, label, page = render_from_cache(full_df, filter_val, sort_val, 1)

    return (
        donut_fig,                              # stats_plot
        funnel_fig,                             # funnel_plot (FIX: was timeline_fig — figs were swapped vs. the outputs wiring)
        timeline_fig,                           # timeline_plot (FIX: was funnel_fig)
        html,                                   # html_table_output
        logs,                                   # activity_log
        full_df,                                # issues_cache
        label,                                  # page_display
        page,                                   # current_page
        "",                                     # detail_view
        "Select an issue.",                     # action_summary
        gr.update(visible=False),               # auth_box
        "",                                     # exec_status
        gr.update(visible=False),               # thought_accordion
        gr.update(visible=False, open=False),   # issue_analysis_accordion
        ""                                      # thought_view
    )
955
+
956
+ common_outputs = [
957
+ stats_plot,
958
+ funnel_plot,
959
+ timeline_plot,
960
+ html_table_output,
961
+ activity_log,
962
+ issues_cache,
963
+ page_display,
964
+ current_page,
965
+ detail_view,
966
+ action_summary,
967
+ auth_box,
968
+ exec_status,
969
+ thought_accordion,
970
+ issue_analysis_accordion,
971
+ thought_view
972
+ ]
973
+
974
+ # Added log_limit_slider to inputs
975
+ app.load(
976
+ init_agent,
977
+ outputs=[agent_state]
978
+ )
979
+ app.load(
980
+ fn=hard_refresh,
981
+ inputs=[view_filter, current_sort, log_limit_slider, global_repo_url],
982
+ outputs=common_outputs,
983
+ show_progress_on=[
984
+ html_table_output,
985
+ stats_plot,
986
+ funnel_plot,
987
+ timeline_plot,
988
+ ]
989
+ ).success(
990
+ fn=None,
991
+ js="() => window.convert_timestamps()"
992
+ )
993
+
994
+ refresh_global.click(
995
+ fn=hard_refresh,
996
+ inputs=[view_filter, current_sort, log_limit_slider, global_repo_url],
997
+ outputs=common_outputs,
998
+ show_progress_on=[
999
+ html_table_output,
1000
+ stats_plot,
1001
+ funnel_plot,
1002
+ timeline_plot,
1003
+ ]
1004
+ ).then(
1005
+ fn=None,
1006
+ js="() => window.convert_timestamps()"
1007
+ )
1008
def quick_log_refresh(log_lim):
    """Fetch the newest agent log rows and render them as HTML (timer tick)."""
    logs_df = db.fetch_agent_logs(limit=log_lim)
    return _format_logs_to_html(logs_df)
1011
+
1012
+ auto_refresh_timer.tick(
1013
+ fn=quick_log_refresh,
1014
+ inputs=[log_limit_slider],
1015
+ outputs=[activity_log]
1016
+ ).then(
1017
+ fn=None,
1018
+ js="() => window.convert_timestamps()"
1019
+ )
1020
+
1021
+ # Update timer when slider changes
1022
+ refresh_interval.change(
1023
+ fn=lambda x: gr.Timer(value=x),
1024
+ inputs=[refresh_interval],
1025
+ outputs=[auto_refresh_timer]
1026
+ )
1027
+
1028
def update_dashboard_models(provider):
    """Refresh the model dropdown choices for the selected provider."""
    available = AVAILABLE_MODELS_BY_PROVIDER.get(provider, [])
    # Default to the provider's first model; None when the list is empty.
    default = available[0] if available else None
    return gr.update(choices=available, value=default)
1033
+
1034
+ global_provider.change(
1035
+ fn=update_dashboard_models,
1036
+ inputs=[global_provider],
1037
+ outputs=[global_model]
1038
+ )
1039
+ # Soft Updates
1040
def soft_update(df, filter_val, sort_val):
    """Re-render page 1 of the issue table from the cached DataFrame (no refetch)."""
    table_html, page_label, page_num = render_from_cache(df, filter_val, sort_val, 1)
    return table_html, page_label, page_num
1043
+
1044
+ view_filter.change(fn=soft_update, inputs=[issues_cache, view_filter, current_sort], outputs=[html_table_output, page_display, current_page])
1045
+
1046
def change_page(direction, current, df, filter_val, sort_val):
    """Move the table one page in `direction` (+1/-1) and re-render from cache.

    NOTE(review): assumes render_from_cache clamps out-of-range page
    numbers and reports the settled page back — confirm.
    """
    requested = current + direction
    table_html, page_label, settled_page = render_from_cache(df, filter_val, sort_val, requested)
    return table_html, page_label, settled_page
1050
+
1051
+ btn_prev.click(fn=lambda p, df, f, s: change_page(-1, p, df, f, s), inputs=[current_page, issues_cache, view_filter, current_sort], outputs=[html_table_output, page_display, current_page])
1052
+ btn_next.click(fn=lambda p, df, f, s: change_page(1, p, df, f, s), inputs=[current_page, issues_cache, view_filter, current_sort], outputs=[html_table_output, page_display, current_page])
1053
+
1054
+ # Selection
1055
def handle_table_interaction(evt: gr.SelectData, df_cache, view_filter, sort_data):
    """Dispatch clicks on the HTML issue table.

    Header ("th") clicks re-sort the table; row ("tr") clicks open the
    selected issue's details. Returns a 13-tuple matching the wired
    outputs, or a single gr.skip() to leave everything unchanged.
    """
    payload = evt.value

    if evt.index == "th":
        # Header click: the cell carries the column to sort by.
        column = payload.get('sortCol') or payload.get('sort-col')
        if not column:
            return gr.skip()
        # Re-clicking the active column flips direction; a new column
        # starts descending.
        if sort_data['col'] == column:
            ascending = not sort_data['asc']
        else:
            ascending = False
        updated_sort = {"col": column, "asc": ascending}
        table_html, _, _ = render_from_cache(df_cache, view_filter, updated_sort, page=1)
        # Only sort state and the table change; skip the other 11 outputs.
        return (updated_sort, table_html) + tuple(gr.skip() for _ in range(11))

    elif evt.index == "tr":
        # Row click: the row carries the issue number and repo URL.
        issue_num = payload.get('issueNumber') or payload.get('issue-number')
        repo_url = payload.get('repoUrl') or payload.get('repo-url')
        if not issue_num:
            return gr.skip()
        (report, action_text, status, auth_vis, reanalyze_vis,
         summary_text, thought_text, issue_accordion, thought_accordion) = load_issue_details(issue_num, repo_url)

        return (
            gr.skip(),          # current_sort unchanged
            gr.skip(),          # html_table_output unchanged
            report,             # detail_view
            action_text,        # action_comment_input
            auth_vis,           # auth_box
            issue_num,          # sel_issue_state
            repo_url,           # sel_repo_state
            "",                 # exec_status cleared
            reanalyze_vis,      # reanalyze_btn
            thought_accordion,  # thought_accordion
            issue_accordion,    # issue_analysis_accordion
            summary_text,       # action_summary
            thought_text        # thought_view
        )
    return gr.skip()
1101
+
1102
+ html_table_output.select(
1103
+ fn=handle_table_interaction,
1104
+ inputs=[issues_cache, view_filter, current_sort],
1105
+ outputs=[
1106
+ current_sort,
1107
+ html_table_output,
1108
+ detail_view,
1109
+ action_comment_input,
1110
+ auth_box,
1111
+ sel_issue_state,
1112
+ sel_repo_state,
1113
+ exec_status,
1114
+ reanalyze_btn,
1115
+ thought_accordion,
1116
+ issue_analysis_accordion,
1117
+ action_summary,
1118
+ thought_view
1119
+ ],
1120
+ show_progress_on=[issue_analysis_accordion]
1121
+ )
1122
+
1123
+ # Actions
1124
+ approve_btn.click(
1125
+ fn=execute_approval_workflow,
1126
+ inputs=[sel_issue_state, sel_repo_state, global_github_token, action_comment_input],
1127
+ outputs=[exec_status]
1128
+ ).success(
1129
+ fn=hard_refresh,
1130
+ inputs=[view_filter, current_sort, log_limit_slider, global_repo_url],
1131
+ outputs=common_outputs,
1132
+ show_progress_on=[
1133
+ html_table_output,
1134
+ stats_plot,
1135
+ funnel_plot,
1136
+ timeline_plot,
1137
+ ]
1138
+ )
1139
+
1140
+ prio_btn.click(
1141
+ fn=generate_priority_report,
1142
+ inputs=[global_repo_url, global_provider, global_model, global_llm_key],
1143
+ outputs=[prio_out],
1144
+ show_progress="hidden"
1145
+ )
1146
+ print_btn.click(fn=None, js="() => window.print_report()")
1147
+ global_repo_url.change(
1148
+ fn=hard_refresh,
1149
+ inputs=[view_filter, current_sort, log_limit_slider, global_repo_url],
1150
+ outputs=common_outputs,
1151
+ show_progress_on=[
1152
+ html_table_output,
1153
+ stats_plot,
1154
+ funnel_plot,
1155
+ timeline_plot,
1156
+ ]
1157
+ )
1158
+ # Sync Button Logic
1159
+ sync_repo_btn.click(
1160
+ fn=trigger_sync_action,
1161
+ inputs=[global_repo_url, global_github_token],
1162
+ outputs=[sync_status],
1163
+ show_progress="hidden",
1164
+ concurrency_limit=10
1165
+ ).success(
1166
+ fn=hard_refresh,
1167
+ inputs=[view_filter, current_sort, log_limit_slider, global_repo_url],
1168
+ outputs=common_outputs,
1169
+ show_progress_on=[
1170
+ html_table_output,
1171
+ stats_plot,
1172
+ funnel_plot,
1173
+ timeline_plot,
1174
+ ]
1175
+ )
1176
+ reanalyze_btn.click(
1177
+ fn=trigger_manual_reanalysis,
1178
+ inputs=[
1179
+ sel_issue_state,
1180
+ sel_repo_state,
1181
+ global_provider,
1182
+ global_model,
1183
+ global_github_token,
1184
+ global_llm_key
1185
+ ],
1186
+ outputs=[exec_status]
1187
+ ).success(
1188
+ fn=hard_refresh,
1189
+ inputs=[view_filter, current_sort, log_limit_slider, global_repo_url],
1190
+ outputs=common_outputs,
1191
+ show_progress_on=[
1192
+ html_table_output,
1193
+ stats_plot,
1194
+ funnel_plot,
1195
+ timeline_plot,
1196
+ ]
1197
+ )
1198
+
1199
+ if __name__ == "__main__":
1200
+ app.launch()
assets/gemini.svg ADDED
assets/user.png ADDED
config/constants.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# General
# Model catalog keyed by provider id. The first entry of each list is
# used as that provider's default model in the dashboard UI.
AVAILABLE_MODELS_BY_PROVIDER = {
    "gemini": ["gemini-2.0-flash"],
    "sambanova": [
        "DeepSeek-R1",
        "DeepSeek-V3-0324",
        "DeepSeek-V3.1",
        "Meta-Llama-3.1-8B-Instruct",
        "Meta-Llama-3.3-70B-Instruct",
        "gpt-oss-120b",
    ],
    "openai": ["gpt-4o-mini", "gpt-4o"],
    "nebius": [
        "deepseek-ai/DeepSeek-R1-0528",
        "meta-llama/Llama-3.3-70B-Instruct",
        "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-fast",
    ],
}
config/gitrepo_agent_prompt.yaml ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ system_prompt: |-
2
+ You are the GitRepo Inspector Agent. Your goal is to answer questions about GitHub issues using the provided tools.
3
+
4
+ **CONTEXT & OVERRIDES:**
5
+ - The user has a default repository and LLM selected in the UI. This is your primary context.
6
+ - HOWEVER, if the user's prompt explicitly asks to use a DIFFERENT provider or model (e.g., "using OpenAI"), you MUST override the default from the context and use the one specified in the prompt.
7
+ - **Priority: Prompt > Context**.
8
+
9
+ **RULES:**
10
+ - You MUST start every step with `Thought:`.
11
+ - Then, write Python code in a ` ```python ... ``` ` block.
12
+ - Call one tool at a time.
13
+ - Finish your work by calling `final_answer("your text")`.
14
+
15
+ **TOOLS:**
16
+ - `get_issue_status(issue_number, repo_url)`: Gets a pre-formatted AI report for a single issue.
17
+ - `search_issues(repo_url, author, verdict, query)`: Finds a LIST of issues.
18
+ - `trigger_live_analysis(issue_number, repo_url, github_token)`: Generate AI analysis report for a single issue.
19
+ - `web_search(query)`: Searches the web.
20
+
21
+ **DEFAULT REPO:**
22
+ If not specified, assume repo_url is `https://github.com/gradio-app/gradio`.
23
+
24
+ **UI CONTROL:**
25
+ - If your final answer is about ONE issue, append `[VIEW:#<issue_number>]` at the end of the text.
26
+
27
+ **MODEL-TO-PROVIDER MAPPING:**
28
+ - Gemini models: 'gemini-2.0-flash'
29
+ - OpenAI models: 'gpt-4o', 'gpt-4o-mini'
30
+ - Nebius models: 'deepseek-ai/DeepSeek-R1-0528', 'meta-llama/Llama-3.3-70B-Instruct', 'nvidia/Llama-3_1-Nemotron-Ultra-253B-v1', 'meta-llama/Meta-Llama-3.1-8B-Instruct-fast'
31
+ - SambaNova models: 'DeepSeek-R1', 'DeepSeek-V3-0324', 'DeepSeek-V3.1', 'Meta-Llama-3.1-8B-Instruct', 'Meta-Llama-3.3-70B-Instruct', 'gpt-oss-120b'
32
+
33
+ If a user specifies a model name, you MUST infer the correct provider from this map.
34
+
35
+ **EXAMPLES:**
36
+
37
+ ---
38
+ Task: "Which repository are we analyzing?"
39
+ Thought: I know the context. I will answer directly using `final_answer`.
40
+ ```python
41
+ final_answer("We are currently analyzing the Gradio repository: https://github.com/gradio-app/gradio")
42
+ ```
43
+ ---
44
+
45
+ Task: "whats up with issue 123?"
46
+ Thought: The user wants to see everything about issue 123. I will get the report and trigger the UI to show the full details.
47
+ ```python
48
+ import re
49
+ report_text = get_issue_status(issue_number=123, repo_url="https://github.com/gradio-app/gradio")
50
+
51
+ if "ERROR" in report_text:
52
+ final_answer(f"No report found for #123. Should I analyze it now?")
53
+ else:
54
+ verdict_match = re.search(r"VERDICT: (\w+)", report_text)
55
+ verdict = verdict_match.group(1) if verdict_match else "unknown"
56
+ final_answer(f"The verdict for issue #123 is **{verdict}**. I've opened the full report for you. [VIEW:#123]")
57
+ ```
58
+
59
+ ---
60
+ Task: "What is the status of issue 456?"
61
+ Thought: The user just wants the status. I will get the report, extract only the verdict, and give a short answer without triggering the UI.
62
+ ```python
63
+ import re
64
+ report_text = get_issue_status(issue_number=456, repo_url="https://github.com/gradio-app/gradio")
65
+
66
+ if "ERROR" in report_text:
67
+ final_answer("No analysis found in the database for issue #456")
68
+ else:
69
+ verdict_match = re.search(r"VERDICT: (\w+)", report_text)
70
+ verdict = verdict_match.group(1) if verdict_match else "unknown"
71
+ final_answer(f"The current AI-analyzed status for issue #456 is **{verdict}**.")
72
+ ```
73
+
74
+ ---
75
+ Task: "Show me the full report for issue 123"
76
+ Thought: The user wants to see the full report. I will use `get_issue_status` to get the data, but I only need to confirm it exists. The UI trigger will handle the display.
77
+ ```python
78
+ report_text = get_issue_status(issue_number=123, repo_url="https://github.com/gradio-app/gradio")
79
+
80
+ if "ERROR" in report_text:
81
+ final_answer("I couldn't find a report for that issue.")
82
+ else:
83
+ final_answer(f"Opening full report for issue #123... [VIEW:#123]")
84
+ ```
85
+
86
+ ---
87
+ Task: "Why was issue #123 marked as duplicate?"
88
+ Thought: The user wants the justification for a duplicate. I need to get the report with `get_issue_status` and then use Python's `re` module to parse the "Justification:" text from the report.
89
+ ```python
90
+ import re
91
+ report_text = get_issue_status(issue_number=123, repo_url="https://github.com/gradio-app/gradio")
92
+
93
+ if "duplicate" in report_text.lower():
94
+ justification_match = re.search(r"Justification:</strong>\s*(.*?)(</li>|<br>|\n)", report_text, re.DOTALL)
95
+
96
+ if justification_match:
97
+ reason = justification_match.group(1).strip()
98
+ final_answer(f"It was marked as duplicate because: '{reason}'. [VIEW:#123]")
99
+ else:
100
+ final_answer("The report confirms it's a duplicate, but the specific reason was not found. [VIEW:#123]")
101
+ ```
102
+
103
+ ---
104
+ Task: "Re-analyze issue #123 with OpenAI"
105
+ Thought: The user wants to re-analyze with a specific provider. The context says the user selected 'openai' and 'gpt-4o-mini' in the UI. I will use the `trigger_live_analysis` tool and pass these parameters.
106
+ ```python
107
+ result = trigger_live_analysis(
108
+ issue_number=123,
109
+ repo_url="https://github.com/gradio-app/gradio",
110
+ provider="openai",
111
+ model="gpt-4o-mini"
112
+ )
113
+ final_answer(f"Re-analysis with OpenAI is complete. [VIEW:#123]")
114
+ ```
115
+
116
+ ---
117
+ Task: "Analyze/Check issue #456 with Nebius"
118
+ Context: "Selected provider is 'gemini'"
119
+ Thought: The user's prompt explicitly asks to use 'Nebius', which overrides the default 'gemini' from the context. I will call `trigger_live_analysis` with `provider='nebius'`.
120
+ ```python
121
+ result = trigger_live_analysis(
122
+ issue_number=456,
123
+ repo_url="https://github.com/gradio-app/gradio",
124
+ provider="nebius" # Override
125
+ )
126
+ final_answer(f"Analysis with Nebius is complete. [VIEW:#456]")
127
+ ```
128
+
129
+ ---
130
+ Task: "Analyze/Check issue #456 with Nebius using the DeepSeek-R1 model"
131
+ Context: "Selected provider is 'gemini'"
132
+ Thought: The user is overriding both the provider and the model. I must call `trigger_live_analysis` with `provider='nebius'` and `model='deepseek-ai/DeepSeek-R1-0528'`.
133
+ ```python
134
+ result = trigger_live_analysis(
135
+ issue_number=456,
136
+ repo_url="https://github.com/gradio-app/gradio",
137
+ provider="nebius",
138
+ model="deepseek-ai/DeepSeek-R1-0528" # Full override
139
+ )
140
+ final_answer(f"Analysis with Nebius (DeepSeek) is complete. [VIEW:#456]")
141
+ ```
142
+
143
+ ---
144
+ Task: "Analyze/Check issue #789 with gpt-4o-mini"
145
+ Thought: The user specified the model 'gpt-4o-mini'. According to my mapping, this model belongs to the 'openai' provider. I will call the tool with both parameters.
146
+ ```python
147
+ result = trigger_live_analysis(
148
+ issue_number=789,
149
+ repo_url="https://github.com/gradio-app/gradio",
150
+ provider="openai", # I inferred this
151
+ model="gpt-4o-mini"
152
+ )
153
+ final_answer(f"Analysis with gpt-4o-mini is complete. [VIEW:#789]")
154
+ ```
155
+
156
+ ---
157
+ Task: "Is issue #123 a duplicate? Check now."
158
+ Thought: The user is asking for the status of a specific issue. I should first check the database using `get_issue_status` to see if we already have an analysis.
159
+ ```python
160
+ import re
161
+ report_text = get_issue_status(issue_number=123, repo_url="https://github.com/gradio-app/gradio")
162
+
163
+ if "duplicate" in report_text.lower():
164
+ final_answer(f"Yes, my last analysis shows issue #123 is a duplicate. I've opened the report. [VIEW:#123]")
165
+ elif "ERROR" in report_text:
166
+ final_answer("I don't have a report for #123. Should I run a full analysis now?")
167
+ else:
168
+ verdict_match = re.search(r"VERDICT: (\w+)", report_text)
169
+ verdict = verdict_match.group(1) if verdict_match else "unknown"
170
+ final_answer(f"My last analysis for issue #123 shows a verdict of **{verdict}**, not duplicate. [VIEW:#123]")
171
+ ```
172
+
173
+ ---
174
+ Task: "Find issues by abidlabs"
175
+ Thought: I need to search for multiple issues. I will use `search_issues`. It returns a formatted string list.
176
+ ```python
177
+ list_of_issues = search_issues(
178
+ repo_url="https://github.com/gradio-app/gradio",
179
+ author="abidlabs"
180
+ )
181
+ final_answer(list_of_issues)
182
+ ```
183
+
184
+ planning:
185
+ initial_plan: |-
186
+ You are a planner. Given a task, create a simple plan.
187
+ Task: {{task}}
188
+
189
+ 1. Facts: What do I know? (Issue #, Repo)
190
+ 2. Plan: Which tools to call?
191
+
192
+ Write the plan and end with <end_plan>.
193
+
194
+ update_plan_pre_messages: |-
195
+ Update the plan based on progress.
196
+
197
+ update_plan_post_messages: |-
198
+ Write the new plan and end with <end_plan>.
199
+
200
+ managed_agent:
201
+ task: |-
202
+ You are a helper agent. Task: {{task}}
203
+ report: |-
204
+ {{final_answer}}
205
+
206
+ final_answer:
207
+ pre_messages: |-
208
+ The agent has an answer.
209
+ post_messages: |-
210
+ {{task}}
pyproject.toml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.ruff]
2
+ line-length = 160
3
+
4
+ [tool.ruff.lint]
5
+ # Never enforce `E501` (line length violations).
6
+ ignore = ["C901", "E501", "E741", "F402", "F823"]
7
+ select = ["C", "E", "F", "I", "W"]
8
+
9
+ [tool.ruff.lint.isort]
10
+ lines-after-imports = 2
11
+ known-first-party = ["gitrepo_inspector_dashboard"]
12
+
13
+ [tool.ruff.format]
14
+ # Like Black, use double quotes for strings.
15
+ quote-style = "double"
16
+
17
+ # Like Black, indent with spaces, rather than tabs.
18
+ indent-style = "space"
19
+
20
+ # Like Black, respect magic trailing commas.
21
+ skip-magic-trailing-comma = false
22
+
23
+ # Like Black, automatically detect the appropriate line ending.
24
+ line-ending = "auto"
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ sqlalchemy
3
+ psycopg2-binary
4
+ python-dotenv
5
+ requests
6
+ plotly
7
+ markdown
8
+ gradio_client
9
+ gradio_htmlplus
10
+ gradio_bottombar
11
+ gradio_buttonplus
12
+ smolagents>=1.22.0
13
+ smolagents[mcp]>=1.22.0
14
+
services/agent_chat.py ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import markdown
4
+ import yaml
5
+ from dotenv import load_dotenv
6
+ from smolagents import CodeAgent, LiteLLMModel, tool
7
+ from smolagents.mcp_client import MCPClient as SmolMCPClient
8
+ from gradio_client import Client as GradioClient
9
+ from config.constants import AVAILABLE_MODELS_BY_PROVIDER
10
+ from services.db import save_analysis_report
11
+
12
+ load_dotenv()
13
+
14
+ MCP_SERVER_URL = os.getenv("MCP_SERVER_URL", "https://mcp-1st-birthday-gitrepo-inspector-mcp.hf.space/")
15
+ GEMINI_API_KEY = os.getenv("GOOGLE_API_KEY")
16
+
17
class ModelFixerWrapper(LiteLLMModel):
    """LiteLLMModel subclass that post-processes `generate` output.

    Normalizes the first fenced code block to an explicit ```python
    fence and prepends a "Thought:" line when the model emitted code
    without one, so the smolagents CodeAgent parser accepts the message.
    """

    def generate(self, messages, **kwargs):
        message = super().generate(messages, **kwargs)
        text = message.content if message.content else ""

        # Find the first fenced block, with or without a language tag.
        fence = re.search(r"```(python)?\n(.*?)\n```", text, re.DOTALL)
        if fence is not None:
            # Rebuild it with an explicit ```python tag.
            normalized = "```python\n" + fence.group(2).strip() + "\n```"
            text = text[:fence.start()] + normalized + text[fence.end():]
        # Without a fence there is nothing to normalize.

        # CodeAgent requires every step to begin with "Thought:".
        if "```" in text and "Thought:" not in text:
            text = "Thought: Executing.\n" + text

        message.content = text
        return message
47
+
48
+
49
@tool
def trigger_live_analysis(issue_number: int, repo_url: str, provider: str = "gemini", model: str = None, github_token: str = None) -> str:
    """
    FORCES a NEW, LIVE analysis of an issue, ignoring any cached data.
    Use this ONLY when the user explicitly asks to "re-analyze", "check again", "analyze now", or "run again".
    Do NOT use this for simple status checks.

    Args:
        issue_number: The issue number (e.g. 123).
        repo_url: The full repository URL.
        provider (str, optional): The AI provider name. Defaults to "gemini".
        model (str, optional): The model identifier to use for analysis.
            Defaults to "gemini-2.0-flash".
        github_token: Optional GitHub token for authentication.
    """
    try:
        # Fall back to the provider's default model when none was given.
        if not model:
            candidates = AVAILABLE_MODELS_BY_PROVIDER.get(provider.lower(), [])
            model = candidates[0] if candidates else "gemini-2.0-flash"

        # Use the plain Gradio client (not the MCP tool wrapper) so we
        # control the output format ourselves.
        client = GradioClient(MCP_SERVER_URL)
        result = client.predict(
            repo_url,
            int(issue_number),
            provider,
            model,
            github_token,
            None,
            api_name="/analyze_github_issue"
        )
        html_report, thought = result[0], result[1]

        # Map report badges to a verdict label; first match wins, same
        # precedence as the original if/elif chain.
        verdict = "unresolved"
        for badge, label in (
            ("✅ Resolved", "resolved"),
            ("⚠️ Possibly Resolved", "possibly_resolved"),
            ("🔥 Duplicate", "duplicate"),
        ):
            if badge in html_report:
                verdict = label
                break

        # Persist best-effort: a DB failure must not fail the analysis.
        try:
            save_analysis_report(
                repo_url=repo_url,
                issue_number=int(issue_number),
                provider=provider,
                model=model,
                verdict=verdict,
                body=html_report,
                thought=thought,
                action=None,
                priority=None
            )
            print(f"💾 Saved analysis for #{issue_number} to DB.")
        except Exception as e:
            print(f"⚠️ Failed to save report: {e}")

        # Strip HTML tags so the LLM reads plain text.
        clean_report = re.sub(r'<[^>]+>', '', html_report)

        return f"""
        ANALYSIS COMPLETED.

        --- THINKING PROCESS ---
        {thought}

        --- FINAL REPORT ---
        {clean_report}
        """
    except Exception as e:
        return f"Analysis failed: {e}"
127
+
128
@tool
def get_issue_status(issue_number: int, repo_url: str) -> str:
    """
    Retrieves the detailed AI analysis report for a SINGLE, SPECIFIC issue.
    Use this when the user asks for "details", "status of", "what's up with" a specific issue number.

    Args:
        issue_number: The specific issue number (e.g., 123).
        repo_url: The full repository URL.
    """
    import json

    try:
        client = GradioClient(MCP_SERVER_URL)

        result = client.predict(
            repo_url,
            issue_number,
            api_name="/get_issue_report"
        )

        # Normalize the MCP response into a dict.
        if isinstance(result, str):
            try:
                data = json.loads(result)
            except ValueError:  # json.JSONDecodeError subclasses ValueError
                # Not JSON — surface the raw text instead of crashing.
                return str(result)
        elif isinstance(result, dict):
            data = result
        else:
            return f"Unexpected format: {type(result)}"

        # A "no report" error gets a sentinel the agent prompt checks for.
        if "error" in data and "No AI report found" in data['error']:
            return f"ERROR: No analysis found in the database for issue #{issue_number}."

        if "error" in data:
            return f"Status: Not Found\nDetails: {data['error']}"

        # FIX: the previous version built an HTML/markdown `final_html`
        # from the report body and then never used it (dead code). The
        # raw report is returned unmodified for LLM readability.
        return f"""
        --- ISSUE REPORT #{data.get('issue')} ---
        VERDICT: {data.get('verdict')}

        --- ANALYSIS BODY ---
        {data.get('report')}

        --- PROPOSED ACTION ---
        {data.get('action')}
        """

    except Exception as e:
        return f"Failed to fetch status: {e}"
184
+
185
@tool
def search_issues(
    repo_url: str,
    query: str = "",
    issue_number: int = None,
    status: str = "open",
    verdict: str = None,
    author: str = None,
    limit: int = 5,
    state: str = None,
    max_results: int = None,
    per_page: int = None
) -> str:
    """
    Searches a LIST of issues based on KEYWORDS, author, or verdict.
    Use this for BROAD questions like "Are there issues about...?" or "Find all issues by...".
    DO NOT use this to get details of a single known issue number.

    Args:
        repo_url: The full repository URL.
        query: Text to search in title or body.
        issue_number: Specific issue ID to find.
        status: GitHub state ('open', 'closed'). Default: 'open'.
        verdict: AI Analysis Verdict ('resolved', 'duplicate', 'unresolved').
        author: Filter by issue author.
        limit: Max results to return. Default: 5.
        state: Alias for status parameter.
        max_results: Alias for limit parameter.
        per_page: Alias for limit parameter.
    """
    import json

    # Resolve alias parameters (LLMs use several common names for the
    # same concept); explicit aliases win over the canonical parameter.
    final_status = state if state else status
    final_limit = max_results if max_results else (per_page if per_page else limit)

    try:
        client = GradioClient(MCP_SERVER_URL)
        result = client.predict(
            repo_url,
            query,
            issue_number,
            final_status,
            verdict,
            author,
            final_limit,
            api_name="/search_issues"
        )

        # Normalize the MCP response.
        if isinstance(result, str):
            try:
                data = json.loads(result)
            except ValueError:  # FIX: was a bare except; json errors are ValueError
                return f"Error parsing JSON from MCP: {result}"
        else:
            data = result

        if not data:
            return "No issues found matching criteria."
        if isinstance(data, dict) and "error" in data:
            return f"Search Error: {data['error']}"
        if isinstance(data, str):
            return data

        # Format a compact, LLM-friendly listing.
        output = f"Found {len(data)} issues:\n"
        for item in data:
            if not isinstance(item, dict):
                continue
            i_id = item.get('id', '?')
            i_title = item.get('title', 'No Title')
            i_author = item.get('author', 'Unknown')
            i_verdict = item.get('verdict', 'pending')
            i_snippet = item.get('snippet', '')[:100].replace('\n', ' ')
            output += f"- #{i_id} '{i_title}' (by @{i_author}) [Verdict: {i_verdict}]\n"
            output += f"  Snippet: {i_snippet}...\n"

        return output

    except Exception as e:
        return f"Search tool failed: {str(e)}"
265
+
266
def create_dashboard_agent():
    """Build the dashboard CodeAgent: Gemini model + MCP tools + local wrappers.

    Returns the configured CodeAgent, or None when construction fails.
    """
    if not GEMINI_API_KEY:
        print("⚠️ Warning: GOOGLE_API_KEY not found.")

    try:
        print("🚀 Initializing Agent...")

        # Prompt templates live next to this package, under config/.
        prompt_path = os.path.join(os.path.dirname(__file__), "../config/gitrepo_agent_prompt.yaml")
        with open(prompt_path, 'r', encoding='utf-8') as fh:
            templates = yaml.safe_load(fh)

        llm = ModelFixerWrapper(
            model_id="gemini/gemini-2.0-flash",
            api_key=GEMINI_API_KEY,
            temperature=0.2
        )

        # Discover the MCP server's tools; smolagents expects the SSE
        # endpoint under /gradio_api/mcp/sse.
        sse_url = f"{MCP_SERVER_URL.rstrip('/')}/gradio_api/mcp/sse"
        mcp = SmolMCPClient({"url": sse_url, "transport": "sse"})
        discovered = mcp.get_tools()

        # Drop server tools that our local wrappers replace (or that the
        # dashboard has no use for).
        overridden = {
            'analyze_github_issue',
            'search_issues',
            'get_issue_report',
            'generate_sketch_to_ui',
            'generate_theme',
        }
        kept = [t for t in discovered if t.name not in overridden]

        agent = CodeAgent(
            tools=kept + [trigger_live_analysis, get_issue_status, search_issues],
            model=llm,
            prompt_templates=templates,
            additional_authorized_imports=["json", "re", "time", "datetime", "ast"],
            add_base_tools=False,
            max_steps=8,
            planning_interval=3
        )
        print("Agent initialized successfully!" if agent else "Agent not initialized!")
        return agent

    except Exception as e:
        print(f"❌ Error creating agent: {e}")
        import traceback
        traceback.print_exc()
        return None
services/charts.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import plotly.graph_objects as go
2
+ import pandas as pd
3
+
4
def create_verdict_donut(df: pd.DataFrame) -> go.Figure:
    """Build the verdict-distribution donut chart for the dashboard.

    Args:
        df: aggregate with 'verdict' and 'count' columns (one row per verdict).

    Returns:
        A plotly Figure; a transparent placeholder when df is empty.
    """
    if df.empty:
        return _create_empty_figure("No Data")

    # Fixed palette so every verdict keeps a stable color between refreshes.
    palette = {
        'resolved': '#10B981',           # Emerald 500
        'possibly_resolved': '#34D399',  # Emerald 400
        'duplicate': '#F59E0B',          # Amber 500
        'unresolved': '#64748B',         # Slate 500
        'error': '#EF4444',              # Red 500
        'unknown': '#CBD5E1',            # Slate 300
    }
    slice_colors = [palette.get(verdict, '#999') for verdict in df['verdict']]
    grand_total = df['count'].sum()

    donut = go.Pie(
        labels=df['verdict'],
        values=df['count'],
        hole=0.7,
        marker=dict(colors=slice_colors, line=dict(color='#1F2937', width=0)),
        textinfo='value',
        hoverinfo='label+percent',
        textfont=dict(size=14, color='white'),
    )
    fig = go.Figure(data=[donut])

    fig.update_layout(
        title=dict(
            text="Analysis Distribution",
            font=dict(size=14, color="#6B7280"),
            x=0.5,
            xanchor='center',
        ),
        showlegend=True,
        legend=dict(
            orientation="h",
            yanchor="top",
            y=-0.1,
            xanchor="center",
            x=0.5,
            font=dict(color="#9CA3AF", size=11),
        ),
        margin=dict(t=40, b=60, l=20, r=20),
        height=250,
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        # The big number in the donut hole is the total of all verdicts.
        annotations=[dict(text=str(grand_total), x=0.5, y=0.5, font_size=32, showarrow=False, font_color="#E5E7EB")],
        modebar_remove=['zoom', 'pan', 'select', 'lasso2d', 'zoomIn2d', 'zoomOut2d', 'autoScale2d', 'resetScale2d', 'toImage'],
    )

    return fig
62
+
63
def create_timeline_chart(full_df: pd.DataFrame) -> go.Figure:
    """Build an area chart of issue activity (rows per day).

    Args:
        full_df: issue rows with an 'updated_at' timestamp column; timestamps
            that fail to parse are coerced to NaT and dropped by the groupby.

    Returns:
        A plotly Figure; a transparent placeholder when full_df is empty.
    """
    if full_df.empty:
        return _create_empty_figure("No Timeline Data")

    frame = full_df.copy()
    frame['date'] = pd.to_datetime(frame['updated_at'], errors='coerce').dt.date
    per_day = frame.groupby('date').size().reset_index(name='count')

    fig = go.Figure()
    fig.add_trace(go.Scatter(
        x=per_day['date'],
        y=per_day['count'],
        mode='lines+markers',
        name='Activity',
        line=dict(color='#3B82F6', width=2, shape='spline'),
        marker=dict(size=4, color='#60A5FA', line=dict(width=0)),
        fill='tozeroy',
        fillcolor='rgba(59, 130, 246, 0.1)',
    ))

    fig.update_layout(
        title=dict(
            text="Activity Timeline (Last Updates)",
            font=dict(size=14, color="#6B7280"),
            x=0.5,
            xanchor='center',
        ),
        xaxis=dict(
            showgrid=False,
            color="#9CA3AF",
            gridcolor="rgba(255,255,255,0.1)",
            # Date axis: short month + day, e.g. "Jan 05".
            tickformat="%b %d",
        ),
        yaxis=dict(
            showgrid=True,
            gridcolor="rgba(255,255,255,0.05)",
            color="#9CA3AF",
            zeroline=False,
        ),
        margin=dict(t=40, b=20, l=30, r=10),
        height=250,
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        showlegend=False,
        modebar_remove=['zoom', 'pan', 'select', 'lasso2d', 'zoomIn2d', 'zoomOut2d', 'autoScale2d', 'resetScale2d', 'toImage'],
    )

    return fig
117
+
118
def _create_empty_figure(text):
    """Return a transparent placeholder figure that shows only `text`."""
    placeholder = go.Figure()
    placeholder.add_annotation(text=text, showarrow=False, font=dict(color="gray"))
    placeholder.update_layout(
        height=250,
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        # Hide both axes: only the centered annotation should be visible.
        xaxis=dict(visible=False),
        yaxis=dict(visible=False),
        modebar_remove=['zoom', 'pan', 'select', 'lasso2d', 'zoomIn2d', 'zoomOut2d', 'autoScale2d', 'resetScale2d', 'toImage'],
    )
    return placeholder
130
+
131
def create_efficiency_funnel(stats_df: pd.DataFrame, total_open_issues: int) -> go.Figure:
    """Build a funnel chart: total backlog -> analyzed by AI -> actionable findings.

    Args:
        stats_df: verdict/count aggregate (as returned by fetch_dashboard_stats).
        total_open_issues: number of open issues; the funnel's top stage.

    Returns:
        A plotly Figure; a transparent placeholder when stats_df is empty.
    """
    if stats_df.empty:
        return _create_empty_figure("No Data")

    total_backlog = total_open_issues
    total_analyzed = stats_df['count'].sum()

    # "Actionable" = verdicts where the AI produced something a maintainer can act on.
    ai_solved = stats_df[stats_df['verdict'].isin(['resolved', 'duplicate', 'possibly_resolved'])]['count'].sum()

    values = [total_backlog, total_analyzed, ai_solved]
    stages = ["Total Backlog", "Analyzed by AI", "Actionable Findings"]

    fig = go.Figure(go.Funnel(
        y=stages,
        x=values,
        # Show the absolute count plus the percentage of the initial stage.
        textinfo="value+percent initial",
        textfont=dict(color="#e5e5e5", size=12),
        marker=dict(color=["#64748B", "#4486F0", "#10B981"]),
        connector=dict(line=dict(color="rgba(128,128,128,0.5)", width=1))
    ))

    fig.update_layout(
        title=dict(
            text="AI Efficiency Funnel",
            font=dict(size=14, color="#6B7280"),
            x=0.5, xanchor='center'
        ),
        # BUGFIX: the previous axis config was copy-pasted from the timeline
        # chart and applied a DATE tick format (tickformat="%b %d") to the
        # funnel's numeric value axis, which garbles its tick labels.
        xaxis=dict(
            showgrid=False,
            color="#9CA3AF",
            gridcolor="rgba(255,255,255,0.1)"
        ),
        yaxis=dict(
            showgrid=True,
            gridcolor="rgba(255,255,255,0.1)",
            color="#9CA3AF",
            zeroline=False
        ),
        margin=dict(t=40, b=20, l=100, r=10),
        height=250,
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        showlegend=False,
        modebar_remove=['pan', 'select', 'lasso2d', 'autoScale2d', 'resetScale2d']
    )

    return fig
services/chat_utils.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import re
3
+ from smolagents.memory import ActionStep, PlanningStep, FinalAnswerStep
4
+ from smolagents.models import ChatMessageStreamDelta
5
+
6
def get_step_footnote_content(step_log: ActionStep | PlanningStep, step_name: str) -> str:
    """Render the small HTML footnote (token usage + duration) for a finished step."""
    parts = [f"**{step_name}**"]
    usage = getattr(step_log, 'token_usage', None)
    if usage is not None:
        parts.append(f" | Input tokens: {usage.input_tokens:,} | Output tokens: {usage.output_tokens:,}")
    timing = getattr(step_log, 'timing', None)
    if timing and timing.duration:
        parts.append(f" | Duration: {round(float(timing.duration), 2)}s")
    step_footnote = "".join(parts)
    return f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
14
+
15
def _clean_model_output(model_output: str) -> str:
    """Strip stray `<end_code>` markers the model sometimes leaves around code fences."""
    if not model_output:
        return ""
    cleaned = model_output.strip()
    # Applied in this order: marker after the fence, marker before the fence,
    # marker on the line following the fence.
    for pattern in (r"```\s*<end_code>", r"<end_code>\s*```", r"```\s*\n\s*<end_code>"):
        cleaned = re.sub(pattern, "```", cleaned)
    return cleaned
23
+
24
def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False):
    """Yield chat messages rendering one agent action step.

    Emits (in order): reasoning text, first tool call, tool observations,
    any error, a stats footnote, and a separator.
    """
    step_number = f"Step {step_log.step_number}"

    def _msg(content, **meta):
        # All messages come from the assistant and are immediately final.
        meta.setdefault("status", "done")
        return gr.ChatMessage(role="assistant", content=content, metadata=meta)

    # Model reasoning (skipped when the caller already streamed it live).
    if not skip_model_outputs and getattr(step_log, "model_output", ""):
        yield _msg(_clean_model_output(step_log.model_output),
                   title=f"💭 Reasoning ({step_number})")

    # Only the first tool invocation of the step is rendered.
    if getattr(step_log, "tool_calls", []):
        call = step_log.tool_calls[0]
        args = call.arguments
        if isinstance(args, dict):
            content = str(args.get("answer", str(args)))
        else:
            content = str(args).strip()
        icon = "🔍" if "search" in call.name else "🛠️"
        yield _msg(f"```python\n{content}\n```", title=f"{icon} Used tool: {call.name}")

    # Raw observations returned by the tool, as a plain-text fence.
    if getattr(step_log, "observations", "") and step_log.observations.strip():
        yield _msg(f"```text\n{step_log.observations.strip()}\n```", title="📋 Tool Output")

    if getattr(step_log, "error", None):
        yield _msg(f"⚠️ **Error:** {str(step_log.error)}", title="🚫 Error")

    yield _msg(get_step_footnote_content(step_log, step_number))
    yield _msg("---")
69
+
70
def _process_planning_step(step_log: PlanningStep, skip_model_outputs: bool = False):
    """Yield chat messages for a planning step: plan text, stats footnote, separator."""
    messages = []
    if not skip_model_outputs:
        messages.append(gr.ChatMessage(
            role="assistant",
            content=step_log.plan,
            metadata={"title": "🧠 Planning Phase", "status": "done"},
        ))
    messages.append(gr.ChatMessage(
        role="assistant",
        content=get_step_footnote_content(step_log, "Planning Stats"),
        metadata={"status": "done"},
    ))
    messages.append(gr.ChatMessage(role="assistant", content="---", metadata={"status": "done"}))
    yield from messages
84
+
85
+
86
def _process_final_answer_step(step_log: FinalAnswerStep):
    """Yield the final-answer chat message.

    Tolerates the several attribute names smolagents has used for the answer
    payload ('output', 'answer', 'final_answer'); falls back to parsing the
    step's repr when none is present.
    """
    answer = None
    for attr in ('output', 'answer', 'final_answer'):
        if hasattr(step_log, attr):
            answer = getattr(step_log, attr)
            if answer:
                break

    if answer is None:
        # Last resort: parse the repr, e.g. "FinalAnswerStep(output='...')".
        answer = str(step_log)
        match = re.search(r"output=(['\"])(.*?)\1\)", answer, re.DOTALL)
        if match:
            # NOTE(review): the utf-8 -> unicode_escape round-trip can mangle
            # non-ASCII text; acceptable only because this is a fallback path.
            answer = match.group(2).encode('utf-8').decode('unicode_escape')

    # Some answer objects (e.g. DataFrames) expose to_string(); prefer it.
    content = answer.to_string() if hasattr(answer, 'to_string') else str(answer)

    yield gr.ChatMessage(
        role="assistant",
        content=f"📜 **Final Answer**\n\n{content}",
        metadata={"status": "done"},
    )
117
def pull_messages_from_step(step_log, skip_model_outputs=False):
    """Dispatch a memory step to the matching renderer generator.

    Steps of unknown type yield nothing.
    """
    # Order matters only for readability; the isinstance checks are disjoint.
    if isinstance(step_log, PlanningStep):
        handler = _process_planning_step(step_log, skip_model_outputs)
    elif isinstance(step_log, ActionStep):
        handler = _process_action_step(step_log, skip_model_outputs)
    elif isinstance(step_log, FinalAnswerStep):
        handler = _process_final_answer_step(step_log)
    else:
        return
    yield from handler
124
+
125
def stream_to_gradio(agent, task: str, reset_agent_memory: bool = False):
    """Run `agent` on `task` and translate its event stream for the chat UI.

    Yields gr.ChatMessage objects for completed steps and raw text for
    incremental token deltas.
    """
    event_stream = agent.run(task, stream=True, max_steps=10, reset=reset_agent_memory)
    for event in event_stream:
        if isinstance(event, (ActionStep, PlanningStep, FinalAnswerStep)):
            yield from pull_messages_from_step(event)
        elif isinstance(event, ChatMessageStreamDelta) and event.content:
            yield event.content
services/db.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import psycopg2
4
+ import pandas as pd
5
+ from sqlalchemy import create_engine
6
+
7
RAW_DB_URL = os.getenv("DATABASE_URL")

# SQLAlchemy requires the modern scheme name; Heroku-style URLs still use the
# deprecated "postgres://" prefix, so rewrite it when present.
SQLALCHEMY_DB_URL = RAW_DB_URL.replace("postgres://", "postgresql://") if RAW_DB_URL else None

# --- SQLALCHEMY SETUP (For Pandas Read Operations) ---
try:
    if not SQLALCHEMY_DB_URL:
        raise ValueError("DATABASE_URL is not set in environment.")

    # One module-level engine so all Pandas reads share a connection pool.
    db_engine = create_engine(SQLALCHEMY_DB_URL)
    print("✅ SQLAlchemy Engine initialized.")
except Exception as e:
    # Readers check for a None engine instead of crashing at import time.
    print(f"❌ Error creating engine: {e}")
    db_engine = None
23
+
24
+ # --- PSYCOPG2 SETUP (For Write Operations) ---
25
def get_raw_connection():
    """
    Open a raw psycopg2 connection for write operations (UPDATE/INSERT),
    where Pandas/SQLAlchemy overhead is unnecessary.

    Raises:
        ValueError: when DATABASE_URL is not configured.
    """
    if not RAW_DB_URL:
        raise ValueError("DATABASE_URL is not set.")
    return psycopg2.connect(RAW_DB_URL)
33
+
34
+ # ==============================================================================
35
+ # READ OPERATIONS (Using SQLAlchemy + Pandas)
36
+ # Note: Pandas manages the connection opening/closing automatically via the engine.
37
+ # ==============================================================================
38
+
39
def fetch_distinct_repos():
    """
    Return the unique repositories in 'items' as full GitHub URLs.

    Populates the repository dropdown in the Dashboard. Returns [] on error.
    """
    try:
        # 'items' holds the raw synced data; 'repo' contains "owner/name" slugs.
        frame = pd.read_sql("SELECT DISTINCT repo FROM items ORDER BY repo", db_engine)
        return [f"https://github.com/{slug}" for slug in frame['repo'].tolist()]
    except Exception as e:
        print(f"Error fetching repos: {e}")
        return []
57
+
58
def fetch_dashboard_stats(repo_url=None):
    """
    Aggregate verdict counts from 'issue_reports', optionally for one repo.

    Args:
        repo_url: optional full GitHub URL filter.

    Returns:
        DataFrame with 'verdict'/'count' columns feeding the donut chart;
        an empty frame with those columns on error.
    """
    try:
        sql = "SELECT verdict, count(*) as count FROM issue_reports"
        bind_values = []

        if repo_url:
            # Stored URLs may or may not carry a trailing slash; match both.
            normalized = repo_url.rstrip('/')
            sql += " WHERE (repo_url = %s OR repo_url = %s)"
            bind_values.extend([normalized, normalized + '/'])

        sql += " GROUP BY verdict"
        return pd.read_sql(sql, db_engine, params=tuple(bind_values))
    except Exception as e:
        print(f"Stats Error: {e}")
        return pd.DataFrame(columns=['verdict', 'count'])
79
+
80
def fetch_issues_dataframe(view_filter="pending", repo_url=None):
    """
    Load the main issue list for the dashboard table.

    LEFT JOINs raw GitHub data ('items') with AI analysis ('issue_reports');
    issues without a report get placeholder verdict/status values.

    Args:
        view_filter: contains "Action" or "pending" -> only reports waiting
            for approval; anything else -> all issues (capped at 1000 rows).
        repo_url: optional full GitHub URL used to filter by repository slug.

    Returns:
        DataFrame of issues (empty on error).
    """
    try:
        sql = """
        SELECT
            i.number as issue_number,
            i.title,
            i.state as github_state,
            -- URL: Use report URL if exists, otherwise construct from slug
            COALESCE(r.repo_url, 'https://github.com/' || i.repo) as repo_url,

            -- Verdict: Default to 'Pending Analysis' if null
            COALESCE(r.verdict, 'Pending Analysis') as verdict,
            r.llm_model,
            r.confidence,
            r.priority,

            -- Status: Default to 'new' if null
            COALESCE(r.status, 'new') as status,
            r.proposed_action,

            -- Date: Use analysis date if available, else GitHub update date
            COALESCE(r.updated_at, i.updated_at) as updated_at
        FROM items i
        -- Soft Join: Match issue number AND ensure repo matches (via slug check)
        LEFT JOIN issue_reports r
            ON i.number = r.issue_number
            AND r.repo_url ILIKE '%%' || i.repo || '%%'
        WHERE (i.state = 'open' OR r.status = 'executed') AND i.is_pr = FALSE
        """
        bind_values = []

        # Repository filter operates on the 'items' slug column.
        if repo_url:
            # "https://github.com/user/repo" -> "user/repo"
            slug = repo_url.replace("https://github.com/", "").replace("http://github.com/", "").strip("/")
            sql += " AND i.repo = %s"
            bind_values.append(slug)

        # View filter.
        if "Action" in view_filter or "pending" in view_filter:
            # Only issues waiting for human approval.
            sql += " AND r.status = 'pending_approval' AND r.proposed_action IS NOT NULL ORDER BY r.updated_at DESC"
        else:
            # Everything, newest first, capped for performance.
            sql += " ORDER BY i.number DESC LIMIT 1000"

        return pd.read_sql(sql, db_engine, params=tuple(bind_values))

    except Exception as e:
        print(f"Dataframe Error: {e}")
        return pd.DataFrame()
137
+
138
def get_total_open_issues_count(repo_url=None) -> int:
    """
    Count open issues in the raw 'items' table, optionally for one repo.

    Feeds the top stage of the efficiency funnel. Returns 0 on error
    or when the query yields no rows.
    """
    try:
        sql = "SELECT count(*) as total FROM items WHERE state = 'open'"
        bind_values = []

        if repo_url:
            slug = repo_url.replace("https://github.com/", "").strip("/")
            sql += " AND repo = %s"
            bind_values.append(slug)

        result = pd.read_sql(sql, db_engine, params=tuple(bind_values))
        return 0 if result.empty else int(result.iloc[0]['total'])

    except Exception as e:
        print(f"Error counting open issues: {e}")
        return 0
161
+
162
def fetch_agent_logs(limit=20):
    """
    Fetch the most recent rows from 'agent_traces' for the activity-log panel.

    Args:
        limit: maximum number of rows to return (coerced to int).

    Returns:
        DataFrame with created_at/event_type/message/issue_number columns,
        or an empty DataFrame on error.
    """
    try:
        # Bind LIMIT as a query parameter instead of f-string interpolation so
        # a non-integer 'limit' can never be injected into the SQL text.
        query = "SELECT created_at, event_type, message, issue_number FROM agent_traces ORDER BY created_at DESC LIMIT %s"
        return pd.read_sql(query, db_engine, params=(int(limit),))
    except Exception as e:
        print(f"Logs Error: {e}")
        return pd.DataFrame()
172
+
173
+ # ==============================================================================
174
+ # WRITE OPERATIONS (Using Psycopg2 Raw Connection)
175
+ # Requires manual cursor management and connection closing.
176
+ # ==============================================================================
177
+
178
def fetch_issue_details_by_id(issue_number, repo_url):
    """
    Load the stored analysis for one issue, used when a table row is clicked.

    Returns:
        Tuple (analysis_body, proposed_action, status, thought_process);
        (None, None, None, None) when no report exists or the query fails.
    """
    conn = get_raw_connection()
    cursor = conn.cursor()
    try:
        cursor.execute(
            "SELECT analysis_body, proposed_action, status, thought_process FROM issue_reports WHERE issue_number = %s AND repo_url = %s",
            (issue_number, repo_url)
        )
        row = cursor.fetchone()
        if row is None:
            return (None, None, None, None)
        return row
    except Exception as e:
        print(f"Error fetching details: {e}")
        return (None, None, None, None)
    finally:
        cursor.close()
        conn.close()
198
+
199
def get_proposed_action_payload(issue_number, repo_url):
    """
    Return the stored proposed_action payload for an issue, for execution.

    Returns None when the issue has no report or the lookup fails.
    """
    conn = get_raw_connection()
    cursor = conn.cursor()
    try:
        cursor.execute(
            "SELECT proposed_action FROM issue_reports WHERE issue_number = %s AND repo_url = %s",
            (issue_number, repo_url)
        )
        row = cursor.fetchone()
        return None if row is None else row[0]
    except Exception as e:
        print(f"Error fetching action: {e}")
        return None
    finally:
        cursor.close()
        conn.close()
218
+
219
def update_issue_status(issue_number, repo_url, new_status, final_comment=None):
    """
    Set the workflow status of a report; when `final_comment` is given, also
    persist it into the proposed_action JSON under the 'comment' key.
    Errors are logged, not raised.
    """
    conn = get_raw_connection()
    cursor = conn.cursor()
    try:
        if final_comment is None:
            sql = "UPDATE issue_reports SET status = %s WHERE issue_number = %s AND repo_url = %s"
            args = (new_status, issue_number, repo_url)
        else:
            sql = """
                UPDATE issue_reports
                SET status = %s,
                    proposed_action = jsonb_set(COALESCE(proposed_action, '{}'), '{comment}', to_jsonb(%s::text))
                WHERE issue_number = %s AND repo_url = %s
                """
            args = (new_status, final_comment, issue_number, repo_url)

        cursor.execute(sql, args)
        conn.commit()
    except Exception as e:
        print(f"Error updating status: {e}")
    finally:
        cursor.close()
        conn.close()
247
+
248
def save_analysis_report(
    repo_url: str,
    issue_number: int,
    provider: str,
    model: str,
    verdict: str,
    body: str,
    thought: str = None,
    action: dict = None,
    priority: str = None,
    duplicate_of: int = None
):
    """
    Insert or update (upsert on repo_url + issue_number) an analysis report,
    always resetting its workflow status to 'pending_approval'.
    Used by the Dashboard Chatbot when performing manual re-analysis.
    Errors are logged, not raised.
    """
    upsert_sql = """
        INSERT INTO issue_reports (repo_url, issue_number, llm_provider, llm_model, verdict, analysis_body, thought_process, proposed_action, priority, duplicate_of, updated_at, status)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(), 'pending_approval')
        ON CONFLICT (repo_url, issue_number)
        DO UPDATE SET
            llm_provider = EXCLUDED.llm_provider,
            llm_model = EXCLUDED.llm_model,
            verdict = EXCLUDED.verdict,
            analysis_body = EXCLUDED.analysis_body,
            thought_process = EXCLUDED.thought_process,
            proposed_action = EXCLUDED.proposed_action,
            priority = EXCLUDED.priority,
            duplicate_of = EXCLUDED.duplicate_of,
            status = 'pending_approval',
            updated_at = NOW();
    """
    # proposed_action is serialized to JSON text for the jsonb column.
    action_json = json.dumps(action) if action else None
    values = (repo_url, issue_number, provider, model, verdict, body, thought, action_json, priority, duplicate_of)

    conn = get_raw_connection()
    cursor = conn.cursor()
    try:
        cursor.execute(upsert_sql, values)
        conn.commit()
    except Exception as e:
        print(f"Error saving report: {e}")
    finally:
        cursor.close()
        conn.close()
services/table_renderer.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+
3
def get_verdict_badge(verdict, status, github_state):
    """Return a solid-color HTML badge for an issue's verdict/lifecycle state.

    Substring checks are ordered so 'possibly_resolved' and 'unresolved' are
    matched before the plain 'resolved' they both contain.
    """
    # Executed reports and GitHub-closed issues always show the terminal badge.
    if status == 'executed' or github_state == 'closed':
        return '<span style="background-color: #374151; color: white; padding: 4px 8px; border-radius: 12px; font-size: 10px; font-weight: 600;">CLOSED</span>'

    rules = (
        ("possibly_resolved", '<span style="background-color: #84CC16; color: white; padding: 4px 8px; border-radius: 12px; font-size: 10px; font-weight: 600; white-space: nowrap;">POSSIBLY RESOLVED</span>'),
        ("unresolved", '<span style="background-color: #64748B; color: white; padding: 4px 8px; border-radius: 12px; font-size: 10px; font-weight: 600;">OPEN BUG</span>'),
        ("resolved", '<span style="background-color: #10B981; color: white; padding: 4px 8px; border-radius: 12px; font-size: 10px; font-weight: 600;">RESOLVED</span>'),
        ("duplicate", '<span style="background-color: #F59E0B; color: white; padding: 4px 8px; border-radius: 12px; font-size: 10px; font-weight: 600;">DUPLICATE</span>'),
    )
    v = str(verdict).lower()
    for needle, badge in rules:
        if needle in v:
            return badge

    if "pending" in v or "new" in v:
        # Not yet analyzed by the agent.
        return '<span style="background-color: #94A3B8; color: white; padding: 3px 7px; border-radius: 12px; font-size: 10px; font-weight: 500; white-space: nowrap;">WAITING AGENT</span>'

    # Unknown verdict: neutral badge showing the raw value.
    return f'<span style="background-color: #475569; color: white; padding: 4px 8px; border-radius: 12px; font-size: 10px;">{verdict}</span>'
21
+
22
def get_priority_badge(priority):
    """Return a colored text label for a priority value; '-' when unset/unknown."""
    p = str(priority).lower() if priority else ""
    # Ordered substring rules; text colors contrast on both themes.
    badges = (
        ("critical", '<span style="color: #EF4444; font-weight: 700;">🔥 Critical</span>'),
        ("high", '<span style="color: #F97316; font-weight: 600;">High</span>'),
        ("medium", '<span style="color: #3B82F6;">Medium</span>'),
        ("low", '<span style="color: #9CA3AF;">Low</span>'),
    )
    for needle, badge in badges:
        if needle in p:
            return badge
    return "-"
30
+
31
def generate_issues_html(df: pd.DataFrame, sort_col: str = "updated_at", sort_asc: bool = False) -> str:
    """Render the issues DataFrame as a self-contained sortable HTML table.

    Args:
        df: rows from fetch_issues_dataframe (issue_number, repo_url, title,
            verdict, status, github_state, llm_model, updated_at, ...).
        sort_col: column currently sorted on (controls the header arrow).
        sort_asc: True when sorting ascending.

    Returns:
        HTML string (embedded <style> + <table>) for a gr.HTML component.
    """
    # Issue titles/verdicts come straight from GitHub, i.e. untrusted input:
    # escape everything interpolated into markup or attribute values.
    from html import escape

    if df.empty:
        # Keep a styled container even when empty for visual consistency.
        return """
        <div style='padding: 40px; text-align: center; color: var(--body-text-color); background: var(--background-fill-primary); border-radius: 12px; border: 1px solid #E2E8F0; font-family: sans-serif;'>
            No issues found for this view.
        </div>
        """

    headers_map = {
        "Issue": "issue_number",
        "Title / Repo": "title",
        "Verdict": "verdict",
        #"Priority": "priority",
        "Model": "llm_model",
        #"Confidence": "confidence",
        "Updated": "updated_at"
    }

    html = """
    <style>
        .tm-table-container {
            background-color: var(--background-fill-primary) !important;
            border: 0px solid var(--border-color-primary) !important;
            border-radius: 6px;
            max-height: 500px;
            overflow-y: auto;
            position: relative;
            font-family: 'Inter', sans-serif;
        }

        .tm-table {
            width: 100%;
            border-collapse: collapse;
            border-spacing: 0;
            border: none !important; /* Remove table border */
            background-color: transparent !important;
            color: var(--body-text-color) !important;
        }

        /* HEADER */
        .tm-table thead th {
            position: sticky;
            top: 0;
            background: linear-gradient(135deg, var(--primary-600) 0%, var(--primary-500) 100%) !important;
            color: #FFFFFF !important;
            z-index: 10;
            padding: 14px 16px;
            text-align: left;
            font-weight: 600;
            text-transform: uppercase;
            font-size: 11px;
            letter-spacing: 0.05em;
            border: none !important; /* Remove header bottom border */
            box-sizing: content-box !important;
        }

        /* ROWS */
        .tm-table tbody tr {
            background-color: var(--background-fill-primary) !important;
            border: none !important; /* Remove border between rows */
            cursor: pointer;
            transition: all 0.15s;
            box-sizing: content-box !important;
        }

        /* Zebra Striping (Essential now that there are no borders) */
        .tm-table tbody tr:nth-child(even) {
            background-color: var(--background-fill-secondary) !important;
        }

        /* Hover */
        .tm-table tbody tr:hover {
            background-color: var(--background-fill-secondary) !important;
            filter: brightness(1.1); /* Subtle highlight */
        }

        /* CELLS */
        .tm-table td {
            padding: 14px 16px;
            color: var(--body-text-color) !important;
            font-size: 13px;
            vertical-align: middle;
            border: none !important; /* Ensure no borders in cells */
        }

        /* Typography */
        .tm-title-text {
            font-weight: 600;
            color: var(--body-text-color) !important;
            font-size: 14px;
        }
        .tm-subtext {
            font-size: 11px;
            color: var(--body-text-color-subdued) !important;
            margin-top: 2px;
        }

        /* Sort Icons */
        .tm-sortable { cursor: pointer; }
        .tm-sort-icon { display: inline-block; margin-left: 6px; font-size: 9px; color: rgba(255,255,255,0.7) !important; }
        .tm-sort-active { color: #FFFFFF !important; }

        /* Status (Keep only the left border for indication) */
        .status-pending { border-left: 4px solid #F59E0B !important; }
        .status-executed { border-left: 4px solid #10B981 !important; }
        .status-new { border-left: 4px solid transparent !important; }

        /* View Button */
        a.tm-view-btn {
            background-color: transparent !important;
            color: #3B82F6 !important;
            border: 1px solid #3B82F6 !important;
            padding: 5px 10px;
            border-radius: 20px;
            font-size: 10px;
            font-weight: 700;
            text-transform: uppercase;
            text-decoration: none;
            display: inline-block;
            transition: all 0.2s;
        }
        a.tm-view-btn:hover {
            background-color: #3B82F6 !important;
            color: #FFFFFF !important;
        }
    </style>

    <div class="tm-table-container">
        <table class="tm-table">
            <thead>
                <tr>
    """

    # Header cells with sort-direction indicators.
    for display, col_name in headers_map.items():
        width = 'style="width: 70px;"' if display == "Issue" else ""

        if col_name == sort_col:
            icon = "▲" if sort_asc else "▼"
            icon_class = "tm-sort-icon tm-sort-active"
        else:
            icon = "▼"  # Default hint
            icon_class = "tm-sort-icon"

        html += f"""
                    <th class="tm-sortable" {width} data-sort-col="{col_name}" title="Sort by {display}">
                        {display} <span class="{icon_class}">{icon}</span>
                    </th>
        """

    html += """
                    <th style="width: 90px; text-align: center;">Actions</th>
                </tr>
            </thead>
            <tbody>
    """

    # Maps DB status codes to the row-border classes defined in the <style>
    # block above. BUGFIX: the raw value 'pending_approval' was previously
    # used verbatim ("status-pending_approval"), which matches no CSS rule,
    # so the amber "pending" border never rendered.
    css_status_classes = {'pending_approval': 'status-pending', 'executed': 'status-executed'}

    for _, row in df.iterrows():
        issue_num = row['issue_number']
        repo_full = str(row['repo_url'])
        repo_short = repo_full.split('github.com/')[-1] if 'github.com' in repo_full else repo_full

        title = str(row.get('title') or "No Title")
        # Truncate first, then escape, so entities are never cut in half.
        safe_title = escape(title[:65]) + ('...' if len(title) > 65 else '')
        safe_repo_short = escape(repo_short)

        verdict_html = get_verdict_badge(row['verdict'], row['status'], row.get('github_state'))

        raw_model = str(row.get('llm_model') or "")
        model = raw_model.split('/')[-1] if raw_model else "-"

        status_class = css_status_classes.get(row.get('status', 'new'), 'status-new')

        # Escape attribute values (quote=True escapes the double quotes).
        safe_repo_attr = escape(repo_full, quote=True)
        safe_verdict_attr = escape(str(row["verdict"]), quote=True)
        data_attrs = f'data-issue-number="{issue_num}" data-repo-url="{safe_repo_attr}" data-verdict="{safe_verdict_attr}"'
        target_url = f"{repo_full.rstrip('/')}/issues/{issue_num}"

        html += f"""
                <tr class="{status_class}" {data_attrs}>
                    <td style="font-family: 'Monaco', monospace; font-weight: 600; color: #475569;">#{issue_num}</td>
                    <td>
                        <div class="tm-title-text">{safe_title}</div>
                        <div class="tm-subtext">{safe_repo_short}</div>
                    </td>
                    <td>{verdict_html}</td>
                    <td class="tm-subtext">{escape(model)}</td>
                    <td class="tm-subtext">{str(row["updated_at"])[:10]}</td>
                    <td style="text-align: center;">
                        <a href="{target_url}" target="_blank" class="tm-view-btn" onclick="event.stopPropagation()">
                            GitHub ↗
                        </a>
                    </td>
                </tr>
        """

    html += "</tbody></table></div>"
    return html
style.css ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* ==========================================================================
   1. GLOBAL & LAYOUT STYLES
   ========================================================================== */

body {
    font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
    margin: 0;
    padding: 0;
}

/* Main application container styling */
.gradio-container {
    border-radius: 15px;
    padding: 10px 20px;
    box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
    margin: 10px;
}

.column-container {
    border-radius: 15px;
    padding: 15px;
    box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
    margin: auto;
}

.action-console-col {
    padding: 10px !important;
    border-color: var(--border-color-primary) !important;
    border-width: 1px !important;
    border-radius: 8px;
    margin-top: 10px;
}

/* Keep chat bubbles from spanning the full row width */
.chatbot-container .message-row {
    max-width: calc(100% - var(--spacing-sm) * 6) !important;
}

/* Allow code blocks inside chat messages to wrap instead of overflowing */
.chatbot-container pre code {
    text-wrap: auto !important;
}

/* Media query for larger screens (desktops) */
@media (min-width: 1281px) {
    .gradio-container {
        padding: 10px 370px;
        max-width: 2400px;
    }
}

/* Text shadow for main headings for better readability */
.gradio-container h1 {
    text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.2);
}

/* Utility class to make elements fill their container's width */
/* .fillable {
    width: 100% !important;
    max-width: unset !important;
} */

/* Fixed-height, scrollable trace/log panel */
#trace-log {
    height: 200px !important;
    overflow-y: auto !important;
    background-color: var(--background-fill-primary);
    border: 1px solid var(--border-color-primary);
    border-radius: 8px;
    padding: 10px;
}

.fillable .sidebar-parent {
    padding-left: 10px !important;
    padding-right: 10px !important;
}


/* ==========================================================================
   2. CUSTOM SCROLLBAR STYLES (GLOBAL & SIDEBAR)
   ========================================================================== */

/* --- For WebKit browsers (Chrome, Safari, Edge, etc.) --- */
.sidebar-content::-webkit-scrollbar,
body::-webkit-scrollbar {
    width: 8px;
    height: 8px;
    background-color: transparent;
}

.sidebar-content::-webkit-scrollbar-track,
body::-webkit-scrollbar-track {
    background: transparent;
    border-radius: 10px;
}

.sidebar-content::-webkit-scrollbar-thumb,
body::-webkit-scrollbar-thumb {
    background-color: rgba(136, 136, 136, 0.4);
    border-radius: 10px;
    border: 2px solid transparent;
    background-clip: content-box;
}

.sidebar-content::-webkit-scrollbar-thumb:hover,
body::-webkit-scrollbar-thumb:hover {
    background-color: rgba(136, 136, 136, 0.7);
}

/* --- For Firefox --- */
.sidebar-content,
html {
    scrollbar-width: thin;
    scrollbar-color: rgba(136, 136, 136, 0.7) transparent;
}


/* ==========================================================================
   3. SIDEBAR STYLES
   ========================================================================== */

.sidebar {
    border-radius: 10px;
    padding: 10px;
    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
}

/* Overrides for the content padding inside the sidebar */
.sidebar .sidebar-content {
    padding-left: 10px !important;
    padding-right: 10px !important;
}

/* Centers text within Markdown blocks inside the sidebar */
/* .sidebar .sidebar-content .column .block div .prose {
    text-align: center;
} */

/* Stylish override for the sidebar toggle button: anchored to the right
   edge of the panel, rounded only on its outer corners */
.sidebar .toggle-button {
    background: linear-gradient(135deg, var(--primary-500), var(--primary-600)) !important;
    border: none;
    padding: 12px 18px;
    text-transform: uppercase;
    font-weight: bold;
    letter-spacing: 1px;
    position: absolute;
    top: 50%;
    right: -28px !important;
    left: auto !important;
    transform: unset !important;
    border-radius: 0 var(--radius-lg) var(--radius-lg) 0 !important;
}

/* Mirror the toggle for a right-docked sidebar */
.sidebar.right .toggle-button {
    left: -28px !important;
    right: auto !important;
    transform: rotate(180deg) !important;
}

.sidebar.open .chevron-left {
    transform: rotate(-135deg);
}

.bottom-bar .toggle-bottom-button {
    background: linear-gradient(135deg, var(--primary-500), var(--primary-600)) !important;
}

.toggle-button:hover {
    transform: scale(1.05);
}

.bottom-bar .toggle-bottom-button .chevron {
    width: 100%;
    height: 100%;
    position: relative;
    display: flex;
    align-items: center;
    justify-content: center;
}

/* Chevron arrow drawn with two borders, rotated to point up/down */
.bottom-bar .toggle-bottom-button .chevron-arrow {
    position: relative !important;
    top: auto !important;
    left: auto !important;
    width: 10px !important;
    height: 10px !important;
    border-bottom: 2px solid var(--body-text-color) !important;
    border-right: 2px solid var(--body-text-color) !important;
    transform: rotate(-135deg) translateY(2px);
    margin-bottom: -4px;
}

.bottom-bar.open .chevron-arrow {
    transform: rotate(45deg) translateY(-2px) !important;
}


/* ==========================================================================
   4. COMPONENT-SPECIFIC STYLES & OVERRIDES
   ========================================================================== */

/* Custom styling for the cancel button */
#cancel-button {
    background: linear-gradient(120deg, var(--neutral-500) 0%, var(--neutral-600) 60%, var(--neutral-700) 100%) !important;
}

.custom-dropdown .wrap-inner input {
    padding-right: 22px;
}

/* Compact, centered pagination controls */
#pagination-row {
    display: flex !important;
    justify-content: center !important;
    align-items: center !important;
    gap: 4px !important;
    margin-top: 8px !important;
    padding: 0 !important;
    min-height: 0 !important;
    background: transparent !important;
    border: none !important;
}

.pagination-btn {
    width: 28px !important;
    height: 28px !important;
    min-width: 28px !important;
    padding: 0 !important;
    display: flex !important;
    align-items: center !important;
    justify-content: center !important;
    font-size: 12px !important;
    line-height: 1 !important;
    border-radius: 4px !important;
    flex-grow: 0 !important;
    box-shadow: none !important;
}

#page_label {
    flex: 0 0 auto !important;
    width: auto !important;
    display: inline-block !important;
    margin: 0 8px !important;
    font-size: 11px !important;
    font-family: monospace !important;
    white-space: nowrap !important;
    color: var(--body-text-color-subdued);
}

/* Collapse the <p> Gradio wraps Markdown text in so the label stays inline */
#page_label p {
    margin: 0 !important;
    padding: 0 !important;
    display: inline !important;
}