# app.py
import os
import json
from pathlib import Path

import gradio as gr

# ---------- Paths ----------
CONFIG_DIR = Path("config")
SYSTEM_PROMPT_DIR = Path("system_prompts")
USER_TYPES_PATH = CONFIG_DIR / "user_types.json"
ASSISTANT_ROLES_PATH = CONFIG_DIR / "assistant_roles.json"

# ---------- Model config ----------
COHERE_MODEL = "command-a-03-2025"
MAX_TOKENS = 2000
TEMPERATURE = 0.3

# ---------- Ensure dirs ----------
def ensure_dirs():
    CONFIG_DIR.mkdir(exist_ok=True)
    SYSTEM_PROMPT_DIR.mkdir(exist_ok=True)

# ---------- Load JSON (no demo defaults injected) ----------
def load_json_or_empty(path: Path) -> dict:
    if not path.exists():
        return {}
    try:
        return json.loads(path.read_text(encoding="utf-8"))
    except Exception:
        return {}

# ---------- Load system prompts from folder ----------
def load_system_prompts():
    files = sorted(p for p in SYSTEM_PROMPT_DIR.glob("*.txt") if p.is_file())
    names = [f.stem for f in files]
    mapping = {f.stem: f.read_text(encoding="utf-8") for f in files}
    return names, mapping

# ---------- Cohere client ----------
_cohere_client = None

def get_cohere_client():
    global _cohere_client
    if _cohere_client is None:
        api_key = os.getenv("COHERE_API_KEY")
        if not api_key:
            raise RuntimeError("COHERE_API_KEY env var not set.")
        import cohere
        # Prefer the v2 client (cohere SDK >= 5); older SDKs only ship cohere.Client.
        client_cls = getattr(cohere, "ClientV2", cohere.Client)
        _cohere_client = client_cls(api_key=api_key)
    return _cohere_client

def cohere_generate(system_instruction: str, user_msg: str) -> str:
    client = get_cohere_client()

    # v2 chat API (cohere.ClientV2): system/user messages list.
    # (The previous version probed for a `client.responses` attribute, which is
    # OpenAI's Responses API and does not exist on Cohere clients.)
    if type(client).__name__ == "ClientV2":
        try:
            resp = client.chat(
                model=COHERE_MODEL,
                messages=[
                    {"role": "system", "content": system_instruction},
                    {"role": "user", "content": user_msg},
                ],
                temperature=TEMPERATURE,
                max_tokens=MAX_TOKENS,
            )
            # v2 responses carry a list of content blocks, each with a .text field.
            chunks = [c.text for c in (resp.message.content or [])
                      if getattr(c, "text", None)]
            return "\n".join(chunks).strip() or "(No content returned.)"
        except Exception as e:
            return f"⚠️ Cohere error (chat v2): {e}"

    # Legacy v1 chat API (cohere.Client): preamble + single message.
    try:
        resp = client.chat(
            model=COHERE_MODEL,
            preamble=system_instruction,
            message=user_msg,
            temperature=TEMPERATURE,
            max_tokens=MAX_TOKENS,
        )
        if getattr(resp, "text", None):
            return resp.text.strip()
        return "(No content returned.)"
    except Exception as e:
        return f"⚠️ Cohere error (chat): {e}"

# ---------- Prompt composition ----------
def build_context_block(user_type, role, kb, ont, system_prompt_name):
    return (
        "CONTEXT\n"
        f"- User Type: {user_type}\n"
        f"- Assistant Role: {role}\n"
        f"- Knowledge Base: {kb}\n"
        f"- Ontology: {ont}\n"
        f"- System Prompt: {system_prompt_name}"
    )

def build_system_instruction(role, user_type, sys_prompt_text,
                             user_types_map, roles_map) -> str:
    role_brief = ((roles_map.get(role, {}) or {}).get("prompt_addendum")
                  or (roles_map.get(role, {}) or {}).get("brief", "")
                  or "")
    ut_brief = ((user_types_map.get(user_type, {}) or {}).get("prompt_addendum")
                or (user_types_map.get(user_type, {}) or {}).get("brief", "")
                or "")
    return (
        f"You are acting as the '{role}' assistant. {role_brief} "
        f"Your audience is a {user_type}. {ut_brief} "
        "Operate as an ecolinguistically aligned assistant: "
        "Adhere to these core instructions:\n"
        f"{sys_prompt_text}"
    ).strip()
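# The two JSON config files are flat maps from display name to an optional
# metadata object; only the "brief" and "prompt_addendum" keys are read above.
# A minimal illustrative shape (the entry names below are hypothetical, not
# shipped with the app):
#
#   config/user_types.json:
#     {"Policy Maker": {"brief": "Non-specialist reader.",
#                       "prompt_addendum": "Avoid jargon; spell out trade-offs."}}
#
#   config/assistant_roles.json:
#     {"Educator": {"brief": "Explains concepts step by step."}}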
def compose_for_api(user_type, role, kb, ont, system_prompt_name, user_text,
                    prompt_map, user_types_map, roles_map):
    sys_prompt_text = prompt_map.get(system_prompt_name, "").strip()
    # If the selected file is missing/empty, keep it empty (no demo fallback).
    system_instruction = build_system_instruction(
        role, user_type, sys_prompt_text, user_types_map, roles_map
    )

    # Optional audience/role addenda appended to the user message (kept outside system).
    ut_add = (user_types_map.get(user_type, {}) or {}).get("prompt_addendum", "")
    role_add = (roles_map.get(role, {}) or {}).get("prompt_addendum", "")
    guidance_parts = [p for p in [ut_add, role_add] if p]
    guidance_block = ("\n\nAUDIENCE & ROLE GUIDANCE\n" + "\n".join(guidance_parts)) if guidance_parts else ""

    context_block = build_context_block(user_type, role, kb, ont, system_prompt_name)
    user_msg = f"{context_block}{guidance_block}\n\nTASK\n{user_text.strip()}"
    # What we show in the UI is identical to the user message (no system prompt content).
    pretty = user_msg
    return system_instruction, user_msg, pretty

# ---------- Gradio callbacks ----------
def generate_response(user_type, role, kb, ont, sp_name, user_text,
                      prompt_state, user_types_state, roles_state):
    user_text = (user_text or "").strip()
    if not user_text:
        return "", "Please enter a task or question."
    if not prompt_state:
        return "", "No system prompts found. Add .txt files to /system_prompts and click Refresh."
    if sp_name not in prompt_state:
        return "", "Selected system prompt not found. Click Refresh."
    sys_instr, user_msg, pretty = compose_for_api(
        user_type, role, kb, ont, sp_name, user_text,
        prompt_state, user_types_state, roles_state
    )
    out = cohere_generate(sys_instr, user_msg)
    return pretty, out

def refresh_all():
    ensure_dirs()
    user_types = load_json_or_empty(USER_TYPES_PATH)
    roles = load_json_or_empty(ASSISTANT_ROLES_PATH)
    sp_names, sp_map = load_system_prompts()

    # User types: fall back to a single "Default" entry when the file is empty.
    ut_list = list(user_types.keys())
    if not ut_list:
        ut_list = ["Default"]
        user_types = {"Default": {}}

    # Roles: same fallback.
    role_list = list(roles.keys())
    if not role_list:
        role_list = ["Default"]
        roles = {"Default": {}}

    # System prompts
    if sp_names:
        sp_update = gr.update(choices=sp_names, value=sp_names[0])
    else:
        sp_update = gr.update(choices=["(no prompts found)"], value="(no prompts found)")

    return (
        sp_update,
        gr.update(choices=ut_list, value=ut_list[0]),
        gr.update(choices=role_list, value=role_list[0]),
        sp_map,
        user_types,
        roles,
        f"Loaded {len(sp_names)} prompt(s), {len(ut_list)} user type(s), {len(role_list)} role(s).",
    )
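# Quick smoke test from a REPL (needs COHERE_API_KEY set; "base" and the
# one-line prompt text below are made-up placeholders, not bundled files):
#
#   >>> sys_i, user_m, _ = compose_for_api(
#   ...     "Default", "Default", "N/A", "N/A", "base",
#   ...     "Summarise the idea of ecolinguistics in two sentences.",
#   ...     {"base": "Be concise and concrete."}, {}, {})
#   >>> print(cohere_generate(sys_i, user_m))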
# ---------- UI ----------
def launch_interface():
    ensure_dirs()
    user_types_map = load_json_or_empty(USER_TYPES_PATH)
    roles_map = load_json_or_empty(ASSISTANT_ROLES_PATH)
    sp_names, sp_map = load_system_prompts()

    ut_choices = list(user_types_map.keys()) or ["Default"]
    role_choices = list(roles_map.keys()) or ["Default"]
    sp_value = sp_names[0] if sp_names else None

    theme = gr.themes.Soft(primary_hue="green", neutral_hue="gray")

    with gr.Blocks(theme=theme, css="""
        .gradio-container { max-width: 96vw !important; margin: 0 auto; }
        /* Thin card look */
        .section { border: 1px solid rgba(255,255,255,0.14); border-radius: 10px; padding: 10px; }
        .label-muted { opacity: .95; font-size: .96em; margin: 4px 0 8px; }
        /* Compact form spacing */
        .section *:is(.gr-dropdown, .gr-textbox, .gr-button) {
            margin-top: 6px !important;
            margin-bottom: 6px !important;
        }
        /* Make dropdown triggers reserve space for the chevron and prevent text overlap */
        .gr-dropdown button {
            padding-right: 2.6rem !important; /* room for arrow */
            width: 100%;
        }
        .gr-dropdown button span, .gr-dropdown button div {
            overflow: hidden;
            text-overflow: ellipsis;
            white-space: nowrap;
            max-width: 100%;
            display: block;
        }
    """) as demo:
        prompt_state = gr.State(sp_map)
        user_types_state = gr.State(user_types_map)
        roles_state = gr.State(roles_map)

        gr.Markdown("## 🌿 H4rmony — Ecolinguistic AI Assistant")

        # ----------------- Configure (one horizontal row, equal widths) -----------------
        gr.Markdown("#### Configure", elem_classes="label-muted")
        with gr.Group(elem_classes="section"):
            with gr.Row(equal_height=True):
                # Same scale & min_width so the dropdowns render at equal size.
                common_scale = 1
                common_min_w = 220
                with gr.Column(scale=common_scale, min_width=common_min_w):
                    user_type = gr.Dropdown(ut_choices, value=ut_choices[0], label="User Type")
                with gr.Column(scale=common_scale, min_width=common_min_w):
                    role = gr.Dropdown(role_choices, value=role_choices[0], label="Assistant’s Role")
                with gr.Column(scale=common_scale, min_width=common_min_w):
                    kb = gr.Dropdown(["Ecolinguistics", "Econarrative", "N/A"],
                                     value="Ecolinguistics", label="Knowledge Base")
                with gr.Column(scale=common_scale, min_width=common_min_w):
                    ont = gr.Dropdown(["Ecolinguistics", "Ecojustice", "Degrowth Discourse",
                                       "Green Tech Framing", "N/A"],
                                      value="Ecolinguistics", label="Ontology")
                with gr.Column(scale=common_scale, min_width=common_min_w):
                    sp = gr.Dropdown(sp_names, value=sp_value, label="System Prompt")
                with gr.Column(scale=0, min_width=120):
                    refresh_btn = gr.Button("Refresh")

        # ----------------- Input -----------------
        gr.Markdown("#### Input", elem_classes="label-muted")
        with gr.Group(elem_classes="section"):
            user_text = gr.Textbox(lines=5, placeholder="Enter your task or question…",
                                   label="Task or question")
            submit_btn = gr.Button("Generate", variant="primary")

        # ----------------- Outputs -----------------
        gr.Markdown("#### Optimised Prompt + Assistant Output", elem_classes="label-muted")
        with gr.Group(elem_classes="section"):
            optimized = gr.Textbox(label="Optimised Prompt", interactive=False, lines=10)
            output = gr.Textbox(label="Assistant Response", interactive=False, lines=12)

        # Wire actions
        submit_btn.click(
            fn=generate_response,
            inputs=[user_type, role, kb, ont, sp, user_text,
                    prompt_state, user_types_state, roles_state],
            outputs=[optimized, output],
        )

        refresh_msg = gr.Markdown(" ")
        refresh_btn.click(
            fn=refresh_all,
            outputs=[sp, user_type, role, prompt_state, user_types_state, roles_state, refresh_msg],
        )

    return demo

if __name__ == "__main__":
    demo = launch_interface()
    demo.launch()
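# When serving from a container or a remote host, Gradio's standard launch
# options apply, e.g. demo.launch(server_name="0.0.0.0", server_port=7860);
# adjust host/port to your deployment.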