# Configuration file for llm4hep supervisor-coder framework
#
# This file controls the LLM models and parameters used for testing.
# Copy this file to config.yml and customize for your experiments.
# Supervisor model - analyzes tasks and provides instructions to the coder
supervisor: lbl/cborg-deepthought:latest
# Coder model - generates Python code based on supervisor instructions
coder: lbl/cborg-deepthought:latest
# Temperature for LLM generation (0.0 = deterministic, 1.0 = creative)
temperature: 0.0
# Optional: Maximum iterations per step (default: 10)
# Uncomment to limit supervisor-coder refinement loops
# max_iterations: 3
# Optional: Custom output directory
# Uncomment to specify where results should be saved
# out_dir: results/my_experiment
# Model Options:
# See CBORG_MODEL_MAPPINGS.md for available models including:
#
# Anthropic Claude:
#   - anthropic/claude-sonnet:latest
#   - anthropic/claude-opus:latest
#   - anthropic/claude-haiku:latest
#
# OpenAI:
#   - openai/gpt-5-mini
#   - openai/gpt-5
#   - openai/o3
#   - openai/o3-mini
#   - openai/o4-mini
#
# Google Gemini:
#   - google/gemini:latest
#   - google/gemini-flash
#
# xAI Grok:
#   - xai/grok:latest
#   - xai/grok-mini
#
# AWS/Meta Llama:
#   - aws/llama-4-maverick
#   - aws/llama-4-scout
#
# Other:
#   - deepseek-r1
#   - gcp/qwen-3
#   - gpt-oss-120b