Yash Sakhale committed · Commit 14d4dea · Parent(s): 54b0b19

Add AI-powered project requirements generation feature
app.py CHANGED
@@ -435,6 +435,258 @@ class CatalogValidator:
         return warnings
 
 
+class ProjectRequirementsGenerator:
+    """Generate requirements.txt from project description using LLM."""
+
+    def __init__(self, use_llm: bool = True):
+        """
+        Initialize project requirements generator.
+
+        Args:
+            use_llm: If True, uses Hugging Face Inference API
+                     If False, uses rule-based suggestions
+        """
+        self.use_llm = use_llm
+        # Using a better model for code generation
+        # Try to use a code generation model, fallback to GPT-2
+        self.api_url = "https://api-inference.huggingface.co/models/bigcode/starcoder"
+        self.fallback_url = "https://api-inference.huggingface.co/models/gpt2"
+        self.headers = {"Content-Type": "application/json"}
+
+    def generate_requirements(self, project_description: str) -> Tuple[str, str]:
+        """
+        Generate requirements.txt from project description.
+
+        Args:
+            project_description: User's description of their project
+
+        Returns:
+            Tuple of (requirements_text, explanations_text)
+        """
+        if not project_description or not project_description.strip():
+            return "", ""
+
+        # Always try rule-based first as it's more reliable
+        requirements, explanations = self._rule_based_suggestions(project_description)
+
+        # Try LLM to enhance the suggestions if enabled
+        if self.use_llm:
+            prompt = self._create_requirements_prompt(project_description)
+            llm_response = self._call_llm_for_requirements(prompt)
+            llm_requirements, llm_explanations = self._parse_llm_response(llm_response)
+
+            # If LLM generated valid requirements, use them (or merge with rule-based)
+            if llm_requirements and len(llm_requirements.strip()) > 10:
+                # Merge: prefer LLM but keep rule-based if LLM is incomplete
+                if len(llm_requirements) > len(requirements):
+                    requirements = llm_requirements
+                    explanations = llm_explanations if llm_explanations else explanations
+                else:
+                    # Combine both
+                    combined = set(requirements.split('\n'))
+                    combined.update(llm_requirements.split('\n'))
+                    requirements = '\n'.join([r for r in combined if r.strip()])
+
+        return requirements, explanations
+
+    def _create_requirements_prompt(self, description: str) -> str:
+        """Create a prompt for generating requirements.txt."""
+        prompt = f"""You are a Python expert. Based on this project description, generate a requirements.txt file with appropriate Python packages.
+
+Project Description:
+{description}
+
+Generate a requirements.txt file with:
+1. Essential packages needed for this project
+2. Appropriate version pins where necessary
+3. Format: one package per line with version (e.g., "pandas==2.0.3" or "fastapi>=0.100.0")
+
+For each package, provide a brief explanation of why it's needed.
+
+Format your response as:
+REQUIREMENTS:
+package1==version1
+package2>=version2
+...
+
+EXPLANATIONS:
+- package1: Brief explanation of why it's needed
+- package2: Brief explanation of why it's needed
+...
+
+Keep it practical and focused on the most important dependencies (5-15 packages typically).
+"""
+        return prompt
+
+    def _call_llm_for_requirements(self, prompt: str) -> str:
+        """Call LLM API to generate requirements."""
+        try:
+            # Try the code generation model first
+            payload = {
+                "inputs": prompt,
+                "parameters": {
+                    "max_new_tokens": 500,
+                    "temperature": 0.3,
+                    "return_full_text": False
+                }
+            }
+
+            response = requests.post(
+                self.api_url,
+                headers=self.headers,
+                json=payload,
+                timeout=15
+            )
+
+            if response.status_code == 200:
+                result = response.json()
+                if isinstance(result, list) and len(result) > 0:
+                    generated_text = result[0].get('generated_text', '')
+                    if generated_text:
+                        return generated_text.strip()
+
+            # Fallback to GPT-2
+            response = requests.post(
+                self.fallback_url,
+                headers=self.headers,
+                json=payload,
+                timeout=15
+            )
+
+            if response.status_code == 200:
+                result = response.json()
+                if isinstance(result, list) and len(result) > 0:
+                    generated_text = result[0].get('generated_text', '')
+                    if generated_text:
+                        return generated_text.strip()
+
+            return ""
+
+        except Exception as e:
+            print(f"LLM API error: {e}")
+            return ""
+
+    def _parse_llm_response(self, response: str) -> Tuple[str, str]:
+        """Parse LLM response to extract requirements and explanations."""
+        if not response:
+            return "", ""
+
+        requirements = []
+        explanations = []
+
+        # Try to extract REQUIREMENTS section
+        if "REQUIREMENTS:" in response:
+            req_section = response.split("REQUIREMENTS:")[1]
+            if "EXPLANATIONS:" in req_section:
+                req_section = req_section.split("EXPLANATIONS:")[0]
+
+            for line in req_section.strip().split('\n'):
+                line = line.strip()
+                if line and not line.startswith('#') and not line.startswith('-'):
+                    # Clean up the line
+                    line = line.split('#')[0].strip()  # Remove comments
+                    if line and ('==' in line or '>=' in line or '<=' in line or '>' in line or '<' in line or not any(c in line for c in '=<>')):
+                        requirements.append(line)
+
+        # Try to extract EXPLANATIONS section
+        if "EXPLANATIONS:" in response:
+            exp_section = response.split("EXPLANATIONS:")[1]
+            for line in exp_section.strip().split('\n'):
+                line = line.strip()
+                if line and line.startswith('-'):
+                    explanations.append(line[1:].strip())
+
+        # If parsing failed, try to extract package names from the response
+        if not requirements:
+            # Look for lines that look like package specifications
+            for line in response.split('\n'):
+                line = line.strip()
+                # Check if it looks like a package (has letters, maybe numbers, maybe version)
+                if line and ('==' in line or '>=' in line or '<=' in line):
+                    parts = line.split()
+                    if parts:
+                        requirements.append(parts[0])
+
+        requirements_text = '\n'.join(requirements[:20])  # Limit to 20 packages
+        explanations_text = '\n'.join(explanations[:20]) if explanations else ""
+
+        return requirements_text, explanations_text
+
+    def _rule_based_suggestions(self, description: str) -> Tuple[str, str]:
+        """Generate rule-based suggestions when LLM is unavailable."""
+        desc_lower = description.lower()
+        suggestions = []
+        explanations = []
+
+        # RAG / Chatbot / PDF processing
+        if any(word in desc_lower for word in ['rag', 'chatbot', 'pdf', 'document', 'query', 'retrieval']):
+            suggestions.append("streamlit>=1.28.0")
+            suggestions.append("langchain>=0.1.0")
+            suggestions.append("pypdf>=3.17.0")
+            if 'openai' in desc_lower or 'gpt' in desc_lower:
+                suggestions.append("openai>=1.0.0")
+            else:
+                suggestions.append("openai>=1.0.0")
+                suggestions.append("chromadb>=0.4.0")
+            explanations.append("- streamlit: Build interactive web apps for your chatbot interface")
+            explanations.append("- langchain: Framework for building RAG applications")
+            explanations.append("- pypdf: PDF parsing and text extraction")
+            explanations.append("- openai: OpenAI API for LLM integration")
+            explanations.append("- chromadb: Vector database for document embeddings")
+
+        # Web frameworks
+        if any(word in desc_lower for word in ['web', 'api', 'server', 'backend', 'rest']):
+            suggestions.append("fastapi>=0.100.0")
+            suggestions.append("uvicorn[standard]>=0.23.0")
+            explanations.append("- fastapi: Modern web framework for building APIs")
+            explanations.append("- uvicorn: ASGI server to run FastAPI applications")
+
+        # Data science
+        if any(word in desc_lower for word in ['data', 'analysis', 'csv', 'excel', 'dataframe', 'pandas']):
+            suggestions.append("pandas>=2.0.0")
+            suggestions.append("numpy>=1.24.0")
+            explanations.append("- pandas: Data manipulation and analysis")
+            explanations.append("- numpy: Numerical computing library")
+
+        # Machine learning
+        if any(word in desc_lower for word in ['ml', 'machine learning', 'model', 'train', 'neural', 'deep learning', 'ai']):
+            suggestions.append("scikit-learn>=1.3.0")
+            if 'pytorch' in desc_lower or 'torch' in desc_lower:
+                suggestions.append("torch>=2.0.0")
+                explanations.append("- torch: PyTorch deep learning framework")
+            elif 'tensorflow' in desc_lower or 'tf' in desc_lower:
+                suggestions.append("tensorflow>=2.13.0")
+                explanations.append("- tensorflow: TensorFlow deep learning framework")
+            explanations.append("- scikit-learn: Machine learning algorithms and utilities")
+
+        # Database
+        if any(word in desc_lower for word in ['database', 'sql', 'db', 'postgres', 'mysql']):
+            suggestions.append("sqlalchemy>=2.0.0")
+            explanations.append("- sqlalchemy: SQL toolkit and ORM")
+
+        # HTTP requests
+        if any(word in desc_lower for word in ['http', 'request', 'fetch', 'download']):
+            suggestions.append("requests>=2.31.0")
+            explanations.append("- requests: HTTP library for making API calls")
+
+        # Environment variables
+        if any(word in desc_lower for word in ['config', 'env', 'environment', 'settings']):
+            suggestions.append("python-dotenv>=1.0.0")
+            explanations.append("- python-dotenv: Load environment variables from .env file")
+
+        # If no specific matches, provide common packages
+        if not suggestions:
+            suggestions.append("requests>=2.31.0")
+            suggestions.append("python-dotenv>=1.0.0")
+            explanations.append("- requests: HTTP library for API calls and web requests")
+            explanations.append("- python-dotenv: Manage environment variables and configuration")
+
+        requirements_text = '\n'.join(suggestions) if suggestions else ""
+        explanations_text = '\n'.join(explanations) if explanations else ""
+
+        return requirements_text, explanations_text
+
+
 class ExplanationEngine:
     """Generate intelligent explanations for dependency conflicts using LLM."""
 
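For reference, a minimal usage sketch of the new class (hypothetical driver code, not part of the commit; it assumes `app.py` is importable without side effects). Passing `use_llm=False` exercises only the rule-based path, so no network call is made:

# Hypothetical driver, not from the commit.
from app import ProjectRequirementsGenerator

generator = ProjectRequirementsGenerator(use_llm=False)  # rule-based only, no API call
reqs, why = generator.generate_requirements(
    "A chatbot that answers questions over uploaded PDF documents"
)
print(reqs)  # one spec per line: streamlit, langchain, pypdf, openai, chromadb
print(why)   # one "- package: reason" bullet per suggestion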
@@ -589,6 +841,7 @@ Keep it under 150 words and use plain language.
 
 
 def process_dependencies(
+    project_description: str,
     library_list: str,
     requirements_text: str,
     uploaded_file,
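Since `process_and_download(*args)` (later in this diff) forwards the Gradio inputs positionally, the new `project_description` parameter has to stay first in both this signature and the `inputs=[...]` list wired to the button. A minimal illustration of that contract (hypothetical, simplified signature):

# Hypothetical illustration: *args forwarding preserves the Gradio inputs order.
def process_dependencies(project_description, library_list, requirements_text):
    return f"{project_description!r} | {library_list!r} | {requirements_text!r}"

def process_and_download(*args):
    # args arrive in the same order as inputs=[project_description_input, ...]
    return process_dependencies(*args)

print(process_and_download("a web API", "fastapi", "uvicorn>=0.23.0"))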
@@ -604,6 +857,20 @@ def process_dependencies(
 ) -> Tuple[str, str, str]:
     """Main processing function for Gradio interface."""
 
+    # Generate requirements from project description if provided
+    generated_requirements = ""
+    generation_explanations = ""
+    if project_description and project_description.strip():
+        generator = ProjectRequirementsGenerator(use_llm=True)
+        generated_requirements, generation_explanations = generator.generate_requirements(project_description)
+
+    # If we generated requirements, add them to the requirements_text
+    if generated_requirements:
+        if requirements_text:
+            requirements_text = generated_requirements + "\n" + requirements_text
+        else:
+            requirements_text = generated_requirements
+
     # Collect dependencies from all sources
     all_dependencies = []
 
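Note the merge order: generated entries are prepended, so they appear ahead of anything the user supplied. A quick trace with hypothetical values:

# Hypothetical values tracing the prepend in process_dependencies.
generated_requirements = "fastapi>=0.100.0\nuvicorn[standard]>=0.23.0"
requirements_text = "requests==2.31.0"

if generated_requirements:
    if requirements_text:
        requirements_text = generated_requirements + "\n" + requirements_text
    else:
        requirements_text = generated_requirements

print(requirements_text)
# fastapi>=0.100.0
# uvicorn[standard]>=0.23.0
# requests==2.31.0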
@@ -780,6 +1047,22 @@ def process_dependencies(
     output_parts = []
     output_parts.append("## Dependency Analysis Results\n\n")
 
+    # Show generated requirements if project description was provided
+    if project_description and project_description.strip() and generated_requirements:
+        output_parts.append("### Generated Requirements from Project Description\n\n")
+        output_parts.append(f"**Project:** {project_description[:100]}{'...' if len(project_description) > 100 else ''}\n\n")
+        output_parts.append("**Suggested Packages:**\n")
+        output_parts.append("```\n")
+        output_parts.append(generated_requirements)
+        output_parts.append("\n```\n\n")
+
+        if generation_explanations:
+            output_parts.append("**Why these packages?**\n")
+            output_parts.append(generation_explanations)
+            output_parts.append("\n\n")
+
+        output_parts.append("---\n\n")
+
     # Show ML prediction if available
     if ML_AVAILABLE and ml_conflict_prediction is not None:
         if ml_conflict_prediction:
@@ -849,6 +1132,7 @@ def create_interface():
 
 | Feature | Status | Description |
 |---------|--------|-------------|
+| **LLM Requirements Generation** | Active | Generate requirements.txt from project description using AI |
 | **LLM Reasoning** | Active | AI-powered natural language explanations for conflicts |
 | **ML Conflict Prediction** | {"Available" if ML_AVAILABLE else "Not Loaded"} | Machine learning model predicts conflicts before analysis |
 | **Embedding-Based Spell Check** | {"Available" if ML_AVAILABLE else "Not Loaded"} | Semantic similarity matching for package names |
@@ -857,6 +1141,29 @@ def create_interface():
 
 """)
 
+        # Project Description Input (Optional)
+        with gr.Row():
+            with gr.Column(scale=3):
+                project_description_input = gr.Textbox(
+                    label="Project Description (Optional) - AI-Powered Requirements Generation",
+                    placeholder="Describe your project idea here...\nExample: 'I want to build a web API for data analysis with machine learning capabilities'",
+                    lines=4,
+                    info="Describe your project and AI will suggest required libraries with explanations.",
+                    value=""
+                )
+            with gr.Column(scale=1):
+                generate_requirements_btn = gr.Button(
+                    "Generate Requirements from Description",
+                    variant="secondary",
+                    size="lg"
+                )
+                generated_requirements_display = gr.Markdown(
+                    label="Generated Requirements Preview",
+                    value="AI-generated requirements preview will appear here after clicking the button above."
+                )
+
+        gr.Markdown("---")
+
         with gr.Row():
             with gr.Column(scale=1):
                 gr.Markdown("### Input Methods")
@@ -974,6 +1281,29 @@ def create_interface():
                 visible=True
             )
 
+        def generate_requirements_only(project_desc):
+            """Generate requirements from project description only."""
+            if not project_desc or not project_desc.strip():
+                return "", ""
+
+            generator = ProjectRequirementsGenerator(use_llm=True)
+            requirements, explanations = generator.generate_requirements(project_desc)
+
+            if requirements:
+                output = f"## Generated Requirements\n\n"
+                output += f"**Project:** {project_desc[:100]}{'...' if len(project_desc) > 100 else ''}\n\n"
+                output += "**Suggested Packages:**\n```\n"
+                output += requirements
+                output += "\n```\n\n"
+                if explanations:
+                    output += "**Why these packages?**\n"
+                    output += explanations
+                # Also return the requirements text for the textbox
+                return output, requirements
+            else:
+                error_msg = "Could not generate requirements. Please try a more detailed description or check your connection."
+                return error_msg, ""
+
         def process_and_download(*args):
             # Extract all arguments
             result_text, resolved_text, ml_details = process_dependencies(*args)
@@ -993,9 +1323,77 @@ def create_interface():
 
             return result_text, resolved_text, temp_file if temp_file else None, ml_output_text
 
+        # Button to generate requirements from description
+        def generate_and_update(project_desc, existing_reqs):
+            """Generate requirements and update the requirements input."""
+            if not project_desc or not project_desc.strip():
+                return "Please enter a project description first.", existing_reqs
+
+            generator = ProjectRequirementsGenerator(use_llm=True)
+            requirements, explanations = generator.generate_requirements(project_desc)
+
+            # Check if we got valid requirements (rule-based should always return something)
+            if requirements and requirements.strip() and len(requirements.strip()) > 5:
+                # Create preview output
+                preview = f"## Generated Requirements\n\n"
+                preview += f"**Project:** {project_desc[:100]}{'...' if len(project_desc) > 100 else ''}\n\n"
+                preview += "**Suggested Packages:**\n```\n"
+                preview += requirements
+                preview += "\n```\n\n"
+                if explanations and explanations.strip():
+                    preview += "**Why these packages?**\n"
+                    preview += explanations
+                preview += "\n\n*Requirements have been added to the 'Requirements.txt Content' box below. You can edit them before analysis.*"
+
+                # Update requirements input (append or replace)
+                if existing_reqs and existing_reqs.strip():
+                    updated_reqs = requirements + "\n" + existing_reqs
+                else:
+                    updated_reqs = requirements
+
+                return preview, updated_reqs
+            else:
+                # Fallback - generate basic requirements
+                desc_lower = project_desc.lower()
+                basic_reqs = []
+                basic_explanations = []
+
+                if 'streamlit' in desc_lower or 'web' in desc_lower or 'app' in desc_lower:
+                    basic_reqs.append("streamlit>=1.28.0")
+                    basic_explanations.append("- streamlit: Build interactive web applications")
+
+                if 'pdf' in desc_lower or 'document' in desc_lower:
+                    basic_reqs.append("pypdf>=3.17.0")
+                    basic_explanations.append("- pypdf: PDF parsing and text extraction")
+
+                if 'rag' in desc_lower or 'chatbot' in desc_lower or 'llm' in desc_lower:
+                    basic_reqs.append("langchain>=0.1.0")
+                    basic_reqs.append("openai>=1.0.0")
+                    basic_explanations.append("- langchain: Framework for building LLM applications")
+                    basic_explanations.append("- openai: OpenAI API integration")
+
+                if basic_reqs:
+                    reqs_text = '\n'.join(basic_reqs)
+                    exp_text = '\n'.join(basic_explanations)
+                    preview = f"## Generated Requirements\n\n**Project:** {project_desc[:100]}\n\n**Suggested Packages:**\n```\n{reqs_text}\n```\n\n**Why these packages?**\n{exp_text}"
+                    if existing_reqs and existing_reqs.strip():
+                        updated_reqs = reqs_text + "\n" + existing_reqs
+                    else:
+                        updated_reqs = reqs_text
+                    return preview, updated_reqs
+
+                error_msg = "## Could not generate requirements\n\nPlease try a more detailed description with keywords like: web, API, data analysis, machine learning, PDF, chatbot, etc."
+                return error_msg, existing_reqs
+
+        generate_requirements_btn.click(
+            fn=generate_and_update,
+            inputs=[project_description_input, requirements_input],
+            outputs=[generated_requirements_display, requirements_input]
+        )
+
         process_btn.click(
             fn=process_and_download,
-            inputs=[library_input, requirements_input, file_upload, python_version, device, os_type, mode, resolution_strategy, use_llm, use_ml_prediction, use_ml_spellcheck, show_ml_details],
+            inputs=[project_description_input, library_input, requirements_input, file_upload, python_version, device, os_type, mode, resolution_strategy, use_llm, use_ml_prediction, use_ml_spellcheck, show_ml_details],
            outputs=[output_display, resolved_output, download_btn, ml_output]
         )
 
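`generate_and_update` returns two values, one per entry in `outputs`, so a single click refreshes both the markdown preview and the requirements textbox. A self-contained sketch of that two-output click pattern (hypothetical minimal app; assumes only that `gradio` is installed):

import gradio as gr

def fill(desc, existing):
    # Hypothetical stand-in for generate_and_update: returns (preview, textbox value).
    reqs = "requests>=2.31.0"
    preview = f"Suggested for: {desc}"
    return preview, (reqs + "\n" + existing) if existing.strip() else reqs

with gr.Blocks() as demo:
    desc = gr.Textbox(label="Project Description")
    reqs_box = gr.Textbox(label="requirements.txt")
    preview = gr.Markdown()
    gr.Button("Generate").click(fn=fill, inputs=[desc, reqs_box], outputs=[preview, reqs_box])

demo.launch()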
@@ -1003,15 +1401,19 @@ def create_interface():
 ---
 ### How to Use
 
-1. **
-2. **
-3. **
-4. **
-5. **
-6. **
+1. **(Optional) Describe your project** in the "Project Description" box - AI will suggest required libraries
+2. **Input your dependencies** using any of the three methods (or combine them)
+3. **Configure your environment** (Python version, device, OS)
+4. **Enable AI/ML features** (LLM explanations, ML predictions, ML spell-check)
+5. **Choose analysis mode**: Quick for fast results, Deep for complete dependency tree
+6. **Select resolution strategy**: How to handle version conflicts
+7. **Click "Analyze & Resolve Dependencies"**
+8. **Review the results** including AI-generated requirements and explanations
+9. **Download the resolved requirements.txt**
 
 ### Features
 
+- **AI Requirements Generation**: Describe your project and get suggested libraries with explanations
 - Parse multiple input formats
 - Detect version conflicts
 - Check compatibility across dependency graph
|