Delete apps/gradio_app/new-inference.py
Browse files- apps/gradio_app/new-inference.py +0 -104
apps/gradio_app/new-inference.py
DELETED
|
@@ -1,104 +0,0 @@
|
|
| 1 |
-
import sys
|
| 2 |
-
import os
|
| 3 |
-
import subprocess
|
| 4 |
-
from pathlib import Path
|
| 5 |
-
import uuid
|
| 6 |
-
import torch
|
| 7 |
-
|
| 8 |
-
# Make the parent directory importable so sibling app modules resolve
# (note: this adds the *parent* of this file's directory, not the current one).
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 10 |
-
|
| 11 |
-
def run_setup_script():
    """Run the companion ``setup_scripts.py`` located next to this file.

    Returns:
        str: The script's captured stdout on success, or a human-readable
        "Setup script failed: ..." message on any failure.
    """
    setup_script = os.path.join(os.path.dirname(__file__), "setup_scripts.py")
    try:
        # Use sys.executable so the setup runs under the same interpreter as
        # this app, instead of whatever "python" happens to resolve to on
        # PATH (which may be absent or a different version).
        result = subprocess.run(
            [sys.executable, setup_script],
            capture_output=True,
            text=True,
            check=True,
        )
        return result.stdout
    except subprocess.CalledProcessError as e:
        return f"Setup script failed: {e.stderr}"
    except OSError as e:
        # A missing/unlaunchable interpreter or script previously escaped as
        # an unhandled exception; report it the same way as other failures.
        return f"Setup script failed: {e}"
|
| 18 |
-
|
| 19 |
-
def run_inference(
    model_path="./ckpts/zeroscope_v2_576w",
    checkpoint_folder="./ckpts/zeroscope_v2_576w-Ghibli-LoRA",
    prompt="Studio Ghibli style. Two women walk down coastal village path toward sea, passing colorful houses, sailboats visible.",
    negative_prompt="ugly, noise, fragment, blur, static video",
    width=256,
    height=256,
    num_frames=8,
    num_steps=30,
    guidance_scale=30.0,
    fps=8,
    lora_rank=32,
    lora_scale=0.7,
    noise_prior=0.1,
    device="cuda",
    seed=100
):
    """Run MotionDirector text-to-video inference in a subprocess.

    Spawns ``main_inference.py`` with the given generation settings, streams
    its stdout, and locates the MP4 it writes into the shared temp directory.

    Args mirror the MotionDirector CLI; see that script for their semantics.

    Returns:
        tuple: ``(video_path, logs)`` on success, or ``(None, error_message)``
        when the subprocess fails or no output video is found.
    """
    print("Start Inference")
    output_dir = "apps/gradio_app/temp_data"
    os.makedirs(output_dir, exist_ok=True)

    # Clear stale videos so the freshly generated file is unambiguous.
    for file_name in os.listdir(output_dir):
        if file_name.endswith(".mp4"):
            os.remove(os.path.join(output_dir, file_name))

    command = [
        # sys.executable: launch with the current interpreter rather than
        # whatever "python" resolves to on PATH (may be absent or wrong).
        sys.executable, "src/third_party/MotionDirector/main_inference.py",
        "--model", model_path,
        "--checkpoint_folder", checkpoint_folder,
        "--prompt", prompt,
        "--negative-prompt", negative_prompt,
        "--width", str(width),
        "--height", str(height),
        "--num-frames", str(num_frames),
        "--num-steps", str(num_steps),
        "--guidance-scale", str(guidance_scale),
        "--fps", str(fps),
        "--lora_rank", str(lora_rank),
        "--lora_scale", str(lora_scale),
        "--noise_prior", str(noise_prior),
        "--device", device,
        "--seed", str(seed),
        "--output_dir", output_dir,
        "--no-prompt-name"
    ]

    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
        bufsize=1  # Line buffering for real-time log streaming
    )

    # Read output line-by-line in real-time.
    output_lines = []
    try:
        for line in process.stdout:
            output_lines.append(line.strip())
    except Exception as e:
        return None, f"Error reading output: {str(e)}"

    # Drain stderr and wait for the process to exit.
    stderr_output = process.communicate()[1]
    if process.returncode != 0:
        return None, f"Error: {stderr_output.strip()}"

    # Pick the newest MP4 deterministically: os.listdir order is arbitrary,
    # so indexing it directly could return a stale or random file.
    mp4_files = [f for f in os.listdir(output_dir) if f.lower().endswith('.mp4')]
    if mp4_files:
        output_path = max(
            (os.path.join(output_dir, f) for f in mp4_files),
            key=os.path.getmtime,
        )
        if os.path.exists(output_path):
            return output_path, "\n".join(output_lines)
        else:
            return None, f"Video file not found at {output_path}\nLogs:\n" + "\n".join(output_lines)
    return None, f"No MP4 files found in {output_dir}\nLogs:\n" + "\n".join(output_lines)
|
| 99 |
-
|
| 100 |
-
if __name__ == "__main__":
    # Example usage: prefer the GPU when one is available, otherwise CPU.
    target_device = "cuda" if torch.cuda.is_available() else "cpu"
    video_path, logs = run_inference(device=target_device)
    print(f"Generated Video: {video_path}")
    print(f"Logs: {logs}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|