Delete apps/gradio_app/old-inference.py
apps/gradio_app/old-inference.py
DELETED
@@ -1,73 +0,0 @@
-import os
-import sys
-import subprocess
-from pathlib import Path
-import uuid
-import torch
-
-# Append the current directory to sys.path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-def run_setup_script():
-    setup_script = os.path.join(os.path.dirname(__file__), "setup_scripts.py")
-    try:
-        result = subprocess.run(["python", setup_script], capture_output=True, text=True, check=True)
-        return result.stdout
-    except subprocess.CalledProcessError as e:
-        return f"Setup script failed: {e.stderr}"
-
-def run_inference(
-    model_path="./ckpts/zeroscope_v2_576w",
-    checkpoint_folder="./ckpts/zeroscope_v2_576w-Ghibli-LoRA",
-    prompt="Studio Ghibli style. Two women walk down coastal village path toward sea, passing colorful houses, sailboats visible.",
-    negative_prompt="ugly, noise, fragment, blur, static video",
-    width=512,
-    height=512,
-    num_frames=16,
-    num_steps=50,
-    guidance_scale=30.0,
-    fps=16,
-    lora_rank=96,
-    lora_scale=0.7,
-    noise_prior=0.1,
-    device="cuda",
-    seed=100
-):
-    output_dir = "apps/gradio_app/temp_data"
-    os.makedirs(output_dir, exist_ok=True)
-
-    command = [
-        "python", "src/third_party/MotionDirector/main_inference.py",
-        "--model", model_path,
-        "--checkpoint_folder", checkpoint_folder,
-        "--prompt", prompt,
-        "--negative-prompt", negative_prompt,
-        "--width", str(width),
-        "--height", str(height),
-        "--num-frames", str(num_frames),
-        "--num-steps", str(num_steps),
-        "--guidance-scale", str(guidance_scale),
-        "--fps", str(fps),
-        "--lora_rank", str(lora_rank),
-        "--lora_scale", str(lora_scale),
-        "--noise_prior", str(noise_prior),
-        "--device", device,
-        "--seed", str(seed),
-        "--output_dir", output_dir,
-        "--no-prompt-name"
-    ]
-
-    output_file = [f for f in os.listdir(output_dir) if f.lower().endswith('.mp4')]
-    print(os.path.join(output_dir, output_file[0]) if output_file else "No MP4 files found.")
-
-    try:
-        result = subprocess.run(command, capture_output=True, text=True, check=True)
-        return str(output_file), result.stdout
-    except subprocess.CalledProcessError as e:
-        return None, f"Error: {e.stderr}"
-
-if __name__ == "__main__":
-    # Example usage
-    video, logs = run_inference(device="cpu" if not torch.cuda.is_available() else "cuda")
-    print(f"Generated Video: {video}")
-    print(f"Logs: {logs}")