#!/usr/bin/env python3
|
|
"""One-command pipeline runner with config-driven paths."""
|
|
|
|
import argparse
|
|
import json
|
|
import subprocess
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
from platform_utils import safe_path, is_windows, resolve_path
|
|
|
|
|
|
def run(cmd):
    """Echo and execute *cmd* as a subprocess, aborting the pipeline on failure.

    Args:
        cmd: argv-style list of strings; each element is normalized via
            safe_path() before execution to smooth over platform path quirks.

    Raises:
        subprocess.CalledProcessError: if the command exits with a non-zero
            status (check=True).
    """
    print("running:", " ".join(cmd))
    cmd = [safe_path(arg) for arg in cmd]
    # Both branches of the old is_windows() check called subprocess.run with
    # identical semantics (shell=False is already the default), so a single
    # call suffices — and keeps the list-argv, no-shell security posture.
    subprocess.run(cmd, check=True)
|
|
|
|
|
|
def parse_args():
    """Build and parse the command-line interface for the pipeline runner.

    Returns:
        argparse.Namespace with config/device/reference options and one
        boolean skip flag per pipeline stage.
    """
    here = Path(__file__).resolve().parent
    parser = argparse.ArgumentParser(description="Run full pipeline end-to-end.")
    parser.add_argument("--config", default=str(here / "config.json"))
    parser.add_argument("--device", default="auto", help="cpu, cuda, or auto")
    parser.add_argument(
        "--reference",
        default="",
        help="override reference glob (train*.csv.gz)",
    )
    # Every pipeline stage can be skipped independently via --skip-<stage>.
    for stage in (
        "prepare",
        "train",
        "export",
        "eval",
        "postprocess",
        "post-eval",
        "diagnostics",
    ):
        parser.add_argument(f"--skip-{stage}", action="store_true")
    return parser.parse_args()
|
|
|
|
|
|
def main():
|
|
args = parse_args()
|
|
base_dir = Path(__file__).resolve().parent
|
|
config_path = Path(args.config)
|
|
with open(config_path, "r", encoding="utf-8") as f:
|
|
cfg = json.load(f)
|
|
|
|
# Resolve config path without duplicating base_dir on Windows when user passes example/config.json
|
|
if config_path.is_absolute():
|
|
config_path = resolve_path(config_path.parent, config_path)
|
|
else:
|
|
candidate = base_dir / config_path
|
|
if candidate.exists():
|
|
config_path = resolve_path(candidate.parent, candidate)
|
|
elif config_path.exists():
|
|
config_path = resolve_path(config_path.parent, config_path)
|
|
else:
|
|
config_path = resolve_path(base_dir, config_path)
|
|
timesteps = cfg.get("timesteps", 200)
|
|
seq_len = cfg.get("sample_seq_len", cfg.get("seq_len", 64))
|
|
batch_size = cfg.get("sample_batch_size", cfg.get("batch_size", 2))
|
|
clip_k = cfg.get("clip_k", 5.0)
|
|
|
|
if not args.skip_prepare:
|
|
run([sys.executable, str(base_dir / "prepare_data.py")])
|
|
if not args.skip_train:
|
|
run([sys.executable, str(base_dir / "train.py"), "--config", str(config_path), "--device", args.device])
|
|
if not args.skip_export:
|
|
run(
|
|
[
|
|
sys.executable,
|
|
str(base_dir / "export_samples.py"),
|
|
"--include-time",
|
|
"--device",
|
|
args.device,
|
|
"--config",
|
|
str(config_path),
|
|
"--timesteps",
|
|
str(timesteps),
|
|
"--seq-len",
|
|
str(seq_len),
|
|
"--batch-size",
|
|
str(batch_size),
|
|
"--clip-k",
|
|
str(clip_k),
|
|
"--use-ema",
|
|
]
|
|
)
|
|
ref = args.reference or cfg.get("data_glob") or cfg.get("data_path") or ""
|
|
if not args.skip_eval:
|
|
if ref:
|
|
run([sys.executable, str(base_dir / "evaluate_generated.py"), "--reference", str(ref)])
|
|
else:
|
|
run([sys.executable, str(base_dir / "evaluate_generated.py")])
|
|
run([sys.executable, str(base_dir / "summary_metrics.py")])
|
|
|
|
if not args.skip_postprocess:
|
|
cmd = [
|
|
sys.executable,
|
|
str(base_dir / "postprocess_types.py"),
|
|
"--generated",
|
|
str(base_dir / "results" / "generated.csv"),
|
|
"--config",
|
|
str(config_path),
|
|
]
|
|
if ref:
|
|
cmd += ["--reference", str(ref)]
|
|
run(cmd)
|
|
|
|
if not args.skip_post_eval:
|
|
cmd = [
|
|
sys.executable,
|
|
str(base_dir / "evaluate_generated.py"),
|
|
"--generated",
|
|
str(base_dir / "results" / "generated_post.csv"),
|
|
"--out",
|
|
"results/eval_post.json",
|
|
]
|
|
if ref:
|
|
cmd += ["--reference", str(ref)]
|
|
run(cmd)
|
|
|
|
if not args.skip_diagnostics:
|
|
if ref:
|
|
run([sys.executable, str(base_dir / "diagnose_ks.py"), "--generated", str(base_dir / "results" / "generated_post.csv"), "--reference", str(ref)])
|
|
run([sys.executable, str(base_dir / "filtered_metrics.py"), "--eval", str(base_dir / "results" / "eval_post.json")])
|
|
run([sys.executable, str(base_dir / "ranked_ks.py"), "--eval", str(base_dir / "results" / "eval_post.json")])
|
|
run([sys.executable, str(base_dir / "program_stats.py"), "--config", str(config_path), "--reference", str(ref or config_path)])
|
|
run([sys.executable, str(base_dir / "controller_stats.py"), "--config", str(config_path), "--reference", str(ref or config_path)])
|
|
run([sys.executable, str(base_dir / "actuator_stats.py"), "--config", str(config_path), "--reference", str(ref or config_path)])
|
|
run([sys.executable, str(base_dir / "pv_stats.py"), "--config", str(config_path), "--reference", str(ref or config_path)])
|
|
run([sys.executable, str(base_dir / "aux_stats.py"), "--config", str(config_path), "--reference", str(ref or config_path)])
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main()
|