diff --git a/graph_net_bench/torch/backends/base_backend.py b/graph_net_bench/torch/backends/base_backend.py
new file mode 100644
index 000000000..431385570
--- /dev/null
+++ b/graph_net_bench/torch/backends/base_backend.py
@@ -0,0 +1,41 @@
+from abc import ABC, abstractmethod
+from typing import Any, Dict
+
+
+class ExecuteResult:
+    """Result of a single backend execution."""
+
+    def __init__(self, outputs: Any, metrics: Dict[str, float]):
+        self.outputs = outputs
+        self.metrics = metrics
+
+
+class BaseBackend(ABC):
+    """Base class for all backends."""
+
+    def __init__(self, model_path: str, config: Dict[str, Any]):
+        self.model_path = model_path
+        self.config = config
+
+    @abstractmethod
+    def execute(self) -> ExecuteResult:
+        """Execute a single inference.
+
+        Returns:
+            ExecuteResult containing outputs and metrics.
+            Timing in metrics must include device synchronization.
+        """
+        pass
+
+    def warmup(self, num_warmup: int) -> None:
+        """Warmup runs before benchmark.
+
+        Default implementation executes num_warmup times.
+        Override for custom warmup logic.
+        """
+        for _ in range(num_warmup):
+            self.execute()
+
+    def cleanup(self) -> None:
+        """Release resources. Override if needed."""
+        pass
diff --git a/graph_net_bench/torch/eval_backend_diff.py b/graph_net_bench/torch/eval_backend_diff.py
index cfa171dc6..2afbce88d 100755
--- a/graph_net_bench/torch/eval_backend_diff.py
+++ b/graph_net_bench/torch/eval_backend_diff.py
@@ -189,20 +189,37 @@ def eval_multi_models(args, model_path_prefix=None, use_model_list=False):
         print(f"- {model_path}", file=sys.stderr, flush=True)
 
 
+def _build_args_for_perf(
+    config: dict, model_path: str, output_path: str
+) -> types.SimpleNamespace:
+    """
+    TODO: Remove or modify this args-building function
+    once eval_backend_perf is refactored to the new runner-backend structure.
+    """
+    backend_config = config.get("backend_config", {})
+    return types.SimpleNamespace(
+        model_path=model_path,
+        output_path=output_path,
+        compiler=backend_config.get("compiler", ""),
+        device=backend_config.get("device", "cuda"),
+        seed=backend_config.get("seed", 123),
+        warmup=config.get("warmup", 3),
+        trials=config.get("trials", 5),
+        log_prompt=backend_config.get("log_prompt", "graph-net-bench-log"),
+        model_path_prefix=backend_config.get("model_path_prefix"),
+        backend_config=backend_config.get("backend_config"),
+    )
+
+
 def eval_single_model(args):
+    ref_config = test_compiler_util.convert_to_dict(args.reference_config)
+    target_config = test_compiler_util.convert_to_dict(args.target_config)
+
     ref_dir = "/tmp/eval_perf_diff/reference"
     target_dir = "/tmp/eval_perf_diff/target"
 
-    ref_args = types.SimpleNamespace(
-        model_path=args.model_path,
-        output_path=ref_dir,
-        **test_compiler_util.convert_to_dict(args.reference_config),
-    )
-    target_args = types.SimpleNamespace(
-        model_path=args.model_path,
-        output_path=target_dir,
-        **test_compiler_util.convert_to_dict(args.target_config),
-    )
+    ref_args = _build_args_for_perf(ref_config, args.model_path, ref_dir)
+    target_args = _build_args_for_perf(target_config, args.model_path, target_dir)
 
     eval_single_model_with_single_backend(ref_args)
     eval_single_model_with_single_backend(target_args)
@@ -230,11 +247,12 @@ def eval_single_model(args):
 
 def main(args):
     ref_config = test_compiler_util.convert_to_dict(args.reference_config)
-    model_path_prefix = ref_config.get("model_path_prefix")
+    backend_config = ref_config.get("backend_config", {})
+    model_path_prefix = backend_config.get("model_path_prefix")
 
     if args.model_path_list and model_path_prefix:
         eval_multi_models(args, model_path_prefix, use_model_list=True)
-    elif os.path.isdir(args.model_path):
+    elif args.model_path and os.path.isdir(args.model_path):
         if path_utils.is_single_model_dir(args.model_path):
             eval_single_model(args)
         else:
diff --git a/graph_net_bench/torch/runners/base_runner.py b/graph_net_bench/torch/runners/base_runner.py
new file mode 100644
index 000000000..1d73fc04c
--- /dev/null
+++ b/graph_net_bench/torch/runners/base_runner.py
@@ -0,0 +1,35 @@
+from abc import ABC, abstractmethod
+from typing import Any, Dict
+
+from ..backends.base_backend import BaseBackend
+
+
+class BenchmarkResult:
+    """Aggregated result of a benchmark run."""
+
+    def __init__(self, outputs: Any, metrics: Dict[str, Any]):
+        self.outputs = outputs
+        self.metrics = metrics
+
+
+class BaseRunner(ABC):
+    """Base class for all runners."""
+
+    @abstractmethod
+    def load_backend(
+        self, backend_spec: Dict[str, Any], model_path: str
+    ) -> BaseBackend:
+        """Load and initialize a backend instance."""
+        pass
+
+    @abstractmethod
+    def cleanup_backend(self, backend: BaseBackend) -> None:
+        """Cleanup backend resources."""
+        pass
+
+    @abstractmethod
+    def run_benchmark(
+        self, backend_spec: Dict[str, Any], model_path: str
+    ) -> BenchmarkResult:
+        """Run the full benchmark (warmup + trials) and return aggregated result."""
+        pass
diff --git a/test/eval_backend_diff_test.sh b/test/eval_backend_diff_test.sh
index 1eaca5ecd..5a38f1d38 100755
--- a/test/eval_backend_diff_test.sh
+++ b/test/eval_backend_diff_test.sh
@@ -10,21 +10,35 @@
 python3 -m graph_net_bench.torch.eval_backend_diff \
     --model-path-list $model_list \
     --reference-config $(base64 -w 0 <&1 | tee "$OUTPUT_PATH/validation.log"