diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..3bc9f3e9 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,93 @@ +# AGENTS.md + +This file provides guidance to AI coding agents when working with code in this repository. + +## Overview + +CodSpeed Runner is a Rust CLI application for gathering performance data and uploading reports to CodSpeed. The binary is named `codspeed` and supports local and CI environments including GitHub Actions, GitLab CI, and Buildkite. + +## Common Development Commands + +### Building and Testing +```bash +# Build the project +cargo build + +# Build in release mode +cargo build --release + +# Run tests +cargo test + +# Run specific test +cargo test <test_name> + +# Run tests with output +cargo test -- --nocapture +``` + +### Running the Application +```bash +# Build and run +cargo run -- <subcommand> + +# Examples: +cargo run -- auth login +cargo run -- run "cargo bench" +cargo run -- setup +``` + +### Code Quality +```bash +# Check code without building +cargo check + +# Format code +cargo fmt + +# Run linter +cargo clippy +``` + +## Architecture + +The application follows a modular structure: + +### Core Modules +- **`main.rs`**: Entry point with error handling and logging setup +- **`app.rs`**: CLI definition using clap with subcommands (Run, Auth, Setup) +- **`api_client.rs`**: CodSpeed GraphQL API client +- **`auth.rs`**: Authentication management +- **`config.rs`**: Configuration loading and management + +### Run Module (`src/run/`) +The core functionality for running benchmarks: +- **`run_environment/`**: CI provider implementations (GitHub Actions, GitLab CI, Buildkite, local) +- **`runner/`**: Execution modes: + - **`valgrind/`**: Instrumentation mode using custom Valgrind + - **`wall_time/perf/`**: Walltime mode with perf integration +- **`uploader/`**: Results upload to CodSpeed + +### Key Dependencies +- `clap`: CLI framework with derive macros +- `tokio`: Async runtime (current_thread flavor) +- `reqwest`: HTTP client with 
middleware/retry +- `serde`/`serde_json`: Serialization +- `gql_client`: Custom GraphQL client +- Platform-specific: `procfs` (Linux), `linux-perf-data` + +## Environment Variables + +- `CODSPEED_LOG`: Set logging level (debug, info, warn, error) +- `CODSPEED_API_URL`: Override API endpoint (default: https://gql.codspeed.io/) +- `CODSPEED_OAUTH_TOKEN`: Authentication token + +## Testing + +The project uses: +- Standard Rust `cargo test` +- `insta` for snapshot testing +- `rstest` for parameterized tests +- `temp-env` for environment variable testing + +Test files include snapshots in `snapshots/` directories for various run environment providers. diff --git a/src/run/mod.rs b/src/run/mod.rs index ccdce8c5..fa033aef 100644 --- a/src/run/mod.rs +++ b/src/run/mod.rs @@ -50,9 +50,9 @@ pub enum UnwindingMode { #[derive(Args, Debug, Clone)] pub struct PerfRunArgs { - /// Enable the performance runner, which uses `perf` to collect performance data. + /// Enable the linux perf profiler to collect granular performance data. /// This is only supported on Linux. - #[arg(long, env = "CODSPEED_PERF_ENABLED", default_value_t = false)] + #[arg(long, env = "CODSPEED_PERF_ENABLED", default_value_t = true)] enable_perf: bool, /// The unwinding mode that should be used with perf to collect the call stack. @@ -88,7 +88,7 @@ pub struct RunArgs { pub working_directory: Option<String>, /// The mode to run the benchmarks in. - #[arg(long, default_value_t, value_enum, env = "CODSPEED_RUNNER_MODE")] + #[arg(long, value_enum, env = "CODSPEED_RUNNER_MODE")] pub mode: RunnerMode, /// Comma-separated list of instruments to enable. Possible values: mongodb. 
@@ -133,10 +133,9 @@ pub struct RunArgs { pub command: Vec<String>, } -#[derive(ValueEnum, Clone, Default, Debug, Serialize)] +#[derive(ValueEnum, Clone, Debug, Serialize)] #[serde(rename_all = "lowercase")] pub enum RunnerMode { - #[default] Instrumentation, Walltime, } diff --git a/src/run/runner/wall_time/perf/mod.rs b/src/run/runner/wall_time/perf/mod.rs index 6d2d95c2..54fda1d6 100644 --- a/src/run/runner/wall_time/perf/mod.rs +++ b/src/run/runner/wall_time/perf/mod.rs @@ -206,7 +206,13 @@ impl PerfRunner { harvest_perf_maps_for_pids(profile_folder, &perf_map_pids).await?; // Append perf maps, unwind info and other metadata - bench_data.save_to(profile_folder).unwrap(); + if let Err(BenchmarkDataSaveError::MissingIntegration) = bench_data.save_to(profile_folder) + { + warn!( + "Perf is enabled, but failed to detect benchmarks. If you wish to disable this warning, set CODSPEED_PERF_ENABLED=false" + ); + return Ok(()); + } let elapsed = start.elapsed(); debug!("Perf teardown took: {elapsed:?}"); @@ -371,8 +377,16 @@ pub struct BenchmarkData { unwind_data_by_pid: HashMap<u32, Vec<u8>>, } +#[derive(Debug)] +pub enum BenchmarkDataSaveError { + MissingIntegration, +} + impl BenchmarkData { - pub fn save_to<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> { + pub fn save_to<P: AsRef<Path>>( + &self, + path: P, + ) -> Result<(), BenchmarkDataSaveError> { for proc_sym in self.symbols_by_pid.values() { proc_sym.save_to(&path).unwrap(); } @@ -387,7 +401,7 @@ impl BenchmarkData { integration: self .integration .clone() - .context("Couldn't find integration metadata")?, + .ok_or(BenchmarkDataSaveError::MissingIntegration)?, bench_order_by_pid: self.bench_order_by_pid.clone(), ignored_modules: { let mut to_ignore = vec![];