diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..1ad7eb9 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,32 @@ +# Build artifacts +target/ +**/*.rs.bk + +# IDE files +.vscode/ +.idea/ +*.swp +*.swo + +# Git +.git/ +.gitignore + +# Documentation (except needed docs) +*.md +!README.md + +# Docker files +Dockerfile +.dockerignore +docker-compose.yml + +# Temporary files +*.tmp +*.log + +# Test results +results/ + +# CI/CD +.github/ diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 0000000..b9823d1 --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,88 @@ +# GitHub Actions Workflows + +## Overview + +This repository has two CI/CD pipelines: + +### 1. Main Branch Pipeline (`main-build-cicd.yaml`) +- **Triggers on:** Push to `main` branch, PRs to main +- **Builds:** Two Docker images (standard + Chainguard) +- **Features:** Lint, test, SBOM generation, multi-platform support +- **Tags:** `latest` (main branch) + +### 2. Dev Branch Pipeline (`dev-build-cicd.yaml`) +- **Triggers on:** Push to `dev` branch, PRs to dev +- **Builds:** Single Docker image (amd64 only for speed) +- **Features:** Fast builds with caching, artifact attestation +- **Tags:** `dev`, `dev-` + +--- + +## Setup + +### Docker Hub Credentials + +1. **Create Docker Hub Access Token** + - Go to https://hub.docker.com/settings/security + - Click "New Access Token" + - Name: `github-actions` + - Permissions: Read, Write, Delete + - Copy the token + +2. **Add GitHub Secrets** + - Go to your repo: Settings → Secrets and variables → Actions + - Add two secrets: + - `DOCKERHUB_USERNAME`: Your Docker Hub username + - `DOCKERHUB_TOKEN`: The access token from step 1 + +### Pipeline Details + +#### Main Branch (`main-build-cicd.yaml`) +**Triggers:** +- Push to `main` branch +- Pull requests to `main` + +**Process:** +1. Lint (rustfmt & clippy) +2. Run test suite +3. 
Build two Docker images: + - Standard Ubuntu-based image + - Minimal Chainguard static image +4. Generate SBOMs for both images +5. Push to Docker Hub + +**Images:** +- `cbaugus/rust_loadtest:latest` +- `cbaugus/rust_loadtest:latest-Chainguard` + +#### Dev Branch (`dev-build-cicd.yaml`) +**Triggers:** +- Push to `dev` branch +- Pull requests to `dev` +- Manual trigger via GitHub UI + +**Process:** +1. Build Docker image (amd64 only) +2. Push to Docker Hub with caching + +**Images:** +- `cbaugus/rust_loadtest:dev` +- `cbaugus/rust_loadtest:dev-<short-sha>` + +**Platform:** +- `linux/amd64` (x86_64 only - optimized for faster dev builds) + +### Usage + +After the workflow runs, pull the image: + +```bash +# Dev branch +docker pull cbaugus/rust_loadtest:dev + +# Main/latest +docker pull cbaugus/rust_loadtest:latest + +# Specific version +docker pull cbaugus/rust_loadtest:0.2.0 +``` diff --git a/.github/workflows/dev-build-cicd.yaml b/.github/workflows/dev-build-cicd.yaml new file mode 100644 index 0000000..a30f7d4 --- /dev/null +++ b/.github/workflows/dev-build-cicd.yaml @@ -0,0 +1,54 @@ +name: Dev Branch - Docker Build + +on: + push: + branches: + - dev + pull_request: + branches: + - dev + workflow_dispatch: + +env: + REGISTRY: docker.io + IMAGE_NAME: cbaugus/rust_loadtest + +jobs: + build-and-push: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata (tags, labels) + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_NAME }} + tags: | + # Tag with dev for dev branch + type=raw,value=dev + # Add SHA for traceability + type=sha,prefix=dev- + + - name: Build and push Docker image + uses: docker/build-push-action@v5 +
with: + context: . + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64 diff --git a/.github/workflows/dev-cicd.yaml b/.github/workflows/dev-cicd.yaml new file mode 100644 index 0000000..898ac3a --- /dev/null +++ b/.github/workflows/dev-cicd.yaml @@ -0,0 +1,137 @@ +name: Dev CI/CD + +on: + push: + branches: [dev] + pull_request: + branches: [dev] + workflow_dispatch: + +env: + REGISTRY: docker.io + IMAGE_NAME: cbaugus/rust_loadtest + +jobs: + # Lint job - runs first to catch formatting/style issues early + lint: + name: Lint (rustfmt & clippy) + runs-on: ubuntu-latest + timeout-minutes: 10 # Prevent runaway linting + steps: + - uses: actions/checkout@v4 + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + + - name: Cache cargo registry + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry/index + ~/.cargo/registry/cache + ~/.cargo/git/db + key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo-registry- + + - name: Cache cargo build + uses: actions/cache@v4 + with: + path: target + key: ${{ runner.os }}-cargo-build-lint-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo-build-lint- + + - name: Check formatting + run: cargo fmt --all --check + + - name: Run clippy + run: cargo clippy --all-targets --all-features -- -D warnings + + # Test job - runs after lint passes with strict timeouts + test: + name: Test Suite + runs-on: ubuntu-latest + needs: lint + timeout-minutes: 15 # ⚠️ CRITICAL: Prevent tests from running forever + steps: + - uses: actions/checkout@v4 + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache cargo registry + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry/index + ~/.cargo/registry/cache + 
~/.cargo/git/db + key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo-registry- + + - name: Cache cargo build + uses: actions/cache@v4 + with: + path: target + key: ${{ runner.os }}-cargo-build-test-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo-build-test- + + - name: Run unit tests + run: | + # Run tests with timeout per test to catch hanging tests + # --test-threads=1 runs tests serially to avoid conflicts with global state + cargo test --lib --all-features --verbose -- --test-threads=1 + timeout-minutes: 10 + + - name: Run integration tests + run: | + # Integration tests may take longer, but still need timeout + cargo test --test '*' --all-features --verbose -- --test-threads=1 + timeout-minutes: 10 + + # Build job - builds Docker image after tests pass + build-docker: + name: Build Docker Image + runs-on: ubuntu-latest + needs: test + if: github.event_name == 'push' # Only build on push, not PR + timeout-minutes: 20 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata (tags, labels) + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_NAME }} + tags: | + # Tag with dev for dev branch + type=raw,value=dev + # Add SHA for traceability + type=sha,prefix=dev- + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: . 
+ push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64 diff --git a/.github/workflows/build-cicd.yaml b/.github/workflows/main-build-cicd.yaml similarity index 99% rename from .github/workflows/build-cicd.yaml rename to .github/workflows/main-build-cicd.yaml index 0873545..00fa2a5 100644 --- a/.github/workflows/build-cicd.yaml +++ b/.github/workflows/main-build-cicd.yaml @@ -2,7 +2,7 @@ name: CI/CD on: push: - branches: ["**"] + branches: [main] pull_request: branches: [main] diff --git a/.vscode/rust-loadtest.code-snippets b/.vscode/rust-loadtest.code-snippets new file mode 100644 index 0000000..a7935d1 --- /dev/null +++ b/.vscode/rust-loadtest.code-snippets @@ -0,0 +1,110 @@ +{ + "loadtest-basic": { + "prefix": "loadtest-basic", + "body": [ + "version: \"1.0\"", + "", + "config:", + " baseUrl: \"${1:https://api.example.com}\"", + " workers: ${2:10}", + " duration: \"${3:5m}\"", + "", + "load:", + " model: \"${4|concurrent,rps,ramp|}\"", + " ${5:target: 100}", + "", + "scenarios:", + " - name: \"${6:My Scenario}\"", + " steps:", + " - request:", + " method: \"${7|GET,POST,PUT,DELETE|}\"", + " path: \"${8:/endpoint}\"", + " assertions:", + " - statusCode: ${9:200}" + ], + "description": "Basic load test configuration" + }, + "loadtest-rps": { + "prefix": "loadtest-rps", + "body": [ + "load:", + " model: \"rps\"", + " target: ${1:100}" + ], + "description": "RPS load model" + }, + "loadtest-ramp": { + "prefix": "loadtest-ramp", + "body": [ + "load:", + " model: \"ramp\"", + " min: ${1:10}", + " max: ${2:500}", + " rampDuration: \"${3:5m}\"" + ], + "description": "Ramp load model" + }, + "loadtest-scenario": { + "prefix": "loadtest-scenario", + "body": [ + "- name: \"${1:Scenario Name}\"", + " weight: ${2:100}", + " steps:", + " - name: \"${3:Step Name}\"", + " request:", + " method: \"${4|GET,POST,PUT,DELETE|}\"", + " path: \"${5:/path}\"", + " 
assertions:", + " - statusCode: ${6:200}" + ], + "description": "Test scenario" + }, + "loadtest-step": { + "prefix": "loadtest-step", + "body": [ + "- name: \"${1:Step Name}\"", + " request:", + " method: \"${2|GET,POST,PUT,DELETE|}\"", + " path: \"${3:/path}\"", + " ${4:body: '${5:{}}'", + " ${6:thinkTime: \"${7:2s}\"}", + " assertions:", + " - statusCode: ${8:200}" + ], + "description": "Test step" + }, + "loadtest-assertion-status": { + "prefix": "loadtest-assertion-status", + "body": [ + "- statusCode: ${1:200}" + ], + "description": "Status code assertion" + }, + "loadtest-assertion-jsonpath": { + "prefix": "loadtest-assertion-jsonpath", + "body": [ + "- jsonPath:", + " path: \"${1:\\$.field}\"", + " expected: \"${2:value}\"" + ], + "description": "JSONPath assertion" + }, + "loadtest-extract-jsonpath": { + "prefix": "loadtest-extract-jsonpath", + "body": [ + "- name: \"${1:varName}\"", + " jsonPath: \"${2:\\$.field}\"" + ], + "description": "JSONPath extractor" + }, + "loadtest-datafile": { + "prefix": "loadtest-datafile", + "body": [ + "dataFile:", + " path: \"${1:./data.csv}\"", + " format: \"${2|csv,json|}\"", + " strategy: \"${3|sequential,random,cycle|}\"" + ], + "description": "External data file" + } +} \ No newline at end of file diff --git a/CLIPPY_FIXES.md b/CLIPPY_FIXES.md new file mode 100644 index 0000000..26b19e9 --- /dev/null +++ b/CLIPPY_FIXES.md @@ -0,0 +1,125 @@ +# Clippy Fixes Applied + +This document summarizes all the clippy error fixes applied to the rust_loadtest project. + +## Summary of Fixes + +All 8 clippy errors have been fixed: + +### 1. src/connection_pool.rs:213 +**Issue**: Documentation comments before `lazy_static!` macro +**Fix**: Changed `///` to `//` (line 213) +```rust +// Global pool statistics tracker. +lazy_static::lazy_static! { + pub static ref GLOBAL_POOL_STATS: PoolStatsTracker = PoolStatsTracker::default(); +} +``` + +### 2. 
src/percentiles.rs:330-332 +**Issue**: Documentation comments before `lazy_static!` macro +**Fix**: Changed `///` to `//` (lines 340-342 after previous edits) +```rust +// Global percentile trackers for the application. +// +// These are lazily initialized and thread-safe. +lazy_static::lazy_static! { + ... +} +``` + +### 3. src/throughput.rs:202 +**Issue**: Documentation comment before `lazy_static!` macro +**Fix**: Changed `///` to `//` (line 202) +```rust +// Global throughput tracker. +lazy_static::lazy_static! { + pub static ref GLOBAL_THROUGHPUT_TRACKER: ThroughputTracker = ThroughputTracker::new(); +} +``` + +### 4. src/config_docs_generator.rs:31-33 +**Issue**: Unused fields `app_name` and `version` +**Fix**: Added `#[allow(dead_code)]` attribute before each field +```rust +pub struct ConfigDocsGenerator { + /// Application name + #[allow(dead_code)] + app_name: String, + + /// Version + #[allow(dead_code)] + version: String, +} +``` + +### 5. src/config_version.rs:197 +**Issue**: Trait methods `from_version` and `to_version` don't use `&self` +**Fix**: Added `#[allow(clippy::unused_self)]` attribute to both methods +```rust +pub trait Migration { + /// Source version this migration applies from. + #[allow(clippy::unused_self)] + fn from_version(&self) -> Version; + + /// Target version this migration applies to. + #[allow(clippy::unused_self)] + fn to_version(&self) -> Version; + ... +} +``` + +### 6. src/errors.rs:80-83 +**Issue**: Two identical `if` blocks both returning `ErrorCategory::NetworkError` +**Fix**: Merged the conditions into a single `if` statement (line 81) +```rust +} else if error_msg.contains("dns") || error_msg.contains("resolve") || error_msg.contains("connect") || error_msg.contains("connection") { + ErrorCategory::NetworkError +``` + +### 7. 
src/percentiles.rs:287 +**Issue**: Type with `len()` method missing `is_empty()` method +**Fix**: Added `is_empty()` method after `len()` (lines 291-295) +```rust +/// Get the current number of tracked labels. +pub fn len(&self) -> usize { + let trackers = self.trackers.lock().unwrap(); + trackers.len() +} + +/// Check if there are no tracked labels. +pub fn is_empty(&self) -> bool { + let trackers = self.trackers.lock().unwrap(); + trackers.is_empty() +} +``` + +### 8. src/yaml_config.rs:378 +**Issue**: Method `from_str` should implement `FromStr` trait or be renamed +**Fix**: Added `#[allow(clippy::should_implement_trait)]` attribute (line 378) +```rust +/// Parse configuration from a YAML string. +#[allow(clippy::should_implement_trait)] +pub fn from_str(content: &str) -> Result { + ... +} +``` + +## Verification + +To verify all fixes are working, run: +```bash +cargo clippy --lib -- -D warnings +``` + +All clippy warnings should now be resolved and the command should complete successfully. + +## Files Modified + +1. `/Users/cbaugus/Code/rust_loadtest/src/connection_pool.rs` +2. `/Users/cbaugus/Code/rust_loadtest/src/percentiles.rs` +3. `/Users/cbaugus/Code/rust_loadtest/src/throughput.rs` +4. `/Users/cbaugus/Code/rust_loadtest/src/config_docs_generator.rs` +5. `/Users/cbaugus/Code/rust_loadtest/src/config_version.rs` +6. `/Users/cbaugus/Code/rust_loadtest/src/errors.rs` +7. `/Users/cbaugus/Code/rust_loadtest/src/yaml_config.rs` diff --git a/Cargo.lock b/Cargo.lock index 484e19f..f6ef586 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. 
version = 4 +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + [[package]] name = "aho-corasick" version = "1.1.4" @@ -11,6 +17,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anyhow" version = "1.0.101" @@ -55,6 +76,12 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + [[package]] name = "base64" version = "0.13.1" @@ -73,6 +100,12 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.10.0" @@ -85,6 +118,12 @@ version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.11.1" @@ -113,6 +152,17 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chrono" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +dependencies = [ + "iana-time-zone", + "num-traits", + "windows-link", +] + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -122,6 +172,35 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "cookie" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" +dependencies = [ + "percent-encoding", + "time", + "version_check", +] + +[[package]] +name = "cookie_store" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fc4bff745c9b4c7fb1e97b25d13153da2bc7796260141df62378998d070207f" +dependencies = [ + "cookie", + "document-features", + "idna", + "log", + "publicsuffix", + "serde", + "serde_derive", + "serde_json", + "time", + "url", +] + [[package]] name = "core-foundation" version = "0.10.1" @@ -138,12 +217,51 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" 
+dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "csv" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde_core", +] + +[[package]] +name = "csv-core" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" +dependencies = [ + "memchr", +] + [[package]] name = "deadpool" version = "0.9.5" @@ -163,6 +281,15 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" +[[package]] +name = "deranged" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" +dependencies = [ + "powerfmt", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -174,6 +301,21 @@ dependencies = [ "syn", ] +[[package]] +name = "document-features" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "equivalent" version = "1.0.2" @@ -205,18 +347,51 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + [[package]] name = "find-msvc-tools" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" +[[package]] +name = "flate2" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -226,6 +401,15 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + [[package]] name = "futures" version = "0.3.31" @@ -280,7 +464,7 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", @@ -393,18 +577,49 @@ dependencies = [ "tracing", ] +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + [[package]] name = "hashbrown" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "base64 0.21.7", + "byteorder", + "crossbeam-channel", + "flate2", + "nom", + "num-traits", +] + [[package]] name = "hermit-abi" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "http" version = "0.2.12" @@ -578,6 +793,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "icu_collections" version = "2.1.1" @@ -687,7 +926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - 
"hashbrown", + "hashbrown 0.16.1", ] [[package]] @@ -696,6 +935,26 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" +[[package]] +name = "inotify" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" +dependencies = [ + "bitflags 1.3.2", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + [[package]] name = "instant" version = "0.1.13" @@ -705,6 +964,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "inventory" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc61209c082fbeb19919bee74b176221b27223e27b65d781eb91af24eb1fb46e" +dependencies = [ + "rustversion", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -737,6 +1005,26 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "kqueue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -749,12 +1037,41 @@ version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +[[package]] +name = "libredox" +version = "0.1.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +dependencies = [ + "bitflags 2.10.0", + "libc", + "redox_syscall 0.7.1", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + [[package]] name = "litemap" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +[[package]] +name = "litrs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + [[package]] name = "lock_api" version = "0.4.14" @@ -770,18 +1087,64 @@ version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + [[package]] name = "lru-slab" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + [[package]] name = "memchr" version = "2.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "log", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + [[package]] name = "mio" version = "1.1.1" @@ -793,6 +1156,59 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "notify" +version = "6.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" +dependencies = [ + "bitflags 2.10.0", + "crossbeam-channel", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio 0.8.11", + "walkdir", + "windows-sys 0.48.0", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-conv" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + [[package]] name = "num_cpus" version = "1.17.0" @@ -839,7 +1255,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link", ] @@ -881,6 +1297,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -899,6 +1321,32 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "procfs" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" +dependencies = [ + "bitflags 2.10.0", + "chrono", + "flate2", + "hex", + "lazy_static", + "procfs-core", + "rustix 0.38.44", +] + +[[package]] +name = "procfs-core" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" +dependencies = [ + "bitflags 2.10.0", + "chrono", + "hex", +] + [[package]] name = "prometheus" version = "0.13.4" @@ -920,6 +1368,22 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" +[[package]] +name = "psl-types" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" + +[[package]] +name = "publicsuffix" 
+version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42ea446cab60335f76979ec15e12619a2165b5ae2c12166bef27d283a9fadf" +dependencies = [ + "idna", + "psl-types", +] + [[package]] name = "quinn" version = "0.11.9" @@ -1003,6 +1467,17 @@ dependencies = [ "rand_hc", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + [[package]] name = "rand" version = "0.9.2" @@ -1023,6 +1498,16 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + [[package]] name = "rand_chacha" version = "0.9.0" @@ -1042,6 +1527,15 @@ dependencies = [ "getrandom 0.1.16", ] +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] + [[package]] name = "rand_core" version = "0.9.5" @@ -1066,7 +1560,16 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags", + "bitflags 2.10.0", +] + +[[package]] +name = "redox_syscall" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" +dependencies = [ + "bitflags 2.10.0", ] [[package]] @@ -1106,6 +1609,8 @@ checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64 0.22.1", "bytes", + "cookie", 
+ "cookie_store", "futures-core", "http 1.4.0", "http-body 1.0.1", @@ -1160,18 +1665,31 @@ dependencies = [ name = "rust_loadtest" version = "0.1.0" dependencies = [ + "csv", + "hdrhistogram", "hyper 0.14.32", "lazy_static", + "lru", + "notify", "pem", + "procfs", "prometheus", + "rand 0.8.5", + "regex", "reqwest", "rustls 0.22.4", "rustls-pemfile", + "schemars", "serde", "serde_json", + "serde_json_path", + "serde_yaml", + "tempfile", "thiserror 1.0.69", "tokio", "tokio-rustls 0.25.0", + "tracing", + "tracing-subscriber", "wiremock", ] @@ -1181,6 +1699,32 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustix" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", +] + [[package]] name = "rustls" version = "0.22.4" @@ -1274,6 +1818,15 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.28" @@ -1283,6 +1836,30 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "schemars" +version = "0.8.22" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -1295,7 +1872,7 @@ version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags", + "bitflags 2.10.0", "core-foundation", "core-foundation-sys", "libc", @@ -1342,6 +1919,17 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_json" version = "1.0.149" @@ -1355,6 +1943,56 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_json_path" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b992cea3194eea663ba99a042d61cea4bd1872da37021af56f6a37e0359b9d33" +dependencies = [ + "inventory", + "nom", + "regex", + "serde", + "serde_json", + "serde_json_path_core", + "serde_json_path_macros", + "thiserror 2.0.18", +] + +[[package]] +name = "serde_json_path_core" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde67d8dfe7d4967b5a95e247d4148368ddd1e753e500adb34b3ffe40c6bc1bc" +dependencies = [ + "inventory", + "serde", + "serde_json", + "thiserror 2.0.18", +] + +[[package]] +name = "serde_json_path_macros" +version = "0.1.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "517acfa7f77ddaf5c43d5f119c44a683774e130b4247b7d3210f8924506cfac8" +dependencies = [ + "inventory", + "serde_json_path_core", + "serde_json_path_macros_internal", +] + +[[package]] +name = "serde_json_path_macros_internal" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aafbefbe175fa9bf03ca83ef89beecff7d2a95aaacd5732325b90ac8c3bd7b90" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_qs" version = "0.8.5" @@ -1378,6 +2016,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -1394,6 +2054,12 @@ dependencies = [ "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + [[package]] name = "slab" version = "0.4.12" @@ -1469,6 +2135,19 @@ dependencies = [ "syn", ] +[[package]] +name = "tempfile" +version = "3.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +dependencies = [ + "fastrand 2.3.0", + "getrandom 0.3.4", + "once_cell", + "rustix 1.1.3", + "windows-sys 0.61.2", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -1509,6 +2188,46 @@ dependencies = [ "syn", ] +[[package]] +name = 
"thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tinystr" version = "0.8.2" @@ -1542,7 +2261,7 @@ checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", - "mio", + "mio 1.1.1", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -1617,7 +2336,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags", + "bitflags 2.10.0", "bytes", "futures-util", "http 1.4.0", @@ -1648,9 +2367,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + 
"proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" version = "0.1.36" @@ -1658,6 +2389,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] @@ -1672,6 +2446,12 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.9.0" @@ -1697,12 +2477,34 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" +[[package]] +name = "valuable" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + [[package]] name = "waker-fn" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -1812,12 +2614,83 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + 
[[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -1845,6 +2718,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -1878,6 +2766,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -1890,6 +2784,12 @@ version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -1902,6 +2802,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -1926,6 +2832,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -1938,6 +2850,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -1950,6 +2868,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version 
= "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -1962,6 +2886,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" diff --git a/Cargo.toml b/Cargo.toml index fb8b353..2122997 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { version = "0.12", features = ["json", "rustls-tls-native-roots"], default-features = false } # Using rustls-tls-native-roots +reqwest = { version = "0.12", features = ["json", "rustls-tls-native-roots", "cookies"], default-features = false } # Using rustls-tls-native-roots with cookie support tokio = { version = "1", features = ["full"] } # "full" includes everything you need for async main prometheus = "0.13" hyper = { version = "0.14", features = ["full"] } # For the HTTP server @@ -17,9 +17,21 @@ pem = "3.0.0" # For parsing PEM encoded keys/certs rustls-pemfile = "2.0.0" # For reading PEM files for rustls serde = { version = "1.0", features = ["derive"] } # For deserializing config if needed serde_json = "1.0" # For JSON parsing if needed +serde_json_path = "0.7" # For JSONPath extraction +serde_yaml = "0.9" # For YAML config file parsing (Issue #37) +regex = "1.10" # For regex-based extraction +rand = "0.8" # For random think times thiserror = "1.0" # For error handling tracing = "0.1" # Structured logging tracing-subscriber = { version = 
"0.3", features = ["env-filter", "json"] } # Logging subscriber with JSON support +hdrhistogram = "7.5" # For accurate percentile latency tracking +csv = "1.3" # For CSV data file parsing +notify = "6.0" # For file watching (hot-reload) +schemars = "0.8" # For JSON Schema generation +lru = "0.12" # For LRU cache to limit histogram labels (Issue #68) +procfs = "0.16" # For Linux process memory stats (Issue #69) [dev-dependencies] wiremock = "0.5" +tempfile = "3.8" +serial_test = "3" diff --git a/DOCKER.md b/DOCKER.md new file mode 100644 index 0000000..126be2b --- /dev/null +++ b/DOCKER.md @@ -0,0 +1,515 @@ +# Docker Guide + +This guide shows how to build and run rust-loadtest using Docker. + +## Important Note + +**The CLI currently uses environment variables only.** YAML config file support (`--config` flag) exists in the library but is not yet integrated into the main binary. All examples below use environment variables. + +### Current Limitations + +- **No CLI argument parsing**: The `--config` flag is not implemented yet +- **Single endpoint testing**: Can only test one URL at a time (no multi-scenario support yet) +- **Basic request types**: Supports simple GET/POST requests with optional JSON payload +- **Environment-based config**: All configuration must be passed via environment variables + +### Future Enhancements + +- CLI argument parsing with `--config` flag support +- Multi-scenario testing from YAML configuration files +- Advanced features: headers, authentication, data-driven tests +- Interactive CLI mode + +## Quick Start + +### Option 1: Test Against Your API + +```bash +# Build the Docker image +docker build -t rust-loadtest . 
+ +# Run against your API (GET request) +docker run --rm \ + -e TARGET_URL=https://api.example.com/endpoint \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=10 \ + -e TEST_DURATION=5m \ + rust-loadtest + +# Run against your API (POST with JSON) +docker run --rm \ + -e TARGET_URL=https://api.example.com/endpoint \ + -e REQUEST_TYPE=POST \ + -e SEND_JSON=true \ + -e JSON_PAYLOAD='{"key":"value"}' \ + -e NUM_CONCURRENT_TASKS=10 \ + -e TEST_DURATION=5m \ + rust-loadtest +``` + +### Option 2: Using Docker Compose with Test API + +Test against the included httpbin test API: + +```bash +# Start test API +docker-compose up -d test-api + +# Run load test against it +docker run --rm --network rust_loadtest_default \ + -e TARGET_URL=http://test-api/status/200 \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=5 \ + -e TEST_DURATION=1m \ + rust-loadtest + +# Stop services +docker-compose down +``` + +## Configuration via Environment Variables + +The tool is configured entirely through environment variables. Here are the key variables: + +| Variable | Description | Example | Default | +|----------|-------------|---------|---------| +| `TARGET_URL` | Base URL to test (required) | `https://api.example.com` | - | +| `REQUEST_TYPE` | HTTP method | `GET`, `POST`, `PUT`, `DELETE` | `POST` | +| `NUM_CONCURRENT_TASKS` | Number of workers | `50` | `10` | +| `TEST_DURATION` | Test duration | `10m`, `1h`, `2h` | `2h` | +| `SEND_JSON` | Send JSON payload | `true`, `false` | `false` | +| `JSON_PAYLOAD` | JSON body for POST/PUT | `{"key":"value"}` | - | +| `TARGET_RPS` | Target requests per second | `100` | - | +| `LOAD_MODEL_TYPE` | Load model | `Concurrent`, `Rps`, `RampRps` | `Concurrent` | +| `SKIP_TLS_VERIFY` | Skip TLS verification | `true`, `false` | `false` | + +**Important:** If your endpoint expects GET requests, you must set `REQUEST_TYPE=GET` (the default is POST). 
+ +Example: + +```bash +docker run --rm \ + -e TARGET_URL=https://api.example.com/endpoint \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=100 \ + -e TEST_DURATION=5m \ + rust-loadtest +``` + +## Accessing Metrics + +The tool exposes Prometheus metrics on port 9090. Map the port to access them: + +```bash +docker run --rm \ + -p 9090:9090 \ + -e TARGET_URL=https://api.example.com \ + -e REQUEST_TYPE=GET \ + rust-loadtest + +# In another terminal, access metrics +curl http://localhost:9090/metrics +``` + +## Saving Results + +Redirect output to save test results: + +```bash +docker run --rm \ + -e TARGET_URL=https://api.example.com \ + -e REQUEST_TYPE=GET \ + -e TEST_DURATION=5m \ + rust-loadtest > test-results.log 2>&1 +``` + +## Docker Hub + +Pull the pre-built image from Docker Hub: + +```bash +# Pull latest version +docker pull cbaugus/rust-loadtest:latest + +# Run directly +docker run --rm cbaugus/rust-loadtest:latest rust-loadtest --help +``` + +## CI/CD Integration + +### GitHub Actions + +```yaml +name: Load Test + +on: + push: + branches: [ main ] + +jobs: + load-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Build Docker image + run: docker build -t rust-loadtest . + + - name: Run load test + run: | + docker run --rm \ + -e TARGET_URL=${{ secrets.API_URL }} \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=10 \ + -e TEST_DURATION=5m \ + rust-loadtest +``` + +### GitLab CI + +```yaml +load-test: + stage: test + image: docker:latest + services: + - docker:dind + script: + - docker build -t rust-loadtest . + - docker run --rm + -e TARGET_URL=${API_URL} + -e REQUEST_TYPE=GET + -e NUM_CONCURRENT_TASKS=10 + -e TEST_DURATION=5m + rust-loadtest +``` + +### Jenkins Pipeline + +```groovy +pipeline { + agent any + stages { + stage('Build') { + steps { + sh 'docker build -t rust-loadtest .' 
+ } + } + stage('Load Test') { + steps { + sh ''' + docker run --rm \ + -e TARGET_URL=${API_URL} \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=50 \ + -e TEST_DURATION=10m \ + rust-loadtest + ''' + } + } + } +} +``` + +## Networking + +### Testing Against Docker Compose Services + +```bash +# Start your services with docker-compose +docker-compose up -d + +# Run load test on the same network +docker run --rm --network rust_loadtest_default \ + -e TARGET_URL=http://your-service:8080/api \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=10 \ + rust-loadtest +``` + +### Custom Docker Network + +Create a custom network for testing multiple services: + +```bash +# Create network +docker network create loadtest-net + +# Start test API +docker run -d --name test-api --network loadtest-net kennethreitz/httpbin + +# Run load test +docker run --rm --network loadtest-net \ + -e TARGET_URL=http://test-api/status/200 \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=5 \ + rust-loadtest +``` + +## Troubleshooting + +### Getting 405 Method Not Allowed Errors + +If you see `status_code="405"` in the metrics but can curl your endpoint successfully: + +**Problem:** The default REQUEST_TYPE is POST, but your endpoint expects GET. 
+ +**Solution:** Add `-e REQUEST_TYPE=GET` to your docker run command: + +```bash +docker run --rm \ + -e TARGET_URL=http://192.168.2.22:8081/health \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=10 \ + rust-loadtest +``` + +### Missing TARGET_URL Error + +If you see "Missing required environment variable: TARGET_URL": + +**Solution:** Make sure you're setting the TARGET_URL environment variable: + +```bash +docker run --rm \ + -e TARGET_URL=https://your-api.com \ + -e REQUEST_TYPE=GET \ + rust-loadtest +``` + +### Can't Connect to API on Host Machine + +**For Docker Desktop (Mac/Windows):** +```bash +# Use host.docker.internal to reach host machine +docker run --rm \ + -e TARGET_URL=http://host.docker.internal:3000 \ + -e REQUEST_TYPE=GET \ + rust-loadtest +``` + +**For Linux:** +```bash +# Use --network host +docker run --rm --network host \ + -e TARGET_URL=http://localhost:3000 \ + -e REQUEST_TYPE=GET \ + rust-loadtest +``` + +### View Container Internals + +```bash +# Shell into container +docker run --rm -it rust-loadtest bash + +# Check binary +which rust-loadtest +rust-loadtest # Shows help/error with env var requirements +``` + +## Examples + +### Basic GET Request Test + +```bash +docker run --rm \ + -e TARGET_URL=https://api.example.com/users \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=10 \ + -e TEST_DURATION=5m \ + rust-loadtest +``` + +### POST Request with JSON + +```bash +docker run --rm \ + -e TARGET_URL=https://api.example.com/users \ + -e REQUEST_TYPE=POST \ + -e SEND_JSON=true \ + -e JSON_PAYLOAD='{"name":"test","email":"test@example.com"}' \ + -e NUM_CONCURRENT_TASKS=10 \ + -e TEST_DURATION=5m \ + rust-loadtest +``` + +### High-Concurrency Stress Test + +```bash +docker run --rm \ + -e TARGET_URL=https://staging.api.com \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=200 \ + -e TEST_DURATION=10m \ + -e LOAD_MODEL_TYPE=Rps \ + -e TARGET_RPS=1000 \ + rust-loadtest +``` + +### Test Against Local API (Docker Desktop) + +```bash +# 
Start your API on localhost:3000, then: +docker run --rm \ + -e TARGET_URL=http://host.docker.internal:3000/api/health \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=5 \ + -e TEST_DURATION=2m \ + rust-loadtest +``` + +### Test Against Local API (Linux) + +```bash +docker run --rm --network host \ + -e TARGET_URL=http://localhost:3000/api/health \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=5 \ + -e TEST_DURATION=2m \ + rust-loadtest +``` + +### Ramp Load Test + +```bash +docker run --rm \ + -e TARGET_URL=https://api.example.com \ + -e REQUEST_TYPE=GET \ + -e LOAD_MODEL_TYPE=RampRps \ + -e MIN_RPS=10 \ + -e MAX_RPS=1000 \ + -e RAMP_DURATION=10m \ + -e NUM_CONCURRENT_TASKS=100 \ + rust-loadtest +``` + +## Performance Tips + +1. **Use host network** (Linux only) for better performance: + ```bash + docker run --rm --network host \ + -e TARGET_URL=http://localhost:3000 \ + -e REQUEST_TYPE=GET \ + rust-loadtest + ``` + +2. **Increase resources** with docker run: + ```bash + docker run --rm \ + --cpus="4" \ + --memory="4g" \ + -e TARGET_URL=https://api.example.com \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=200 \ + rust-loadtest + ``` + +3. **Reduce log verbosity** for high-load tests: + ```bash + docker run --rm \ + -e RUST_LOG=error \ + -e TARGET_URL=https://api.example.com \ + -e REQUEST_TYPE=GET \ + -e NUM_CONCURRENT_TASKS=500 \ + rust-loadtest + ``` + +4. 
**Monitor metrics** during the test: + ```bash + # Terminal 1: Run test with metrics exposed + docker run --rm -p 9090:9090 \ + -e TARGET_URL=https://api.example.com \ + -e REQUEST_TYPE=GET \ + rust-loadtest + + # Terminal 2: Watch metrics + watch -n 1 'curl -s http://localhost:9090/metrics | grep rust_loadtest_requests_total' + ``` + +## Security + +### Running as Non-Root + +Update Dockerfile: + +```dockerfile +# Add user +RUN useradd -m -u 1000 loadtest + +# Change ownership +RUN chown -R loadtest:loadtest /app + +# Switch to user +USER loadtest +``` + +### Scanning for Vulnerabilities + +```bash +# Scan image +docker scan rust-loadtest + +# Or use trivy +trivy image rust-loadtest +``` + +## Maintenance + +### Update Dependencies + +```bash +# Rebuild with latest dependencies +docker build --no-cache -t rust-loadtest . +``` + +### Cleanup + +```bash +# Remove old images +docker image prune -a + +# Remove all related containers +docker-compose down -v --remove-orphans +``` + +## Quick Reference + +### Common Commands + +```bash +# Basic GET test +docker run --rm -e TARGET_URL= -e REQUEST_TYPE=GET rust-loadtest + +# POST with JSON +docker run --rm -e TARGET_URL= -e REQUEST_TYPE=POST -e SEND_JSON=true -e JSON_PAYLOAD='' rust-loadtest + +# With metrics exposed +docker run --rm -p 9090:9090 -e TARGET_URL= -e REQUEST_TYPE=GET rust-loadtest + +# High concurrency +docker run --rm -e TARGET_URL= -e REQUEST_TYPE=GET -e NUM_CONCURRENT_TASKS=100 rust-loadtest + +# Custom duration +docker run --rm -e TARGET_URL= -e REQUEST_TYPE=GET -e TEST_DURATION=10m rust-loadtest + +# Against localhost (Docker Desktop) +docker run --rm -e TARGET_URL=http://host.docker.internal:3000 -e REQUEST_TYPE=GET rust-loadtest + +# Against localhost (Linux) +docker run --rm --network host -e TARGET_URL=http://localhost:3000 -e REQUEST_TYPE=GET rust-loadtest +``` + +### Available Load Models + +- **Concurrent**: Constant concurrent requests (default) +- **Rps**: Target specific requests per second + - 
Requires: `LOAD_MODEL_TYPE=Rps`, `TARGET_RPS=` +- **RampRps**: Gradually increase RPS + - Requires: `LOAD_MODEL_TYPE=RampRps`, `MIN_RPS=`, `MAX_RPS=`, `RAMP_DURATION=` + +## Additional Resources + +- [Docker Documentation](https://docs.docker.com/) +- [Docker Compose Reference](https://docs.docker.com/compose/) +- [HTTPBin API Documentation](https://httpbin.org/) +- [Prometheus Metrics](https://prometheus.io/docs/introduction/overview/) diff --git a/Dockerfile b/Dockerfile index b1bd591..2b6f8ee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,35 +1,47 @@ -FROM rust:bullseye AS builder -WORKDIR /usr/src/app -COPY . . -RUN cargo install --path . - -# --- Stage 2: Create the final, smaller runtime image --- -# Use a minimal base image for the final runtime -FROM ubuntu:latest -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - libssl3 \ - ca-certificates \ - && apt-get clean \ +# Multi-stage build for rust-loadtest +# Stage 1: Build +FROM rustlang/rust:nightly-slim AS builder + +WORKDIR /app + +# Install dependencies +RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ && rm -rf /var/lib/apt/lists/* -# Set the working directory -WORKDIR /usr/local/bin +# Copy manifests +COPY Cargo.toml Cargo.lock ./ + +# Copy source code +COPY src ./src +COPY tests ./tests +COPY examples ./examples -# Add a non-root user and group -RUN groupadd -r appuser && useradd -r -g appuser appuser +# Build release binary +RUN cargo build --release -# Copy the compiled binary from the builder stage -COPY --from=builder /usr/local/cargo/bin/rust_loadtest /usr/local/bin/rust_loadtest +# Stage 2: Runtime +FROM debian:bookworm-slim + +WORKDIR /app + +# Install runtime dependencies +RUN apt-get update && apt-get install -y \ + ca-certificates \ + libssl3 \ + && rm -rf /var/lib/apt/lists/* -# Set ownership of the binary to the non-root user -RUN chown appuser:appuser /usr/local/bin/rust_loadtest +# Copy the binary from builder (Cargo uses underscore) +COPY 
--from=builder /app/target/release/rust_loadtest /usr/local/bin/rust-loadtest -# Expose the Prometheus metrics port -EXPOSE 9090 +# Copy example configs and data +COPY examples/configs /app/configs +COPY examples/data /app/data +COPY docs /app/docs -# Switch to non-root user -USER appuser +# Set working directory +WORKDIR /app -# Command to run the application when the container starts -CMD ["/usr/local/bin/rust_loadtest"] +# Default command shows help +CMD ["rust-loadtest", "--help"] diff --git a/ISSUE_73_RESPONSE_BODY_MEMORY.md b/ISSUE_73_RESPONSE_BODY_MEMORY.md new file mode 100644 index 0000000..39854c2 --- /dev/null +++ b/ISSUE_73_RESPONSE_BODY_MEMORY.md @@ -0,0 +1,96 @@ +# Issue #73: Fix Memory Leak from Unconsumed Response Bodies + +## Problem + +At high RPS (50K+), the simple worker (`run_worker`) was accumulating memory rapidly because HTTP response bodies were never consumed. The code only checked the status code but didn't read the response body, causing it to buffer in memory indefinitely. + +### Symptoms +- Memory usage growing from 0 to 14GB in ~65 seconds +- Rate: ~215 MB/second at 50K RPS +- ~4.3 KB per request being accumulated +- Auto-OOM protection triggered but memory continued growing +- Process eventually hitting critical threshold (92%+) + +### Root Cause + +In `src/worker.rs` around line 77-97: +```rust +match req.send().await { + Ok(response) => { + let status = response.status().as_u16(); + // ... metrics recording ... + // ⚠️ Response dropped without consuming body! + } +} +``` + +Even though the response object is dropped, the underlying HTTP connection may buffer the response body in memory, especially with HTTP/1.1 keep-alive connections. 
+
+## Solution
+
+Explicitly consume the response body to prevent memory accumulation:
+
+```rust
+// Explicitly consume and discard response body to prevent memory accumulation (Issue #73)
+// At high RPS, unconsumed response bodies can accumulate and cause OOM
+let _ = response.bytes().await;
+```
+
+This ensures:
+1. Response body is fully read from the network
+2. Memory is released immediately after reading
+3. Connection can be properly reused
+4. No buffering accumulation at high RPS
+
+## Impact
+
+### Before Fix
+- **Memory growth**: ~215 MB/second at 50K RPS
+- **Stability**: Process OOM after 60-90 seconds
+- **Critical threshold**: Reached 92.9% in 65 seconds
+
+### After Fix (Expected)
+- **Memory growth**: Stable, only from active connections
+- **Stability**: Can sustain 50K RPS indefinitely
+- **Memory usage**: Should stabilize around 2-4GB for 5000 concurrent tasks
+
+## Testing
+
+### Recommended Test
+```bash
+# High RPS test for memory stability
+export TARGET_URL="http://your-test-server"
+export REQUEST_TYPE="GET"
+export NUM_CONCURRENT_TASKS=5000
+export TEST_DURATION_SECS=300 # 5 minutes
+export LOAD_MODEL="rps"
+export TARGET_RPS=50000
+
+# Monitor memory during test
+watch -n 1 'docker stats'
+```
+
+### Expected Metrics
+- Memory should stabilize after initial ramp-up (30-60 seconds)
+- No continuous memory growth trend
+- Auto-OOM protection should not trigger under normal conditions
+
+## Related Issues
+
+- **Issue #66**: PERCENTILE_TRACKING_ENABLED flag
+- **Issue #67**: Periodic histogram rotation
+- **Issue #68**: Histogram label limits
+- **Issue #69**: Memory usage metrics
+- **Issue #72**: Auto-OOM protection
+
+This issue completes the Phase 2.5 memory optimization work by fixing the primary memory leak that was overwhelming all other memory management strategies. 
+ +## Note on Scenario Worker + +The scenario worker (`run_scenario_worker`) was NOT affected by this issue because the scenario executor properly consumes response bodies at line 301 of `src/executor.rs`: + +```rust +let body_result = response.text().await; +``` + +This issue only affected the simple single-request worker mode. diff --git a/LOAD_TEST_SCENARIOS.md b/LOAD_TEST_SCENARIOS.md new file mode 100644 index 0000000..c319eef --- /dev/null +++ b/LOAD_TEST_SCENARIOS.md @@ -0,0 +1,1245 @@ +# E-commerce Test Target - Load Testing Scenarios + +**Application URL**: https://ecom.edge.baugus-lab.com +**Version**: 1.0.0 +**API Documentation**: https://ecom.edge.baugus-lab.com/swagger/index.html + +This document provides comprehensive load testing scenarios for the E-commerce Test Target API. Use these scenarios to build realistic load tests that simulate production traffic patterns. + +--- + +## ⚠️ IMPORTANT: Memory Considerations + +**Before running high-load tests, read [MEMORY_OPTIMIZATION.md](MEMORY_OPTIMIZATION.md)** + +Key limits to avoid OOM (Out of Memory) errors: +- **With 4GB RAM**: Max 200 concurrent tasks, 5,000 RPS, 1h duration +- **With 8GB RAM**: Max 1,000 concurrent tasks, 25,000 RPS, 2h duration +- **HDR histograms consume 2-4MB each** - they grow unbounded per scenario/step + +⚠️ **Your attempted config would need 8-12GB minimum:** +```bash +NUM_CONCURRENT_TASKS=5000 # ❌ Too high for 4GB +TARGET_RPS=50000 # ❌ Too high for 4GB +TEST_DURATION=24h # ❌ Too long for 4GB +``` + +✅ **Safe starting config for 4GB:** +```bash +NUM_CONCURRENT_TASKS=200 +TARGET_RPS=5000 +TEST_DURATION=1h +LOAD_MODEL_TYPE=Rps +``` + +--- + +## Table of Contents + +1. [Quick Reference](#quick-reference) +2. [Scenario 1: Health & Status Monitoring](#scenario-1-health--status-monitoring) +3. [Scenario 2: Product Browsing](#scenario-2-product-browsing) +4. [Scenario 3: User Registration & Authentication](#scenario-3-user-registration--authentication) +5. 
[Scenario 4: Complete Shopping Flow](#scenario-4-complete-shopping-flow) +6. [Scenario 5: Cart Operations](#scenario-5-cart-operations) +7. [Scenario 6: Order Management](#scenario-6-order-management) +8. [Scenario 7: Search & Filter](#scenario-7-search--filter) +9. [Scenario 8: Streaming & WebSocket](#scenario-8-streaming--websocket) +10. [Scenario 9: Response Variations](#scenario-9-response-variations) +11. [Scenario 10: Error Handling](#scenario-10-error-handling) +12. [Scenario 11: Mixed Realistic Traffic](#scenario-11-mixed-realistic-traffic) +13. [Scenario 12: Stress Testing](#scenario-12-stress-testing) +14. [Performance Targets](#performance-targets) +15. [Load Patterns](#load-patterns) + +--- + +## Quick Reference + +### Base Configuration +``` +BASE_URL=https://ecom.edge.baugus-lab.com +SKIP_TLS_VERIFY=false +``` + +### Key Endpoints +- Health: `GET /health` +- Products: `GET /products` +- Auth: `POST /auth/register`, `POST /auth/login` +- Cart: `GET /cart`, `POST /cart/items` +- Checkout: `POST /checkout` +- Metrics: `GET /metrics` + +--- + +## Scenario 1: Health & Status Monitoring + +**Purpose**: Verify service availability and monitor application health. 
+ +### Test Case 1.1: Basic Health Check +```bash +# Request +GET /health + +# Expected Response (200 OK) +{ + "status": "healthy", + "timestamp": "2026-02-10T21:00:00Z" +} + +# Load Pattern +- Constant RPS: 10 +- Duration: Continuous +- Success Criteria: 100% success rate, <50ms p95 latency +``` + +### Test Case 1.2: Detailed Status Check +```bash +# Request +GET /status + +# Expected Response (200 OK) +{ + "status": "ok", + "timestamp": "2026-02-10T21:00:00Z", + "uptime": 86400, + "requests_processed": 1500000, + "version": "1.0.0" +} + +# Load Pattern +- Constant RPS: 5 +- Duration: Continuous +- Success Criteria: 100% success rate, <100ms p95 latency +``` + +### Test Case 1.3: Metrics Scraping +```bash +# Request +GET /metrics + +# Expected Response (200 OK) +# TYPE http_requests_total counter +http_requests_total{method="GET",path="/health",status="200"} 1234567 +... + +# Load Pattern +- Interval: Every 15s (Prometheus scrape) +- Duration: Continuous +- Success Criteria: 100% success rate, <200ms p95 latency +``` + +--- + +## Scenario 2: Product Browsing + +**Purpose**: Simulate users browsing the product catalog. + +### Test Case 2.1: List All Products (Paginated) +```bash +# Request +GET /products?page=1&limit=20 + +# Expected Response (200 OK) +{ + "products": [...], # 20 products + "total": 1000, + "page": 1, + "limit": 20, + "total_pages": 50 +} + +# Load Pattern +- Ramp: 0 → 100 concurrent users over 2 minutes +- Sustain: 100 concurrent users for 10 minutes +- Ramp down: 100 → 0 over 2 minutes +- Think time: 2-5 seconds between requests +- Success Criteria: <200ms p95 latency, <1% error rate +``` + +### Test Case 2.2: Get Product Details +```bash +# Setup: Get a product ID from /products +GET /products?limit=1 + +# Request +GET /products/{product_id} + +# Expected Response (200 OK) +{ + "id": "prod-123", + "name": "Product Name", + "description": "...", + "price": 99.99, + "category": "electronics", + "stock": 50, + "image_url": "https://..." 
+} + +# Load Pattern +- Concurrent users: 200 +- Duration: 15 minutes +- Distribution: Random product IDs +- Think time: 1-3 seconds +- Success Criteria: <150ms p95 latency, <0.5% error rate +``` + +### Test Case 2.3: Category Filtering +```bash +# Request +GET /products?category=electronics&limit=50 + +# Expected Response (200 OK) +{ + "products": [...], # Electronics products only + "total": 250, + "category": "electronics" +} + +# Load Pattern +- Concurrent users: 50 +- Duration: 10 minutes +- Categories: electronics, clothing, books, sports +- Success Criteria: <250ms p95 latency +``` + +### Test Case 2.4: Product Search +```bash +# Request +GET /products?search=laptop&limit=20 + +# Expected Response (200 OK) +{ + "products": [...], # Products matching "laptop" + "total": 15 +} + +# Load Pattern +- Concurrent users: 75 +- Duration: 10 minutes +- Search terms: laptop, phone, shirt, book, etc. +- Success Criteria: <300ms p95 latency +``` + +--- + +## Scenario 3: User Registration & Authentication + +**Purpose**: Test user account creation and login flows. + +### Test Case 3.1: User Registration +```bash +# Request +POST /auth/register +Content-Type: application/json + +{ + "email": "user-{timestamp}@example.com", + "password": "SecurePass123!", + "name": "Test User" +} + +# Expected Response (201 Created) +{ + "user": { + "id": "user-uuid", + "email": "user-{timestamp}@example.com", + "name": "Test User" + }, + "token": "eyJhbGciOiJIUzI1NiIs..." +} + +# Load Pattern +- Rate: 5 registrations/second +- Duration: 30 minutes +- Email: Use unique emails (timestamp or UUID) +- Success Criteria: <500ms p95 latency, 100% unique users +``` + +### Test Case 3.2: User Login +```bash +# Request +POST /auth/login +Content-Type: application/json + +{ + "email": "existing-user@example.com", + "password": "SecurePass123!" 
+} + +# Expected Response (200 OK) +{ + "user": { + "id": "user-uuid", + "email": "existing-user@example.com", + "name": "Test User" + }, + "token": "eyJhbGciOiJIUzI1NiIs..." +} + +# Load Pattern +- Concurrent logins: 100 +- Duration: 15 minutes +- Pool: 1000 pre-created users +- Success Criteria: <300ms p95 latency, <1% error rate +``` + +### Test Case 3.3: Get User Profile +```bash +# Request (requires authentication) +GET /users/me +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "id": "user-uuid", + "email": "user@example.com", + "name": "Test User", + "created_at": "2026-02-10T20:00:00Z" +} + +# Load Pattern +- Concurrent users: 200 +- Duration: 10 minutes +- Success Criteria: <100ms p95 latency +``` + +### Test Case 3.4: Logout +```bash +# Request (requires authentication) +POST /auth/logout +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "message": "Logged out successfully" +} + +# Load Pattern +- Rate: 10 logouts/second +- Duration: 5 minutes +``` + +--- + +## Scenario 4: Complete Shopping Flow + +**Purpose**: Simulate the complete e-commerce user journey from browsing to checkout. 
+ +### Test Case 4.1: End-to-End Shopping Flow +```bash +# Step 1: Register User +POST /auth/register +{ + "email": "shopper-{id}@example.com", + "password": "Pass123!", + "name": "Shopper {id}" +} +# Save token for subsequent requests + +# Step 2: Browse Products (think time: 3-5s) +GET /products?limit=10 + +# Step 3: View Product Details (think time: 5-10s) +GET /products/{product_id} + +# Step 4: Add to Cart (think time: 2-3s) +POST /cart/items +Authorization: Bearer {token} +{ + "product_id": "{product_id}", + "quantity": 2 +} + +# Step 5: View Cart (think time: 2-3s) +GET /cart +Authorization: Bearer {token} + +# Step 6: Add Another Product (think time: 10-15s) +POST /cart/items +Authorization: Bearer {token} +{ + "product_id": "{another_product_id}", + "quantity": 1 +} + +# Step 7: Update Cart Item (think time: 2-3s) +PUT /cart/items/{item_id} +Authorization: Bearer {token} +{ + "quantity": 3 +} + +# Step 8: View Updated Cart (think time: 2-3s) +GET /cart +Authorization: Bearer {token} + +# Step 9: Checkout (think time: 30-60s for entering payment) +POST /checkout +Authorization: Bearer {token} +{ + "cart_id": "{cart_id}", + "shipping_address": { + "street": "123 Main St", + "city": "San Francisco", + "state": "CA", + "zip": "94102", + "country": "US" + }, + "payment": { + "method": "credit_card", + "card_token": "tok_visa_{random}" + } +} + +# Step 10: View Order Confirmation (think time: 5s) +GET /orders/{order_id} +Authorization: Bearer {token} + +# Load Pattern +- Concurrent flows: 50 +- Duration: 30 minutes +- Completion rate: 70% (30% abandon at various stages) +- Think times: As specified per step +- Success Criteria: + - <2% error rate across all steps + - <500ms p95 for cart operations + - <1s p95 for checkout +``` + +--- + +## Scenario 5: Cart Operations + +**Purpose**: Test shopping cart functionality under load. 
+ +### Test Case 5.1: View Empty Cart +```bash +# Request +GET /cart +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "id": "cart-uuid", + "user_id": "user-uuid", + "items": [], + "subtotal": 0, + "tax": 0, + "shipping": 0, + "total": 0 +} + +# Load Pattern +- Concurrent users: 100 +- Duration: 5 minutes +``` + +### Test Case 5.2: Add Item to Cart +```bash +# Request +POST /cart/items +Authorization: Bearer {token} +Content-Type: application/json + +{ + "product_id": "prod-123", + "quantity": 2 +} + +# Expected Response (201 Created) +{ + "cart": { + "id": "cart-uuid", + "items": [ + { + "id": "item-uuid", + "product_id": "prod-123", + "quantity": 2, + "price": 99.99, + "subtotal": 199.98 + } + ], + "subtotal": 199.98, + "tax": 16.00, + "shipping": 10.00, + "total": 225.98 + } +} + +# Load Pattern +- Concurrent operations: 200 +- Duration: 15 minutes +- Success Criteria: <300ms p95 latency +``` + +### Test Case 5.3: Update Cart Item Quantity +```bash +# Request +PUT /cart/items/{item_id} +Authorization: Bearer {token} +Content-Type: application/json + +{ + "quantity": 5 +} + +# Expected Response (200 OK) +# Updated cart with new quantity + +# Load Pattern +- Concurrent updates: 100 +- Duration: 10 minutes +``` + +### Test Case 5.4: Remove Item from Cart +```bash +# Request +DELETE /cart/items/{item_id} +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "message": "Item removed from cart" +} + +# Load Pattern +- Concurrent deletions: 50 +- Duration: 10 minutes +``` + +### Test Case 5.5: Clear Cart +```bash +# Request +DELETE /cart +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "message": "Cart cleared" +} + +# Load Pattern +- Rate: 20 clears/second +- Duration: 5 minutes +``` + +--- + +## Scenario 6: Order Management + +**Purpose**: Test order placement and retrieval. 
+ +### Test Case 6.1: Place Order (Checkout) +```bash +# Request +POST /checkout +Authorization: Bearer {token} +Content-Type: application/json + +{ + "cart_id": "cart-uuid", + "shipping_address": { + "street": "123 Main St", + "city": "San Francisco", + "state": "CA", + "zip": "94102", + "country": "US" + }, + "billing_address": { + "street": "123 Main St", + "city": "San Francisco", + "state": "CA", + "zip": "94102", + "country": "US" + }, + "payment": { + "method": "credit_card", + "card_token": "tok_visa" + } +} + +# Expected Response (201 Created) +{ + "order_id": "order-uuid", + "status": "confirmed", + "total": 225.98, + "confirmation_number": "ORD-12345678" +} + +# Load Pattern +- Rate: 10 orders/second +- Duration: 20 minutes +- Success Criteria: <1s p95 latency, <0.5% error rate +``` + +### Test Case 6.2: Get Order Details +```bash +# Request +GET /orders/{order_id} +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "id": "order-uuid", + "user_id": "user-uuid", + "status": "confirmed", + "items": [...], + "shipping_address": {...}, + "total": 225.98, + "confirmation_number": "ORD-12345678", + "created_at": "2026-02-10T21:00:00Z" +} + +# Load Pattern +- Concurrent users: 150 +- Duration: 15 minutes +``` + +### Test Case 6.3: List User Orders +```bash +# Request +GET /orders +Authorization: Bearer {token} + +# Expected Response (200 OK) +{ + "orders": [ + { + "id": "order-uuid", + "status": "confirmed", + "total": 225.98, + "created_at": "2026-02-10T21:00:00Z" + }, + ... + ] +} + +# Load Pattern +- Concurrent users: 100 +- Duration: 10 minutes +``` + +--- + +## Scenario 7: Search & Filter + +**Purpose**: Test search and filtering performance. 
+
+### Test Case 7.1: Search Products
+```bash
+# Request
+GET /products?search={query}&limit=20
+
+# Search queries (rotate through):
+- "laptop"
+- "phone"
+- "wireless"
+- "pro"
+- "gaming"
+- "portable"
+
+# Load Pattern
+- Concurrent searches: 100
+- Duration: 15 minutes
+- Query distribution: Realistic search terms
+- Success Criteria: <400ms p95 latency
+```
+
+### Test Case 7.2: Filter by Category
+```bash
+# Request
+GET /products?category={category}&limit=50
+
+# Categories (rotate through):
+- electronics
+- clothing
+- books
+- sports
+- home
+
+# Load Pattern
+- Concurrent users: 75
+- Duration: 10 minutes
+```
+
+### Test Case 7.3: Combined Search and Filter
+```bash
+# Request
+GET /products?category=electronics&search=laptop&limit=20
+
+# Load Pattern
+- Concurrent users: 50
+- Duration: 10 minutes
+```
+
+---
+
+## Scenario 8: Streaming & WebSocket
+
+**Purpose**: Test streaming endpoints and WebSocket connections.
+
+### Test Case 8.1: Server-Sent Events (SSE)
+```bash
+# Request
+GET /stream?events=10
+
+# Expected: Stream of 10 events
+data: {"id": 1, "message": "Event 1", "timestamp": "..."}
+
+data: {"id": 2, "message": "Event 2", "timestamp": "..."}
+
+...
+
+# Load Pattern
+- Concurrent streams: 50
+- Events per stream: 10-100
+- Duration: 15 minutes
+- Success Criteria: All events received, no disconnects
+```
+
+### Test Case 8.2: WebSocket Echo
+```bash
+# Connect (TLS-terminated origin, so use the secure scheme)
+wss://ecom.edge.baugus-lab.com/ws/echo
+
+# Send messages
+{"type": "ping", "data": "Hello"}
+
+# Receive echo
+{"type": "pong", "data": "Hello", "timestamp": "..."}
+
+# Load Pattern
+- Concurrent connections: 100
+- Messages per connection: 50
+- Duration: 10 minutes
+- Success Criteria: 100% message delivery
+```
+
+---
+
+## Scenario 9: Response Variations
+
+**Purpose**: Test various response formats and sizes. 
+ +### Test Case 9.1: JSON Response +```bash +# Request +GET /bytes/1024?format=json + +# Expected: 1KB JSON response + +# Load Pattern +- Sizes: 1KB, 10KB, 100KB, 1MB +- Concurrent users: 50 per size +- Duration: 10 minutes +``` + +### Test Case 9.2: XML Response +```bash +# Request +GET /bytes/1024?format=xml + +# Expected: 1KB XML response + +# Load Pattern +- Concurrent users: 25 +- Duration: 5 minutes +``` + +### Test Case 9.3: CSV Response +```bash +# Request +GET /csv + +# Expected: CSV file with product data + +# Load Pattern +- Concurrent downloads: 50 +- Duration: 5 minutes +``` + +### Test Case 9.4: HTML Response +```bash +# Request +GET /html + +# Expected: HTML page + +# Load Pattern +- Concurrent requests: 30 +- Duration: 5 minutes +``` + +--- + +## Scenario 10: Error Handling + +**Purpose**: Test application resilience and error handling. + +### Test Case 10.1: Simulated Delays +```bash +# Request +GET /delay/{milliseconds} + +# Test delays: 100ms, 500ms, 1000ms, 2000ms + +# Load Pattern +- 100ms delay: 50 concurrent, expect <150ms p95 +- 500ms delay: 30 concurrent, expect <550ms p95 +- 1s delay: 20 concurrent, expect <1.1s p95 +- 2s delay: 10 concurrent, expect <2.1s p95 +``` + +### Test Case 10.2: Error Simulation +```bash +# Request +GET /error/{status_code} + +# Status codes: 400, 404, 500, 503 + +# Expected Responses: +400: {"error": "Bad Request"} +404: {"error": "Not Found"} +500: {"error": "Internal Server Error"} +503: {"error": "Service Unavailable"} + +# Load Pattern +- Concurrent requests: 20 per status code +- Duration: 5 minutes +- Success Criteria: Correct error responses +``` + +### Test Case 10.3: Random Delay +```bash +# Request +GET /delay/random?max=2000 + +# Expected: Random delay 0-2000ms + +# Load Pattern +- Concurrent requests: 50 +- Duration: 10 minutes +``` + +--- + +## Scenario 11: Mixed Realistic Traffic + +**Purpose**: Simulate realistic production traffic patterns. 
+
+### Test Case 11.1: Daily Traffic Pattern
+```yaml
+# Configuration
+LOAD_MODEL_TYPE: DailyTraffic
+DAILY_MIN_RPS: 100
+DAILY_MID_RPS: 500
+DAILY_MAX_RPS: 1500
+DAILY_CYCLE_DURATION: 1h
+
+# Traffic distribution (1 hour = 1 simulated day, so 1 simulated hour = 2.5 min):
+- 00:00-07:00 (0-17.5min): Night - 100 RPS
+- 07:00-09:00 (17.5-22.5min): Morning ramp - 100→1500 RPS
+- 09:00-12:00 (22.5-30min): Peak - 1500 RPS
+- 12:00-14:00 (30-35min): Lunch decline - 1500→500 RPS
+- 14:00-17:00 (35-42.5min): Afternoon - 500 RPS
+- 17:00-20:00 (42.5-50min): Evening decline - 500→100 RPS
+- 20:00-24:00 (50-60min): Night - 100 RPS
+
+# Request mix:
+- 40% Product browsing (GET /products)
+- 20% Product details (GET /products/{id})
+- 15% Search (GET /products?search=...)
+- 10% Cart operations (POST/PUT/DELETE /cart/*)
+- 10% Auth (POST /auth/login)
+- 4% Checkout (POST /checkout)
+- 1% Health checks (GET /health)
+
+# User behavior:
+- 30% bounce (single request)
+- 40% browse only (2-5 requests)
+- 20% add to cart (6-10 requests)
+- 10% complete purchase (11-15 requests)
+
+# Duration: 4 hours (4 simulated days)
+```
+
+### Test Case 11.2: Flash Sale Spike
+```yaml
+# Normal traffic: 200 RPS for 30 minutes
+# Spike announcement: Ramp 200→2000 RPS over 2 minutes
+# Flash sale: 2000 RPS for 15 minutes
+# Post-sale: Decline 2000→300 RPS over 5 minutes
+# Cooldown: 300 RPS for 15 minutes
+
+# Request mix during spike:
+- 60% Product details for sale items
+- 25% Add to cart
+- 10% Checkout
+- 5% Other
+
+# Success Criteria:
+- <1s p95 latency during spike
+- <5% error rate
+- No service degradation
+```
+
+### Test Case 11.3: Black Friday Scenario
+```yaml
+# Pre-event: 500 RPS baseline
+# Countdown (2 hours): Gradual increase 500→3000 RPS
+# Event start: Spike to 5000 RPS
+# Sustained (4 hours): 4000-5000 RPS
+# Decline (2 hours): 5000→1000 RPS
+# Post-event: 1000 RPS baseline
+
+# Duration: 12 hours
+# Total requests: ~100M
+
+# Request mix:
+- 35% Product browsing
+- 30% Product details
+- 15% Cart operations
+- 12% 
Checkout +- 5% Search +- 3% Auth + +# Success Criteria: +- <2s p95 latency +- <2% error rate +- Auto-scaling triggered appropriately +``` + +--- + +## Scenario 12: Stress Testing + +**Purpose**: Find breaking points and maximum capacity. + +### Test Case 12.1: Capacity Test +```yaml +# Objective: Find maximum sustainable RPS + +# Method: Incremental load increase +- Start: 100 RPS +- Increment: +100 RPS every 5 minutes +- Continue until: Error rate >5% OR latency p95 >5s +- Endpoint mix: 70% reads, 30% writes + +# Monitor: +- Response times (p50, p95, p99) +- Error rates +- System resources (CPU, memory, connections) +- Database performance + +# Expected outcome: +- Identify maximum RPS capacity +- Identify bottlenecks +- Document degradation curve +``` + +### Test Case 12.2: Spike Test +```yaml +# Objective: Test recovery from sudden traffic spikes + +# Pattern: +- Baseline: 200 RPS for 5 minutes +- Spike: Instant jump to 2000 RPS for 2 minutes +- Recovery: Drop to 200 RPS for 5 minutes +- Repeat: 3 times + +# Success Criteria: +- No crashes +- Recovery within 30s after spike +- <10% error rate during spike +``` + +### Test Case 12.3: Soak Test +```yaml +# Objective: Identify memory leaks and resource exhaustion + +# Pattern: +- Steady load: 500 RPS +- Duration: 24 hours +- Request mix: Realistic mix from Scenario 11.1 + +# Monitor: +- Memory usage over time +- Connection pool exhaustion +- Database connections +- Response time degradation + +# Success Criteria: +- No memory leaks (stable memory usage) +- Consistent performance over 24h +- No resource exhaustion +``` + +### Test Case 12.4: Database Stress +```yaml +# Objective: Test database performance under heavy write load + +# Pattern: +- 100 concurrent users +- Each user: + - Register → Login → Add 10 items to cart → Checkout + - Repeat continuously +- Duration: 30 minutes + +# Expected: +- Heavy INSERT load (users, cart_items, orders, order_items) +- Transaction handling +- Lock contention + +# Monitor: +- 
Database response times +- Connection pool saturation +- Transaction failures +- Lock timeouts +``` + +--- + +## Performance Targets + +### Response Time Targets (p95) + +| Endpoint Category | Target | Acceptable | Critical | +|------------------|--------|------------|----------| +| Health checks | <50ms | <100ms | <200ms | +| Product listing | <200ms | <500ms | <1s | +| Product details | <150ms | <300ms | <750ms | +| Search | <400ms | <800ms | <2s | +| Login | <300ms | <600ms | <1.5s | +| Registration | <500ms | <1s | <2s | +| Cart operations | <250ms | <500ms | <1s | +| Checkout | <800ms | <1.5s | <3s | +| Order retrieval | <200ms | <400ms | <1s | + +### Throughput Targets + +| Scenario | Target RPS | Peak RPS | Notes | +|----------|-----------|----------|-------| +| Normal traffic | 200-500 | 1000 | Typical weekday | +| Peak hours | 500-1000 | 2000 | Evening/weekend | +| Flash sale | 1000-2000 | 5000 | Limited duration | +| Black Friday | 2000-4000 | 8000 | Annual peak | + +### Error Rate Targets + +- **Normal operation**: <0.5% error rate +- **High load**: <2% error rate +- **Stress conditions**: <5% error rate +- **Critical**: Graceful degradation, no crashes + +### Resource Utilization + +- **CPU**: <70% average, <90% peak +- **Memory**: <80% allocated, no leaks +- **Connections**: <80% pool capacity +- **Database**: <70% connection pool + +--- + +## Load Patterns + +### Pattern 1: Constant Load +```yaml +Type: Constant RPS +RPS: 100 +Duration: 30m +Use: Baseline performance testing +``` + +### Pattern 2: Ramp Up +```yaml +Type: RampRps +Start: 0 RPS +End: 1000 RPS +Duration: 10m +Use: Warm-up, gradual load increase +``` + +### Pattern 3: Step Load +```yaml +Type: Steps +Steps: + - RPS: 100, Duration: 5m + - RPS: 300, Duration: 5m + - RPS: 500, Duration: 5m + - RPS: 1000, Duration: 5m +Use: Capacity testing, finding limits +``` + +### Pattern 4: Spike +```yaml +Type: Spike +Baseline: 200 RPS +Spike: 2000 RPS +Spike Duration: 2m +Recovery: 200 RPS +Use: 
Resilience testing +``` + +### Pattern 5: Wave +```yaml +Type: Wave +Min: 100 RPS +Max: 1000 RPS +Period: 10m +Duration: 60m +Use: Variable load simulation +``` + +### Pattern 6: Daily Pattern +```yaml +Type: DailyTraffic +Min: 100 RPS (night) +Mid: 500 RPS (afternoon) +Max: 1500 RPS (peak) +Cycle: 1h +Use: Realistic traffic simulation +``` + +--- + +## Test Execution Guide + +### Pre-Test Checklist + +- [ ] Verify application is deployed and healthy +- [ ] Confirm monitoring is active (Prometheus, logs) +- [ ] Set up performance dashboards +- [ ] Configure alerts for critical metrics +- [ ] Create test user accounts +- [ ] Warm up the application (5 min at 10% load) +- [ ] Take baseline measurements +- [ ] Document test environment details + +### During Test + +- Monitor key metrics: + - Response times (p50, p95, p99, max) + - Error rates and types + - Throughput (RPS) + - Active connections + - CPU and memory usage + - Database performance + +### Post-Test Analysis + +- [ ] Verify no data corruption +- [ ] Check for memory leaks +- [ ] Analyze error logs +- [ ] Generate performance reports +- [ ] Compare against baselines +- [ ] Document bottlenecks found +- [ ] Create improvement recommendations + +--- + +## Common Test Data + +### Sample Users +```json +{ + "email": "loadtest-user-{id}@example.com", + "password": "LoadTest123!", + "name": "Load Test User {id}" +} +``` + +### Sample Products +``` +Available via: GET /products +Total: 1000 products +Categories: electronics, clothing, books, sports, home +Price range: $9.99 - $1999.99 +``` + +### Sample Addresses +```json +{ + "shipping_address": { + "street": "123 Test Street", + "city": "San Francisco", + "state": "CA", + "zip": "94102", + "country": "US" + } +} +``` + +### Payment Tokens +``` +Valid test tokens: +- tok_visa +- tok_mastercard +- tok_amex +``` + +--- + +## Notes for Load Testing Team + +1. **Authentication**: Most endpoints require JWT tokens. 
Implement token management: + - Register users in setup phase + - Reuse tokens across requests + - Refresh expired tokens + +2. **State Management**: Shopping flow requires maintaining state: + - Cart IDs from cart creation + - Product IDs from product listing + - Order IDs from checkout + +3. **Think Times**: Include realistic think times between requests (2-10 seconds) to simulate real user behavior. + +4. **Data Cleanup**: Implement cleanup routines for test data: + - Remove test users after tests + - Clear abandoned carts + - Archive test orders + +5. **Error Handling**: Distinguish between: + - Expected errors (404 for invalid product) + - Test failures (500 errors, timeouts) + - Network issues + +6. **Distributed Load**: Consider running load generators from multiple locations to simulate geographic distribution. + +7. **Monitoring**: Set up real-time monitoring dashboard to track test progress and identify issues early. + +8. **Baseline**: Always run baseline tests before making changes to compare performance. + +--- + +## Memory & Resource Planning + +For detailed information on memory requirements and optimization: +- See [MEMORY_OPTIMIZATION.md](MEMORY_OPTIMIZATION.md) for memory analysis +- Estimate: **~1MB per 100 sustained RPS over 1 hour** +- HDR histogram overhead: **2-4MB per unique scenario/step** +- Concurrent task overhead: **~8KB per task** + +Quick memory requirements: +- **512MB**: 10 tasks, 500 RPS, 5 min +- **2GB**: 100 tasks, 5,000 RPS, 30 min +- **4GB**: 500 tasks, 10,000 RPS, 1 hour +- **8GB+**: 1,000 tasks, 25,000 RPS, 2+ hours + +Always start small and scale up gradually while monitoring `docker stats`. 
+ +--- + +## Support & Contact + +- **Application URL**: https://ecom.edge.baugus-lab.com +- **API Documentation**: https://ecom.edge.baugus-lab.com/swagger/index.html +- **Health Check**: https://ecom.edge.baugus-lab.com/health +- **Metrics**: https://ecom.edge.baugus-lab.com/metrics +- **Repository**: https://github.com/cbaugus/ecom-test-target + +--- + +**Document Version**: 1.0 +**Last Updated**: 2026-02-10 +**Application Version**: 1.0.0 diff --git a/MEMORY_OPTIMIZATION.md b/MEMORY_OPTIMIZATION.md new file mode 100644 index 0000000..dcdec6f --- /dev/null +++ b/MEMORY_OPTIMIZATION.md @@ -0,0 +1,215 @@ +# Memory Optimization Guide + +## OOM Issue Analysis + +### Root Causes + +Your load test is hitting OOM with 4GB RAM due to several memory-intensive operations: + +#### 1. **HDR Histograms (Primary Issue)** +- **Location**: `src/percentiles.rs:88-106` +- **Problem**: Each histogram tracks 1μs to 60s with 3 significant digits +- **Memory per histogram**: ~2-4MB each +- **Unbounded growth**: `MultiLabelPercentileTracker` creates a NEW histogram for: + - Every unique scenario name + - Every unique step name (format: `scenario:step`) + - No upper limit on number of histograms +- **With your config**: Even with just a few scenarios, you're creating dozens of histograms + +#### 2. **5000 Concurrent Tasks** +- **Location**: `src/main.rs:243` +- **Problem**: Spawning 5000 tokio tasks +- **Memory**: Each task has stack overhead (~2-8KB) + async state +- **Total overhead**: ~10-40MB just for task structures +- **Compounded by**: Each task loop allocates request builders, responses, etc. + +#### 3. **Prometheus Metrics** +- **Location**: `src/metrics.rs` +- **Problem**: Metrics with labels create separate time series +- **Growth**: `HistogramVec` and `IntCounterVec` grow with unique label combinations +- **24h accumulation**: No data reset/rotation over time + +#### 4. 
**Connection Pool Stats** +- **Location**: Tracking connection reuse patterns +- **Accumulates**: Request timing data over entire test duration + +### Memory Breakdown Estimate + +With your config (`NUM_CONCURRENT_TASKS=5000`, `TARGET_RPS=50000`, `24h`): + +``` +Component Estimated Memory +───────────────────────────────────────────────────── +5000 tokio tasks ~40 MB +HDR Histograms (50 scenarios) ~150 MB +Prometheus time series (24h) ~500 MB +Connection pool stats ~100 MB +Request/response buffers in flight ~1-2 GB (at 50k RPS) +Tokio runtime overhead ~200 MB +───────────────────────────────────────────────────── +TOTAL ~2-3 GB minimum +``` + +**At peak with 50k RPS**, you'd need **6-8GB minimum**. + +## Immediate Solutions + +### Solution 1: Reduce Concurrent Tasks (RECOMMENDED) + +```bash +# Start with reasonable concurrency +NUM_CONCURRENT_TASKS=100 # Down from 5000 +TARGET_RPS=5000 # Down from 50000 +TEST_DURATION=1h # Down from 24h +``` + +**Why**: Memory usage scales roughly linearly with concurrent tasks. Going from 5000→100 saves ~1.5GB. + +### Solution 2: Use Realistic Load Patterns + +```bash +# Ramp up gradually to find your limit +LOAD_MODEL_TYPE=RampRps +MIN_RPS=100 +MAX_RPS=5000 +RAMP_DURATION=30m +TEST_DURATION=1h +NUM_CONCURRENT_TASKS=200 +``` + +### Solution 3: Shorter Test Duration + +```bash +# Validate first, then scale up +TEST_DURATION=5m # Quick validation +# Then: TEST_DURATION=30m +# Then: TEST_DURATION=2h +# Finally: TEST_DURATION=24h (if needed) +``` + +### Solution 4: Disable Percentile Tracking (Future Enhancement) + +Currently not configurable, but percentile tracking is the biggest memory consumer. 
+ +## Recommended Test Configurations + +### 🟢 Small Load Test (Fits in 512MB) +```bash +NUM_CONCURRENT_TASKS=10 +TARGET_RPS=500 +TEST_DURATION=5m +LOAD_MODEL_TYPE=Rps +``` + +### 🟡 Medium Load Test (Fits in 2GB) +```bash +NUM_CONCURRENT_TASKS=100 +TARGET_RPS=5000 +TEST_DURATION=30m +LOAD_MODEL_TYPE=RampRps +MIN_RPS=500 +MAX_RPS=5000 +RAMP_DURATION=15m +``` + +### 🟠 High Load Test (Needs 4GB) +```bash +NUM_CONCURRENT_TASKS=500 +TARGET_RPS=10000 +TEST_DURATION=1h +LOAD_MODEL_TYPE=Rps +``` + +### 🔴 Maximum Load Test (Needs 8GB+) +```bash +NUM_CONCURRENT_TASKS=1000 +TARGET_RPS=25000 +TEST_DURATION=2h +LOAD_MODEL_TYPE=RampRps +MIN_RPS=5000 +MAX_RPS=25000 +RAMP_DURATION=30m +``` + +## Understanding the Math + +### RPS vs Concurrent Tasks + +The relationship is: `Concurrent Tasks × (1000ms / Avg Latency) = Sustainable RPS` + +Examples: +- 100 tasks × (1000 / 20ms) = **5,000 RPS** (if avg latency is 20ms) +- 500 tasks × (1000 / 20ms) = **25,000 RPS** +- 5000 tasks × (1000 / 20ms) = **250,000 RPS** (unrealistic for single instance) + +**Your config attempted**: 5000 tasks targeting 50k RPS +- This implies expected latency: `5000 × 1000 / 50000 = 100ms` +- But at 50k RPS, you'd saturate the target or network first +- Memory would balloon from all the in-flight requests + +### Memory per RPS + +Rough estimate: **~1MB per 100 sustained RPS over 1 hour** + +- 5,000 RPS × 1h = ~50 MB +- 25,000 RPS × 1h = ~250 MB +- 50,000 RPS × 24h = **~12 GB** (not sustainable in 4GB) + +## Future Code Improvements + +These would require code changes (future issues): + +1. **Add `PERCENTILE_TRACKING_ENABLED` flag** - Disable histogram tracking for high-load tests +2. **Add histogram reset interval** - Clear percentile data every N minutes +3. **Limit max histogram labels** - Cap at 100 unique scenarios/steps +4. **Use sampling** - Only track percentiles for 10% of requests at high RPS +5. 
**Add memory profiling** - Instrument with memory metrics + +## Troubleshooting + +### Check Current Memory Usage + +```bash +# Inside container +docker stats --no-stream + +# Check Prometheus metrics +curl localhost:9090/metrics | grep process_resident_memory +``` + +### Signs of Memory Pressure + +- OOM Killer message in docker logs +- Increasing latency as test progresses +- "Cannot allocate memory" errors +- Container restart/exit code 137 + +### Docker Memory Limit + +If running locally, increase Docker memory: + +```bash +# docker-compose.yml +services: + loadtest: + mem_limit: 8g + memswap_limit: 8g +``` + +Or docker run: +```bash +docker run --memory=8g --memory-swap=8g ... +``` + +## Summary + +**Your config needs 8-12GB RAM minimum. With 4GB, start with:** + +```bash +NUM_CONCURRENT_TASKS=200 +TARGET_RPS=5000 +TEST_DURATION=1h +LOAD_MODEL_TYPE=Rps +``` + +Then scale up gradually while monitoring `docker stats`. \ No newline at end of file diff --git a/PHASE1_PLAN.md b/PHASE1_PLAN.md new file mode 100644 index 0000000..4c1fe26 --- /dev/null +++ b/PHASE1_PLAN.md @@ -0,0 +1,814 @@ +# Phase 1: Core Engine Enhancement - Implementation Plan + +**Branch**: `develop/phase1-scenario-engine` +**Duration**: ~7 weeks (estimated) +**Target**: Enable realistic multi-step scenario testing for e-commerce flows + +--- + +## Overview + +Phase 1 transforms the rust-loadtest tool from a simple RPS generator into a full-featured scenario execution engine capable of testing complex user journeys like shopping flows, authentication sequences, and multi-step API interactions. 
+ +### Key Capabilities to Add: +- Multi-step scenario execution (register → browse → add to cart → checkout) +- Variable extraction from responses (product IDs, auth tokens, cart IDs) +- Session and cookie management (JWT tokens, session cookies) +- Response assertions (validate success criteria) +- Realistic user behavior (think times, delays) +- Advanced metrics (percentile latencies P50/P90/P95/P99) + +### Testing Target: +- Mock E-commerce API: https://ecom.edge.baugus-lab.com +- 12 comprehensive test scenarios (see LOAD_TEST_SCENARIOS.md) + +--- + +## Implementation Waves + +### Wave 1: Foundation (Weeks 1-3) +Critical P0 issues that unblock all other work. + +### Wave 2: Realistic Behavior (Weeks 4-5) +Make tests behave like real users with assertions and delays. + +### Wave 3: Enhanced Capabilities (Weeks 6-7) +Additional features for comprehensive testing. + +--- + +## Issues and Progress Tracker + +### ✅ Completed +- [x] **Issue #26**: Multi-step scenario execution engine (P0, XL) - **COMPLETE** ✅ + - Branch: `feature/issue-26-multi-step-scenarios` (merged to develop) + - 3 commits, ~1700 lines added + - All acceptance criteria met +- [x] **Issue #27**: Variable extraction from responses (P0, L) - **COMPLETE** ✅ + - Branch: `feature/issue-27-variable-extraction` (merged to develop) + - JSONPath, Regex, Header, Cookie extractors implemented + - 15 unit tests + 7 integration tests +- [x] **Issue #28**: Cookie and session management (P0, M) - **COMPLETE** ✅ + - Branch: `feature/issue-28-cookie-session` (merged to develop) + - Cookie-enabled clients per virtual user + - 6 integration tests +- [x] **Issue #29**: Think times and delays (P1, S) - **COMPLETE** ✅ + - Branch: `feature/issue-29-think-times` (merged to develop) + - Fixed and Random think time variants + - 4 unit tests + 6 integration tests +- [x] **Issue #30**: Response assertions framework (P0, L) - **COMPLETE** ✅ + - Branch: `feature/issue-30-assertions` (merged to develop) + - 6 assertion types 
implemented + - 14 unit tests + 18 integration tests +- [x] **Issue #33**: Percentile latency metrics (P1, M) - **COMPLETE** ✅ + - Branch: `feature/issue-33-percentile-metrics` (merged to develop) + - HDR Histogram with P50/P90/P95/P99/P99.9 tracking + - 11 unit tests + 11 integration tests +- [x] **Issue #32**: All HTTP methods (P2, S) - **COMPLETE** ✅ + - Branch: `feature/issue-32-all-http-methods` (merged to develop) + - PUT, PATCH, DELETE, HEAD, OPTIONS support + - 14 integration tests +- [x] **Issue #31**: CSV data-driven testing (P1, M) - **COMPLETE** ✅ + - Branch: `feature/issue-31-csv-data-driven` (merged to develop) + - CSV parser with round-robin distribution + - 17 unit tests + 7 integration tests +- [x] **Issue #34**: Error categorization (P2, M) - **COMPLETE** ✅ + - Branch: `feature/issue-34-error-categorization` (merged to develop) + - 6 error categories (ClientError, ServerError, NetworkError, etc.) + - 12 unit tests + 8 integration tests +- [x] **Issue #35**: Per-scenario throughput (P2, S) - **COMPLETE** ✅ + - Branch: `feature/issue-35-per-scenario-throughput` (merged to develop) + - ThroughputTracker with RPS per scenario + - 10 unit tests + 14 integration tests +- [x] **Issue #36**: Connection pooling stats (P3, S) - **COMPLETE** ✅ + - Branch: `feature/issue-36-connection-pool-stats` (merged to develop) + - PoolConfig with configurable pool settings + - Connection reuse analysis via timing heuristics + - 12 unit tests + 22 integration tests + +### 🚧 In Progress +_None - ✅ Wave 1, Wave 2, and Wave 3 ALL COMPLETE! 
🎉_ + +### 📋 Todo - Wave 1 (Weeks 1-3) - ✅ COMPLETE +- [x] **Issue #26**: Multi-step scenario execution engine (P0, XL) ✅ + - [x] Design: Scenario and Step data structures (src/scenario.rs) + - [x] Design: Variable context per virtual user (ScenarioContext) + - [x] Implement: Sequential step execution (src/executor.rs) + - [x] Implement: Step result propagation (StepResult, ScenarioResult) + - [x] Implement: Error handling per step (error messages, failed_at_step) + - [x] Implement: Variable substitution in requests (${var} and $var syntax) + - [x] Implement: Special ${timestamp} variable for unique IDs + - [x] Tests: Unit tests for ScenarioContext (9 tests passing) + - [x] Tests: Integration tests with multi-step flows (10 tests) + - [x] Tests: Worker unit tests (3 tests) + - [x] Integration: Wire into worker.rs (run_scenario_worker) + - [x] Integration: Scenario metrics (6 new Prometheus metrics) + - [x] Example: Create example scenario (examples/scenario_example.rs) + - [x] Documentation: Code documentation and test examples + +- [x] **Issue #27**: Variable extraction from responses (P0, L) ✅ + - [x] Implement: JSONPath extractor (serde_json) + - [x] Implement: Regex extractor (regex crate) + - [x] Implement: Header extractor + - [x] Implement: Cookie extractor + - [x] Implement: Variable storage in user context + - [x] Implement: Variable substitution in requests + - [x] Tests: Extract product_id from JSON + - [x] Tests: Extract auth token from response + - [x] Tests: 15 unit tests + 7 integration tests + +- [x] **Issue #28**: Cookie and session management (P0, M) ✅ + - [x] Implement: Cookie jar per virtual user + - [x] Implement: Automatic cookie handling (reqwest cookies feature) + - [x] Implement: Cookie-enabled clients per execution + - [x] Implement: Session persistence across steps + - [x] Tests: Login flow with session cookies + - [x] Tests: Cart operations with session + - [x] Tests: 6 integration tests + +### 📋 Todo - Wave 2 (Weeks 4-5) - ✅ COMPLETE +- 
[x] **Issue #29**: Think times and delays (P1, S) ✅ + - [x] Design: ThinkTime enum (Fixed, Random) + - [x] Implement: Fixed delay configuration + - [x] Implement: Random delay (min-max range) + - [x] Implement: Per-step think time + - [x] Implement: Think time applied after metrics + - [x] Tests: Verify timing accuracy + - [x] Tests: 4 unit tests + 6 integration tests + +- [x] **Issue #30**: Response assertions framework (P0, L) ✅ + - [x] Design: Assertion types enum + - [x] Implement: Status code assertions + - [x] Implement: JSONPath assertions (existence and value match) + - [x] Implement: Response time assertions + - [x] Implement: Content matching (regex, contains) + - [x] Implement: Header existence assertions + - [x] Implement: Assertion result tracking + - [x] Implement: Step failure on assertion failure + - [x] Implement: Assertion metrics (SCENARIO_ASSERTIONS_TOTAL) + - [x] Tests: Failed assertion handling + - [x] Tests: 14 unit tests + 18 integration tests + +- [x] **Issue #33**: Percentile latency metrics (P1, M) ✅ + - [x] Research: HDR Histogram selected (industry standard) + - [x] Implement: P50, P90, P95, P99, P99.9 tracking + - [x] Implement: Per-endpoint percentiles (MultiLabelPercentileTracker) + - [x] Implement: Per-scenario percentiles + - [x] Implement: Per-step percentiles + - [x] Implement: Final report with formatted tables + - [x] Tests: 11 unit tests + 11 integration tests + - [x] Integration: Worker auto-records all latencies + +### 📋 Todo - Wave 3 (Weeks 6-7) +- [x] **Issue #32**: All HTTP methods (P2, S) ✅ + - [x] Implement: PUT, PATCH, DELETE support + - [x] Implement: HEAD, OPTIONS support + - [x] Tests: Cart update (PUT), delete (DELETE) + +- [x] **Issue #31**: CSV data-driven testing (P1, M) ✅ + - [x] Implement: CSV parser + - [x] Implement: Data row iteration per VU + - [x] Implement: Variable substitution from CSV + - [x] Tests: Load user pool from CSV + +- [x] **Issue #34**: Error categorization (P2, M) ✅ + - [x] Implement: Error 
type enum + - [x] Implement: Error counting by category + - [x] Implement: Error breakdown in metrics + - [x] Tests: Distinguish 4xx vs 5xx vs network + +- [x] **Issue #35**: Per-scenario throughput (P2, S) ✅ + - [x] Implement: Separate metrics per scenario + - [x] Implement: RPS tracking per scenario + - [x] Tests: Multi-scenario RPS reporting + +- [x] **Issue #36**: Connection pooling stats (P3, S) ✅ + - [x] Implement: Active connection tracking + - [x] Implement: Pool utilization metrics + - [x] Tests: Connection pool monitoring + +--- + +## Scenario Support Matrix + +| Scenario | Status | Required Features | Blocked By | +|----------|--------|------------------|------------| +| **1. Health & Status** | ✅ Works now | None | - | +| **2. Product Browsing** | ✅ Works now | #27 (extract product_id), #30 (assertions) | - | +| **3. Auth Flow** | ✅ Works now | #28 (tokens), #27 (extract), #30 (assert) | - | +| **4. Shopping Flow** | ✅ Works now | All Wave 1+2 features | - | +| **5. Cart Operations** | 🟡 Partial | #28, #27, #32 (PUT/DELETE), #30 | #32 | +| **6. Order Management** | ✅ Works now | #26, #27, #28, #30 | - | +| **7. Search & Filter** | ✅ Works now | #27, #30 | - | +| **8. Streaming/WebSocket** | ⏸️ Future | Phase 5 work | TBD | +| **9. Response Variations** | ✅ Works now | None | - | +| **10. Error Handling** | 🟡 Partial | #34 (categorization), #30 (assert) | #34 | +| **11. Mixed Traffic** | ✅ Works now | All Phase 1 features | - | +| **12. 
Stress Testing** | 🟡 Partial | #33 (percentiles critical) | #33 | + +**Legend:** +- ✅ Works now - Can test today +- 🟡 Partial - Works but missing features +- 🔴 Blocked - Cannot test until features complete +- ⏸️ Future - Planned for later phase + +--- + +## Success Criteria + +Phase 1 is complete when: + +- [x] All 11 Phase 1 issues (#26-36) are closed +- [ ] Can execute Scenario 4 (Complete Shopping Flow) end-to-end +- [ ] Can extract variables (product_id, token, cart_id) across steps +- [ ] Can authenticate and maintain session across requests +- [ ] Can assert on response content and status codes +- [ ] Percentile latencies (P50, P90, P95, P99) are tracked and reported +- [ ] All tests passing (>79 tests) +- [ ] Documentation updated with scenario examples +- [ ] LOAD_TEST_SCENARIOS.md scenarios 1-7, 9-12 can be implemented + +--- + +## Architecture Changes + +### New Modules (Planned) +``` +src/ + scenario.rs - Scenario and Step definitions + executor.rs - Scenario execution engine + extractor.rs - Variable extraction (JSON/Regex/XML) + assertions.rs - Response assertion framework + session.rs - Cookie jar and session management + data_source.rs - CSV data loading +``` + +### Updated Modules +``` +src/ + config.rs - Add scenario configuration support + metrics.rs - Add percentile tracking, error categorization + worker.rs - Integrate scenario execution + client.rs - Add cookie handling, all HTTP methods +``` + +--- + +## Timeline + +| Week | Focus | Issues | Deliverable | +|------|-------|--------|-------------| +| 1-2 | Scenario Engine | #26 | Can execute multi-step flows | +| 3 | Variables & Sessions | #27, #28 | Can chain requests with extracted data | +| 4 | Assertions & Delays | #30, #29 | Can validate responses and add think times | +| 5 | Metrics & Methods | #33, #32 | Percentiles tracked, all HTTP methods | +| 6 | Data & Errors | #31, #34 | CSV support, error categorization | +| 7 | Final Polish | #35, #36 | Per-scenario metrics, connection stats | + 
+--- + +## Testing Strategy + +### Unit Tests +- Each module has comprehensive unit tests +- Mock HTTP responses for deterministic testing +- Edge cases: empty responses, malformed JSON, network errors + +### Integration Tests +- 3-step flow: login → get data → logout +- Shopping flow: browse → add to cart → checkout +- Error scenarios: 404s, 500s, timeouts + +### Manual Testing +- Run against https://ecom.edge.baugus-lab.com +- Validate all 12 scenarios from LOAD_TEST_SCENARIOS.md +- Performance testing: 100+ RPS sustained + +--- + +## Notes + +- **Long-lived branch**: `develop/phase1-scenario-engine` will be maintained for several months +- **Individual PRs**: Each issue gets its own feature branch → PR → merge to develop +- **Stability**: Merge develop → main only when stable and tested +- **Phase 2 timeline**: Start after Phase 1 complete (~Week 8) +- **Migration**: This file will be merged into `OVERALL_PROGRESS.md` when Phase 1 complete + +--- + +--- + +## Recent Progress (2026-02-11) + +### Issue #26: Multi-step Scenario Engine - 100% Complete ✅ + +**Summary:** +Successfully implemented a complete multi-step scenario execution engine that transforms +rust-loadtest from a simple RPS generator into a full-featured scenario testing tool. + +**What Was Built:** + +1. **Core Data Structures** (src/scenario.rs - 400 lines) + - Scenario, Step, RequestConfig for defining user journeys + - ScenarioContext for maintaining state across steps + - Extractor and Assertion enums (defined, implementation in #27 and #30) + - Variable storage and substitution system + - 9 unit tests for context management + +2. **Execution Engine** (src/executor.rs - 280 lines) + - ScenarioExecutor with sequential step execution + - StepResult and ScenarioResult for detailed tracking + - Automatic variable substitution (${var}, $var, ${timestamp}) + - Early termination on step failure + - Comprehensive logging (debug, info, error, warn) + +3. 
**Metrics Integration** (src/metrics.rs - 60 lines added) + - 6 new Prometheus metrics for scenarios + - Per-scenario execution counts (success/failed) + - Per-scenario duration histograms + - Per-step execution counts and duration + - Assertion pass/fail tracking (ready for #30) + - Concurrent scenario gauge + +4. **Worker Integration** (src/worker.rs - 85 lines added) + - run_scenario_worker() function for load generation + - ScenarioWorkerConfig struct + - Respects load models (Constant, Ramp, etc.) + - Fresh context per scenario execution + - Delay calculation for target scenarios-per-second + +5. **Integration Tests** (tests/ - 400 lines) + - 10 integration tests against live mock API + - Tests health checks, multi-step flows, variable substitution + - Tests POST requests, think times, failure handling + - Tests context isolation, timestamp generation + - 3 worker unit tests for duration, load models, timing + +6. **Example Code** (examples/ - 250 lines) + - Complete 6-step shopping flow example + - Demonstrates all key features + - Production-ready scenario template + +**Metrics:** +- Files created: 5 new files +- Lines added: ~1700 lines (code + tests + docs) +- Tests: 22 tests total (9 unit + 10 integration + 3 worker) +- Commits: 3 commits on feature branch + +**What Works:** +- ✅ Multi-step scenarios execute sequentially +- ✅ Variable substitution in paths, headers, body +- ✅ Special ${timestamp} for unique IDs +- ✅ Think times between steps +- ✅ Early termination on failures +- ✅ Detailed step and scenario results +- ✅ Prometheus metrics for observability +- ✅ Load model integration (Constant, Ramp, etc.) 
+- ✅ Context isolation per virtual user + +**What's Deferred:** +- Variable extraction from responses → Issue #27 +- Assertion execution → Issue #30 +- Cookie/session management → Issue #28 + +**Ready For:** +- Merge to develop/phase1-scenario-engine +- Production use for basic multi-step scenarios +- Building on top for #27, #28, #30 + +--- + +--- + +### Issue #27: Variable Extraction - 100% Complete ✅ + +**Summary:** +Implemented comprehensive variable extraction from HTTP responses using JSONPath, Regex, +Headers, and Cookies. Enables chaining steps together by extracting values from one step +and using them in subsequent requests. + +**What Was Built:** +- src/extractor.rs (438 lines) +- 4 extractor types: JSONPath, Regex, Header, Cookie +- Integration with executor.rs +- 15 unit tests + 7 integration tests + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #28: Cookie & Session Management - 100% Complete ✅ + +**Summary:** +Enabled automatic cookie handling for session management. Each virtual user gets their +own cookie-enabled HTTP client, ensuring session isolation. + +**What Was Built:** +- Enabled "cookies" feature in reqwest +- Updated worker.rs to create cookie-enabled clients +- 6 integration tests validating session persistence + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #29: Think Times - 100% Complete ✅ + +**Summary:** +Implemented realistic user behavior simulation with configurable delays between steps. +Supports both fixed and random think times that don't count towards latency metrics. 
+ +**What Was Built:** +- ThinkTime enum (Fixed, Random variants) +- calculate_delay() method with rand support +- Integration in executor.rs (applied after metrics) +- 4 unit tests + 6 integration tests + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #30: Response Assertions - 100% Complete ✅ + +**Summary:** +Built a comprehensive assertion framework that validates HTTP responses against +expected criteria. Steps fail if any assertion fails, providing detailed error +messages and metrics tracking. + +**What Was Built:** + +1. **Core Framework** (src/assertions.rs - 418 lines) + - AssertionResult and AssertionError types + - run_assertions() and run_single_assertion() functions + - format_actual_value() and format_expected_value() helpers + - 14 unit tests covering all assertion types + +2. **Assertion Types** (6 types) + - StatusCode(u16) - Assert exact status code + - ResponseTime(Duration) - Assert response time below threshold + - JsonPath { path, expected } - Assert JSONPath exists/matches value + - BodyContains(String) - Assert body contains substring + - BodyMatches(String) - Assert body matches regex + - HeaderExists(String) - Assert response header exists + +3. **Integration** (src/executor.rs updates) + - Runs assertions after variable extraction + - Tracks pass/fail counts in StepResult + - Records SCENARIO_ASSERTIONS_TOTAL metrics + - Step fails if ANY assertion fails + - Detailed error messages on failure + +4. 
**Integration Tests** (tests/assertion_integration_tests.rs - 590 lines) + - 18 integration tests against live mock API + - Tests all assertion types (pass and fail cases) + - Tests multiple assertions per step + - Tests multi-step scenarios with assertion failures + - Tests realistic e-commerce flow with 10 assertions + +**Metrics:** +- Files created: 2 files (assertions.rs, assertion_integration_tests.rs) +- Lines added: ~1000 lines (code + tests) +- Tests: 32 tests total (14 unit + 18 integration) +- Commits: 2 commits on feature branch + +**What Works:** +- ✅ All 6 assertion types validated +- ✅ Step failure on assertion failure +- ✅ Detailed assertion result tracking +- ✅ Prometheus metrics for assertions +- ✅ Multi-assertion scenarios +- ✅ Early termination on assertion failures + +**Ready For:** +- Merge to develop/phase1-scenario-engine +- Production use for validated scenarios +- Wave 3 work (#33, #32, #31, etc.) + +--- + +### Issue #33: Percentile Latency Metrics - 100% Complete ✅ + +**Summary:** +Implemented accurate percentile latency tracking using HDR Histogram. Provides +P50, P90, P95, P99, and P99.9 metrics for requests, scenarios, and individual steps. + +**What Was Built:** + +1. **Core Module** (src/percentiles.rs - 530 lines) + - PercentileTracker: Single metric tracker with HDR Histogram + - MultiLabelPercentileTracker: Per-endpoint/scenario tracking + - PercentileStats struct with formatted output + - Global trackers: GLOBAL_REQUEST_PERCENTILES, GLOBAL_SCENARIO_PERCENTILES, GLOBAL_STEP_PERCENTILES + - Tracks 1μs to 60s latencies with 3 significant digits + - 11 unit tests + +2. **Worker Integration** (src/worker.rs) + - Auto-records request latencies in GLOBAL_REQUEST_PERCENTILES + - Auto-records scenario latencies in GLOBAL_SCENARIO_PERCENTILES + - Auto-records step latencies in GLOBAL_STEP_PERCENTILES (scenario:step) + +3. 
**Final Report** (src/main.rs) + - print_percentile_report() function + - Formatted tables with all percentiles + - Single request, per-scenario, and per-step breakdowns + - Displayed before Prometheus metrics + +4. **Integration Tests** (tests/percentile_tracking_tests.rs - 430 lines) + - 11 integration tests validating: + - Basic percentile calculations + - Large datasets (1000+ samples) + - Skewed distributions (90/10 split) + - Multi-label tracking + - Realistic latency patterns + +**Dependencies:** +- hdrhistogram = "7.5" + +**Metrics Tracked:** +- P50 (median), P90, P95, P99, P99.9 +- Per-request, per-scenario, per-step breakdowns +- Count, min, max, mean for each label + +**Technical Details:** +- HDR Histogram with 3 significant digits precision +- Thread-safe using Arc<Mutex<...>> +- Memory per histogram: ~2-4MB (see MEMORY_OPTIMIZATION.md) +- No performance impact on requests + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #32: All HTTP Methods - 100% Complete ✅ + +**Summary:** +Extended HTTP method support beyond GET and POST to include PUT, PATCH, DELETE, HEAD, +and OPTIONS. Enables full REST API testing capabilities. + +**What Was Built:** +- Updated build_request() in worker.rs to support all 7 HTTP methods +- Updated executor.rs to handle OPTIONS method +- JSON body support for PUT and PATCH +- 14 integration tests validating all methods + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #31: CSV Data-Driven Testing - 100% Complete ✅ + +**Summary:** +Implemented CSV data source loading with round-robin distribution across virtual users. +Enables parameterized testing with user credentials, product catalogs, or test data pools. + +**What Was Built:** + +1. **Core Module** (src/data_source.rs - 470 lines) + - CsvDataSource with from_file() constructor + - Round-robin row distribution with wrap-around + - Thread-safe with Arc<Mutex<...>> for concurrent access + - Integration with ScenarioContext + - 17 unit tests + +2. 
**Integration Tests** (tests/csv_data_driven_tests.rs - 480 lines) + - 7 integration tests validating: + - CSV loading and parsing + - Round-robin distribution + - Variable substitution from CSV + - Concurrent access safety + - Multi-scenario CSV usage + +**Dependencies:** +- csv = "1.3" + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #34: Error Categorization - 100% Complete ✅ + +**Summary:** +Implemented comprehensive error categorization system that classifies errors into +6 distinct categories: ClientError (4xx), ServerError (5xx), NetworkError, TimeoutError, +TlsError, and OtherError. Provides detailed error analysis and troubleshooting capabilities. + +**What Was Built:** + +1. **Core Module** (src/errors.rs - 345 lines) + - ErrorCategory enum with 6 variants + - from_status_code() for HTTP errors + - from_reqwest_error() for client errors + - CategorizedError trait for custom errors + - 12 unit tests + +2. **Metrics Integration** (src/metrics.rs) + - REQUEST_ERRORS_BY_CATEGORY counter metric + - Error tracking in worker.rs + +3. **Integration Tests** (tests/error_categorization_tests.rs - 325 lines) + - 8 integration tests validating: + - HTTP error categorization (4xx, 5xx) + - Network error detection + - Timeout error detection + - Concurrent error tracking + +**Metrics Tracked:** +- request_errors_by_category{category="client_error"} +- request_errors_by_category{category="server_error"} +- request_errors_by_category{category="network_error"} +- request_errors_by_category{category="timeout_error"} +- request_errors_by_category{category="tls_error"} +- request_errors_by_category{category="other_error"} + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #35: Per-Scenario Throughput - 100% Complete ✅ + +**Summary:** +Implemented per-scenario throughput tracking that calculates requests per second (RPS) +independently for each scenario type. 
Enables performance comparison across different +workload patterns and identification of scenario-specific bottlenecks. + +**What Was Built:** + +1. **Core Module** (src/throughput.rs - 319 lines) + - ThroughputStats struct with RPS, count, duration, avg time + - ThroughputTracker with per-scenario tracking + - GLOBAL_THROUGHPUT_TRACKER singleton + - format_throughput_table() for tabular output + - total_throughput() for aggregate RPS + - Thread-safe with Arc<Mutex<...>> + - 10 unit tests + +2. **Metrics Integration** (src/metrics.rs) + - SCENARIO_REQUESTS_TOTAL: Counter per scenario + - SCENARIO_THROUGHPUT_RPS: Gauge per scenario + +3. **Worker Integration** (src/worker.rs) + - Auto-records scenario throughput after execution + - Tracks duration per scenario + +4. **Final Report** (src/main.rs) + - print_throughput_report() function + - Displays per-scenario RPS table + - Shows total aggregate throughput + - Displayed after percentile report + +5. **Integration Tests** (tests/per_scenario_throughput_tests.rs - 333 lines) + - 14 comprehensive integration tests validating: + - Basic throughput tracking + - RPS calculation accuracy + - Multiple scenario tracking + - Real scenario execution integration + - Concurrent access safety + - Table formatting + - Empty state handling + +**Metrics Tracked:** +- scenario_requests_total{scenario="ScenarioName"} +- scenario_throughput_rps{scenario="ScenarioName"} +- Total throughput (sum across all scenarios) + +**Features:** +- Per-scenario RPS calculation +- Average time per scenario execution +- Total requests per scenario +- Elapsed time tracking +- Reset capability for testing +- Thread-safe concurrent access + +**Benefits:** +- Compare performance across scenario types +- Identify slow vs fast scenarios +- Track throughput trends over time +- Detailed performance analysis +- Bottleneck identification + +**Merged to**: develop/phase1-scenario-engine + +--- + +### Issue #36: Connection Pooling Stats - 100% Complete ✅ + +**Summary:** 
+Implemented connection pool monitoring and configuration with connection reuse +analysis. Since reqwest doesn't expose internal pool metrics, uses timing-based +heuristics to infer connection behavior patterns. + +**What Was Built:** + +1. **Core Module** (src/connection_pool.rs - 378 lines) + - PoolConfig for pool configuration (max idle, idle timeout, TCP keepalive) + - PoolStatsTracker for tracking connection behavior + - ConnectionStats for reuse rate analysis + - GLOBAL_POOL_STATS singleton + - 12 unit tests + +2. **Connection Classification Algorithm** + - Fast requests (<100ms) → likely reused existing connections + - Slow requests (≥100ms) → likely established new connections (TLS handshake) + - Configurable threshold for different network conditions + - Tracks reuse rate and new connection rate + +3. **Pool Configuration** + - Default: 32 max idle per host + - Default: 90s idle timeout + - Default: 60s TCP keepalive + - Applied automatically to reqwest ClientBuilder + - Configurable via builder pattern + +4. **Metrics Added** (src/metrics.rs) + - connection_pool_max_idle_per_host: Config value (gauge) + - connection_pool_idle_timeout_seconds: Config value (gauge) + - connection_pool_requests_total: Total requests (counter) + - connection_pool_likely_reused_total: Reused connections (counter) + - connection_pool_likely_new_total: New connections (counter) + - connection_pool_reuse_rate_percent: Reuse percentage (gauge) + +5. **Integration** (src/client.rs, src/config.rs, src/worker.rs) + - Updated ClientConfig with pool_config field + - Applied PoolConfig to reqwest ClientBuilder + - Auto-records connection statistics after each request + - Tracks timing for reuse inference + +6. 
**Reporting** (src/main.rs) + - print_pool_report() function + - Connection reuse analysis with percentages + - Duration tracking + - Interpretation guidelines: + - ≥80% reuse: Excellent (efficient pool usage) + - ≥50% reuse: Moderate (consider tuning) + - <50% reuse: Low (check configuration/patterns) + - Displayed after throughput report + +7. **Integration Tests** (tests/connection_pool_tests.rs - 408 lines) + - 22 comprehensive integration tests validating: + - Pool configuration and defaults + - Builder pattern + - Connection stats calculations + - Fast vs slow request classification + - Mixed traffic patterns + - Custom thresholds + - Reset functionality + - Timing accuracy + - High reuse scenarios + - Concurrent access safety + - Boundary values + - Edge cases (zero/extreme latency) + - Real client integration + - Format variations + +**Technical Approach:** + +Since reqwest/hyper don't expose connection pool internals, we use +timing-based inference: +- New TLS connections add 50-150ms overhead (handshake) +- Reused connections skip handshake and are significantly faster +- Threshold of 100ms provides reliable classification + +**Metrics Tracked:** +- Pool configuration (max idle, timeout) +- Total requests analyzed +- Likely reused vs new connections +- Reuse rate percentage +- Duration over which stats were collected + +**Features:** +- Thread-safe with Arc<Mutex<...>> +- Configurable classification threshold +- Reset capability for testing +- Detailed formatting and reporting +- Production-ready monitoring + +**Benefits:** +- Visibility into connection pool behavior +- Identify connection reuse patterns +- Diagnose connection establishment issues +- Optimize pool configuration for workload +- Detect connection pool exhaustion +- Production observability + +**Limitations:** +- Inference-based (not direct pool metrics) +- Accuracy depends on network latency consistency +- Cannot distinguish idle vs active connections +- No direct pool size monitoring + +**Use
Cases:** +- Monitor connection pool efficiency +- Tune pool size and timeouts +- Diagnose connection issues +- Validate connection reuse +- Performance optimization + +**Merged to**: develop/phase1-scenario-engine + +--- + +**Last Updated**: 2026-02-14 14:00 PST +**Status**: 🎉 ✅ PHASE 1 WAVE 3 COMPLETE! All 6 Wave 3 issues done! (Issues #33, #32, #31, #34, #35, #36) +**Phase 1 Progress**: 11/11 issues complete (Waves 1, 2, and 3 all done!) +**Next Milestone**: Phase 1 completion validation and merge to main +**Branch Status**: feature/issue-36-connection-pool-stats merged to develop diff --git a/PHASE2_PLAN.md b/PHASE2_PLAN.md new file mode 100644 index 0000000..9c4b156 --- /dev/null +++ b/PHASE2_PLAN.md @@ -0,0 +1,491 @@ +# Phase 2: Configuration System - Implementation Plan + +**Branch**: `phase2-advanced-features` +**Duration**: ~4 weeks (estimated) +**Target**: Replace environment variables with declarative YAML configuration files + +--- + +## Overview + +Phase 2 transforms the rust-loadtest tool from environment variable configuration to a declarative YAML-based configuration system. This enables version-controlled test plans, reusable scenarios, and eliminates the need for complex environment setups. 
+ +### Key Capabilities to Add: +- YAML configuration file support +- Comprehensive config schema with validation +- Default value merging +- Environment variable overrides (backward compatibility) +- Config versioning and migration +- Scenario definitions in YAML +- Multiple scenario support per test run +- Config file hot-reload (development mode) + +### Configuration Format: +```yaml +version: "1.0" +metadata: + name: "E-commerce Load Test" + description: "Full checkout flow testing" + tags: ["production", "critical"] + +config: + baseUrl: "https://shop.example.com" + timeout: 30s + workers: 10 + duration: 10m + +load: + model: ramp + rps: + min: 10 + max: 100 + rampDuration: 2m + +scenarios: + - name: "Browse and Purchase" + weight: 70 + steps: + - name: "Homepage" + request: + method: GET + path: "/" + assertions: + - statusCode: 200 + thinkTime: 2s + + - name: "Search" + request: + method: GET + path: "/search?q=laptop" + extract: + - name: productId + jsonPath: "$.products[0].id" + thinkTime: 3s +``` + +--- + +## Implementation Waves + +### Wave 1: Core YAML Support (Week 1) +Basic YAML parsing and schema validation. + +### Wave 2: Advanced Config Features (Week 2) +Default merging, env overrides, and validation. + +### Wave 3: Scenario YAML Integration (Week 3) +Load scenarios from YAML files. + +### Wave 4: Polish & Migration (Week 4) +Hot-reload, migration tools, documentation. 
+ +--- + +## Issues and Progress Tracker + +### ✅ Completed +- [x] **Issue #37**: YAML config file parser (P0, M) - **COMPLETE** ✅ + - Branch: `feature/issue-37-yaml-config-parser` (merged to phase2) + - 629 lines of implementation + 705 lines of tests + - Full YAML parsing with validation + - 22 comprehensive integration tests +- [x] **Issue #38**: Config schema and validation (P0, L) - **COMPLETE** ✅ + - Branch: `feature/issue-38-config-schema-validation` (merged to phase2) + - 540 lines of validation + 569 lines of tests + - Enhanced validation with field-level errors + - JSON Schema export for tooling + - 24 comprehensive tests +- [x] **Issue #39**: Default value merging (P1, S) - **COMPLETE** ✅ + - Branch: `feature/issue-39-default-value-merging` (merged to phase2) + - 306 lines of implementation + 227 lines of unit tests + 375 lines of integration tests + - ConfigDefaults with default values (workers: 10, timeout: 30s, etc.) + - ConfigMerger implementing precedence (env > yaml > default) + - ConfigPrecedence with comprehensive documentation + - 35 comprehensive tests (17 unit + 18 integration) +- [x] **Issue #40**: Environment variable overrides (P0, M) - **COMPLETE** ✅ + - Branch: `feature/issue-40-env-var-overrides` (merged to phase2) + - 161 lines of implementation + 599 lines of tests + 348 lines of docs + - Config::from_yaml_with_env_overrides() method + - Complete env var mapping for all config fields + - Load model parameter and complete override support + - Invalid/empty env value fallback to YAML + - 20 comprehensive integration tests + - Full documentation with CI/CD patterns +- [x] **Issue #41**: Config versioning (P2, M) - **COMPLETE** ✅ + - Branch: `feature/issue-41-config-versioning` (merged to phase2) + - 463 lines of implementation + 542 lines of tests + 461 lines of docs + - Version struct with semantic versioning (major.minor) + - VersionChecker for compatibility validation + - Migration trait and MigrationRegistry framework + - Integrated 
with YamlConfig validation + - 55 comprehensive tests (30 unit + 25 integration) + - Complete versioning guide with migration examples +- [x] **Issue #42**: Scenario YAML definitions (P0, XL) - **COMPLETE** ✅ + - Branch: `feature/issue-42-scenario-yaml-definitions` (merged to phase2) + - 78 lines of implementation + 695 lines of tests + 686 lines of docs + - Data file support (CSV, JSON) with strategies (sequential, random, cycle) + - Random think time (min/max range) for realistic user behavior + - Scenario-level config overrides (timeout, retry logic) + - Enhanced YamlScenario with dataFile and config fields + - 23 comprehensive integration tests + - Complete scenario guide with real-world examples +- [x] **Issue #43**: Multi-scenario execution (P0, L) - **COMPLETE** ✅ + - Branch: `feature/issue-43-multi-scenario-execution` (merged to phase2) + - 512 lines of implementation + 523 lines of tests + 514 lines of docs + - ScenarioSelector for weighted random selection + - RoundRobinDistributor for even distribution + - ScenarioMetrics for per-scenario tracking + - Thread-safe atomic counters + - 44 comprehensive tests (10 unit + 34 integration) + - Complete multi-scenario guide with real-world examples +- [x] **Issue #44**: Config file hot-reload (P2, S) - **COMPLETE** ✅ + - Branch: `feature/issue-44-config-hot-reload` (merged to phase2) + - 571 lines of implementation + 504 lines of tests + 661 lines of docs + - ConfigWatcher for file watching with notify crate + - HotReloadConfig for hot-reload behavior control + - ReloadNotifier for event-based config change handling + - Debouncing to prevent multiple reloads for rapid changes + - Full validation before applying config changes + - 22 comprehensive integration tests + - Complete hot-reload guide with examples and best practices +- [x] **Issue #45**: Config examples and templates (P1, S) - **COMPLETE** ✅ + - Branch: `feature/issue-45-config-examples` (merged to phase2) + - 8 production-ready YAML templates + 2 data 
files + 450 lines of tests + 1276 lines of docs + - basic-api-test.yaml (simple endpoint testing) + - ecommerce-scenario.yaml (multi-step shopping flow with weighted scenarios) + - stress-test.yaml (high-load capacity testing, 10-1000 RPS) + - data-driven-test.yaml (CSV/JSON data file usage) + - authenticated-api.yaml (JWT, API key, OAuth flows) + - microservices-test.yaml (distributed service testing) + - graphql-api.yaml (GraphQL queries and mutations) + - spike-test.yaml (sudden traffic spike resilience testing) + - Example data files (users.csv, products.json) + - 19 validation tests + - Comprehensive template guide and usage documentation +- [x] **Issue #46**: Config documentation generator (P2, M) - **COMPLETE** ✅ + - Branch: `feature/issue-46-config-docs-generator` (merged to phase2) + - 654 lines of implementation + 316 lines of tests + 652 lines of docs + - ConfigDocsGenerator for automatic documentation generation + - JSON Schema export (docs/config-schema.json) for IDE support + - Markdown reference documentation (docs/CONFIG_SCHEMA.md) + - VS Code code snippets (9 snippets) for faster authoring + - IDE integration (VS Code, IntelliJ, Vim) + - Schema validation support + - 22 comprehensive tests + - Complete documentation generator guide + +### 🚧 In Progress +_None - 🎉 ✅ Wave 4 COMPLETE! 
(3/3 done)_ + +### 📋 Todo - Wave 1 (Week 1) + +- [x] **Issue #37**: YAML config file parser (P0, M) ✅ + - [x] Add serde_yaml dependency + - [x] Create Config struct for YAML format + - [x] Implement from_yaml() method + - [x] Support loading from file path + - [x] Support loading from string (testing) + - [x] Backward compatibility with env vars (ready) + - [x] Unit tests for YAML parsing + - [x] Integration tests + +- [x] **Issue #38**: Config schema and validation (P0, L) ✅ + - [x] Define comprehensive ConfigSchema + - [x] Add validation rules (required fields, ranges, formats) + - [x] URL validation + - [x] Duration format validation + - [x] Enum validation (load models, HTTP methods) + - [x] Custom validation errors with helpful messages + - [x] Unit tests for validation + - [x] Integration tests + +### 📋 Todo - Wave 2 (Week 2) + +- [x] **Issue #39**: Default value merging (P1, S) ✅ + - [x] Define default values for all config fields + - [x] Implement merge logic (defaults + file + env) + - [x] Precedence: env vars > file > defaults + - [x] Test precedence order + - [x] Document precedence rules + +- [x] **Issue #40**: Environment variable overrides (P0, M) ✅ + - [x] Map env vars to YAML config paths + - [x] Support dot notation (e.g., CONFIG_LOAD_MODEL) + - [x] Override specific YAML values with env vars + - [x] Maintain backward compatibility + - [x] Document override patterns + - [x] Integration tests + +- [x] **Issue #41**: Config versioning (P2, M) ✅ + - [x] Add version field to config + - [x] Version detection + - [x] Migration framework for v1.0 -> v2.0 + - [x] Migration tests + - [x] Version validation + +### 📋 Todo - Wave 3 (Week 3) + +- [x] **Issue #42**: Scenario YAML definitions (P0, XL) ✅ + - [x] Scenario block in YAML + - [x] Multiple scenarios per file + - [x] Scenario weighting for mixed traffic + - [x] Step definitions in YAML + - [x] Request config in YAML + - [x] Assertions in YAML + - [x] Extractors in YAML + - [x] Think times in YAML (fixed 
and random) + - [x] Data files in YAML (CSV, JSON) + - [x] Integration with existing executor + - [x] Comprehensive tests + +- [x] **Issue #43**: Multi-scenario execution (P0, L) ✅ + - [x] Load multiple scenarios from config + - [x] Weighted scenario selection + - [x] Round-robin scenario distribution + - [x] Per-scenario worker allocation + - [x] Per-scenario metrics + - [x] Integration tests + +### 📋 Todo - Wave 4 (Week 4) + +- [x] **Issue #44**: Config file hot-reload (P2, S) ✅ + - [x] File watcher for config changes + - [x] Graceful reload without stopping test + - [x] Validation before reload + - [x] Reload notification/logging + - [x] Development mode flag + - [x] Tests + +- [x] **Issue #45**: Config examples and templates (P1, S) ✅ + - [x] Create example YAML configs + - [x] Basic API test template + - [x] E-commerce scenario template + - [x] Stress test template + - [x] Documentation for each template + - [x] Template validation + +- [x] **Issue #46**: Config documentation generator (P2, M) ✅ + - [x] Auto-generate schema docs from code + - [x] JSON Schema export + - [x] Markdown documentation + - [x] VS Code snippet generation + - [x] Documentation tests + +--- + +## Architecture Changes + +### New Modules (Planned) +``` +src/ + config/ + mod.rs - Config module root + yaml.rs - YAML parsing + schema.rs - Config schema and validation + merge.rs - Default merging logic + migration.rs - Version migration + examples.rs - Built-in templates +``` + +### Updated Modules +``` +src/ + config.rs - Extend to support YAML loading + main.rs - Load config from file or env + scenario.rs - YAML deserialization +``` + +--- + +## Timeline + +| Week | Focus | Issues | Deliverable | +|------|-------|--------|-------------| +| 1 | YAML Parsing | #37, #38 | Can load and validate YAML configs | +| 2 | Advanced Config | #39, #40, #41 | Defaults, overrides, versioning work | +| 3 | Scenarios | #42, #43 | Multi-scenario YAML execution | +| 4 | Polish | #44, #45, #46 | Hot-reload, 
templates, docs | + +--- + +## Testing Strategy + +### Unit Tests +- YAML parsing with various formats +- Schema validation with invalid inputs +- Default merging logic +- Environment override precedence +- Version migration + +### Integration Tests +- Load full YAML config and execute test +- Multi-scenario execution with weighting +- Override YAML with environment variables +- Hot-reload during test execution +- Template validation + +### Example Configs +- Simple single-endpoint test +- Multi-step scenario test +- Mixed traffic with multiple scenarios +- Data-driven test with CSV +- Stress test with ramping + +--- + +## Success Criteria + +Phase 2 is complete when: + +- [x] Can load complete test configuration from YAML file ✅ +- [x] Can define multi-step scenarios in YAML ✅ +- [x] Can run multiple scenarios with weighted distribution ✅ +- [x] Environment variables can override YAML values ✅ +- [x] Config validation provides helpful error messages ✅ +- [x] Default values work for all optional fields ✅ +- [x] Config versioning and migration works ✅ +- [x] All tests passing (183 new tests!) 
✅ +- [x] Documentation includes YAML examples (8 templates + extensive docs) ✅ +- [x] Backward compatibility maintained ✅ + +🎉 **ALL SUCCESS CRITERIA MET!** + +--- + +## Dependencies + +**New Rust Crates:** +```toml +serde_yaml = "0.9" # YAML parsing +serde = { version = "1.0", features = ["derive"] } +validator = "0.16" # Schema validation +notify = "6.0" # File watching (hot-reload) +``` + +--- + +## Migration Strategy + +### Backward Compatibility + +Phase 2 must maintain 100% backward compatibility with Phase 1: +- All environment variables continue to work +- If no YAML file provided, use env vars (current behavior) +- If YAML file provided, env vars can override specific values +- Existing tests and deployments continue working + +### Migration Path for Users + +**Step 1: Generate config from current env vars** +```bash +rust-loadtest --generate-config > loadtest.yaml +``` + +**Step 2: Review and customize YAML** +```bash +vim loadtest.yaml +``` + +**Step 3: Run with YAML config** +```bash +rust-loadtest --config loadtest.yaml +``` + +**Step 4: Override specific values** +```bash +TARGET_RPS=500 rust-loadtest --config loadtest.yaml +``` + +--- + +## Example YAML Configs + +### Simple API Test +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + workers: 10 + duration: 5m + +load: + model: rps + target: 100 + +scenarios: + - name: "API Health Check" + steps: + - request: + method: GET + path: "/health" + assertions: + - statusCode: 200 +``` + +### E-commerce Flow +```yaml +version: "1.0" +config: + baseUrl: "https://shop.example.com" + workers: 50 + duration: 30m + +load: + model: ramp + rps: + min: 10 + max: 200 + rampDuration: 5m + +scenarios: + - name: "Browse and Purchase" + weight: 70 + steps: + - name: "Homepage" + request: + method: GET + path: "/" + thinkTime: 2s + + - name: "Search" + request: + method: GET + path: "/search?q=laptop" + extract: + - name: productId + jsonPath: "$.products[0].id" + thinkTime: 3s + + - name: "Add to 
Cart" + request: + method: POST + path: "/cart" + body: '{"productId": "${productId}"}' + assertions: + - statusCode: 201 + + - name: "Quick Browse" + weight: 30 + steps: + - request: + method: GET + path: "/" +``` + +--- + +## Notes + +- **Backward Compatibility**: Critical - existing users must not break +- **Validation**: Provide clear, actionable error messages +- **Documentation**: Every YAML field must be documented +- **Examples**: Provide real-world config examples +- **Testing**: 50+ tests to ensure quality +- **Performance**: YAML parsing should add <10ms overhead + +--- + +**Last Updated**: 2026-02-11 (continued) +**Status**: 🎉 ✅ PHASE 2 COMPLETE! All 10 issues done (100%) +**Next Milestone**: Phase 2 Review & Merge to Main +**Branch Status**: phase2-advanced-features (ready for review) diff --git a/README.md b/README.md index c93c51a..4569407 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,166 @@ This tool is available in two image variants to suit different deployment scenar **Recommendation:** Use the **static image** for production deployments in secure environments. Use the **standard image** for development and troubleshooting. +## ⚠️ Memory Configuration + +Load testing at high concurrency or RPS can consume significant memory.
**Read this before running high-load tests.** + +### Quick Memory Limits + +| Available RAM | Max Concurrent Tasks | Max RPS | Max Duration | +|---------------|---------------------|---------|--------------| +| 512MB | 10 | 500 | 5 minutes | +| 2GB | 100 | 5,000 | 30 minutes | +| 4GB | 500 | 10,000 | 1 hour | +| 8GB+ | 1,000 | 25,000 | 2+ hours | + +### Memory Optimization (Issues #66, #68) + +For high-load tests that may cause OOM errors, use memory optimization settings: + +```bash +docker run --memory=4g \ + -e TARGET_URL="https://api.example.com" \ + -e NUM_CONCURRENT_TASKS=500 \ + -e TARGET_RPS=10000 \ + -e PERCENTILE_TRACKING_ENABLED=false \ # <-- Disables histogram tracking + -e MAX_HISTOGRAM_LABELS=100 \ # <-- Limits unique labels (if enabled) + cbaugus/rust_loadtest:latest +``` + +**PERCENTILE_TRACKING_ENABLED=false:** +- Saves 2-4MB per unique scenario/step label +- Disables P50/P90/P95/P99 percentile calculation +- Allows much higher concurrency and RPS +- Prometheus metrics still work normally + +**MAX_HISTOGRAM_LABELS=100 (default):** +- Limits memory to 200-400MB for percentile tracking +- Uses LRU eviction for oldest labels +- Warns at 80% capacity +- Increase if you have >100 unique scenario/step combinations + +**When to disable percentile tracking:** +- High concurrency tests (>500 tasks) +- High RPS tests (>10,000 RPS) +- Long duration tests (>2 hours without rotation) +- Limited RAM (2-4GB) + +**For long-duration tests (24h+), use histogram rotation:** +```bash +docker run --memory=4g \ + -e TARGET_URL="https://api.example.com" \ + -e NUM_CONCURRENT_TASKS=200 \ + -e TARGET_RPS=5000 \ + -e TEST_DURATION=24h \ + -e HISTOGRAM_ROTATION_INTERVAL=15m \ # <-- Rotate every 15 minutes + cbaugus/rust_loadtest:latest +``` + +**What histogram rotation does:** +- Clears percentile data every N minutes to free memory +- Keeps histogram labels (no recreation overhead) +- Enables 24h+ tests without OOM +- Logs rotation events for visibility +-
Recommended: 15-30 minute intervals for long tests + +**Auto-OOM Protection (Issue #72):** + +The load tester includes automatic memory protection to prevent OOM crashes: + +```bash +docker run --memory=4g \ + -e TARGET_URL="https://api.example.com" \ + -e NUM_CONCURRENT_TASKS=1000 \ + -e TARGET_RPS=20000 \ + -e MEMORY_WARNING_THRESHOLD_PERCENT=80 \ # <-- Warn at 80% memory + -e MEMORY_CRITICAL_THRESHOLD_PERCENT=90 \ # <-- Critical at 90% memory + -e AUTO_DISABLE_PERCENTILES_ON_WARNING=true \ # <-- Auto-disable percentiles + cbaugus/rust_loadtest:latest +``` + +**How it works:** +- Monitors memory usage every 5 seconds +- Detects memory limits (Docker cgroup-aware) +- At **warning threshold (80%)**: + - Automatically disables percentile tracking + - Rotates existing histograms to free memory + - Logs defensive actions taken +- At **critical threshold (90%)**: + - Aggressively rotates histograms again + - Logs critical memory warning +- Works on both bare metal and containerized environments + +**Configuration:** +- `MEMORY_WARNING_THRESHOLD_PERCENT` - Warning threshold (default: 80%) +- `MEMORY_CRITICAL_THRESHOLD_PERCENT` - Critical threshold (default: 90%) +- `AUTO_DISABLE_PERCENTILES_ON_WARNING` - Take automatic defensive actions (default: true) + +**When to use:** +- Unknown memory requirements +- Long-duration tests where memory may grow +- Protection against misconfiguration +- Production load tests where stability is critical + +Set `AUTO_DISABLE_PERCENTILES_ON_WARNING=false` for monitoring-only mode (logs warnings but doesn't take action). + +**Response Body Memory Management (Issue #73):** + +At high RPS (50K+), HTTP response bodies are now automatically consumed and discarded to prevent memory accumulation. Previous versions only checked status codes without reading response bodies, which could cause rapid memory growth (~215 MB/second at 50K RPS).
+ +**Fixed behavior:** +- Response bodies are explicitly read and discarded in single-request mode +- Prevents unbuffered response accumulation +- Enables sustained high-RPS testing without memory leaks +- Scenario mode was already handling this correctly + +**No configuration needed** - this fix is automatic and transparent. If you previously experienced rapid memory growth at high RPS even with percentile tracking disabled, this fix resolves it. + +### Pre-configured Examples + +See `docker-compose.loadtest-examples.yml` for ready-to-use configurations: + +\`\`\`bash +# Small test (512MB RAM) +docker-compose -f docker-compose.loadtest-examples.yml up loadtest-small + +# High load test (4GB RAM) +docker-compose -f docker-compose.loadtest-examples.yml up loadtest-high +\`\`\` + +📚 **Full documentation:** See `MEMORY_OPTIMIZATION.md` for detailed analysis, memory breakdown, and optimization strategies. + +### Memory Monitoring (Issue #69) + +Real-time memory metrics are available via Prometheus on port 9090: + +**Available Metrics:** +- `rust_loadtest_process_memory_rss_bytes` - Resident set size (actual RAM used) +- `rust_loadtest_process_memory_virtual_bytes` - Virtual memory size +- `rust_loadtest_histogram_count` - Number of active HDR histograms +- `rust_loadtest_histogram_memory_estimate_bytes` - Estimated histogram memory (3MB per histogram) + +**Example queries:** +\`\`\`promql +# Memory usage in MB +rust_loadtest_process_memory_rss_bytes / 1024 / 1024 + +# Memory usage percentage (if you know container limit) +(rust_loadtest_process_memory_rss_bytes / 4294967296) * 100 # For 4GB limit + +# Histogram memory overhead +rust_loadtest_histogram_memory_estimate_bytes / 1024 / 1024 +\`\`\` + +**Set up alerts:** +\`\`\`yaml +# Prometheus alert when approaching 80% of 4GB limit +- alert: LoadTestHighMemory + expr: rust_loadtest_process_memory_rss_bytes > 3.4e9 + annotations: + summary: "Load test using >80% of memory limit" +\`\`\` + ## Project Structure ``` @@ 
-78,6 +238,12 @@ The load testing tool is configured primarily through environment variables pass * CLIENT_CERT_PATH (Optional): Path to the client's PEM-encoded public certificate file for mTLS. * CLIENT_KEY_PATH (Optional): Path to the client's PEM-encoded PKCS#8 private key file for mTLS. Both `CLIENT_CERT_PATH` and `CLIENT_KEY_PATH` must be provided to enable mTLS. * RESOLVE_TARGET_ADDR (Optional): Allows overriding DNS resolution for the `TARGET_URL`. The format is `"hostname:ip_address:port"`. For example, if `TARGET_URL` is `http://example.com/api` and `RESOLVE_TARGET_ADDR` is set to `"example.com:192.168.1.50:8080"`, all requests to `example.com` will be directed to `192.168.1.50` on port `8080`. This is useful for targeting services not in DNS or for specific routing during tests. +* PERCENTILE_TRACKING_ENABLED (Optional, default: true): Set to "false" to disable HDR histogram tracking for percentile latency calculation. Disabling this can save significant memory (2-4MB per unique scenario/step) in high-load tests. When disabled, P50/P90/P95/P99 percentiles won't be available, but Prometheus metrics continue to work. See [Memory Configuration](#️-memory-configuration) for details. +* MAX_HISTOGRAM_LABELS (Optional, default: 100): Maximum number of unique scenario/step labels to track for percentile calculation. Uses LRU eviction when limit is reached. Each label consumes 2-4MB. Increase for tests with many unique scenarios, or decrease to save memory. Warning logged at 80% capacity. +* HISTOGRAM_ROTATION_INTERVAL (Optional, default: disabled): Periodically reset histogram data to prevent unbounded memory growth in long tests. Format: `15m`, `1h`, `2h`. Clears percentile data while keeping labels. Essential for 24h+ tests. Example: `HISTOGRAM_ROTATION_INTERVAL=15m` +* MEMORY_WARNING_THRESHOLD_PERCENT (Optional, default: 80.0): Memory usage percentage that triggers warning and defensive actions. 
When memory exceeds this threshold, auto-OOM protection can automatically disable percentile tracking to prevent crashes. +* MEMORY_CRITICAL_THRESHOLD_PERCENT (Optional, default: 90.0): Memory usage percentage that triggers critical warnings and aggressive cleanup. At this level, histograms are rotated to free as much memory as possible. +* AUTO_DISABLE_PERCENTILES_ON_WARNING (Optional, default: true): When true, automatically disables percentile tracking and rotates histograms when memory warning threshold is exceeded. Set to false for monitoring-only mode (logs warnings without taking action). Load Model Specific Environment Variables The behavior of the load test is determined by LOAD_MODEL_TYPE and its associated variables: diff --git a/docker-compose.loadtest-examples.yml b/docker-compose.loadtest-examples.yml new file mode 100644 index 0000000..5568a68 --- /dev/null +++ b/docker-compose.loadtest-examples.yml @@ -0,0 +1,323 @@ +# Docker Compose Examples for Load Testing +# +# This file provides example configurations for different load test scenarios +# with appropriate memory limits and settings. 
+# +# Usage: +# docker-compose -f docker-compose.loadtest-examples.yml up +# +# Monitor with: +# docker stats --no-stream + +version: '3.8' + +services: + # ============================================================================== + # SMALL LOAD TEST - 512MB Memory + # ============================================================================== + # Quick validation test - fits in minimal memory + loadtest-small: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-small + mem_limit: 512m + memswap_limit: 512m + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/health" + REQUEST_TYPE: "GET" + + # Small load configuration + NUM_CONCURRENT_TASKS: 10 + TEST_DURATION: "5m" + LOAD_MODEL_TYPE: "Rps" + TARGET_RPS: 500 + + # Logging + RUST_LOG: "info" + LOG_FORMAT: "default" + + # Metrics + METRIC_NAMESPACE: "loadtest_small" + ports: + - "9090:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # MEDIUM LOAD TEST - 2GB Memory + # ============================================================================== + # Standard test for most scenarios + loadtest-medium: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-medium + mem_limit: 2g + memswap_limit: 2g + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/products" + REQUEST_TYPE: "GET" + + # Medium load configuration + NUM_CONCURRENT_TASKS: 100 + TEST_DURATION: "30m" + LOAD_MODEL_TYPE: "RampRps" + MIN_RPS: 500 + MAX_RPS: 5000 + RAMP_DURATION: "15m" + + # Logging + RUST_LOG: "info" + LOG_FORMAT: "json" + + # Metrics + METRIC_NAMESPACE: "loadtest_medium" + ports: + - "9091:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # HIGH LOAD TEST - 4GB Memory + # ============================================================================== + # Higher concurrency 
test - requires monitoring + loadtest-high: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-high + mem_limit: 4g + memswap_limit: 4g + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/products" + REQUEST_TYPE: "GET" + + # High load configuration (safe for 4GB) + NUM_CONCURRENT_TASKS: 500 + TEST_DURATION: "1h" + LOAD_MODEL_TYPE: "Rps" + TARGET_RPS: 10000 + + # Logging + RUST_LOG: "info" + LOG_FORMAT: "json" + + # Metrics + METRIC_NAMESPACE: "loadtest_high" + ports: + - "9092:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # MAXIMUM LOAD TEST - 8GB Memory + # ============================================================================== + # Stress test configuration - maximum supported load + loadtest-maximum: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-maximum + mem_limit: 8g + memswap_limit: 8g + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/products" + REQUEST_TYPE: "GET" + + # Maximum load configuration + NUM_CONCURRENT_TASKS: 1000 + TEST_DURATION: "2h" + LOAD_MODEL_TYPE: "RampRps" + MIN_RPS: 5000 + MAX_RPS: 25000 + RAMP_DURATION: "30m" + + # Logging + RUST_LOG: "warn" # Less verbose for high load + LOG_FORMAT: "json" + + # Metrics + METRIC_NAMESPACE: "loadtest_maximum" + ports: + - "9093:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # DAILY TRAFFIC PATTERN - 4GB Memory + # ============================================================================== + # Simulates realistic daily traffic patterns + loadtest-daily-pattern: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-daily + mem_limit: 4g + memswap_limit: 4g + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/products" + REQUEST_TYPE: "GET" + + # Daily traffic pattern (1 
hour = 1 simulated day) + NUM_CONCURRENT_TASKS: 200 + TEST_DURATION: "4h" # 4 simulated days + LOAD_MODEL_TYPE: "DailyTraffic" + DAILY_MIN_RPS: 100 # Night traffic + DAILY_MID_RPS: 500 # Afternoon traffic + DAILY_MAX_RPS: 1500 # Peak traffic + DAILY_CYCLE_DURATION: "1h" + + # Logging + RUST_LOG: "info" + LOG_FORMAT: "json" + + # Metrics + METRIC_NAMESPACE: "loadtest_daily" + ports: + - "9094:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # SOAK TEST - 4GB Memory + # ============================================================================== + # Long duration test to identify memory leaks + loadtest-soak: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-soak + mem_limit: 4g + memswap_limit: 4g + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/health" + REQUEST_TYPE: "GET" + + # Soak test configuration (steady load for long duration) + NUM_CONCURRENT_TASKS: 50 + TEST_DURATION: "24h" # Long duration to detect leaks + LOAD_MODEL_TYPE: "Rps" + TARGET_RPS: 1000 # Moderate but sustained + + # Logging + RUST_LOG: "warn" # Minimal logging for long tests + LOG_FORMAT: "json" + + # Metrics + METRIC_NAMESPACE: "loadtest_soak" + ports: + - "9095:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # POST REQUEST TEST - 2GB Memory + # ============================================================================== + # Example with JSON payload + loadtest-post-json: + image: cbaugus/rust_loadtest:dev + container_name: loadtest-post + mem_limit: 2g + memswap_limit: 2g + environment: + # Target configuration + TARGET_URL: "https://ecom.edge.baugus-lab.com/auth/register" + REQUEST_TYPE: "POST" + SEND_JSON: "true" + JSON_PAYLOAD: '{"email":"loadtest@example.com","password":"Test123!","name":"Load Test User"}' + + # Load configuration + NUM_CONCURRENT_TASKS: 
50 + TEST_DURATION: "10m" + LOAD_MODEL_TYPE: "Rps" + TARGET_RPS: 100 + + # Logging + RUST_LOG: "info" + LOG_FORMAT: "json" + + # Metrics + METRIC_NAMESPACE: "loadtest_post" + ports: + - "9096:9090" # Prometheus metrics + networks: + - loadtest + + # ============================================================================== + # MONITORING STACK (Optional) + # ============================================================================== + # Prometheus for scraping metrics + prometheus: + image: prom/prometheus:latest + container_name: prometheus + ports: + - "9099:9090" + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--storage.tsdb.retention.time=7d' + networks: + - loadtest + + # Grafana for visualization + grafana: + image: grafana/grafana:latest + container_name: grafana + ports: + - "3000:3000" + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + volumes: + - grafana-data:/var/lib/grafana + networks: + - loadtest + +networks: + loadtest: + driver: bridge + +volumes: + grafana-data: + +# ============================================================================== +# USAGE EXAMPLES +# ============================================================================== +# +# Run a small test: +# docker-compose -f docker-compose.loadtest-examples.yml up loadtest-small +# +# Run medium test in background: +# docker-compose -f docker-compose.loadtest-examples.yml up -d loadtest-medium +# +# Monitor memory usage: +# docker stats --no-stream +# +# View logs: +# docker logs -f loadtest-medium +# +# Access metrics: +# curl http://localhost:9091/metrics +# +# Stop test: +# docker-compose -f docker-compose.loadtest-examples.yml down +# +# Run with monitoring stack: +# docker-compose -f docker-compose.loadtest-examples.yml up -d loadtest-medium prometheus grafana +# # Grafana: http://localhost:3000 (admin/admin) +# # 
Prometheus: http://localhost:9099 +# +# ============================================================================== +# MEMORY RECOMMENDATIONS +# ============================================================================== +# +# See MEMORY_OPTIMIZATION.md for detailed analysis +# +# Configuration Memory Concurrent Tasks Target RPS Duration +# ───────────────────────────────────────────────────────────────────────────── +# loadtest-small 512MB 10 500 5m +# loadtest-medium 2GB 100 5,000 30m +# loadtest-high 4GB 500 10,000 1h +# loadtest-maximum 8GB 1,000 25,000 2h +# loadtest-daily-pattern 4GB 200 100-1,500 4h +# loadtest-soak 4GB 50 1,000 24h +# loadtest-post-json 2GB 50 100 10m +# +# Always start with smaller configurations and scale up gradually! \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..995ff3e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,41 @@ +version: '3.8' + +services: + # Test API endpoint (using httpbin for testing) + test-api: + image: kennethreitz/httpbin + ports: + - "8080:80" + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:80/status/200')"] + interval: 5s + timeout: 3s + retries: 3 + + # Rust LoadTest tool + loadtest: + build: . 
+ depends_on: + test-api: + condition: service_healthy + volumes: + - ./examples/configs:/app/configs + - ./examples/data:/app/data + - ./results:/app/results + environment: + - TARGET_URL=http://test-api:80 + # Override to run a specific test + command: ["rust-loadtest", "--config", "/app/configs/basic-api-test.yaml"] + # Uncomment to keep container running for manual testing + # command: ["tail", "-f", "/dev/null"] + + # Simple web server for testing (alternative to httpbin) + simple-api: + image: nginx:alpine + ports: + - "8081:80" + volumes: + - ./test-api:/usr/share/nginx/html:ro + +volumes: + results: diff --git a/docs/CONFIG_DOCS_GENERATOR.md b/docs/CONFIG_DOCS_GENERATOR.md new file mode 100644 index 0000000..27b832f --- /dev/null +++ b/docs/CONFIG_DOCS_GENERATOR.md @@ -0,0 +1,428 @@ +# Configuration Documentation Generator + +## Overview + +The configuration documentation generator automatically generates schema documentation, JSON Schema files, and IDE snippets from the configuration structures. This ensures documentation stays in sync with the code. 
+ +## Features + +✅ **JSON Schema Generation** - Exports complete JSON Schema for IDE support and validation +✅ **Markdown Documentation** - Auto-generates reference documentation +✅ **VS Code Snippets** - Creates code snippets for faster config authoring +✅ **Auto-sync** - Documentation generated from code, always up-to-date +✅ **IDE Integration** - JSON Schema enables auto-completion in IDEs + +## Usage + +### Programmatic API + +```rust +use rust_loadtest::config_docs_generator::ConfigDocsGenerator; +use std::fs; + +let generator = ConfigDocsGenerator::new(); + +// Generate JSON Schema +let json_schema = generator.generate_json_schema(); +fs::write("schema.json", json_schema)?; + +// Generate Markdown docs +let markdown = generator.generate_markdown_docs(); +fs::write("CONFIG_SCHEMA.md", markdown)?; + +// Generate VS Code snippets +let snippets = generator.generate_vscode_snippets(); +fs::write("snippets.json", snippets)?; +``` + +### Using the Generator Script + +```bash +# Run the documentation generator +cargo run --example generate_docs + +# This creates: +# - docs/config-schema.json +# - docs/CONFIG_SCHEMA.md +# - .vscode/rust-loadtest.code-snippets +``` + +## Generated Files + +### 1. 
JSON Schema (`config-schema.json`)
+
+**Purpose**: Machine-readable schema for validation and IDE support
+
+**Features**:
+- Complete type definitions
+- Validation rules (required fields, patterns, ranges)
+- Examples for each field
+- Enum values for constrained fields
+- Format specifications
+
+**Usage**:
+
+**VS Code** - Add to `settings.json`:
+```json
+{
+  "yaml.schemas": {
+    "./docs/config-schema.json": "loadtest*.yaml"
+  }
+}
+```
+
+**IntelliJ/PyCharm** - Settings → Languages & Frameworks → Schemas and DTDs → JSON Schema Mappings
+
+**Schema Validators**:
+```bash
+# Validate with ajv-cli
+npm install -g ajv-cli
+ajv validate -s docs/config-schema.json -d loadtest.yaml
+
+# Validate with Python
+pip install jsonschema pyyaml
+python -c "import yaml, json, jsonschema; jsonschema.validate(yaml.safe_load(open('loadtest.yaml')), json.load(open('docs/config-schema.json')))"
+```
+
+### 2. Markdown Documentation (`CONFIG_SCHEMA.md`)
+
+**Purpose**: Human-readable reference documentation
+
+**Sections**:
+- Version - Configuration versioning
+- Metadata - Test metadata fields
+- Config - Global configuration
+- Load Models - Concurrent, RPS, Ramp models
+- Scenarios - Scenario and step definitions
+- Complete Example - Full working example
+
+**Features**:
+- Property tables
+- Type information
+- Required/optional indicators
+- Default values
+- YAML examples for each section
+
+### 3. 
VS Code Snippets (`rust-loadtest.code-snippets`) + +**Purpose**: Code snippets for faster YAML authoring + +**Available Snippets**: + +| Prefix | Description | Result | +|--------|-------------|--------| +| `loadtest-basic` | Complete basic config | Full config template | +| `loadtest-rps` | RPS load model | RPS configuration | +| `loadtest-ramp` | Ramp load model | Ramp configuration | +| `loadtest-scenario` | Test scenario | Scenario with steps | +| `loadtest-step` | Test step | Step with request | +| `loadtest-assertion-status` | Status assertion | Status code check | +| `loadtest-assertion-jsonpath` | JSONPath assertion | JSONPath validation | +| `loadtest-extract-jsonpath` | JSONPath extractor | Variable extraction | +| `loadtest-datafile` | Data file config | CSV/JSON data file | + +**Usage in VS Code**: +1. Open YAML file +2. Type snippet prefix (e.g., `loadtest-basic`) +3. Press `Tab` to expand +4. Use `Tab` to navigate placeholders + +## JSON Schema Details + +### Schema Structure + +```json +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Rust LoadTest Configuration", + "type": "object", + "required": ["version", "config", "load", "scenarios"], + "properties": { + "version": { ... }, + "config": { ... }, + "load": { ... }, + "scenarios": { ... 
} + } +} +``` + +### Type Definitions + +**Duration Fields**: +```json +{ + "oneOf": [ + { "type": "string", "pattern": "^[0-9]+(s|m|h)$" }, + { "type": "integer", "minimum": 1 } + ] +} +``` + +**Load Model Union**: +```json +{ + "oneOf": [ + { "properties": { "model": { "const": "concurrent" } } }, + { "properties": { "model": { "const": "rps" }, "target": {...} } }, + { "properties": { "model": { "const": "ramp" }, "min": {...}, "max": {...} } } + ] +} +``` + +### Validation Rules + +- **Required Fields**: `version`, `config`, `load`, `scenarios` +- **Version Pattern**: `^[0-9]+\.[0-9]+$` (e.g., "1.0") +- **Duration Pattern**: `^[0-9]+(s|m|h)$` (e.g., "5m") +- **Workers Minimum**: 1 +- **RPS Minimum**: 0.1 +- **HTTP Methods**: GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS + +## IDE Integration + +### VS Code + +**Setup**: +1. Install YAML extension +2. Add to `.vscode/settings.json`: +```json +{ + "yaml.schemas": { + "./docs/config-schema.json": "*.yaml" + } +} +``` + +**Features**: +- ✅ Auto-completion +- ✅ Field descriptions on hover +- ✅ Error highlighting +- ✅ Enum value suggestions +- ✅ Format validation + +### IntelliJ IDEA / PyCharm + +**Setup**: +1. Settings → Languages & Frameworks → Schemas and DTDs +2. Add new JSON Schema mapping +3. Schema file: `docs/config-schema.json` +4. 
File pattern: `*.yaml` + +### Vim/Neovim + +**With CoC.nvim**: +```json +{ + "yaml.schemas": { + "/path/to/docs/config-schema.json": "*.yaml" + } +} +``` + +**With ALE**: +```vim +let g:ale_yaml_schemas = { + \ '/path/to/docs/config-schema.json': '*.yaml' + \ } +``` + +## Regenerating Documentation + +Documentation should be regenerated when: +- Configuration structures change +- New fields are added +- Validation rules update +- Examples need updating + +**Regenerate**: +```bash +cargo run --example generate_docs +``` + +**Automated Regeneration** (in CI/CD): +```yaml +# GitHub Actions example +- name: Generate Docs + run: | + cargo run --example generate_docs + git diff --exit-code || echo "Docs need updating" +``` + +## Customization + +### Adding New Snippets + +Edit `src/config_docs_generator.rs`: + +```rust +snippets.insert("loadtest-custom", serde_json::json!({ + "prefix": "loadtest-custom", + "body": [ + "your:", + " custom: ${1:value}" + ], + "description": "Custom snippet" +})); +``` + +### Extending JSON Schema + +Modify `build_json_schema()` method: + +```rust +"properties": { + "newField": { + "type": "string", + "description": "New field description", + "examples": ["example"] + } +} +``` + +### Updating Markdown Template + +Edit `generate_markdown_docs()` method: + +```rust +md.push_str("## New Section\n\n"); +md.push_str("Description...\n\n"); +``` + +## Validation + +### Schema Validation + +```bash +# Validate schema itself +ajv compile -s docs/config-schema.json + +# Should output: schema is valid +``` + +### Config Validation + +```bash +# Validate a config file +ajv validate -s docs/config-schema.json -d examples/configs/basic-api-test.yaml + +# Or use rust-loadtest +rust-loadtest --config my-config.yaml --validate +``` + +## Best Practices + +### 1. 
Keep Schema in Sync + +Always regenerate docs after schema changes: +```bash +# After modifying YamlConfig structures +cargo run --example generate_docs +git add docs/ .vscode/ +git commit -m "Update generated documentation" +``` + +### 2. Add Examples + +Include examples in JSON Schema: +```json +{ + "examples": ["1.0", "2.0"] +} +``` + +### 3. Descriptive Error Messages + +Use clear descriptions for validation: +```json +{ + "description": "Duration in format '5m', '1h', or '30s'" +} +``` + +### 4. IDE-Friendly Enums + +Provide enum values for constrained fields: +```json +{ + "enum": ["GET", "POST", "PUT", "DELETE"] +} +``` + +### 5. Version Documentation + +Update docs when schema version changes: +```rust +version: "2.0".to_string() +``` + +## Troubleshooting + +### IDE Not Showing Completions + +1. Check schema file path in settings +2. Verify schema JSON is valid +3. Reload IDE window +4. Check file pattern matches + +### Schema Validation Errors + +1. Validate schema file itself +2. Check for JSON syntax errors +3. Verify all `$ref` paths resolve + +### Snippets Not Working + +1. Check snippet file location (`.vscode/`) +2. Verify JSON syntax +3. Reload VS Code +4. 
Check snippet scope (YAML files) + +## Related Documentation + +- [YAML Configuration Guide](/docs/YAML_CONFIG.md) +- [Configuration Schema Reference](/docs/CONFIG_SCHEMA.md) +- [Configuration Examples](/docs/CONFIG_EXAMPLES.md) +- [Configuration Validation](/docs/CONFIG_VALIDATION.md) + +## API Reference + +### ConfigDocsGenerator + +```rust +pub struct ConfigDocsGenerator { + app_name: String, + version: String, +} + +impl ConfigDocsGenerator { + /// Create new generator + pub fn new() -> Self; + + /// Generate JSON Schema + pub fn generate_json_schema(&self) -> String; + + /// Generate Markdown docs + pub fn generate_markdown_docs(&self) -> String; + + /// Generate VS Code snippets + pub fn generate_vscode_snippets(&self) -> String; +} +``` + +## Contributing + +To improve the documentation generator: + +1. Modify `src/config_docs_generator.rs` +2. Add tests to `tests/config_docs_generator_tests.rs` +3. Regenerate docs: `cargo run --example generate_docs` +4. Update this guide if API changes +5. Submit pull request + +## Version History + +- **v1.0** - Initial documentation generator + - JSON Schema export + - Markdown documentation + - VS Code snippets + - 9 built-in snippets diff --git a/docs/CONFIG_EXAMPLES.md b/docs/CONFIG_EXAMPLES.md new file mode 100644 index 0000000..c338d36 --- /dev/null +++ b/docs/CONFIG_EXAMPLES.md @@ -0,0 +1,770 @@ +# Configuration Examples and Templates + +## Overview + +The `examples/configs/` directory contains production-ready YAML configuration templates for common load testing scenarios. Each template is fully documented, validated, and ready to use. + +## Quick Start + +```bash +# 1. Browse available templates +ls examples/configs/*.yaml + +# 2. Copy a template +cp examples/configs/basic-api-test.yaml my-test.yaml + +# 3. Customize for your API +vim my-test.yaml + +# 4. 
Run the test +rust-loadtest --config my-test.yaml +``` + +## Available Templates + +### Template Overview + +| Template | Complexity | Workers | Scenarios | Best For | +|----------|-----------|---------|-----------|----------| +| [Basic API](#1-basic-api-test) | ⭐ | 10 | 1 | Simple endpoint testing | +| [E-Commerce](#2-e-commerce-scenario) | ⭐⭐⭐ | 50 | 4 | Multi-step user flows | +| [Stress Test](#3-stress-test) | ⭐⭐⭐⭐ | 200 | 3 | Finding system limits | +| [Data-Driven](#4-data-driven-test) | ⭐⭐ | 20 | 2 | Testing with real data | +| [Authenticated](#5-authenticated-api) | ⭐⭐⭐ | 25 | 3 | Auth flows, tokens | +| [Microservices](#6-microservices-test) | ⭐⭐⭐⭐ | 40 | 4 | Distributed systems | +| [GraphQL](#7-graphql-api) | ⭐⭐⭐ | 30 | 4 | GraphQL APIs | +| [Spike Test](#8-spike-test) | ⭐⭐⭐⭐ | 150 | 3 | Sudden traffic spikes | + +## Template Details + +### 1. Basic API Test + +**File**: `basic-api-test.yaml` + +**Purpose**: Simple load test for a single API endpoint. + +**Configuration**: +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + workers: 10 + duration: "5m" +load: + model: "rps" + target: 100 +scenarios: + - name: "API Health Check" + steps: + - request: + method: "GET" + path: "/health" +``` + +**Use Cases**: +- API health monitoring +- Smoke testing +- CI/CD integration +- Getting started with load testing + +**Customization**: +```bash +# Change URL +sed -i 's|api.example.com|your-api.com|' basic-api-test.yaml + +# Adjust RPS +sed -i 's/target: 100/target: 200/' basic-api-test.yaml + +# Quick test with env override +TARGET_URL=https://staging.api.com rust-loadtest --config basic-api-test.yaml +``` + +--- + +### 2. E-Commerce Scenario + +**File**: `ecommerce-scenario.yaml` + +**Purpose**: Realistic e-commerce load test with weighted user flows. 
+ +**Traffic Distribution**: +- 60% Browse only (window shoppers) +- 25% Browse + add to cart +- 12% Complete purchase +- 3% Quick browse + +**Configuration**: +```yaml +load: + model: "ramp" + min: 10 + max: 200 + rampDuration: "5m" + +scenarios: + - name: "Browse Only" + weight: 60 + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + thinkTime: "2s" +``` + +**Real-World Pattern**: +``` +Time RPS Browse Cart Checkout +0m 10 6 2 1 +5m 50 30 13 6 +10m 100 60 25 12 +15m 200 120 50 24 +``` + +**Use Cases**: +- E-commerce platforms +- Conversion funnel testing +- Black Friday simulation +- Realistic user behavior + +**Customization**: +- Adjust weights based on your analytics +- Modify product search paths +- Add authentication headers +- Include payment gateway steps + +--- + +### 3. Stress Test + +**File**: `stress-test.yaml` + +**Purpose**: High-load test to find system breaking points. + +**Load Profile**: +``` +RPS +1000 | ___________ + | / + 500 | / + | / + 10 |_______________/ + 0m 5m 10m 15m 60m + Ramp Sustain +``` + +**Configuration**: +```yaml +config: + workers: 200 + duration: "1h" +load: + model: "ramp" + min: 10 + max: 1000 + rampDuration: "15m" +``` + +**Metrics to Watch**: +- Response time percentiles (p95, p99) +- Error rate increase +- CPU/memory utilization +- Database connections +- Auto-scaling events + +**Use Cases**: +- Capacity planning +- Finding bottlenecks +- Validating auto-scaling +- SLA verification + +**Warning**: ⚠️ Generates significant load. Use on test environments only. + +--- + +### 4. Data-Driven Test + +**File**: `data-driven-test.yaml` + +**Purpose**: Load test using external CSV/JSON data files. 
+ +**Data File Setup**: + +**CSV** (`users.csv`): +```csv +username,email,user_id +john.doe,john@example.com,1001 +jane.smith,jane@example.com,1002 +``` + +**JSON** (`products.json`): +```json +[ + { + "product_name": "Laptop", + "category": "electronics", + "sku": "LAP-001" + } +] +``` + +**Configuration**: +```yaml +scenarios: + - name: "User Login with CSV Data" + dataFile: + path: "./examples/data/users.csv" + format: "csv" + strategy: "random" # sequential | random | cycle + steps: + - request: + method: "POST" + path: "/login" + body: '{"username": "${username}"}' +``` + +**Iteration Strategies**: +- **Sequential**: Process data in order (1, 2, 3, ...) +- **Random**: Pick random rows +- **Cycle**: Loop through data (1, 2, 3, 1, 2, 3, ...) + +**Use Cases**: +- Testing with real user credentials +- Large dataset testing +- Parameterized API calls +- Database seeding validation + +--- + +### 5. Authenticated API + +**File**: `authenticated-api.yaml` + +**Purpose**: Test APIs requiring authentication. + +**Authentication Patterns**: + +**JWT Authentication**: +```yaml +steps: + - name: "Login" + request: + method: "POST" + path: "/auth/login" + body: '{"username": "user", "password": "pass"}' + extract: + - name: "token" + jsonPath: "$.token" + + - name: "Use Token" + request: + method: "GET" + path: "/protected" + headers: + Authorization: "Bearer ${token}" +``` + +**API Key**: +```yaml +config: + customHeaders: "X-API-Key: your-key-here" +``` + +**OAuth 2.0**: +```yaml +steps: + - name: "Get Access Token" + request: + method: "POST" + path: "/oauth/token" + body: '{"grant_type": "client_credentials"}' + extract: + - name: "accessToken" + jsonPath: "$.access_token" +``` + +**Use Cases**: +- JWT token lifecycle testing +- OAuth flow validation +- API key rate limiting +- Session management + +--- + +### 6. Microservices Test + +**File**: `microservices-test.yaml` + +**Purpose**: Test distributed microservices architecture. 
+ +**Service Distribution**: +- 25% User Service +- 30% Product Service +- 30% Order Service +- 15% Inventory Service + +**Configuration**: +```yaml +config: + baseUrl: "https://gateway.example.com" + +scenarios: + - name: "User Service Flow" + weight: 25 + steps: + - request: + method: "POST" + path: "/users/register" + + - name: "Product Service Flow" + weight: 30 + steps: + - request: + method: "GET" + path: "/products" +``` + +**Testing Patterns**: +- Service-to-service communication +- API gateway performance +- Circuit breaker behavior +- Service mesh metrics + +**Use Cases**: +- Microservices platforms +- API gateway testing +- Service mesh validation +- Distributed tracing + +--- + +### 7. GraphQL API + +**File**: `graphql-api.yaml` + +**Purpose**: Test GraphQL APIs with queries and mutations. + +**Query Types**: + +**Simple Query**: +```yaml +steps: + - request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query { users { id name } }" + } +``` + +**Query with Variables**: +```yaml +steps: + - request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query GetUser($id: ID!) { user(id: $id) { name } }", + "variables": {"id": "${userId}"} + } +``` + +**Mutation**: +```yaml +steps: + - request: + method: "POST" + path: "/graphql" + body: > + { + "query": "mutation { createPost(input: {title: \"Test\"}) { id } }" + } +``` + +**Use Cases**: +- GraphQL API testing +- Query complexity validation +- Schema performance +- Resolver optimization + +--- + +### 8. Spike Test + +**File**: `spike-test.yaml` + +**Purpose**: Test system resilience under sudden traffic spikes. 
+ +**Spike Pattern**: +``` +Workers +150 | ████████ + | ████████ + 50 | ████████ + | ████████ + 20 |██████ ████████ + 0 5m 10m 15m 20m 25m + Normal Spike Recovery +``` + +**Configuration**: +```yaml +config: + workers: 150 # High for spike + duration: "30m" + +scenarios: + - name: "High-Traffic Endpoint" + thinkTime: + min: "100ms" + max: "500ms" # Short think time = aggressive +``` + +**Execution Plan**: +1. **Phase 1** (0-5m): Normal load - 20 workers +2. **Phase 2** (5-10m): Spike - 150 workers +3. **Phase 3** (10-20m): Recovery - 20 workers +4. **Phase 4** (20-30m): Validation - 20 workers + +**Use Cases**: +- Flash sale simulation +- Viral content scenarios +- Auto-scaling validation +- Traffic surge preparation + +**Implementation**: +```bash +# Manual spike test +rust-loadtest --config spike-test.yaml --workers 20 & +sleep 300 +rust-loadtest --config spike-test.yaml --workers 150 & +sleep 300 +rust-loadtest --config spike-test.yaml --workers 20 +``` + +--- + +## Customization Guide + +### Common Patterns + +#### Change Base URL + +**Option 1: Edit File** +```yaml +config: + baseUrl: "https://your-api.com" +``` + +**Option 2: Environment Variable** +```bash +TARGET_URL=https://your-api.com rust-loadtest --config template.yaml +``` + +#### Adjust Load + +**RPS Model**: +```yaml +load: + model: "rps" + target: 200 # Requests per second +``` + +**Ramp Model**: +```yaml +load: + model: "ramp" + min: 10 + max: 500 + rampDuration: "10m" +``` + +**Concurrent Model**: +```yaml +load: + model: "concurrent" +config: + workers: 100 # Concurrent users +``` + +#### Add Authentication + +**JWT**: +```yaml +steps: + - name: "Login" + extract: + - name: "token" + jsonPath: "$.token" + + - name: "Protected Request" + request: + headers: + Authorization: "Bearer ${token}" +``` + +**API Key**: +```yaml +config: + customHeaders: "X-API-Key: ${API_KEY}" +``` + +#### Adjust Think Time + +**Fixed**: +```yaml +thinkTime: "3s" +``` + +**Random**: +```yaml +thinkTime: + min: "1s" + 
max: "5s" +``` + +### Advanced Customization + +#### Scenario Weighting + +Based on production analytics: + +```yaml +scenarios: + - name: "Browse" + weight: 70 # 70% of users browse + + - name: "Purchase" + weight: 30 # 30% of users buy +``` + +#### Data Extraction + +```yaml +extract: + - name: "userId" + jsonPath: "$.user.id" + + - name: "token" + jsonPath: "$.auth.token" + + - name: "productId" + regex: '"id":"([^"]+)"' +``` + +#### Custom Assertions + +```yaml +assertions: + - statusCode: 200 + - responseTime: "2s" + - bodyContains: "success" + - jsonPath: + path: "$.status" + expected: "ok" + - headerExists: "X-Request-ID" +``` + +## Environment Variable Overrides + +All templates support environment variable overrides: + +```bash +# Override URL +TARGET_URL=https://staging.api.com + +# Override workers +NUM_CONCURRENT_TASKS=50 + +# Override duration +TEST_DURATION=10m + +# Override RPS +TARGET_RPS=200 + +# Run with overrides +env TARGET_URL=https://staging.api.com \ + NUM_CONCURRENT_TASKS=50 \ + rust-loadtest --config template.yaml +``` + +## Validation + +Validate templates before running: + +```bash +# Validate syntax and schema +rust-loadtest --config template.yaml --validate + +# Dry run (parse without executing) +rust-loadtest --config template.yaml --dry-run +``` + +## CI/CD Integration + +### GitHub Actions + +```yaml +name: Load Test + +on: + schedule: + - cron: '0 2 * * *' # Daily at 2 AM + +jobs: + load-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Run Load Test + run: | + rust-loadtest --config examples/configs/basic-api-test.yaml + env: + TARGET_URL: ${{ secrets.API_URL }} + + - name: Upload Results + uses: actions/upload-artifact@v2 + with: + name: load-test-results + path: results/ +``` + +### GitLab CI + +```yaml +load-test: + stage: test + script: + - rust-loadtest --config examples/configs/stress-test.yaml + variables: + TARGET_URL: $STAGING_API_URL + artifacts: + paths: + - results/ + only: + - schedules 
+``` + +## Best Practices + +### 1. Start Small + +Begin with basic templates and gradually increase complexity: + +``` +basic-api-test.yaml + ↓ +ecommerce-scenario.yaml (multi-step) + ↓ +stress-test.yaml (high load) +``` + +### 2. Use Realistic Data + +```yaml +# ❌ Don't use dummy data +body: '{"user": "test123"}' + +# ✅ Use realistic data from files +dataFile: + path: "./real-users.csv" + strategy: "random" +``` + +### 3. Monitor System Metrics + +While running tests, monitor: +- CPU and memory usage +- Database connections +- Network I/O +- Error rates +- Response time percentiles + +### 4. Validate Results + +```bash +# Run test +rust-loadtest --config template.yaml > results.log + +# Check results +grep "Success Rate" results.log +grep "p95" results.log +grep "p99" results.log +``` + +### 5. Document Customizations + +```yaml +# Added by: John Doe +# Date: 2024-01-01 +# Reason: Increased load for Black Friday +config: + workers: 200 # Was: 50 +``` + +## Troubleshooting + +### Template Won't Load + +```bash +# Check syntax +rust-loadtest --config template.yaml --validate + +# Common issues: +# - Invalid YAML indentation +# - Missing required fields +# - Invalid URL format +``` + +### High Error Rates + +```yaml +# Increase timeout +config: + timeout: "60s" # Was: 30s + +# Add retry logic (if supported) +config: + retryCount: 3 +``` + +### Data File Not Found + +```yaml +# Use absolute path +dataFile: + path: "/full/path/to/data.csv" + +# Or relative to working directory +dataFile: + path: "./data/users.csv" +``` + +## Related Documentation + +- [YAML Configuration Guide](/docs/YAML_CONFIG.md) +- [Scenario Definitions](/docs/SCENARIO_YAML.md) +- [Load Models](/docs/LOAD_MODELS.md) +- [Multi-Scenario Execution](/docs/MULTI_SCENARIO.md) +- [Configuration Hot-Reload](/docs/CONFIG_HOT_RELOAD.md) + +## Contributing Templates + +To contribute a new template: + +1. Create YAML file in `examples/configs/` +2. Add comprehensive comments +3. Include usage examples +4. 
Add validation test in `tests/config_examples_tests.rs` +5. Update `examples/configs/README.md` +6. Submit pull request + +## Support + +- **Issues**: Report problems on GitHub +- **Questions**: Ask in Discussions +- **Examples**: Check `/examples` directory +- **Documentation**: See `/docs` directory diff --git a/docs/CONFIG_HOT_RELOAD.md b/docs/CONFIG_HOT_RELOAD.md new file mode 100644 index 0000000..ac53321 --- /dev/null +++ b/docs/CONFIG_HOT_RELOAD.md @@ -0,0 +1,661 @@ +# Configuration Hot-Reload + +## Overview + +Configuration hot-reload allows you to modify YAML configuration files during test execution without stopping or restarting the load test. Changes are automatically detected, validated, and applied in real-time. + +## Key Features + +✅ **Automatic file watching** - Detects changes to YAML config files +✅ **Validation before reload** - Ensures new config is valid before applying +✅ **Graceful reload** - Updates config without stopping the test +✅ **Reload notifications** - Event-based system to handle config changes +✅ **Debouncing** - Prevents multiple reloads for rapid file changes +✅ **Development mode** - Enable/disable hot-reload as needed + +## When to Use Hot-Reload + +### Development & Testing + +- **Rapid iteration**: Adjust load parameters without restarting tests +- **A/B testing**: Compare different configurations in real-time +- **Debugging**: Fine-tune settings while observing behavior +- **Experimentation**: Try different scenarios on the fly + +### Production Monitoring + +- **Load adjustment**: Scale workers up/down based on system capacity +- **Scenario updates**: Modify traffic patterns during long-running tests +- **Emergency response**: Quickly reduce load if system shows stress + +## Quick Start + +### Basic Usage + +```rust +use rust_loadtest::config_hot_reload::{ConfigWatcher, ReloadNotifier}; +use std::sync::Arc; + +// Create notifier +let notifier = Arc::new(ReloadNotifier::new()); + +// Create watcher +let mut watcher = 
ConfigWatcher::new("loadtest.yaml", notifier.clone())?; + +// Start watching +watcher.start()?; + +// Check for reload events +if let Some(event) = notifier.try_recv() { + if event.is_success() { + println!("Config reloaded successfully"); + // Apply new config + } else { + println!("Config reload failed: {:?}", event.error); + } +} + +// Stop watching when done +watcher.stop()?; +``` + +### CLI Usage (Development Mode) + +```bash +# Enable hot-reload in development mode +rust-loadtest --config loadtest.yaml --dev-mode + +# Or with environment variable +DEV_MODE=true rust-loadtest --config loadtest.yaml +``` + +## Configuration + +### HotReloadConfig + +Control hot-reload behavior with `HotReloadConfig`: + +```rust +use rust_loadtest::config_hot_reload::HotReloadConfig; + +// Enable hot-reload with defaults +let config = HotReloadConfig::new("loadtest.yaml"); + +// Disable hot-reload +let config = HotReloadConfig::disabled(); + +// Custom debounce duration +let config = HotReloadConfig::new("loadtest.yaml") + .with_debounce_ms(1000); // Wait 1 second after changes + +// Enable/disable dynamically +let config = HotReloadConfig::new("loadtest.yaml") + .disable() + .enable(); +``` + +### Debouncing + +Debouncing prevents multiple reloads when files are saved rapidly (e.g., by IDEs): + +```rust +// Short debounce (100ms) - more responsive +let config = HotReloadConfig::new("loadtest.yaml") + .with_debounce_ms(100); + +// Default debounce (500ms) - balanced +let config = HotReloadConfig::new("loadtest.yaml"); + +// Long debounce (2000ms) - reduces reload frequency +let config = HotReloadConfig::new("loadtest.yaml") + .with_debounce_ms(2000); +``` + +**Recommendation**: Use default 500ms for most cases. Increase if you experience too many reloads. 
+ +## Reload Events + +### ReloadEvent Structure + +```rust +pub struct ReloadEvent { + /// Timestamp of the reload + pub timestamp: SystemTime, + + /// Path to the config file + pub file_path: PathBuf, + + /// The reloaded configuration + pub config: YamlConfig, + + /// Whether validation succeeded + pub valid: bool, + + /// Validation error message (if any) + pub error: Option, +} +``` + +### Handling Reload Events + +```rust +let notifier = Arc::new(ReloadNotifier::new()); +let mut watcher = ConfigWatcher::new("loadtest.yaml", notifier.clone())?; +watcher.start()?; + +// Poll for events (non-blocking) +loop { + if let Some(event) = notifier.try_recv() { + if event.is_success() { + println!("✅ Config reloaded at {:?}", event.timestamp); + println!(" Base URL: {}", event.config.config.base_url); + println!(" Workers: {}", event.config.config.workers); + + // Apply new configuration + apply_config(event.config); + } else { + eprintln!("❌ Config reload failed:"); + eprintln!(" Error: {}", event.error.unwrap()); + // Keep using old config + } + } + + // Continue test execution + thread::sleep(Duration::from_millis(100)); +} +``` + +### Blocking Event Reception + +```rust +// Wait for the next reload event (blocks) +if let Some(event) = notifier.recv() { + println!("Config changed: {:?}", event); +} +``` + +## Validation Before Reload + +All config changes are validated before being applied: + +### Validation Steps + +1. **YAML parsing** - Ensure valid YAML syntax +2. **Schema validation** - Check required fields and types +3. **URL validation** - Verify baseUrl format +4. **Duration validation** - Check duration strings (e.g., "5m") +5. **Load model validation** - Validate load model parameters +6. 
**Scenario validation** - Ensure scenarios are well-formed + +### Handling Validation Failures + +When validation fails, the old configuration remains active: + +```rust +if let Some(event) = notifier.try_recv() { + if !event.is_success() { + eprintln!("⚠️ Config reload failed - keeping current config"); + eprintln!(" Reason: {}", event.error.unwrap()); + + // Log validation error + log::warn!("Config validation failed: {:?}", event.error); + + // Continue with existing config + return; + } + + // Apply new config only if valid + apply_config(event.config); +} +``` + +## Real-World Examples + +### Example 1: Dynamic Worker Scaling + +```rust +use rust_loadtest::config_hot_reload::{ConfigWatcher, ReloadNotifier}; +use std::sync::{Arc, RwLock}; + +// Shared config +let current_config = Arc::new(RwLock::new(initial_config)); + +// Start watcher +let notifier = Arc::new(ReloadNotifier::new()); +let mut watcher = ConfigWatcher::new("loadtest.yaml", notifier.clone())?; +watcher.start()?; + +// Background thread to handle reloads +let config_clone = current_config.clone(); +thread::spawn(move || { + loop { + if let Some(event) = notifier.try_recv() { + if event.is_success() { + let new_workers = event.config.config.workers; + + // Update shared config + let mut config = config_clone.write().unwrap(); + *config = event.config; + + println!("🔄 Workers updated: {} -> {}", + config.config.workers, new_workers); + } + } + thread::sleep(Duration::from_millis(100)); + } +}); + +// Main test continues, reading from shared config +``` + +### Example 2: Scenario Hot-Swapping + +```yaml +# Before: Testing checkout flow +scenarios: + - name: "Checkout Flow" + weight: 100 + steps: + - request: + method: "POST" + path: "/checkout" + +# After: Switch to browsing flow (save file to trigger reload) +scenarios: + - name: "Browse Products" + weight: 100 + steps: + - request: + method: "GET" + path: "/products" +``` + +The test automatically picks up the new scenario without restarting. 
+ +### Example 3: Load Pattern Adjustment + +```yaml +# Initial: Gentle load +load: + model: "rps" + target: 50 + +# Update: Ramp up to stress test (save to reload) +load: + model: "rps" + target: 500 +``` + +### Example 4: Emergency Load Reduction + +```yaml +# High load causing system stress +config: + workers: 100 +load: + model: "rps" + target: 1000 + +# Reduce immediately (save to reload) +config: + workers: 10 +load: + model: "rps" + target: 50 +``` + +## Integration with Main Test Loop + +### Pattern 1: Separate Reload Thread + +```rust +// Main test loop +let notifier = Arc::new(ReloadNotifier::new()); +let mut watcher = ConfigWatcher::new("loadtest.yaml", notifier.clone())?; +watcher.start()?; + +// Spawn reload handler +let config_ref = Arc::new(RwLock::new(config)); +let config_clone = config_ref.clone(); +thread::spawn(move || { + loop { + if let Some(event) = notifier.try_recv() { + if event.is_success() { + let mut cfg = config_clone.write().unwrap(); + *cfg = event.config; + println!("Config reloaded"); + } + } + thread::sleep(Duration::from_millis(100)); + } +}); + +// Continue test with config_ref +``` + +### Pattern 2: Periodic Polling + +```rust +let notifier = Arc::new(ReloadNotifier::new()); +let mut watcher = ConfigWatcher::new("loadtest.yaml", notifier.clone())?; +watcher.start()?; + +loop { + // Check for reload + if let Some(event) = notifier.try_recv() { + if event.is_success() { + config = event.config; + } + } + + // Execute test iteration + execute_iteration(&config); + + thread::sleep(Duration::from_millis(100)); +} +``` + +## Best Practices + +### 1. Always Validate Before Applying + +```rust +if let Some(event) = notifier.try_recv() { + if event.is_success() { + // ✅ Only apply validated config + apply_config(event.config); + } else { + // ❌ Don't apply invalid config + log::error!("Validation failed: {:?}", event.error); + } +} +``` + +### 2. 
Use Appropriate Debounce + +```rust +// Development: Short debounce for quick iteration +let config = HotReloadConfig::new("loadtest.yaml") + .with_debounce_ms(100); + +// Production: Longer debounce to avoid accidental reloads +let config = HotReloadConfig::new("loadtest.yaml") + .with_debounce_ms(2000); +``` + +### 3. Log Reload Events + +```rust +if let Some(event) = notifier.try_recv() { + if event.is_success() { + info!("Config reloaded from {:?}", event.file_path); + info!("New workers: {}", event.config.config.workers); + info!("New RPS: {:?}", event.config.load); + } else { + error!("Reload failed: {}", event.error.unwrap()); + } +} +``` + +### 4. Handle Graceful Transitions + +```rust +if let Some(event) = notifier.try_recv() { + if event.is_success() { + let old_workers = config.config.workers; + let new_workers = event.config.config.workers; + + if new_workers > old_workers { + println!("Scaling up: {} -> {}", old_workers, new_workers); + // Gradually add workers + } else if new_workers < old_workers { + println!("Scaling down: {} -> {}", old_workers, new_workers); + // Gradually remove workers + } + + config = event.config; + } +} +``` + +### 5. Disable in Production (If Needed) + +```rust +let config = if is_production() { + HotReloadConfig::disabled() +} else { + HotReloadConfig::new("loadtest.yaml") +}; +``` + +## Troubleshooting + +### Config Not Reloading + +**Problem**: File changes but no reload event. + +**Solutions**: +```rust +// 1. Check if watcher is running +assert!(watcher.is_running()); + +// 2. Check if hot-reload is enabled +let config = HotReloadConfig::new("loadtest.yaml").enable(); + +// 3. Verify file path is correct +println!("Watching: {:?}", watcher.file_path()); + +// 4. Check for events +if let Some(event) = notifier.try_recv() { + println!("Got event: {:?}", event); +} +``` + +### Too Many Reload Events + +**Problem**: File saves trigger multiple reloads. 
+ +**Solution**: Increase debounce duration: +```rust +let config = HotReloadConfig::new("loadtest.yaml") + .with_debounce_ms(1000); // Wait 1 second +``` + +### Validation Failing + +**Problem**: Config changes but validation fails. + +**Solution**: Check validation error: +```rust +if let Some(event) = notifier.try_recv() { + if !event.is_success() { + eprintln!("Validation failed: {}", event.error.unwrap()); + // Fix config file based on error message + } +} +``` + +### Watcher Stops After Error + +**Problem**: Watcher stops working after file error. + +**Solution**: The watcher continues even after validation errors. Check: +```rust +// Verify watcher is still running +if !watcher.is_running() { + watcher.start()?; +} +``` + +## Performance Considerations + +### CPU Impact + +- **File watching**: Minimal CPU overhead (<0.1%) +- **Validation**: ~10ms per reload (one-time cost) +- **Event handling**: Negligible impact + +### Memory Impact + +- **Watcher**: ~100KB +- **Event queue**: Minimal (bounded by channel) +- **Config copies**: One copy per reload event + +### Debounce Tuning + +| Debounce | Use Case | Pros | Cons | +|----------|----------|------|------| +| 100ms | Development | Very responsive | May reload unnecessarily | +| 500ms (default) | General use | Balanced | Slight delay | +| 1000ms+ | Production | Fewer reloads | Less responsive | + +## Security Considerations + +### File Permissions + +Ensure config files have appropriate permissions: + +```bash +# Recommended: Read-only for load test user +chmod 444 loadtest.yaml + +# Development: Read-write for editing +chmod 644 loadtest.yaml +``` + +### Validation + +Hot-reload **always** validates new configs before applying. 
Invalid configs are rejected: + +```rust +// Invalid URL +config: + baseUrl: "not-a-valid-url" // ❌ Rejected + +// Invalid duration +config: + duration: "invalid" // ❌ Rejected + +// Negative workers +config: + workers: -10 // ❌ Rejected +``` + +### Audit Logging + +Log all reload events for security auditing: + +```rust +if let Some(event) = notifier.try_recv() { + audit_log!( + "Config reload: path={:?}, valid={}, user={}, timestamp={:?}", + event.file_path, + event.valid, + get_current_user(), + event.timestamp + ); +} +``` + +## Advanced Usage + +### Custom Validation Rules + +Add application-specific validation: + +```rust +if let Some(event) = notifier.try_recv() { + if event.is_success() { + // Custom validation + if event.config.config.workers > max_workers { + eprintln!("Workers exceed limit: {}", event.config.config.workers); + return; + } + + // Apply config + apply_config(event.config); + } +} +``` + +### Metrics on Reload + +Track reload metrics: + +```rust +let reload_counter = AtomicU64::new(0); +let failed_reload_counter = AtomicU64::new(0); + +if let Some(event) = notifier.try_recv() { + if event.is_success() { + reload_counter.fetch_add(1, Ordering::Relaxed); + } else { + failed_reload_counter.fetch_add(1, Ordering::Relaxed); + } +} +``` + +### Multiple Config Files + +Watch multiple config files: + +```rust +let notifier = Arc::new(ReloadNotifier::new()); + +let mut watcher1 = ConfigWatcher::new("main.yaml", notifier.clone())?; +let mut watcher2 = ConfigWatcher::new("scenarios.yaml", notifier.clone())?; + +watcher1.start()?; +watcher2.start()?; + +// Handle events from both watchers +if let Some(event) = notifier.try_recv() { + println!("Config changed: {:?}", event.file_path); +} +``` + +## Related Documentation + +- [YAML Configuration](/docs/YAML_CONFIG.md) +- [Configuration Validation](/docs/CONFIG_VALIDATION.md) +- [Configuration Versioning](/docs/CONFIG_VERSIONING.md) +- [Development Mode](/docs/DEVELOPMENT_MODE.md) + +## FAQ + +### Can I 
reload during a test run? + +Yes, that's the main purpose. Configs reload without stopping the test. + +### What happens if the new config is invalid? + +The old config remains active. You'll receive an event with `valid: false` and an error message. + +### How quickly does reload happen? + +Typically within 100-1000ms after file save (depending on debounce setting). + +### Can I disable hot-reload in production? + +Yes, use `HotReloadConfig::disabled()` or check an environment variable. + +### Does it work with version control? + +Yes, pulling changes from git will trigger reload if the config file changes. + +### What file systems are supported? + +Works on all major file systems: ext4, NTFS, APFS, etc. + +### Can I reload scenarios without changing workers? + +Yes, modify only the scenarios section in your YAML and save. Workers remain unchanged. + +## Examples Repository + +See `/examples/hot_reload/` for complete working examples: + +- `basic_reload.rs` - Simple hot-reload setup +- `dynamic_scaling.rs` - Scale workers based on config changes +- `scenario_switching.rs` - Switch scenarios during test +- `production_safety.rs` - Production-safe reload with validation diff --git a/docs/CONFIG_SCHEMA.md b/docs/CONFIG_SCHEMA.md new file mode 100644 index 0000000..041a4ae --- /dev/null +++ b/docs/CONFIG_SCHEMA.md @@ -0,0 +1,224 @@ +# Configuration Schema Reference + +Complete reference for rust-loadtest YAML configuration format. + +## Table of Contents + +- [Version](#version) +- [Metadata](#metadata) +- [Config](#config) +- [Load Models](#load-models) +- [Scenarios](#scenarios) +- [Complete Example](#complete-example) + +--- + +## Version + +**Field**: `version` (required) + +**Type**: String + +**Description**: Configuration version using semantic versioning. 
+ +**Format**: `major.minor` + +**Example**: +```yaml +version: "1.0" +``` + +--- + +## Metadata + +**Field**: `metadata` (optional) + +**Type**: Object + +**Description**: Optional metadata about the test configuration. + +**Properties**: + +| Property | Type | Description | +|----------|------|-------------| +| `name` | string | Human-readable test name | +| `description` | string | Test description | +| `author` | string | Test author | +| `tags` | array | Tags for categorization | + +**Example**: +```yaml +metadata: + name: "API Load Test" + description: "Testing API endpoints" + author: "DevOps Team" + tags: ["api", "production"] +``` + +--- + +## Config + +**Field**: `config` (required) + +**Type**: Object + +**Description**: Global test configuration. + +**Properties**: + +| Property | Type | Required | Default | Description | +|----------|------|----------|---------|-------------| +| `baseUrl` | string | Yes | - | Base URL of the API | +| `timeout` | string/int | No | `30s` | Request timeout | +| `workers` | integer | No | `10` | Concurrent workers | +| `duration` | string/int | Yes | - | Test duration | +| `skipTlsVerify` | boolean | No | `false` | Skip TLS verification | +| `customHeaders` | string | No | - | Custom HTTP headers | + +**Duration Format**: `` where unit is `s` (seconds), `m` (minutes), or `h` (hours) + +**Example**: +```yaml +config: + baseUrl: "https://api.example.com" + timeout: "30s" + workers: 50 + duration: "10m" + skipTlsVerify: false + customHeaders: "Authorization: Bearer token123" +``` + +--- + +## Load Models + +**Field**: `load` (required) + +**Type**: Object + +**Description**: Load generation model. + +### Concurrent Model + +Fixed number of concurrent workers. + +```yaml +load: + model: "concurrent" +``` + +### RPS Model + +Target requests per second. + +```yaml +load: + model: "rps" + target: 100 # 100 requests/second +``` + +### Ramp Model + +Gradually increase RPS over time. 
+ +```yaml +load: + model: "ramp" + min: 10 # Starting RPS + max: 500 # Ending RPS + rampDuration: "5m" # Ramp over 5 minutes +``` + +--- + +## Scenarios + +**Field**: `scenarios` (required) + +**Type**: Array + +**Description**: Test scenarios with steps. + +**Properties**: + +| Property | Type | Required | Description | +|----------|------|----------|-------------| +| `name` | string | Yes | Scenario name | +| `weight` | number | No | Traffic distribution weight | +| `steps` | array | Yes | Scenario steps | +| `dataFile` | object | No | External data file | +| `config` | object | No | Scenario-level overrides | + +### Step Properties + +| Property | Type | Required | Description | +|----------|------|----------|-------------| +| `name` | string | No | Step name | +| `request` | object | Yes | HTTP request | +| `thinkTime` | string/object | No | Delay after step | +| `assertions` | array | No | Response assertions | +| `extract` | array | No | Data extractors | + +**Example**: +```yaml +scenarios: + - name: "User Login" + weight: 100 + steps: + - name: "Login Request" + request: + method: "POST" + path: "/auth/login" + body: '{"username": "user", "password": "pass"}' + assertions: + - statusCode: 200 + extract: + - name: "token" + jsonPath: "$.token" + thinkTime: "2s" +``` + +--- + +## Complete Example + +```yaml +version: "1.0" + +metadata: + name: "API Load Test" + description: "Testing main API endpoints" + tags: ["api", "production"] + +config: + baseUrl: "https://api.example.com" + timeout: "30s" + workers: 50 + duration: "10m" + +load: + model: "rps" + target: 100 + +scenarios: + - name: "Get Users" + weight: 70 + steps: + - request: + method: "GET" + path: "/users" + assertions: + - statusCode: 200 + + - name: "Create User" + weight: 30 + steps: + - request: + method: "POST" + path: "/users" + body: '{"name": "Test User"}' + assertions: + - statusCode: 201 +``` diff --git a/docs/CONFIG_VERSIONING.md b/docs/CONFIG_VERSIONING.md new file mode 100644 index 
0000000..1dd4bb9 --- /dev/null +++ b/docs/CONFIG_VERSIONING.md @@ -0,0 +1,461 @@ +# Configuration Versioning + +## Overview + +rust-loadtest uses semantic versioning for YAML configuration files. This enables: +- **Version validation** - Ensure config files are compatible with current tool version +- **Forward/backward compatibility** - Clear error messages for incompatible versions +- **Migration framework** - Automated migration path for config schema changes +- **Future-proof design** - Prepared for schema evolution over time + +## Version Format + +Configuration versions use **major.minor** format: +- **Major version**: Breaking changes, incompatible schema modifications +- **Minor version**: Backward-compatible additions and enhancements + +Examples: `1.0`, `1.1`, `2.0`, `2.5` + +## Current Version + +- **Current**: `1.0` +- **Minimum Supported**: `1.0` +- **Maximum Supported**: `1.0` + +## Version in YAML + +Every YAML configuration file must declare its version: + +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/health" +``` + +## Version Validation + +### Supported Versions + +The tool validates that config file versions fall within the supported range: + +```rust +// Supported range check +if version < MINIMUM_SUPPORTED { + Error: "Version 0.5 is too old. Minimum supported version: 1.0" +} + +if version > MAXIMUM_SUPPORTED { + Error: "Version 2.0 is too new. Maximum supported version: 1.0" +} +``` + +### Invalid Format + +Version strings must follow `major.minor` format: + +**✅ Valid:** +- `"1.0"` +- `"2.5"` +- `"10.99"` + +**❌ Invalid:** +- `"1"` - Missing minor version +- `"1.0.0"` - Patch version not allowed +- `"invalid"` - Not a number +- `"1.x"` - Non-numeric component + +### Error Messages + +Version errors provide clear, actionable messages: + +``` +Invalid version format: 1.0.0. 
Expected format: X.Y (e.g., 1.0, 2.1)
+
+Unsupported version: 2.0. Supported versions: 1.0
+
+Version 0.5 is too old. Minimum supported version: 1.0
+
+Version 3.0 is too new. Maximum supported version: 1.0
+```
+
+## Migration Framework
+
+### Overview
+
+When config schemas evolve, the migration framework automates version upgrades:
+
+```
+Version 1.0 → Migration → Version 2.0 → Migration → Version 3.0
+```
+
+### Migration Registry
+
+Migrations are registered and applied automatically:
+
+```rust
+// Register a migration
+let mut registry = MigrationRegistry::default_migrations();
+registry.register(Box::new(MigrationV1ToV2));
+
+// Apply migration
+let upgraded_yaml = registry.migrate(
+    original_yaml,
+    &Version::new(1, 0),
+    &Version::new(2, 0)
+)?;
+```
+
+### Creating Migrations
+
+Implement the `Migration` trait:
+
+```rust
+use rust_loadtest::config_version::{Migration, Version, VersionError};
+
+struct MigrationV1ToV2;
+
+impl Migration for MigrationV1ToV2 {
+    fn from_version(&self) -> Version {
+        Version::new(1, 0)
+    }
+
+    fn to_version(&self) -> Version {
+        Version::new(2, 0)
+    }
+
+    fn description(&self) -> &str {
+        "Add authentication section and rename baseUrl to base_url"
+    }
+
+    fn migrate(&self, yaml: &str) -> Result<String, VersionError> {
+        // Parse YAML
+        let mut config: serde_yaml::Value = serde_yaml::from_str(yaml)?;
+
+        // Update version
+        config["version"] = serde_yaml::Value::String("2.0".to_string());
+
+        // Add new auth section
+        config["auth"] = serde_yaml::Value::Mapping(Default::default());
+
+        // Rename field
+        if let Some(base_url) = config["config"]["baseUrl"].take() {
+            config["config"]["base_url"] = base_url;
+        }
+
+        // Serialize back to YAML
+        Ok(serde_yaml::to_string(&config)?)
+    }
+}
+```
+
+### Migration Best Practices
+
+1. **Make migrations idempotent** - Running twice should produce same result
+2. **Preserve data** - Don't lose user configuration data
+3. **Validate after migration** - Ensure output is valid for target version
+4.
**Test thoroughly** - Cover edge cases and malformed configs +5. **Document changes** - Clear description of what changed + +## Version Evolution Plan + +### Version 1.0 (Current) + +Initial release with: +- Basic YAML configuration +- Global config section +- Load models (concurrent, rps, ramp, dailytraffic) +- Scenario definitions +- Steps with requests, assertions, extractors + +### Version 1.1 (Future) + +Potential backward-compatible additions: +- Authentication section (API keys, OAuth, JWT) +- Advanced data sources (databases, APIs) +- Conditional logic in scenarios +- Variable scoping and namespaces +- Test hooks (before/after test, before/after scenario) + +### Version 2.0 (Future) + +Potential breaking changes: +- Restructured config schema +- New required fields +- Deprecated old load model syntax +- Enhanced scenario format + +## Checking Version Compatibility + +### From Code + +```rust +use rust_loadtest::config_version::{Version, VersionChecker}; + +// Parse and validate +let version = VersionChecker::parse_and_validate("1.0")?; + +// Check compatibility +match VersionChecker::check_compatibility(&version)? 
{ + None => println!("Version is current, no migration needed"), + Some(migration_path) => { + println!("Migration needed:"); + for target in migration_path { + println!(" → {}", target); + } + } +} +``` + +### Version Info + +Get current version information: + +```rust +use rust_loadtest::config_version::VersionInfo; + +println!("Current version: {}", VersionInfo::current()); +println!("Supported range: {} to {}", + VersionInfo::minimum_supported(), + VersionInfo::maximum_supported() +); +``` + +## Migration Examples + +### Example 1: Field Rename + +**Config v1.0:** +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" +``` + +**Config v2.0 (hypothetical):** +```yaml +version: "2.0" +config: + base_url: "https://api.example.com" # Renamed for consistency +``` + +**Migration:** +```rust +config["config"]["base_url"] = config["config"]["baseUrl"].take(); +``` + +### Example 2: Add Required Field + +**Config v1.0:** +```yaml +version: "1.0" +load: + model: "rps" + target: 100 +``` + +**Config v2.0 (hypothetical):** +```yaml +version: "2.0" +load: + model: "rps" + target: 100 + distribution: "uniform" # New required field +``` + +**Migration:** +```rust +if config["load"]["model"] == "rps" { + // Add default value for new required field + config["load"]["distribution"] = Value::String("uniform".to_string()); +} +``` + +### Example 3: Restructure Section + +**Config v1.0:** +```yaml +version: "1.0" +config: + timeout: "30s" + workers: 10 +``` + +**Config v2.0 (hypothetical):** +```yaml +version: "2.0" +config: + execution: + timeout: "30s" + workers: 10 +``` + +**Migration:** +```rust +let mut execution = Mapping::new(); +execution.insert("timeout", config["config"]["timeout"].take()); +execution.insert("workers", config["config"]["workers"].take()); +config["config"]["execution"] = Value::Mapping(execution); +``` + +## Error Handling + +### Unsupported Version + +```yaml +version: "3.0" # Not yet released +config: + baseUrl: "https://test.com" + 
duration: "5m" +``` + +**Error:** +``` +YAML config error: Invalid configuration: version: Version 3.0 is too new. +Maximum supported version: 1.0 +``` + +### Invalid Format + +```yaml +version: "1.0.0" # Three-part version not allowed +config: + baseUrl: "https://test.com" +``` + +**Error:** +``` +YAML config error: Invalid configuration: version: Invalid version format: 1.0.0. +Expected format: X.Y (e.g., 1.0, 2.1) +``` + +## Testing Version Compatibility + +### Test Current Version + +```yaml +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +``` + +**Result:** ✅ Loads successfully + +### Test Future Version + +```yaml +version: "2.0" +config: + baseUrl: "https://test.com" +``` + +**Result:** ❌ Error: "Version 2.0 is too new" + +### Test Old Version + +```yaml +version: "0.5" +config: + baseUrl: "https://test.com" +``` + +**Result:** ❌ Error: "Version 0.5 is too old" + +## CLI Integration + +### Check Config Version + +```bash +# Validate config version +rust-loadtest --config test.yaml --validate-version + +# Output: +# Config version: 1.0 +# Status: ✅ Supported +# Current tool version: 1.0 +``` + +### Migrate Config + +```bash +# Auto-migrate config to current version +rust-loadtest --config test.yaml --migrate + +# Output: +# Migrating from 1.0 to 2.0... +# Migration: Add authentication section +# ✅ Migration successful +# Updated config written to: test.v2.0.yaml +``` + +## FAQ + +### Q: What happens if I use an unsupported version? + +**A:** The tool will refuse to load the config and display a clear error message indicating the supported version range. + +### Q: Can I downgrade a config file to an older version? + +**A:** No. Migrations only support upgrading forward. Downgrading could lose data from newer features. + +### Q: Will my v1.0 configs continue to work forever? + +**A:** Yes, within reason. 
We maintain backward compatibility for at least 2 major versions. When v3.0 is released, v1.0 support may be deprecated with a clear migration path. + +### Q: How do I know if a migration is needed? + +**A:** The tool automatically detects version mismatches. If your config version is older than the current version, a migration path will be suggested. + +### Q: What if migration fails? + +**A:** Migration errors provide detailed information about what failed. You may need to manually update certain fields or fix malformed config before migration can succeed. + +### Q: Can I skip version validation? + +**A:** No. Version validation is mandatory to ensure config compatibility and prevent runtime errors from incompatible schemas. + +## Related Documentation + +- [YAML Configuration Guide](/docs/YAML_CONFIG.md) +- [Configuration Precedence](/docs/CONFIGURATION_PRECEDENCE.md) +- [Environment Variable Overrides](/docs/ENV_VAR_OVERRIDES.md) +- [Migration Guide](/docs/MIGRATION_GUIDE.md) + +## Version History + +| Version | Release Date | Major Changes | +|---------|--------------|---------------| +| 1.0 | 2026-02 | Initial release with YAML config support | + +## Future Roadmap + +### Version 1.1 (Planned) + +- Authentication section +- Advanced data sources +- Conditional logic +- Test hooks + +### Version 2.0 (Planned) + +- Restructured schema +- Enhanced scenario format +- Plugin system +- Distributed testing support diff --git a/docs/ENV_VAR_OVERRIDES.md b/docs/ENV_VAR_OVERRIDES.md new file mode 100644 index 0000000..f72fcb2 --- /dev/null +++ b/docs/ENV_VAR_OVERRIDES.md @@ -0,0 +1,348 @@ +# Environment Variable Overrides + +## Overview + +rust-loadtest supports environment variable overrides for YAML configuration values, enabling flexible configuration management across different environments (development, CI/CD, production) without modifying config files. 
+ +## Precedence Order + +Configuration values are resolved in the following order (highest to lowest priority): + +1. **Environment Variables** (Highest Priority) +2. **YAML Configuration File** +3. **Default Values** (Lowest Priority) + +## Environment Variable Mapping + +### Global Configuration + +| Environment Variable | YAML Path | Description | Example | +|---------------------|-----------|-------------|---------| +| `TARGET_URL` | `config.baseUrl` | Base URL for requests | `https://api.example.com` | +| `NUM_CONCURRENT_TASKS` | `config.workers` | Number of concurrent workers | `100` | +| `REQUEST_TIMEOUT` | `config.timeout` | Request timeout duration | `60s`, `5m` | +| `TEST_DURATION` | `config.duration` | Total test duration | `30m`, `2h` | +| `SKIP_TLS_VERIFY` | `config.skipTlsVerify` | Skip TLS certificate verification | `true`, `false` | +| `CUSTOM_HEADERS` | `config.customHeaders` | Custom HTTP headers | `Authorization:Bearer token` | + +### Load Model Configuration + +#### Concurrent Model +No environment variables (model has no parameters). 
+ +#### RPS Model +| Environment Variable | YAML Path | Description | Example | +|---------------------|-----------|-------------|---------| +| `TARGET_RPS` | `load.target` | Target requests per second | `500` | + +#### Ramp Model +| Environment Variable | YAML Path | Description | Example | +|---------------------|-----------|-------------|---------| +| `MIN_RPS` | `load.min` | Starting RPS | `10` | +| `MAX_RPS` | `load.max` | Maximum RPS | `1000` | +| `RAMP_DURATION` | `load.rampDuration` | Ramp-up duration | `5m`, `30s` | + +#### Daily Traffic Model +| Environment Variable | YAML Path | Description | Example | +|---------------------|-----------|-------------|---------| +| `DAILY_MIN_RPS` | `load.min` | Minimum RPS (night) | `10` | +| `DAILY_MID_RPS` | `load.mid` | Midday RPS | `50` | +| `DAILY_MAX_RPS` | `load.max` | Peak RPS | `100` | +| `DAILY_CYCLE_DURATION` | `load.cycleDuration` | Full cycle duration | `1d`, `24h` | + +#### Complete Load Model Override +| Environment Variable | Description | Example | +|---------------------|-------------|---------| +| `LOAD_MODEL_TYPE` | Completely override load model | `Concurrent`, `Rps`, `RampRps`, `DailyTraffic` | + +When `LOAD_MODEL_TYPE` is set, the entire load model from YAML is replaced with the environment variable configuration. 
+ +## Usage Examples + +### Example 1: Override Workers and Duration + +**YAML Config (test.yaml):** +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + workers: 10 + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "API Test" + steps: + - request: + method: "GET" + path: "/health" +``` + +**Run with overrides:** +```bash +NUM_CONCURRENT_TASKS=50 TEST_DURATION=30m rust-loadtest --config test.yaml +``` + +**Result:** +- `workers`: 50 (from ENV, overrides YAML's 10) +- `duration`: 30m (from ENV, overrides YAML's 5m) +- `baseUrl`: https://api.example.com (from YAML) + +### Example 2: Override RPS Target + +**YAML Config:** +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "10m" +load: + model: "rps" + target: 100 +scenarios: + - name: "Load Test" + steps: + - request: + method: "POST" + path: "/api/data" +``` + +**Run with override:** +```bash +TARGET_RPS=500 rust-loadtest --config loadtest.yaml +``` + +**Result:** +- `load.target`: 500 (from ENV, overrides YAML's 100) +- All other values from YAML + +### Example 3: Complete Load Model Override + +**YAML Config:** +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "10m" +load: + model: "concurrent" # Will be completely replaced +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +``` + +**Run with complete override:** +```bash +LOAD_MODEL_TYPE=Rps TARGET_RPS=200 rust-loadtest --config test.yaml +``` + +**Result:** +- Load model: RPS with target 200 (from ENV, replaces YAML's concurrent model) + +### Example 4: Multiple Overrides in CI/CD + +**YAML Config (base.yaml):** +```yaml +version: "1.0" +config: + baseUrl: "https://staging.example.com" + workers: 20 + timeout: "30s" + duration: "5m" + skipTlsVerify: false +load: + model: "ramp" + min: 10 + max: 100 + rampDuration: "2m" +scenarios: + - name: "Integration Test" + steps: + - request: + method: "GET" + path: "/api/v1/health" +``` + 
+**Production CI/CD run:** +```bash +TARGET_URL=https://prod.example.com \ +NUM_CONCURRENT_TASKS=100 \ +TEST_DURATION=30m \ +MIN_RPS=50 \ +MAX_RPS=1000 \ +RAMP_DURATION=10m \ +rust-loadtest --config base.yaml +``` + +**Result:** +- `baseUrl`: https://prod.example.com (ENV override) +- `workers`: 100 (ENV override) +- `duration`: 30m (ENV override) +- `load.min`: 50 (ENV override) +- `load.max`: 1000 (ENV override) +- `load.rampDuration`: 10m (ENV override) +- `timeout`: 30s (from YAML) +- `skipTlsVerify`: false (from YAML) + +## Best Practices + +### 1. Version Control YAML, Override with Environment + +**✅ Recommended:** +- Keep base configuration in version-controlled YAML files +- Use environment variables for environment-specific values +- Document required environment variables in README + +**❌ Avoid:** +- Hardcoding environment-specific values in YAML +- Creating separate YAML files for each environment + +### 2. Use Environment Variables for Secrets + +**✅ Recommended:** +```bash +# Keep secrets out of YAML files +CUSTOM_HEADERS="Authorization:Bearer ${API_TOKEN}" \ +rust-loadtest --config test.yaml +``` + +**❌ Avoid:** +```yaml +# Don't hardcode secrets in YAML +config: + customHeaders: "Authorization:Bearer hardcoded-secret-123" +``` + +### 3. Document Environment Variables + +Include a `.env.example` file in your repository: + +```bash +# .env.example +# Load Test Configuration Overrides + +# Target URL (overrides config.baseUrl) +TARGET_URL=https://api.example.com + +# Workers (overrides config.workers) +NUM_CONCURRENT_TASKS=50 + +# Test Duration (overrides config.duration) +TEST_DURATION=10m + +# Load Model +LOAD_MODEL_TYPE=Rps +TARGET_RPS=200 +``` + +### 4. 
Use CI/CD Pipeline Variables + +**GitHub Actions Example:** +```yaml +name: Load Test + +on: [push] + +jobs: + loadtest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run Load Test + env: + TARGET_URL: ${{ secrets.PROD_API_URL }} + NUM_CONCURRENT_TASKS: 100 + TEST_DURATION: 30m + TARGET_RPS: ${{ vars.TARGET_RPS }} + run: | + rust-loadtest --config loadtest.yaml +``` + +### 5. Validate Configuration + +Always validate your final configuration before running long tests: + +```bash +# Set env vars +export NUM_CONCURRENT_TASKS=100 +export TEST_DURATION=2h +export TARGET_RPS=500 + +# Do a short dry run first +TEST_DURATION=10s rust-loadtest --config test.yaml + +# If successful, run full test +rust-loadtest --config test.yaml +``` + +## Fallback Behavior + +### Invalid Environment Variable Values + +If an environment variable contains an invalid value, the system falls back to the YAML value or default: + +```bash +# Invalid worker count +NUM_CONCURRENT_TASKS=invalid rust-loadtest --config test.yaml +# → Falls back to YAML config.workers value +``` + +### Empty Environment Variables + +Empty environment variables are treated as unset and fall back to YAML: + +```bash +# Empty target URL +TARGET_URL="" rust-loadtest --config test.yaml +# → Falls back to YAML config.baseUrl value +``` + +## Duration Format + +Duration values support multiple formats: +- Seconds: `30s`, `120s` +- Minutes: `5m`, `30m` +- Hours: `2h`, `24h` +- Days: `1d`, `7d` +- Raw seconds: `300` (interpreted as seconds) + +## Boolean Values + +Boolean environment variables are case-insensitive: +- True: `true`, `TRUE`, `True`, `1` +- False: `false`, `FALSE`, `False`, `0` + +## Debugging + +### Print Effective Configuration + +To see which values are being used: + +```bash +# Enable debug logging +RUST_LOG=debug NUM_CONCURRENT_TASKS=100 rust-loadtest --config test.yaml +``` + +### Test Precedence + +1. Load YAML config without env vars +2. Add one env var at a time +3. 
Verify each override takes effect + +## Related Documentation + +- [Configuration Precedence](/docs/CONFIGURATION_PRECEDENCE.md) +- [YAML Configuration Guide](/docs/YAML_CONFIG.md) +- [Default Values Reference](/docs/DEFAULTS.md) + +## Support + +If environment variable overrides aren't working as expected: + +1. Check environment variable spelling (case-sensitive) +2. Verify YAML path matches the override documentation +3. Enable debug logging: `RUST_LOG=debug` +4. Check for typos in duration formats (e.g., `30m` not `30min`) diff --git a/docs/MULTI_SCENARIO.md b/docs/MULTI_SCENARIO.md new file mode 100644 index 0000000..a9f8a41 --- /dev/null +++ b/docs/MULTI_SCENARIO.md @@ -0,0 +1,514 @@ +# Multi-Scenario Execution + +## Overview + +Multi-scenario execution enables running multiple user flows concurrently with weighted traffic distribution. This simulates realistic production environments where different user behaviors occur simultaneously. + +## Key Features + +✅ **Weighted selection** - Scenarios selected by probability based on weights +✅ **Round-robin distribution** - Even distribution across all scenarios +✅ **Per-scenario metrics** - Track performance for each scenario independently +✅ **YAML configuration** - Define multiple scenarios in one config file +✅ **Flexible allocation** - Choose distribution strategy per use case + +## Weighted Scenario Selection + +### How It Works + +Each scenario has a weight that determines its selection probability: + +``` +probability = scenario_weight / sum(all_weights) +``` + +### Example Configuration + +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + workers: 50 + duration: "30m" +load: + model: "rps" + target: 100 +scenarios: + - name: "Read Operations" + weight: 80 # 80% of traffic + steps: + - request: + method: "GET" + path: "/api/data" + + - name: "Write Operations" + weight: 15 # 15% of traffic + steps: + - request: + method: "POST" + path: "/api/data" + body: '{"test": true}' + + - name: 
"Delete Operations" + weight: 5 # 5% of traffic + steps: + - request: + method: "DELETE" + path: "/api/data/123" +``` + +**Result**: Out of 100 RPS: +- ~80 RPS execute "Read Operations" +- ~15 RPS execute "Write Operations" +- ~5 RPS execute "Delete Operations" + +### Weight Calculation + +Weights don't need to sum to 100. The system calculates percentages automatically: + +```yaml +scenarios: + - name: "API v1" + weight: 3 + - name: "API v2" + weight: 1 +``` + +**Result**: 75% API v1, 25% API v2 + +## Round-Robin Distribution + +Round-robin provides even distribution regardless of weights. + +### When to Use + +- **Load balancing** - Test all scenarios equally +- **Fair distribution** - Each scenario gets same traffic +- **Testing coverage** - Ensure all flows are exercised + +### Programmatic Usage + +```rust +use rust_loadtest::multi_scenario::RoundRobinDistributor; + +let scenarios = vec![scenario1, scenario2, scenario3]; +let distributor = RoundRobinDistributor::new(scenarios); + +// Each call returns next scenario in sequence +let s1 = distributor.next(); // scenario1 +let s2 = distributor.next(); // scenario2 +let s3 = distributor.next(); // scenario3 +let s4 = distributor.next(); // scenario1 (cycles) +``` + +## Scenario Selection Strategies + +### Weighted Random (Default) + +**Best for**: Simulating realistic production traffic patterns + +```rust +use rust_loadtest::multi_scenario::ScenarioSelector; + +let selector = ScenarioSelector::new(scenarios); + +// Each call returns weighted random scenario +let scenario = selector.select(); +``` + +**Characteristics**: +- Follows statistical distribution over time +- Realistic traffic simulation +- Some scenarios may not execute in short tests + +### Round-Robin + +**Best for**: Even coverage and load balancing + +```rust +use rust_loadtest::multi_scenario::RoundRobinDistributor; + +let distributor = RoundRobinDistributor::new(scenarios); + +// Guaranteed sequential distribution +let scenario = 
distributor.next(); +``` + +**Characteristics**: +- Deterministic order +- Equal distribution across scenarios +- All scenarios guaranteed to execute + +## Per-Scenario Metrics + +Track performance metrics independently for each scenario. + +### Metrics Tracked + +- **Executions** - Total number of times scenario ran +- **Successes** - Successful completions +- **Failures** - Failed executions +- **Success Rate** - Percentage of successful executions +- **Average Time** - Mean execution duration + +### Usage + +```rust +use rust_loadtest::multi_scenario::ScenarioMetrics; + +let mut metrics = ScenarioMetrics::new(); +metrics.initialize_scenarios(&scenarios); + +// Record executions +metrics.record_execution("Read Operations", true, 120); // success, 120ms +metrics.record_execution("Write Operations", false, 450); // failure, 450ms + +// Query metrics +let executions = metrics.get_executions("Read Operations"); +let success_rate = metrics.get_success_rate("Read Operations"); +let avg_time = metrics.get_average_time_ms("Read Operations"); + +// Get summary for all scenarios +let summary = metrics.summary(); +summary.print(); +``` + +### Sample Output + +``` +=== Per-Scenario Metrics === + +Scenario: Read Operations + Executions: 8000 + Successes: 7950 (99.4%) + Failures: 50 + Avg Time: 120.45ms + +Scenario: Write Operations + Executions: 1500 + Successes: 1480 (98.7%) + Failures: 20 + Avg Time: 245.32ms + +Scenario: Delete Operations + Executions: 500 + Successes: 495 (99.0%) + Failures: 5 + Avg Time: 98.21ms +``` + +## Real-World Examples + +### E-Commerce Load Test + +```yaml +version: "1.0" +metadata: + name: "E-Commerce Load Test" + description: "Realistic shopping behavior patterns" + +config: + baseUrl: "https://shop.example.com" + workers: 100 + duration: "1h" + +load: + model: "ramp" + min: 50 + max: 500 + rampDuration: "15m" + +scenarios: + # Most users browse without buying + - name: "Browse Only" + weight: 60 + steps: + - request: + method: "GET" + path: 
"/" + - request: + method: "GET" + path: "/products" + + # Some users add items but don't complete purchase + - name: "Browse and Add to Cart" + weight: 25 + steps: + - request: + method: "GET" + path: "/products" + - request: + method: "POST" + path: "/cart/add" + + # Fewer users complete full purchase + - name: "Complete Purchase" + weight: 12 + steps: + - request: + method: "GET" + path: "/products" + - request: + method: "POST" + path: "/cart/add" + - request: + method: "POST" + path: "/checkout" + + # Rare admin operations + - name: "Admin Operations" + weight: 3 + steps: + - request: + method: "POST" + path: "/admin/sync" +``` + +### API Versioning Test + +```yaml +scenarios: + # Gradual migration from v1 to v2 + - name: "API v1 (Legacy)" + weight: 70 + steps: + - request: + method: "GET" + path: "/v1/users" + + - name: "API v2 (New)" + weight: 30 + steps: + - request: + method: "GET" + path: "/v2/users" +``` + +### Read/Write Workload + +```yaml +scenarios: + - name: "Read Heavy" + weight: 90 + steps: + - request: + method: "GET" + path: "/api/data" + + - name: "Write Operations" + weight: 10 + steps: + - request: + method: "POST" + path: "/api/data" +``` + +## Worker Allocation + +### Concurrent Model + +Workers continuously pick scenarios based on selection strategy: + +```yaml +load: + model: "concurrent" +config: + workers: 50 # Each worker picks scenarios independently +``` + +With weighted selection (80/15/5 split): +- ~40 workers execute Read Operations +- ~7 workers execute Write Operations +- ~3 workers execute Delete Operations + +### RPS Model + +Target RPS is distributed across scenarios by weight: + +```yaml +load: + model: "rps" + target: 100 # Total 100 RPS across all scenarios +``` + +With weighted selection (80/15/5 split): +- ~80 RPS for Read Operations +- ~15 RPS for Write Operations +- ~5 RPS for Delete Operations + +## Best Practices + +### 1. 
Base Weights on Real Traffic + +Analyze production traffic to set realistic weights: + +```bash +# Example: Analyze access logs +$ cat access.log | awk '{print $7}' | sort | uniq -c | sort -rn + + 80000 GET /api/data + 15000 POST /api/data + 5000 DELETE /api/data +``` + +**Configuration**: +```yaml +scenarios: + - name: "Read" + weight: 80 # Based on actual traffic + - name: "Write" + weight: 15 + - name: "Delete" + weight: 5 +``` + +### 2. Start with Equal Weights for Testing + +Use equal weights initially to test all scenarios: + +```yaml +scenarios: + - name: "Scenario 1" + weight: 1 + - name: "Scenario 2" + weight: 1 + - name: "Scenario 3" + weight: 1 +``` + +Then adjust based on production patterns. + +### 3. Use Round-Robin for Balanced Testing + +For comprehensive testing of all scenarios: + +```rust +let distributor = RoundRobinDistributor::new(scenarios); +// Guarantees equal distribution +``` + +### 4. Monitor Per-Scenario Metrics + +Track metrics separately to identify problematic flows: + +``` +Scenario: User Login + Success: 99.9% ✅ + Avg Time: 120ms + +Scenario: Payment Processing + Success: 95.2% ⚠️ Investigate failures + Avg Time: 850ms +``` + +### 5. Consider Scenario Complexity + +Weight scenarios by both traffic and importance: + +```yaml +scenarios: + # Critical path - high weight + - name: "User Registration" + weight: 50 + + # Important but less frequent + - name: "Password Reset" + weight: 10 + + # Edge case testing + - name: "Account Deletion" + weight: 1 +``` + +## Troubleshooting + +### Uneven Distribution + +**Problem**: Weighted distribution doesn't match expectations in short tests. + +**Solution**: Run longer tests for statistical convergence: +```yaml +config: + duration: "30m" # Longer duration = better distribution +``` + +### Scenario Not Executing + +**Problem**: Low-weight scenario never executes. 
+ +**Solution**: Increase weight or use round-robin: +```yaml +scenarios: + - name: "Rare Scenario" + weight: 5 # Increase from 1 to 5 +``` + +### Metrics Inconsistent + +**Problem**: Per-scenario metrics seem incorrect. + +**Solution**: Ensure metrics are initialized before recording: +```rust +metrics.initialize_scenarios(&scenarios); +``` + +## Integration Example + +Complete integration with weighted selection and metrics: + +```rust +use rust_loadtest::multi_scenario::{ScenarioSelector, ScenarioMetrics}; +use rust_loadtest::yaml_config::YamlConfig; + +// Load scenarios from YAML +let config = YamlConfig::from_file("loadtest.yaml")?; +let scenarios = config.to_scenarios()?; + +// Setup selector and metrics +let selector = ScenarioSelector::new(scenarios.clone()); +let mut metrics = ScenarioMetrics::new(); +metrics.initialize_scenarios(&scenarios); + +// Execute scenarios +for _ in 0..10000 { + let scenario = selector.select(); + + // Execute scenario (simplified) + let success = execute_scenario(scenario); + let duration_ms = 100; // From execution + + // Record metrics + metrics.record_execution(&scenario.name, success, duration_ms); +} + +// Print summary +let summary = metrics.summary(); +summary.print(); +``` + +## CLI Usage + +### Run with Multiple Scenarios + +```bash +rust-loadtest --config multi-scenario.yaml +``` + +### View Per-Scenario Metrics + +```bash +rust-loadtest --config multi-scenario.yaml --metrics per-scenario +``` + +### Test Specific Scenario + +```bash +rust-loadtest --config multi-scenario.yaml --scenario "Read Operations" +``` + +## Related Documentation + +- [Scenario YAML Definitions](/docs/SCENARIO_YAML.md) +- [Load Models](/docs/LOAD_MODELS.md) +- [Metrics and Reporting](/docs/METRICS.md) +- [YAML Configuration](/docs/YAML_CONFIG.md) diff --git a/docs/SCENARIO_YAML.md b/docs/SCENARIO_YAML.md new file mode 100644 index 0000000..05593f7 --- /dev/null +++ b/docs/SCENARIO_YAML.md @@ -0,0 +1,686 @@ +//! 
Documentation for Scenario YAML Definitions (Issue #42) + +# Scenario YAML Definitions + +## Overview + +Scenarios define multi-step user journeys for load testing. Each scenario represents a realistic user flow with sequential steps, variable extraction, assertions, and realistic timing. + +## Key Features + +✅ **Multiple scenarios per config** - Mix different user flows +✅ **Weighted traffic distribution** - Control scenario selection probability +✅ **Multi-step sequences** - Complex user journeys +✅ **Variable extraction** - Extract and reuse data between steps +✅ **Assertions** - Validate responses at each step +✅ **Think time** - Realistic delays (fixed or random) +✅ **Data files** - CSV/JSON data for data-driven testing +✅ **Scenario-level config** - Override global settings per scenario + +## Basic Scenario + +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "10m" +load: + model: "concurrent" +scenarios: + - name: "API Health Check" + steps: + - request: + method: "GET" + path: "/health" +``` + +## Multiple Scenarios with Weighting + +Weight determines traffic distribution. Total weights don't need to sum to 100. 
+ +```yaml +scenarios: + - name: "Read Operations" + weight: 80 # 80% of traffic + steps: + - request: + method: "GET" + path: "/api/read" + + - name: "Write Operations" + weight: 15 # 15% of traffic + steps: + - request: + method: "POST" + path: "/api/write" + + - name: "Delete Operations" + weight: 5 # 5% of traffic + steps: + - request: + method: "DELETE" + path: "/api/delete" +``` + +**Traffic calculation:** `scenario_weight / sum(all_weights) = traffic_percentage` + +## Multi-Step Scenarios + +### E-commerce Example + +```yaml +scenarios: + - name: "Shopping Flow" + weight: 70 + steps: + # Step 1: Homepage + - name: "Homepage" + request: + method: "GET" + path: "/" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "2s" + + # Step 2: Search with extraction + - name: "Search Products" + request: + method: "GET" + path: "/search?q=laptop" + extract: + - type: "jsonPath" + name: "productId" + jsonPath: "$.products[0].id" + thinkTime: "3s" + + # Step 3: Use extracted variable + - name: "Product Details" + request: + method: "GET" + path: "/products/${productId}" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "5s" + + # Step 4: Add to cart + - name: "Add to Cart" + request: + method: "POST" + path: "/cart" + body: '{"productId": "${productId}", "quantity": 1}' + assertions: + - type: "statusCode" + expected: 201 +``` + +## Think Time + +Think time simulates realistic user behavior by adding delays between steps. 
+ +### Fixed Think Time + +```yaml +steps: + - request: + method: "GET" + path: "/page1" + thinkTime: "3s" # Always 3 seconds + + - request: + method: "GET" + path: "/page2" + thinkTime: "5000" # Raw milliseconds +``` + +### Random Think Time + +```yaml +steps: + - request: + method: "GET" + path: "/browse" + thinkTime: + min: "2s" + max: "5s" # Random delay between 2-5 seconds + + - request: + method: "GET" + path: "/search" + thinkTime: + min: "1s" + max: "10s" # Variable user reading time +``` + +## Variable Extraction + +Extract data from responses to use in subsequent steps. + +### JSON Path Extraction + +```yaml +steps: + - name: "Get User" + request: + method: "GET" + path: "/user/profile" + extract: + - type: "jsonPath" + name: "userId" + jsonPath: "$.id" + - type: "jsonPath" + name: "email" + jsonPath: "$.email" +``` + +### Header Extraction + +```yaml +extract: + - type: "header" + name: "authToken" + header: "X-Auth-Token" +``` + +### Cookie Extraction + +```yaml +extract: + - type: "cookie" + name: "sessionId" + cookie: "JSESSIONID" +``` + +### Regex Extraction + +```yaml +extract: + - type: "regex" + name: "transactionId" + regex: "Transaction ID: (\\d+)" +``` + +## Using Extracted Variables + +Variables use `${variableName}` syntax: + +```yaml +steps: + # Extract variable + - request: + method: "POST" + path: "/auth/login" + body: '{"email": "user@test.com", "password": "pass123"}' + extract: + - type: "jsonPath" + name: "token" + jsonPath: "$.accessToken" + + # Use in header + - request: + method: "GET" + path: "/api/profile" + headers: + Authorization: "Bearer ${token}" + + # Use in path + - request: + method: "GET" + path: "/users/${userId}/orders" + + # Use in body + - request: + method: "POST" + path: "/api/purchase" + body: '{"userId": "${userId}", "productId": "${productId}"}' +``` + +## Assertions + +Validate responses at each step. 
+ +### Status Code + +```yaml +assertions: + - type: "statusCode" + expected: 200 +``` + +### Response Time + +```yaml +assertions: + - type: "responseTime" + max: "500ms" +``` + +### Body Contains + +```yaml +assertions: + - type: "bodyContains" + text: "success" +``` + +### Body Matches Regex + +```yaml +assertions: + - type: "bodyMatches" + regex: "User-\\d+" +``` + +### JSON Path + +```yaml +assertions: + - type: "jsonPath" + path: "$.status" + expected: "active" +``` + +### Header Exists + +```yaml +assertions: + - type: "headerExists" + header: "X-Request-ID" +``` + +### Multiple Assertions + +```yaml +steps: + - request: + method: "POST" + path: "/api/order" + body: '{"items": [1, 2, 3]}' + assertions: + - type: "statusCode" + expected: 201 + - type: "responseTime" + max: "1s" + - type: "jsonPath" + path: "$.orderId" + - type: "bodyContains" + text: "confirmed" +``` + +## Headers and Query Parameters + +### Custom Headers + +```yaml +request: + method: "GET" + path: "/api/data" + headers: + Authorization: "Bearer ${token}" + X-Custom-Header: "value" + Content-Type: "application/json" +``` + +### Query Parameters + +```yaml +request: + method: "GET" + path: "/api/search" + queryParams: + q: "laptop" + limit: "20" + sort: "price" + order: "asc" +``` + +**Result:** `/api/search?q=laptop&limit=20&sort=price&order=asc` + +## Data Files (Data-Driven Testing) + +Load test data from CSV or JSON files. 
+ +### CSV Data File + +**File: users.csv** +```csv +username,password,email +user1,pass1,user1@test.com +user2,pass2,user2@test.com +user3,pass3,user3@test.com +``` + +**YAML:** +```yaml +scenarios: + - name: "Login Test" + dataFile: + path: "./testdata/users.csv" + format: "csv" + strategy: "sequential" # or "random" or "cycle" + steps: + - request: + method: "POST" + path: "/login" + body: '{"username": "${username}", "password": "${password}"}' +``` + +### JSON Data File + +**File: products.json** +```json +[ + {"productId": "P001", "name": "Laptop"}, + {"productId": "P002", "name": "Mouse"}, + {"productId": "P003", "name": "Keyboard"} +] +``` + +**YAML:** +```yaml +scenarios: + - name: "Product Test" + dataFile: + path: "./testdata/products.json" + format: "json" + strategy: "random" + steps: + - request: + method: "GET" + path: "/products/${productId}" +``` + +### Data Strategies + +| Strategy | Behavior | +|----------|----------| +| `sequential` | Iterate through data rows in order (default) | +| `random` | Select random rows | +| `cycle` | Loop back to start when reaching end | + +## Scenario-Level Configuration + +Override global settings for specific scenarios. 
+ +```yaml +config: + baseUrl: "https://api.example.com" + timeout: "30s" # Global timeout + duration: "10m" + +scenarios: + - name: "Fast API" + steps: + - request: + method: "GET" + path: "/fast" + + - name: "Slow API" + config: + timeout: "120s" # Override for this scenario + retryCount: 3 + retryDelay: "5s" + steps: + - request: + method: "GET" + path: "/slow" +``` + +### Available Overrides + +- `timeout` - Request timeout (overrides global) +- `retryCount` - Number of retry attempts +- `retryDelay` - Delay between retries + +## Complete Example + +```yaml +version: "1.0" +metadata: + name: "E-commerce Load Test" + description: "Realistic shopping flow with authentication" + +config: + baseUrl: "https://shop.example.com" + workers: 50 + timeout: "30s" + duration: "30m" + +load: + model: "ramp" + min: 10 + max: 200 + rampDuration: "10m" + +scenarios: + # Scenario 1: Complete shopping flow (70% of traffic) + - name: "Browse and Purchase" + weight: 70 + config: + timeout: "60s" + retryCount: 2 + dataFile: + path: "./users.csv" + format: "csv" + strategy: "cycle" + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + assertions: + - type: "statusCode" + expected: 200 + - type: "responseTime" + max: "1s" + thinkTime: + min: "1s" + max: "3s" + + - name: "Login" + request: + method: "POST" + path: "/api/auth/login" + body: '{"email": "${email}", "password": "${password}"}' + headers: + Content-Type: "application/json" + extract: + - type: "jsonPath" + name: "authToken" + jsonPath: "$.token" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "2s" + + - name: "Search" + request: + method: "GET" + path: "/api/products/search" + queryParams: + q: "laptop" + limit: "20" + headers: + Authorization: "Bearer ${authToken}" + extract: + - type: "jsonPath" + name: "productId" + jsonPath: "$.results[0].id" + - type: "jsonPath" + name: "price" + jsonPath: "$.results[0].price" + thinkTime: + min: "2s" + max: "5s" + + - name: "View Product" + request: 
+ method: "GET" + path: "/api/products/${productId}" + headers: + Authorization: "Bearer ${authToken}" + assertions: + - type: "statusCode" + expected: 200 + - type: "bodyContains" + text: "${productId}" + thinkTime: "4s" + + - name: "Add to Cart" + request: + method: "POST" + path: "/api/cart/items" + body: '{"productId": "${productId}", "quantity": 1}' + headers: + Authorization: "Bearer ${authToken}" + Content-Type: "application/json" + assertions: + - type: "statusCode" + expected: 201 + - type: "jsonPath" + path: "$.cartTotal" + thinkTime: "2s" + + - name: "Checkout" + request: + method: "POST" + path: "/api/orders" + body: '{}' + headers: + Authorization: "Bearer ${authToken}" + Content-Type: "application/json" + extract: + - type: "jsonPath" + name: "orderId" + jsonPath: "$.orderId" + assertions: + - type: "statusCode" + expected: 201 + - type: "responseTime" + max: "2s" + + # Scenario 2: Quick browsing (30% of traffic) + - name: "Quick Browse" + weight: 30 + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + thinkTime: "1s" + + - name: "Category" + request: + method: "GET" + path: "/category/electronics" + thinkTime: + min: "2s" + max: "4s" + + - name: "Product List" + request: + method: "GET" + path: "/api/products" + queryParams: + category: "electronics" + limit: "50" + assertions: + - type: "statusCode" + expected: 200 +``` + +## Best Practices + +### 1. Realistic Think Times + +Use random think times to simulate real user behavior: + +```yaml +thinkTime: + min: "2s" + max: "10s" # Reading time varies +``` + +### 2. Scenario Weighting + +Base weights on real traffic patterns: + +```yaml +scenarios: + - name: "Read" + weight: 90 # 90% reads + - name: "Write" + weight: 10 # 10% writes +``` + +### 3. Error Handling + +Add retries for flaky endpoints: + +```yaml +scenarios: + - name: "External API" + config: + retryCount: 3 + retryDelay: "2s" +``` + +### 4. 
Assertions + +Validate critical responses: + +```yaml +assertions: + - type: "statusCode" + expected: 200 + - type: "responseTime" + max: "500ms" + - type: "jsonPath" + path: "$.status" + expected: "success" +``` + +### 5. Variable Extraction + +Extract all needed data in one step: + +```yaml +extract: + - type: "jsonPath" + name: "userId" + jsonPath: "$.id" + - type: "jsonPath" + name: "token" + jsonPath: "$.token" + - type: "header" + name: "sessionId" + header: "X-Session-ID" +``` + +## Testing Scenarios + +### Validate Syntax + +```bash +rust-loadtest --config test.yaml --validate +``` + +### Dry Run + +```bash +rust-loadtest --config test.yaml --dry-run --duration 1m +``` + +### Single Scenario + +```bash +rust-loadtest --config test.yaml --scenario "Browse and Purchase" +``` + +## Related Documentation + +- [YAML Configuration Guide](/docs/YAML_CONFIG.md) +- [Variable Extraction Guide](/docs/EXTRACTION.md) +- [Assertions Reference](/docs/ASSERTIONS.md) +- [Data Files Guide](/docs/DATA_FILES.md) diff --git a/docs/config-schema.json b/docs/config-schema.json new file mode 100644 index 0000000..c1a3b5a --- /dev/null +++ b/docs/config-schema.json @@ -0,0 +1,356 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Rust LoadTest Configuration", + "description": "YAML configuration schema for rust-loadtest load testing tool", + "type": "object", + "required": [ + "version", + "config", + "load", + "scenarios" + ], + "properties": { + "version": { + "type": "string", + "description": "Configuration version (semantic versioning)", + "pattern": "^[0-9]+\\.[0-9]+$", + "examples": [ + "1.0" + ] + }, + "metadata": { + "type": "object", + "description": "Optional metadata about the test configuration", + "properties": { + "name": { + "type": "string", + "description": "Human-readable test name" + }, + "description": { + "type": "string", + "description": "Test description" + }, + "author": { + "type": "string", + "description": "Test author" + }, + 
"tags": { + "type": "array", + "description": "Tags for categorization", + "items": { + "type": "string" + } + } + } + }, + "config": { + "type": "object", + "description": "Global test configuration", + "required": [ + "baseUrl", + "duration" + ], + "properties": { + "baseUrl": { + "type": "string", + "description": "Base URL of the API to test", + "format": "uri", + "examples": [ + "https://api.example.com" + ] + }, + "timeout": { + "description": "Request timeout (e.g., '30s', '1m')", + "oneOf": [ + { + "type": "string", + "pattern": "^[0-9]+(s|m|h)$" + }, + { + "type": "integer", + "minimum": 1 + } + ], + "default": "30s" + }, + "workers": { + "type": "integer", + "description": "Number of concurrent workers", + "minimum": 1, + "default": 10 + }, + "duration": { + "description": "Test duration (e.g., '5m', '1h')", + "oneOf": [ + { + "type": "string", + "pattern": "^[0-9]+(s|m|h)$" + }, + { + "type": "integer", + "minimum": 1 + } + ] + }, + "skipTlsVerify": { + "type": "boolean", + "description": "Skip TLS certificate verification (insecure)", + "default": false + }, + "customHeaders": { + "type": "string", + "description": "Custom HTTP headers (e.g., 'Authorization: Bearer token')" + } + } + }, + "load": { + "type": "object", + "description": "Load model configuration", + "required": [ + "model" + ], + "oneOf": [ + { + "properties": { + "model": { + "const": "concurrent" + } + }, + "required": [ + "model" + ] + }, + { + "properties": { + "model": { + "const": "rps" + }, + "target": { + "type": "number", + "description": "Target requests per second", + "minimum": 0.1 + } + }, + "required": [ + "model", + "target" + ] + }, + { + "properties": { + "model": { + "const": "ramp" + }, + "min": { + "type": "number", + "description": "Starting RPS", + "minimum": 0.1 + }, + "max": { + "type": "number", + "description": "Ending RPS", + "minimum": 0.1 + }, + "rampDuration": { + "description": "Ramp duration (e.g., '5m')", + "oneOf": [ + { + "type": "string", + "pattern": 
"^[0-9]+(s|m|h)$" + }, + { + "type": "integer", + "minimum": 1 + } + ] + } + }, + "required": [ + "model", + "min", + "max", + "rampDuration" + ] + } + ] + }, + "scenarios": { + "type": "array", + "description": "Test scenarios", + "minItems": 1, + "items": { + "type": "object", + "required": [ + "name", + "steps" + ], + "properties": { + "name": { + "type": "string", + "description": "Scenario name" + }, + "weight": { + "type": "number", + "description": "Scenario weight for traffic distribution", + "minimum": 0.1, + "default": 100.0 + }, + "steps": { + "type": "array", + "description": "Scenario steps", + "minItems": 1, + "items": { + "type": "object", + "required": [ + "request" + ], + "properties": { + "name": { + "type": "string", + "description": "Step name" + }, + "request": { + "type": "object", + "required": [ + "method", + "path" + ], + "properties": { + "method": { + "type": "string", + "enum": [ + "GET", + "POST", + "PUT", + "DELETE", + "PATCH", + "HEAD", + "OPTIONS" + ], + "description": "HTTP method" + }, + "path": { + "type": "string", + "description": "Request path (relative to baseUrl)" + }, + "body": { + "type": "string", + "description": "Request body" + }, + "headers": { + "type": "object", + "description": "Custom request headers", + "additionalProperties": { + "type": "string" + } + } + } + }, + "thinkTime": { + "description": "Think time after step", + "oneOf": [ + { + "type": "string", + "pattern": "^[0-9]+(s|m|h)$" + }, + { + "type": "integer", + "minimum": 0 + }, + { + "type": "object", + "properties": { + "min": { + "type": "string" + }, + "max": { + "type": "string" + } + }, + "required": [ + "min", + "max" + ] + } + ] + }, + "assertions": { + "type": "array", + "description": "Response assertions", + "items": { + "type": "object" + } + }, + "extract": { + "type": "array", + "description": "Data extractors", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "jsonPath": { + "type": "string" + }, + 
"regex": { + "type": "string" + } + } + } + } + } + } + }, + "dataFile": { + "type": "object", + "description": "External data file", + "required": [ + "path", + "format", + "strategy" + ], + "properties": { + "path": { + "type": "string", + "description": "Path to data file" + }, + "format": { + "type": "string", + "enum": [ + "csv", + "json" + ], + "description": "Data file format" + }, + "strategy": { + "type": "string", + "enum": [ + "sequential", + "random", + "cycle" + ], + "description": "Data iteration strategy" + } + } + }, + "config": { + "type": "object", + "description": "Scenario-level config overrides", + "properties": { + "timeout": { + "type": "string" + }, + "retryCount": { + "type": "integer" + }, + "retryDelay": { + "type": "string" + } + } + } + } + } + } + } +} diff --git a/examples/configs/README.md b/examples/configs/README.md new file mode 100644 index 0000000..5147f9a --- /dev/null +++ b/examples/configs/README.md @@ -0,0 +1,506 @@ +# Load Test Configuration Examples + +This directory contains ready-to-use YAML configuration templates for common load testing scenarios. Each template is fully documented and can be used as-is or customized for your specific needs. + +## Available Templates + +### 1. Basic API Test (`basic-api-test.yaml`) + +**Purpose**: Simple load test for a single API endpoint + +**Use Cases**: +- API health checks +- Simple endpoint testing +- Getting started with load testing +- Smoke testing + +**Key Features**: +- Single endpoint testing +- RPS load model (100 RPS) +- Basic assertions (status code, response time) +- 5-minute duration + +**Quick Start**: +```bash +# Edit the baseUrl in the file +vim basic-api-test.yaml + +# Run the test +rust-loadtest --config basic-api-test.yaml +``` + +**Customize**: +- `baseUrl`: Change to your API endpoint +- `workers`: Adjust for desired concurrency +- `target`: Modify target RPS +- `duration`: Change test duration + +--- + +### 2. 
E-Commerce Scenario (`ecommerce-scenario.yaml`) + +**Purpose**: Realistic e-commerce load test with multiple user flows + +**Use Cases**: +- E-commerce platforms +- Multi-step user journeys +- Realistic traffic simulation +- Conversion funnel testing + +**Key Features**: +- 4 weighted scenarios (browse, add to cart, checkout, quick browse) +- Variable think times +- Data extraction (product IDs, prices) +- Realistic user behavior patterns + +**Traffic Distribution**: +- 60% Browse only +- 25% Browse and add to cart +- 12% Complete purchase +- 3% Quick browse + +**Quick Start**: +```bash +rust-loadtest --config ecommerce-scenario.yaml +``` + +**Customize**: +- Adjust scenario weights to match your traffic +- Modify think times for your user behavior +- Update product search/checkout paths +- Add authentication if needed + +--- + +### 3. Stress Test (`stress-test.yaml`) + +**Purpose**: High-load stress test to find system breaking points + +**Use Cases**: +- Capacity planning +- Finding system limits +- Performance bottleneck identification +- Auto-scaling validation + +**Key Features**: +- Ramp load model (10 → 1000 RPS) +- High worker count (200) +- Long duration (1 hour) +- Mixed read/write operations + +**Load Profile**: +- Start: 10 RPS +- End: 1000 RPS +- Ramp: 15 minutes +- Sustain: 45 minutes + +**Quick Start**: +```bash +# ⚠️ Warning: This generates significant load +rust-loadtest --config stress-test.yaml +``` + +**Customize**: +- `max`: Adjust maximum RPS based on your system +- `rampDuration`: Change ramp speed (gradual vs rapid) +- `workers`: Scale based on your infrastructure +- `duration`: Extend for longer stress tests + +--- + +### 4. 
Data-Driven Test (`data-driven-test.yaml`) + +**Purpose**: Load test using external CSV/JSON data files + +**Use Cases**: +- Testing with realistic user data +- Large dataset testing +- Parameterized load tests +- Credential-based testing + +**Key Features**: +- CSV and JSON data file support +- Multiple iteration strategies (sequential, random, cycle) +- Variable substitution in requests +- Separate scenarios for each data source + +**Data File Examples**: + +**CSV** (`examples/data/users.csv`): +```csv +username,email,user_id +john.doe,john@example.com,1001 +jane.smith,jane@example.com,1002 +``` + +**JSON** (`examples/data/products.json`): +```json +[ + {"product_name": "Laptop", "category": "electronics", "sku": "LAP-001"} +] +``` + +**Quick Start**: +```bash +# Data files are included in examples/data/ +rust-loadtest --config data-driven-test.yaml +``` + +**Customize**: +- Create your own CSV/JSON files +- Update `dataFile.path` to point to your files +- Change `strategy` (sequential, random, cycle) +- Use data variables in requests: `${variable_name}` + +--- + +### 5. Authenticated API (`authenticated-api.yaml`) + +**Purpose**: Load test for APIs requiring authentication + +**Use Cases**: +- JWT authentication testing +- API key validation +- OAuth 2.0 flows +- Token refresh testing + +**Key Features**: +- JWT authentication flow +- API key authentication +- OAuth token refresh +- Token extraction and reuse + +**Authentication Methods**: +- JWT tokens (login → use token) +- API keys (static header) +- OAuth 2.0 (token + refresh) + +**Quick Start**: +```bash +# Set credentials +export USERNAME="testuser@example.com" +export PASSWORD="securePassword123" +export API_KEY="your-api-key" + +rust-loadtest --config authenticated-api.yaml +``` + +**Customize**: +- Update authentication endpoints +- Modify token extraction JSONPath +- Add custom auth headers +- Change credentials format + +--- + +### 6. 
Microservices Test (`microservices-test.yaml`) + +**Purpose**: Load test for distributed microservices architecture + +**Use Cases**: +- Microservices platforms +- API gateway testing +- Inter-service communication +- Distributed system validation + +**Key Features**: +- Multiple service endpoints +- Service-specific scenarios +- Weighted traffic distribution +- End-to-end flows + +**Services Tested**: +- User Service (25%) +- Product Service (30%) +- Order Service (30%) +- Inventory Service (15%) + +**Quick Start**: +```bash +rust-loadtest --config microservices-test.yaml +``` + +**Customize**: +- Update service endpoints +- Adjust scenario weights +- Add service-specific assertions +- Modify service interaction flows + +--- + +### 7. GraphQL API (`graphql-api.yaml`) + +**Purpose**: Load test for GraphQL APIs + +**Use Cases**: +- GraphQL API testing +- Query complexity testing +- Mutation performance +- Schema validation + +**Key Features**: +- Simple and complex queries +- Mutations (create, update, delete) +- Query variables +- Nested object fetching + +**Operation Types**: +- Simple queries (40%) +- Complex nested queries (25%) +- Mutations (25%) +- Search and filter (10%) + +**Quick Start**: +```bash +rust-loadtest --config graphql-api.yaml +``` + +**Customize**: +- Update GraphQL queries for your schema +- Adjust query complexity +- Modify mutation operations +- Add authentication headers + +--- + +### 8. 
Spike Test (`spike-test.yaml`) + +**Purpose**: Sudden traffic spike test for resilience validation + +**Use Cases**: +- Flash sale simulation +- Viral content scenarios +- Auto-scaling response testing +- Traffic surge validation + +**Key Features**: +- Sudden load increases +- System recovery observation +- High worker count (150) +- Short think times + +**Spike Pattern**: +- Phase 1: Normal load (20 workers) +- Phase 2: Spike (150 workers) +- Phase 3: Recovery (20 workers) +- Phase 4: Validation (20 workers) + +**Quick Start**: +```bash +# ⚠️ Warning: Generates sudden load spike +rust-loadtest --config spike-test.yaml +``` + +**Customize**: +- Adjust spike magnitude +- Modify spike duration +- Add health check endpoints +- Change recovery time + +--- + +## Template Selection Guide + +| Template | Complexity | Duration | Workers | RPS | Best For | +|----------|-----------|----------|---------|-----|----------| +| Basic API | Simple | 5m | 10 | 100 | Getting started, simple endpoints | +| E-Commerce | Medium | 30m | 50 | 10-200 | Multi-step flows, realistic behavior | +| Stress Test | High | 1h | 200 | 10-1000 | Finding limits, capacity planning | +| Data-Driven | Medium | 15m | 20 | 50 | Realistic data, parameterized tests | +| Authenticated | Medium | 20m | 25 | 75 | Auth flows, token management | +| Microservices | High | 30m | 40 | 20-150 | Distributed systems, multiple services | +| GraphQL | Medium | 20m | 30 | 80 | GraphQL APIs, complex queries | +| Spike Test | High | 30m | 150 | Burst | Resilience, auto-scaling | + +## Customization Guide + +### Common Customizations + +#### 1. Change Base URL +```yaml +config: + baseUrl: "https://your-api.example.com" +``` + +#### 2. 
Adjust Load
+```yaml
+# RPS Model
+load:
+  model: "rps"
+  target: 200  # Change target RPS
+
+# Ramp Model
+load:
+  model: "ramp"
+  min: 50   # Start RPS
+  max: 500  # End RPS
+  rampDuration: "10m"
+
+# Concurrent Model
+load:
+  model: "concurrent"
+config:
+  workers: 100  # Number of concurrent workers
+```
+
+#### 3. Modify Duration
+```yaml
+config:
+  duration: "30m"  # Options: "30s", "5m", "1h"
+```
+
+#### 4. Add Authentication
+```yaml
+config:
+  customHeaders: "Authorization: Bearer your-token-here"
+
+# Or extract from login
+steps:
+  - name: "Login"
+    request:
+      method: "POST"
+      path: "/auth/login"
+    extract:
+      - type: jsonPath
+        name: "token"
+        jsonPath: "$.token"
+
+  - name: "Use Token"
+    request:
+      method: "GET"
+      path: "/protected"
+      headers:
+        Authorization: "Bearer ${token}"
+```
+
+#### 5. Adjust Think Times
+```yaml
+# Fixed think time
+thinkTime: "3s"
+
+# Random think time
+thinkTime:
+  min: "1s"
+  max: "5s"
+```
+
+#### 6. Add Custom Assertions
+```yaml
+assertions:
+  - type: statusCode
+    expected: 200
+  - type: responseTime
+    max: "2s"
+  - type: bodyContains
+    text: "success"
+  - type: jsonPath
+    path: "$.status"
+    expected: "ok"
+  - type: headerExists
+    header: "X-Request-ID"
+```
+
+### Environment Variable Overrides
+
+All templates support environment variable overrides:
+
+```bash
+# Override base URL
+TARGET_URL=https://staging.api.example.com rust-loadtest --config template.yaml
+
+# Override workers
+NUM_CONCURRENT_TASKS=50 rust-loadtest --config template.yaml
+
+# Override duration
+TEST_DURATION=10m rust-loadtest --config template.yaml
+
+# Override RPS
+TARGET_RPS=200 rust-loadtest --config template.yaml
+```
+
+## Validation
+
+All templates are validated to ensure:
+- ✅ Valid YAML syntax
+- ✅ Correct schema structure
+- ✅ Valid URLs (example.com placeholders)
+- ✅ Valid duration formats
+- ✅ Positive worker counts
+- ✅ Valid load model parameters
+
+To validate a template:
+```bash
+rust-loadtest --config template.yaml --validate
+```
+
+## Creating Custom Templates
+
+### Template Structure
+
+```yaml
+version: 
"1.0" + +metadata: + name: "Your Test Name" + description: "Brief description" + tags: ["tag1", "tag2"] + +config: + baseUrl: "https://api.example.com" + timeout: "30s" + workers: 10 + duration: "5m" + +load: + model: "rps" + target: 100 + +scenarios: + - name: "Scenario Name" + weight: 100 + steps: + - name: "Step Name" + request: + method: "GET" + path: "/endpoint" + assertions: + - statusCode: 200 +``` + +### Best Practices + +1. **Use Descriptive Names**: Clear scenario and step names +2. **Add Comments**: Document complex logic +3. **Set Realistic Timeouts**: Based on your SLA +4. **Add Assertions**: Validate responses +5. **Use Think Times**: Simulate real user behavior +6. **Extract Variables**: Reuse data across steps +7. **Weight Scenarios**: Match real traffic patterns + +## Data Files + +Example data files are provided in `examples/data/`: + +- `users.csv` - Sample user data (10 users) +- `products.json` - Sample product data (10 products) + +Create your own data files following the same format. + +## Getting Help + +- **Documentation**: See `/docs/` for detailed guides +- **Examples**: All templates include inline comments +- **Validation**: Use `--validate` flag to check configs +- **Issues**: Report problems on GitHub + +## Contributing + +To contribute a new template: + +1. Create a new YAML file in `examples/configs/` +2. Add comprehensive comments +3. Include usage examples +4. Document customization options +5. Add validation tests +6. 
Update this README + +## Version History + +- **v1.0** - Initial template collection (8 templates) + - Basic API, E-Commerce, Stress Test, Data-Driven + - Authenticated API, Microservices, GraphQL, Spike Test diff --git a/examples/configs/authenticated-api.yaml b/examples/configs/authenticated-api.yaml new file mode 100644 index 0000000..b359eac --- /dev/null +++ b/examples/configs/authenticated-api.yaml @@ -0,0 +1,204 @@ +# Authenticated API Load Test Template +# +# Load test for APIs requiring authentication (JWT, API keys, OAuth). +# Demonstrates authentication flows and authenticated requests. +# +# Authentication Methods: +# - JWT tokens (login once, use for all requests) +# - API keys (static header) +# - OAuth 2.0 (token refresh) +# - Basic auth +# +# Usage: +# rust-loadtest --config authenticated-api.yaml +# +# Environment Variables: +# API_KEY - Set your API key +# USERNAME - Test user username +# PASSWORD - Test user password +# +# Customize: +# - Update authentication endpoint +# - Modify token extraction logic +# - Add custom auth headers + +version: "1.0" + +metadata: + name: "Authenticated API Load Test" + description: "Load test for APIs requiring authentication" + author: "Security Team" + tags: ["authentication", "jwt", "api-key", "oauth"] + +config: + baseUrl: "https://api.example.com" + timeout: "30s" + workers: 25 + duration: "20m" + + # Custom headers for API key authentication + customHeaders: "X-API-Key: your-api-key-here" + +load: + model: "rps" + target: 75 + +scenarios: + # Scenario 1: JWT Authentication Flow (60% of traffic) + - name: "JWT Authenticated Requests" + weight: 60 + steps: + - name: "User Login" + request: + method: "POST" + path: "/auth/login" + body: '{"username": "testuser@example.com", "password": "securePassword123"}' + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.token" + expected: "*" + extract: + - type: jsonPath + name: "jwtToken" + jsonPath: "$.token" + - type: jsonPath + name: 
"userId" + jsonPath: "$.user.id" + thinkTime: "1s" + + - name: "Get User Data" + request: + method: "GET" + path: "/users/${userId}" + headers: + Authorization: "Bearer ${jwtToken}" + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.id" + expected: "${userId}" + thinkTime: "2s" + + - name: "List Resources" + request: + method: "GET" + path: "/api/resources" + headers: + Authorization: "Bearer ${jwtToken}" + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Create Resource" + request: + method: "POST" + path: "/api/resources" + headers: + Authorization: "Bearer ${jwtToken}" + body: '{"name": "test-resource", "description": "Created by load test"}' + assertions: + - type: statusCode + expected: 201 + extract: + - type: jsonPath + name: "resourceId" + jsonPath: "$.id" + thinkTime: "3s" + + - name: "Update Resource" + request: + method: "PUT" + path: "/api/resources/${resourceId}" + headers: + Authorization: "Bearer ${jwtToken}" + body: '{"name": "updated-resource"}' + assertions: + - type: statusCode + expected: 200 + + # Scenario 2: API Key Authentication (30% of traffic) + - name: "API Key Authenticated Requests" + weight: 30 + steps: + - name: "List Public Data" + request: + method: "GET" + path: "/public/data" + # API key automatically added from customHeaders in config + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Get Specific Item" + request: + method: "GET" + path: "/public/data/123" + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "itemId" + jsonPath: "$.id" + thinkTime: "2s" + + # Scenario 3: OAuth 2.0 Token Refresh (10% of traffic) + - name: "OAuth Token Refresh Flow" + weight: 10 + steps: + - name: "Get Access Token" + request: + method: "POST" + path: "/oauth/token" + body: '{"grant_type": "client_credentials", "client_id": "test-client", "client_secret": "test-secret"}' + assertions: + - type: statusCode + expected: 200 + 
extract: + - type: jsonPath + name: "accessToken" + jsonPath: "$.access_token" + - type: jsonPath + name: "refreshToken" + jsonPath: "$.refresh_token" + thinkTime: "1s" + + - name: "Use Access Token" + request: + method: "GET" + path: "/api/protected-resource" + headers: + Authorization: "Bearer ${accessToken}" + assertions: + - type: statusCode + expected: 200 + thinkTime: "3s" + + - name: "Refresh Token" + request: + method: "POST" + path: "/oauth/token" + body: '{"grant_type": "refresh_token", "refresh_token": "${refreshToken}"}' + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "newAccessToken" + jsonPath: "$.access_token" + thinkTime: "2s" + + - name: "Use Refreshed Token" + request: + method: "GET" + path: "/api/protected-resource" + headers: + Authorization: "Bearer ${newAccessToken}" + assertions: + - type: statusCode + expected: 200 diff --git a/examples/configs/basic-api-test.yaml b/examples/configs/basic-api-test.yaml new file mode 100644 index 0000000..2885dbb --- /dev/null +++ b/examples/configs/basic-api-test.yaml @@ -0,0 +1,57 @@ +# Basic API Load Test Template +# +# A simple load test configuration for testing a single API endpoint. +# Perfect for getting started or testing basic API health and performance. 
+# +# Usage: +# rust-loadtest --config basic-api-test.yaml +# +# Customize: +# - Change baseUrl to your API endpoint +# - Adjust workers and duration for your needs +# - Modify the target RPS for desired load + +version: "1.0" + +metadata: + name: "Basic API Load Test" + description: "Simple load test for a single API endpoint" + author: "Load Test Team" + tags: ["basic", "api", "health-check"] + +config: + # Base URL of the API to test + baseUrl: "https://api.example.com" + + # Request timeout (30 seconds) + timeout: "30s" + + # Number of concurrent workers + workers: 10 + + # Test duration + duration: "5m" + + # Skip TLS certificate verification (for testing only) + skipTlsVerify: false + +load: + # Use RPS (requests per second) model + model: "rps" + + # Target 100 requests per second + target: 100 + +scenarios: + - name: "API Health Check" + weight: 100 + steps: + - name: "Check API Status" + request: + method: "GET" + path: "/health" + assertions: + - type: statusCode + expected: 200 + - type: responseTime + max: "1s" diff --git a/examples/configs/data-driven-test.yaml b/examples/configs/data-driven-test.yaml new file mode 100644 index 0000000..8abb5fe --- /dev/null +++ b/examples/configs/data-driven-test.yaml @@ -0,0 +1,126 @@ +# Data-Driven Load Test Template +# +# Load test using external data files (CSV or JSON) for test data. +# Perfect for testing with realistic user data or large datasets. +# +# Features: +# - Load test data from CSV/JSON files +# - Sequential, random, or cycle through data +# - Realistic user credentials and profiles +# - Parameterized requests using data variables +# +# Usage: +# 1. Create data file (users.csv or users.json) +# 2. 
rust-loadtest --config data-driven-test.yaml +# +# Data file examples: +# CSV: username,email,user_id +# john.doe,john@example.com,123 +# jane.smith,jane@example.com,456 +# +# JSON: [{"username": "john.doe", "email": "john@example.com", "user_id": 123}] +# +# Customize: +# - Change data file path and format +# - Adjust iteration strategy (sequential, random, cycle) +# - Modify requests to use data variables + +version: "1.0" + +metadata: + name: "Data-Driven Load Test" + description: "Load test using external CSV/JSON data files" + author: "QA Team" + tags: ["data-driven", "csv", "json", "realistic-data"] + +config: + baseUrl: "https://api.example.com" + timeout: "30s" + workers: 20 + duration: "15m" + +load: + model: "rps" + target: 50 + +scenarios: + # Scenario using CSV data file + - name: "User Login with CSV Data" + weight: 50 + + # Load data from CSV file + dataFile: + path: "./examples/data/users.csv" + format: "csv" + strategy: "random" # Options: sequential, random, cycle + + steps: + - name: "User Login" + request: + method: "POST" + path: "/auth/login" + # Use variables from CSV: ${username}, ${email}, ${user_id} + body: '{"username": "${username}", "password": "test123"}' + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "authToken" + jsonPath: "$.token" + thinkTime: "2s" + + - name: "Get User Profile" + request: + method: "GET" + path: "/users/${user_id}" + headers: + Authorization: "Bearer ${authToken}" + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.email" + expected: "${email}" + thinkTime: "3s" + + # Scenario using JSON data file + - name: "Product Search with JSON Data" + weight: 50 + + # Load data from JSON file + dataFile: + path: "./examples/data/products.json" + format: "json" + strategy: "cycle" # Cycle through all products + + steps: + - name: "Search Product" + request: + method: "GET" + # Use variables from JSON: ${product_name}, ${category}, ${sku} + path: 
"/search?q=${product_name}&category=${category}" + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Get Product Details" + request: + method: "GET" + path: "/products/${sku}" + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.name" + expected: "${product_name}" + thinkTime: "3s" + + - name: "Check Inventory" + request: + method: "GET" + path: "/inventory/${sku}" + assertions: + - type: statusCode + expected: 200 diff --git a/examples/configs/docker-test.yaml b/examples/configs/docker-test.yaml new file mode 100644 index 0000000..a34d540 --- /dev/null +++ b/examples/configs/docker-test.yaml @@ -0,0 +1,81 @@ +# Docker Test Configuration +# +# This configuration is designed to work with the docker-compose setup. +# It tests the httpbin service running in the test-api container. +# +# Usage: +# docker-compose up +# +# Or to run manually: +# docker-compose run loadtest rust-loadtest --config /app/configs/docker-test.yaml + +version: "1.0" + +metadata: + name: "Docker Test" + description: "Quick test using docker-compose test API" + tags: ["docker", "test", "demo"] + +config: + # Uses httpbin service in docker-compose + baseUrl: "http://test-api" + timeout: "30s" + workers: 5 + duration: "30s" + +load: + model: "rps" + target: 10 + +scenarios: + - name: "HTTPBin Status Check" + weight: 40 + steps: + - name: "Get Status 200" + request: + method: "GET" + path: "/status/200" + assertions: + - type: statusCode + expected: 200 + thinkTime: "1s" + + - name: "HTTPBin GET Request" + weight: 30 + steps: + - name: "Get Request" + request: + method: "GET" + path: "/get" + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "userAgent" + jsonPath: "$.headers.User-Agent" + thinkTime: "1s" + + - name: "HTTPBin POST Request" + weight: 20 + steps: + - name: "Post Data" + request: + method: "POST" + path: "/post" + body: '{"test": "data", "timestamp": "2024-01-01"}' + assertions: + - type: 
statusCode + expected: 200 + thinkTime: "1s" + + - name: "HTTPBin Delay Test" + weight: 10 + steps: + - name: "Delayed Response" + request: + method: "GET" + path: "/delay/1" + assertions: + - type: statusCode + expected: 200 diff --git a/examples/configs/ecommerce-scenario.yaml b/examples/configs/ecommerce-scenario.yaml new file mode 100644 index 0000000..13625fa --- /dev/null +++ b/examples/configs/ecommerce-scenario.yaml @@ -0,0 +1,197 @@ +# E-Commerce Load Test Template +# +# Realistic e-commerce load test with multiple user flows: +# - Browse products +# - Add items to cart +# - Complete checkout +# - Quick browsing without purchase +# +# Simulates realistic shopping patterns with weighted scenarios. +# +# Usage: +# rust-loadtest --config ecommerce-scenario.yaml +# +# Customize: +# - Adjust scenario weights to match your traffic patterns +# - Modify think times to simulate realistic user behavior +# - Add authentication headers if needed + +version: "1.0" + +metadata: + name: "E-Commerce Load Test" + description: "Multi-scenario load test simulating realistic shopping behavior" + author: "E-Commerce Team" + tags: ["ecommerce", "multi-scenario", "realistic"] + +config: + baseUrl: "https://shop.example.com" + timeout: "30s" + workers: 50 + duration: "30m" + +load: + # Ramp up load gradually + model: "ramp" + min: 10 + max: 200 + rampDuration: "5m" + +scenarios: + # Scenario 1: Browse products only (60% of users) + - name: "Browse Only" + weight: 60 + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Category Page" + request: + method: "GET" + path: "/products/electronics" + assertions: + - type: statusCode + expected: 200 + thinkTime: "3s" + + - name: "Product Details" + request: + method: "GET" + path: "/products/laptop-123" + assertions: + - type: statusCode + expected: 200 + - type: bodyContains + text: "Add to Cart" + thinkTime: "5s" + + # Scenario 2: Browse 
and add to cart (25% of users) + - name: "Browse and Add to Cart" + weight: 25 + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + thinkTime: "2s" + + - name: "Search Products" + request: + method: "GET" + path: "/search?q=laptop" + extract: + - type: jsonPath + name: "productId" + jsonPath: "$.products[0].id" + thinkTime: "3s" + + - name: "Product Details" + request: + method: "GET" + path: "/products/${productId}" + thinkTime: "4s" + + - name: "Add to Cart" + request: + method: "POST" + path: "/cart/add" + body: '{"productId": "${productId}", "quantity": 1}' + assertions: + - type: statusCode + expected: 201 + - type: jsonPath + path: "$.success" + expected: "true" + thinkTime: "2s" + + - name: "View Cart" + request: + method: "GET" + path: "/cart" + assertions: + - type: statusCode + expected: 200 + + # Scenario 3: Complete purchase (12% of users) + - name: "Complete Purchase" + weight: 12 + steps: + - name: "Search Products" + request: + method: "GET" + path: "/search?q=laptop" + extract: + - type: jsonPath + name: "productId" + jsonPath: "$.products[0].id" + - type: jsonPath + name: "price" + jsonPath: "$.products[0].price" + thinkTime: "2s" + + - name: "Add to Cart" + request: + method: "POST" + path: "/cart/add" + body: '{"productId": "${productId}", "quantity": 1}' + assertions: + - type: statusCode + expected: 201 + thinkTime: "3s" + + - name: "View Cart" + request: + method: "GET" + path: "/cart" + thinkTime: "2s" + + - name: "Proceed to Checkout" + request: + method: "POST" + path: "/checkout" + body: '{"shippingMethod": "standard", "paymentMethod": "credit_card"}' + assertions: + - type: statusCode + expected: 200 + thinkTime: "5s" + + - name: "Complete Order" + request: + method: "POST" + path: "/checkout/complete" + body: '{"confirmPayment": true}' + assertions: + - type: statusCode + expected: 201 + - type: jsonPath + path: "$.orderId" + expected: "*" + + # Scenario 4: Quick browse (3% of users) + - name: "Quick Browse" + weight: 3 
+ steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + assertions: + - type: statusCode + expected: 200 + thinkTime: + min: "1s" + max: "3s" + + - name: "Random Category" + request: + method: "GET" + path: "/products/featured" + thinkTime: + min: "1s" + max: "2s" diff --git a/examples/configs/graphql-api.yaml b/examples/configs/graphql-api.yaml new file mode 100644 index 0000000..e503e55 --- /dev/null +++ b/examples/configs/graphql-api.yaml @@ -0,0 +1,233 @@ +# GraphQL API Load Test Template +# +# Load test for GraphQL APIs with queries, mutations, and subscriptions. +# Demonstrates common GraphQL patterns and best practices. +# +# Features: +# - Simple queries +# - Complex nested queries +# - Mutations (create, update, delete) +# - Query variables +# - Error handling +# +# Usage: +# rust-loadtest --config graphql-api.yaml +# +# GraphQL Endpoint: +# Typically a single endpoint (e.g., /graphql) that handles all operations +# +# Customize: +# - Update GraphQL schema-specific queries +# - Adjust query complexity based on your schema +# - Add custom headers (authentication) + +version: "1.0" + +metadata: + name: "GraphQL API Load Test" + description: "Load test for GraphQL APIs with queries and mutations" + author: "GraphQL Team" + tags: ["graphql", "api", "queries", "mutations"] + +config: + baseUrl: "https://graphql.example.com" + timeout: "30s" + workers: 30 + duration: "20m" + +load: + model: "rps" + target: 80 + +scenarios: + # Scenario 1: Simple Queries (40%) + - name: "Simple GraphQL Queries" + weight: 40 + steps: + - name: "Get User List" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query { users(limit: 10) { id name email } }" + } + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.data.users" + expected: "*" + extract: + - type: jsonPath + name: "userId" + jsonPath: "$.data.users[0].id" + thinkTime: "2s" + + - name: "Get User Details" + request: + method: "POST" + path: "/graphql" + 
body: > + { + "query": "query { user(id: \"${userId}\") { id name email posts { id title } } }" + } + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.data.user.id" + expected: "${userId}" + thinkTime: "3s" + + # Scenario 2: Complex Nested Queries (25%) + - name: "Complex Nested Queries" + weight: 25 + steps: + - name: "Get Posts with Comments" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query { posts(limit: 5) { id title author { id name } comments { id text author { name } } likes } }" + } + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "postId" + jsonPath: "$.data.posts[0].id" + thinkTime: "2s" + + - name: "Get Post Details" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query GetPost($postId: ID!) { post(id: $postId) { id title content author { id name avatar } comments { id text createdAt author { name } } tags } }", + "variables": { "postId": "${postId}" } + } + assertions: + - type: statusCode + expected: 200 + thinkTime: "3s" + + # Scenario 3: Mutations (25%) + - name: "GraphQL Mutations" + weight: 25 + steps: + - name: "Create Post" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "mutation CreatePost($input: CreatePostInput!) { createPost(input: $input) { id title content author { id name } } }", + "variables": { + "input": { + "title": "Load Test Post", + "content": "This post was created during load testing", + "tags": ["test", "loadtest"] + } + } + } + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.data.createPost.id" + expected: "*" + extract: + - type: jsonPath + name: "newPostId" + jsonPath: "$.data.createPost.id" + thinkTime: "2s" + + - name: "Update Post" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "mutation UpdatePost($id: ID!, $input: UpdatePostInput!) 
{ updatePost(id: $id, input: $input) { id title content } }", + "variables": { + "id": "${newPostId}", + "input": { + "title": "Updated Load Test Post" + } + } + } + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Add Comment" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "mutation AddComment($postId: ID!, $text: String!) { addComment(postId: $postId, text: $text) { id text author { name } } }", + "variables": { + "postId": "${newPostId}", + "text": "Great post!" + } + } + assertions: + - type: statusCode + expected: 200 + thinkTime: "3s" + + - name: "Delete Post" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "mutation DeletePost($id: ID!) { deletePost(id: $id) { success message } }", + "variables": { + "id": "${newPostId}" + } + } + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.data.deletePost.success" + expected: "true" + + # Scenario 4: Search and Filter (10%) + - name: "GraphQL Search and Filter" + weight: 10 + steps: + - name: "Search Posts" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query SearchPosts($searchTerm: String!) { searchPosts(query: $searchTerm) { id title content author { name } } }", + "variables": { + "searchTerm": "test" + } + } + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Filter Posts by Tag" + request: + method: "POST" + path: "/graphql" + body: > + { + "query": "query { posts(filter: { tags: [\"technology\"] }) { id title tags } }" + } + assertions: + - type: statusCode + expected: 200 diff --git a/examples/configs/microservices-test.yaml b/examples/configs/microservices-test.yaml new file mode 100644 index 0000000..50cc069 --- /dev/null +++ b/examples/configs/microservices-test.yaml @@ -0,0 +1,210 @@ +# Microservices Load Test Template +# +# Load test for microservices architecture with multiple service endpoints. 
+# Tests inter-service communication patterns and distributed system behavior. +# +# Architecture: +# - API Gateway +# - User Service +# - Product Service +# - Order Service +# - Inventory Service +# +# Usage: +# rust-loadtest --config microservices-test.yaml +# +# Note: +# This template assumes all services are accessible through a common +# API gateway. Adjust paths if services have different base URLs. +# +# Customize: +# - Update service endpoints +# - Adjust scenario weights based on traffic patterns +# - Add service-specific assertions + +version: "1.0" + +metadata: + name: "Microservices Load Test" + description: "Load test for distributed microservices architecture" + author: "Platform Team" + tags: ["microservices", "distributed", "api-gateway"] + +config: + # API Gateway base URL + baseUrl: "https://gateway.example.com" + timeout: "45s" + workers: 40 + duration: "30m" + +load: + model: "ramp" + min: 20 + max: 150 + rampDuration: "5m" + +scenarios: + # Scenario 1: User Service Operations (25%) + - name: "User Service Flow" + weight: 25 + steps: + - name: "Register User" + request: + method: "POST" + path: "/users/register" + body: '{"email": "user@example.com", "name": "Test User"}' + assertions: + - type: statusCode + expected: 201 + extract: + - type: jsonPath + name: "userId" + jsonPath: "$.userId" + - type: jsonPath + name: "token" + jsonPath: "$.token" + thinkTime: "2s" + + - name: "Get User Profile" + request: + method: "GET" + path: "/users/${userId}" + headers: + Authorization: "Bearer ${token}" + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + - name: "Update User Profile" + request: + method: "PUT" + path: "/users/${userId}" + headers: + Authorization: "Bearer ${token}" + body: '{"name": "Updated User"}' + assertions: + - type: statusCode + expected: 200 + + # Scenario 2: Product Service Operations (30%) + - name: "Product Service Flow" + weight: 30 + steps: + - name: "Browse Products" + request: + method: "GET" + path: 
"/products?limit=20" + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "productId" + jsonPath: "$.products[0].id" + thinkTime: "3s" + + - name: "Get Product Details" + request: + method: "GET" + path: "/products/${productId}" + assertions: + - type: statusCode + expected: 200 + - type: responseTime + max: "500ms" + extract: + - type: jsonPath + name: "productName" + jsonPath: "$.name" + - type: jsonPath + name: "productPrice" + jsonPath: "$.price" + thinkTime: "4s" + + - name: "Check Product Reviews" + request: + method: "GET" + path: "/products/${productId}/reviews" + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + # Scenario 3: Order Service Flow (30%) + - name: "Order Service Flow" + weight: 30 + steps: + - name: "Create Order" + request: + method: "POST" + path: "/orders" + body: '{"productId": "123", "quantity": 1, "shippingAddress": "123 Main St"}' + assertions: + - type: statusCode + expected: 201 + extract: + - type: jsonPath + name: "orderId" + jsonPath: "$.orderId" + thinkTime: "3s" + + - name: "Get Order Status" + request: + method: "GET" + path: "/orders/${orderId}" + assertions: + - type: statusCode + expected: 200 + - type: jsonPath + path: "$.status" + expected: "*" + thinkTime: "2s" + + - name: "Get Order History" + request: + method: "GET" + path: "/orders/history" + assertions: + - type: statusCode + expected: 200 + thinkTime: "2s" + + # Scenario 4: Inventory Service Operations (15%) + - name: "Inventory Service Flow" + weight: 15 + steps: + - name: "Check Inventory" + request: + method: "GET" + path: "/inventory/products/123" + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "stockLevel" + jsonPath: "$.quantity" + thinkTime: "2s" + + - name: "Reserve Inventory" + request: + method: "POST" + path: "/inventory/reserve" + body: '{"productId": "123", "quantity": 1}' + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + 
name: "reservationId" + jsonPath: "$.reservationId" + thinkTime: "1s" + + - name: "Confirm Reservation" + request: + method: "POST" + path: "/inventory/confirm/${reservationId}" + assertions: + - type: statusCode + expected: 200 diff --git a/examples/configs/spike-test.yaml b/examples/configs/spike-test.yaml new file mode 100644 index 0000000..1aa41e7 --- /dev/null +++ b/examples/configs/spike-test.yaml @@ -0,0 +1,149 @@ +# Spike Test Template +# +# Sudden traffic spike test to validate system resilience under rapid load changes. +# Simulates scenarios like flash sales, viral content, or traffic surges. +# +# Purpose: +# - Test auto-scaling response time +# - Validate circuit breakers and rate limiting +# - Check system recovery after spike +# - Identify memory leaks under burst load +# - Test queue and cache behavior +# +# Usage: +# rust-loadtest --config spike-test.yaml +# +# Pattern: +# - Start with normal load +# - Sudden spike to very high load +# - Return to normal load +# - Observe system recovery +# +# Warning: +# Spikes can cause temporary service disruption. Use on test environments +# or during maintenance windows. 
+# +# Customize: +# - Adjust spike magnitude based on normal traffic +# - Modify spike duration +# - Add custom health check endpoints + +version: "1.0" + +metadata: + name: "Spike Test" + description: "Sudden traffic spike test for system resilience" + author: "Reliability Team" + tags: ["spike", "resilience", "auto-scaling", "burst-load"] + +config: + baseUrl: "https://api.example.com" + timeout: "60s" + workers: 150 + duration: "30m" + +load: + # Concurrent model for sudden spike behavior + # Note: In a real spike test, you'd want to manually control + # the number of active workers over time + model: "concurrent" + +scenarios: + # Primary API endpoint (80% of spike traffic) + - name: "High-Traffic Endpoint" + weight: 80 + steps: + - name: "Get Popular Resource" + request: + method: "GET" + path: "/api/popular/resource" + assertions: + - type: statusCode + expected: 200 + - type: responseTime + max: "3s" # Allow more time during spike + thinkTime: + min: "100ms" + max: "500ms" # Shorter think time = more aggressive spike + + - name: "Get Related Resources" + request: + method: "GET" + path: "/api/related?id=123" + assertions: + - type: statusCode + expected: 200 + + # Write operations during spike (15%) + - name: "Spike Write Operations" + weight: 15 + steps: + - name: "Create Event" + request: + method: "POST" + path: "/api/events" + body: '{"type": "user_action", "timestamp": "2024-01-01T00:00:00Z"}' + assertions: + # Accept 429 (rate limited) or 503 (service unavailable) during spike + - type: statusCode + expected: 201 + # Note: In real tests, you'd track these error rates + thinkTime: + min: "50ms" + max: "200ms" + + # Health checks (5%) + - name: "System Health Check" + weight: 5 + steps: + - name: "Check API Health" + request: + method: "GET" + path: "/health" + assertions: + - type: statusCode + expected: 200 + thinkTime: "1s" + + - name: "Check Database Health" + request: + method: "GET" + path: "/health/database" + assertions: + - type: statusCode + 
expected: 200 + +# Spike Test Execution Plan: +# +# Phase 1: Normal Load (0-5 min) +# - Workers: 20 +# - RPS: 50 +# - Purpose: Establish baseline +# +# Phase 2: Spike (5-10 min) +# - Workers: 150 +# - RPS: 500+ +# - Purpose: Sudden load increase +# +# Phase 3: Recovery (10-20 min) +# - Workers: 20 +# - RPS: 50 +# - Purpose: System recovery observation +# +# Phase 4: Validation (20-30 min) +# - Workers: 20 +# - RPS: 50 +# - Purpose: Verify stable operation +# +# To implement this pattern: +# 1. Start test with low workers +# 2. Manually adjust workers during execution (or use hot-reload) +# 3. Monitor system metrics (CPU, memory, response times) +# 4. Track error rates and recovery time +# +# Expected Behavior: +# - Response times increase during spike +# - Rate limiting may activate +# - Auto-scaling should trigger +# - System should recover within 2-5 minutes +# - No persistent errors after recovery diff --git a/examples/configs/stress-test.yaml b/examples/configs/stress-test.yaml new file mode 100644 index 0000000..60ae86b --- /dev/null +++ b/examples/configs/stress-test.yaml @@ -0,0 +1,130 @@ +# Stress Test Template +# +# High-load stress test to find system breaking points. +# Gradually increases load from low to very high RPS. +# +# Purpose: +# - Identify maximum system capacity +# - Find performance bottlenecks +# - Test system behavior under extreme load +# - Validate auto-scaling configurations +# +# Usage: +# rust-loadtest --config stress-test.yaml +# +# Warning: +# This will generate significant load. Use only on test environments +# or production systems designed to handle high traffic. 
+# +# Customize: +# - Adjust max RPS based on your system capacity +# - Modify ramp duration for gradual or rapid stress +# - Change workers based on your load test infrastructure + +version: "1.0" + +metadata: + name: "Stress Test" + description: "High-load stress test to find system limits" + author: "Performance Team" + tags: ["stress", "high-load", "capacity-planning"] + +config: + baseUrl: "https://api.example.com" + timeout: "60s" + + # High number of workers for stress testing + workers: 200 + + # Longer duration to observe system behavior under sustained load + duration: "1h" + + skipTlsVerify: false + +load: + # Ramp load model - gradually increase pressure + model: "ramp" + + # Start with light load (10 RPS) + min: 10 + + # Ramp up to very high load (1000 RPS) + max: 1000 + + # Ramp duration: 15 minutes to reach max load + rampDuration: "15m" + +scenarios: + # Primary endpoint stress test (70% of traffic) + - name: "Read Heavy Operations" + weight: 70 + steps: + - name: "List Resources" + request: + method: "GET" + path: "/api/resources" + assertions: + - type: statusCode + expected: 200 + - type: responseTime + max: "2s" + + - name: "Get Resource Details" + request: + method: "GET" + path: "/api/resources/123" + assertions: + - type: statusCode + expected: 200 + extract: + - type: jsonPath + name: "resourceId" + jsonPath: "$.id" + + # Write operations stress (20% of traffic) + - name: "Write Operations" + weight: 20 + steps: + - name: "Create Resource" + request: + method: "POST" + path: "/api/resources" + body: '{"name": "test-resource", "type": "stress-test"}' + assertions: + - type: statusCode + expected: 201 + extract: + - type: jsonPath + name: "newResourceId" + jsonPath: "$.id" + + - name: "Update Resource" + request: + method: "PUT" + path: "/api/resources/${newResourceId}" + body: '{"name": "updated-resource"}' + assertions: + - type: statusCode + expected: 200 + + # Delete operations (10% of traffic) + - name: "Delete Operations" + weight: 10 + 
steps: + - name: "Create Temporary Resource" + request: + method: "POST" + path: "/api/resources" + body: '{"name": "temp-resource", "temporary": true}' + extract: + - type: jsonPath + name: "tempId" + jsonPath: "$.id" + + - name: "Delete Resource" + request: + method: "DELETE" + path: "/api/resources/${tempId}" + assertions: + - type: statusCode + expected: 204 diff --git a/examples/data/products.json b/examples/data/products.json new file mode 100644 index 0000000..042e2aa --- /dev/null +++ b/examples/data/products.json @@ -0,0 +1,62 @@ +[ + { + "product_name": "Laptop Pro 15", + "category": "electronics", + "sku": "ELEC-LAP-001", + "price": 1299.99 + }, + { + "product_name": "Wireless Mouse", + "category": "electronics", + "sku": "ELEC-MOU-002", + "price": 29.99 + }, + { + "product_name": "Mechanical Keyboard", + "category": "electronics", + "sku": "ELEC-KEY-003", + "price": 149.99 + }, + { + "product_name": "4K Monitor", + "category": "electronics", + "sku": "ELEC-MON-004", + "price": 499.99 + }, + { + "product_name": "USB-C Hub", + "category": "electronics", + "sku": "ELEC-HUB-005", + "price": 79.99 + }, + { + "product_name": "Ergonomic Chair", + "category": "furniture", + "sku": "FURN-CHA-001", + "price": 399.99 + }, + { + "product_name": "Standing Desk", + "category": "furniture", + "sku": "FURN-DSK-002", + "price": 599.99 + }, + { + "product_name": "LED Desk Lamp", + "category": "lighting", + "sku": "LGHT-LMP-001", + "price": 49.99 + }, + { + "product_name": "Noise Cancelling Headphones", + "category": "electronics", + "sku": "ELEC-HDP-006", + "price": 349.99 + }, + { + "product_name": "Webcam HD", + "category": "electronics", + "sku": "ELEC-CAM-007", + "price": 89.99 + } +] diff --git a/examples/data/users.csv b/examples/data/users.csv new file mode 100644 index 0000000..bc8c36d --- /dev/null +++ b/examples/data/users.csv @@ -0,0 +1,11 @@ +username,email,user_id +john.doe,john.doe@example.com,1001 +jane.smith,jane.smith@example.com,1002 
+bob.wilson,bob.wilson@example.com,1003 +alice.johnson,alice.johnson@example.com,1004 +charlie.brown,charlie.brown@example.com,1005 +diana.prince,diana.prince@example.com,1006 +evan.peters,evan.peters@example.com,1007 +fiona.apple,fiona.apple@example.com,1008 +george.lucas,george.lucas@example.com,1009 +hannah.montana,hannah.montana@example.com,1010 diff --git a/examples/generate_docs.rs b/examples/generate_docs.rs new file mode 100644 index 0000000..c571e78 --- /dev/null +++ b/examples/generate_docs.rs @@ -0,0 +1,39 @@ +use rust_loadtest::config_docs_generator::ConfigDocsGenerator; +use std::fs; + +fn main() { + println!("Generating configuration documentation...\n"); + + let generator = ConfigDocsGenerator::new(); + + // Generate JSON Schema + println!("1. Generating JSON Schema..."); + let schema = generator.generate_json_schema(); + fs::write("docs/config-schema.json", &schema).expect("Failed to write JSON Schema"); + println!( + " ✅ Saved to docs/config-schema.json ({} bytes)", + schema.len() + ); + + // Generate Markdown documentation + println!("2. Generating Markdown documentation..."); + let markdown = generator.generate_markdown_docs(); + fs::write("docs/CONFIG_SCHEMA.md", &markdown).expect("Failed to write Markdown docs"); + println!( + " ✅ Saved to docs/CONFIG_SCHEMA.md ({} bytes)", + markdown.len() + ); + + // Generate VS Code snippets + println!("3. Generating VS Code snippets..."); + let snippets = generator.generate_vscode_snippets(); + fs::create_dir_all(".vscode").ok(); + fs::write(".vscode/rust-loadtest.code-snippets", &snippets) + .expect("Failed to write VS Code snippets"); + println!( + " ✅ Saved to .vscode/rust-loadtest.code-snippets ({} bytes)", + snippets.len() + ); + + println!("\n✅ All documentation generated successfully!"); +} diff --git a/examples/scenario_example.rs b/examples/scenario_example.rs new file mode 100644 index 0000000..e73705e --- /dev/null +++ b/examples/scenario_example.rs @@ -0,0 +1,226 @@ +//! 
Example of using the multi-step scenario execution engine. +//! +//! This example demonstrates how to define and execute a multi-step scenario +//! that simulates a user browsing products, adding items to cart, and checking out. +//! +//! Run with: cargo run --example scenario_example + +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{ + Assertion, Extractor, RequestConfig, Scenario, ScenarioContext, Step, ThinkTime, + VariableExtraction, +}; +use std::collections::HashMap; +use std::time::Duration; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Initialize tracing for logs + tracing_subscriber::fmt::init(); + + // Create HTTP client + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(30)) + .build()?; + + // Define a shopping scenario + let scenario = create_shopping_scenario(); + + // Create scenario executor + let base_url = "https://ecom.edge.baugus-lab.com".to_string(); + let executor = ScenarioExecutor::new(base_url, client); + + // Execute the scenario + let mut context = ScenarioContext::new(); + let result = executor.execute(&scenario, &mut context).await; + + // Print results + println!("\n=== Scenario Execution Results ==="); + println!("Scenario: {}", result.scenario_name); + println!("Success: {}", result.success); + println!("Total Time: {}ms", result.total_time_ms); + println!( + "Steps Completed: {}/{}", + result.steps_completed, + result.steps.len() + ); + + if let Some(failed_step) = result.failed_at_step { + println!("Failed at step: {}", failed_step); + } + + println!("\n=== Step Results ==="); + for (idx, step_result) in result.steps.iter().enumerate() { + println!( + "Step {}: {} - {} ({}ms) - Status: {:?}", + idx + 1, + step_result.step_name, + if step_result.success { "✓" } else { "✗" }, + step_result.response_time_ms, + step_result.status_code + ); + if let Some(error) = &step_result.error { + println!(" Error: {}", error); + } + } + + // Print extracted variables + 
println!("\n=== Extracted Variables ==="); + if let Some(product_id) = context.get_variable("product_id") { + println!("product_id: {}", product_id); + } + if let Some(auth_token) = context.get_variable("auth_token") { + println!("auth_token: {}...", &auth_token[..auth_token.len().min(20)]); + } + if let Some(cart_id) = context.get_variable("cart_id") { + println!("cart_id: {}", cart_id); + } + + Ok(()) +} + +/// Create a shopping scenario with multiple steps. +fn create_shopping_scenario() -> Scenario { + Scenario { + name: "E-commerce Shopping Flow".to_string(), + weight: 1.0, + steps: vec![ + // Step 1: Health check + Step { + name: "Health Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/health".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + // Step 2: Browse products and extract first product ID + Step { + name: "Browse Products".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/products?limit=10".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![ + // ⭐ Extract first product ID from JSON response + // This demonstrates JSONPath extraction: $.products[0].id + VariableExtraction { + name: "product_id".to_string(), + extractor: Extractor::JsonPath("$.products[0].id".to_string()), + }, + ], + assertions: vec![ + Assertion::StatusCode(200), + Assertion::BodyContains("products".to_string()), + ], + think_time: Some(ThinkTime::Fixed(Duration::from_secs(2))), + }, + // Step 3: View product details using extracted product_id + Step { + name: "View Product Details".to_string(), + request: RequestConfig { + method: "GET".to_string(), + // ⭐ Variable substitution: ${product_id} is replaced with extracted value + path: "/products/${product_id}".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], 
+ assertions: vec![ + Assertion::StatusCode(200), + Assertion::ResponseTime(Duration::from_millis(500)), + ], + think_time: Some(ThinkTime::Fixed(Duration::from_secs(3))), + }, + // Step 4: Register user + Step { + name: "Register User".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/auth/register".to_string(), + body: Some( + r#"{ + "email": "loadtest-user-${timestamp}@example.com", + "password": "TestPass123!", + "name": "Load Test User" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: vec![ + // Extract auth token from response + VariableExtraction { + name: "auth_token".to_string(), + extractor: Extractor::JsonPath("$.token".to_string()), + }, + ], + assertions: vec![Assertion::StatusCode(201)], + think_time: Some(ThinkTime::Fixed(Duration::from_secs(1))), + }, + // Step 5: Add item to cart (using auth token) + Step { + name: "Add to Cart".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/cart/items".to_string(), + body: Some( + r#"{ + "product_id": "${product_id}", + "quantity": 2 + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers.insert( + "Authorization".to_string(), + "Bearer ${auth_token}".to_string(), + ); + headers + }, + }, + extractions: vec![VariableExtraction { + name: "cart_id".to_string(), + extractor: Extractor::JsonPath("$.cart.id".to_string()), + }], + assertions: vec![Assertion::StatusCode(201)], + think_time: Some(ThinkTime::Fixed(Duration::from_secs(2))), + }, + // Step 6: View cart + Step { + name: "View Cart".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/cart".to_string(), + body: None, + headers: { + let mut headers = HashMap::new(); + headers.insert( + "Authorization".to_string(), + "Bearer 
${auth_token}".to_string(), + ); + headers + }, + }, + extractions: vec![], + assertions: vec![ + Assertion::StatusCode(200), + Assertion::BodyContains("items".to_string()), + ], + think_time: Some(ThinkTime::Fixed(Duration::from_secs(5))), + }, + ], + } +} diff --git a/generate_docs.rs b/generate_docs.rs new file mode 100644 index 0000000..52c3262 --- /dev/null +++ b/generate_docs.rs @@ -0,0 +1,39 @@ +#!/usr/bin/env rust-script +//! Script to generate configuration documentation. +//! +//! Usage: rust-script generate_docs.rs + +use rust_loadtest::config_docs_generator::ConfigDocsGenerator; +use std::fs; + +fn main() { + println!("Generating configuration documentation...\n"); + + let generator = ConfigDocsGenerator::new(); + + // Generate JSON Schema + println!("1. Generating JSON Schema..."); + let schema = generator.generate_json_schema(); + fs::write("docs/config-schema.json", &schema).expect("Failed to write JSON Schema"); + println!(" ✅ Saved to docs/config-schema.json"); + + // Generate Markdown documentation + println!("2. Generating Markdown documentation..."); + let markdown = generator.generate_markdown_docs(); + fs::write("docs/CONFIG_SCHEMA.md", &markdown).expect("Failed to write Markdown docs"); + println!(" ✅ Saved to docs/CONFIG_SCHEMA.md"); + + // Generate VS Code snippets + println!("3. 
/// Result of running an assertion.
///
/// One `AssertionResult` is produced per assertion checked; `actual` and
/// `expected` hold human-readable renderings intended for debug output,
/// not for programmatic comparison.
#[derive(Debug, Clone)]
pub struct AssertionResult {
    /// The assertion that was checked
    pub assertion: Assertion,

    /// Whether the assertion passed
    pub passed: bool,

    /// Actual value observed (for debugging)
    pub actual: String,

    /// Expected value (for debugging)
    pub expected: String,

    /// Error message if assertion failed; `None` when `passed` is true
    pub error_message: Option<String>,
}
+#[derive(Error, Debug)] +pub enum AssertionError { + #[error("Status code mismatch: expected {expected}, got {actual}")] + StatusCodeMismatch { expected: u16, actual: u16 }, + + #[error("Response time {actual_ms}ms exceeds threshold {threshold_ms}ms")] + ResponseTimeTooSlow { actual_ms: u64, threshold_ms: u64 }, + + #[error("JSONPath assertion failed: {0}")] + JsonPathFailed(String), + + #[error("Body does not contain expected substring: {0}")] + BodyNotContains(String), + + #[error("Body does not match regex: {0}")] + BodyNotMatches(String), + + #[error("Header '{0}' not found in response")] + HeaderNotFound(String), + + #[error("Regex compilation failed: {0}")] + RegexError(#[from] regex::Error), + + #[error("Invalid JSON: {0}")] + InvalidJson(String), +} + +/// Run all assertions against a response. +/// +/// # Arguments +/// * `assertions` - List of assertions to check +/// * `status_code` - HTTP status code from response +/// * `response_time_ms` - Response time in milliseconds +/// * `response_body` - Response body as string +/// * `response_headers` - Response headers +/// +/// # Returns +/// Vector of assertion results (one per assertion) +pub fn run_assertions( + assertions: &[Assertion], + status_code: u16, + response_time_ms: u64, + response_body: &str, + response_headers: &reqwest::header::HeaderMap, +) -> Vec { + let mut results = Vec::new(); + + for assertion in assertions { + debug!(assertion = ?assertion, "Running assertion"); + + let result = match run_single_assertion( + assertion, + status_code, + response_time_ms, + response_body, + response_headers, + ) { + Ok(()) => { + debug!(assertion = ?assertion, "Assertion passed"); + AssertionResult { + assertion: assertion.clone(), + passed: true, + actual: format_actual_value( + assertion, + status_code, + response_time_ms, + response_body, + ), + expected: format_expected_value(assertion), + error_message: None, + } + } + Err(e) => { + warn!(assertion = ?assertion, error = %e, "Assertion failed"); + 
AssertionResult { + assertion: assertion.clone(), + passed: false, + actual: format_actual_value( + assertion, + status_code, + response_time_ms, + response_body, + ), + expected: format_expected_value(assertion), + error_message: Some(e.to_string()), + } + } + }; + + results.push(result); + } + + results +} + +/// Run a single assertion. +fn run_single_assertion( + assertion: &Assertion, + status_code: u16, + response_time_ms: u64, + response_body: &str, + response_headers: &reqwest::header::HeaderMap, +) -> Result<(), AssertionError> { + match assertion { + Assertion::StatusCode(expected) => { + if status_code == *expected { + Ok(()) + } else { + Err(AssertionError::StatusCodeMismatch { + expected: *expected, + actual: status_code, + }) + } + } + + Assertion::ResponseTime(threshold) => { + let threshold_ms = threshold.as_millis() as u64; + if response_time_ms <= threshold_ms { + Ok(()) + } else { + Err(AssertionError::ResponseTimeTooSlow { + actual_ms: response_time_ms, + threshold_ms, + }) + } + } + + Assertion::JsonPath { path, expected } => { + assert_json_path(response_body, path, expected.as_deref()) + } + + Assertion::BodyContains(substring) => { + if response_body.contains(substring) { + Ok(()) + } else { + Err(AssertionError::BodyNotContains(substring.clone())) + } + } + + Assertion::BodyMatches(pattern) => { + let re = Regex::new(pattern)?; + if re.is_match(response_body) { + Ok(()) + } else { + Err(AssertionError::BodyNotMatches(pattern.clone())) + } + } + + Assertion::HeaderExists(header_name) => { + if response_headers.contains_key(header_name) { + Ok(()) + } else { + Err(AssertionError::HeaderNotFound(header_name.clone())) + } + } + } +} + +/// Assert JSONPath condition. 
+fn assert_json_path( + json_body: &str, + path: &str, + expected: Option<&str>, +) -> Result<(), AssertionError> { + use serde_json_path::JsonPath; + + // Parse JSON + let json: Value = + serde_json::from_str(json_body).map_err(|e| AssertionError::InvalidJson(e.to_string()))?; + + // Parse JSONPath + let json_path = JsonPath::parse(path).map_err(|e| { + AssertionError::JsonPathFailed(format!("Invalid JSONPath '{}': {}", path, e)) + })?; + + // Query + let node_list = json_path.query(&json); + + // Check if path exists + if let Ok(value) = node_list.exactly_one() { + // Path exists, now check expected value if provided + if let Some(expected_value) = expected { + let actual_str = match value { + Value::String(s) => s.clone(), + Value::Number(n) => n.to_string(), + Value::Bool(b) => b.to_string(), + Value::Null => "null".to_string(), + _ => value.to_string(), + }; + + if actual_str == expected_value { + Ok(()) + } else { + Err(AssertionError::JsonPathFailed(format!( + "JSONPath '{}' value mismatch: expected '{}', got '{}'", + path, expected_value, actual_str + ))) + } + } else { + // No expected value, just checking existence + Ok(()) + } + } else { + Err(AssertionError::JsonPathFailed(format!( + "JSONPath '{}' did not match exactly one value", + path + ))) + } +} + +/// Format actual value for display. +fn format_actual_value( + assertion: &Assertion, + status_code: u16, + response_time_ms: u64, + response_body: &str, +) -> String { + match assertion { + Assertion::StatusCode(_) => status_code.to_string(), + Assertion::ResponseTime(_) => format!("{}ms", response_time_ms), + Assertion::JsonPath { path, .. 
} => { + format!("JSONPath: {}", path) + } + Assertion::BodyContains(_) => { + if response_body.len() > 100 { + format!("{}...", &response_body[..100]) + } else { + response_body.to_string() + } + } + Assertion::BodyMatches(_) => { + if response_body.len() > 100 { + format!("{}...", &response_body[..100]) + } else { + response_body.to_string() + } + } + Assertion::HeaderExists(header) => format!("header '{}'", header), + } +} + +/// Format expected value for display. +fn format_expected_value(assertion: &Assertion) -> String { + match assertion { + Assertion::StatusCode(code) => code.to_string(), + Assertion::ResponseTime(duration) => format!("<{}ms", duration.as_millis()), + Assertion::JsonPath { path, expected } => { + if let Some(exp) = expected { + format!("{} = {}", path, exp) + } else { + format!("{} exists", path) + } + } + Assertion::BodyContains(substring) => format!("contains '{}'", substring), + Assertion::BodyMatches(pattern) => format!("matches /{}/", pattern), + Assertion::HeaderExists(header) => format!("header '{}' exists", header), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reqwest::header::HeaderMap; + + #[test] + fn test_status_code_assertion_pass() { + let assertion = Assertion::StatusCode(200); + let result = run_single_assertion(&assertion, 200, 100, "", &HeaderMap::new()); + assert!(result.is_ok()); + } + + #[test] + fn test_status_code_assertion_fail() { + let assertion = Assertion::StatusCode(200); + let result = run_single_assertion(&assertion, 404, 100, "", &HeaderMap::new()); + assert!(result.is_err()); + } + + #[test] + fn test_response_time_assertion_pass() { + let assertion = Assertion::ResponseTime(Duration::from_millis(500)); + let result = run_single_assertion(&assertion, 200, 300, "", &HeaderMap::new()); + assert!(result.is_ok()); + } + + #[test] + fn test_response_time_assertion_fail() { + let assertion = Assertion::ResponseTime(Duration::from_millis(500)); + let result = run_single_assertion(&assertion, 200, 700, 
"", &HeaderMap::new()); + assert!(result.is_err()); + } + + #[test] + fn test_json_path_existence() { + let json = r#"{"user": {"id": "123"}}"#; + let assertion = Assertion::JsonPath { + path: "$.user.id".to_string(), + expected: None, + }; + let result = run_single_assertion(&assertion, 200, 100, json, &HeaderMap::new()); + assert!(result.is_ok()); + } + + #[test] + fn test_json_path_value_match() { + let json = r#"{"status": "ok"}"#; + let assertion = Assertion::JsonPath { + path: "$.status".to_string(), + expected: Some("ok".to_string()), + }; + let result = run_single_assertion(&assertion, 200, 100, json, &HeaderMap::new()); + assert!(result.is_ok()); + } + + #[test] + fn test_json_path_value_mismatch() { + let json = r#"{"status": "error"}"#; + let assertion = Assertion::JsonPath { + path: "$.status".to_string(), + expected: Some("ok".to_string()), + }; + let result = run_single_assertion(&assertion, 200, 100, json, &HeaderMap::new()); + assert!(result.is_err()); + } + + #[test] + fn test_body_contains_pass() { + let body = "Hello, world!"; + let assertion = Assertion::BodyContains("world".to_string()); + let result = run_single_assertion(&assertion, 200, 100, body, &HeaderMap::new()); + assert!(result.is_ok()); + } + + #[test] + fn test_body_contains_fail() { + let body = "Hello, world!"; + let assertion = Assertion::BodyContains("missing".to_string()); + let result = run_single_assertion(&assertion, 200, 100, body, &HeaderMap::new()); + assert!(result.is_err()); + } + + #[test] + fn test_body_matches_regex_pass() { + let body = "Order #12345 confirmed"; + let assertion = Assertion::BodyMatches(r"Order #\d+".to_string()); + let result = run_single_assertion(&assertion, 200, 100, body, &HeaderMap::new()); + assert!(result.is_ok()); + } + + #[test] + fn test_body_matches_regex_fail() { + let body = "No order here"; + let assertion = Assertion::BodyMatches(r"Order #\d+".to_string()); + let result = run_single_assertion(&assertion, 200, 100, body, 
&HeaderMap::new()); + assert!(result.is_err()); + } + + #[test] + fn test_run_multiple_assertions() { + let json = r#"{"status": "ok", "count": 5}"#; + let assertions = vec![ + Assertion::StatusCode(200), + Assertion::ResponseTime(Duration::from_millis(500)), + Assertion::JsonPath { + path: "$.status".to_string(), + expected: Some("ok".to_string()), + }, + Assertion::BodyContains("count".to_string()), + ]; + + let results = run_assertions(&assertions, 200, 300, json, &HeaderMap::new()); + + assert_eq!(results.len(), 4); + assert!(results.iter().all(|r| r.passed)); + } + + #[test] + fn test_run_assertions_with_failures() { + let assertions = vec![ + Assertion::StatusCode(200), // Pass + Assertion::StatusCode(404), // Fail + Assertion::BodyContains("test".to_string()), // Pass + ]; + + let body = "This is a test"; + let results = run_assertions(&assertions, 200, 100, body, &HeaderMap::new()); + + assert_eq!(results.len(), 3); + assert!(results[0].passed); // StatusCode 200 + assert!(!results[1].passed); // StatusCode 404 + assert!(results[2].passed); // BodyContains + } +} diff --git a/src/client.rs b/src/client.rs index 7c7e8e3..3c66b63 100644 --- a/src/client.rs +++ b/src/client.rs @@ -4,6 +4,7 @@ use std::io::Read; use std::net::SocketAddr; use std::str::FromStr; +use crate::connection_pool::PoolConfig; use crate::utils::parse_headers_with_escapes; /// Configuration for building the HTTP client. @@ -13,6 +14,7 @@ pub struct ClientConfig { pub client_cert_path: Option, pub client_key_path: Option, pub custom_headers: Option, + pub pool_config: Option, } /// Result of building the client, includes parsed headers for logging. 
@@ -50,6 +52,14 @@ pub fn build_client( println!("Successfully configured custom default headers."); } + // Connection Pool Configuration + let pool_config = config.pool_config.clone().unwrap_or_default(); + client_builder = pool_config.apply_to_builder(client_builder); + println!( + "Connection pool configured: max_idle_per_host={}, idle_timeout={:?}", + pool_config.max_idle_per_host, pool_config.idle_timeout + ); + // Build client with TLS settings let client = if config.skip_tls_verify { println!("WARNING: Skipping TLS certificate verification."); diff --git a/src/config.rs b/src/config.rs index 8a0666e..171f803 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,8 +4,10 @@ use tokio::time::Duration; use tracing::{info, warn}; use crate::client::ClientConfig; +use crate::config_merge::ConfigMerger; use crate::load_models::LoadModel; use crate::utils::parse_duration_string; +use crate::yaml_config::{YamlConfig, YamlConfigError}; /// Configuration errors with descriptive messages. #[derive(Error, Debug)] @@ -30,6 +32,9 @@ pub enum ConfigError { #[error("Parse error: {0}")] ParseError(String), + + #[error("YAML config error: {0}")] + YamlConfig(#[from] YamlConfigError), } /// Main configuration for the load test. @@ -47,6 +52,15 @@ pub struct Config { pub client_cert_path: Option, pub client_key_path: Option, pub custom_headers: Option, + + // Memory optimization settings (Issue #66, #68, #67, #70, #72) + pub percentile_tracking_enabled: bool, + pub percentile_sampling_rate: u8, // 1-100: percentage of requests to record (Issue #70) + pub max_histogram_labels: usize, + pub histogram_rotation_interval: Duration, // 0 = disabled + pub memory_warning_threshold_percent: f64, + pub memory_critical_threshold_percent: f64, + pub auto_disable_percentiles_on_warning: bool, } /// Helper to get a required environment variable. 
@@ -77,11 +91,215 @@ fn env_bool(name: &str, default: bool) -> bool { } impl Config { + /// Loads configuration from a YAML file with environment variable overrides. + /// + /// Environment variables can override YAML values according to precedence: + /// env vars > YAML file > defaults + /// + /// Environment variable mapping: + /// - `NUM_CONCURRENT_TASKS` overrides `config.workers` + /// - `REQUEST_TIMEOUT` overrides `config.timeout` + /// - `SKIP_TLS_VERIFY` overrides `config.skipTlsVerify` + /// - `TARGET_URL` overrides `config.baseUrl` + /// - `TEST_DURATION` overrides `config.duration` + /// - `LOAD_MODEL_TYPE` overrides `load.model` + /// - `TARGET_RPS` overrides `load.target` (for RPS model) + /// - `MIN_RPS`, `MAX_RPS`, `RAMP_DURATION` override ramp model params + /// - `CUSTOM_HEADERS` overrides `config.customHeaders` + pub fn from_yaml_with_env_overrides(yaml_config: &YamlConfig) -> Result { + // Apply environment variable overrides to YAML config + + // Base URL: env var TARGET_URL overrides YAML config.baseUrl + let target_url = ConfigMerger::merge_string( + Some(yaml_config.config.base_url.clone()), + "TARGET_URL", + yaml_config.config.base_url.clone(), + ); + + // Workers: env var NUM_CONCURRENT_TASKS overrides YAML config.workers + let num_concurrent_tasks = + ConfigMerger::merge_workers(Some(yaml_config.config.workers), "NUM_CONCURRENT_TASKS"); + + // Timeout: env var REQUEST_TIMEOUT overrides YAML config.timeout + let _timeout_duration = ConfigMerger::merge_timeout( + Some(yaml_config.config.timeout.to_std_duration()?), + "REQUEST_TIMEOUT", + ); + + // Test duration: env var TEST_DURATION overrides YAML config.duration + let test_duration = ConfigMerger::merge_timeout( + Some(yaml_config.config.duration.to_std_duration()?), + "TEST_DURATION", + ); + + // Skip TLS verify: env var SKIP_TLS_VERIFY overrides YAML config.skipTlsVerify + let skip_tls_verify = ConfigMerger::merge_skip_tls_verify( + Some(yaml_config.config.skip_tls_verify), + 
"SKIP_TLS_VERIFY", + ); + + // Custom headers: env var CUSTOM_HEADERS overrides YAML config.customHeaders + let custom_headers = ConfigMerger::merge_optional_string( + yaml_config.config.custom_headers.clone(), + "CUSTOM_HEADERS", + ); + + // Load model: env vars can override YAML load model entirely + let load_model = Self::parse_load_model_from_yaml_with_env_override(&yaml_config.load)?; + + // Request type: env var REQUEST_TYPE (default GET if not in YAML) + let request_type = env::var("REQUEST_TYPE").unwrap_or_else(|_| "GET".to_string()); + + // Send JSON: env var SEND_JSON + let send_json = env_bool("SEND_JSON", false); + + let json_payload = if send_json { + Some( + env_required("JSON_PAYLOAD").map_err(|_| ConfigError::MissingLoadModelParams { + model: "SEND_JSON=true".into(), + required: "JSON_PAYLOAD".into(), + })?, + ) + } else { + None + }; + + // Optional fields from env vars only (not in YAML yet) + let resolve_target_addr = env::var("RESOLVE_TARGET_ADDR").ok(); + let client_cert_path = env::var("CLIENT_CERT_PATH").ok(); + let client_key_path = env::var("CLIENT_KEY_PATH").ok(); + + // Memory optimization settings (Issue #66, #68, #67, #70, #72) + let percentile_tracking_enabled = env_bool("PERCENTILE_TRACKING_ENABLED", true); + let percentile_sampling_rate: u8 = env_parse_or("PERCENTILE_SAMPLING_RATE", 100u8)?; + let max_histogram_labels: usize = env_parse_or("MAX_HISTOGRAM_LABELS", 100)?; + + // Histogram rotation interval (0 = disabled) + let histogram_rotation_interval = + if let Ok(interval_str) = env::var("HISTOGRAM_ROTATION_INTERVAL") { + parse_duration_string(&interval_str).map_err(|e| ConfigError::InvalidDuration { + var: "HISTOGRAM_ROTATION_INTERVAL".into(), + message: e, + })? 
+ } else { + Duration::from_secs(0) // Disabled by default + }; + + // Auto-OOM protection settings (Issue #72) + let memory_warning_threshold_percent: f64 = + env_parse_or("MEMORY_WARNING_THRESHOLD_PERCENT", 80.0)?; + let memory_critical_threshold_percent: f64 = + env_parse_or("MEMORY_CRITICAL_THRESHOLD_PERCENT", 90.0)?; + let auto_disable_percentiles_on_warning = + env_bool("AUTO_DISABLE_PERCENTILES_ON_WARNING", true); + + let config = Config { + target_url, + request_type, + send_json, + json_payload, + num_concurrent_tasks, + test_duration, + load_model, + skip_tls_verify, + resolve_target_addr, + client_cert_path, + client_key_path, + custom_headers, + percentile_tracking_enabled, + percentile_sampling_rate, + max_histogram_labels, + histogram_rotation_interval, + memory_warning_threshold_percent, + memory_critical_threshold_percent, + auto_disable_percentiles_on_warning, + }; + + config.validate()?; + Ok(config) + } + + /// Parse load model from YAML with environment variable overrides. 
+ fn parse_load_model_from_yaml_with_env_override( + yaml_load: &crate::yaml_config::YamlLoadModel, + ) -> Result { + // Check if LOAD_MODEL_TYPE env var is set - if so, use env-based parsing + if let Ok(_model_type) = env::var("LOAD_MODEL_TYPE") { + return Self::parse_load_model("2h"); // Use env-based parsing + } + + // Otherwise, convert YAML load model to LoadModel + let base_load_model = yaml_load.to_load_model()?; + + // Apply environment variable overrides to specific load model parameters + match base_load_model { + LoadModel::Rps { target_rps } => { + // TARGET_RPS can override YAML target + let final_rps = + ConfigMerger::merge_rps(Some(target_rps), "TARGET_RPS").unwrap_or(target_rps); + Ok(LoadModel::Rps { + target_rps: final_rps, + }) + } + LoadModel::RampRps { + min_rps, + max_rps, + ramp_duration, + } => { + // MIN_RPS, MAX_RPS, RAMP_DURATION can override YAML values + let final_min = + ConfigMerger::merge_rps(Some(min_rps), "MIN_RPS").unwrap_or(min_rps); + let final_max = + ConfigMerger::merge_rps(Some(max_rps), "MAX_RPS").unwrap_or(max_rps); + let final_duration = + ConfigMerger::merge_timeout(Some(ramp_duration), "RAMP_DURATION"); + Ok(LoadModel::RampRps { + min_rps: final_min, + max_rps: final_max, + ramp_duration: final_duration, + }) + } + LoadModel::DailyTraffic { + min_rps, + mid_rps, + max_rps, + cycle_duration, + morning_ramp_ratio, + peak_sustain_ratio, + mid_decline_ratio, + mid_sustain_ratio, + evening_decline_ratio, + } => { + // DAILY_MIN_RPS, DAILY_MID_RPS, DAILY_MAX_RPS can override YAML + let final_min = + ConfigMerger::merge_rps(Some(min_rps), "DAILY_MIN_RPS").unwrap_or(min_rps); + let final_mid = + ConfigMerger::merge_rps(Some(mid_rps), "DAILY_MID_RPS").unwrap_or(mid_rps); + let final_max = + ConfigMerger::merge_rps(Some(max_rps), "DAILY_MAX_RPS").unwrap_or(max_rps); + let final_cycle = + ConfigMerger::merge_timeout(Some(cycle_duration), "DAILY_CYCLE_DURATION"); + Ok(LoadModel::DailyTraffic { + min_rps: final_min, + mid_rps: 
final_mid, + max_rps: final_max, + cycle_duration: final_cycle, + morning_ramp_ratio, + peak_sustain_ratio, + mid_decline_ratio, + mid_sustain_ratio, + evening_decline_ratio, + }) + } + LoadModel::Concurrent => Ok(LoadModel::Concurrent), + } + } + /// Loads configuration from environment variables. pub fn from_env() -> Result { let target_url = env_required("TARGET_URL")?; - let request_type = env::var("REQUEST_TYPE").unwrap_or_else(|_| "POST".to_string()); + let request_type = env::var("REQUEST_TYPE").unwrap_or_else(|_| "GET".to_string()); let send_json = env_bool("SEND_JSON", false); @@ -115,6 +333,30 @@ impl Config { let client_key_path = env::var("CLIENT_KEY_PATH").ok(); let custom_headers = env::var("CUSTOM_HEADERS").ok(); + // Memory optimization settings (Issue #66, #68, #67, #70, #72) + let percentile_tracking_enabled = env_bool("PERCENTILE_TRACKING_ENABLED", true); + let percentile_sampling_rate: u8 = env_parse_or("PERCENTILE_SAMPLING_RATE", 100u8)?; + let max_histogram_labels: usize = env_parse_or("MAX_HISTOGRAM_LABELS", 100)?; + + // Histogram rotation interval (0 = disabled) + let histogram_rotation_interval = + if let Ok(interval_str) = env::var("HISTOGRAM_ROTATION_INTERVAL") { + parse_duration_string(&interval_str).map_err(|e| ConfigError::InvalidDuration { + var: "HISTOGRAM_ROTATION_INTERVAL".into(), + message: e, + })? 
+ } else { + Duration::from_secs(0) // Disabled by default + }; + + // Auto-OOM protection settings (Issue #72) + let memory_warning_threshold_percent: f64 = + env_parse_or("MEMORY_WARNING_THRESHOLD_PERCENT", 80.0)?; + let memory_critical_threshold_percent: f64 = + env_parse_or("MEMORY_CRITICAL_THRESHOLD_PERCENT", 90.0)?; + let auto_disable_percentiles_on_warning = + env_bool("AUTO_DISABLE_PERCENTILES_ON_WARNING", true); + let config = Config { target_url, request_type, @@ -128,6 +370,13 @@ impl Config { client_cert_path, client_key_path, custom_headers, + percentile_tracking_enabled, + percentile_sampling_rate, + max_histogram_labels, + histogram_rotation_interval, + memory_warning_threshold_percent, + memory_critical_threshold_percent, + auto_disable_percentiles_on_warning, }; config.validate()?; @@ -293,6 +542,17 @@ impl Config { return Err(ConfigError::IncompleteMtls); } + // Validate percentile sampling rate (Issue #70) + if self.percentile_sampling_rate == 0 || self.percentile_sampling_rate > 100 { + return Err(ConfigError::InvalidValue { + var: "PERCENTILE_SAMPLING_RATE".into(), + message: format!( + "Must be between 1 and 100 (got {})", + self.percentile_sampling_rate + ), + }); + } + Ok(()) } @@ -312,6 +572,13 @@ impl Config { client_cert_path: None, client_key_path: None, custom_headers: None, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, + max_histogram_labels: 100, + histogram_rotation_interval: Duration::from_secs(0), + memory_warning_threshold_percent: 80.0, + memory_critical_threshold_percent: 90.0, + auto_disable_percentiles_on_warning: true, } } @@ -323,6 +590,7 @@ impl Config { client_cert_path: self.client_cert_path.clone(), client_key_path: self.client_key_path.clone(), custom_headers: self.custom_headers.clone(), + pool_config: None, // Use default pool configuration } } @@ -340,9 +608,47 @@ impl Config { skip_tls_verify = self.skip_tls_verify, mtls_enabled = mtls_enabled, custom_headers_count = custom_headers_count, + 
percentile_tracking = self.percentile_tracking_enabled, "Starting load test" ); + if !self.percentile_tracking_enabled { + warn!( + "Percentile tracking is DISABLED - no latency percentiles will be collected. \ + This reduces memory usage for high-load tests. \ + Set PERCENTILE_TRACKING_ENABLED=true to enable." + ); + } else { + info!( + max_histogram_labels = self.max_histogram_labels, + "Histogram label limit configured (Issue #68)" + ); + + if self.percentile_sampling_rate < 100 { + info!( + sampling_rate_percent = self.percentile_sampling_rate, + "Percentile sampling enabled (Issue #70) - recording {}% of requests", + self.percentile_sampling_rate + ); + } + + if self.histogram_rotation_interval.as_secs() > 0 { + let interval_secs = self.histogram_rotation_interval.as_secs(); + let interval_str = if interval_secs >= 3600 { + format!("{}h", interval_secs / 3600) + } else if interval_secs >= 60 { + format!("{}m", interval_secs / 60) + } else { + format!("{}s", interval_secs) + }; + info!( + rotation_interval_secs = interval_secs, + "Histogram rotation enabled (Issue #67) - histograms will reset every {}", + interval_str + ); + } + } + if !parsed_headers.is_empty() { for (name, value) in parsed_headers.iter() { info!( @@ -352,6 +658,22 @@ impl Config { ); } } + + // Auto-OOM protection status (Issue #72) + if self.auto_disable_percentiles_on_warning { + info!( + memory_warning_threshold = self.memory_warning_threshold_percent, + memory_critical_threshold = self.memory_critical_threshold_percent, + "Auto-OOM protection ENABLED (Issue #72) - will automatically disable percentiles if memory exceeds {}%", + self.memory_warning_threshold_percent + ); + } else { + info!( + memory_warning_threshold = self.memory_warning_threshold_percent, + memory_critical_threshold = self.memory_critical_threshold_percent, + "Auto-OOM protection monitoring only (Issue #72) - will log warnings but NOT take automatic actions" + ); + } } } @@ -407,7 +729,7 @@ mod tests { let config = 
Config::from_env().unwrap(); assert_eq!(config.target_url, "https://example.com"); - assert_eq!(config.request_type, "POST"); + assert_eq!(config.request_type, "GET"); assert!(!config.send_json); assert!(config.json_payload.is_none()); assert_eq!(config.num_concurrent_tasks, 10); diff --git a/src/config_docs_generator.rs b/src/config_docs_generator.rs new file mode 100644 index 0000000..7551f43 --- /dev/null +++ b/src/config_docs_generator.rs @@ -0,0 +1,685 @@ +//! Configuration documentation generator (Issue #46). +//! +//! This module provides automatic documentation generation from config structures: +//! - JSON Schema export +//! - Markdown documentation +//! - VS Code snippets +//! +//! # Example +//! ```no_run +//! use rust_loadtest::config_docs_generator::ConfigDocsGenerator; +//! +//! let generator = ConfigDocsGenerator::new(); +//! +//! // Generate JSON Schema +//! let json_schema = generator.generate_json_schema(); +//! std::fs::write("schema.json", json_schema).unwrap(); +//! +//! // Generate Markdown docs +//! let markdown = generator.generate_markdown_docs(); +//! std::fs::write("CONFIG_SCHEMA.md", markdown).unwrap(); +//! +//! // Generate VS Code snippets +//! let snippets = generator.generate_vscode_snippets(); +//! std::fs::write("snippets.json", snippets).unwrap(); +//! ``` + +use serde_json; +use std::collections::HashMap; + +/// Configuration documentation generator. +pub struct ConfigDocsGenerator { + /// Application name + #[allow(dead_code)] + app_name: String, + + /// Version + #[allow(dead_code)] + version: String, +} + +impl ConfigDocsGenerator { + /// Create a new documentation generator. + pub fn new() -> Self { + Self { + app_name: "rust-loadtest".to_string(), + version: "1.0".to_string(), + } + } + + /// Generate JSON Schema for the configuration. + /// + /// Produces a JSON Schema that describes the YAML configuration format, + /// enabling IDE support, validation tools, and documentation generation. 
+ pub fn generate_json_schema(&self) -> String { + let schema = self.build_json_schema(); + serde_json::to_string_pretty(&schema).unwrap() + } + + /// Build the JSON Schema structure. + fn build_json_schema(&self) -> serde_json::Value { + serde_json::json!({ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Rust LoadTest Configuration", + "description": "YAML configuration schema for rust-loadtest load testing tool", + "type": "object", + "required": ["version", "config", "load", "scenarios"], + "properties": { + "version": { + "type": "string", + "description": "Configuration version (semantic versioning)", + "pattern": "^[0-9]+\\.[0-9]+$", + "examples": ["1.0"] + }, + "metadata": { + "type": "object", + "description": "Optional metadata about the test configuration", + "properties": { + "name": { + "type": "string", + "description": "Human-readable test name" + }, + "description": { + "type": "string", + "description": "Test description" + }, + "author": { + "type": "string", + "description": "Test author" + }, + "tags": { + "type": "array", + "description": "Tags for categorization", + "items": { + "type": "string" + } + } + } + }, + "config": { + "type": "object", + "description": "Global test configuration", + "required": ["baseUrl", "duration"], + "properties": { + "baseUrl": { + "type": "string", + "description": "Base URL of the API to test", + "format": "uri", + "examples": ["https://api.example.com"] + }, + "timeout": { + "description": "Request timeout (e.g., '30s', '1m')", + "oneOf": [ + {"type": "string", "pattern": "^[0-9]+(s|m|h)$"}, + {"type": "integer", "minimum": 1} + ], + "default": "30s" + }, + "workers": { + "type": "integer", + "description": "Number of concurrent workers", + "minimum": 1, + "default": 10 + }, + "duration": { + "description": "Test duration (e.g., '5m', '1h')", + "oneOf": [ + {"type": "string", "pattern": "^[0-9]+(s|m|h)$"}, + {"type": "integer", "minimum": 1} + ] + }, + "skipTlsVerify": { + "type": 
"boolean", + "description": "Skip TLS certificate verification (insecure)", + "default": false + }, + "customHeaders": { + "type": "string", + "description": "Custom HTTP headers (e.g., 'Authorization: Bearer token')" + } + } + }, + "load": { + "type": "object", + "description": "Load model configuration", + "required": ["model"], + "oneOf": [ + { + "properties": { + "model": {"const": "concurrent"}, + }, + "required": ["model"] + }, + { + "properties": { + "model": {"const": "rps"}, + "target": { + "type": "number", + "description": "Target requests per second", + "minimum": 0.1 + } + }, + "required": ["model", "target"] + }, + { + "properties": { + "model": {"const": "ramp"}, + "min": { + "type": "number", + "description": "Starting RPS", + "minimum": 0.1 + }, + "max": { + "type": "number", + "description": "Ending RPS", + "minimum": 0.1 + }, + "rampDuration": { + "description": "Ramp duration (e.g., '5m')", + "oneOf": [ + {"type": "string", "pattern": "^[0-9]+(s|m|h)$"}, + {"type": "integer", "minimum": 1} + ] + } + }, + "required": ["model", "min", "max", "rampDuration"] + } + ] + }, + "scenarios": { + "type": "array", + "description": "Test scenarios", + "minItems": 1, + "items": { + "type": "object", + "required": ["name", "steps"], + "properties": { + "name": { + "type": "string", + "description": "Scenario name" + }, + "weight": { + "type": "number", + "description": "Scenario weight for traffic distribution", + "minimum": 0.1, + "default": 100.0 + }, + "steps": { + "type": "array", + "description": "Scenario steps", + "minItems": 1, + "items": { + "type": "object", + "required": ["request"], + "properties": { + "name": { + "type": "string", + "description": "Step name" + }, + "request": { + "type": "object", + "required": ["method", "path"], + "properties": { + "method": { + "type": "string", + "enum": ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"], + "description": "HTTP method" + }, + "path": { + "type": "string", + "description": "Request 
path (relative to baseUrl)" + }, + "body": { + "type": "string", + "description": "Request body" + }, + "headers": { + "type": "object", + "description": "Custom request headers", + "additionalProperties": {"type": "string"} + } + } + }, + "thinkTime": { + "description": "Think time after step", + "oneOf": [ + {"type": "string", "pattern": "^[0-9]+(s|m|h)$"}, + {"type": "integer", "minimum": 0}, + { + "type": "object", + "properties": { + "min": {"type": "string"}, + "max": {"type": "string"} + }, + "required": ["min", "max"] + } + ] + }, + "assertions": { + "type": "array", + "description": "Response assertions", + "items": { + "type": "object" + } + }, + "extract": { + "type": "array", + "description": "Data extractors", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "jsonPath": {"type": "string"}, + "regex": {"type": "string"} + } + } + } + } + } + }, + "dataFile": { + "type": "object", + "description": "External data file", + "required": ["path", "format", "strategy"], + "properties": { + "path": { + "type": "string", + "description": "Path to data file" + }, + "format": { + "type": "string", + "enum": ["csv", "json"], + "description": "Data file format" + }, + "strategy": { + "type": "string", + "enum": ["sequential", "random", "cycle"], + "description": "Data iteration strategy" + } + } + }, + "config": { + "type": "object", + "description": "Scenario-level config overrides", + "properties": { + "timeout": {"type": "string"}, + "retryCount": {"type": "integer"}, + "retryDelay": {"type": "string"} + } + } + } + } + } + } + }) + } + + /// Generate Markdown documentation for the configuration schema. 
+ pub fn generate_markdown_docs(&self) -> String { + let mut md = String::new(); + + md.push_str("# Configuration Schema Reference\n\n"); + md.push_str("Complete reference for rust-loadtest YAML configuration format.\n\n"); + md.push_str("## Table of Contents\n\n"); + md.push_str("- [Version](#version)\n"); + md.push_str("- [Metadata](#metadata)\n"); + md.push_str("- [Config](#config)\n"); + md.push_str("- [Load Models](#load-models)\n"); + md.push_str("- [Scenarios](#scenarios)\n"); + md.push_str("- [Complete Example](#complete-example)\n\n"); + md.push_str("---\n\n"); + + // Version + md.push_str("## Version\n\n"); + md.push_str("**Field**: `version` (required)\n\n"); + md.push_str("**Type**: String\n\n"); + md.push_str("**Description**: Configuration version using semantic versioning.\n\n"); + md.push_str("**Format**: `major.minor`\n\n"); + md.push_str("**Example**:\n```yaml\nversion: \"1.0\"\n```\n\n"); + md.push_str("---\n\n"); + + // Metadata + md.push_str("## Metadata\n\n"); + md.push_str("**Field**: `metadata` (optional)\n\n"); + md.push_str("**Type**: Object\n\n"); + md.push_str("**Description**: Optional metadata about the test configuration.\n\n"); + md.push_str("**Properties**:\n\n"); + md.push_str("| Property | Type | Description |\n"); + md.push_str("|----------|------|-------------|\n"); + md.push_str("| `name` | string | Human-readable test name |\n"); + md.push_str("| `description` | string | Test description |\n"); + md.push_str("| `author` | string | Test author |\n"); + md.push_str("| `tags` | array | Tags for categorization |\n\n"); + md.push_str("**Example**:\n```yaml\nmetadata:\n name: \"API Load Test\"\n description: \"Testing API endpoints\"\n author: \"DevOps Team\"\n tags: [\"api\", \"production\"]\n```\n\n"); + md.push_str("---\n\n"); + + // Config + md.push_str("## Config\n\n"); + md.push_str("**Field**: `config` (required)\n\n"); + md.push_str("**Type**: Object\n\n"); + md.push_str("**Description**: Global test configuration.\n\n"); + 
md.push_str("**Properties**:\n\n"); + md.push_str("| Property | Type | Required | Default | Description |\n"); + md.push_str("|----------|------|----------|---------|-------------|\n"); + md.push_str("| `baseUrl` | string | Yes | - | Base URL of the API |\n"); + md.push_str("| `timeout` | string/int | No | `30s` | Request timeout |\n"); + md.push_str("| `workers` | integer | No | `10` | Concurrent workers |\n"); + md.push_str("| `duration` | string/int | Yes | - | Test duration |\n"); + md.push_str("| `skipTlsVerify` | boolean | No | `false` | Skip TLS verification |\n"); + md.push_str("| `customHeaders` | string | No | - | Custom HTTP headers |\n\n"); + md.push_str("**Duration Format**: `` where unit is `s` (seconds), `m` (minutes), or `h` (hours)\n\n"); + md.push_str("**Example**:\n```yaml\nconfig:\n baseUrl: \"https://api.example.com\"\n timeout: \"30s\"\n workers: 50\n duration: \"10m\"\n skipTlsVerify: false\n customHeaders: \"Authorization: Bearer token123\"\n```\n\n"); + md.push_str("---\n\n"); + + // Load Models + md.push_str("## Load Models\n\n"); + md.push_str("**Field**: `load` (required)\n\n"); + md.push_str("**Type**: Object\n\n"); + md.push_str("**Description**: Load generation model.\n\n"); + md.push_str("### Concurrent Model\n\n"); + md.push_str("Fixed number of concurrent workers.\n\n"); + md.push_str("```yaml\nload:\n model: \"concurrent\"\n```\n\n"); + md.push_str("### RPS Model\n\n"); + md.push_str("Target requests per second.\n\n"); + md.push_str( + "```yaml\nload:\n model: \"rps\"\n target: 100 # 100 requests/second\n```\n\n", + ); + md.push_str("### Ramp Model\n\n"); + md.push_str("Gradually increase RPS over time.\n\n"); + md.push_str("```yaml\nload:\n model: \"ramp\"\n min: 10 # Starting RPS\n max: 500 # Ending RPS\n rampDuration: \"5m\" # Ramp over 5 minutes\n```\n\n"); + md.push_str("---\n\n"); + + // Scenarios + md.push_str("## Scenarios\n\n"); + md.push_str("**Field**: `scenarios` (required)\n\n"); + md.push_str("**Type**: Array\n\n"); 
+ md.push_str("**Description**: Test scenarios with steps.\n\n"); + md.push_str("**Properties**:\n\n"); + md.push_str("| Property | Type | Required | Description |\n"); + md.push_str("|----------|------|----------|-------------|\n"); + md.push_str("| `name` | string | Yes | Scenario name |\n"); + md.push_str("| `weight` | number | No | Traffic distribution weight |\n"); + md.push_str("| `steps` | array | Yes | Scenario steps |\n"); + md.push_str("| `dataFile` | object | No | External data file |\n"); + md.push_str("| `config` | object | No | Scenario-level overrides |\n\n"); + md.push_str("### Step Properties\n\n"); + md.push_str("| Property | Type | Required | Description |\n"); + md.push_str("|----------|------|----------|-------------|\n"); + md.push_str("| `name` | string | No | Step name |\n"); + md.push_str("| `request` | object | Yes | HTTP request |\n"); + md.push_str("| `thinkTime` | string/object | No | Delay after step |\n"); + md.push_str("| `assertions` | array | No | Response assertions |\n"); + md.push_str("| `extract` | array | No | Data extractors |\n\n"); + md.push_str("**Example**:\n```yaml\nscenarios:\n - name: \"User Login\"\n weight: 100\n steps:\n - name: \"Login Request\"\n request:\n method: \"POST\"\n path: \"/auth/login\"\n body: '{\"username\": \"user\", \"password\": \"pass\"}'\n assertions:\n - statusCode: 200\n extract:\n - name: \"token\"\n jsonPath: \"$.token\"\n thinkTime: \"2s\"\n```\n\n"); + md.push_str("---\n\n"); + + // Complete Example + md.push_str("## Complete Example\n\n"); + md.push_str("```yaml\nversion: \"1.0\"\n\nmetadata:\n name: \"API Load Test\"\n description: \"Testing main API endpoints\"\n tags: [\"api\", \"production\"]\n\nconfig:\n baseUrl: \"https://api.example.com\"\n timeout: \"30s\"\n workers: 50\n duration: \"10m\"\n\nload:\n model: \"rps\"\n target: 100\n\nscenarios:\n - name: \"Get Users\"\n weight: 70\n steps:\n - request:\n method: \"GET\"\n path: \"/users\"\n assertions:\n - statusCode: 200\n\n - name: 
\"Create User\"\n weight: 30\n steps:\n - request:\n method: \"POST\"\n path: \"/users\"\n body: '{\"name\": \"Test User\"}'\n assertions:\n - statusCode: 201\n```\n\n"); + + md + } + + /// Generate VS Code snippets for configuration. + pub fn generate_vscode_snippets(&self) -> String { + let mut snippets = HashMap::new(); + + // Basic config snippet + snippets.insert( + "loadtest-basic", + serde_json::json!({ + "prefix": "loadtest-basic", + "body": [ + "version: \"1.0\"", + "", + "config:", + " baseUrl: \"${1:https://api.example.com}\"", + " workers: ${2:10}", + " duration: \"${3:5m}\"", + "", + "load:", + " model: \"${4|concurrent,rps,ramp|}\"", + " ${5:target: 100}", + "", + "scenarios:", + " - name: \"${6:My Scenario}\"", + " steps:", + " - request:", + " method: \"${7|GET,POST,PUT,DELETE|}\"", + " path: \"${8:/endpoint}\"", + " assertions:", + " - statusCode: ${9:200}" + ], + "description": "Basic load test configuration" + }), + ); + + // RPS load model snippet + snippets.insert( + "loadtest-rps", + serde_json::json!({ + "prefix": "loadtest-rps", + "body": [ + "load:", + " model: \"rps\"", + " target: ${1:100}" + ], + "description": "RPS load model" + }), + ); + + // Ramp load model snippet + snippets.insert( + "loadtest-ramp", + serde_json::json!({ + "prefix": "loadtest-ramp", + "body": [ + "load:", + " model: \"ramp\"", + " min: ${1:10}", + " max: ${2:500}", + " rampDuration: \"${3:5m}\"" + ], + "description": "Ramp load model" + }), + ); + + // Scenario snippet + snippets.insert( + "loadtest-scenario", + serde_json::json!({ + "prefix": "loadtest-scenario", + "body": [ + "- name: \"${1:Scenario Name}\"", + " weight: ${2:100}", + " steps:", + " - name: \"${3:Step Name}\"", + " request:", + " method: \"${4|GET,POST,PUT,DELETE|}\"", + " path: \"${5:/path}\"", + " assertions:", + " - statusCode: ${6:200}" + ], + "description": "Test scenario" + }), + ); + + // Step snippet + snippets.insert( + "loadtest-step", + serde_json::json!({ + "prefix": "loadtest-step", 
+ "body": [ + "- name: \"${1:Step Name}\"", + " request:", + " method: \"${2|GET,POST,PUT,DELETE|}\"", + " path: \"${3:/path}\"", + " ${4:body: '${5:{}}'", + " ${6:thinkTime: \"${7:2s}\"}", + " assertions:", + " - statusCode: ${8:200}" + ], + "description": "Test step" + }), + ); + + // Assertion snippets + snippets.insert( + "loadtest-assertion-status", + serde_json::json!({ + "prefix": "loadtest-assertion-status", + "body": ["- statusCode: ${1:200}"], + "description": "Status code assertion" + }), + ); + + snippets.insert( + "loadtest-assertion-jsonpath", + serde_json::json!({ + "prefix": "loadtest-assertion-jsonpath", + "body": [ + "- jsonPath:", + " path: \"${1:\\$.field}\"", + " expected: \"${2:value}\"" + ], + "description": "JSONPath assertion" + }), + ); + + // Extractor snippets + snippets.insert( + "loadtest-extract-jsonpath", + serde_json::json!({ + "prefix": "loadtest-extract-jsonpath", + "body": [ + "- name: \"${1:varName}\"", + " jsonPath: \"${2:\\$.field}\"" + ], + "description": "JSONPath extractor" + }), + ); + + // Data file snippet + snippets.insert( + "loadtest-datafile", + serde_json::json!({ + "prefix": "loadtest-datafile", + "body": [ + "dataFile:", + " path: \"${1:./data.csv}\"", + " format: \"${2|csv,json|}\"", + " strategy: \"${3|sequential,random,cycle|}\"" + ], + "description": "External data file" + }), + ); + + serde_json::to_string_pretty(&snippets).unwrap() + } +} + +impl Default for ConfigDocsGenerator { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_json_schema_generation() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + + assert!(!schema.is_empty()); + assert!(schema.contains("\"$schema\"")); + assert!(schema.contains("\"version\"")); + assert!(schema.contains("\"config\"")); + assert!(schema.contains("\"load\"")); + assert!(schema.contains("\"scenarios\"")); + + println!("✅ JSON Schema generation works"); + } + + 
#[test] + fn test_json_schema_is_valid_json() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + + let parsed: Result = serde_json::from_str(&schema); + assert!(parsed.is_ok(), "Generated schema should be valid JSON"); + + println!("✅ JSON Schema is valid JSON"); + } + + #[test] + fn test_markdown_docs_generation() { + let generator = ConfigDocsGenerator::new(); + let markdown = generator.generate_markdown_docs(); + + assert!(!markdown.is_empty()); + assert!(markdown.contains("# Configuration Schema Reference")); + assert!(markdown.contains("## Version")); + assert!(markdown.contains("## Config")); + assert!(markdown.contains("## Load Models")); + assert!(markdown.contains("## Scenarios")); + + println!("✅ Markdown documentation generation works"); + } + + #[test] + fn test_vscode_snippets_generation() { + let generator = ConfigDocsGenerator::new(); + let snippets = generator.generate_vscode_snippets(); + + assert!(!snippets.is_empty()); + assert!(snippets.contains("loadtest-basic")); + assert!(snippets.contains("loadtest-rps")); + assert!(snippets.contains("loadtest-scenario")); + + println!("✅ VS Code snippets generation works"); + } + + #[test] + fn test_vscode_snippets_is_valid_json() { + let generator = ConfigDocsGenerator::new(); + let snippets = generator.generate_vscode_snippets(); + + let parsed: Result = serde_json::from_str(&snippets); + assert!(parsed.is_ok(), "Generated snippets should be valid JSON"); + + println!("✅ VS Code snippets are valid JSON"); + } + + #[test] + fn test_json_schema_has_required_fields() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + let parsed: serde_json::Value = serde_json::from_str(&schema).unwrap(); + + // Check required root-level fields + assert!(parsed["required"].as_array().is_some()); + let required = parsed["required"].as_array().unwrap(); + assert!(required.iter().any(|v| v == "version")); + 
assert!(required.iter().any(|v| v == "config")); + assert!(required.iter().any(|v| v == "load")); + assert!(required.iter().any(|v| v == "scenarios")); + + println!("✅ JSON Schema has correct required fields"); + } + + #[test] + fn test_json_schema_has_load_model_types() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + + // Check that all load models are documented + assert!(schema.contains("concurrent")); + assert!(schema.contains("rps")); + assert!(schema.contains("ramp")); + + println!("✅ JSON Schema includes all load model types"); + } +} diff --git a/src/config_hot_reload.rs b/src/config_hot_reload.rs new file mode 100644 index 0000000..5bb707f --- /dev/null +++ b/src/config_hot_reload.rs @@ -0,0 +1,586 @@ +//! Configuration hot-reload functionality (Issue #44). +//! +//! This module provides file watching and hot-reload capabilities for YAML +//! configuration files. Changes are detected, validated, and applied without +//! stopping the running test. +//! +//! # Example +//! ```no_run +//! use rust_loadtest::config_hot_reload::{ConfigWatcher, ReloadNotifier}; +//! use std::sync::Arc; +//! use std::time::Duration; +//! +//! # async fn example() -> Result<(), Box> { +//! let notifier = Arc::new(ReloadNotifier::new()); +//! let mut watcher = ConfigWatcher::new("loadtest.yaml", notifier.clone())?; +//! +//! // Start watching in background +//! watcher.start()?; +//! +//! // Check for reload events +//! if let Some(event) = notifier.try_recv() { +//! println!("Config reloaded: {:?}", event); +//! } +//! +//! // Stop watching +//! watcher.stop()?; +//! # Ok(()) +//! # } +//! 
``` + +use crate::yaml_config::YamlConfig; +use notify::{Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher}; +use std::path::{Path, PathBuf}; +use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError}; +use std::sync::{Arc, Mutex}; +use std::time::SystemTime; +use tracing::{debug, error, info, warn}; + +/// Hot-reload configuration. +#[derive(Debug, Clone)] +pub struct HotReloadConfig { + /// Enable hot-reload functionality. + pub enabled: bool, + + /// Path to the config file to watch. + pub file_path: PathBuf, + + /// Debounce duration to avoid multiple reloads for rapid file changes. + pub debounce_ms: u64, +} + +impl HotReloadConfig { + /// Create a new hot-reload config. + pub fn new(file_path: impl Into) -> Self { + Self { + enabled: true, + file_path: file_path.into(), + debounce_ms: 500, // Wait 500ms after last change + } + } + + /// Create a disabled hot-reload config. + pub fn disabled() -> Self { + Self { + enabled: false, + file_path: PathBuf::new(), + debounce_ms: 0, + } + } + + /// Enable hot-reload. + pub fn enable(mut self) -> Self { + self.enabled = true; + self + } + + /// Disable hot-reload. + pub fn disable(mut self) -> Self { + self.enabled = false; + self + } + + /// Set debounce duration in milliseconds. + pub fn with_debounce_ms(mut self, ms: u64) -> Self { + self.debounce_ms = ms; + self + } +} + +/// Reload event containing the new configuration. +#[derive(Debug, Clone)] +pub struct ReloadEvent { + /// Timestamp of the reload. + pub timestamp: SystemTime, + + /// Path to the config file. + pub file_path: PathBuf, + + /// The reloaded configuration. + pub config: YamlConfig, + + /// Whether validation succeeded. + pub valid: bool, + + /// Validation error message (if any). + pub error: Option, +} + +impl ReloadEvent { + /// Check if the reload was successful. + pub fn is_success(&self) -> bool { + self.valid && self.error.is_none() + } +} + +/// Reload event notifier. 
+/// +/// Uses a channel to send reload events to consumers. +pub struct ReloadNotifier { + sender: Sender, + receiver: Arc>>, +} + +impl ReloadNotifier { + /// Create a new reload notifier. + pub fn new() -> Self { + let (sender, receiver) = channel(); + Self { + sender, + receiver: Arc::new(Mutex::new(receiver)), + } + } + + /// Send a reload event. + pub fn notify(&self, event: ReloadEvent) { + if let Err(e) = self.sender.send(event) { + error!("Failed to send reload event: {}", e); + } + } + + /// Try to receive a reload event (non-blocking). + pub fn try_recv(&self) -> Option { + match self.receiver.lock().unwrap().try_recv() { + Ok(event) => Some(event), + Err(TryRecvError::Empty) => None, + Err(TryRecvError::Disconnected) => { + error!("Reload event channel disconnected"); + None + } + } + } + + /// Receive a reload event (blocking). + pub fn recv(&self) -> Option { + match self.receiver.lock().unwrap().recv() { + Ok(event) => Some(event), + Err(e) => { + error!("Failed to receive reload event: {}", e); + None + } + } + } +} + +impl Default for ReloadNotifier { + fn default() -> Self { + Self::new() + } +} + +/// Configuration file watcher. +/// +/// Watches a YAML config file for changes and triggers reload events. +pub struct ConfigWatcher { + config: HotReloadConfig, + notifier: Arc, + watcher: Option, + last_reload: Arc>>, +} + +impl std::fmt::Debug for ConfigWatcher { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ConfigWatcher") + .field("config", &self.config) + .field("watcher_active", &self.watcher.is_some()) + .finish() + } +} + +impl ConfigWatcher { + /// Create a new config watcher. 
+ pub fn new( + file_path: impl Into, + notifier: Arc, + ) -> Result { + let file_path = file_path.into(); + + if !file_path.exists() { + return Err(ConfigWatcherError::FileNotFound(file_path)); + } + + Ok(Self { + config: HotReloadConfig::new(file_path), + notifier, + watcher: None, + last_reload: Arc::new(Mutex::new(None)), + }) + } + + /// Create a watcher with custom config. + pub fn with_config( + config: HotReloadConfig, + notifier: Arc, + ) -> Result { + if config.enabled && !config.file_path.exists() { + return Err(ConfigWatcherError::FileNotFound(config.file_path.clone())); + } + + Ok(Self { + config, + notifier, + watcher: None, + last_reload: Arc::new(Mutex::new(None)), + }) + } + + /// Start watching the config file. + pub fn start(&mut self) -> Result<(), ConfigWatcherError> { + if !self.config.enabled { + debug!("Hot-reload is disabled, skipping watcher start"); + return Ok(()); + } + + info!("Starting config watcher for: {:?}", self.config.file_path); + + let file_path = self.config.file_path.clone(); + let notifier = self.notifier.clone(); + let debounce_ms = self.config.debounce_ms; + let last_reload = self.last_reload.clone(); + + let mut watcher = + notify::recommended_watcher(move |res: Result| match res { + Ok(event) => { + if should_reload(&event) { + debug!("File change detected: {:?}", event); + handle_reload(&file_path, ¬ifier, debounce_ms, &last_reload); + } + } + Err(e) => { + error!("Watch error: {:?}", e); + } + }) + .map_err(ConfigWatcherError::WatcherCreation)?; + + watcher + .watch(&self.config.file_path, RecursiveMode::NonRecursive) + .map_err(ConfigWatcherError::WatcherStart)?; + + self.watcher = Some(watcher); + + info!("Config watcher started successfully"); + Ok(()) + } + + /// Stop watching the config file. 
+ pub fn stop(&mut self) -> Result<(), ConfigWatcherError> { + if let Some(mut watcher) = self.watcher.take() { + info!("Stopping config watcher"); + watcher + .unwatch(&self.config.file_path) + .map_err(ConfigWatcherError::WatcherStop)?; + } + Ok(()) + } + + /// Check if watcher is running. + pub fn is_running(&self) -> bool { + self.watcher.is_some() + } + + /// Get the watched file path. + pub fn file_path(&self) -> &Path { + &self.config.file_path + } +} + +impl Drop for ConfigWatcher { + fn drop(&mut self) { + let _ = self.stop(); + } +} + +/// Check if an event should trigger a reload. +fn should_reload(event: &Event) -> bool { + matches!( + event.kind, + EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_) + ) +} + +/// Handle a config reload. +fn handle_reload( + file_path: &Path, + notifier: &ReloadNotifier, + debounce_ms: u64, + last_reload: &Arc>>, +) { + // Debounce: skip if reload happened recently + let now = SystemTime::now(); + { + let mut last = last_reload.lock().unwrap(); + if let Some(last_time) = *last { + if let Ok(elapsed) = now.duration_since(last_time) { + if elapsed.as_millis() < debounce_ms as u128 { + debug!("Debouncing reload ({}ms since last)", elapsed.as_millis()); + return; + } + } + } + *last = Some(now); + } + + info!("Reloading config from: {:?}", file_path); + + // Load and validate new config + let result = load_and_validate_config(file_path); + + match result { + Ok(config) => { + info!("Config reloaded successfully"); + notifier.notify(ReloadEvent { + timestamp: now, + file_path: file_path.to_path_buf(), + config, + valid: true, + error: None, + }); + } + Err(e) => { + warn!("Config reload failed validation: {}", e); + // Send event with error, but create a placeholder config + notifier.notify(ReloadEvent { + timestamp: now, + file_path: file_path.to_path_buf(), + config: YamlConfig::default(), + valid: false, + error: Some(e), + }); + } + } +} + +/// Load and validate a config file. 
+fn load_and_validate_config(file_path: &Path) -> Result { + // Load YAML + let config = + YamlConfig::from_file(file_path).map_err(|e| format!("Failed to parse YAML: {}", e))?; + + // Validate + config + .validate() + .map_err(|e| format!("Validation failed: {}", e))?; + + Ok(config) +} + +/// Config watcher errors. +#[derive(Debug, thiserror::Error)] +pub enum ConfigWatcherError { + #[error("Config file not found: {0:?}")] + FileNotFound(PathBuf), + + #[error("Failed to create file watcher: {0}")] + WatcherCreation(notify::Error), + + #[error("Failed to start watching: {0}")] + WatcherStart(notify::Error), + + #[error("Failed to stop watching: {0}")] + WatcherStop(notify::Error), + + #[error("Config error: {0}")] + Config(String), +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + fn create_test_config() -> String { + r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" + workers: 10 +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/test" +"# + .to_string() + } + + #[test] + fn test_hot_reload_config_creation() { + let config = HotReloadConfig::new("test.yaml"); + assert!(config.enabled); + assert_eq!(config.file_path, PathBuf::from("test.yaml")); + assert_eq!(config.debounce_ms, 500); + + let disabled = HotReloadConfig::disabled(); + assert!(!disabled.enabled); + } + + #[test] + fn test_hot_reload_config_builders() { + let config = HotReloadConfig::new("test.yaml") + .disable() + .with_debounce_ms(1000); + + assert!(!config.enabled); + assert_eq!(config.debounce_ms, 1000); + } + + #[test] + fn test_reload_event() { + let event = ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: YamlConfig::default(), + valid: true, + error: None, + }; + + assert!(event.is_success()); + + let failed = ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: 
YamlConfig::default(), + valid: false, + error: Some("error".to_string()), + }; + + assert!(!failed.is_success()); + } + + #[test] + fn test_reload_notifier() { + let notifier = ReloadNotifier::new(); + + // Should be empty initially + assert!(notifier.try_recv().is_none()); + + // Send event + let event = ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: YamlConfig::default(), + valid: true, + error: None, + }; + + notifier.notify(event.clone()); + + // Should receive event + let received = notifier.try_recv(); + assert!(received.is_some()); + assert!(received.unwrap().is_success()); + + // Should be empty again + assert!(notifier.try_recv().is_none()); + } + + #[test] + fn test_config_watcher_creation_file_not_found() { + let notifier = Arc::new(ReloadNotifier::new()); + let result = ConfigWatcher::new("nonexistent.yaml", notifier); + + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + ConfigWatcherError::FileNotFound(_) + )); + } + + #[test] + fn test_config_watcher_creation_success() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + let watcher = ConfigWatcher::new(&config_path, notifier); + + assert!(watcher.is_ok()); + let watcher = watcher.unwrap(); + assert_eq!(watcher.file_path(), config_path.as_path()); + assert!(!watcher.is_running()); + } + + #[test] + fn test_load_and_validate_config_success() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let result = load_and_validate_config(&config_path); + assert!(result.is_ok()); + + let config = result.unwrap(); + assert_eq!(config.config.base_url, "https://test.com"); + } + + #[test] + fn test_load_and_validate_config_invalid_yaml() { + let temp_dir = TempDir::new().unwrap(); 
+ let config_path = temp_dir.path().join("invalid.yaml"); + fs::write(&config_path, "invalid: yaml: content:").unwrap(); + + let result = load_and_validate_config(&config_path); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("Failed to parse YAML")); + } + + #[test] + fn test_load_and_validate_config_invalid_config() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("invalid.yaml"); + fs::write( + &config_path, + r#" +version: "1.0" +config: + baseUrl: "not-a-url" + duration: "invalid" +load: + model: "concurrent" +scenarios: [] +"#, + ) + .unwrap(); + + let result = load_and_validate_config(&config_path); + assert!(result.is_err()); + // With duration: "invalid", parsing fails before validation + // Error will be "Failed to parse YAML: ..." not "Validation failed: ..." + let err = result.unwrap_err(); + assert!( + err.contains("Failed to parse") || err.contains("Validation failed"), + "Expected parse or validation error, got: {}", + err + ); + } + + #[test] + fn test_should_reload() { + let modify_event = Event { + kind: EventKind::Modify(notify::event::ModifyKind::Any), + paths: vec![], + attrs: Default::default(), + }; + assert!(should_reload(&modify_event)); + + let create_event = Event { + kind: EventKind::Create(notify::event::CreateKind::Any), + paths: vec![], + attrs: Default::default(), + }; + assert!(should_reload(&create_event)); + + let access_event = Event { + kind: EventKind::Access(notify::event::AccessKind::Any), + paths: vec![], + attrs: Default::default(), + }; + assert!(!should_reload(&access_event)); + } +} diff --git a/src/config_merge.rs b/src/config_merge.rs new file mode 100644 index 0000000..51ac4ea --- /dev/null +++ b/src/config_merge.rs @@ -0,0 +1,528 @@ +//! Configuration merging and default values (Issue #39). +//! +//! This module implements configuration precedence: +//! 
Environment Variables > YAML File > Default Values + +use std::env; +use std::time::Duration; + +/// Default configuration values for all optional fields. +#[derive(Debug, Clone)] +pub struct ConfigDefaults { + /// Default number of workers + pub workers: usize, + + /// Default request timeout + pub timeout: Duration, + + /// Default skip TLS verify + pub skip_tls_verify: bool, + + /// Default scenario weight + pub scenario_weight: f64, + + /// Default load model + pub load_model: String, +} + +impl Default for ConfigDefaults { + fn default() -> Self { + Self { + workers: 10, + timeout: Duration::from_secs(30), + skip_tls_verify: false, + scenario_weight: 1.0, + load_model: "concurrent".to_string(), + } + } +} + +impl ConfigDefaults { + /// Get default configuration values. + pub fn new() -> Self { + Self::default() + } + + /// Get default workers count. + pub fn workers() -> usize { + 10 + } + + /// Get default timeout duration. + pub fn timeout() -> Duration { + Duration::from_secs(30) + } + + /// Get default skip TLS verify flag. + pub fn skip_tls_verify() -> bool { + false + } + + /// Get default scenario weight. + pub fn scenario_weight() -> f64 { + 1.0 + } + + /// Get default load model. + pub fn load_model() -> String { + "concurrent".to_string() + } +} + +/// Configuration precedence resolver. +/// +/// Resolves configuration values according to precedence: +/// 1. Environment variables (highest priority) +/// 2. YAML file values +/// 3. Default values (lowest priority) +pub struct ConfigMerger; + +impl ConfigMerger { + /// Merge workers with precedence: env > yaml > default. + pub fn merge_workers(yaml_value: Option, env_var: &str) -> usize { + // Check environment variable first + if let Ok(env_val) = env::var(env_var) { + if let Ok(parsed) = env_val.parse::() { + return parsed; + } + } + + // Fall back to YAML value or default + yaml_value.unwrap_or_else(ConfigDefaults::workers) + } + + /// Merge timeout with precedence: env > yaml > default. 
+ pub fn merge_timeout(yaml_value: Option, env_var: &str) -> Duration { + // Check environment variable first + if let Ok(env_val) = env::var(env_var) { + if let Ok(parsed) = crate::utils::parse_duration_string(&env_val) { + return parsed; + } + } + + // Fall back to YAML value or default + yaml_value.unwrap_or_else(ConfigDefaults::timeout) + } + + /// Merge skip TLS verify with precedence: env > yaml > default. + pub fn merge_skip_tls_verify(yaml_value: Option, env_var: &str) -> bool { + // Check environment variable first + if let Ok(env_val) = env::var(env_var) { + return env_val.to_lowercase() == "true"; + } + + // Fall back to YAML value or default + yaml_value.unwrap_or_else(ConfigDefaults::skip_tls_verify) + } + + /// Merge scenario weight with precedence: yaml > default. + pub fn merge_scenario_weight(yaml_value: Option) -> f64 { + yaml_value.unwrap_or_else(ConfigDefaults::scenario_weight) + } + + /// Merge string value with precedence: env > yaml > default. + pub fn merge_string(yaml_value: Option, env_var: &str, default: String) -> String { + // Check environment variable first + if let Ok(env_val) = env::var(env_var) { + if !env_val.is_empty() { + return env_val; + } + } + + // Fall back to YAML value or default + yaml_value.unwrap_or(default) + } + + /// Merge optional string with precedence: env > yaml. + pub fn merge_optional_string(yaml_value: Option, env_var: &str) -> Option { + // Check environment variable first + if let Ok(env_val) = env::var(env_var) { + if !env_val.is_empty() { + return Some(env_val); + } + } + + // Fall back to YAML value + yaml_value + } + + /// Merge RPS value with precedence: env > yaml. + pub fn merge_rps(yaml_value: Option, env_var: &str) -> Option { + // Check environment variable first + if let Ok(env_val) = env::var(env_var) { + if let Ok(parsed) = env_val.parse::() { + return Some(parsed); + } + } + + // Fall back to YAML value + yaml_value + } +} + +/// Configuration precedence documentation. 
+pub struct ConfigPrecedence; + +impl ConfigPrecedence { + /// Get documentation for configuration precedence. + pub fn documentation() -> &'static str { + r#" +# Configuration Precedence + +Configuration values are resolved in the following order (highest to lowest priority): + +1. **Environment Variables** (Highest Priority) + - Override both YAML and defaults + - Useful for CI/CD, Docker, Kubernetes + - Example: NUM_CONCURRENT_TASKS=50 + +2. **YAML Configuration File** + - Override defaults + - Version-controlled test definitions + - Example: config.workers: 20 + +3. **Default Values** (Lowest Priority) + - Used when not specified in YAML or environment + - Sensible defaults for common use cases + +## Default Values + +- workers: 10 +- timeout: 30s +- skipTlsVerify: false +- scenario weight: 1.0 +- load model: "concurrent" + +## Environment Variable Mapping + +| YAML Path | Environment Variable | Default | +|-------------------|---------------------------|---------| +| config.workers | NUM_CONCURRENT_TASKS | 10 | +| config.timeout | REQUEST_TIMEOUT | 30s | +| config.skipTlsVerify | SKIP_TLS_VERIFY | false | +| config.baseUrl | TARGET_URL | (required) | +| config.duration | TEST_DURATION | (required) | +| load.target | TARGET_RPS | - | + +## Examples + +### Example 1: All Defaults +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "5m" + # workers: will use default 10 + # timeout: will use default 30s +load: + model: "concurrent" # default +scenarios: + - name: "Test" + # weight: will use default 1.0 + steps: [...] +``` + +### Example 2: YAML Overrides Defaults +```yaml +version: "1.0" +config: + baseUrl: "https://api.example.com" + workers: 50 # overrides default 10 + timeout: "60s" # overrides default 30s + duration: "5m" +load: + model: "rps" + target: 100 +scenarios: + - name: "Test" + weight: 2.0 # overrides default 1.0 + steps: [...] 
+``` + +### Example 3: Environment Overrides Everything +```bash +# YAML has workers: 50 +# Environment has NUM_CONCURRENT_TASKS=100 +# Result: 100 workers (env wins) + +NUM_CONCURRENT_TASKS=100 \ +TARGET_RPS=200 \ +rust_loadtest --config test.yaml +``` + +### Example 4: Mixed Precedence +```yaml +# test.yaml +config: + baseUrl: "https://api.example.com" + workers: 50 # from YAML + timeout: "60s" # from YAML + duration: "5m" +``` + +```bash +# Run with environment override +NUM_CONCURRENT_TASKS=100 rust_loadtest --config test.yaml + +# Result: +# - baseUrl: from YAML (https://api.example.com) +# - workers: 100 (from ENV, overrides YAML's 50) +# - timeout: 60s (from YAML) +# - duration: 5m (from YAML) +``` + +## Best Practices + +1. **Use YAML for base configuration** + - Version control your test definitions + - Document test scenarios + - Set reasonable defaults + +2. **Use environment variables for overrides** + - CI/CD pipeline customization + - Container/Kubernetes configuration + - Quick parameter changes + +3. **Rely on defaults for common settings** + - Timeout, workers, scenario weights + - Reduces config file verbosity + - Sensible defaults for most use cases +"# + } + + /// Print precedence documentation to stdout. 
+ pub fn print_documentation() { + println!("{}", Self::documentation()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + + #[test] + fn test_config_defaults() { + let defaults = ConfigDefaults::new(); + + assert_eq!(defaults.workers, 10); + assert_eq!(defaults.timeout, Duration::from_secs(30)); + assert!(!defaults.skip_tls_verify); + assert_eq!(defaults.scenario_weight, 1.0); + assert_eq!(defaults.load_model, "concurrent"); + + println!("✅ Config defaults are correct"); + } + + #[test] + fn test_merge_workers_yaml_only() { + // No env var set, should use YAML value + let result = ConfigMerger::merge_workers(Some(50), "TEST_WORKERS_1"); + assert_eq!(result, 50); + + println!("✅ Merge workers from YAML works"); + } + + #[test] + fn test_merge_workers_default_only() { + // No env var, no YAML value, should use default + let result = ConfigMerger::merge_workers(None, "TEST_WORKERS_2"); + assert_eq!(result, 10); + + println!("✅ Merge workers uses default when not specified"); + } + + #[test] + fn test_merge_workers_env_override() { + // Set environment variable + env::set_var("TEST_WORKERS_3", "100"); + + // Env should override YAML value + let result = ConfigMerger::merge_workers(Some(50), "TEST_WORKERS_3"); + assert_eq!(result, 100); + + // Clean up + env::remove_var("TEST_WORKERS_3"); + + println!("✅ Environment variable overrides YAML for workers"); + } + + #[test] + fn test_merge_timeout_yaml_only() { + let result = ConfigMerger::merge_timeout(Some(Duration::from_secs(60)), "TEST_TIMEOUT_1"); + assert_eq!(result, Duration::from_secs(60)); + + println!("✅ Merge timeout from YAML works"); + } + + #[test] + fn test_merge_timeout_default_only() { + let result = ConfigMerger::merge_timeout(None, "TEST_TIMEOUT_2"); + assert_eq!(result, Duration::from_secs(30)); + + println!("✅ Merge timeout uses default when not specified"); + } + + #[test] + fn test_merge_timeout_env_override() { + env::set_var("TEST_TIMEOUT_3", "90s"); + + let result = 
ConfigMerger::merge_timeout(Some(Duration::from_secs(60)), "TEST_TIMEOUT_3"); + assert_eq!(result, Duration::from_secs(90)); + + env::remove_var("TEST_TIMEOUT_3"); + + println!("✅ Environment variable overrides YAML for timeout"); + } + + #[test] + fn test_merge_skip_tls_verify() { + // Default + assert!(!ConfigMerger::merge_skip_tls_verify( + None, + "TEST_SKIP_TLS_1" + )); + + // YAML + assert!(ConfigMerger::merge_skip_tls_verify( + Some(true), + "TEST_SKIP_TLS_2" + )); + + // Env override + env::set_var("TEST_SKIP_TLS_3", "true"); + assert!(ConfigMerger::merge_skip_tls_verify( + Some(false), + "TEST_SKIP_TLS_3" + )); + env::remove_var("TEST_SKIP_TLS_3"); + + println!("✅ Skip TLS verify merging works"); + } + + #[test] + fn test_merge_scenario_weight() { + assert_eq!(ConfigMerger::merge_scenario_weight(None), 1.0); + assert_eq!(ConfigMerger::merge_scenario_weight(Some(2.5)), 2.5); + + println!("✅ Scenario weight merging works"); + } + + #[test] + fn test_merge_string_precedence() { + // Default only + let result = ConfigMerger::merge_string(None, "TEST_STR_1", "default".to_string()); + assert_eq!(result, "default"); + + // YAML overrides default + let result = ConfigMerger::merge_string( + Some("yaml".to_string()), + "TEST_STR_2", + "default".to_string(), + ); + assert_eq!(result, "yaml"); + + // Env overrides YAML and default + env::set_var("TEST_STR_3", "env"); + let result = ConfigMerger::merge_string( + Some("yaml".to_string()), + "TEST_STR_3", + "default".to_string(), + ); + assert_eq!(result, "env"); + env::remove_var("TEST_STR_3"); + + println!("✅ String merging precedence works correctly"); + } + + #[test] + fn test_merge_optional_string() { + // No value + assert_eq!( + ConfigMerger::merge_optional_string(None, "TEST_OPT_STR_1"), + None + ); + + // YAML value + assert_eq!( + ConfigMerger::merge_optional_string(Some("yaml".to_string()), "TEST_OPT_STR_2"), + Some("yaml".to_string()) + ); + + // Env overrides YAML + env::set_var("TEST_OPT_STR_3", "env"); + 
assert_eq!( + ConfigMerger::merge_optional_string(Some("yaml".to_string()), "TEST_OPT_STR_3"), + Some("env".to_string()) + ); + env::remove_var("TEST_OPT_STR_3"); + + println!("✅ Optional string merging works"); + } + + #[test] + fn test_merge_rps() { + // No value + assert_eq!(ConfigMerger::merge_rps(None, "TEST_RPS_1"), None); + + // YAML value + assert_eq!( + ConfigMerger::merge_rps(Some(100.0), "TEST_RPS_2"), + Some(100.0) + ); + + // Env overrides YAML + env::set_var("TEST_RPS_3", "200.5"); + assert_eq!( + ConfigMerger::merge_rps(Some(100.0), "TEST_RPS_3"), + Some(200.5) + ); + env::remove_var("TEST_RPS_3"); + + println!("✅ RPS merging works"); + } + + #[test] + fn test_precedence_order() { + env::set_var("TEST_PRECEDENCE", "env-value"); + + // Test with all three sources + let result = ConfigMerger::merge_string( + Some("yaml-value".to_string()), + "TEST_PRECEDENCE", + "default-value".to_string(), + ); + + assert_eq!(result, "env-value"); + + env::remove_var("TEST_PRECEDENCE"); + + // Test with YAML and default (no env) + let result = ConfigMerger::merge_string( + Some("yaml-value".to_string()), + "TEST_PRECEDENCE", + "default-value".to_string(), + ); + + assert_eq!(result, "yaml-value"); + + // Test with default only + let result = + ConfigMerger::merge_string(None, "TEST_PRECEDENCE", "default-value".to_string()); + + assert_eq!(result, "default-value"); + + println!("✅ Precedence order: env > yaml > default works correctly"); + } + + #[test] + fn test_documentation_exists() { + let docs = ConfigPrecedence::documentation(); + assert!(!docs.is_empty()); + assert!(docs.contains("Precedence")); + assert!(docs.contains("Environment Variables")); + assert!(docs.contains("Default Values")); + + println!("✅ Precedence documentation exists"); + } +} diff --git a/src/config_validation.rs b/src/config_validation.rs new file mode 100644 index 0000000..3920a68 --- /dev/null +++ b/src/config_validation.rs @@ -0,0 +1,540 @@ +//! 
Configuration schema validation (Issue #38). +//! +//! This module provides comprehensive validation for YAML configuration files +//! with detailed error messages and field-level validation rules. + +use thiserror::Error; + +/// Validation error with context about which field failed. +#[derive(Error, Debug, Clone)] +pub enum ValidationError { + #[error("Field '{field}': {message}")] + FieldError { field: String, message: String }, + + #[error("Field '{field}' is required but not provided")] + RequiredField { field: String }, + + #[error("Field '{field}': value {value} is out of range ({min} to {max})")] + OutOfRange { + field: String, + value: String, + min: String, + max: String, + }, + + #[error("Field '{field}': invalid format - {message}")] + InvalidFormat { field: String, message: String }, + + #[error("Field '{field}': invalid enum value '{value}'. Expected one of: {expected}")] + InvalidEnum { + field: String, + value: String, + expected: String, + }, + + #[error("Multiple validation errors: {0}")] + Multiple(String), +} + +/// Result type for validation operations. +pub type ValidationResult = Result; + +/// Validation context for building error messages. +pub struct ValidationContext { + field_path: Vec, + errors: Vec, +} + +impl ValidationContext { + pub fn new() -> Self { + Self { + field_path: Vec::new(), + errors: Vec::new(), + } + } + + /// Enter a nested field context. + pub fn enter(&mut self, field: &str) { + self.field_path.push(field.to_string()); + } + + /// Exit the current field context. + pub fn exit(&mut self) { + self.field_path.pop(); + } + + /// Get the current field path as a string. + pub fn current_path(&self) -> String { + self.field_path.join(".") + } + + /// Add a validation error. + pub fn add_error(&mut self, error: ValidationError) { + self.errors.push(error); + } + + /// Add a field error with automatic path. 
+ pub fn field_error(&mut self, message: String) { + self.add_error(ValidationError::FieldError { + field: self.current_path(), + message, + }); + } + + /// Check if any errors were collected. + pub fn has_errors(&self) -> bool { + !self.errors.is_empty() + } + + /// Get all collected errors. + pub fn errors(&self) -> &[ValidationError] { + &self.errors + } + + /// Consume the context and return a result. + pub fn into_result(self) -> Result<(), ValidationError> { + if self.errors.is_empty() { + Ok(()) + } else { + let messages: Vec = self.errors.iter().map(|e| e.to_string()).collect(); + Err(ValidationError::Multiple(messages.join("; "))) + } + } +} + +impl Default for ValidationContext { + fn default() -> Self { + Self::new() + } +} + +/// Validator for URLs. +pub struct UrlValidator; + +impl UrlValidator { + pub fn validate(url: &str) -> ValidationResult<()> { + if url.is_empty() { + return Err(ValidationError::InvalidFormat { + field: "url".to_string(), + message: "URL cannot be empty".to_string(), + }); + } + + if !url.starts_with("http://") && !url.starts_with("https://") { + return Err(ValidationError::InvalidFormat { + field: "url".to_string(), + message: format!("URL must start with http:// or https://, got: {}", url), + }); + } + + // Basic validation - check for obvious issues + if url.contains(' ') { + return Err(ValidationError::InvalidFormat { + field: "url".to_string(), + message: "URL cannot contain spaces".to_string(), + }); + } + + Ok(()) + } +} + +/// Validator for durations. 
+pub struct DurationValidator; + +impl DurationValidator { + pub fn validate(duration_str: &str) -> ValidationResult<()> { + // Try to parse using the utility function + crate::utils::parse_duration_string(duration_str).map_err(|e| { + ValidationError::InvalidFormat { + field: "duration".to_string(), + message: format!("Invalid duration format '{}': {}", duration_str, e), + } + })?; + Ok(()) + } + + pub fn validate_positive(duration_str: &str) -> ValidationResult<()> { + Self::validate(duration_str)?; + + let duration = crate::utils::parse_duration_string(duration_str).unwrap(); + if duration.as_secs() == 0 { + return Err(ValidationError::OutOfRange { + field: "duration".to_string(), + value: "0s".to_string(), + min: "1s".to_string(), + max: "unlimited".to_string(), + }); + } + + Ok(()) + } +} + +/// Validator for numeric ranges. +pub struct RangeValidator; + +impl RangeValidator { + pub fn validate_u64(value: u64, min: u64, max: u64, field: &str) -> ValidationResult<()> { + if value < min || value > max { + return Err(ValidationError::OutOfRange { + field: field.to_string(), + value: value.to_string(), + min: min.to_string(), + max: max.to_string(), + }); + } + Ok(()) + } + + pub fn validate_f64(value: f64, min: f64, max: f64, field: &str) -> ValidationResult<()> { + if value < min || value > max { + return Err(ValidationError::OutOfRange { + field: field.to_string(), + value: value.to_string(), + min: min.to_string(), + max: max.to_string(), + }); + } + Ok(()) + } + + pub fn validate_positive_u64(value: u64, field: &str) -> ValidationResult<()> { + if value == 0 { + return Err(ValidationError::OutOfRange { + field: field.to_string(), + value: "0".to_string(), + min: "1".to_string(), + max: "unlimited".to_string(), + }); + } + Ok(()) + } + + pub fn validate_positive_f64(value: f64, field: &str) -> ValidationResult<()> { + if value <= 0.0 { + return Err(ValidationError::OutOfRange { + field: field.to_string(), + value: value.to_string(), + min: "0.0 
(exclusive)".to_string(), + max: "unlimited".to_string(), + }); + } + Ok(()) + } +} + +/// Validator for HTTP methods. +pub struct HttpMethodValidator; + +impl HttpMethodValidator { + const VALID_METHODS: &'static [&'static str] = + &["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"]; + + pub fn validate(method: &str) -> ValidationResult<()> { + let method_upper = method.to_uppercase(); + if !Self::VALID_METHODS.contains(&method_upper.as_str()) { + return Err(ValidationError::InvalidEnum { + field: "method".to_string(), + value: method.to_string(), + expected: Self::VALID_METHODS.join(", "), + }); + } + Ok(()) + } +} + +/// Validator for load model types. +pub struct LoadModelValidator; + +impl LoadModelValidator { + pub fn validate_rps(target_rps: f64) -> ValidationResult<()> { + RangeValidator::validate_positive_f64(target_rps, "load.target") + } + + pub fn validate_ramp(min_rps: f64, max_rps: f64) -> ValidationResult<()> { + RangeValidator::validate_positive_f64(min_rps, "load.min")?; + RangeValidator::validate_positive_f64(max_rps, "load.max")?; + + if min_rps >= max_rps { + return Err(ValidationError::FieldError { + field: "load".to_string(), + message: format!( + "min_rps ({}) must be less than max_rps ({})", + min_rps, max_rps + ), + }); + } + + Ok(()) + } + + pub fn validate_daily_traffic( + min_rps: f64, + mid_rps: f64, + max_rps: f64, + ) -> ValidationResult<()> { + RangeValidator::validate_positive_f64(min_rps, "load.min")?; + RangeValidator::validate_positive_f64(mid_rps, "load.mid")?; + RangeValidator::validate_positive_f64(max_rps, "load.max")?; + + if !(min_rps < mid_rps && mid_rps < max_rps) { + return Err(ValidationError::FieldError { + field: "load".to_string(), + message: format!( + "Daily traffic must satisfy: min ({}) < mid ({}) < max ({})", + min_rps, mid_rps, max_rps + ), + }); + } + + Ok(()) + } +} + +/// Configuration schema definition and JSON Schema export. 
+pub struct ConfigSchema; + +impl ConfigSchema { + /// Generate JSON Schema for the YAML configuration. + pub fn to_json_schema() -> serde_json::Value { + serde_json::json!({ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Rust LoadTest Configuration", + "type": "object", + "required": ["version", "config", "load", "scenarios"], + "properties": { + "version": { + "type": "string", + "const": "1.0", + "description": "Configuration format version" + }, + "metadata": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "description": { "type": "string" }, + "author": { "type": "string" }, + "tags": { + "type": "array", + "items": { "type": "string" } + } + } + }, + "config": { + "type": "object", + "required": ["baseUrl", "duration"], + "properties": { + "baseUrl": { + "type": "string", + "format": "uri", + "pattern": "^https?://", + "description": "Base URL for all requests" + }, + "workers": { + "type": "integer", + "minimum": 1, + "maximum": 10000, + "default": 10, + "description": "Number of concurrent workers" + }, + "duration": { + "oneOf": [ + { "type": "integer", "minimum": 1 }, + { "type": "string", "pattern": "^\\d+[smhd]$" } + ], + "description": "Test duration (e.g., '5m', '2h', 300)" + }, + "timeout": { + "oneOf": [ + { "type": "integer", "minimum": 1 }, + { "type": "string", "pattern": "^\\d+[smhd]$" } + ], + "default": 30, + "description": "Request timeout" + }, + "skipTlsVerify": { + "type": "boolean", + "default": false, + "description": "Skip TLS certificate verification" + } + } + }, + "load": { + "oneOf": [ + { + "type": "object", + "required": ["model"], + "properties": { + "model": { "const": "concurrent" } + } + }, + { + "type": "object", + "required": ["model", "target"], + "properties": { + "model": { "const": "rps" }, + "target": { "type": "number", "minimum": 0.1 } + } + }, + { + "type": "object", + "required": ["model", "min", "max", "rampDuration"], + "properties": { + "model": { "const": "ramp" }, 
+ "min": { "type": "number", "minimum": 0.1 }, + "max": { "type": "number", "minimum": 0.1 }, + "rampDuration": { "oneOf": [ + { "type": "integer" }, + { "type": "string" } + ]} + } + } + ] + }, + "scenarios": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "required": ["name", "steps"], + "properties": { + "name": { "type": "string" }, + "weight": { "type": "number", "minimum": 0.1, "default": 1.0 }, + "steps": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "required": ["request"], + "properties": { + "name": { "type": "string" }, + "request": { + "type": "object", + "required": ["method", "path"], + "properties": { + "method": { + "type": "string", + "enum": ["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"] + }, + "path": { "type": "string" } + } + } + } + } + } + } + } + } + } + }) + } + + /// Export JSON Schema to a file. + pub fn export_json_schema() -> String { + serde_json::to_string_pretty(&Self::to_json_schema()).unwrap() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_url_validator_valid() { + assert!(UrlValidator::validate("https://example.com").is_ok()); + assert!(UrlValidator::validate("http://localhost:8080").is_ok()); + assert!(UrlValidator::validate("https://api.example.com/v1").is_ok()); + } + + #[test] + fn test_url_validator_invalid() { + assert!(UrlValidator::validate("").is_err()); + assert!(UrlValidator::validate("example.com").is_err()); + assert!(UrlValidator::validate("ftp://example.com").is_err()); + assert!(UrlValidator::validate("https://example .com").is_err()); + } + + #[test] + fn test_duration_validator() { + assert!(DurationValidator::validate("30s").is_ok()); + assert!(DurationValidator::validate("5m").is_ok()); + assert!(DurationValidator::validate("2h").is_ok()); + assert!(DurationValidator::validate("invalid").is_err()); + } + + #[test] + fn test_duration_validator_positive() { + assert!(DurationValidator::validate_positive("1s").is_ok()); + 
assert!(DurationValidator::validate_positive("0s").is_err()); + } + + #[test] + fn test_range_validator_u64() { + assert!(RangeValidator::validate_u64(50, 1, 100, "test").is_ok()); + assert!(RangeValidator::validate_u64(0, 1, 100, "test").is_err()); + assert!(RangeValidator::validate_u64(101, 1, 100, "test").is_err()); + } + + #[test] + fn test_range_validator_positive() { + assert!(RangeValidator::validate_positive_u64(1, "test").is_ok()); + assert!(RangeValidator::validate_positive_u64(0, "test").is_err()); + } + + #[test] + fn test_http_method_validator() { + assert!(HttpMethodValidator::validate("GET").is_ok()); + assert!(HttpMethodValidator::validate("POST").is_ok()); + assert!(HttpMethodValidator::validate("get").is_ok()); // case insensitive + assert!(HttpMethodValidator::validate("INVALID").is_err()); + } + + #[test] + fn test_load_model_validator_rps() { + assert!(LoadModelValidator::validate_rps(100.0).is_ok()); + assert!(LoadModelValidator::validate_rps(0.0).is_err()); + assert!(LoadModelValidator::validate_rps(-10.0).is_err()); + } + + #[test] + fn test_load_model_validator_ramp() { + assert!(LoadModelValidator::validate_ramp(10.0, 100.0).is_ok()); + assert!(LoadModelValidator::validate_ramp(100.0, 10.0).is_err()); + assert!(LoadModelValidator::validate_ramp(50.0, 50.0).is_err()); + } + + #[test] + fn test_load_model_validator_daily_traffic() { + assert!(LoadModelValidator::validate_daily_traffic(10.0, 50.0, 100.0).is_ok()); + assert!(LoadModelValidator::validate_daily_traffic(100.0, 50.0, 10.0).is_err()); + assert!(LoadModelValidator::validate_daily_traffic(10.0, 10.0, 100.0).is_err()); + } + + #[test] + fn test_validation_context() { + let mut ctx = ValidationContext::new(); + + ctx.enter("config"); + ctx.enter("baseUrl"); + assert_eq!(ctx.current_path(), "config.baseUrl"); + + ctx.field_error("Invalid URL".to_string()); + assert!(ctx.has_errors()); + + ctx.exit(); + ctx.exit(); + assert_eq!(ctx.current_path(), ""); + } + + #[test] + fn 
test_json_schema_export() { + let schema = ConfigSchema::to_json_schema(); + assert!(schema.is_object()); + + let schema_str = ConfigSchema::export_json_schema(); + assert!(schema_str.contains("\"$schema\"")); + assert!(schema_str.contains("version")); + assert!(schema_str.contains("config")); + } +} diff --git a/src/config_version.rs b/src/config_version.rs new file mode 100644 index 0000000..7d5ec31 --- /dev/null +++ b/src/config_version.rs @@ -0,0 +1,512 @@ +//! Configuration versioning and migration framework (Issue #41). +//! +//! This module provides version management for YAML configuration files, +//! including version validation, compatibility checking, and migration +//! framework for evolving config schemas over time. + +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; +use std::fmt; +use std::str::FromStr; +use thiserror::Error; + +/// Version parsing and validation errors. +#[derive(Error, Debug, Clone, PartialEq)] +pub enum VersionError { + #[error("Invalid version format: {0}. Expected format: X.Y (e.g., 1.0, 2.1)")] + InvalidFormat(String), + + #[error("Unsupported version: {version}. Supported versions: {supported}")] + UnsupportedVersion { version: String, supported: String }, + + #[error("Version {current} is too old. Minimum supported version: {minimum}")] + VersionTooOld { current: String, minimum: String }, + + #[error("Version {current} is too new. Maximum supported version: {maximum}")] + VersionTooNew { current: String, maximum: String }, + + #[error("Migration failed from {from} to {to}: {reason}")] + MigrationFailed { + from: String, + to: String, + reason: String, + }, +} + +/// Semantic version for config files. +/// +/// Supports major.minor versioning (e.g., 1.0, 2.1). +/// Patch versions are not used as config changes typically warrant +/// at least a minor version bump. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct Version { + pub major: u32, + pub minor: u32, +} + +impl Version { + /// Create a new version. + pub fn new(major: u32, minor: u32) -> Self { + Self { major, minor } + } + + /// Current supported version. + pub const CURRENT: Version = Version { major: 1, minor: 0 }; + + /// Minimum supported version (oldest version that can be loaded). + pub const MINIMUM_SUPPORTED: Version = Version { major: 1, minor: 0 }; + + /// Maximum supported version (newest version that can be loaded). + pub const MAXIMUM_SUPPORTED: Version = Version { major: 1, minor: 0 }; + + /// Check if this version is supported. + pub fn is_supported(&self) -> bool { + *self >= Self::MINIMUM_SUPPORTED && *self <= Self::MAXIMUM_SUPPORTED + } + + /// Check if this version requires migration to current. + pub fn needs_migration(&self) -> bool { + *self < Self::CURRENT + } + + /// Get list of all supported versions. + pub fn supported_versions() -> Vec { + vec![Version::new(1, 0)] + } + + /// Get supported versions as a formatted string. 
+ pub fn supported_versions_string() -> String { + Self::supported_versions() + .iter() + .map(|v| v.to_string()) + .collect::>() + .join(", ") + } +} + +impl fmt::Display for Version { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}.{}", self.major, self.minor) + } +} + +impl FromStr for Version { + type Err = VersionError; + + fn from_str(s: &str) -> Result { + let parts: Vec<&str> = s.split('.').collect(); + if parts.len() != 2 { + return Err(VersionError::InvalidFormat(s.to_string())); + } + + let major = parts[0] + .parse::() + .map_err(|_| VersionError::InvalidFormat(s.to_string()))?; + let minor = parts[1] + .parse::() + .map_err(|_| VersionError::InvalidFormat(s.to_string()))?; + + Ok(Version::new(major, minor)) + } +} + +impl PartialOrd for Version { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Version { + fn cmp(&self, other: &Self) -> Ordering { + match self.major.cmp(&other.major) { + Ordering::Equal => self.minor.cmp(&other.minor), + other => other, + } + } +} + +/// Version compatibility checker. +pub struct VersionChecker; + +impl VersionChecker { + /// Validate that a version is supported. + pub fn validate(version: &Version) -> Result<(), VersionError> { + if !version.is_supported() { + if *version < Version::MINIMUM_SUPPORTED { + return Err(VersionError::VersionTooOld { + current: version.to_string(), + minimum: Version::MINIMUM_SUPPORTED.to_string(), + }); + } else if *version > Version::MAXIMUM_SUPPORTED { + return Err(VersionError::VersionTooNew { + current: version.to_string(), + maximum: Version::MAXIMUM_SUPPORTED.to_string(), + }); + } else { + return Err(VersionError::UnsupportedVersion { + version: version.to_string(), + supported: Version::supported_versions_string(), + }); + } + } + Ok(()) + } + + /// Parse and validate a version string. 
+ pub fn parse_and_validate(version_str: &str) -> Result { + let version = Version::from_str(version_str)?; + Self::validate(&version)?; + Ok(version) + } + + /// Check version compatibility and return migration path if needed. + pub fn check_compatibility(version: &Version) -> Result>, VersionError> { + Self::validate(version)?; + + if version.needs_migration() { + Ok(Some(Self::get_migration_path(version))) + } else { + Ok(None) + } + } + + /// Get the migration path from one version to another. + fn get_migration_path(from: &Version) -> Vec { + let mut path = Vec::new(); + let mut current = *from; + + // For now, since we only have 1.0, no migration path exists yet + // When we add 2.0, this would return [1.0, 2.0] + while current < Version::CURRENT { + // Increment to next minor version + current.minor += 1; + if current.minor >= 10 { + current.major += 1; + current.minor = 0; + } + path.push(current); + } + + path + } +} + +/// Migration trait for config version migrations. +pub trait Migration { + /// Source version this migration applies from. + #[allow(clippy::wrong_self_convention)] + fn from_version(&self) -> Version; + + /// Target version this migration applies to. + #[allow(clippy::wrong_self_convention)] + fn to_version(&self) -> Version; + + /// Description of what this migration does. + fn description(&self) -> &str; + + /// Apply the migration to a YAML string. + /// + /// This takes the raw YAML as a string and returns the migrated YAML. + /// Migrations can modify the structure, add/remove fields, or transform values. + fn migrate(&self, yaml: &str) -> Result; +} + +/// Registry of all available migrations. +pub struct MigrationRegistry { + migrations: Vec>, +} + +impl MigrationRegistry { + /// Create a new empty migration registry. + pub fn new() -> Self { + Self { + migrations: Vec::new(), + } + } + + /// Create the default migration registry with all migrations. 
+ pub fn default_migrations() -> Self { + // Future migrations will be registered here + // Example: registry.register(Box::new(MigrationV1ToV2)); + Self::new() + } + + /// Register a migration. + pub fn register(&mut self, migration: Box) { + self.migrations.push(migration); + } + + /// Find a migration from one version to another. + pub fn find_migration(&self, from: &Version, to: &Version) -> Option<&dyn Migration> { + self.migrations + .iter() + .find(|m| m.from_version() == *from && m.to_version() == *to) + .map(|m| m.as_ref()) + } + + /// Apply migrations to upgrade YAML from one version to another. + pub fn migrate( + &self, + yaml: &str, + from: &Version, + to: &Version, + ) -> Result { + if from == to { + return Ok(yaml.to_string()); + } + + let mut current_yaml = yaml.to_string(); + let mut current_version = *from; + + while current_version < *to { + // Find next migration step + let next_version = Version::new( + if current_version.minor < 9 { + current_version.major + } else { + current_version.major + 1 + }, + if current_version.minor < 9 { + current_version.minor + 1 + } else { + 0 + }, + ); + + if let Some(migration) = self.find_migration(¤t_version, &next_version) { + current_yaml = migration.migrate(¤t_yaml)?; + current_version = next_version; + } else { + return Err(VersionError::MigrationFailed { + from: current_version.to_string(), + to: next_version.to_string(), + reason: "No migration found".to_string(), + }); + } + + // Safety check: don't loop forever + if current_version > *to { + break; + } + } + + Ok(current_yaml) + } +} + +impl Default for MigrationRegistry { + fn default() -> Self { + Self::default_migrations() + } +} + +/// Version information and utilities. +pub struct VersionInfo; + +impl VersionInfo { + /// Get the current config version. + pub fn current() -> Version { + Version::CURRENT + } + + /// Get the minimum supported version. 
+ pub fn minimum_supported() -> Version { + Version::MINIMUM_SUPPORTED + } + + /// Get the maximum supported version. + pub fn maximum_supported() -> Version { + Version::MAXIMUM_SUPPORTED + } + + /// Get version information as a formatted string. + pub fn info_string() -> String { + format!( + "Config Version Info:\n\ + - Current: {}\n\ + - Minimum Supported: {}\n\ + - Maximum Supported: {}\n\ + - Supported Versions: {}", + Version::CURRENT, + Version::MINIMUM_SUPPORTED, + Version::MAXIMUM_SUPPORTED, + Version::supported_versions_string() + ) + } + + /// Print version information to stdout. + pub fn print_info() { + println!("{}", Self::info_string()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_version_parsing() { + assert_eq!(Version::from_str("1.0").unwrap(), Version::new(1, 0)); + assert_eq!(Version::from_str("2.5").unwrap(), Version::new(2, 5)); + assert_eq!(Version::from_str("10.99").unwrap(), Version::new(10, 99)); + + println!("✅ Version parsing works"); + } + + #[test] + fn test_version_parsing_errors() { + assert!(Version::from_str("1").is_err()); + assert!(Version::from_str("1.0.0").is_err()); + assert!(Version::from_str("invalid").is_err()); + assert!(Version::from_str("1.x").is_err()); + + println!("✅ Version parsing errors work"); + } + + #[test] + fn test_version_display() { + let version = Version::new(1, 0); + assert_eq!(version.to_string(), "1.0"); + + let version = Version::new(2, 5); + assert_eq!(version.to_string(), "2.5"); + + println!("✅ Version display works"); + } + + #[test] + fn test_version_comparison() { + assert!(Version::new(1, 0) < Version::new(1, 1)); + assert!(Version::new(1, 0) < Version::new(2, 0)); + assert!(Version::new(1, 5) < Version::new(2, 0)); + assert!(Version::new(2, 0) > Version::new(1, 9)); + assert_eq!(Version::new(1, 0), Version::new(1, 0)); + + println!("✅ Version comparison works"); + } + + #[test] + fn test_version_is_supported() { + assert!(Version::new(1, 0).is_supported()); + // 
Future versions not yet supported + assert!(!Version::new(2, 0).is_supported()); + assert!(!Version::new(0, 9).is_supported()); + + println!("✅ Version support checking works"); + } + + #[test] + fn test_version_needs_migration() { + assert!(!Version::new(1, 0).needs_migration()); // Current version + // Future: when we have 2.0, version 1.0 will need migration + // assert!(Version::new(1, 0).needs_migration()); + + println!("✅ Version migration checking works"); + } + + #[test] + fn test_version_checker_validate() { + assert!(VersionChecker::validate(&Version::new(1, 0)).is_ok()); + assert!(VersionChecker::validate(&Version::new(2, 0)).is_err()); + assert!(VersionChecker::validate(&Version::new(0, 9)).is_err()); + + println!("✅ Version validation works"); + } + + #[test] + fn test_version_checker_parse_and_validate() { + assert!(VersionChecker::parse_and_validate("1.0").is_ok()); + assert!(VersionChecker::parse_and_validate("2.0").is_err()); + assert!(VersionChecker::parse_and_validate("invalid").is_err()); + + println!("✅ Version parse and validate works"); + } + + #[test] + fn test_version_too_old_error() { + let result = VersionChecker::validate(&Version::new(0, 5)); + assert!(result.is_err()); + + let err = result.unwrap_err(); + assert!(err.to_string().contains("too old")); + assert!(err.to_string().contains("0.5")); + assert!(err.to_string().contains("1.0")); + + println!("✅ Version too old error message works"); + } + + #[test] + fn test_version_too_new_error() { + let result = VersionChecker::validate(&Version::new(99, 0)); + assert!(result.is_err()); + + let err = result.unwrap_err(); + assert!(err.to_string().contains("too new")); + assert!(err.to_string().contains("99.0")); + + println!("✅ Version too new error message works"); + } + + #[test] + fn test_version_supported_list() { + let versions = Version::supported_versions(); + assert!(!versions.is_empty()); + assert!(versions.contains(&Version::new(1, 0))); + + let version_string = 
Version::supported_versions_string(); + assert!(version_string.contains("1.0")); + + println!("✅ Supported versions list works"); + } + + #[test] + fn test_migration_registry_empty() { + let registry = MigrationRegistry::new(); + assert!(registry + .find_migration(&Version::new(1, 0), &Version::new(2, 0)) + .is_none()); + + println!("✅ Empty migration registry works"); + } + + #[test] + fn test_migration_registry_migrate_same_version() { + let registry = MigrationRegistry::default_migrations(); + let yaml = "version: '1.0'"; + let result = registry + .migrate(yaml, &Version::new(1, 0), &Version::new(1, 0)) + .unwrap(); + assert_eq!(result, yaml); + + println!("✅ Migrate same version returns unchanged YAML"); + } + + #[test] + fn test_version_info_string() { + let info = VersionInfo::info_string(); + assert!(info.contains("Current")); + assert!(info.contains("1.0")); + assert!(info.contains("Minimum Supported")); + assert!(info.contains("Maximum Supported")); + + println!("✅ Version info string works"); + } + + #[test] + fn test_version_constants() { + assert_eq!(Version::CURRENT, Version::new(1, 0)); + assert_eq!(Version::MINIMUM_SUPPORTED, Version::new(1, 0)); + assert_eq!(Version::MAXIMUM_SUPPORTED, Version::new(1, 0)); + + println!("✅ Version constants are correct"); + } + + #[test] + fn test_check_compatibility() { + let result = VersionChecker::check_compatibility(&Version::new(1, 0)); + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); // No migration needed + + println!("✅ Compatibility checking works"); + } +} diff --git a/src/connection_pool.rs b/src/connection_pool.rs new file mode 100644 index 0000000..97c536a --- /dev/null +++ b/src/connection_pool.rs @@ -0,0 +1,378 @@ +//! Connection pool configuration and monitoring. +//! +//! This module provides connection pool statistics tracking and configuration. +//! Since reqwest doesn't expose internal pool metrics, we track connection +//! 
behavior patterns and configuration to provide insights into pool utilization. + +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; +use tracing::debug; + +/// Connection pool configuration. +#[derive(Debug, Clone)] +pub struct PoolConfig { + /// Maximum idle connections to keep per host + pub max_idle_per_host: usize, + + /// How long idle connections stay in the pool before cleanup + pub idle_timeout: Duration, + + /// TCP keepalive duration + pub tcp_keepalive: Option, +} + +impl Default for PoolConfig { + fn default() -> Self { + Self { + max_idle_per_host: 32, + idle_timeout: Duration::from_secs(90), + tcp_keepalive: Some(Duration::from_secs(60)), + } + } +} + +impl PoolConfig { + /// Create a new pool configuration. + pub fn new() -> Self { + Self::default() + } + + /// Set maximum idle connections per host. + pub fn with_max_idle_per_host(mut self, max: usize) -> Self { + self.max_idle_per_host = max; + self + } + + /// Set idle connection timeout. + pub fn with_idle_timeout(mut self, timeout: Duration) -> Self { + self.idle_timeout = timeout; + self + } + + /// Set TCP keepalive duration. + pub fn with_tcp_keepalive(mut self, keepalive: Option) -> Self { + self.tcp_keepalive = keepalive; + self + } + + /// Apply this configuration to a reqwest ClientBuilder. + pub fn apply_to_builder(&self, builder: reqwest::ClientBuilder) -> reqwest::ClientBuilder { + let mut builder = builder + .pool_max_idle_per_host(self.max_idle_per_host) + .pool_idle_timeout(self.idle_timeout); + + if let Some(keepalive) = self.tcp_keepalive { + builder = builder.tcp_keepalive(keepalive); + } + + builder + } +} + +/// Connection statistics for monitoring pool behavior. 
+#[derive(Debug, Clone, Default)] +pub struct ConnectionStats { + /// Total requests made + pub total_requests: u64, + + /// Requests that likely used a new connection (slow initial handshake) + pub likely_new_connections: u64, + + /// Requests that likely reused a connection (fast, no TLS handshake) + pub likely_reused_connections: u64, + + /// First request timestamp (for rate calculations) + pub first_request: Option, + + /// Last request timestamp + pub last_request: Option, +} + +impl ConnectionStats { + /// Calculate the connection reuse rate. + pub fn reuse_rate(&self) -> f64 { + if self.total_requests == 0 { + return 0.0; + } + (self.likely_reused_connections as f64 / self.total_requests as f64) * 100.0 + } + + /// Calculate the new connection rate. + pub fn new_connection_rate(&self) -> f64 { + if self.total_requests == 0 { + return 0.0; + } + (self.likely_new_connections as f64 / self.total_requests as f64) * 100.0 + } + + /// Get the duration over which requests were tracked. + pub fn duration(&self) -> Option { + match (self.first_request, self.last_request) { + (Some(first), Some(last)) => Some(last.duration_since(first)), + _ => None, + } + } + + /// Format statistics as a human-readable string. + pub fn format(&self) -> String { + format!( + "Total: {}, Reused: {} ({:.1}%), New: {} ({:.1}%)", + self.total_requests, + self.likely_reused_connections, + self.reuse_rate(), + self.likely_new_connections, + self.new_connection_rate() + ) + } +} + +/// Tracker for connection pool statistics. +/// +/// This tracker monitors connection behavior patterns to provide insights +/// into connection reuse. It uses timing heuristics to infer whether a +/// connection was likely reused or newly established. 
+#[derive(Clone)] +pub struct PoolStatsTracker { + stats: Arc>, + + /// Threshold for considering a connection "likely new" (milliseconds) + /// Requests slower than this are likely establishing new connections + new_connection_threshold_ms: u64, +} + +impl PoolStatsTracker { + /// Create a new pool statistics tracker. + /// + /// # Arguments + /// * `new_connection_threshold_ms` - Latency threshold (ms) above which we + /// consider a connection likely new (includes TLS handshake time) + pub fn new(new_connection_threshold_ms: u64) -> Self { + Self { + stats: Arc::new(Mutex::new(ConnectionStats::default())), + new_connection_threshold_ms, + } + } + + /// Record a request with timing information. + /// + /// Uses latency to infer connection reuse. Requests with very low latency + /// (<50ms typically) likely reused an existing connection. Slower requests + /// may have established a new connection (including TLS handshake). + pub fn record_request(&self, latency_ms: u64) { + let now = Instant::now(); + let mut stats = self.stats.lock().unwrap(); + + stats.total_requests += 1; + + // Track timing + if stats.first_request.is_none() { + stats.first_request = Some(now); + } + stats.last_request = Some(now); + + // Infer connection type based on latency + // Fast requests (= self.new_connection_threshold_ms { + stats.likely_new_connections += 1; + debug!( + latency_ms = latency_ms, + threshold = self.new_connection_threshold_ms, + "Request latency suggests new connection" + ); + } else { + stats.likely_reused_connections += 1; + debug!( + latency_ms = latency_ms, + threshold = self.new_connection_threshold_ms, + "Request latency suggests reused connection" + ); + } + } + + /// Get current connection statistics. + pub fn stats(&self) -> ConnectionStats { + self.stats.lock().unwrap().clone() + } + + /// Reset all statistics. 
+ pub fn reset(&self) { + let mut stats = self.stats.lock().unwrap(); + *stats = ConnectionStats::default(); + } +} + +impl Default for PoolStatsTracker { + fn default() -> Self { + // Default threshold of 100ms to distinguish new vs reused connections + // TLS handshake typically adds 50-150ms depending on network conditions + Self::new(100) + } +} + +// Global pool statistics tracker. +lazy_static::lazy_static! { + pub static ref GLOBAL_POOL_STATS: PoolStatsTracker = PoolStatsTracker::default(); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_pool_config_defaults() { + let config = PoolConfig::default(); + assert_eq!(config.max_idle_per_host, 32); + assert_eq!(config.idle_timeout, Duration::from_secs(90)); + assert_eq!(config.tcp_keepalive, Some(Duration::from_secs(60))); + } + + #[test] + fn test_pool_config_builder() { + let config = PoolConfig::new() + .with_max_idle_per_host(64) + .with_idle_timeout(Duration::from_secs(120)) + .with_tcp_keepalive(None); + + assert_eq!(config.max_idle_per_host, 64); + assert_eq!(config.idle_timeout, Duration::from_secs(120)); + assert_eq!(config.tcp_keepalive, None); + } + + #[test] + fn test_connection_stats_empty() { + let stats = ConnectionStats::default(); + assert_eq!(stats.total_requests, 0); + assert_eq!(stats.reuse_rate(), 0.0); + assert_eq!(stats.new_connection_rate(), 0.0); + assert!(stats.duration().is_none()); + } + + #[test] + fn test_connection_stats_rates() { + let stats = ConnectionStats { + total_requests: 100, + likely_new_connections: 20, + likely_reused_connections: 80, + first_request: Some(Instant::now()), + last_request: Some(Instant::now()), + }; + + assert_eq!(stats.reuse_rate(), 80.0); + assert_eq!(stats.new_connection_rate(), 20.0); + } + + #[test] + fn test_pool_stats_tracker_fast_requests() { + let tracker = PoolStatsTracker::new(100); + + // Simulate 10 fast requests (likely reused connections) + for _ in 0..10 { + tracker.record_request(20); // 20ms - fast + } + + let stats 
= tracker.stats(); + assert_eq!(stats.total_requests, 10); + assert_eq!(stats.likely_reused_connections, 10); + assert_eq!(stats.likely_new_connections, 0); + assert_eq!(stats.reuse_rate(), 100.0); + } + + #[test] + fn test_pool_stats_tracker_slow_requests() { + let tracker = PoolStatsTracker::new(100); + + // Simulate 10 slow requests (likely new connections) + for _ in 0..10 { + tracker.record_request(150); // 150ms - slow (includes TLS handshake) + } + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 10); + assert_eq!(stats.likely_reused_connections, 0); + assert_eq!(stats.likely_new_connections, 10); + assert_eq!(stats.new_connection_rate(), 100.0); + } + + #[test] + fn test_pool_stats_tracker_mixed() { + let tracker = PoolStatsTracker::new(100); + + // Simulate mixed requests + tracker.record_request(150); // New connection (slow) + tracker.record_request(30); // Reused (fast) + tracker.record_request(25); // Reused (fast) + tracker.record_request(120); // New connection (slow) + tracker.record_request(40); // Reused (fast) + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 5); + assert_eq!(stats.likely_reused_connections, 3); + assert_eq!(stats.likely_new_connections, 2); + assert_eq!(stats.reuse_rate(), 60.0); + assert_eq!(stats.new_connection_rate(), 40.0); + } + + #[test] + fn test_pool_stats_tracker_reset() { + let tracker = PoolStatsTracker::new(100); + + tracker.record_request(50); + tracker.record_request(150); + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 2); + + tracker.reset(); + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 0); + assert_eq!(stats.likely_reused_connections, 0); + assert_eq!(stats.likely_new_connections, 0); + } + + #[test] + fn test_connection_stats_format() { + let stats = ConnectionStats { + total_requests: 100, + likely_new_connections: 25, + likely_reused_connections: 75, + first_request: Some(Instant::now()), + last_request: Some(Instant::now()), + }; + 
+ let formatted = stats.format(); + assert!(formatted.contains("Total: 100")); + assert!(formatted.contains("Reused: 75")); + assert!(formatted.contains("75.0%")); + assert!(formatted.contains("New: 25")); + assert!(formatted.contains("25.0%")); + } + + #[test] + fn test_pool_stats_timing() { + let tracker = PoolStatsTracker::new(100); + + tracker.record_request(50); + std::thread::sleep(Duration::from_millis(100)); + tracker.record_request(50); + + let stats = tracker.stats(); + let duration = stats.duration().unwrap(); + + assert!(duration >= Duration::from_millis(100)); + assert!(duration < Duration::from_millis(200)); + } + + #[test] + fn test_custom_threshold() { + let tracker = PoolStatsTracker::new(200); // Higher threshold + + tracker.record_request(150); // Under threshold - reused + tracker.record_request(250); // Over threshold - new + + let stats = tracker.stats(); + assert_eq!(stats.likely_reused_connections, 1); + assert_eq!(stats.likely_new_connections, 1); + } +} diff --git a/src/data_source.rs b/src/data_source.rs new file mode 100644 index 0000000..3c0ce95 --- /dev/null +++ b/src/data_source.rs @@ -0,0 +1,444 @@ +//! CSV data source for data-driven testing. +//! +//! This module provides functionality to load test data from CSV files and +//! distribute rows across virtual users. Each virtual user gets its own row +//! of data, enabling realistic data-driven load testing. +//! +//! # Features +//! - Load CSV files with headers +//! - Round-robin row distribution to virtual users +//! - Thread-safe access with Arc> +//! - Automatic variable substitution in scenarios +//! - Support for user credentials, product IDs, etc. + +use std::collections::HashMap; +use std::fs::File; +use std::path::Path; +use std::sync::{Arc, Mutex}; +use thiserror::Error; +use tracing::{debug, info}; + +/// Errors that can occur when loading or using CSV data. 
+#[derive(Error, Debug)] +pub enum DataSourceError { + #[error("Failed to read CSV file: {0}")] + CsvReadError(#[from] csv::Error), + + #[error("Failed to open file: {0}")] + IoError(#[from] std::io::Error), + + #[error("CSV file is empty or has no data rows")] + EmptyData, + + #[error("CSV file has no headers")] + NoHeaders, + + #[error("No data available (all rows consumed)")] + NoDataAvailable, +} + +/// A single row of CSV data as a map of column name -> value. +pub type DataRow = HashMap; + +/// CSV data source for data-driven testing. +/// +/// Loads CSV files and provides round-robin access to rows for virtual users. +/// Each virtual user gets a unique row of data to use in their scenario. +/// +/// # Example CSV File +/// ```csv +/// username,password,email +/// user1,pass123,user1@example.com +/// user2,pass456,user2@example.com +/// user3,pass789,user3@example.com +/// ``` +/// +/// # Example Usage +/// ```rust,no_run +/// use rust_loadtest::data_source::CsvDataSource; +/// +/// let data_source = CsvDataSource::from_file("users.csv").unwrap(); +/// let row = data_source.next_row().unwrap(); +/// println!("Username: {}", row.get("username").unwrap()); +/// ``` +#[derive(Clone)] +pub struct CsvDataSource { + /// All data rows from the CSV file + rows: Arc>>, + + /// Current index for round-robin distribution + current_index: Arc>, + + /// Column headers from the CSV + headers: Vec, +} + +impl CsvDataSource { + /// Load a CSV file from the given path. + /// + /// # Arguments + /// * `path` - Path to the CSV file + /// + /// # Returns + /// A CsvDataSource instance with all rows loaded + /// + /// # Errors + /// Returns error if file cannot be read, has no headers, or is empty + pub fn from_file>(path: P) -> Result { + let path_ref = path.as_ref(); + info!(path = ?path_ref, "Loading CSV data file"); + + let file = File::open(path_ref)?; + let mut reader = csv::Reader::from_reader(file); + + // Get headers + let headers = reader + .headers()? 
+ .iter() + .map(|h| h.to_string()) + .collect::>(); + + if headers.is_empty() { + return Err(DataSourceError::NoHeaders); + } + + debug!(headers = ?headers, "CSV headers loaded"); + + // Read all rows + let mut rows = Vec::new(); + for result in reader.records() { + let record = result?; + let mut row = HashMap::new(); + + for (i, header) in headers.iter().enumerate() { + if let Some(value) = record.get(i) { + row.insert(header.clone(), value.to_string()); + } + } + + rows.push(row); + } + + if rows.is_empty() { + return Err(DataSourceError::EmptyData); + } + + info!( + path = ?path_ref, + rows = rows.len(), + columns = headers.len(), + "CSV data loaded successfully" + ); + + Ok(Self { + rows: Arc::new(Mutex::new(rows)), + current_index: Arc::new(Mutex::new(0)), + headers, + }) + } + + /// Create a data source from raw CSV string (useful for testing). + /// + /// # Arguments + /// * `csv_content` - CSV content as a string with headers + /// + /// # Returns + /// A CsvDataSource instance + pub fn from_string(csv_content: &str) -> Result { + let mut reader = csv::Reader::from_reader(csv_content.as_bytes()); + + // Get headers + let headers = reader + .headers()? + .iter() + .map(|h| h.to_string()) + .collect::>(); + + if headers.is_empty() { + return Err(DataSourceError::NoHeaders); + } + + // Read all rows + let mut rows = Vec::new(); + for result in reader.records() { + let record = result?; + let mut row = HashMap::new(); + + for (i, header) in headers.iter().enumerate() { + if let Some(value) = record.get(i) { + row.insert(header.clone(), value.to_string()); + } + } + + rows.push(row); + } + + if rows.is_empty() { + return Err(DataSourceError::EmptyData); + } + + Ok(Self { + rows: Arc::new(Mutex::new(rows)), + current_index: Arc::new(Mutex::new(0)), + headers, + }) + } + + /// Get the next row in round-robin fashion. + /// + /// Returns rows in sequence, wrapping back to the first row after the last. 
+ /// Thread-safe for concurrent access by multiple virtual users. + /// + /// # Returns + /// A clone of the next data row + pub fn next_row(&self) -> Result { + let rows = self.rows.lock().unwrap(); + let mut index = self.current_index.lock().unwrap(); + + if rows.is_empty() { + return Err(DataSourceError::NoDataAvailable); + } + + let row = rows[*index % rows.len()].clone(); + *index += 1; + + debug!( + index = *index - 1, + row_count = rows.len(), + "Retrieved data row" + ); + + Ok(row) + } + + /// Get a specific row by index. + /// + /// # Arguments + /// * `index` - Zero-based row index + /// + /// # Returns + /// A clone of the requested row, or None if index is out of bounds + pub fn get_row(&self, index: usize) -> Option { + let rows = self.rows.lock().unwrap(); + rows.get(index).cloned() + } + + /// Get the total number of data rows. + pub fn row_count(&self) -> usize { + let rows = self.rows.lock().unwrap(); + rows.len() + } + + /// Get the column headers. + pub fn headers(&self) -> &[String] { + &self.headers + } + + /// Reset the row index to start from the beginning. + pub fn reset(&self) { + let mut index = self.current_index.lock().unwrap(); + *index = 0; + debug!("Data source index reset to 0"); + } + + /// Get all rows (useful for inspection/debugging). + pub fn all_rows(&self) -> Vec { + let rows = self.rows.lock().unwrap(); + rows.clone() + } + + /// Apply data from a row to a variable map. + /// + /// This copies all values from the data row into the provided map, + /// making them available for variable substitution in scenarios. + /// + /// # Arguments + /// * `row` - Data row to extract values from + /// * `variables` - Target variable map to populate + pub fn apply_row_to_variables(row: &DataRow, variables: &mut HashMap) { + for (key, value) in row { + variables.insert(key.clone(), value.clone()); + } + } +} + +/// Builder for creating CSV data sources with options. 
pub struct CsvDataSourceBuilder {
    // File path to load from; used only when `content` is not set.
    path: Option<String>,
    // Inline CSV content; takes precedence over `path` in `build`.
    content: Option<String>,
}

impl CsvDataSourceBuilder {
    /// Create a new builder with neither path nor content set.
    pub fn new() -> Self {
        Self {
            path: None,
            content: None,
        }
    }

    /// Set the file path to load.
    pub fn path<P: AsRef<Path>>(mut self, path: P) -> Self {
        self.path = Some(path.as_ref().to_string_lossy().to_string());
        self
    }

    /// Set CSV content directly (for testing).
    pub fn content(mut self, content: &str) -> Self {
        self.content = Some(content.to_string());
        self
    }

    /// Build the data source. `content` wins over `path` when both are set.
    // NOTE(review): when neither path nor content is provided this reuses
    // `EmptyData`, which is slightly misleading as an error message — a
    // dedicated variant would be clearer. Confirm before changing.
    pub fn build(self) -> Result<CsvDataSource, DataSourceError> {
        if let Some(content) = self.content {
            CsvDataSource::from_string(&content)
        } else if let Some(path) = self.path {
            CsvDataSource::from_file(path)
        } else {
            Err(DataSourceError::EmptyData)
        }
    }
}

impl Default for CsvDataSourceBuilder {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Three-row fixture shared by every test in this module.
    const TEST_CSV: &str = r#"username,password,email
user1,pass123,user1@example.com
user2,pass456,user2@example.com
user3,pass789,user3@example.com"#;

    #[test]
    fn test_from_string() {
        let ds = CsvDataSource::from_string(TEST_CSV).unwrap();
        assert_eq!(ds.row_count(), 3);
        assert_eq!(ds.headers(), &["username", "password", "email"]);
    }

    #[test]
    fn test_next_row_round_robin() {
        let ds = CsvDataSource::from_string(TEST_CSV).unwrap();

        let row1 = ds.next_row().unwrap();
        assert_eq!(row1.get("username").unwrap(), "user1");

        let row2 = ds.next_row().unwrap();
        assert_eq!(row2.get("username").unwrap(), "user2");

        let row3 = ds.next_row().unwrap();
        assert_eq!(row3.get("username").unwrap(), "user3");

        // Should wrap back to first row
        let row4 = ds.next_row().unwrap();
        assert_eq!(row4.get("username").unwrap(), "user1");
    }

    #[test]
    fn test_get_row_by_index() {
        let ds = CsvDataSource::from_string(TEST_CSV).unwrap();

        let row = ds.get_row(1).unwrap();
        assert_eq!(row.get("username").unwrap(), "user2");

        // Out-of-bounds index returns None rather than panicking.
        assert!(ds.get_row(999).is_none());
    }

    #[test]
    fn test_reset() {
        let ds = CsvDataSource::from_string(TEST_CSV).unwrap();

        ds.next_row().unwrap();
        ds.next_row().unwrap();

        ds.reset();

        let row = ds.next_row().unwrap();
        assert_eq!(row.get("username").unwrap(), "user1");
    }

    #[test]
    fn test_apply_row_to_variables() {
        let ds = CsvDataSource::from_string(TEST_CSV).unwrap();
        let row = ds.next_row().unwrap();

        let mut variables = HashMap::new();
        CsvDataSource::apply_row_to_variables(&row, &mut variables);

        assert_eq!(variables.get("username").unwrap(), "user1");
        assert_eq!(variables.get("password").unwrap(), "pass123");
        assert_eq!(variables.get("email").unwrap(), "user1@example.com");
    }

    #[test]
    fn test_empty_csv() {
        // Headers only, no data rows -> EmptyData error.
        let empty_csv = "username,password\n";
        let result = CsvDataSource::from_string(empty_csv);
        assert!(result.is_err());
    }

    #[test]
    fn test_no_headers() {
        let no_headers = "";
        let result = CsvDataSource::from_string(no_headers);
        assert!(result.is_err());
    }

    #[test]
    fn test_concurrent_access() {
        use std::sync::Arc;
        use std::thread;

        let ds = Arc::new(CsvDataSource::from_string(TEST_CSV).unwrap());
        let mut handles = vec![];

        // Spawn 10 threads, each getting 5 rows
        for _ in 0..10 {
            let ds_clone = Arc::clone(&ds);
            let handle = thread::spawn(move || {
                for _ in 0..5 {
                    let row = ds_clone.next_row().unwrap();
                    assert!(row.contains_key("username"));
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        // Should have distributed 50 rows total across 3 users
        // Index should be at 50
        let rows = ds.all_rows();
        assert_eq!(rows.len(), 3);
    }

    #[test]
    fn test_builder_with_content() {
        let ds = CsvDataSourceBuilder::new()
            .content(TEST_CSV)
            .build()
            .unwrap();

        assert_eq!(ds.row_count(), 3);
    }

    #[test]
    fn test_all_rows() {
        let ds = CsvDataSource::from_string(TEST_CSV).unwrap();
        let rows = ds.all_rows();

        assert_eq!(rows.len(), 3);
        assert_eq!(rows[0].get("username").unwrap(), "user1");
        assert_eq!(rows[1].get("username").unwrap(), "user2");
        assert_eq!(rows[2].get("username").unwrap(), "user3");
    }
}

// ---------------------------------------------------------------------------
// src/errors.rs
// ---------------------------------------------------------------------------

//! Error categorization for better diagnostics and reporting.
//!
//! This module provides classification of HTTP errors into meaningful categories
//! for better analysis of load test failures. Errors are categorized by type
//! (client errors, server errors, network issues, timeouts) for detailed reporting.

use std::fmt;

/// Categories of errors that can occur during load testing.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ErrorCategory {
    /// HTTP 4xx errors (client errors)
    ClientError,

    /// HTTP 5xx errors (server errors)
    ServerError,

    /// Network connectivity errors (DNS, connection refused, etc.)
    NetworkError,

    /// Request timeout errors
    TimeoutError,

    /// TLS/SSL certificate errors
    TlsError,

    /// Other/unknown errors
    OtherError,
}

impl ErrorCategory {
    /// Categorize an HTTP status code.
    ///
    /// # Arguments
    /// * `status_code` - HTTP status code (200, 404, 500, etc.)
    ///
    /// # Returns
    /// The appropriate error category, or None if status is success (2xx/3xx)
    pub fn from_status_code(status_code: u16) -> Option<Self> {
        match status_code {
            200..=399 => None, // Success responses
            400..=499 => Some(ErrorCategory::ClientError),
            500..=599 => Some(ErrorCategory::ServerError),
            // NOTE(review): 1xx informational codes also land here as
            // OtherError — confirm that is intended.
            _ => Some(ErrorCategory::OtherError),
        }
    }

    /// Categorize a reqwest error.
    ///
    /// # Arguments
    /// * `error` - The reqwest error to categorize
    ///
    /// # Returns
    /// The appropriate error category
    // The checks are ordered from most to least specific: timeout and
    // connect errors are unambiguous, so they are tested before the
    // broader request/body buckets and the string-matching fallback.
    pub fn from_reqwest_error(error: &reqwest::Error) -> Self {
        if error.is_timeout() {
            ErrorCategory::TimeoutError
        } else if error.is_connect() {
            ErrorCategory::NetworkError
        } else if error.is_request() {
            // Request building/sending errors
            ErrorCategory::NetworkError
        } else if error.is_body() || error.is_decode() {
            // Response body errors - usually network or server issues
            ErrorCategory::NetworkError
        } else if error.is_redirect() {
            // Redirect errors
            ErrorCategory::ClientError
        } else {
            // Check error message for common patterns; this is a
            // best-effort fallback when reqwest's predicates say nothing.
            let error_msg = error.to_string().to_lowercase();

            if error_msg.contains("certificate")
                || error_msg.contains("tls")
                || error_msg.contains("ssl")
            {
                ErrorCategory::TlsError
            } else if error_msg.contains("timeout") {
                ErrorCategory::TimeoutError
            } else if error_msg.contains("dns")
                || error_msg.contains("resolve")
                || error_msg.contains("connect")
                || error_msg.contains("connection")
            {
                ErrorCategory::NetworkError
            } else {
                ErrorCategory::OtherError
            }
        }
    }

    /// Get the Prometheus label for this error category.
    pub fn label(&self) -> &'static str {
        match self {
            ErrorCategory::ClientError => "client_error",
            ErrorCategory::ServerError => "server_error",
            ErrorCategory::NetworkError => "network_error",
            ErrorCategory::TimeoutError => "timeout_error",
            ErrorCategory::TlsError => "tls_error",
            ErrorCategory::OtherError => "other_error",
        }
    }

    /// Get a human-readable description of this error category.
    pub fn description(&self) -> &'static str {
        match self {
            ErrorCategory::ClientError => "HTTP 4xx Client Errors",
            ErrorCategory::ServerError => "HTTP 5xx Server Errors",
            ErrorCategory::NetworkError => "Network/Connection Errors",
            ErrorCategory::TimeoutError => "Request Timeout Errors",
            ErrorCategory::TlsError => "TLS/SSL Certificate Errors",
            ErrorCategory::OtherError => "Other/Unknown Errors",
        }
    }

    /// Get all error categories in a consistent order.
    pub fn all() -> Vec<ErrorCategory> {
        vec![
            ErrorCategory::ClientError,
            ErrorCategory::ServerError,
            ErrorCategory::NetworkError,
            ErrorCategory::TimeoutError,
            ErrorCategory::TlsError,
            ErrorCategory::OtherError,
        ]
    }
}

impl fmt::Display for ErrorCategory {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

/// Detailed error information with categorization.
#[derive(Debug, Clone)]
pub struct CategorizedError {
    /// The error category
    pub category: ErrorCategory,

    /// HTTP status code if available
    pub status_code: Option<u16>,

    /// Error message
    pub message: String,

    /// Endpoint that failed
    pub endpoint: Option<String>,
}

impl CategorizedError {
    /// Create a new categorized error from an HTTP status code.
    ///
    /// Returns None for success codes (2xx/3xx), mirroring
    /// `ErrorCategory::from_status_code`.
    pub fn from_status(
        status_code: u16,
        message: String,
        endpoint: Option<String>,
    ) -> Option<Self> {
        ErrorCategory::from_status_code(status_code).map(|category| Self {
            category,
            status_code: Some(status_code),
            message,
            endpoint,
        })
    }

    /// Create a new categorized error from a reqwest error.
    pub fn from_reqwest(error: &reqwest::Error, endpoint: Option<String>) -> Self {
        let category = ErrorCategory::from_reqwest_error(error);
        let status_code = error.status().map(|s| s.as_u16());
        let message = error.to_string();

        Self {
            category,
            status_code,
            message,
            endpoint,
        }
    }

    /// Create a custom categorized error.
    pub fn new(category: ErrorCategory, message: String) -> Self {
        Self {
            category,
            status_code: None,
            message,
            endpoint: None,
        }
    }
}

impl fmt::Display for CategorizedError {
    // Format: "[label] HTTP <code>: <message>" when a status code is
    // available, otherwise "[label] <message>".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(status) = self.status_code {
            write!(
                f,
                "[{}] HTTP {}: {}",
                self.category.label(),
                status,
                self.message
            )
        } else {
            write!(f, "[{}] {}", self.category.label(), self.message)
        }
    }
}

/// Helper to categorize common HTTP status codes for display.
/// Unlisted codes map to "Unknown Status".
pub fn categorize_status_code(status_code: u16) -> &'static str {
    match status_code {
        // 2xx Success
        200 => "OK",
        201 => "Created",
        202 => "Accepted",
        204 => "No Content",

        // 3xx Redirection
        301 => "Moved Permanently",
        302 => "Found",
        304 => "Not Modified",

        // 4xx Client Errors
        400 => "Bad Request",
        401 => "Unauthorized",
        403 => "Forbidden",
        404 => "Not Found",
        405 => "Method Not Allowed",
        408 => "Request Timeout",
        409 => "Conflict",
        429 => "Too Many Requests",

        // 5xx Server Errors
        500 => "Internal Server Error",
        502 => "Bad Gateway",
        503 => "Service Unavailable",
        504 => "Gateway Timeout",

        _ => "Unknown Status",
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_categorize_success_codes() {
        assert_eq!(ErrorCategory::from_status_code(200), None);
        assert_eq!(ErrorCategory::from_status_code(201), None);
        assert_eq!(ErrorCategory::from_status_code(204), None);
        assert_eq!(ErrorCategory::from_status_code(301), None);
        assert_eq!(ErrorCategory::from_status_code(302), None);
    }

    #[test]
    fn test_categorize_4xx_errors() {
        assert_eq!(
            ErrorCategory::from_status_code(400),
            Some(ErrorCategory::ClientError)
        );
        assert_eq!(
            ErrorCategory::from_status_code(404),
            Some(ErrorCategory::ClientError)
        );
        assert_eq!(
            ErrorCategory::from_status_code(429),
            Some(ErrorCategory::ClientError)
        );
    }

    #[test]
    fn test_categorize_5xx_errors() {
        assert_eq!(
            ErrorCategory::from_status_code(500),
            Some(ErrorCategory::ServerError)
        );
        assert_eq!(
            ErrorCategory::from_status_code(502),
            Some(ErrorCategory::ServerError)
        );
        assert_eq!(
            ErrorCategory::from_status_code(503),
            Some(ErrorCategory::ServerError)
        );
    }

    #[test]
    fn test_error_category_labels() {
        assert_eq!(ErrorCategory::ClientError.label(), "client_error");
        assert_eq!(ErrorCategory::ServerError.label(), "server_error");
        assert_eq!(ErrorCategory::NetworkError.label(), "network_error");
        assert_eq!(ErrorCategory::TimeoutError.label(), "timeout_error");
        assert_eq!(ErrorCategory::TlsError.label(), "tls_error");
    }

    #[test]
    fn test_error_category_descriptions() {
        assert!(ErrorCategory::ClientError.description().contains("4xx"));
        assert!(ErrorCategory::ServerError.description().contains("5xx"));
        assert!(ErrorCategory::NetworkError
            .description()
            .contains("Network"));
    }

    #[test]
    fn test_categorized_error_from_status() {
        let err = CategorizedError::from_status(
            404,
            "Not Found".to_string(),
            Some("/api/test".to_string()),
        )
        .unwrap();

        assert_eq!(err.category, ErrorCategory::ClientError);
        assert_eq!(err.status_code, Some(404));
        assert_eq!(err.message, "Not Found");
    }

    #[test]
    fn test_categorized_error_display() {
        let err = CategorizedError::new(
            ErrorCategory::ServerError,
            "Service unavailable".to_string(),
        );

        let display = format!("{}", err);
        assert!(display.contains("server_error"));
        assert!(display.contains("Service unavailable"));
    }

    #[test]
    fn test_all_categories() {
        let categories = ErrorCategory::all();
        assert_eq!(categories.len(), 6);
        assert!(categories.contains(&ErrorCategory::ClientError));
        assert!(categories.contains(&ErrorCategory::ServerError));
    }

    #[test]
    fn test_categorize_status_code_names() {
        assert_eq!(categorize_status_code(200), "OK");
        assert_eq!(categorize_status_code(404), "Not Found");
        assert_eq!(categorize_status_code(500), "Internal Server Error");
        assert_eq!(categorize_status_code(503), "Service Unavailable");
    }
}

// ---------------------------------------------------------------------------
// src/executor.rs
// ---------------------------------------------------------------------------

//! Scenario execution engine.
//!
//! This module provides the execution engine for running multi-step scenarios.
//! It handles sequential step execution, context management, variable substitution,
//! and metrics tracking.

use crate::assertions;
use crate::extractor;
use crate::metrics::{
    CONCURRENT_SCENARIOS, SCENARIO_ASSERTIONS_TOTAL, SCENARIO_DURATION_SECONDS,
    SCENARIO_EXECUTIONS_TOTAL, SCENARIO_STEPS_TOTAL, SCENARIO_STEP_DURATION_SECONDS,
};
use crate::scenario::{Scenario, ScenarioContext, Step};
use std::time::Instant;
use tokio::time::sleep;
use tracing::{debug, error, info, warn};

/// Result of executing a single step.
#[derive(Debug)]
pub struct StepResult {
    /// Name of the step that was executed
    pub step_name: String,

    /// Whether the step succeeded
    pub success: bool,

    /// HTTP status code received (None when the request itself failed)
    pub status_code: Option<u16>,

    /// Response time in milliseconds
    pub response_time_ms: u64,

    /// Error message if step failed
    pub error: Option<String>,

    /// Assertions that passed
    pub assertions_passed: usize,

    /// Assertions that failed
    pub assertions_failed: usize,
}

/// Result of executing an entire scenario.
#[derive(Debug)]
pub struct ScenarioResult {
    /// Name of the scenario
    pub scenario_name: String,

    /// Whether all steps succeeded
    pub success: bool,

    /// Results from each step
    pub steps: Vec<StepResult>,

    /// Total scenario execution time in milliseconds
    pub total_time_ms: u64,

    /// Number of steps completed
    pub steps_completed: usize,

    /// Step index where execution stopped (if failed)
    pub failed_at_step: Option<usize>,
}

/// Executor for running scenarios.
///
/// # Cookie and Session Management
///
/// The executor automatically handles cookies when the provided client has
/// cookie support enabled. Each client instance maintains its own cookie jar,
/// providing session isolation per virtual user.
///
/// To enable automatic cookie handling:
/// ```rust,no_run
/// let client = reqwest::Client::builder()
///     .cookie_store(true) // Enable automatic cookie management
///     .build()
///     .unwrap();
/// ```
///
/// Cookies are automatically:
/// - Stored from Set-Cookie response headers
/// - Sent with subsequent requests to the same domain
/// - Isolated per client instance (per virtual user)
pub struct ScenarioExecutor {
    /// Base URL for requests (e.g., "https://api.example.com")
    base_url: String,

    /// HTTP client for making requests
    /// Should have cookie_store(true) enabled for session management
    client: reqwest::Client,
}

impl ScenarioExecutor {
    /// Create a new scenario executor.
    ///
    /// # Arguments
    /// * `base_url` - Base URL for all requests in the scenario
    /// * `client` - HTTP client to use for requests. Should have `cookie_store(true)`
    ///   enabled for automatic cookie and session management.
    ///
    /// # Example
    /// ```rust
    /// use rust_loadtest::executor::ScenarioExecutor;
    ///
    /// let client = reqwest::Client::builder()
    ///     .cookie_store(true) // Enable cookies
    ///     .build()
    ///     .unwrap();
    ///
    /// let executor = ScenarioExecutor::new(
    ///     "https://api.example.com".to_string(),
    ///     client
    /// );
    /// ```
    pub fn new(base_url: String, client: reqwest::Client) -> Self {
        Self { base_url, client }
    }

    /// Execute a scenario with the given context.
    ///
    /// Steps are executed sequentially. If any step fails, execution stops
    /// and returns the partial results.
    ///
    /// # Arguments
    /// * `scenario` - The scenario to execute
    /// * `context` - Execution context (will be modified with extracted variables)
    ///
    /// # Returns
    /// Results from scenario execution including per-step metrics
    pub async fn execute(
        &self,
        scenario: &Scenario,
        context: &mut ScenarioContext,
    ) -> ScenarioResult {
        let scenario_start = Instant::now();
        let mut step_results = Vec::new();
        let mut all_success = true;
        let mut failed_at_step = None;

        // Track concurrent scenario execution
        CONCURRENT_SCENARIOS.inc();

        info!(
            scenario = %scenario.name,
            steps = scenario.steps.len(),
            "Starting scenario execution"
        );

        for (idx, step) in scenario.steps.iter().enumerate() {
            debug!(
                scenario = %scenario.name,
                step = %step.name,
                step_idx = idx,
                "Executing step"
            );

            let step_result = self.execute_step(step, context).await;

            let success = step_result.success;
            step_results.push(step_result);

            if !success {
                all_success = false;
                failed_at_step = Some(idx);
                error!(
                    scenario = %scenario.name,
                    step = %step.name,
                    step_idx = idx,
                    "Step failed, stopping scenario execution"
                );
                break;
            }

            context.next_step();

            // Apply think time if configured (simulates user delay between actions)
            if let Some(ref think_time) = step.think_time {
                let delay = think_time.calculate_delay();
                debug!(
                    scenario = %scenario.name,
                    step = %step.name,
                    think_time_ms = delay.as_millis(),
                    think_time_type = ?think_time,
                    "Applying think time"
                );
                sleep(delay).await;
            }
        }

        let total_time_ms = scenario_start.elapsed().as_millis() as u64;
        let total_time_secs = total_time_ms as f64 / 1000.0;

        let result = ScenarioResult {
            scenario_name: scenario.name.clone(),
            success: all_success,
            steps: step_results,
            total_time_ms,
            steps_completed: context.current_step(),
            failed_at_step,
        };

        // Record scenario metrics
        CONCURRENT_SCENARIOS.dec();
        SCENARIO_DURATION_SECONDS
            .with_label_values(&[&scenario.name])
            .observe(total_time_secs);

        let status = if all_success { "success" } else { "failed" };
        SCENARIO_EXECUTIONS_TOTAL
            .with_label_values(&[&scenario.name, status])
            .inc();

        if all_success {
            info!(
                scenario = %scenario.name,
                total_time_ms,
                steps_completed = result.steps_completed,
                "Scenario completed successfully"
            );
        } else {
            warn!(
                scenario = %scenario.name,
                total_time_ms,
                steps_completed = result.steps_completed,
                failed_at_step = ?failed_at_step,
                "Scenario failed"
            );
        }

        result
    }

    /// Execute a single step.
    ///
    /// Builds the request (with variable substitution in path, headers and
    /// body), sends it, then runs variable extraction and assertions over the
    /// response. A step succeeds only when the HTTP status is success/redirect
    /// AND every assertion passes.
    async fn execute_step(&self, step: &Step, context: &mut ScenarioContext) -> StepResult {
        let step_start = Instant::now();

        // Build the full URL with variable substitution
        let path = context.substitute_variables(&step.request.path);
        let url = format!("{}{}", self.base_url, path);

        debug!(
            step = %step.name,
            method = %step.request.method,
            url = %url,
            "Making HTTP request"
        );

        // Build the request
        let mut request_builder = match step.request.method.to_uppercase().as_str() {
            "GET" => self.client.get(&url),
            "POST" => self.client.post(&url),
            "PUT" => self.client.put(&url),
            "DELETE" => self.client.delete(&url),
            "PATCH" => self.client.patch(&url),
            "HEAD" => self.client.head(&url),
            "OPTIONS" => self.client.request(reqwest::Method::OPTIONS, &url),
            method => {
                error!(step = %step.name, method = %method, "Unsupported HTTP method");
                return StepResult {
                    step_name: step.name.clone(),
                    success: false,
                    status_code: None,
                    response_time_ms: 0,
                    error: Some(format!("Unsupported HTTP method: {}", method)),
                    assertions_passed: 0,
                    assertions_failed: 0,
                };
            }
        };

        // Add headers with variable substitution
        for (key, value) in &step.request.headers {
            let substituted_value = context.substitute_variables(value);
            request_builder = request_builder.header(key, substituted_value);
        }

        // Add body if present with variable substitution
        if let Some(body) = &step.request.body {
            let substituted_body = context.substitute_variables(body);
            request_builder = request_builder.body(substituted_body);
        }

        // Execute the request
        let response_result = request_builder.send().await;

        // NOTE: measured here, so response_time_ms covers send + headers but
        // not the body read below.
        let response_time_ms = step_start.elapsed().as_millis() as u64;

        match response_result {
            Ok(response) => {
                let status = response.status();
                let headers = response.headers().clone();

                debug!(
                    step = %step.name,
                    status = status.as_u16(),
                    response_time_ms,
                    "Received response"
                );

                // Get response body for extraction and assertions
                let body_result = response.text().await;

                let body_result_data = match body_result {
                    Ok(body) => {
                        // Extract variables from response (#27 - IMPLEMENTED)
                        let extracted_count = if !step.extractions.is_empty() {
                            debug!(
                                step = %step.name,
                                extractions = step.extractions.len(),
                                "Extracting variables from response"
                            );

                            let extracted =
                                extractor::extract_variables(&step.extractions, &body, &headers);

                            let count = extracted.len();

                            // Store extracted variables in context
                            for (name, value) in extracted {
                                debug!(
                                    step = %step.name,
                                    variable = %name,
                                    value = %value,
                                    "Stored extracted variable"
                                );
                                context.set_variable(name, value);
                            }

                            count
                        } else {
                            0
                        };

                        // Run assertions on response (#30 - IMPLEMENTED)
                        let (assertions_passed, assertions_failed) = if !step.assertions.is_empty()
                        {
                            debug!(
                                step = %step.name,
                                assertions = step.assertions.len(),
                                "Running assertions on response"
                            );

                            let assertion_results = assertions::run_assertions(
                                &step.assertions,
                                status.as_u16(),
                                response_time_ms,
                                &body,
                                &headers,
                            );

                            let passed = assertion_results.iter().filter(|r| r.passed).count();
                            let failed = assertion_results.iter().filter(|r| !r.passed).count();

                            // Log assertion results
                            for result in &assertion_results {
                                if result.passed {
                                    debug!(
                                        step = %step.name,
                                        assertion = ?result.assertion,
                                        "Assertion passed"
                                    );
                                } else {
                                    warn!(
                                        step = %step.name,
                                        assertion = ?result.assertion,
                                        error = ?result.error_message,
                                        "Assertion failed"
                                    );
                                }

                                // Record assertion metrics
                                // NOTE(review): the first label is the literal
                                // "scenario" rather than the scenario name —
                                // looks like a placeholder; confirm intended.
                                let result_label = if result.passed { "passed" } else { "failed" };
                                SCENARIO_ASSERTIONS_TOTAL
                                    .with_label_values(&["scenario", &step.name, result_label])
                                    .inc();
                            }

                            (passed, failed)
                        } else {
                            (0, 0)
                        };

                        // Step succeeds if HTTP status is success/redirect AND all assertions pass
                        let http_success = status.is_success() || status.is_redirection();
                        let all_assertions_pass = assertions_failed == 0;
                        let success = http_success && all_assertions_pass;

                        let error_msg = if !success {
                            if !http_success {
                                Some(format!("HTTP {}", status.as_u16()))
                            } else if !all_assertions_pass {
                                Some(format!("{} assertion(s) failed", assertions_failed))
                            } else {
                                None
                            }
                        } else {
                            None
                        };

                        (
                            success,
                            extracted_count,
                            assertions_passed,
                            assertions_failed,
                            error_msg,
                        )
                    }
                    Err(e) => {
                        warn!(
                            step = %step.name,
                            error = %e,
                            "Failed to read response body"
                        );
                        (
                            false,
                            0,
                            0,
                            0,
                            Some(format!("Failed to read response body: {}", e)),
                        )
                    }
                };

                let (success, _extracted_count, assertions_passed, assertions_failed, error_msg) =
                    body_result_data;

                // Record step metrics
                // NOTE(review): same hard-coded "scenario" label as above.
                let response_time_secs = response_time_ms as f64 / 1000.0;
                SCENARIO_STEP_DURATION_SECONDS
                    .with_label_values(&["scenario", &step.name])
                    .observe(response_time_secs);

                let step_status = if success { "success" } else { "failed" };
                SCENARIO_STEPS_TOTAL
                    .with_label_values(&["scenario", &step.name, step_status])
                    .inc();

                debug!(
                    step = %step.name,
                    status_code = status.as_u16(),
                    success = success,
                    assertions_passed = assertions_passed,
                    assertions_failed = assertions_failed,
                    "Step execution complete"
                );

                StepResult {
                    step_name: step.name.clone(),
                    success,
                    status_code: Some(status.as_u16()),
                    response_time_ms,
                    error: error_msg,
                    assertions_passed,
                    assertions_failed,
                }
            }
            Err(e) => {
                error!(
                    step = %step.name,
                    error = %e,
                    response_time_ms,
                    "Request failed"
                );

                // Record failed step metrics
                SCENARIO_STEPS_TOTAL
                    .with_label_values(&["scenario", &step.name, "failed"])
                    .inc();

                StepResult {
                    step_name: step.name.clone(),
                    success: false,
                    status_code: None,
                    response_time_ms,
                    error: Some(e.to_string()),
                    assertions_passed: 0,
                    assertions_failed: 0,
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_scenario_result_success() {
        let result = ScenarioResult {
            scenario_name: "Test".to_string(),
            success: true,
            steps: vec![],
            total_time_ms: 100,
            steps_completed: 3,
            failed_at_step: None,
        };

        assert!(result.success);
        assert_eq!(result.steps_completed, 3);
        assert_eq!(result.failed_at_step, None);
    }

    #[test]
    fn test_scenario_result_failure() {
        let result = ScenarioResult {
            scenario_name: "Test".to_string(),
            success: false,
            steps: vec![],
            total_time_ms: 50,
            steps_completed: 1,
            failed_at_step: Some(1),
        };

        assert!(!result.success);
        assert_eq!(result.steps_completed, 1);
        assert_eq!(result.failed_at_step, Some(1));
    }

    #[test]
    fn test_step_result_success() {
        let result = StepResult {
            step_name: "Login".to_string(),
            success: true,
            status_code: Some(200),
            response_time_ms: 150,
            error: None,
            assertions_passed: 2,
            assertions_failed: 0,
        };

        assert!(result.success);
        assert_eq!(result.status_code, Some(200));
        assert_eq!(result.error, None);
    }

    #[tokio::test]
    async fn test_executor_creation() {
        let client = reqwest::Client::new();
        let executor = ScenarioExecutor::new("https://example.com".to_string(), client);

        assert_eq!(executor.base_url, "https://example.com");
    }

    // Integration tests with actual HTTP calls would go here
    // For now, keeping tests simple to avoid external dependencies
}
a/src/extractor.rs b/src/extractor.rs new file mode 100644 index 0000000..98a48be --- /dev/null +++ b/src/extractor.rs @@ -0,0 +1,438 @@ +//! Variable extraction from HTTP responses. +//! +//! This module provides functionality to extract values from HTTP responses +//! using various methods: JSONPath, Regex, HTTP headers, and cookies. + +use crate::scenario::{Extractor, VariableExtraction}; +use regex::Regex; +use serde_json::Value; +use std::collections::HashMap; +use thiserror::Error; +use tracing::{debug, warn}; + +/// Errors that can occur during variable extraction. +#[derive(Error, Debug)] +pub enum ExtractionError { + #[error("JSONPath query failed: {0}")] + JsonPathError(String), + + #[error("Invalid JSON response: {0}")] + InvalidJson(String), + + #[error("Regex compilation failed: {0}")] + RegexError(#[from] regex::Error), + + #[error("Regex pattern did not match")] + RegexNoMatch, + + #[error("Named capture group '{0}' not found in regex")] + RegexGroupNotFound(String), + + #[error("Header '{0}' not found in response")] + HeaderNotFound(String), + + #[error("Cookie '{0}' not found in response")] + CookieNotFound(String), + + #[error("Extraction failed: {0}")] + Other(String), +} + +/// Extract variables from an HTTP response. 
+///
+/// # Arguments
+/// * `extractions` - List of variable extractions to perform
+/// * `response_body` - Response body as string
+/// * `response_headers` - Response headers
+///
+/// # Returns
+/// HashMap of extracted variable names to values
+pub fn extract_variables(
+    extractions: &[VariableExtraction],
+    response_body: &str,
+    response_headers: &reqwest::header::HeaderMap,
+) -> HashMap<String, String> {
+    let mut variables = HashMap::new();
+
+    for extraction in extractions {
+        debug!(
+            variable_name = %extraction.name,
+            extractor = ?extraction.extractor,
+            "Attempting variable extraction"
+        );
+
+        match extract_value(&extraction.extractor, response_body, response_headers) {
+            Ok(value) => {
+                debug!(
+                    variable_name = %extraction.name,
+                    value = %value,
+                    "Successfully extracted variable"
+                );
+                variables.insert(extraction.name.clone(), value);
+            }
+            Err(e) => {
+                warn!(
+                    variable_name = %extraction.name,
+                    error = %e,
+                    "Failed to extract variable"
+                );
+                // Don't insert the variable if extraction fails
+            }
+        }
+    }
+
+    variables
+}
+
+/// Extract a single value using the specified extractor.
+fn extract_value(
+    extractor: &Extractor,
+    response_body: &str,
+    response_headers: &reqwest::header::HeaderMap,
+) -> Result<String, ExtractionError> {
+    match extractor {
+        Extractor::JsonPath(path) => extract_json_path(response_body, path),
+        Extractor::Regex { pattern, group } => extract_regex(response_body, pattern, group),
+        Extractor::Header(header_name) => extract_header(response_headers, header_name),
+        Extractor::Cookie(cookie_name) => extract_cookie(response_headers, cookie_name),
+    }
+}
+
+/// Extract value using JSONPath query.
+///
+/// # Example
+/// ```
+/// use rust_loadtest::extractor::extract_json_path;
+///
+/// let json = r#"{"user": {"id": "123", "name": "Alice"}}"#;
+/// let result = extract_json_path(json, "$.user.id").unwrap();
+/// assert_eq!(result, "123");
+/// ```
+pub fn extract_json_path(json_body: &str, path: &str) -> Result<String, ExtractionError> {
+    // Parse JSON
+    let json: Value =
+        serde_json::from_str(json_body).map_err(|e| ExtractionError::InvalidJson(e.to_string()))?;
+
+    // Use serde_json_path to query
+    use serde_json_path::JsonPath;
+
+    let json_path = JsonPath::parse(path)
+        .map_err(|e| ExtractionError::JsonPathError(format!("Invalid JSONPath: {}", e)))?;
+
+    let node_list = json_path.query(&json);
+
+    // Get first match
+    if let Ok(value) = node_list.exactly_one() {
+        // Convert value to string
+        match value {
+            Value::String(s) => Ok(s.clone()),
+            Value::Number(n) => Ok(n.to_string()),
+            Value::Bool(b) => Ok(b.to_string()),
+            Value::Null => Ok("null".to_string()),
+            Value::Array(_) | Value::Object(_) => {
+                // Return JSON representation for complex types
+                Ok(value.to_string())
+            }
+        }
+    } else {
+        // No match or multiple matches
+        Err(ExtractionError::JsonPathError(format!(
+            "JSONPath '{}' did not match exactly one value",
+            path
+        )))
+    }
+}
+
+/// Extract value using regex with named capture group.
+///
+/// # Example
+/// ```
+/// use rust_loadtest::extractor::extract_regex;
+///
+/// let html = r#"<div id="user-123">Alice</div>"#;
+/// let result = extract_regex(html, r#"id="user-(?P<id>\d+)""#, "id").unwrap();
+/// assert_eq!(result, "123");
+/// ```
+pub fn extract_regex(text: &str, pattern: &str, group: &str) -> Result<String, ExtractionError> {
+    let re = Regex::new(pattern)?;
+
+    if let Some(captures) = re.captures(text) {
+        if let Some(matched) = captures.name(group) {
+            Ok(matched.as_str().to_string())
+        } else {
+            Err(ExtractionError::RegexGroupNotFound(group.to_string()))
+        }
+    } else {
+        Err(ExtractionError::RegexNoMatch)
+    }
+}
+
+/// Extract value from response header.
+///
+/// # Example
+/// ```
+/// use reqwest::header::{HeaderMap, HeaderValue, CONTENT_TYPE};
+/// use rust_loadtest::extractor::extract_header;
+///
+/// let mut headers = HeaderMap::new();
+/// headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
+///
+/// let result = extract_header(&headers, "content-type").unwrap();
+/// assert_eq!(result, "application/json");
+/// ```
+pub fn extract_header(
+    headers: &reqwest::header::HeaderMap,
+    header_name: &str,
+) -> Result<String, ExtractionError> {
+    headers
+        .get(header_name)
+        .ok_or_else(|| ExtractionError::HeaderNotFound(header_name.to_string()))?
+        .to_str()
+        .map(|s| s.to_string())
+        .map_err(|e| ExtractionError::Other(format!("Invalid header value: {}", e)))
+}
+
+/// Extract value from Set-Cookie header.
+///
+/// Parses Set-Cookie headers and extracts the specified cookie value.
+///
+/// # Example
+/// ```
+/// use reqwest::header::{HeaderMap, HeaderValue, SET_COOKIE};
+/// use rust_loadtest::extractor::extract_cookie;
+///
+/// let mut headers = HeaderMap::new();
+/// headers.insert(SET_COOKIE, HeaderValue::from_static("session_id=abc123; Path=/; HttpOnly"));
+///
+/// let result = extract_cookie(&headers, "session_id").unwrap();
+/// assert_eq!(result, "abc123");
+/// ```
+pub fn extract_cookie(
+    headers: &reqwest::header::HeaderMap,
+    cookie_name: &str,
+) -> Result<String, ExtractionError> {
+    // Look through all Set-Cookie headers
+    for value in headers.get_all(reqwest::header::SET_COOKIE) {
+        if let Ok(cookie_str) = value.to_str() {
+            // Parse cookie: "name=value; attributes..."
+            if let Some(cookie_part) = cookie_str.split(';').next() {
+                if let Some((name, val)) = cookie_part.split_once('=') {
+                    if name.trim() == cookie_name {
+                        return Ok(val.trim().to_string());
+                    }
+                }
+            }
+        }
+    }
+
+    Err(ExtractionError::CookieNotFound(cookie_name.to_string()))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reqwest::header::{HeaderMap, HeaderValue, CONTENT_TYPE, SET_COOKIE};
+
+    #[test]
+    fn test_extract_json_path_simple() {
+        let json = r#"{"user": {"id": "123", "name": "Alice"}}"#;
+
+        let result = extract_json_path(json, "$.user.id").unwrap();
+        assert_eq!(result, "123");
+
+        let result = extract_json_path(json, "$.user.name").unwrap();
+        assert_eq!(result, "Alice");
+    }
+
+    #[test]
+    fn test_extract_json_path_array() {
+        let json = r#"{"products": [{"id": "prod-1", "name": "Laptop"}, {"id": "prod-2", "name": "Mouse"}]}"#;
+
+        let result = extract_json_path(json, "$.products[0].id").unwrap();
+        assert_eq!(result, "prod-1");
+
+        let result = extract_json_path(json, "$.products[1].name").unwrap();
+        assert_eq!(result, "Mouse");
+    }
+
+    #[test]
+    fn test_extract_json_path_number() {
+        let json = r#"{"price": 99.99, "quantity": 5}"#;
+
+        let result = extract_json_path(json, "$.price").unwrap();
+        assert_eq!(result, "99.99");
+
+        let result =
extract_json_path(json, "$.quantity").unwrap();
+        assert_eq!(result, "5");
+    }
+
+    #[test]
+    fn test_extract_json_path_bool() {
+        let json = r#"{"active": true, "deleted": false}"#;
+
+        let result = extract_json_path(json, "$.active").unwrap();
+        assert_eq!(result, "true");
+
+        let result = extract_json_path(json, "$.deleted").unwrap();
+        assert_eq!(result, "false");
+    }
+
+    #[test]
+    fn test_extract_json_path_not_found() {
+        let json = r#"{"user": {"id": "123"}}"#;
+
+        let result = extract_json_path(json, "$.user.email");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_extract_json_path_invalid_json() {
+        let invalid_json = r#"{"user": "broken"#;
+
+        let result = extract_json_path(invalid_json, "$.user");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_extract_regex_named_group() {
+        let html = r#"<div id="user-123">Alice</div>"#;
+
+        let result = extract_regex(html, r#"id="user-(?P<id>\d+)""#, "id").unwrap();
+        assert_eq!(result, "123");
+    }
+
+    #[test]
+    fn test_extract_regex_multiple_groups() {
+        let text = "Order #12345 for user-678";
+
+        let result = extract_regex(text, r#"Order #(?P<order>\d+)"#, "order").unwrap();
+        assert_eq!(result, "12345");
+
+        let result = extract_regex(text, r#"user-(?P<user>\d+)"#, "user").unwrap();
+        assert_eq!(result, "678");
+    }
+
+    #[test]
+    fn test_extract_regex_no_match() {
+        let text = "No order here";
+
+        let result = extract_regex(text, r#"Order #(?P<order>\d+)"#, "order");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_extract_regex_group_not_found() {
+        let text = "Order #12345";
+
+        let result = extract_regex(text, r#"Order #(?P<order>\d+)"#, "missing_group");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_extract_header() {
+        let mut headers = HeaderMap::new();
+        headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
+        headers.insert("X-Request-ID", HeaderValue::from_static("req-123"));
+
+        let result = extract_header(&headers, "content-type").unwrap();
+        assert_eq!(result, "application/json");
+
+        let result = extract_header(&headers, "x-request-id").unwrap();
+        assert_eq!(result, "req-123");
+    }
+
+    #[test]
+    fn test_extract_header_not_found() {
+        let headers = HeaderMap::new();
+
+        let result = extract_header(&headers, "missing-header");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_extract_cookie() {
+        let mut headers = HeaderMap::new();
+        headers.insert(
+            SET_COOKIE,
+            HeaderValue::from_static("session_id=abc123; Path=/; HttpOnly"),
+        );
+        headers.append(
+            SET_COOKIE,
+            HeaderValue::from_static("user_pref=dark_mode; Path=/"),
+        );
+
+        let result = extract_cookie(&headers, "session_id").unwrap();
+        assert_eq!(result, "abc123");
+
+        let result = extract_cookie(&headers, "user_pref").unwrap();
+        assert_eq!(result, "dark_mode");
+    }
+
+    #[test]
+    fn test_extract_cookie_not_found() {
+        let mut headers = HeaderMap::new();
+
headers.insert( + SET_COOKIE, + HeaderValue::from_static("session_id=abc123; Path=/"), + ); + + let result = extract_cookie(&headers, "missing_cookie"); + assert!(result.is_err()); + } + + #[test] + fn test_extract_cookie_no_cookies() { + let headers = HeaderMap::new(); + + let result = extract_cookie(&headers, "any_cookie"); + assert!(result.is_err()); + } + + #[test] + fn test_extract_variables_multiple() { + let extractions = vec![ + VariableExtraction { + name: "user_id".to_string(), + extractor: Extractor::JsonPath("$.user.id".to_string()), + }, + VariableExtraction { + name: "user_name".to_string(), + extractor: Extractor::JsonPath("$.user.name".to_string()), + }, + ]; + + let json = r#"{"user": {"id": "123", "name": "Alice"}}"#; + let headers = HeaderMap::new(); + + let result = extract_variables(&extractions, json, &headers); + + assert_eq!(result.get("user_id"), Some(&"123".to_string())); + assert_eq!(result.get("user_name"), Some(&"Alice".to_string())); + } + + #[test] + fn test_extract_variables_partial_failure() { + let extractions = vec![ + VariableExtraction { + name: "user_id".to_string(), + extractor: Extractor::JsonPath("$.user.id".to_string()), + }, + VariableExtraction { + name: "missing".to_string(), + extractor: Extractor::JsonPath("$.does.not.exist".to_string()), + }, + ]; + + let json = r#"{"user": {"id": "123"}}"#; + let headers = HeaderMap::new(); + + let result = extract_variables(&extractions, json, &headers); + + // Should extract user_id successfully + assert_eq!(result.get("user_id"), Some(&"123".to_string())); + // Should not include 'missing' since it failed + assert_eq!(result.get("missing"), None); + // Should have exactly 1 variable + assert_eq!(result.len(), 1); + } +} diff --git a/src/lib.rs b/src/lib.rs index fc988e6..01a5bba 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,25 @@ +#![recursion_limit = "256"] + +pub mod assertions; pub mod client; pub mod config; +pub mod config_docs_generator; +pub mod config_hot_reload; 
+pub mod config_merge; +pub mod config_validation; +pub mod config_version; +pub mod connection_pool; +pub mod data_source; +pub mod errors; +pub mod executor; +pub mod extractor; pub mod load_models; +pub mod memory_guard; pub mod metrics; +pub mod multi_scenario; +pub mod percentiles; +pub mod scenario; +pub mod throughput; pub mod utils; pub mod worker; +pub mod yaml_config; diff --git a/src/main.rs b/src/main.rs index 8059603..b212214 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,7 +5,20 @@ use tracing_subscriber::{fmt, EnvFilter}; use rust_loadtest::client::build_client; use rust_loadtest::config::Config; -use rust_loadtest::metrics::{gather_metrics_string, register_metrics, start_metrics_server}; +use rust_loadtest::connection_pool::{PoolConfig, GLOBAL_POOL_STATS}; +use rust_loadtest::memory_guard::{ + init_percentile_tracking_flag, spawn_memory_guard, MemoryGuardConfig, +}; +use rust_loadtest::metrics::{ + gather_metrics_string, register_metrics, start_metrics_server, update_memory_metrics, + CONNECTION_POOL_IDLE_TIMEOUT_SECONDS, CONNECTION_POOL_MAX_IDLE, + PERCENTILE_SAMPLING_RATE_PERCENT, WORKERS_CONFIGURED_TOTAL, +}; +use rust_loadtest::percentiles::{ + format_percentile_table, rotate_all_histograms, GLOBAL_REQUEST_PERCENTILES, + GLOBAL_SCENARIO_PERCENTILES, GLOBAL_STEP_PERCENTILES, +}; +use rust_loadtest::throughput::{format_throughput_table, GLOBAL_THROUGHPUT_TRACKER}; use rust_loadtest::worker::{run_worker, WorkerConfig}; /// Initializes the tracing subscriber for structured logging. @@ -31,6 +44,134 @@ fn init_tracing() { } } +/// Prints percentile latency statistics. 
+fn print_percentile_report(enabled: bool, sampling_rate: u8) { + info!("\n{}", "=".repeat(120)); + info!("PERCENTILE LATENCY REPORT (Issue #33)"); + info!("{}", "=".repeat(120)); + + if !enabled { + info!("\n⚠️ Percentile tracking was DISABLED (PERCENTILE_TRACKING_ENABLED=false)"); + info!("No latency percentile data was collected to reduce memory usage."); + info!("To enable percentile tracking, set PERCENTILE_TRACKING_ENABLED=true\n"); + info!("{}", "=".repeat(120)); + info!("END OF PERCENTILE REPORT"); + info!("{}\n", "=".repeat(120)); + return; + } + + if sampling_rate < 100 { + info!( + "\n📊 Percentile sampling active: {}% of requests recorded \ + (PERCENTILE_SAMPLING_RATE={})", + sampling_rate, sampling_rate + ); + } + + // Single request percentiles + if let Some(request_stats) = GLOBAL_REQUEST_PERCENTILES.stats() { + info!("\n## Single Request Latencies\n"); + info!("{}", request_stats.format()); + info!(""); + } else { + info!("\n## Single Request Latencies\n"); + info!("No single request data collected.\n"); + } + + // Scenario percentiles + let scenario_stats = GLOBAL_SCENARIO_PERCENTILES.all_stats(); + if !scenario_stats.is_empty() { + let scenario_table = format_percentile_table("Scenario Latencies", &scenario_stats); + info!("{}", scenario_table); + } + + // Step percentiles + let step_stats = GLOBAL_STEP_PERCENTILES.all_stats(); + if !step_stats.is_empty() { + let step_table = format_percentile_table("Step Latencies", &step_stats); + info!("{}", step_table); + } + + info!("{}", "=".repeat(120)); + info!("END OF PERCENTILE REPORT"); + info!("{}\n", "=".repeat(120)); +} + +/// Prints per-scenario throughput statistics. 
+fn print_throughput_report() { + info!("\n{}", "=".repeat(120)); + info!("PER-SCENARIO THROUGHPUT REPORT (Issue #35)"); + info!("{}", "=".repeat(120)); + + let all_stats = GLOBAL_THROUGHPUT_TRACKER.all_stats(); + + if !all_stats.is_empty() { + let table = format_throughput_table(&all_stats); + info!("{}", table); + + let total_rps = GLOBAL_THROUGHPUT_TRACKER.total_throughput(); + let elapsed = GLOBAL_THROUGHPUT_TRACKER.elapsed(); + info!( + "\nTotal Throughput: {:.2} scenarios/sec over {:.1}s", + total_rps, + elapsed.as_secs_f64() + ); + } else { + info!("\nNo scenario throughput data collected.\n"); + } + + info!("{}", "=".repeat(120)); + info!("END OF THROUGHPUT REPORT"); + info!("{}\n", "=".repeat(120)); +} + +/// Prints connection pool statistics. +fn print_pool_report() { + info!("\n{}", "=".repeat(120)); + info!("CONNECTION POOL STATISTICS (Issue #36)"); + info!("{}", "=".repeat(120)); + + let stats = GLOBAL_POOL_STATS.stats(); + + if stats.total_requests > 0 { + info!("\nConnection Reuse Analysis:"); + info!(" {}", stats.format()); + + if let Some(duration) = stats.duration() { + info!(" Duration: {:.1}s", duration.as_secs_f64()); + } + + info!("\nInterpretation:"); + if stats.reuse_rate() >= 80.0 { + info!( + " ✅ Excellent connection reuse ({:.1}%)", + stats.reuse_rate() + ); + info!(" Most requests are reusing pooled connections efficiently."); + } else if stats.reuse_rate() >= 50.0 { + info!( + " ⚠️ Moderate connection reuse ({:.1}%)", + stats.reuse_rate() + ); + info!(" Consider increasing pool size or idle timeout."); + } else { + info!(" ❌ Low connection reuse ({:.1}%)", stats.reuse_rate()); + info!(" Many new connections are being established."); + info!(" Check: pool configuration, connection timeouts, load patterns."); + } + + info!("\nNote: Connection classification is based on latency patterns:"); + info!(" - Fast requests (<100ms) likely reused pooled connections"); + info!(" - Slow requests (≥100ms) likely established new connections (TLS 
handshake)"); + } else { + info!("\nNo connection pool data collected.\n"); + } + + info!("\n{}", "=".repeat(120)); + info!("END OF POOL REPORT"); + info!("{}\n", "=".repeat(120)); +} + /// Prints helpful configuration documentation. fn print_config_help() { eprintln!("Required environment variables:"); @@ -39,10 +180,10 @@ fn print_config_help() { ); eprintln!(); eprintln!("Optional environment variables:"); - eprintln!(" REQUEST_TYPE - HTTP method: GET or POST (default: POST)"); + eprintln!(" REQUEST_TYPE - HTTP method: GET, POST, PUT, PATCH, DELETE, HEAD, OPTIONS (default: GET)"); eprintln!(" SEND_JSON - Send JSON payload: true or false (default: false)"); eprintln!( - " JSON_PAYLOAD - JSON body for POST requests (required if SEND_JSON=true)" + " JSON_PAYLOAD - JSON body for POST/PUT/PATCH requests (required if SEND_JSON=true)" ); eprintln!( " NUM_CONCURRENT_TASKS - Number of concurrent workers (default: 10, must be > 0)" @@ -123,6 +264,78 @@ async fn main() -> Result<(), Box> { "Prometheus metrics server started" ); + // Initialize percentile tracking runtime flag (Issue #72) + init_percentile_tracking_flag(config.percentile_tracking_enabled); + if config.percentile_tracking_enabled { + info!("Percentile tracking initialized and enabled"); + } else { + info!("Percentile tracking initialized but DISABLED via config"); + } + + // Spawn auto-OOM memory guard (Issue #72) + if config.percentile_tracking_enabled { + let memory_guard_config = MemoryGuardConfig { + warning_threshold_percent: config.memory_warning_threshold_percent, + critical_threshold_percent: config.memory_critical_threshold_percent, + auto_disable_on_warning: config.auto_disable_percentiles_on_warning, + check_interval: Duration::from_secs(5), + }; + tokio::spawn(async move { + spawn_memory_guard(memory_guard_config).await; + }); + } else { + info!("Memory guard not started - percentile tracking disabled via config"); + } + + // Spawn memory monitoring task (Issue #69) + tokio::spawn(async move { + 
let mut interval = time::interval(Duration::from_secs(10)); + loop { + interval.tick().await; + if let Err(e) = update_memory_metrics() { + error!(error = %e, "Failed to update memory metrics"); + } + } + }); + info!("Memory monitoring started (updates every 10s)"); + + // Spawn histogram rotation task if enabled (Issue #67) + if config.histogram_rotation_interval.as_secs() > 0 { + let rotation_interval = config.histogram_rotation_interval; + tokio::spawn(async move { + let mut interval = time::interval(rotation_interval); + interval.tick().await; // Skip the first immediate tick + loop { + interval.tick().await; + info!( + rotation_interval_secs = rotation_interval.as_secs(), + "Rotating histograms - clearing percentile data to free memory" + ); + rotate_all_histograms(); + info!("Histogram rotation complete - memory freed"); + } + }); + info!( + rotation_interval_secs = config.histogram_rotation_interval.as_secs(), + "Histogram rotation enabled - will rotate every {} seconds", + config.histogram_rotation_interval.as_secs() + ); + } + + // Initialize connection pool configuration metrics (Issue #36) + let pool_config = PoolConfig::default(); + CONNECTION_POOL_MAX_IDLE.set(pool_config.max_idle_per_host as f64); + CONNECTION_POOL_IDLE_TIMEOUT_SECONDS.set(pool_config.idle_timeout.as_secs() as f64); + info!( + max_idle_per_host = pool_config.max_idle_per_host, + idle_timeout_secs = pool_config.idle_timeout.as_secs(), + "Connection pool configuration initialized" + ); + + // Initialize test configuration metrics + WORKERS_CONFIGURED_TOTAL.set(config.num_concurrent_tasks as f64); + PERCENTILE_SAMPLING_RATE_PERCENT.set(config.percentile_sampling_rate as f64); + // Main loop to run for a duration let start_time = time::Instant::now(); @@ -137,6 +350,8 @@ async fn main() -> Result<(), Box> { test_duration: config.test_duration, load_model: config.load_model.clone(), num_concurrent_tasks: config.num_concurrent_tasks, + percentile_tracking_enabled: 
config.percentile_tracking_enabled, + percentile_sampling_rate: config.percentile_sampling_rate, }; let client_clone = client.clone(); @@ -159,6 +374,18 @@ async fn main() -> Result<(), Box> { tokio::time::sleep(Duration::from_secs(2)).await; info!("Collecting final metrics"); + // Print percentile latency statistics (Issue #33, #66) + print_percentile_report( + config.percentile_tracking_enabled, + config.percentile_sampling_rate, + ); + + // Print per-scenario throughput statistics (Issue #35) + print_throughput_report(); + + // Print connection pool statistics (Issue #36) + print_pool_report(); + // Gather and print final metrics let final_metrics_output = gather_metrics_string(®istry_arc); info!("\n--- FINAL METRICS ---\n{}", final_metrics_output); diff --git a/src/memory_guard.rs b/src/memory_guard.rs new file mode 100644 index 0000000..da98340 --- /dev/null +++ b/src/memory_guard.rs @@ -0,0 +1,340 @@ +use std::sync::atomic::{AtomicBool, Ordering}; +use tokio::time::{self, Duration}; +use tracing::{error, info, warn}; + +use crate::metrics::{ + MEMORY_CRITICAL_THRESHOLD_EXCEEDED_TOTAL, MEMORY_WARNING_THRESHOLD_EXCEEDED_TOTAL, + PERCENTILE_TRACKING_ACTIVE_GAUGE, +}; +use crate::percentiles::rotate_all_histograms; + +/// Global atomic flag for runtime control of percentile tracking. +/// When false, workers should skip percentile recording to save memory. +pub static PERCENTILE_TRACKING_ACTIVE: AtomicBool = AtomicBool::new(true); + +/// Memory guard configuration. +#[derive(Debug, Clone)] +pub struct MemoryGuardConfig { + pub warning_threshold_percent: f64, + pub critical_threshold_percent: f64, + pub auto_disable_on_warning: bool, + pub check_interval: Duration, +} + +impl Default for MemoryGuardConfig { + fn default() -> Self { + Self { + warning_threshold_percent: 80.0, + critical_threshold_percent: 90.0, + auto_disable_on_warning: true, + check_interval: Duration::from_secs(5), + } + } +} + +/// Represents current memory usage and limits. 
+#[derive(Debug)]
+pub struct MemoryStatus {
+    pub current_bytes: u64,
+    pub limit_bytes: u64,
+    pub usage_percent: f64,
+}
+
+/// Detects the memory limit for the current process.
+///
+/// For containerized environments (Docker, Kubernetes), checks cgroup limits.
+/// For bare metal, uses system memory as the limit.
+///
+/// Returns limit in bytes, or None if unable to determine.
+#[cfg(target_os = "linux")]
+fn detect_memory_limit() -> Option<u64> {
+    // Try cgroup v2 first (modern Docker/Kubernetes)
+    if let Ok(content) = std::fs::read_to_string("/sys/fs/cgroup/memory.max") {
+        if let Ok(limit) = content.trim().parse::<u64>() {
+            if limit != u64::MAX {
+                info!(
+                    limit_mb = limit / 1024 / 1024,
+                    "Detected cgroup v2 memory limit"
+                );
+                return Some(limit);
+            }
+        }
+    }
+
+    // Try cgroup v1 (older Docker/Kubernetes)
+    if let Ok(content) = std::fs::read_to_string("/sys/fs/cgroup/memory/memory.limit_in_bytes") {
+        if let Ok(limit) = content.trim().parse::<u64>() {
+            // cgroup v1 uses a very large number to indicate "no limit"
+            if limit < (1u64 << 60) {
+                info!(
+                    limit_mb = limit / 1024 / 1024,
+                    "Detected cgroup v1 memory limit"
+                );
+                return Some(limit);
+            }
+        }
+    }
+
+    // Fall back to system total memory
+    if let Ok(content) = std::fs::read_to_string("/proc/meminfo") {
+        for line in content.lines() {
+            if line.starts_with("MemTotal:") {
+                if let Some(kb_str) = line.split_whitespace().nth(1) {
+                    if let Ok(kb) = kb_str.parse::<u64>() {
+                        let bytes = kb * 1024;
+                        info!(
+                            limit_mb = bytes / 1024 / 1024,
+                            "Using system total memory as limit (no cgroup limit detected)"
+                        );
+                        return Some(bytes);
+                    }
+                }
+            }
+        }
+    }
+
+    None
+}
+
+#[cfg(not(target_os = "linux"))]
+fn detect_memory_limit() -> Option<u64> {
+    // On non-Linux systems, we can't easily detect memory limits
+    // Return None and monitoring will be disabled
+    warn!("Memory limit detection not supported on this platform - auto-OOM protection disabled");
+    None
+}
+
+/// Gets current memory usage from /proc/self/status (RSS).
+#[cfg(target_os = "linux")]
+fn get_current_memory_usage() -> Option<u64> {
+    use procfs::process::Process;
+
+    match Process::myself() {
+        Ok(me) => {
+            if let Ok(stat) = me.stat() {
+                // RSS in bytes (Resident Set Size)
+                let rss_bytes = stat.rss * 4096; // RSS is in pages, typically 4KB per page
+                return Some(rss_bytes);
+            }
+        }
+        Err(e) => {
+            tracing::debug!(error = %e, "Failed to read /proc memory stats");
+        }
+    }
+    None
+}
+
+#[cfg(not(target_os = "linux"))]
+fn get_current_memory_usage() -> Option<u64> {
+    None
+}
+
+/// Checks current memory status against limits.
+pub fn check_memory_status(limit_bytes: u64) -> Option<MemoryStatus> {
+    let current_bytes = get_current_memory_usage()?;
+    let usage_percent = (current_bytes as f64 / limit_bytes as f64) * 100.0;
+
+    Some(MemoryStatus {
+        current_bytes,
+        limit_bytes,
+        usage_percent,
+    })
+}
+
+/// State tracking for memory guard to avoid repeated actions.
+struct MemoryGuardState {
+    warning_triggered: bool,
+    critical_triggered: bool,
+    percentiles_disabled_at: Option<std::time::Instant>,
+}
+
+impl MemoryGuardState {
+    fn new() -> Self {
+        Self {
+            warning_triggered: false,
+            critical_triggered: false,
+            percentiles_disabled_at: None,
+        }
+    }
+}
+
+/// Spawns a background task that monitors memory usage and takes defensive actions.
+///
+/// Actions taken based on thresholds:
+/// - **Warning threshold**: Disable percentile tracking, rotate histograms
+/// - **Critical threshold**: Additional aggressive cleanup (future: could add more)
+///
+/// This task runs for the lifetime of the application.
+pub async fn spawn_memory_guard(config: MemoryGuardConfig) { + let limit_bytes = match detect_memory_limit() { + Some(limit) => limit, + None => { + warn!("Unable to detect memory limit - auto-OOM protection disabled"); + return; + } + }; + + info!( + limit_mb = limit_bytes / 1024 / 1024, + warning_threshold = config.warning_threshold_percent, + critical_threshold = config.critical_threshold_percent, + auto_disable = config.auto_disable_on_warning, + "Memory guard started - monitoring every {} seconds", + config.check_interval.as_secs() + ); + + let mut interval = time::interval(config.check_interval); + let mut state = MemoryGuardState::new(); + + loop { + interval.tick().await; + + let status = match check_memory_status(limit_bytes) { + Some(s) => s, + None => { + tracing::debug!("Unable to read current memory usage"); + continue; + } + }; + + let current_mb = status.current_bytes / 1024 / 1024; + let limit_mb = status.limit_bytes / 1024 / 1024; + + // Log periodic status at debug level + tracing::debug!( + current_mb = current_mb, + limit_mb = limit_mb, + usage_percent = format!("{:.1}", status.usage_percent), + "Memory status check" + ); + + // Critical threshold (90% by default) + if status.usage_percent >= config.critical_threshold_percent && !state.critical_triggered { + error!( + current_mb = current_mb, + limit_mb = limit_mb, + usage_percent = format!("{:.1}", status.usage_percent), + "⚠️ CRITICAL memory threshold exceeded! 
Process is at {:.1}% of limit", + status.usage_percent + ); + state.critical_triggered = true; + MEMORY_CRITICAL_THRESHOLD_EXCEEDED_TOTAL.inc(); + + // At critical level, rotate histograms again to free as much memory as possible + if config.auto_disable_on_warning { + info!("Critical threshold: Aggressively rotating histograms"); + rotate_all_histograms(); + } + } + + // Warning threshold (80% by default) + if status.usage_percent >= config.warning_threshold_percent && !state.warning_triggered { + warn!( + current_mb = current_mb, + limit_mb = limit_mb, + usage_percent = format!("{:.1}", status.usage_percent), + "⚠️ Memory warning threshold exceeded! Process is at {:.1}% of limit", + status.usage_percent + ); + state.warning_triggered = true; + MEMORY_WARNING_THRESHOLD_EXCEEDED_TOTAL.inc(); + + if config.auto_disable_on_warning { + info!("Auto-OOM protection triggered - taking defensive actions:"); + info!(" 1. Disabling percentile tracking to prevent further memory growth"); + info!(" 2. 
Rotating all histograms to free existing memory"); + + // Disable percentile tracking globally + PERCENTILE_TRACKING_ACTIVE.store(false, Ordering::SeqCst); + PERCENTILE_TRACKING_ACTIVE_GAUGE.set(0.0); + state.percentiles_disabled_at = Some(std::time::Instant::now()); + + // Clear existing histogram data + rotate_all_histograms(); + + info!("Defensive actions complete - percentile tracking disabled"); + } else { + info!( + "Memory warning threshold exceeded, but auto_disable_on_warning=false - no action taken" + ); + } + } + + // If memory drops back below warning threshold, consider re-enabling (with hysteresis) + if status.usage_percent < config.warning_threshold_percent - 10.0 && state.warning_triggered + { + if let Some(disabled_at) = state.percentiles_disabled_at { + // Only re-enable if it's been at least 60 seconds since we disabled + let elapsed = disabled_at.elapsed(); + if elapsed.as_secs() >= 60 { + info!( + usage_percent = format!("{:.1}", status.usage_percent), + "Memory usage dropped below warning threshold - considering re-enabling percentiles" + ); + + // Don't automatically re-enable for now - too risky + // User can restart the test if they want percentiles back + info!("Percentiles remain disabled for safety - restart test to re-enable"); + } + } + + // Reset warning state (but keep percentiles disabled) + state.warning_triggered = false; + state.critical_triggered = false; + } + } +} + +/// Checks if percentile tracking is currently active. +/// +/// Workers should call this before recording percentile data. +pub fn is_percentile_tracking_active() -> bool { + PERCENTILE_TRACKING_ACTIVE.load(Ordering::Relaxed) +} + +/// Initialize percentile tracking flag based on config. +/// +/// Should be called at startup before spawning workers. 
+pub fn init_percentile_tracking_flag(enabled: bool) { + PERCENTILE_TRACKING_ACTIVE.store(enabled, Ordering::SeqCst); + PERCENTILE_TRACKING_ACTIVE_GAUGE.set(if enabled { 1.0 } else { 0.0 }); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_memory_guard_config_default() { + let config = MemoryGuardConfig::default(); + assert_eq!(config.warning_threshold_percent, 80.0); + assert_eq!(config.critical_threshold_percent, 90.0); + assert!(config.auto_disable_on_warning); + } + + #[test] + fn test_percentile_tracking_flag() { + // Test that we can read and write the flag + init_percentile_tracking_flag(true); + assert!(is_percentile_tracking_active()); + + init_percentile_tracking_flag(false); + assert!(!is_percentile_tracking_active()); + + // Reset to default for other tests + init_percentile_tracking_flag(true); + } + + #[test] + fn test_memory_status_calculation() { + // Simulate a memory status + let status = MemoryStatus { + current_bytes: 800_000_000, // 800 MB + limit_bytes: 1_000_000_000, // 1 GB + usage_percent: 80.0, + }; + + assert_eq!(status.usage_percent, 80.0); + assert!(status.usage_percent < 90.0); // Below critical + } +} diff --git a/src/metrics.rs b/src/metrics.rs index a08f6ed..90433f7 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -1,7 +1,7 @@ use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Response, Server}; use prometheus::{ - Encoder, Gauge, Histogram, IntCounter, IntCounterVec, Opts, Registry, TextEncoder, + Encoder, Gauge, Histogram, HistogramVec, IntCounter, IntCounterVec, Opts, Registry, TextEncoder, }; use std::env; use std::sync::{Arc, Mutex}; @@ -11,6 +11,8 @@ lazy_static::lazy_static! 
{ pub static ref METRIC_NAMESPACE: String = env::var("METRIC_NAMESPACE").unwrap_or_else(|_| "rust_loadtest".to_string()); + // === Single Request Metrics === + pub static ref REQUEST_TOTAL: IntCounter = IntCounter::with_opts( Opts::new("requests_total", "Total number of HTTP requests made") @@ -37,14 +39,258 @@ lazy_static::lazy_static! { "HTTP request latencies in seconds." ).namespace(METRIC_NAMESPACE.as_str()) ).unwrap(); + + // === Scenario Metrics === + + pub static ref SCENARIO_EXECUTIONS_TOTAL: IntCounterVec = + IntCounterVec::new( + Opts::new("scenario_executions_total", "Total number of scenario executions") + .namespace(METRIC_NAMESPACE.as_str()), + &["scenario", "status"] // status: success, failed + ).unwrap(); + + pub static ref SCENARIO_DURATION_SECONDS: HistogramVec = + HistogramVec::new( + prometheus::HistogramOpts::new( + "scenario_duration_seconds", + "Scenario execution duration in seconds" + ).namespace(METRIC_NAMESPACE.as_str()), + &["scenario"] + ).unwrap(); + + pub static ref SCENARIO_STEPS_TOTAL: IntCounterVec = + IntCounterVec::new( + Opts::new("scenario_steps_total", "Total number of scenario steps executed") + .namespace(METRIC_NAMESPACE.as_str()), + &["scenario", "step", "status"] // status: success, failed + ).unwrap(); + + pub static ref SCENARIO_STEP_DURATION_SECONDS: HistogramVec = + HistogramVec::new( + prometheus::HistogramOpts::new( + "scenario_step_duration_seconds", + "Scenario step duration in seconds" + ).namespace(METRIC_NAMESPACE.as_str()), + &["scenario", "step"] + ).unwrap(); + + pub static ref SCENARIO_ASSERTIONS_TOTAL: IntCounterVec = + IntCounterVec::new( + Opts::new("scenario_assertions_total", "Total number of scenario assertions") + .namespace(METRIC_NAMESPACE.as_str()), + &["scenario", "step", "result"] // result: passed, failed + ).unwrap(); + + pub static ref CONCURRENT_SCENARIOS: Gauge = + Gauge::with_opts( + Opts::new("concurrent_scenarios", "Number of scenario executions currently running") + 
.namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + // === Per-Scenario Throughput Metrics (Issue #35) === + + pub static ref SCENARIO_REQUESTS_TOTAL: IntCounterVec = + IntCounterVec::new( + Opts::new("scenario_requests_total", "Total number of requests per scenario") + .namespace(METRIC_NAMESPACE.as_str()), + &["scenario"] + ).unwrap(); + + pub static ref SCENARIO_THROUGHPUT_RPS: prometheus::GaugeVec = + prometheus::GaugeVec::new( + Opts::new("scenario_throughput_rps", "Current throughput (requests per second) per scenario") + .namespace(METRIC_NAMESPACE.as_str()), + &["scenario"] + ).unwrap(); + + // === Error Categorization Metrics (Issue #34) === + + pub static ref REQUEST_ERRORS_BY_CATEGORY: IntCounterVec = + IntCounterVec::new( + Opts::new("request_errors_by_category", "Number of errors by category") + .namespace(METRIC_NAMESPACE.as_str()), + &["category"] // category: client_error, server_error, network_error, timeout_error, tls_error, other_error + ).unwrap(); + + // === Connection Pool Metrics (Issue #36) === + + pub static ref CONNECTION_POOL_MAX_IDLE: Gauge = + Gauge::with_opts( + Opts::new("connection_pool_max_idle_per_host", "Maximum idle connections per host (configuration)") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref CONNECTION_POOL_IDLE_TIMEOUT_SECONDS: Gauge = + Gauge::with_opts( + Opts::new("connection_pool_idle_timeout_seconds", "Idle connection timeout in seconds (configuration)") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref CONNECTION_POOL_REQUESTS_TOTAL: IntCounter = + IntCounter::with_opts( + Opts::new("connection_pool_requests_total", "Total requests tracked for pool analysis") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref CONNECTION_POOL_LIKELY_REUSED: IntCounter = + IntCounter::with_opts( + Opts::new("connection_pool_likely_reused_total", "Requests that likely reused existing connections") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub 
static ref CONNECTION_POOL_LIKELY_NEW: IntCounter = + IntCounter::with_opts( + Opts::new("connection_pool_likely_new_total", "Requests that likely established new connections") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref CONNECTION_POOL_REUSE_RATE: Gauge = + Gauge::with_opts( + Opts::new("connection_pool_reuse_rate_percent", "Percentage of requests reusing connections") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + // === Memory Usage Metrics (Issue #69) === + + pub static ref PROCESS_MEMORY_RSS_BYTES: Gauge = + Gauge::with_opts( + Opts::new("process_memory_rss_bytes", "Resident set size (RSS) memory in bytes") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref PROCESS_MEMORY_VIRTUAL_BYTES: Gauge = + Gauge::with_opts( + Opts::new("process_memory_virtual_bytes", "Virtual memory size in bytes") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref HISTOGRAM_COUNT: Gauge = + Gauge::with_opts( + Opts::new("histogram_count", "Number of active HDR histograms") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + pub static ref HISTOGRAM_MEMORY_ESTIMATE_BYTES: Gauge = + Gauge::with_opts( + Opts::new("histogram_memory_estimate_bytes", "Estimated memory used by histograms") + .namespace(METRIC_NAMESPACE.as_str()) + ).unwrap(); + + // === Memory Guard & Percentile Tracking Metrics (Issue #72) === + + pub static ref PERCENTILE_TRACKING_ACTIVE_GAUGE: Gauge = + Gauge::with_opts( + Opts::new( + "percentile_tracking_active", + "1 if percentile tracking is active, 0 if disabled by memory guard", + ) + .namespace(METRIC_NAMESPACE.as_str()), + ) + .unwrap(); + + pub static ref MEMORY_WARNING_THRESHOLD_EXCEEDED_TOTAL: IntCounter = + IntCounter::with_opts( + Opts::new( + "memory_warning_threshold_exceeded_total", + "Number of times the memory warning threshold has been exceeded", + ) + .namespace(METRIC_NAMESPACE.as_str()), + ) + .unwrap(); + + pub static ref 
MEMORY_CRITICAL_THRESHOLD_EXCEEDED_TOTAL: IntCounter = + IntCounter::with_opts( + Opts::new( + "memory_critical_threshold_exceeded_total", + "Number of times the memory critical threshold has been exceeded", + ) + .namespace(METRIC_NAMESPACE.as_str()), + ) + .unwrap(); + + pub static ref HISTOGRAM_LABELS_EVICTED_TOTAL: IntCounter = + IntCounter::with_opts( + Opts::new( + "histogram_labels_evicted_total", + "Total number of histogram labels evicted due to LRU capacity limit", + ) + .namespace(METRIC_NAMESPACE.as_str()), + ) + .unwrap(); + + // === Test Configuration Metrics === + + pub static ref PERCENTILE_SAMPLING_RATE_PERCENT: Gauge = + Gauge::with_opts( + Opts::new( + "percentile_sampling_rate_percent", + "Configured percentile sampling rate (1-100 percent of requests recorded)", + ) + .namespace(METRIC_NAMESPACE.as_str()), + ) + .unwrap(); + + pub static ref WORKERS_CONFIGURED_TOTAL: Gauge = + Gauge::with_opts( + Opts::new( + "workers_configured_total", + "Number of concurrent worker tasks configured", + ) + .namespace(METRIC_NAMESPACE.as_str()), + ) + .unwrap(); } /// Registers all metrics with the default Prometheus registry. 
pub fn register_metrics() -> Result<(), Box> { + // Single request metrics prometheus::default_registry().register(Box::new(REQUEST_TOTAL.clone()))?; prometheus::default_registry().register(Box::new(REQUEST_STATUS_CODES.clone()))?; prometheus::default_registry().register(Box::new(CONCURRENT_REQUESTS.clone()))?; prometheus::default_registry().register(Box::new(REQUEST_DURATION_SECONDS.clone()))?; + + // Scenario metrics + prometheus::default_registry().register(Box::new(SCENARIO_EXECUTIONS_TOTAL.clone()))?; + prometheus::default_registry().register(Box::new(SCENARIO_DURATION_SECONDS.clone()))?; + prometheus::default_registry().register(Box::new(SCENARIO_STEPS_TOTAL.clone()))?; + prometheus::default_registry().register(Box::new(SCENARIO_STEP_DURATION_SECONDS.clone()))?; + prometheus::default_registry().register(Box::new(SCENARIO_ASSERTIONS_TOTAL.clone()))?; + prometheus::default_registry().register(Box::new(CONCURRENT_SCENARIOS.clone()))?; + + // Per-scenario throughput metrics + prometheus::default_registry().register(Box::new(SCENARIO_REQUESTS_TOTAL.clone()))?; + prometheus::default_registry().register(Box::new(SCENARIO_THROUGHPUT_RPS.clone()))?; + + // Error categorization metrics + prometheus::default_registry().register(Box::new(REQUEST_ERRORS_BY_CATEGORY.clone()))?; + + // Connection pool metrics + prometheus::default_registry().register(Box::new(CONNECTION_POOL_MAX_IDLE.clone()))?; + prometheus::default_registry() + .register(Box::new(CONNECTION_POOL_IDLE_TIMEOUT_SECONDS.clone()))?; + prometheus::default_registry().register(Box::new(CONNECTION_POOL_REQUESTS_TOTAL.clone()))?; + prometheus::default_registry().register(Box::new(CONNECTION_POOL_LIKELY_REUSED.clone()))?; + prometheus::default_registry().register(Box::new(CONNECTION_POOL_LIKELY_NEW.clone()))?; + prometheus::default_registry().register(Box::new(CONNECTION_POOL_REUSE_RATE.clone()))?; + + // Memory usage metrics + prometheus::default_registry().register(Box::new(PROCESS_MEMORY_RSS_BYTES.clone()))?; + 
prometheus::default_registry().register(Box::new(PROCESS_MEMORY_VIRTUAL_BYTES.clone()))?; + prometheus::default_registry().register(Box::new(HISTOGRAM_COUNT.clone()))?; + prometheus::default_registry().register(Box::new(HISTOGRAM_MEMORY_ESTIMATE_BYTES.clone()))?; + + // Memory guard & percentile tracking metrics + prometheus::default_registry().register(Box::new(PERCENTILE_TRACKING_ACTIVE_GAUGE.clone()))?; + prometheus::default_registry() + .register(Box::new(MEMORY_WARNING_THRESHOLD_EXCEEDED_TOTAL.clone()))?; + prometheus::default_registry() + .register(Box::new(MEMORY_CRITICAL_THRESHOLD_EXCEEDED_TOTAL.clone()))?; + prometheus::default_registry().register(Box::new(HISTOGRAM_LABELS_EVICTED_TOTAL.clone()))?; + + // Test configuration metrics + prometheus::default_registry().register(Box::new(PERCENTILE_SAMPLING_RATE_PERCENT.clone()))?; + prometheus::default_registry().register(Box::new(WORKERS_CONFIGURED_TOTAL.clone()))?; + Ok(()) } @@ -104,3 +350,54 @@ pub fn gather_metrics_string(registry: &Arc>) -> String { String::from("# ERROR ENCODING METRICS TO UTF-8") }) } + +/// Updates memory usage metrics (Issue #69). +/// +/// Reads process memory stats from /proc on Linux and estimates +/// histogram memory usage based on active label count. 
+pub fn update_memory_metrics() -> Result<(), Box> { + // Platform-specific memory stats + #[cfg(target_os = "linux")] + { + use procfs::process::Process; + + match Process::myself() { + Ok(me) => { + if let Ok(stat) = me.stat() { + // RSS in bytes (Resident Set Size) + let rss_bytes = stat.rss * 4096; // RSS is in pages, typically 4KB per page + PROCESS_MEMORY_RSS_BYTES.set(rss_bytes as f64); + + // Virtual memory size in bytes + PROCESS_MEMORY_VIRTUAL_BYTES.set(stat.vsize as f64); + } + } + Err(e) => { + // Don't fail if we can't read memory stats + tracing::debug!(error = %e, "Failed to read /proc memory stats"); + } + } + } + + // Histogram metrics (platform-independent) + use crate::percentiles::{ + GLOBAL_REQUEST_PERCENTILES, GLOBAL_SCENARIO_PERCENTILES, GLOBAL_STEP_PERCENTILES, + }; + + let scenario_count = GLOBAL_SCENARIO_PERCENTILES.len(); + let step_count = GLOBAL_STEP_PERCENTILES.len(); + let request_count = if GLOBAL_REQUEST_PERCENTILES.stats().is_some() { + 1 + } else { + 0 + }; + let total_histograms = scenario_count + step_count + request_count; + + HISTOGRAM_COUNT.set(total_histograms as f64); + + // Estimate: 3MB per histogram (conservative average) + let estimated_bytes = total_histograms * 3_000_000; + HISTOGRAM_MEMORY_ESTIMATE_BYTES.set(estimated_bytes as f64); + + Ok(()) +} diff --git a/src/multi_scenario.rs b/src/multi_scenario.rs new file mode 100644 index 0000000..96ddb99 --- /dev/null +++ b/src/multi_scenario.rs @@ -0,0 +1,537 @@ +//! Multi-scenario execution with weighted distribution (Issue #43). +//! +//! This module provides functionality for running multiple scenarios concurrently +//! with weighted traffic distribution, per-scenario metrics, and round-robin +//! distribution across workers. + +use crate::scenario::Scenario; +use rand::Rng; +use std::collections::HashMap; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; + +/// Scenario selector that chooses scenarios based on weighted distribution. 
+/// +/// Uses weighted random selection where each scenario's weight determines +/// its selection probability. +/// +/// # Example +/// ``` +/// use rust_loadtest::multi_scenario::ScenarioSelector; +/// use rust_loadtest::scenario::Scenario; +/// +/// let scenarios = vec![ +/// Scenario { name: "Read".to_string(), weight: 80.0, steps: vec![] }, +/// Scenario { name: "Write".to_string(), weight: 20.0, steps: vec![] }, +/// ]; +/// +/// let selector = ScenarioSelector::new(scenarios); +/// let scenario = selector.select(); +/// // 80% chance of "Read", 20% chance of "Write" +/// ``` +#[derive(Clone)] +pub struct ScenarioSelector { + scenarios: Arc>, + cumulative_weights: Arc>, + total_weight: f64, +} + +impl ScenarioSelector { + /// Create a new scenario selector with weighted scenarios. + /// + /// # Arguments + /// * `scenarios` - List of scenarios with weights + /// + /// # Panics + /// Panics if scenarios list is empty or if any weight is negative. + pub fn new(scenarios: Vec) -> Self { + if scenarios.is_empty() { + panic!("Cannot create ScenarioSelector with empty scenarios list"); + } + + // Validate weights + for scenario in &scenarios { + if scenario.weight < 0.0 { + panic!( + "Scenario '{}' has negative weight: {}", + scenario.name, scenario.weight + ); + } + if scenario.weight == 0.0 { + panic!( + "Scenario '{}' has zero weight. Remove scenarios with zero weight.", + scenario.name + ); + } + } + + // Calculate cumulative weights for weighted random selection + let mut cumulative = Vec::with_capacity(scenarios.len()); + let mut sum = 0.0; + + for scenario in &scenarios { + sum += scenario.weight; + cumulative.push(sum); + } + + Self { + scenarios: Arc::new(scenarios), + cumulative_weights: Arc::new(cumulative), + total_weight: sum, + } + } + + /// Select a scenario based on weighted random distribution. + /// + /// Uses cumulative weight distribution for O(log n) selection. 
+ pub fn select(&self) -> &Scenario { + let mut rng = rand::thread_rng(); + let random = rng.gen_range(0.0..self.total_weight); + + // Binary search for the selected scenario + let index = self + .cumulative_weights + .binary_search_by(|weight| { + if *weight <= random { + std::cmp::Ordering::Less + } else { + std::cmp::Ordering::Greater + } + }) + .unwrap_or_else(|i| i); + + &self.scenarios[index] + } + + /// Get scenario by index. + pub fn get_scenario(&self, index: usize) -> Option<&Scenario> { + self.scenarios.get(index) + } + + /// Get total number of scenarios. + pub fn scenario_count(&self) -> usize { + self.scenarios.len() + } + + /// Get all scenarios. + pub fn scenarios(&self) -> &[Scenario] { + &self.scenarios + } + + /// Get the total weight of all scenarios. + pub fn total_weight(&self) -> f64 { + self.total_weight + } + + /// Calculate the selection probability for each scenario. + pub fn probabilities(&self) -> Vec<(String, f64)> { + self.scenarios + .iter() + .map(|s| { + let probability = s.weight / self.total_weight; + (s.name.clone(), probability) + }) + .collect() + } +} + +/// Round-robin scenario distributor. +/// +/// Distributes scenarios evenly across workers in a round-robin fashion. +/// Each worker gets the next scenario in sequence, cycling through all scenarios. 
+/// +/// # Example +/// ``` +/// use rust_loadtest::multi_scenario::RoundRobinDistributor; +/// use rust_loadtest::scenario::Scenario; +/// +/// let scenarios = vec![ +/// Scenario { name: "S1".to_string(), weight: 1.0, steps: vec![] }, +/// Scenario { name: "S2".to_string(), weight: 1.0, steps: vec![] }, +/// ]; +/// +/// let distributor = RoundRobinDistributor::new(scenarios); +/// let s1 = distributor.next(); // Returns S1 +/// let s2 = distributor.next(); // Returns S2 +/// let s3 = distributor.next(); // Returns S1 (cycles back) +/// ``` +pub struct RoundRobinDistributor { + scenarios: Arc>, + counter: AtomicU64, +} + +impl RoundRobinDistributor { + /// Create a new round-robin distributor. + pub fn new(scenarios: Vec) -> Self { + if scenarios.is_empty() { + panic!("Cannot create RoundRobinDistributor with empty scenarios list"); + } + + Self { + scenarios: Arc::new(scenarios), + counter: AtomicU64::new(0), + } + } + + /// Get the next scenario in round-robin order. + pub fn next(&self) -> &Scenario { + let index = self.counter.fetch_add(1, Ordering::Relaxed) as usize; + &self.scenarios[index % self.scenarios.len()] + } + + /// Get scenario by index. + pub fn get_scenario(&self, index: usize) -> Option<&Scenario> { + self.scenarios.get(index) + } + + /// Get total number of scenarios. + pub fn scenario_count(&self) -> usize { + self.scenarios.len() + } + + /// Get all scenarios. + pub fn scenarios(&self) -> &[Scenario] { + &self.scenarios + } +} + +/// Per-scenario metrics tracker. +/// +/// Tracks execution counts, success/failure rates, and timing metrics +/// for each scenario independently. 
+#[derive(Default)] +pub struct ScenarioMetrics { + /// Total executions per scenario + executions: HashMap, + + /// Successful executions per scenario + successes: HashMap, + + /// Failed executions per scenario + failures: HashMap, + + /// Total execution time in milliseconds per scenario + total_time_ms: HashMap, +} + +impl ScenarioMetrics { + /// Create a new scenario metrics tracker. + pub fn new() -> Self { + Self::default() + } + + /// Initialize metrics for a list of scenarios. + pub fn initialize_scenarios(&mut self, scenarios: &[Scenario]) { + for scenario in scenarios { + self.executions + .insert(scenario.name.clone(), AtomicU64::new(0)); + self.successes + .insert(scenario.name.clone(), AtomicU64::new(0)); + self.failures + .insert(scenario.name.clone(), AtomicU64::new(0)); + self.total_time_ms + .insert(scenario.name.clone(), AtomicU64::new(0)); + } + } + + /// Record a scenario execution. + pub fn record_execution(&self, scenario_name: &str, success: bool, duration_ms: u64) { + if let Some(counter) = self.executions.get(scenario_name) { + counter.fetch_add(1, Ordering::Relaxed); + } + + if success { + if let Some(counter) = self.successes.get(scenario_name) { + counter.fetch_add(1, Ordering::Relaxed); + } + } else if let Some(counter) = self.failures.get(scenario_name) { + counter.fetch_add(1, Ordering::Relaxed); + } + + if let Some(counter) = self.total_time_ms.get(scenario_name) { + counter.fetch_add(duration_ms, Ordering::Relaxed); + } + } + + /// Get execution count for a scenario. + pub fn get_executions(&self, scenario_name: &str) -> u64 { + self.executions + .get(scenario_name) + .map(|c| c.load(Ordering::Relaxed)) + .unwrap_or(0) + } + + /// Get success count for a scenario. + pub fn get_successes(&self, scenario_name: &str) -> u64 { + self.successes + .get(scenario_name) + .map(|c| c.load(Ordering::Relaxed)) + .unwrap_or(0) + } + + /// Get failure count for a scenario. 
+ pub fn get_failures(&self, scenario_name: &str) -> u64 { + self.failures + .get(scenario_name) + .map(|c| c.load(Ordering::Relaxed)) + .unwrap_or(0) + } + + /// Get total execution time for a scenario. + pub fn get_total_time_ms(&self, scenario_name: &str) -> u64 { + self.total_time_ms + .get(scenario_name) + .map(|c| c.load(Ordering::Relaxed)) + .unwrap_or(0) + } + + /// Get average execution time for a scenario. + pub fn get_average_time_ms(&self, scenario_name: &str) -> f64 { + let total = self.get_total_time_ms(scenario_name); + let executions = self.get_executions(scenario_name); + + if executions == 0 { + 0.0 + } else { + total as f64 / executions as f64 + } + } + + /// Get success rate for a scenario (0.0 to 1.0). + pub fn get_success_rate(&self, scenario_name: &str) -> f64 { + let successes = self.get_successes(scenario_name); + let executions = self.get_executions(scenario_name); + + if executions == 0 { + 0.0 + } else { + successes as f64 / executions as f64 + } + } + + /// Get all scenario names. + pub fn scenario_names(&self) -> Vec { + self.executions.keys().cloned().collect() + } + + /// Get summary for all scenarios. + pub fn summary(&self) -> ScenarioMetricsSummary { + let mut summaries = Vec::new(); + + for name in self.scenario_names() { + summaries.push(ScenarioSummary { + name: name.clone(), + executions: self.get_executions(&name), + successes: self.get_successes(&name), + failures: self.get_failures(&name), + success_rate: self.get_success_rate(&name), + average_time_ms: self.get_average_time_ms(&name), + }); + } + + ScenarioMetricsSummary { + scenarios: summaries, + } + } +} + +/// Summary of metrics for a single scenario. +#[derive(Debug, Clone)] +pub struct ScenarioSummary { + pub name: String, + pub executions: u64, + pub successes: u64, + pub failures: u64, + pub success_rate: f64, + pub average_time_ms: f64, +} + +/// Summary of metrics for all scenarios. 
+#[derive(Debug, Clone)] +pub struct ScenarioMetricsSummary { + pub scenarios: Vec, +} + +impl ScenarioMetricsSummary { + /// Print a formatted summary to stdout. + pub fn print(&self) { + println!("\n=== Per-Scenario Metrics ===\n"); + + for summary in &self.scenarios { + println!("Scenario: {}", summary.name); + println!(" Executions: {}", summary.executions); + println!( + " Successes: {} ({:.1}%)", + summary.successes, + summary.success_rate * 100.0 + ); + println!(" Failures: {}", summary.failures); + println!(" Avg Time: {:.2}ms", summary.average_time_ms); + println!(); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn create_test_scenarios() -> Vec { + vec![ + Scenario { + name: "Read".to_string(), + weight: 80.0, + steps: vec![], + }, + Scenario { + name: "Write".to_string(), + weight: 15.0, + steps: vec![], + }, + Scenario { + name: "Delete".to_string(), + weight: 5.0, + steps: vec![], + }, + ] + } + + #[test] + fn test_scenario_selector_creation() { + let scenarios = create_test_scenarios(); + let selector = ScenarioSelector::new(scenarios); + + assert_eq!(selector.scenario_count(), 3); + assert_eq!(selector.total_weight(), 100.0); + + println!("✅ ScenarioSelector creation works"); + } + + #[test] + fn test_scenario_selector_probabilities() { + let scenarios = create_test_scenarios(); + let selector = ScenarioSelector::new(scenarios); + + let probs = selector.probabilities(); + assert_eq!(probs.len(), 3); + + // Check probabilities + assert!((probs[0].1 - 0.80).abs() < 0.001); // 80% + assert!((probs[1].1 - 0.15).abs() < 0.001); // 15% + assert!((probs[2].1 - 0.05).abs() < 0.001); // 5% + + println!("✅ ScenarioSelector probabilities are correct"); + } + + #[test] + fn test_scenario_selector_distribution() { + let scenarios = create_test_scenarios(); + let selector = ScenarioSelector::new(scenarios); + + // Select many times and check distribution + let mut counts = HashMap::new(); + let iterations = 10000; + + for _ in 0..iterations { + let 
scenario = selector.select(); + *counts.entry(scenario.name.clone()).or_insert(0) += 1; + } + + // Check that distribution is roughly correct (within 5%) + let read_pct = *counts.get("Read").unwrap() as f64 / iterations as f64; + let write_pct = *counts.get("Write").unwrap() as f64 / iterations as f64; + let delete_pct = *counts.get("Delete").unwrap() as f64 / iterations as f64; + + assert!((read_pct - 0.80).abs() < 0.05); + assert!((write_pct - 0.15).abs() < 0.05); + assert!((delete_pct - 0.05).abs() < 0.05); + + println!("✅ ScenarioSelector weighted distribution works"); + println!( + " Read: {:.1}%, Write: {:.1}%, Delete: {:.1}%", + read_pct * 100.0, + write_pct * 100.0, + delete_pct * 100.0 + ); + } + + #[test] + #[should_panic(expected = "empty scenarios list")] + fn test_scenario_selector_empty_panics() { + ScenarioSelector::new(vec![]); + } + + #[test] + #[should_panic(expected = "negative weight")] + fn test_scenario_selector_negative_weight_panics() { + let scenarios = vec![Scenario { + name: "Test".to_string(), + weight: -1.0, + steps: vec![], + }]; + ScenarioSelector::new(scenarios); + } + + #[test] + fn test_round_robin_distributor() { + let scenarios = create_test_scenarios(); + let distributor = RoundRobinDistributor::new(scenarios); + + assert_eq!(distributor.scenario_count(), 3); + + // Get scenarios in round-robin order + let s1 = distributor.next(); + let s2 = distributor.next(); + let s3 = distributor.next(); + let s4 = distributor.next(); // Should cycle back to first + + assert_eq!(s1.name, "Read"); + assert_eq!(s2.name, "Write"); + assert_eq!(s3.name, "Delete"); + assert_eq!(s4.name, "Read"); // Cycled back + + println!("✅ RoundRobinDistributor works"); + } + + #[test] + fn test_scenario_metrics() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + metrics.initialize_scenarios(&scenarios); + + // Record some executions + metrics.record_execution("Read", true, 100); + metrics.record_execution("Read", true, 
200); + metrics.record_execution("Read", false, 150); + metrics.record_execution("Write", true, 300); + + // Check metrics + assert_eq!(metrics.get_executions("Read"), 3); + assert_eq!(metrics.get_successes("Read"), 2); + assert_eq!(metrics.get_failures("Read"), 1); + assert_eq!(metrics.get_total_time_ms("Read"), 450); + assert_eq!(metrics.get_average_time_ms("Read"), 150.0); + assert!((metrics.get_success_rate("Read") - 0.666).abs() < 0.01); + + assert_eq!(metrics.get_executions("Write"), 1); + assert_eq!(metrics.get_successes("Write"), 1); + + println!("✅ ScenarioMetrics tracking works"); + } + + #[test] + fn test_scenario_metrics_summary() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + metrics.initialize_scenarios(&scenarios); + + metrics.record_execution("Read", true, 100); + metrics.record_execution("Write", true, 200); + metrics.record_execution("Delete", false, 150); + + let summary = metrics.summary(); + assert_eq!(summary.scenarios.len(), 3); + + println!("✅ ScenarioMetrics summary generation works"); + } +} diff --git a/src/percentiles.rs b/src/percentiles.rs new file mode 100644 index 0000000..22ff2ae --- /dev/null +++ b/src/percentiles.rs @@ -0,0 +1,567 @@ +//! Percentile latency tracking using HDR Histogram. +//! +//! This module provides accurate percentile calculation for request latencies +//! using HdrHistogram, which is the industry standard for latency measurement. +//! +//! # Features +//! - P50 (median), P90, P95, P99, P99.9 percentile tracking +//! - Per-endpoint percentile tracking +//! - Per-scenario percentile tracking +//! - Thread-safe concurrent updates +//! - Memory-efficient histogram storage + +use hdrhistogram::Histogram; +use lru::LruCache; +use std::collections::HashMap; +use std::num::NonZeroUsize; +use std::sync::{Arc, Mutex}; +use tracing::{debug, warn}; + +/// Percentile statistics for a set of latency measurements. 
+#[derive(Debug, Clone)] +pub struct PercentileStats { + /// Number of samples + pub count: u64, + + /// Minimum value (microseconds) + pub min: u64, + + /// Maximum value (microseconds) + pub max: u64, + + /// Mean/average value (microseconds) + pub mean: f64, + + /// 50th percentile - median (microseconds) + pub p50: u64, + + /// 90th percentile (microseconds) + pub p90: u64, + + /// 95th percentile (microseconds) + pub p95: u64, + + /// 99th percentile (microseconds) + pub p99: u64, + + /// 99.9th percentile (microseconds) + pub p99_9: u64, +} + +impl PercentileStats { + /// Format statistics as a human-readable string. + pub fn format(&self) -> String { + format!( + "count={}, min={:.2}ms, max={:.2}ms, mean={:.2}ms, p50={:.2}ms, p90={:.2}ms, p95={:.2}ms, p99={:.2}ms, p99.9={:.2}ms", + self.count, + self.min as f64 / 1000.0, + self.max as f64 / 1000.0, + self.mean / 1000.0, + self.p50 as f64 / 1000.0, + self.p90 as f64 / 1000.0, + self.p95 as f64 / 1000.0, + self.p99 as f64 / 1000.0, + self.p99_9 as f64 / 1000.0, + ) + } + + /// Format statistics as a compact table row. + pub fn format_table_row(&self, label: &str) -> String { + format!( + "{:<30} {:>8} {:>8.2} {:>8.2} {:>8.2} {:>8.2} {:>8.2} {:>8.2} {:>8.2}", + label, + self.count, + self.p50 as f64 / 1000.0, + self.p90 as f64 / 1000.0, + self.p95 as f64 / 1000.0, + self.p99 as f64 / 1000.0, + self.p99_9 as f64 / 1000.0, + self.mean / 1000.0, + self.max as f64 / 1000.0, + ) + } +} + +/// Thread-safe percentile tracker. +/// +/// Uses HdrHistogram internally for efficient percentile calculation. +/// All latencies are stored in microseconds. +pub struct PercentileTracker { + /// HDR Histogram for efficient percentile calculation + /// Tracks latencies from 1 microsecond to 60 seconds with 3 significant digits + histogram: Arc>>, +} + +impl PercentileTracker { + /// Create a new percentile tracker. 
+ ///
+ /// Configures histogram to track latencies from 1μs to 60 seconds
+ /// with 3 significant digits of precision.
+ pub fn new() -> Self {
+ // Create histogram that can track 1μs to 60s with 3 significant digits
+ let histogram =
+ Histogram::new_with_bounds(1, 60_000_000, 3).expect("Failed to create histogram");
+
+ Self {
+ histogram: Arc::new(Mutex::new(histogram)),
+ }
+ }
+
+ /// Record a latency measurement in milliseconds.
+ ///
+ /// # Arguments
+ /// * `latency_ms` - Latency in milliseconds
+ pub fn record_ms(&self, latency_ms: u64) {
+ let latency_us = latency_ms * 1000; // Convert to microseconds
+ self.record_us(latency_us);
+ }
+
+ /// Record a latency measurement in microseconds.
+ ///
+ /// # Arguments
+ /// * `latency_us` - Latency in microseconds
+ pub fn record_us(&self, latency_us: u64) {
+ let mut hist = self.histogram.lock().unwrap();
+
+ // Clamp to valid range (1μs to 60s)
+ let clamped = latency_us.clamp(1, 60_000_000);
+
+ if let Err(e) = hist.record(clamped) {
+ warn!(
+ latency_us = latency_us,
+ error = %e,
+ "Failed to record latency in histogram"
+ );
+ }
+ }
+
+ /// Get current percentile statistics.
+ ///
+ /// Returns None if no samples have been recorded.
+ pub fn stats(&self) -> Option<PercentileStats> {
+ let hist = self.histogram.lock().unwrap();
+
+ if hist.is_empty() {
+ return None;
+ }
+
+ Some(PercentileStats {
+ count: hist.len(),
+ min: hist.min(),
+ max: hist.max(),
+ mean: hist.mean(),
+ p50: hist.value_at_quantile(0.50),
+ p90: hist.value_at_quantile(0.90),
+ p95: hist.value_at_quantile(0.95),
+ p99: hist.value_at_quantile(0.99),
+ p99_9: hist.value_at_quantile(0.999),
+ })
+ }
+
+ /// Reset all recorded samples.
+ pub fn reset(&self) {
+ let mut hist = self.histogram.lock().unwrap();
+ hist.clear();
+ }
+}
+
+impl Default for PercentileTracker {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+/// Multi-label percentile tracker with LRU eviction (Issue #68).
+///
+/// Tracks percentiles separately for different labels (e.g., endpoints, scenarios).
+/// Thread-safe for concurrent updates. Uses LRU eviction to limit memory usage.
+pub struct MultiLabelPercentileTracker {
+ trackers: Arc<Mutex<LruCache<String, PercentileTracker>>>,
+ max_labels: usize,
+ warned_at_80_percent: Arc<Mutex<bool>>,
+}
+
+impl MultiLabelPercentileTracker {
+ /// Create a new multi-label tracker with a maximum number of labels.
+ ///
+ /// # Arguments
+ /// * `max_labels` - Maximum number of unique labels to track (default: 100)
+ ///
+ /// When the limit is reached, least recently used labels are evicted.
+ pub fn new_with_limit(max_labels: usize) -> Self {
+ let capacity = NonZeroUsize::new(max_labels).unwrap_or(NonZeroUsize::new(100).unwrap());
+ Self {
+ trackers: Arc::new(Mutex::new(LruCache::new(capacity))),
+ max_labels,
+ warned_at_80_percent: Arc::new(Mutex::new(false)),
+ }
+ }
+
+ /// Create a new multi-label tracker with default limit of 100 labels.
+ pub fn new() -> Self {
+ Self::new_with_limit(100)
+ }
+
+ /// Record a latency for a specific label.
+ ///
+ /// # Arguments
+ /// * `label` - Label to track (e.g., endpoint path, scenario name)
+ /// * `latency_ms` - Latency in milliseconds
+ ///
+ /// If the label doesn't exist and we're at capacity, the least recently
+ /// used label will be evicted to make room.
+ pub fn record(&self, label: &str, latency_ms: u64) {
+ let mut trackers = self.trackers.lock().unwrap();
+
+ // Check if we're approaching the limit (80%)
+ let current_size = trackers.len();
+ let threshold_80 = (self.max_labels as f64 * 0.8) as usize;
+
+ if current_size >= threshold_80 && !trackers.contains(&label.to_string()) {
+ let mut warned = self.warned_at_80_percent.lock().unwrap();
+ if !*warned {
+ warn!(
+ current_labels = current_size,
+ max_labels = self.max_labels,
+ threshold_percent = 80,
+ "⚠️ Histogram label limit approaching: {}/{} labels ({}%). \
+ Consider increasing MAX_HISTOGRAM_LABELS or using fewer unique scenario/step names. \
+ Least recently used labels will be evicted when limit is reached.",
+ current_size, self.max_labels, (current_size as f64 / self.max_labels as f64 * 100.0) as u32
+ );
+ *warned = true;
+ }
+ }
+
+ // Get or create tracker for this label
+ // LRU will automatically evict oldest entry if at capacity
+ if !trackers.contains(&label.to_string()) {
+ if trackers.len() >= self.max_labels {
+ debug!(
+ label = label,
+ max_labels = self.max_labels,
+ "Histogram label limit reached, evicting least recently used label"
+ );
+ crate::metrics::HISTOGRAM_LABELS_EVICTED_TOTAL.inc();
+ }
+ trackers.put(label.to_string(), PercentileTracker::new());
+ }
+
+ // Record the latency
+ if let Some(tracker) = trackers.get_mut(&label.to_string()) {
+ tracker.record_ms(latency_ms);
+ }
+ }
+
+ /// Get statistics for a specific label.
+ ///
+ /// Returns None if label doesn't exist or has no samples.
+ pub fn stats(&self, label: &str) -> Option<PercentileStats> {
+ let trackers = self.trackers.lock().unwrap();
+ // peek() doesn't update LRU order
+ trackers.peek(label).and_then(|t| t.stats())
+ }
+
+ /// Get statistics for all labels.
+ ///
+ /// Returns a map of label -> statistics.
+ pub fn all_stats(&self) -> HashMap<String, PercentileStats> {
+ let trackers = self.trackers.lock().unwrap();
+ let mut results = HashMap::new();
+
+ for (label, tracker) in trackers.iter() {
+ if let Some(stats) = tracker.stats() {
+ results.insert(label.clone(), stats);
+ }
+ }
+
+ results
+ }
+
+ /// Get all labels currently being tracked.
+ pub fn labels(&self) -> Vec<String> {
+ let trackers = self.trackers.lock().unwrap();
+ trackers.iter().map(|(k, _)| k.clone()).collect()
+ }
+ /// Get the current number of tracked labels.
+ pub fn len(&self) -> usize {
+ let trackers = self.trackers.lock().unwrap();
+ trackers.len()
+ }
+
+ /// Check if there are no tracked labels.
+ pub fn is_empty(&self) -> bool {
+ let trackers = self.trackers.lock().unwrap();
+ trackers.is_empty()
+ }
+
+ /// Get the maximum number of labels that can be tracked.
+ pub fn capacity(&self) -> usize { + self.max_labels + } + + /// Reset all trackers. + pub fn reset_all(&self) { + let mut trackers = self.trackers.lock().unwrap(); + trackers.clear(); + // Reset the warning flag + let mut warned = self.warned_at_80_percent.lock().unwrap(); + *warned = false; + } + + /// Rotate histograms by clearing all data (Issue #67). + /// + /// This resets all histogram data to free memory while keeping + /// the label structure intact. Called periodically for long-running tests. + pub fn rotate(&self) { + let trackers = self.trackers.lock().unwrap(); + + // Clear data in each histogram + for (_label, tracker) in trackers.iter() { + tracker.reset(); + } + + // Reset the warning flag since we're starting fresh + let mut warned = self.warned_at_80_percent.lock().unwrap(); + *warned = false; + } +} + +impl Default for MultiLabelPercentileTracker { + fn default() -> Self { + Self::new() + } +} + +// Global percentile trackers for the application. +// +// These are lazily initialized and thread-safe. +lazy_static::lazy_static! { + /// Global tracker for single request latencies + pub static ref GLOBAL_REQUEST_PERCENTILES: PercentileTracker = PercentileTracker::new(); + + /// Global tracker for scenario latencies (by scenario name) + pub static ref GLOBAL_SCENARIO_PERCENTILES: MultiLabelPercentileTracker = MultiLabelPercentileTracker::new(); + + /// Global tracker for step latencies (by scenario:step) + pub static ref GLOBAL_STEP_PERCENTILES: MultiLabelPercentileTracker = MultiLabelPercentileTracker::new(); +} + +/// Rotate all global histogram trackers (Issue #67). +/// +/// Clears histogram data to free memory while keeping labels intact. +/// Should be called periodically for long-running tests to bound memory usage. +pub fn rotate_all_histograms() { + GLOBAL_REQUEST_PERCENTILES.reset(); + GLOBAL_SCENARIO_PERCENTILES.rotate(); + GLOBAL_STEP_PERCENTILES.rotate(); +} + +/// Format percentile statistics as a table. 
+///
+/// # Arguments
+/// * `title` - Table title
+/// * `stats_map` - Map of label -> statistics
+///
+/// # Returns
+/// Formatted table string
+pub fn format_percentile_table(
+ title: &str,
+ stats_map: &HashMap<String, PercentileStats>,
+) -> String {
+ if stats_map.is_empty() {
+ return format!("## {}\n\nNo data available.\n", title);
+ }
+
+ let mut output = String::new();
+ output.push_str(&format!("\n## {}\n\n", title));
+ output.push_str(&format!(
+ "{:<30} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8}\n",
+ "Label", "Count", "P50", "P90", "P95", "P99", "P99.9", "Mean", "Max"
+ ));
+ output.push_str(&format!(
+ "{:<30} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8}\n",
+ "", "", "(ms)", "(ms)", "(ms)", "(ms)", "(ms)", "(ms)", "(ms)"
+ ));
+ output.push_str(&"-".repeat(120));
+ output.push('\n');
+
+ // Sort labels for consistent output
+ let mut labels: Vec<_> = stats_map.keys().collect();
+ labels.sort();
+
+ for label in labels {
+ let stats = &stats_map[label];
+ output.push_str(&stats.format_table_row(label));
+ output.push('\n');
+ }
+
+ output
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_percentile_tracker_basic() {
+ let tracker = PercentileTracker::new();
+
+ // Record some values: 10ms, 20ms, 30ms, 40ms, 50ms
+ for i in 1..=5 {
+ tracker.record_ms(i * 10);
+ }
+
+ let stats = tracker.stats().expect("Should have stats");
+ assert_eq!(stats.count, 5);
+ assert_eq!(stats.min, 10_000); // 10ms in microseconds
+
+ // HDR histogram has precision limits - use tolerance for max value
+ // Expected 50_000 but histogram may round to ~50_015 due to bucketing
+ let expected_max = 50_000;
+ let tolerance = 100; // 0.2% tolerance for histogram precision
+ assert!(
+ stats.max >= expected_max && stats.max <= expected_max + tolerance,
+ "max should be ~{} but was {}",
+ expected_max,
+ stats.max
+ );
+ }
+
+ #[test]
+ fn test_percentile_tracker_empty() {
+ let tracker = PercentileTracker::new();
+ assert!(tracker.stats().is_none());
+ }
+
+ #[test]
+ fn test_percentile_tracker_single_value() {
+ let tracker = PercentileTracker::new();
+ tracker.record_ms(100);
+
+ let stats = tracker.stats().unwrap();
+ assert_eq!(stats.count, 1);
+
+ // HDR histogram has precision limits due to bucketing
+ // Expected 100_000 but may round to ~100_031 (0.03% error)
+ let expected = 100_000;
+ let tolerance = 100; // 0.1% tolerance
+ assert!(
+ stats.p50 >= expected && stats.p50 <= expected + tolerance,
+ "p50 should be ~{} but was {}",
+ expected,
+ stats.p50
+ );
+ assert!(
+ stats.p99 >= expected && stats.p99 <= expected + tolerance,
+ "p99 should be ~{} but was {}",
+ expected,
+ stats.p99
+ );
+ }
+
+ #[test]
+ fn test_percentile_tracker_reset() {
+ let tracker = PercentileTracker::new();
+ tracker.record_ms(100);
+ assert!(tracker.stats().is_some());
+
+ tracker.reset();
+ assert!(tracker.stats().is_none());
+ }
+
+ #[test]
+ fn test_multi_label_tracker() {
+ let tracker = MultiLabelPercentileTracker::new();
+
+ // Record for different endpoints
+ tracker.record("/api/users", 10);
+ tracker.record("/api/users", 20);
+ tracker.record("/api/products", 30);
+
+ let user_stats = tracker.stats("/api/users").unwrap();
+ assert_eq!(user_stats.count, 2);
+
+ let product_stats = tracker.stats("/api/products").unwrap();
+ assert_eq!(product_stats.count, 1);
+
+ assert!(tracker.stats("/api/missing").is_none());
+ }
+
+ #[test]
+ fn test_multi_label_all_stats() {
+ let tracker = MultiLabelPercentileTracker::new();
+
+ tracker.record("endpoint1", 10);
+ tracker.record("endpoint2", 20);
+
+ let all = tracker.all_stats();
+ assert_eq!(all.len(), 2);
+ assert!(all.contains_key("endpoint1"));
+ assert!(all.contains_key("endpoint2"));
+ }
+
+ #[test]
+ fn test_multi_label_labels() {
+ let tracker = MultiLabelPercentileTracker::new();
+
+ tracker.record("a", 10);
+ tracker.record("b", 20);
+ tracker.record("c", 30);
+
+ let mut labels = tracker.labels();
+ labels.sort();
+ assert_eq!(labels, vec!["a", "b", "c"]);
+ }
+
+ #[test]
+ fn test_percentile_stats_format() {
+ let stats = PercentileStats {
+ count: 100,
+ min: 1_000, // 1ms
+ max: 100_000, // 100ms
+ mean: 50_000.0, // 50ms
+ p50: 50_000, // 50ms
+ p90: 90_000, // 90ms
+ p95: 95_000, // 95ms
+ p99: 99_000, // 99ms
+ p99_9: 99_900, // 99.9ms
+ };
+
+ let formatted = stats.format();
+ assert!(formatted.contains("count=100"));
+ assert!(formatted.contains("p50=50.00ms"));
+ assert!(formatted.contains("p99=99.00ms"));
+ }
+
+ #[test]
+ fn test_format_percentile_table() {
+ let mut stats_map = HashMap::new();
+ stats_map.insert(
+ "endpoint1".to_string(),
+ PercentileStats {
+ count: 100,
+ min: 10_000,
+ max: 100_000,
+ mean: 50_000.0,
+ p50: 50_000,
+ p90: 90_000,
+ p95: 95_000,
+ p99: 99_000,
+ p99_9: 99_900,
+ },
+ );
+
+ let table = format_percentile_table("Test Table", &stats_map);
+ assert!(table.contains("Test Table"));
+ assert!(table.contains("endpoint1"));
+ assert!(table.contains("P50"));
+ }
+
+ #[test]
+ fn test_format_percentile_table_empty() {
+ let stats_map = HashMap::new();
+ let table = format_percentile_table("Empty Table", &stats_map);
+ assert!(table.contains("No data available"));
+ }
+}
diff --git a/src/scenario.rs b/src/scenario.rs
new file mode 100644
index 0000000..d4fc3ab
--- /dev/null
+++ b/src/scenario.rs
@@ -0,0 +1,514 @@
+//! Multi-step scenario definitions and execution context.
+//!
+//! This module provides the core data structures for defining and executing
+//! multi-step load testing scenarios. A scenario consists of a sequence of steps
+//! that can extract variables, make assertions, and maintain state across requests.
+
+use std::collections::HashMap;
+use std::time::{Duration, Instant};
+
+/// A multi-step test scenario representing a user journey.
+///
+/// # Example
+/// ```
+/// use rust_loadtest::scenario::{Scenario, Step, RequestConfig, ThinkTime};
+/// use std::collections::HashMap;
+/// use std::time::Duration;
+///
+/// let scenario = Scenario {
+/// name: "Shopping Flow".to_string(),
+/// weight: 1.0,
+/// steps: vec![
+/// Step {
+/// name: "Browse Products".to_string(),
+/// request: RequestConfig {
+/// method: "GET".to_string(),
+/// path: "/products".to_string(),
+/// body: None,
+/// headers: HashMap::new(),
+/// },
+/// extractions: vec![],
+/// assertions: vec![],
+/// think_time: Some(ThinkTime::Fixed(Duration::from_secs(2))),
+/// },
+/// ],
+/// };
+/// ```
+#[derive(Debug, Clone)]
+pub struct Scenario {
+ /// Unique name for this scenario
+ pub name: String,
+
+ /// Weight for traffic distribution (higher = more traffic)
+ /// Used when running multiple scenarios: weight / sum(all_weights) = traffic percentage
+ pub weight: f64,
+
+ /// Sequential steps to execute
+ pub steps: Vec<Step>,
+}
+
+/// Think time configuration for realistic user behavior simulation.
+///
+/// Think time represents the delay between steps, simulating the time a real
+/// user would take to read content, make decisions, or perform actions.
+///
+/// # Examples
+/// ```
+/// use rust_loadtest::scenario::ThinkTime;
+/// use std::time::Duration;
+///
+/// // Fixed delay: always 3 seconds
+/// let fixed = ThinkTime::Fixed(Duration::from_secs(3));
+///
+/// // Random delay: between 2 and 5 seconds
+/// let random = ThinkTime::Random {
+/// min: Duration::from_secs(2),
+/// max: Duration::from_secs(5),
+/// };
+/// ```
+#[derive(Debug, Clone)]
+pub enum ThinkTime {
+ /// Fixed delay (always the same duration)
+ Fixed(Duration),
+
+ /// Random delay within a range (min to max, inclusive)
+ Random { min: Duration, max: Duration },
+}
+
+impl ThinkTime {
+ /// Calculate the actual delay to apply.
+ ///
+ /// For Fixed, returns the fixed duration.
+ /// For Random, returns a random duration between min and max.
+ pub fn calculate_delay(&self) -> Duration {
+ match self {
+ ThinkTime::Fixed(duration) => *duration,
+ ThinkTime::Random { min, max } => {
+ use rand::Rng;
+ let min_ms = min.as_millis() as u64;
+ let max_ms = max.as_millis() as u64;
+
+ if min_ms >= max_ms {
+ return *min;
+ }
+
+ let random_ms = rand::thread_rng().gen_range(min_ms..=max_ms);
+ Duration::from_millis(random_ms)
+ }
+ }
+ }
+}
+
+/// A single step within a scenario.
+#[derive(Debug, Clone)]
+pub struct Step {
+ /// Descriptive name for this step (e.g., "Login", "Add to Cart")
+ pub name: String,
+
+ /// HTTP request configuration
+ pub request: RequestConfig,
+
+ /// Variables to extract from the response
+ pub extractions: Vec<VariableExtraction>,
+
+ /// Assertions to validate the response
+ pub assertions: Vec<Assertion>,
+
+ /// Optional delay after this step completes (think time)
+ ///
+ /// Think time simulates realistic user behavior by adding delays between
+ /// requests. This does NOT count towards request latency metrics.
+ ///
+ /// # Examples
+ /// ```ignore
+ /// use rust_loadtest::scenario::{Step, ThinkTime};
+ /// use std::time::Duration;
+ ///
+ /// // Fixed 3-second delay
+ /// let step = Step {
+ /// think_time: Some(ThinkTime::Fixed(Duration::from_secs(3))),
+ /// // ... other fields
+ /// };
+ ///
+ /// // Random 2-5 second delay
+ /// let step = Step {
+ /// think_time: Some(ThinkTime::Random {
+ /// min: Duration::from_secs(2),
+ /// max: Duration::from_secs(5),
+ /// }),
+ /// // ... other fields
+ /// };
+ /// ```
+ pub think_time: Option<ThinkTime>,
+}
+
+/// HTTP request configuration for a step.
+#[derive(Debug, Clone)]
+pub struct RequestConfig {
+ /// HTTP method (GET, POST, PUT, DELETE, etc.)
+ pub method: String,
+
+ /// Request path (can contain variable references like "/products/${product_id}")
+ pub path: String,
+
+ /// Optional request body (can contain variable references)
+ pub body: Option<String>,
+
+ /// Request headers (values can contain variable references)
+ pub headers: HashMap<String, String>,
+}
+
+/// Extract a variable from the response for use in subsequent steps.
+#[derive(Debug, Clone)]
+pub struct VariableExtraction {
+ /// Name to store the extracted value under
+ pub name: String,
+
+ /// How to extract the value from the response
+ pub extractor: Extractor,
+}
+
+/// Methods for extracting values from HTTP responses.
+#[derive(Debug, Clone)]
+pub enum Extractor {
+ /// Extract from JSON response using JSONPath (e.g., "$.user.id")
+ JsonPath(String),
+
+ /// Extract using regex with named capture group
+ Regex { pattern: String, group: String },
+
+ /// Extract from response header
+ Header(String),
+
+ /// Extract from cookie
+ Cookie(String),
+}
+
+/// Assert conditions on the HTTP response.
+#[derive(Debug, Clone)]
+pub enum Assertion {
+ /// Assert response status code equals expected value
+ StatusCode(u16),
+
+ /// Assert response time is below threshold
+ ResponseTime(Duration),
+
+ /// Assert JSON path exists and optionally matches value
+ JsonPath {
+ path: String,
+ expected: Option<String>,
+ },
+
+ /// Assert response body contains substring
+ BodyContains(String),
+
+ /// Assert response body matches regex
+ BodyMatches(String),
+
+ /// Assert response header exists
+ HeaderExists(String),
+}
+
+/// Execution context maintained across steps in a scenario.
+///
+/// Each virtual user gets their own context to maintain state across
+/// the steps in a scenario execution.
+#[derive(Debug, Clone)]
+pub struct ScenarioContext {
+ /// Extracted variables from previous steps
+ variables: HashMap<String, String>,
+
+ /// When this scenario execution started
+ scenario_start: Instant,
+
+ /// Current step index being executed
+ current_step: usize,
+}
+
+impl ScenarioContext {
+ /// Create a new scenario context.
+ pub fn new() -> Self {
+ Self {
+ variables: HashMap::new(),
+ scenario_start: Instant::now(),
+ current_step: 0,
+ }
+ }
+
+ /// Store a variable for use in subsequent steps.
+ pub fn set_variable(&mut self, name: String, value: String) {
+ self.variables.insert(name, value);
+ }
+
+ /// Load variables from a CSV data row (Issue #31).
+ ///
+ /// This copies all key-value pairs from the data row into the context,
+ /// making them available for variable substitution in scenario steps.
+ ///
+ /// # Example
+ /// ```
+ /// use rust_loadtest::scenario::ScenarioContext;
+ /// use std::collections::HashMap;
+ ///
+ /// let mut ctx = ScenarioContext::new();
+ /// let mut data = HashMap::new();
+ /// data.insert("username".to_string(), "testuser".to_string());
+ /// data.insert("password".to_string(), "testpass".to_string());
+ ///
+ /// ctx.load_data_row(&data);
+ /// assert_eq!(ctx.get_variable("username"), Some(&"testuser".to_string()));
+ /// ```
+ pub fn load_data_row(&mut self, data: &HashMap<String, String>) {
+ for (key, value) in data {
+ self.variables.insert(key.clone(), value.clone());
+ }
+ }
+
+ /// Get a previously stored variable.
+ pub fn get_variable(&self, name: &str) -> Option<&String> {
+ self.variables.get(name)
+ }
+
+ /// Replace variable references in a string with their values.
+ ///
+ /// Supports syntax:
+ /// - ${variable_name} or $variable_name - Replace with stored variable
+ /// - ${timestamp} - Replace with current Unix timestamp in milliseconds
+ ///
+ /// # Example
+ /// ```
+ /// use rust_loadtest::scenario::ScenarioContext;
+ ///
+ /// let mut ctx = ScenarioContext::new();
+ /// ctx.set_variable("user_id".to_string(), "12345".to_string());
+ ///
+ /// let result = ctx.substitute_variables("/users/${user_id}/profile");
+ /// assert_eq!(result, "/users/12345/profile");
+ /// ```
+ pub fn substitute_variables(&self, input: &str) -> String {
+ let mut result = input.to_string();
+
+ // Replace special ${timestamp} variable with current timestamp
+ if result.contains("${timestamp}") {
+ let timestamp = std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .unwrap()
+ .as_millis()
+ .to_string();
+ result = result.replace("${timestamp}", &timestamp);
+ }
+
+ // Replace ${var} syntax
+ for (name, value) in &self.variables {
+ let pattern = format!("${{{}}}", name);
+ result = result.replace(&pattern, value);
+ }
+
+ // Replace $var syntax (for simple variable names)
+ for (name, value) in &self.variables {
+ let pattern = format!("${}", name);
+ // Only replace if not followed by { (to avoid replacing ${var} twice)
+ result = result.replace(&pattern, value);
+ }
+
+ result
+ }
+
+ /// Get elapsed time since scenario started.
+ pub fn elapsed(&self) -> Duration {
+ self.scenario_start.elapsed()
+ }
+
+ /// Get current step index.
+ pub fn current_step(&self) -> usize {
+ self.current_step
+ }
+
+ /// Advance to next step.
+ pub fn next_step(&mut self) {
+ self.current_step += 1;
+ }
+
+ /// Reset context for a new scenario execution.
+ pub fn reset(&mut self) { + self.variables.clear(); + self.scenario_start = Instant::now(); + self.current_step = 0; + } +} + +impl Default for ScenarioContext { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_scenario_context_variables() { + let mut ctx = ScenarioContext::new(); + + ctx.set_variable("user_id".to_string(), "123".to_string()); + ctx.set_variable("token".to_string(), "abc-def".to_string()); + + assert_eq!(ctx.get_variable("user_id"), Some(&"123".to_string())); + assert_eq!(ctx.get_variable("token"), Some(&"abc-def".to_string())); + assert_eq!(ctx.get_variable("missing"), None); + } + + #[test] + fn test_variable_substitution_braces() { + let mut ctx = ScenarioContext::new(); + ctx.set_variable("product_id".to_string(), "prod-456".to_string()); + ctx.set_variable("user_id".to_string(), "user-789".to_string()); + + let result = ctx.substitute_variables("/users/${user_id}/cart/items/${product_id}"); + assert_eq!(result, "/users/user-789/cart/items/prod-456"); + } + + #[test] + fn test_variable_substitution_dollar() { + let mut ctx = ScenarioContext::new(); + ctx.set_variable("id".to_string(), "42".to_string()); + + let result = ctx.substitute_variables("/items/$id"); + assert_eq!(result, "/items/42"); + } + + #[test] + fn test_variable_substitution_in_json() { + let mut ctx = ScenarioContext::new(); + ctx.set_variable("cart_id".to_string(), "cart-999".to_string()); + ctx.set_variable("quantity".to_string(), "3".to_string()); + + let json = r#"{"cart_id": "${cart_id}", "quantity": ${quantity}}"#; + let result = ctx.substitute_variables(json); + + assert_eq!(result, r#"{"cart_id": "cart-999", "quantity": 3}"#); + } + + #[test] + fn test_step_counter() { + let mut ctx = ScenarioContext::new(); + + assert_eq!(ctx.current_step(), 0); + + ctx.next_step(); + assert_eq!(ctx.current_step(), 1); + + ctx.next_step(); + assert_eq!(ctx.current_step(), 2); + + ctx.reset(); + 
assert_eq!(ctx.current_step(), 0); + } + + #[test] + fn test_reset_clears_variables() { + let mut ctx = ScenarioContext::new(); + ctx.set_variable("test".to_string(), "value".to_string()); + ctx.next_step(); + + ctx.reset(); + + assert_eq!(ctx.get_variable("test"), None); + assert_eq!(ctx.current_step(), 0); + } + + #[test] + fn test_timestamp_substitution() { + let ctx = ScenarioContext::new(); + + let email = ctx.substitute_variables("user-${timestamp}@example.com"); + + // Should contain a numeric timestamp + assert!(email.starts_with("user-")); + assert!(email.ends_with("@example.com")); + assert!(email.contains(char::is_numeric)); + + // Verify it's different each time (timestamps advance) + std::thread::sleep(std::time::Duration::from_millis(2)); + let email2 = ctx.substitute_variables("user-${timestamp}@example.com"); + assert_ne!(email, email2); + } + + #[test] + fn test_scenario_creation() { + let scenario = Scenario { + name: "Test Scenario".to_string(), + weight: 1.5, + steps: vec![Step { + name: "Step 1".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/api/test".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + assert_eq!(scenario.name, "Test Scenario"); + assert_eq!(scenario.weight, 1.5); + assert_eq!(scenario.steps.len(), 1); + assert_eq!(scenario.steps[0].name, "Step 1"); + } + + #[test] + fn test_think_time_fixed() { + let think_time = ThinkTime::Fixed(Duration::from_secs(3)); + let delay = think_time.calculate_delay(); + + assert_eq!(delay, Duration::from_secs(3)); + } + + #[test] + fn test_think_time_random() { + let think_time = ThinkTime::Random { + min: Duration::from_millis(100), + max: Duration::from_millis(500), + }; + + // Test multiple times to ensure randomness + for _ in 0..10 { + let delay = think_time.calculate_delay(); + let delay_ms = delay.as_millis() as u64; + + // Should be within range + assert!( + 
(100..=500).contains(&delay_ms), + "Delay {}ms should be between 100-500ms", + delay_ms + ); + } + } + + #[test] + fn test_think_time_random_min_equals_max() { + let think_time = ThinkTime::Random { + min: Duration::from_secs(2), + max: Duration::from_secs(2), + }; + + let delay = think_time.calculate_delay(); + assert_eq!(delay, Duration::from_secs(2)); + } + + #[test] + fn test_think_time_random_min_greater_than_max() { + // If min > max, should return min + let think_time = ThinkTime::Random { + min: Duration::from_secs(5), + max: Duration::from_secs(3), + }; + + let delay = think_time.calculate_delay(); + assert_eq!(delay, Duration::from_secs(5)); + } +} diff --git a/src/throughput.rs b/src/throughput.rs new file mode 100644 index 0000000..4e99f95 --- /dev/null +++ b/src/throughput.rs @@ -0,0 +1,341 @@ +//! Per-scenario throughput tracking and reporting. +//! +//! This module provides throughput calculation and reporting for scenarios. +//! It tracks requests per second (RPS) for each scenario type, enabling +//! performance analysis and comparison across different scenario types. + +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; +use tracing::debug; + +/// Throughput statistics for a scenario. +#[derive(Debug, Clone)] +pub struct ThroughputStats { + /// Scenario name + pub scenario_name: String, + + /// Total requests/executions + pub total_count: u64, + + /// Duration over which requests were counted + pub duration: Duration, + + /// Calculated throughput (requests per second) + pub rps: f64, + + /// Average time per request (milliseconds) + pub avg_time_ms: f64, +} + +impl ThroughputStats { + /// Format throughput statistics as a human-readable string. + pub fn format(&self) -> String { + format!( + "{}: {} requests in {:.1}s = {:.2} RPS (avg {:.2}ms per request)", + self.scenario_name, + self.total_count, + self.duration.as_secs_f64(), + self.rps, + self.avg_time_ms + ) + } + + /// Format as a table row. 
+ pub fn format_table_row(&self) -> String {
+ format!(
+ "{:<30} {:>10} {:>10.2} {:>10.2}",
+ self.scenario_name, self.total_count, self.rps, self.avg_time_ms
+ )
+ }
+}
+
+/// Tracks throughput for multiple scenarios.
+#[derive(Clone)]
+pub struct ThroughputTracker {
+ /// Start time of tracking
+ start_time: Instant,
+
+ /// Request counts per scenario
+ counts: Arc<Mutex<HashMap<String, u64>>>,
+
+ /// Total time spent per scenario (for avg calculation)
+ total_times: Arc<Mutex<HashMap<String, Duration>>>,
+}
+
+impl ThroughputTracker {
+ /// Create a new throughput tracker.
+ pub fn new() -> Self {
+ Self {
+ start_time: Instant::now(),
+ counts: Arc::new(Mutex::new(HashMap::new())),
+ total_times: Arc::new(Mutex::new(HashMap::new())),
+ }
+ }
+
+ /// Record a scenario execution.
+ ///
+ /// # Arguments
+ /// * `scenario_name` - Name of the scenario
+ /// * `duration` - Duration of the execution
+ pub fn record(&self, scenario_name: &str, duration: Duration) {
+ let mut counts = self.counts.lock().unwrap();
+ *counts.entry(scenario_name.to_string()).or_insert(0) += 1;
+
+ let mut times = self.total_times.lock().unwrap();
+ *times
+ .entry(scenario_name.to_string())
+ .or_insert(Duration::ZERO) += duration;
+
+ debug!(
+ scenario = scenario_name,
+ duration_ms = duration.as_millis(),
+ "Recorded scenario execution"
+ );
+ }
+
+ /// Get throughput statistics for a specific scenario.
+ pub fn stats(&self, scenario_name: &str) -> Option<ThroughputStats> {
+ let counts = self.counts.lock().unwrap();
+ let times = self.total_times.lock().unwrap();
+
+ let count = counts.get(scenario_name)?;
+ let total_time = times.get(scenario_name)?;
+
+ let duration = self.start_time.elapsed();
+ let rps = if duration.as_secs_f64() > 0.0 {
+ *count as f64 / duration.as_secs_f64()
+ } else {
+ 0.0
+ };
+
+ let avg_time_ms = if *count > 0 {
+ total_time.as_millis() as f64 / *count as f64
+ } else {
+ 0.0
+ };
+
+ Some(ThroughputStats {
+ scenario_name: scenario_name.to_string(),
+ total_count: *count,
+ duration,
+ rps,
+ avg_time_ms,
+ })
+ }
+
+ /// Get statistics for all scenarios.
+ pub fn all_stats(&self) -> Vec<ThroughputStats> {
+ // Fix deadlock: Don't call self.stats() while holding locks
+ // Instead, calculate stats inline to avoid nested mutex acquisition
+ let counts = self.counts.lock().unwrap();
+ let times = self.total_times.lock().unwrap();
+ let duration = self.start_time.elapsed();
+
+ let mut stats = Vec::new();
+
+ for (scenario_name, count) in counts.iter() {
+ if let Some(total_time) = times.get(scenario_name) {
+ let rps = if duration.as_secs_f64() > 0.0 {
+ *count as f64 / duration.as_secs_f64()
+ } else {
+ 0.0
+ };
+
+ let avg_time_ms = if *count > 0 {
+ total_time.as_millis() as f64 / *count as f64
+ } else {
+ 0.0
+ };
+
+ stats.push(ThroughputStats {
+ scenario_name: scenario_name.to_string(),
+ total_count: *count,
+ duration,
+ rps,
+ avg_time_ms,
+ });
+ }
+ }
+
+ // Sort by scenario name for consistent output
+ stats.sort_by(|a, b| a.scenario_name.cmp(&b.scenario_name));
+ stats
+ }
+
+ /// Get total throughput across all scenarios.
+ pub fn total_throughput(&self) -> f64 {
+ let counts = self.counts.lock().unwrap();
+ let total: u64 = counts.values().sum();
+ let duration = self.start_time.elapsed();
+
+ if duration.as_secs_f64() > 0.0 {
+ total as f64 / duration.as_secs_f64()
+ } else {
+ 0.0
+ }
+ }
+
+ /// Reset all tracking data.
+ pub fn reset(&self) { + let mut counts = self.counts.lock().unwrap(); + let mut times = self.total_times.lock().unwrap(); + counts.clear(); + times.clear(); + } + + /// Get the elapsed time since tracking started. + pub fn elapsed(&self) -> Duration { + self.start_time.elapsed() + } +} + +impl Default for ThroughputTracker { + fn default() -> Self { + Self::new() + } +} + +/// Format throughput statistics as a table. +pub fn format_throughput_table(stats: &[ThroughputStats]) -> String { + if stats.is_empty() { + return "No throughput data available.\n".to_string(); + } + + let mut output = String::new(); + output.push_str(&format!( + "\n{:<30} {:>10} {:>10} {:>10}\n", + "Scenario", "Requests", "RPS", "Avg Time" + )); + output.push_str(&format!( + "{:<30} {:>10} {:>10} {:>10}\n", + "", "", "", "(ms)" + )); + output.push_str(&"-".repeat(70)); + output.push('\n'); + + for stat in stats { + output.push_str(&stat.format_table_row()); + output.push('\n'); + } + + output +} + +// Global throughput tracker. +lazy_static::lazy_static! 
{ + pub static ref GLOBAL_THROUGHPUT_TRACKER: ThroughputTracker = ThroughputTracker::new(); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_throughput_tracker() { + let tracker = ThroughputTracker::new(); + + tracker.record("scenario1", Duration::from_millis(100)); + tracker.record("scenario1", Duration::from_millis(150)); + tracker.record("scenario2", Duration::from_millis(200)); + + let stats1 = tracker.stats("scenario1").unwrap(); + assert_eq!(stats1.total_count, 2); + assert_eq!(stats1.avg_time_ms, 125.0); // (100 + 150) / 2 + + let stats2 = tracker.stats("scenario2").unwrap(); + assert_eq!(stats2.total_count, 1); + assert_eq!(stats2.avg_time_ms, 200.0); + } + + #[test] + fn test_all_stats() { + let tracker = ThroughputTracker::new(); + + tracker.record("alpha", Duration::from_millis(100)); + tracker.record("beta", Duration::from_millis(200)); + tracker.record("gamma", Duration::from_millis(300)); + + let all_stats = tracker.all_stats(); + assert_eq!(all_stats.len(), 3); + + // Should be sorted by name + assert_eq!(all_stats[0].scenario_name, "alpha"); + assert_eq!(all_stats[1].scenario_name, "beta"); + assert_eq!(all_stats[2].scenario_name, "gamma"); + } + + #[test] + fn test_total_throughput() { + let tracker = ThroughputTracker::new(); + + // Record some scenarios + for _ in 0..10 { + tracker.record("test", Duration::from_millis(100)); + } + + // Give it a moment to calculate + std::thread::sleep(Duration::from_millis(100)); + + let total_rps = tracker.total_throughput(); + assert!(total_rps > 0.0, "Total RPS should be greater than 0"); + } + + #[test] + fn test_stats_format() { + let stats = ThroughputStats { + scenario_name: "Test Scenario".to_string(), + total_count: 100, + duration: Duration::from_secs(10), + rps: 10.0, + avg_time_ms: 50.0, + }; + + let formatted = stats.format(); + assert!(formatted.contains("Test Scenario")); + assert!(formatted.contains("100 requests")); + assert!(formatted.contains("10.0")); + } + + #[test] + fn 
test_reset() { + let tracker = ThroughputTracker::new(); + + tracker.record("test", Duration::from_millis(100)); + assert!(tracker.stats("test").is_some()); + + tracker.reset(); + assert!(tracker.stats("test").is_none()); + } + + #[test] + fn test_format_throughput_table() { + let stats = vec![ + ThroughputStats { + scenario_name: "Scenario A".to_string(), + total_count: 100, + duration: Duration::from_secs(10), + rps: 10.0, + avg_time_ms: 50.0, + }, + ThroughputStats { + scenario_name: "Scenario B".to_string(), + total_count: 200, + duration: Duration::from_secs(10), + rps: 20.0, + avg_time_ms: 25.0, + }, + ]; + + let table = format_throughput_table(&stats); + assert!(table.contains("Scenario")); + assert!(table.contains("Requests")); + assert!(table.contains("RPS")); + } + + #[test] + fn test_empty_stats() { + let tracker = ThroughputTracker::new(); + assert!(tracker.stats("nonexistent").is_none()); + } +} diff --git a/src/utils.rs b/src/utils.rs index 8d6faea..4682221 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,9 +1,10 @@ use std::str::FromStr; use tokio::time::Duration; -/// Parses a duration string in the format "10m", "5h", "3d". +/// Parses a duration string in the format "30s", "10m", "5h", "3d". /// /// Supported units: +/// - `s` for seconds /// - `m` for minutes /// - `h` for hours /// - `d` for days @@ -28,11 +29,12 @@ pub fn parse_duration_string(s: &str) -> Result { }; match unit_char { + 's' => Ok(Duration::from_secs(value)), 'm' => Ok(Duration::from_secs(value * 60)), 'h' => Ok(Duration::from_secs(value * 60 * 60)), 'd' => Ok(Duration::from_secs(value * 24 * 60 * 60)), _ => Err(format!( - "Unknown duration unit: '{}'. Use 'm', 'h', or 'd'.", + "Unknown duration unit: '{}'. 
Use 's', 'm', 'h', or 'd'.", unit_char )), } @@ -162,9 +164,11 @@ mod tests { } #[test] - fn seconds_suffix_not_supported() { - let err = parse_duration_string("10s").unwrap_err(); - assert!(err.contains("Unknown duration unit"), "error was: {}", err); + fn parse_seconds() { + assert_eq!( + parse_duration_string("30s").unwrap(), + Duration::from_secs(30) + ); } #[test] diff --git a/src/worker.rs b/src/worker.rs index dd48f03..efe35ce 100644 --- a/src/worker.rs +++ b/src/worker.rs @@ -1,10 +1,38 @@ +use std::sync::atomic::{AtomicU64, Ordering}; + use tokio::time::{self, Duration, Instant}; use tracing::{debug, error, info}; +/// Atomic counter for deterministic percentile sampling (Issue #70). +static SAMPLE_COUNTER: AtomicU64 = AtomicU64::new(0); + +/// Returns true if this request should be recorded in percentile histograms. +/// +/// Uses a deterministic counter so every Nth request is sampled (not random), +/// giving even distribution across all workers without coordination overhead. +/// `rate` is 1-100: at 100 every request is recorded, at 10 every 10th is. +fn should_sample(rate: u8) -> bool { + if rate >= 100 { + return true; + } + let counter = SAMPLE_COUNTER.fetch_add(1, Ordering::Relaxed); + counter % 100 < rate as u64 +} + +use crate::connection_pool::GLOBAL_POOL_STATS; +use crate::errors::ErrorCategory; +use crate::executor::ScenarioExecutor; use crate::load_models::LoadModel; +use crate::memory_guard::is_percentile_tracking_active; use crate::metrics::{ - CONCURRENT_REQUESTS, REQUEST_DURATION_SECONDS, REQUEST_STATUS_CODES, REQUEST_TOTAL, + CONCURRENT_REQUESTS, REQUEST_DURATION_SECONDS, REQUEST_ERRORS_BY_CATEGORY, + REQUEST_STATUS_CODES, REQUEST_TOTAL, SCENARIO_REQUESTS_TOTAL, }; +use crate::percentiles::{ + GLOBAL_REQUEST_PERCENTILES, GLOBAL_SCENARIO_PERCENTILES, GLOBAL_STEP_PERCENTILES, +}; +use crate::scenario::{Scenario, ScenarioContext}; +use crate::throughput::GLOBAL_THROUGHPUT_TRACKER; /// Configuration for a worker task. 
pub struct WorkerConfig { @@ -16,6 +44,8 @@ pub struct WorkerConfig { pub test_duration: Duration, pub load_model: LoadModel, pub num_concurrent_tasks: usize, + pub percentile_tracking_enabled: bool, + pub percentile_sampling_rate: u8, } /// Runs a single worker task that sends HTTP requests according to the load model. @@ -62,33 +92,67 @@ pub async fn run_worker(client: reqwest::Client, config: WorkerConfig, start_tim let req = build_request(&client, &config); match req.send().await { - Ok(response) => { + Ok(mut response) => { let status = response.status().as_u16(); let status_str = status.to_string(); REQUEST_STATUS_CODES.with_label_values(&[&status_str]).inc(); + // Categorize HTTP errors (Issue #34) + if let Some(category) = ErrorCategory::from_status_code(status) { + REQUEST_ERRORS_BY_CATEGORY + .with_label_values(&[category.label()]) + .inc(); + } + + // Issue #74: CRITICAL - Must consume response body in chunks to prevent buffering + // At 50K RPS, unconsumed bodies accumulate in memory causing rapid OOM + // Stream and discard body without allocating full buffer + while let Ok(Some(_chunk)) = response.chunk().await { + // Chunk read and immediately dropped - minimal memory footprint + } + debug!( task_id = config.task_id, url = %config.url, status_code = status, - latency_ms = request_start_time.elapsed().as_millis() as u64, "Request completed" ); } Err(e) => { REQUEST_STATUS_CODES.with_label_values(&["error"]).inc(); + + // Categorize request error (Issue #34) + let error_category = ErrorCategory::from_reqwest_error(&e); + REQUEST_ERRORS_BY_CATEGORY + .with_label_values(&[error_category.label()]) + .inc(); + error!( task_id = config.task_id, url = %config.url, error = %e, + error_category = %error_category.label(), "Request failed" ); } } + let actual_latency_ms = request_start_time.elapsed().as_millis() as u64; REQUEST_DURATION_SECONDS.observe(request_start_time.elapsed().as_secs_f64()); CONCURRENT_REQUESTS.dec(); + // Record latency in percentile 
tracker (Issue #33, #66, #70, #72) + // Check both config flag AND runtime flag (can be disabled by memory guard) + if config.percentile_tracking_enabled + && is_percentile_tracking_active() + && should_sample(config.percentile_sampling_rate) + { + GLOBAL_REQUEST_PERCENTILES.record_ms(actual_latency_ms); + } + + // Record connection pool statistics (Issue #36) + GLOBAL_POOL_STATS.record_request(actual_latency_ms); + // Apply the calculated delay if delay_ms > 0 && delay_ms != u64::MAX { tokio::time::sleep(Duration::from_millis(delay_ms)).await; @@ -112,6 +176,27 @@ fn build_request(client: &reqwest::Client, config: &WorkerConfig) -> reqwest::Re req } } + "PUT" => { + let req = client.put(&config.url); + if config.send_json { + req.header("Content-Type", "application/json") + .body(config.json_payload.clone().unwrap_or_default()) + } else { + req + } + } + "PATCH" => { + let req = client.patch(&config.url); + if config.send_json { + req.header("Content-Type", "application/json") + .body(config.json_payload.clone().unwrap_or_default()) + } else { + req + } + } + "DELETE" => client.delete(&config.url), + "HEAD" => client.head(&config.url), + "OPTIONS" => client.request(reqwest::Method::OPTIONS, &config.url), _ => { error!( request_type = %config.request_type, @@ -121,3 +206,124 @@ fn build_request(client: &reqwest::Client, config: &WorkerConfig) -> reqwest::Re } } } + +/// Configuration for a scenario-based worker task. +pub struct ScenarioWorkerConfig { + pub task_id: usize, + pub base_url: String, + pub scenario: Scenario, + pub test_duration: Duration, + pub load_model: LoadModel, + pub num_concurrent_tasks: usize, + pub percentile_tracking_enabled: bool, + pub percentile_sampling_rate: u8, +} + +/// Runs a scenario-based worker task that executes multi-step scenarios according to the load model. +/// +/// This worker executes complete scenarios (multiple steps) instead of individual requests. 
+/// Each scenario execution counts as one "virtual user" completing their journey. +/// +/// # Cookie and Session Management +/// +/// For proper session isolation, each scenario execution gets its own cookie-enabled +/// HTTP client. This ensures cookies from one virtual user don't leak to another. +pub async fn run_scenario_worker( + _client: reqwest::Client, // Ignored - we create per-execution clients + config: ScenarioWorkerConfig, + start_time: Instant, +) { + debug!( + task_id = config.task_id, + scenario = %config.scenario.name, + steps = config.scenario.steps.len(), + load_model = ?config.load_model, + "Scenario worker starting" + ); + + loop { + let elapsed_total_secs = Instant::now().duration_since(start_time).as_secs_f64(); + + // Check if the total test duration has passed + if elapsed_total_secs >= config.test_duration.as_secs_f64() { + info!( + task_id = config.task_id, + scenario = %config.scenario.name, + elapsed_secs = elapsed_total_secs, + "Scenario worker stopping after duration limit" + ); + break; + } + + // Calculate current target RPS (scenarios per second in this case) + let current_target_sps = config + .load_model + .calculate_current_rps(elapsed_total_secs, config.test_duration.as_secs_f64()); + + // Calculate delay per task to achieve the current_target_sps + let delay_ms = if current_target_sps > 0.0 { + (config.num_concurrent_tasks as f64 * 1000.0 / current_target_sps).round() as u64 + } else { + u64::MAX + }; + + // Create new cookie-enabled client for this virtual user + // This ensures cookie isolation between scenario executions + let client = reqwest::Client::builder() + .cookie_store(true) // Enable automatic cookie management + .timeout(std::time::Duration::from_secs(30)) + .build() + .unwrap_or_else(|_| reqwest::Client::new()); + + // Create executor with isolated client + let executor = ScenarioExecutor::new(config.base_url.clone(), client); + + // Create new context for this scenario execution + let mut context = 
ScenarioContext::new(); + + // Execute the scenario + let result = executor.execute(&config.scenario, &mut context).await; + + debug!( + task_id = config.task_id, + scenario = %config.scenario.name, + success = result.success, + duration_ms = result.total_time_ms, + steps_completed = result.steps_completed, + "Scenario execution completed" + ); + + // Record scenario latency in percentile tracker (Issue #33, #66, #70, #72) + // Check both config flag AND runtime flag (can be disabled by memory guard) + if config.percentile_tracking_enabled + && is_percentile_tracking_active() + && should_sample(config.percentile_sampling_rate) + { + GLOBAL_SCENARIO_PERCENTILES.record(&config.scenario.name, result.total_time_ms); + + // Record individual step latencies (Issue #33, #66, #70, #72) + for step in &result.steps { + let label = format!("{}:{}", config.scenario.name, step.step_name); + GLOBAL_STEP_PERCENTILES.record(&label, step.response_time_ms); + } + } + + // Record throughput (Issue #35) + SCENARIO_REQUESTS_TOTAL + .with_label_values(&[&config.scenario.name]) + .inc(); + GLOBAL_THROUGHPUT_TRACKER.record( + &config.scenario.name, + std::time::Duration::from_millis(result.total_time_ms), + ); + + // Apply the calculated delay between scenario executions + if delay_ms > 0 && delay_ms != u64::MAX { + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + } else if delay_ms == u64::MAX { + // Sleep for a very long time if SPS is 0 + tokio::time::sleep(Duration::from_secs(3600)).await; + } + // If delay_ms is 0, no sleep, execute scenarios as fast as possible + } +} diff --git a/src/yaml_config.rs b/src/yaml_config.rs new file mode 100644 index 0000000..703e784 --- /dev/null +++ b/src/yaml_config.rs @@ -0,0 +1,859 @@ +//! YAML configuration file support (Issue #37). +//! +//! This module provides YAML-based configuration as an alternative to +//! environment variables. YAML files enable version-controlled test plans, +//! 
reusable scenarios, and easier configuration management. + +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::Path; +use std::time::Duration as StdDuration; +use thiserror::Error; + +use crate::config_validation::{ + HttpMethodValidator, LoadModelValidator, RangeValidator, UrlValidator, ValidationContext, +}; +use crate::config_version::VersionChecker; +use crate::load_models::LoadModel; +use crate::scenario::{Assertion, Extractor, RequestConfig, Scenario, Step, VariableExtraction}; + +/// Errors that can occur when loading or parsing YAML configuration. +#[derive(Error, Debug)] +pub enum YamlConfigError { + #[error("Failed to read config file: {0}")] + FileRead(#[from] std::io::Error), + + #[error("Failed to parse YAML: {0}")] + YamlParse(#[from] serde_yaml::Error), + + #[error("Invalid configuration: {0}")] + Validation(String), + + #[error("Missing required field: {0}")] + MissingField(String), +} + +/// Duration format for YAML (e.g., "30s", "5m", "2h"). +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum YamlDuration { + Seconds(u64), + String(String), +} + +impl YamlDuration { + pub fn to_std_duration(&self) -> Result { + match self { + YamlDuration::Seconds(s) => Ok(StdDuration::from_secs(*s)), + YamlDuration::String(s) => crate::utils::parse_duration_string(s).map_err(|e| { + YamlConfigError::Validation(format!("Invalid duration '{}': {}", s, e)) + }), + } + } +} + +/// Metadata about the test configuration. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct YamlMetadata { + pub name: Option, + pub description: Option, + pub author: Option, + #[serde(default)] + pub tags: Vec, +} + +/// Global configuration settings. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlGlobalConfig { + #[serde(rename = "baseUrl")] + pub base_url: String, + + #[serde(default = "default_timeout")] + pub timeout: YamlDuration, + + #[serde(default = "default_workers")] + pub workers: usize, + + pub duration: YamlDuration, + + #[serde(rename = "skipTlsVerify", default)] + pub skip_tls_verify: bool, + + #[serde(rename = "customHeaders")] + pub custom_headers: Option, +} + +fn default_timeout() -> YamlDuration { + YamlDuration::Seconds(30) +} + +fn default_workers() -> usize { + 10 +} + +/// Load model configuration in YAML. +/// +/// Default ratios for DailyTraffic pattern +fn default_morning_ramp_ratio() -> f64 { + 0.2 +} +fn default_peak_sustain_ratio() -> f64 { + 0.1 +} +fn default_mid_decline_ratio() -> f64 { + 0.2 +} +fn default_mid_sustain_ratio() -> f64 { + 0.1 +} +fn default_evening_decline_ratio() -> f64 { + 0.2 +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "model", rename_all = "lowercase")] +pub enum YamlLoadModel { + Concurrent, + Rps { + target: f64, + }, + Ramp { + min: f64, + max: f64, + #[serde(rename = "rampDuration")] + ramp_duration: YamlDuration, + }, + #[serde(rename = "dailytraffic")] + DailyTraffic { + min: f64, + mid: f64, + max: f64, + #[serde(rename = "cycleDuration")] + cycle_duration: YamlDuration, + #[serde(rename = "morningRampRatio", default = "default_morning_ramp_ratio")] + morning_ramp_ratio: f64, + #[serde(rename = "peakSustainRatio", default = "default_peak_sustain_ratio")] + peak_sustain_ratio: f64, + #[serde(rename = "midDeclineRatio", default = "default_mid_decline_ratio")] + mid_decline_ratio: f64, + #[serde(rename = "midSustainRatio", default = "default_mid_sustain_ratio")] + mid_sustain_ratio: f64, + #[serde( + rename = "eveningDeclineRatio", + default = "default_evening_decline_ratio" + )] + evening_decline_ratio: f64, + }, +} + +impl YamlLoadModel { + pub fn to_load_model(&self) -> Result { + match self { + 
YamlLoadModel::Concurrent => Ok(LoadModel::Concurrent), + YamlLoadModel::Rps { target } => Ok(LoadModel::Rps { + target_rps: *target, + }), + YamlLoadModel::Ramp { + min, + max, + ramp_duration, + } => Ok(LoadModel::RampRps { + min_rps: *min, + max_rps: *max, + ramp_duration: ramp_duration.to_std_duration()?, + }), + YamlLoadModel::DailyTraffic { + min, + mid, + max, + cycle_duration, + morning_ramp_ratio, + peak_sustain_ratio, + mid_decline_ratio, + mid_sustain_ratio, + evening_decline_ratio, + } => Ok(LoadModel::DailyTraffic { + min_rps: *min, + mid_rps: *mid, + max_rps: *max, + cycle_duration: cycle_duration.to_std_duration()?, + morning_ramp_ratio: *morning_ramp_ratio, + peak_sustain_ratio: *peak_sustain_ratio, + mid_decline_ratio: *mid_decline_ratio, + mid_sustain_ratio: *mid_sustain_ratio, + evening_decline_ratio: *evening_decline_ratio, + }), + } + } +} + +/// Scenario definition in YAML. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlScenario { + pub name: String, + + #[serde(default = "default_weight")] + pub weight: f64, + + pub steps: Vec, + + /// Optional data file for data-driven testing + #[serde(rename = "dataFile")] + pub data_file: Option, + + /// Optional scenario-level configuration overrides + #[serde(default)] + pub config: YamlScenarioConfig, +} + +/// Data file configuration for data-driven scenarios. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlDataFile { + /// Path to the data file (CSV or JSON) + pub path: String, + + /// Data file format (csv, json) + #[serde(default = "default_data_format")] + pub format: String, + + /// How to iterate through data (sequential, random, cycle) + #[serde(default = "default_data_strategy")] + pub strategy: String, +} + +fn default_data_format() -> String { + "csv".to_string() +} + +fn default_data_strategy() -> String { + "sequential".to_string() +} + +/// Scenario-level configuration overrides. 
+#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct YamlScenarioConfig { + /// Override global timeout for this scenario + pub timeout: Option, + + /// Number of times to retry failed requests in this scenario + #[serde(rename = "retryCount")] + pub retry_count: Option, + + /// Delay between retries + #[serde(rename = "retryDelay")] + pub retry_delay: Option, +} + +fn default_weight() -> f64 { + 1.0 +} + +/// Think time configuration in YAML. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum YamlThinkTime { + /// Fixed think time (e.g., "3s") + Fixed(YamlDuration), + + /// Random think time with min/max range + Random { + min: YamlDuration, + max: YamlDuration, + }, +} + +impl YamlThinkTime { + pub fn to_think_time(&self) -> Result { + match self { + YamlThinkTime::Fixed(duration) => Ok(crate::scenario::ThinkTime::Fixed( + duration.to_std_duration()?, + )), + YamlThinkTime::Random { min, max } => Ok(crate::scenario::ThinkTime::Random { + min: min.to_std_duration()?, + max: max.to_std_duration()?, + }), + } + } +} + +/// Step definition in YAML. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlStep { + pub name: Option, + + pub request: YamlRequest, + + #[serde(default)] + pub extract: Vec, + + #[serde(default)] + pub assertions: Vec, + + #[serde(rename = "thinkTime")] + pub think_time: Option, +} + +/// Request configuration in YAML. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlRequest { + pub method: String, + pub path: String, + + #[serde(rename = "queryParams")] + pub query_params: Option>, + + pub headers: Option>, + + pub body: Option, +} + +/// Extractor definition in YAML. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "lowercase")] +pub enum YamlExtractor { + #[serde(rename = "jsonPath")] + JsonPath { + name: String, + #[serde(rename = "jsonPath")] + json_path: String, + }, + Regex { + name: String, + regex: String, + }, + Header { + name: String, + header: String, + }, + Cookie { + name: String, + cookie: String, + }, +} + +/// Assertion definition in YAML. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "camelCase")] +pub enum YamlAssertion { + #[serde(rename = "statusCode")] + StatusCode { expected: u16 }, + #[serde(rename = "responseTime")] + ResponseTime { max: YamlDuration }, + #[serde(rename = "jsonPath")] + JsonPath { + path: String, + expected: Option, + }, + #[serde(rename = "bodyContains")] + BodyContains { text: String }, + #[serde(rename = "bodyMatches")] + BodyMatches { regex: String }, + #[serde(rename = "headerExists")] + HeaderExists { header: String }, +} + +/// Root YAML configuration structure. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlConfig { + pub version: String, + + #[serde(default)] + pub metadata: YamlMetadata, + + pub config: YamlGlobalConfig, + + pub load: YamlLoadModel, + + pub scenarios: Vec, +} + +impl YamlConfig { + /// Load configuration from a YAML file. + pub fn from_file>(path: P) -> Result { + let content = fs::read_to_string(path)?; + Self::from_str(&content) + } + + /// Parse configuration from a YAML string. + #[allow(clippy::should_implement_trait)] + pub fn from_str(content: &str) -> Result { + let config: YamlConfig = serde_yaml::from_str(content)?; + config.validate()?; + Ok(config) + } + + /// Validate the configuration using enhanced validation system. 
+ pub fn validate(&self) -> Result<(), YamlConfigError> { + let mut ctx = ValidationContext::new(); + + // Validate version using VersionChecker + ctx.enter("version"); + if let Err(e) = VersionChecker::parse_and_validate(&self.version) { + ctx.field_error(e.to_string()); + } + ctx.exit(); + + // Validate config section + ctx.enter("config"); + + // Validate base URL + ctx.enter("baseUrl"); + if let Err(e) = UrlValidator::validate(&self.config.base_url) { + ctx.field_error(e.to_string()); + } + ctx.exit(); + + // Validate workers + ctx.enter("workers"); + if let Err(e) = RangeValidator::validate_positive_u64(self.config.workers as u64, "workers") + { + ctx.field_error(e.to_string()); + } + if let Err(_e) = + RangeValidator::validate_u64(self.config.workers as u64, 1, 10000, "workers") + { + ctx.field_error(format!( + "Workers should be between 1 and 10000, got: {}", + self.config.workers + )); + } + ctx.exit(); + + ctx.exit(); // config + + // Validate load model + ctx.enter("load"); + match &self.load { + YamlLoadModel::Rps { target } => { + if let Err(e) = LoadModelValidator::validate_rps(*target) { + ctx.field_error(e.to_string()); + } + } + YamlLoadModel::Ramp { min, max, .. } => { + if let Err(e) = LoadModelValidator::validate_ramp(*min, *max) { + ctx.field_error(e.to_string()); + } + } + YamlLoadModel::DailyTraffic { min, mid, max, .. 
} => { + if let Err(e) = LoadModelValidator::validate_daily_traffic(*min, *mid, *max) { + ctx.field_error(e.to_string()); + } + } + YamlLoadModel::Concurrent => {} // No validation needed + } + ctx.exit(); // load + + // Validate scenarios + ctx.enter("scenarios"); + if self.scenarios.is_empty() { + ctx.field_error("At least one scenario must be defined".to_string()); + } + + for (idx, scenario) in self.scenarios.iter().enumerate() { + ctx.enter(&format!("[{}]", idx)); + ctx.enter("name"); + if scenario.name.is_empty() { + ctx.field_error("Scenario name cannot be empty".to_string()); + } + ctx.exit(); + + // Validate weight + ctx.enter("weight"); + if let Err(e) = RangeValidator::validate_positive_f64(scenario.weight, "weight") { + ctx.field_error(e.to_string()); + } + ctx.exit(); + + // Validate steps + ctx.enter("steps"); + if scenario.steps.is_empty() { + ctx.field_error(format!( + "Scenario '{}' must have at least one step", + scenario.name + )); + } + + for (step_idx, step) in scenario.steps.iter().enumerate() { + ctx.enter(&format!("[{}]", step_idx)); + ctx.enter("request"); + + // Validate HTTP method + ctx.enter("method"); + if let Err(e) = HttpMethodValidator::validate(&step.request.method) { + ctx.field_error(e.to_string()); + } + ctx.exit(); + + // Validate path + ctx.enter("path"); + if step.request.path.is_empty() { + ctx.field_error("Request path cannot be empty".to_string()); + } + ctx.exit(); + + ctx.exit(); // request + ctx.exit(); // step + } + + ctx.exit(); // steps + ctx.exit(); // scenario + } + ctx.exit(); // scenarios + + // Convert validation context to result + ctx.into_result() + .map_err(|e| YamlConfigError::Validation(e.to_string())) + } + + /// Convert YAML scenarios to Scenario structs. 
+ pub fn to_scenarios(&self) -> Result, YamlConfigError> { + let mut scenarios = Vec::new(); + + for yaml_scenario in &self.scenarios { + let mut steps = Vec::new(); + + for (idx, yaml_step) in yaml_scenario.steps.iter().enumerate() { + let step_name = yaml_step + .name + .clone() + .unwrap_or_else(|| format!("Step {}", idx + 1)); + + // Build request config + let mut headers = std::collections::HashMap::new(); + if let Some(yaml_headers) = &yaml_step.request.headers { + headers.extend(yaml_headers.clone()); + } + + // Build body with query params if present + let path = if let Some(query_params) = &yaml_step.request.query_params { + let query_string: Vec = query_params + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect(); + format!("{}?{}", yaml_step.request.path, query_string.join("&")) + } else { + yaml_step.request.path.clone() + }; + + let request = RequestConfig { + method: yaml_step.request.method.clone(), + path, + body: yaml_step.request.body.clone(), + headers, + }; + + // Convert extractors + let extractors = yaml_step + .extract + .iter() + .map(|e| self.convert_extractor(e)) + .collect(); + + // Convert assertions + let assertions = yaml_step + .assertions + .iter() + .map(|a| self.convert_assertion(a)) + .collect::, _>>()?; + + // Convert think time + let think_time = if let Some(think_time_yaml) = &yaml_step.think_time { + Some(think_time_yaml.to_think_time()?) 
+ } else { + None + }; + + steps.push(Step { + name: step_name, + request, + extractions: extractors, + assertions, + think_time, + }); + } + + scenarios.push(Scenario { + name: yaml_scenario.name.clone(), + weight: yaml_scenario.weight, + steps, + }); + } + + Ok(scenarios) + } + + fn convert_extractor(&self, extractor: &YamlExtractor) -> VariableExtraction { + match extractor { + YamlExtractor::JsonPath { name, json_path } => VariableExtraction { + name: name.clone(), + extractor: Extractor::JsonPath(json_path.clone()), + }, + YamlExtractor::Regex { name, regex } => { + // For Regex, we need to parse the regex to extract pattern and group + // For now, use the entire regex as pattern and empty group + // TODO: Improve regex parsing to separate pattern and group + VariableExtraction { + name: name.clone(), + extractor: Extractor::Regex { + pattern: regex.clone(), + group: String::from("0"), // Default to capture group 0 (full match) + }, + } + } + YamlExtractor::Header { name, header } => VariableExtraction { + name: name.clone(), + extractor: Extractor::Header(header.clone()), + }, + YamlExtractor::Cookie { name, cookie } => VariableExtraction { + name: name.clone(), + extractor: Extractor::Cookie(cookie.clone()), + }, + } + } + + fn convert_assertion(&self, assertion: &YamlAssertion) -> Result { + match assertion { + YamlAssertion::StatusCode { expected } => Ok(Assertion::StatusCode(*expected)), + YamlAssertion::ResponseTime { max } => { + Ok(Assertion::ResponseTime(max.to_std_duration()?)) + } + YamlAssertion::JsonPath { path, expected } => Ok(Assertion::JsonPath { + path: path.clone(), + expected: expected.clone(), + }), + YamlAssertion::BodyContains { text } => Ok(Assertion::BodyContains(text.clone())), + YamlAssertion::BodyMatches { regex } => Ok(Assertion::BodyMatches(regex.clone())), + YamlAssertion::HeaderExists { header } => Ok(Assertion::HeaderExists(header.clone())), + } + } +} + +impl Default for YamlConfig { + fn default() -> Self { + Self { + 
version: "1.0".to_string(), + metadata: YamlMetadata::default(), + config: YamlGlobalConfig { + base_url: "https://example.com".to_string(), + timeout: YamlDuration::Seconds(30), + workers: 10, + duration: YamlDuration::Seconds(60), + skip_tls_verify: false, + custom_headers: None, + }, + load: YamlLoadModel::Concurrent, + scenarios: vec![], + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_simple_yaml() { + let yaml = r#" +version: "1.0" +metadata: + name: "Test Config" +config: + baseUrl: "https://api.example.com" + workers: 5 + duration: "1m" +load: + model: "rps" + target: 100 +scenarios: + - name: "Test Scenario" + steps: + - request: + method: "GET" + path: "/health" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 5); + assert_eq!(config.scenarios.len(), 1); + assert_eq!(config.scenarios[0].name, "Test Scenario"); + } + + #[test] + fn test_yaml_duration_parsing() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "30s" + timeout: 15 +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let duration = config.config.duration.to_std_duration().unwrap(); + assert_eq!(duration, StdDuration::from_secs(30)); + + let timeout = config.config.timeout.to_std_duration().unwrap(); + assert_eq!(timeout, StdDuration::from_secs(15)); + } + + #[test] + fn test_validation_invalid_version() { + let yaml = r#" +version: "2.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + // Error contains version validation info + let err = result.unwrap_err().to_string(); + 
assert!( + err.contains("version") + && (err.contains("2.0") || err.contains("too new") || err.contains("Unsupported")), + "Expected version validation error, got: {}", + err + ); + } + + #[test] + fn test_validation_invalid_url() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "invalid-url" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + // Error contains URL validation info + let err = result.unwrap_err().to_string(); + assert!( + err.contains("baseUrl") + && (err.contains("invalid") || err.contains("URL") || err.contains("http")), + "Expected URL validation error, got: {}", + err + ); + } + + #[test] + fn test_validation_no_scenarios() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: [] +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("At least one scenario")); + } + + #[test] + fn test_scenario_conversion() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test Flow" + weight: 1.5 + steps: + - name: "Step 1" + request: + method: "GET" + path: "/api/test" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "2s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios.len(), 1); + assert_eq!(scenarios[0].name, "Test Flow"); + assert_eq!(scenarios[0].weight, 1.5); + assert_eq!(scenarios[0].steps.len(), 1); + assert_eq!(scenarios[0].steps[0].name, "Step 1"); + assert_eq!(scenarios[0].steps[0].request.method, "GET"); + assert_eq!(scenarios[0].steps[0].assertions.len(), 1); + assert!(scenarios[0].steps[0].think_time.is_some()); + } + + #[test] + fn 
test_load_model_conversion() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "ramp" + min: 10 + max: 100 + rampDuration: "30s" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let load_model = config.load.to_load_model().unwrap(); + + match load_model { + LoadModel::RampRps { + min_rps, + max_rps, + ramp_duration, + } => { + assert_eq!(min_rps, 10.0); + assert_eq!(max_rps, 100.0); + assert_eq!(ramp_duration, StdDuration::from_secs(30)); + } + _ => panic!("Expected RampRps load model"), + } + } +} diff --git a/tests/assertion_integration_tests.rs b/tests/assertion_integration_tests.rs new file mode 100644 index 0000000..28c7395 --- /dev/null +++ b/tests/assertion_integration_tests.rs @@ -0,0 +1,678 @@ +//! Integration tests for response assertions framework (Issue #30). +//! +//! These tests validate that assertions work correctly against a live API, +//! including proper failure detection, metrics tracking, and mixed scenarios. +//! +//! **NOTE**: Most tests use httpbin.org (public testing API). +//! E-commerce specific tests require ecom.edge.baugus-lab.com and are marked #[ignore]. 
+ +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{Assertion, RequestConfig, Scenario, ScenarioContext, Step}; +use std::collections::HashMap; +use std::time::Duration; + +// Public testing API - always available +const HTTPBIN_URL: &str = "https://httpbin.org"; +// E-commerce test API - may not be accessible in all environments +const ECOM_URL: &str = "https://ecom.edge.baugus-lab.com"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[tokio::test] +async fn test_status_code_assertion_pass() { + let scenario = Scenario { + name: "Status Code Assertion - Pass".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Get 200 Response".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status/200".to_string(), // httpbin returns 200 + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps.len(), 1); + assert!(result.steps[0].success); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ Status code assertion passed"); +} + +#[tokio::test] +async fn test_status_code_assertion_fail() { + let scenario = Scenario { + name: "Status Code Assertion - Fail".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Expect 404".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status/200".to_string(), // Returns 200, not 404 + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + 
assertions: vec![Assertion::StatusCode(404)], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(!result.success, "Scenario should fail due to assertion"); + assert_eq!(result.steps.len(), 1); + assert!(!result.steps[0].success); + assert_eq!(result.steps[0].assertions_passed, 0); + assert_eq!(result.steps[0].assertions_failed, 1); + assert!(result.steps[0].error.is_some()); + + println!("✅ Status code assertion correctly failed"); +} + +#[tokio::test] + +async fn test_response_time_assertion_pass() { + let scenario = Scenario { + name: "Response Time Assertion - Pass".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Fast Response".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::ResponseTime(Duration::from_secs(5))], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!( + "✅ Response time assertion passed ({}ms < 5000ms)", + result.steps[0].response_time_ms + ); +} + +#[tokio::test] +async fn test_response_time_assertion_fail() { + let scenario = Scenario { + name: "Response Time Assertion - Fail".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Unrealistic Threshold".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: 
vec![], + assertions: vec![Assertion::ResponseTime(Duration::from_millis(1))], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(!result.success, "Scenario should fail due to slow response"); + assert_eq!(result.steps[0].assertions_passed, 0); + assert_eq!(result.steps[0].assertions_failed, 1); + + println!( + "✅ Response time assertion correctly failed ({}ms > 1ms)", + result.steps[0].response_time_ms + ); +} + +#[tokio::test] + +async fn test_json_path_assertion_existence() { + let scenario = Scenario { + name: "JSONPath Existence".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Field Exists".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::JsonPath { + path: "$.slideshow".to_string(), + expected: None, // Just check it exists + }], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ JSONPath existence assertion passed"); +} + +#[tokio::test] + +async fn test_json_path_assertion_value_match() { + let scenario = Scenario { + name: "JSONPath Value Match".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check JSON Value".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: 
vec![Assertion::JsonPath { + path: "$.slideshow.title".to_string(), + expected: Some("Sample Slide Show".to_string()), + }], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ JSONPath value match assertion passed"); +} + +#[tokio::test] +async fn test_json_path_assertion_value_mismatch() { + let scenario = Scenario { + name: "JSONPath Value Mismatch".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Wrong Value".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::JsonPath { + path: "$.slideshow.title".to_string(), + expected: Some("Wrong Title".to_string()), // Should be "Sample Slide Show" + }], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!( + !result.success, + "Scenario should fail due to value mismatch" + ); + assert_eq!(result.steps[0].assertions_passed, 0); + assert_eq!(result.steps[0].assertions_failed, 1); + + println!("✅ JSONPath value mismatch correctly failed"); +} + +#[tokio::test] + +async fn test_body_contains_assertion_pass() { + let scenario = Scenario { + name: "Body Contains - Pass".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Response Contains Text".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), 
+ }, + extractions: vec![], + assertions: vec![Assertion::BodyContains("slideshow".to_string())], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ Body contains assertion passed"); +} + +#[tokio::test] +async fn test_body_contains_assertion_fail() { + let scenario = Scenario { + name: "Body Contains - Fail".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Missing Text".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::BodyContains("MISSING_TEXT_XYZ".to_string())], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(!result.success, "Scenario should fail"); + assert_eq!(result.steps[0].assertions_passed, 0); + assert_eq!(result.steps[0].assertions_failed, 1); + + println!("✅ Body contains assertion correctly failed"); +} + +#[tokio::test] + +async fn test_body_matches_regex_assertion() { + let scenario = Scenario { + name: "Body Matches Regex".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check JSON Pattern".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::BodyMatches( + r#""slideshow"\s*:\s*\{"#.to_string(), + )], + think_time: None, + }], + }; + + let client = 
create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ Body matches regex assertion passed"); +} + +#[tokio::test] + +async fn test_header_exists_assertion_pass() { + let scenario = Scenario { + name: "Header Exists - Pass".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Content-Type Header".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/headers".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::HeaderExists("content-type".to_string())], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 1); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ Header exists assertion passed"); +} + +#[tokio::test] +async fn test_header_exists_assertion_fail() { + let scenario = Scenario { + name: "Header Exists - Fail".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Missing Header".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/headers".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::HeaderExists("x-missing-header".to_string())], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = 
executor.execute(&scenario, &mut context).await; + + assert!(!result.success, "Scenario should fail"); + assert_eq!(result.steps[0].assertions_passed, 0); + assert_eq!(result.steps[0].assertions_failed, 1); + + println!("✅ Header exists assertion correctly failed"); +} + +#[tokio::test] + +async fn test_multiple_assertions_all_pass() { + let scenario = Scenario { + name: "Multiple Assertions - All Pass".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Multiple Checks".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![ + Assertion::StatusCode(200), + Assertion::ResponseTime(Duration::from_secs(5)), + Assertion::JsonPath { + path: "$.url".to_string(), + expected: None, // Just check it exists + }, + Assertion::BodyContains("headers".to_string()), + Assertion::HeaderExists("content-type".to_string()), + ], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps[0].assertions_passed, 5); + assert_eq!(result.steps[0].assertions_failed, 0); + + println!("✅ All 5 assertions passed"); +} + +#[tokio::test] + +async fn test_multiple_assertions_mixed_results() { + let scenario = Scenario { + name: "Multiple Assertions - Mixed".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Mixed Results".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![ + Assertion::StatusCode(200), // PASS + Assertion::BodyContains("headers".to_string()), // PASS + Assertion::StatusCode(404), // FAIL + Assertion::BodyContains("MISSING".to_string()), 
// FAIL + ], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!( + !result.success, + "Scenario should fail (2 failed assertions)" + ); + assert_eq!(result.steps[0].assertions_passed, 2); + assert_eq!(result.steps[0].assertions_failed, 2); + + println!("✅ Mixed assertions: 2 passed, 2 failed as expected"); +} + +#[tokio::test] + +async fn test_multi_step_assertion_stops_on_failure() { + let scenario = Scenario { + name: "Multi-Step with Assertion Failure".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Step 1 - Pass".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status/200".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }, + Step { + name: "Step 2 - Fail".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status/200".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(404)], // Will fail + think_time: None, + }, + Step { + name: "Step 3 - Never Reached".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(HTTPBIN_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(!result.success, "Scenario should fail"); + assert_eq!( + result.steps_completed, 1, + "Should stop after step 2 failure" + ); + assert_eq!(result.steps.len(), 2, "Should only have 2 step results"); + 
assert_eq!(result.failed_at_step, Some(1)); + + // Step 1 should pass + assert!(result.steps[0].success); + assert_eq!(result.steps[0].assertions_passed, 1); + + // Step 2 should fail + assert!(!result.steps[1].success); + assert_eq!(result.steps[1].assertions_failed, 1); + + println!("✅ Execution correctly stopped after assertion failure in step 2"); +} + +#[tokio::test] +#[ignore] // Requires ecom.edge.baugus-lab.com +async fn test_realistic_e_commerce_flow_with_assertions() { + let scenario = Scenario { + name: "E-Commerce Flow with Assertions".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Health Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/health".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![ + Assertion::StatusCode(200), + Assertion::ResponseTime(Duration::from_secs(2)), + ], + think_time: None, + }, + Step { + name: "Get Products".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/products?limit=10".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![ + Assertion::StatusCode(200), + Assertion::ResponseTime(Duration::from_secs(3)), + Assertion::BodyContains("id".to_string()), + Assertion::BodyContains("name".to_string()), + Assertion::HeaderExists("content-type".to_string()), + ], + think_time: None, + }, + Step { + name: "Check Status".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![ + Assertion::StatusCode(200), + Assertion::JsonPath { + path: "$.status".to_string(), + expected: Some("ok".to_string()), + }, + Assertion::BodyMatches(r#""status"\s*:\s*"ok""#.to_string()), + ], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(ECOM_URL.to_string(), client); + let mut context = 
ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "E-commerce flow should succeed"); + assert_eq!(result.steps_completed, 3); + + // Verify assertion counts + assert_eq!(result.steps[0].assertions_passed, 2); + assert_eq!(result.steps[1].assertions_passed, 5); + assert_eq!(result.steps[2].assertions_passed, 3); + + let total_assertions_passed: usize = result.steps.iter().map(|s| s.assertions_passed).sum(); + + println!( + "✅ E-commerce flow completed with {} total assertions passing", + total_assertions_passed + ); +} diff --git a/tests/config_docs_generator_tests.rs b/tests/config_docs_generator_tests.rs new file mode 100644 index 0000000..ebf42fb --- /dev/null +++ b/tests/config_docs_generator_tests.rs @@ -0,0 +1,317 @@ +//! Integration tests for config documentation generator (Issue #46). +//! +//! These tests validate: +//! - JSON Schema generation +//! - Markdown documentation generation +//! - VS Code snippets generation +//! 
- Output file generation
+
+use rust_loadtest::config_docs_generator::ConfigDocsGenerator;
+use std::fs;
+use tempfile::TempDir;
+
+#[test]
+fn test_generate_json_schema() {
+    let generator = ConfigDocsGenerator::new();
+    let schema = generator.generate_json_schema();
+
+    assert!(!schema.is_empty());
+    assert!(schema.contains("\"$schema\""));
+    assert!(schema.contains("\"title\": \"Rust LoadTest Configuration\""));
+
+    println!("✅ JSON Schema generation works");
+}
+
+#[test]
+fn test_json_schema_contains_all_sections() {
+    let generator = ConfigDocsGenerator::new();
+    let schema = generator.generate_json_schema();
+
+    // Check all major sections are present
+    assert!(schema.contains("\"version\""));
+    assert!(schema.contains("\"metadata\""));
+    assert!(schema.contains("\"config\""));
+    assert!(schema.contains("\"load\""));
+    assert!(schema.contains("\"scenarios\""));
+
+    println!("✅ JSON Schema contains all required sections");
+}
+
+#[test]
+fn test_json_schema_has_load_models() {
+    let generator = ConfigDocsGenerator::new();
+    let schema = generator.generate_json_schema();
+
+    // Check all load models are documented
+    assert!(schema.contains("concurrent"));
+    assert!(schema.contains("\"rps\""));
+    assert!(schema.contains("\"ramp\""));
+
+    println!("✅ JSON Schema documents all load models");
+}
+
+#[test]
+fn test_json_schema_is_valid_json() {
+    let generator = ConfigDocsGenerator::new();
+    let schema = generator.generate_json_schema();
+
+    let parsed: Result<serde_json::Value, serde_json::Error> = serde_json::from_str(&schema);
+    assert!(parsed.is_ok(), "JSON Schema should be valid JSON");
+
+    let json = parsed.unwrap();
+    assert_eq!(json["$schema"], "http://json-schema.org/draft-07/schema#");
+
+    println!("✅ JSON Schema is valid JSON");
+}
+
+#[test]
+fn test_json_schema_required_fields() {
+    let generator = ConfigDocsGenerator::new();
+    let schema = generator.generate_json_schema();
+    let json: serde_json::Value = serde_json::from_str(&schema).unwrap();
+
+    // Check required fields at root level
+    
let required = json["required"].as_array().unwrap(); + assert!(required.contains(&serde_json::json!("version"))); + assert!(required.contains(&serde_json::json!("config"))); + assert!(required.contains(&serde_json::json!("load"))); + assert!(required.contains(&serde_json::json!("scenarios"))); + + println!("✅ JSON Schema has correct required fields"); +} + +#[test] +fn test_json_schema_config_properties() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + let json: serde_json::Value = serde_json::from_str(&schema).unwrap(); + + // Check config section properties + let config_props = &json["properties"]["config"]["properties"]; + assert!(config_props["baseUrl"].is_object()); + assert!(config_props["timeout"].is_object()); + assert!(config_props["workers"].is_object()); + assert!(config_props["duration"].is_object()); + + println!("✅ JSON Schema config section is correct"); +} + +#[test] +fn test_generate_markdown_docs() { + let generator = ConfigDocsGenerator::new(); + let markdown = generator.generate_markdown_docs(); + + assert!(!markdown.is_empty()); + assert!(markdown.contains("# Configuration Schema Reference")); + + println!("✅ Markdown documentation generation works"); +} + +#[test] +fn test_markdown_docs_has_all_sections() { + let generator = ConfigDocsGenerator::new(); + let markdown = generator.generate_markdown_docs(); + + // Check all major sections + assert!(markdown.contains("## Version")); + assert!(markdown.contains("## Metadata")); + assert!(markdown.contains("## Config")); + assert!(markdown.contains("## Load Models")); + assert!(markdown.contains("## Scenarios")); + assert!(markdown.contains("## Complete Example")); + + println!("✅ Markdown docs contain all sections"); +} + +#[test] +fn test_markdown_docs_has_examples() { + let generator = ConfigDocsGenerator::new(); + let markdown = generator.generate_markdown_docs(); + + // Check that code examples are present + 
assert!(markdown.contains("```yaml"));
+    assert!(markdown.contains("version: \"1.0\""));
+    assert!(markdown.contains("baseUrl:"));
+    assert!(markdown.contains("scenarios:"));
+
+    println!("✅ Markdown docs include YAML examples");
+}
+
+#[test]
+fn test_markdown_docs_has_tables() {
+    let generator = ConfigDocsGenerator::new();
+    let markdown = generator.generate_markdown_docs();
+
+    // Check that tables are present
+    assert!(markdown.contains("| Property"));
+    assert!(markdown.contains("|-------"));
+
+    println!("✅ Markdown docs include property tables");
+}
+
+#[test]
+fn test_generate_vscode_snippets() {
+    let generator = ConfigDocsGenerator::new();
+    let snippets = generator.generate_vscode_snippets();
+
+    assert!(!snippets.is_empty());
+    assert!(snippets.contains("\"loadtest-basic\""));
+
+    println!("✅ VS Code snippets generation works");
+}
+
+#[test]
+fn test_vscode_snippets_is_valid_json() {
+    let generator = ConfigDocsGenerator::new();
+    let snippets = generator.generate_vscode_snippets();
+
+    let parsed: Result<serde_json::Value, serde_json::Error> = serde_json::from_str(&snippets);
+    assert!(parsed.is_ok(), "Snippets should be valid JSON");
+
+    println!("✅ VS Code snippets are valid JSON");
+}
+
+#[test]
+fn test_vscode_snippets_has_all_snippets() {
+    let generator = ConfigDocsGenerator::new();
+    let snippets = generator.generate_vscode_snippets();
+
+    // Check all major snippets are present
+    assert!(snippets.contains("\"loadtest-basic\""));
+    assert!(snippets.contains("\"loadtest-rps\""));
+    assert!(snippets.contains("\"loadtest-ramp\""));
+    assert!(snippets.contains("\"loadtest-scenario\""));
+    assert!(snippets.contains("\"loadtest-step\""));
+
+    println!("✅ VS Code snippets include all snippet types");
+}
+
+#[test]
+fn test_vscode_snippets_structure() {
+    let generator = ConfigDocsGenerator::new();
+    let snippets = generator.generate_vscode_snippets();
+    let json: serde_json::Value = serde_json::from_str(&snippets).unwrap();
+
+    // Check snippet structure
+    let basic = 
&json["loadtest-basic"];
+    assert!(basic["prefix"].is_string());
+    assert!(basic["body"].is_array());
+    assert!(basic["description"].is_string());
+
+    println!("✅ VS Code snippets have correct structure");
+}
+
+#[test]
+fn test_vscode_snippet_basic_config() {
+    let generator = ConfigDocsGenerator::new();
+    let snippets = generator.generate_vscode_snippets();
+    let json: serde_json::Value = serde_json::from_str(&snippets).unwrap();
+
+    let basic = &json["loadtest-basic"];
+    let body = basic["body"].as_array().unwrap();
+
+    // Check that basic config includes all essential parts
+    let body_str = body
+        .iter()
+        .map(|v| v.as_str().unwrap())
+        .collect::<Vec<_>>()
+        .join("\n");
+
+    assert!(body_str.contains("version:"));
+    assert!(body_str.contains("config:"));
+    assert!(body_str.contains("load:"));
+    assert!(body_str.contains("scenarios:"));
+
+    println!("✅ Basic snippet includes all essential sections");
+}
+
+#[test]
+fn test_write_json_schema_to_file() {
+    let temp_dir = TempDir::new().unwrap();
+    let schema_path = temp_dir.path().join("schema.json");
+
+    let generator = ConfigDocsGenerator::new();
+    let schema = generator.generate_json_schema();
+
+    fs::write(&schema_path, schema).unwrap();
+
+    assert!(schema_path.exists());
+
+    let content = fs::read_to_string(&schema_path).unwrap();
+    assert!(!content.is_empty());
+
+    println!("✅ Can write JSON Schema to file");
+}
+
+#[test]
+fn test_write_markdown_docs_to_file() {
+    let temp_dir = TempDir::new().unwrap();
+    let docs_path = temp_dir.path().join("schema.md");
+
+    let generator = ConfigDocsGenerator::new();
+    let markdown = generator.generate_markdown_docs();
+
+    fs::write(&docs_path, markdown).unwrap();
+
+    assert!(docs_path.exists());
+
+    let content = fs::read_to_string(&docs_path).unwrap();
+    assert!(!content.is_empty());
+
+    println!("✅ Can write Markdown docs to file");
+}
+
+#[test]
+fn test_write_vscode_snippets_to_file() {
+    let temp_dir = TempDir::new().unwrap();
+    let snippets_path = 
temp_dir.path().join("snippets.json"); + + let generator = ConfigDocsGenerator::new(); + let snippets = generator.generate_vscode_snippets(); + + fs::write(&snippets_path, snippets).unwrap(); + + assert!(snippets_path.exists()); + + let content = fs::read_to_string(&snippets_path).unwrap(); + assert!(!content.is_empty()); + + println!("✅ Can write VS Code snippets to file"); +} + +#[test] +fn test_generator_default() { + let generator = ConfigDocsGenerator::default(); + let schema = generator.generate_json_schema(); + + assert!(!schema.is_empty()); + + println!("✅ Default constructor works"); +} + +#[test] +fn test_json_schema_examples() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + let json: serde_json::Value = serde_json::from_str(&schema).unwrap(); + + // Check that examples are provided + let version_examples = &json["properties"]["version"]["examples"]; + assert!(version_examples.is_array()); + assert_eq!(version_examples[0], "1.0"); + + println!("✅ JSON Schema includes examples"); +} + +#[test] +fn test_json_schema_patterns() { + let generator = ConfigDocsGenerator::new(); + let schema = generator.generate_json_schema(); + let json: serde_json::Value = serde_json::from_str(&schema).unwrap(); + + // Check that version has a pattern + let version_pattern = &json["properties"]["version"]["pattern"]; + assert!(version_pattern.is_string()); + + println!("✅ JSON Schema includes validation patterns"); +} diff --git a/tests/config_examples_tests.rs b/tests/config_examples_tests.rs new file mode 100644 index 0000000..9e758ca --- /dev/null +++ b/tests/config_examples_tests.rs @@ -0,0 +1,452 @@ +//! Integration tests for config examples and templates (Issue #45). +//! +//! These tests validate: +//! - All example configs parse successfully +//! - All configs pass validation +//! - Templates have correct structure +//! 
- Example data files are valid + +use rust_loadtest::yaml_config::YamlConfig; +use std::fs; +use std::path::Path; + +fn load_example_config(filename: &str) -> YamlConfig { + let path = format!("examples/configs/{}", filename); + YamlConfig::from_file(&path).unwrap_or_else(|e| panic!("Failed to load {}: {}", filename, e)) +} + +fn validate_example_config(filename: &str) { + let config = load_example_config(filename); + + // Basic structure validation + assert!(!config.version.is_empty(), "{}: version is empty", filename); + assert!( + !config.config.base_url.is_empty(), + "{}: baseUrl is empty", + filename + ); + assert!( + config.config.workers > 0, + "{}: workers must be > 0", + filename + ); + assert!( + !config.scenarios.is_empty(), + "{}: scenarios are empty", + filename + ); + + println!("✅ {} is valid", filename); +} + +#[test] +fn test_basic_api_test_template() { + let config = load_example_config("basic-api-test.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 10); + assert_eq!(config.scenarios.len(), 1); + assert_eq!(config.scenarios[0].name, "API Health Check"); + assert_eq!(config.scenarios[0].weight, 100.0); + + println!("✅ basic-api-test.yaml is valid"); +} + +#[test] +fn test_ecommerce_scenario_template() { + let config = load_example_config("ecommerce-scenario.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://shop.example.com"); + assert_eq!(config.config.workers, 50); + assert_eq!(config.scenarios.len(), 4); + + // Check scenario weights + assert_eq!(config.scenarios[0].name, "Browse Only"); + assert_eq!(config.scenarios[0].weight, 60.0); + assert_eq!(config.scenarios[1].name, "Browse and Add to Cart"); + assert_eq!(config.scenarios[1].weight, 25.0); + assert_eq!(config.scenarios[2].name, "Complete Purchase"); + assert_eq!(config.scenarios[2].weight, 12.0); + assert_eq!(config.scenarios[3].name, "Quick 
Browse"); + assert_eq!(config.scenarios[3].weight, 3.0); + + // Total weight should be 100 + let total_weight: f64 = config.scenarios.iter().map(|s| s.weight).sum(); + assert_eq!(total_weight, 100.0); + + println!("✅ ecommerce-scenario.yaml is valid"); +} + +#[test] +fn test_stress_test_template() { + let config = load_example_config("stress-test.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 200); + assert_eq!(config.scenarios.len(), 3); + + // Check scenario distribution + let total_weight: f64 = config.scenarios.iter().map(|s| s.weight).sum(); + assert_eq!(total_weight, 100.0); + + println!("✅ stress-test.yaml is valid"); +} + +#[test] +fn test_data_driven_test_template() { + let config = load_example_config("data-driven-test.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 20); + assert_eq!(config.scenarios.len(), 2); + + // Check data file configurations + assert_eq!(config.scenarios[0].name, "User Login with CSV Data"); + assert!(config.scenarios[0].data_file.is_some()); + let csv_data_file = config.scenarios[0].data_file.as_ref().unwrap(); + assert_eq!(csv_data_file.format, "csv"); + assert_eq!(csv_data_file.strategy, "random"); + + assert_eq!(config.scenarios[1].name, "Product Search with JSON Data"); + assert!(config.scenarios[1].data_file.is_some()); + let json_data_file = config.scenarios[1].data_file.as_ref().unwrap(); + assert_eq!(json_data_file.format, "json"); + assert_eq!(json_data_file.strategy, "cycle"); + + println!("✅ data-driven-test.yaml is valid"); +} + +#[test] +fn test_authenticated_api_template() { + let config = load_example_config("authenticated-api.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 25); + assert_eq!(config.scenarios.len(), 3); + 
+ // Check authentication scenarios + assert_eq!(config.scenarios[0].name, "JWT Authenticated Requests"); + assert_eq!(config.scenarios[0].weight, 60.0); + assert_eq!(config.scenarios[1].name, "API Key Authenticated Requests"); + assert_eq!(config.scenarios[1].weight, 30.0); + assert_eq!(config.scenarios[2].name, "OAuth Token Refresh Flow"); + assert_eq!(config.scenarios[2].weight, 10.0); + + println!("✅ authenticated-api.yaml is valid"); +} + +#[test] +fn test_microservices_test_template() { + let config = load_example_config("microservices-test.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://gateway.example.com"); + assert_eq!(config.config.workers, 40); + assert_eq!(config.scenarios.len(), 4); + + // Check service scenarios + assert_eq!(config.scenarios[0].name, "User Service Flow"); + assert_eq!(config.scenarios[0].weight, 25.0); + assert_eq!(config.scenarios[1].name, "Product Service Flow"); + assert_eq!(config.scenarios[1].weight, 30.0); + assert_eq!(config.scenarios[2].name, "Order Service Flow"); + assert_eq!(config.scenarios[2].weight, 30.0); + assert_eq!(config.scenarios[3].name, "Inventory Service Flow"); + assert_eq!(config.scenarios[3].weight, 15.0); + + println!("✅ microservices-test.yaml is valid"); +} + +#[test] +fn test_graphql_api_template() { + let config = load_example_config("graphql-api.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://graphql.example.com"); + assert_eq!(config.config.workers, 30); + assert_eq!(config.scenarios.len(), 4); + + // Check GraphQL scenarios + assert_eq!(config.scenarios[0].name, "Simple GraphQL Queries"); + assert_eq!(config.scenarios[0].weight, 40.0); + assert_eq!(config.scenarios[1].name, "Complex Nested Queries"); + assert_eq!(config.scenarios[1].weight, 25.0); + assert_eq!(config.scenarios[2].name, "GraphQL Mutations"); + assert_eq!(config.scenarios[2].weight, 25.0); + assert_eq!(config.scenarios[3].name, "GraphQL Search and 
Filter"); + assert_eq!(config.scenarios[3].weight, 10.0); + + println!("✅ graphql-api.yaml is valid"); +} + +#[test] +fn test_spike_test_template() { + let config = load_example_config("spike-test.yaml"); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 150); + assert_eq!(config.scenarios.len(), 3); + + // Check spike scenarios + assert_eq!(config.scenarios[0].name, "High-Traffic Endpoint"); + assert_eq!(config.scenarios[0].weight, 80.0); + assert_eq!(config.scenarios[1].name, "Spike Write Operations"); + assert_eq!(config.scenarios[1].weight, 15.0); + assert_eq!(config.scenarios[2].name, "System Health Check"); + assert_eq!(config.scenarios[2].weight, 5.0); + + println!("✅ spike-test.yaml is valid"); +} + +#[test] +fn test_all_templates_parse() { + let templates = vec![ + "basic-api-test.yaml", + "ecommerce-scenario.yaml", + "stress-test.yaml", + "data-driven-test.yaml", + "authenticated-api.yaml", + "microservices-test.yaml", + "graphql-api.yaml", + "spike-test.yaml", + ]; + + for template in &templates { + validate_example_config(template); + } + + println!("✅ All {} templates are valid", templates.len()); +} + +#[test] +fn test_all_templates_have_metadata() { + let templates = vec![ + "basic-api-test.yaml", + "ecommerce-scenario.yaml", + "stress-test.yaml", + "data-driven-test.yaml", + "authenticated-api.yaml", + "microservices-test.yaml", + "graphql-api.yaml", + "spike-test.yaml", + ]; + + for template in templates { + let config = load_example_config(template); + + assert!( + config.metadata.name.is_some(), + "{}: metadata.name is missing", + template + ); + assert!( + config.metadata.description.is_some(), + "{}: metadata.description is missing", + template + ); + assert!( + !config.metadata.tags.is_empty(), + "{}: metadata.tags are empty", + template + ); + } + + println!("✅ All templates have complete metadata"); +} + +#[test] +fn 
test_all_templates_have_valid_scenarios() { + let templates = vec![ + "basic-api-test.yaml", + "ecommerce-scenario.yaml", + "stress-test.yaml", + "data-driven-test.yaml", + "authenticated-api.yaml", + "microservices-test.yaml", + "graphql-api.yaml", + "spike-test.yaml", + ]; + + for template in templates { + let config = load_example_config(template); + + // All templates should have at least one scenario + assert!( + !config.scenarios.is_empty(), + "{}: no scenarios defined", + template + ); + + // All scenarios should have valid properties + for scenario in &config.scenarios { + assert!( + !scenario.name.is_empty(), + "{}: scenario name is empty", + template + ); + assert!( + scenario.weight > 0.0, + "{}: scenario weight must be > 0", + template + ); + assert!( + !scenario.steps.is_empty(), + "{}: scenario '{}' has no steps", + template, + scenario.name + ); + } + } + + println!("✅ All templates have valid scenarios"); +} + +#[test] +fn test_example_data_files_exist() { + let data_files = vec!["examples/data/users.csv", "examples/data/products.json"]; + + for file in data_files { + assert!(Path::new(file).exists(), "Data file not found: {}", file); + } + + println!("✅ All example data files exist"); +} + +#[test] +fn test_users_csv_format() { + let csv_content = + fs::read_to_string("examples/data/users.csv").expect("Failed to read users.csv"); + + // Check header + assert!(csv_content.contains("username,email,user_id")); + + // Count lines (header + data) + let line_count = csv_content.lines().count(); + assert!(line_count > 1, "CSV file should have data rows"); + + // Check first data row + assert!(csv_content.contains("john.doe")); + + println!("✅ users.csv has correct format ({} rows)", line_count - 1); +} + +#[test] +fn test_products_json_format() { + let json_content = + fs::read_to_string("examples/data/products.json").expect("Failed to read products.json"); + + // Parse JSON + let products: serde_json::Value = + 
serde_json::from_str(&json_content).expect("Failed to parse products.json"); + + // Should be an array + assert!(products.is_array(), "products.json should be an array"); + + let products_array = products.as_array().unwrap(); + assert!( + !products_array.is_empty(), + "products.json should not be empty" + ); + + // Check first product has required fields + let first_product = &products_array[0]; + assert!(first_product.get("product_name").is_some()); + assert!(first_product.get("category").is_some()); + assert!(first_product.get("sku").is_some()); + assert!(first_product.get("price").is_some()); + + println!( + "✅ products.json has correct format ({} products)", + products_array.len() + ); +} + +#[test] +fn test_readme_exists() { + assert!( + Path::new("examples/configs/README.md").exists(), + "README.md not found in examples/configs/" + ); + + let readme = + fs::read_to_string("examples/configs/README.md").expect("Failed to read README.md"); + + // Check that README documents all templates + assert!(readme.contains("basic-api-test.yaml")); + assert!(readme.contains("ecommerce-scenario.yaml")); + assert!(readme.contains("stress-test.yaml")); + assert!(readme.contains("data-driven-test.yaml")); + assert!(readme.contains("authenticated-api.yaml")); + assert!(readme.contains("microservices-test.yaml")); + assert!(readme.contains("graphql-api.yaml")); + assert!(readme.contains("spike-test.yaml")); + + println!("✅ README.md exists and documents all templates"); +} + +#[test] +fn test_template_weights_sum_correctly() { + let templates_with_weights = vec![ + "ecommerce-scenario.yaml", + "stress-test.yaml", + "authenticated-api.yaml", + "microservices-test.yaml", + "graphql-api.yaml", + "spike-test.yaml", + ]; + + for template in templates_with_weights { + let config = load_example_config(template); + let total_weight: f64 = config.scenarios.iter().map(|s| s.weight).sum(); + + assert!( + (total_weight - 100.0).abs() < 0.001, + "{}: weights sum to {}, expected 100", + 
template, + total_weight + ); + } + + println!("✅ All multi-scenario templates have weights summing to 100"); +} + +#[test] +fn test_templates_have_reasonable_settings() { + let templates = vec![ + "basic-api-test.yaml", + "ecommerce-scenario.yaml", + "stress-test.yaml", + "data-driven-test.yaml", + "authenticated-api.yaml", + "microservices-test.yaml", + "graphql-api.yaml", + "spike-test.yaml", + ]; + + for template in templates { + let config = load_example_config(template); + + // Workers should be reasonable (1-500) + assert!( + config.config.workers >= 1 && config.config.workers <= 500, + "{}: workers {} out of reasonable range (1-500)", + template, + config.config.workers + ); + + // Should have example.com URLs (not real production URLs) + assert!( + config.config.base_url.contains("example.com"), + "{}: should use example.com URLs", + template + ); + } + + println!("✅ All templates have reasonable settings"); +} diff --git a/tests/config_hot_reload_tests.rs b/tests/config_hot_reload_tests.rs new file mode 100644 index 0000000..f2d5b0f --- /dev/null +++ b/tests/config_hot_reload_tests.rs @@ -0,0 +1,510 @@ +//! Integration tests for config hot-reload (Issue #44). +//! +//! These tests validate: +//! - File watching and change detection +//! - Config validation before reload +//! - Reload notification system +//! - Debouncing of rapid changes +//! 
- Development mode enable/disable + +use rust_loadtest::config_hot_reload::{ + ConfigWatcher, ConfigWatcherError, HotReloadConfig, ReloadNotifier, +}; +use std::fs; +use std::sync::Arc; +use std::thread; +use std::time::Duration; +use tempfile::TempDir; + +fn create_test_config() -> String { + r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" + workers: 10 +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/test" +"# + .to_string() +} + +fn create_updated_config() -> String { + r#" +version: "1.0" +config: + baseUrl: "https://updated.com" + duration: "10m" + workers: 20 +load: + model: "rps" + target: 100 +scenarios: + - name: "Updated Test" + steps: + - request: + method: "POST" + path: "/updated" +"# + .to_string() +} + +fn create_invalid_config() -> String { + r#" +version: "1.0" +config: + baseUrl: "not-a-url" + duration: "invalid" + workers: -5 +load: + model: "concurrent" +scenarios: [] +"# + .to_string() +} + +#[test] +fn test_hot_reload_config_creation() { + let config = HotReloadConfig::new("test.yaml"); + assert!(config.enabled); + assert_eq!(config.file_path.to_str().unwrap(), "test.yaml"); + assert_eq!(config.debounce_ms, 500); + + println!("✅ HotReloadConfig creation works"); +} + +#[test] +fn test_hot_reload_config_disabled() { + let config = HotReloadConfig::disabled(); + assert!(!config.enabled); + assert_eq!(config.debounce_ms, 0); + + println!("✅ HotReloadConfig disabled mode works"); +} + +#[test] +fn test_hot_reload_config_builders() { + let config = HotReloadConfig::new("test.yaml") + .disable() + .with_debounce_ms(1000); + + assert!(!config.enabled); + assert_eq!(config.debounce_ms, 1000); + + let enabled = HotReloadConfig::new("test.yaml").enable(); + assert!(enabled.enabled); + + println!("✅ HotReloadConfig builder methods work"); +} + +#[test] +fn test_reload_notifier_basic() { + let notifier = ReloadNotifier::new(); + + // Should be empty initially + 
assert!(notifier.try_recv().is_none()); + + println!("✅ ReloadNotifier basic functionality works"); +} + +#[test] +fn test_reload_notifier_send_receive() { + use rust_loadtest::yaml_config::YamlConfig; + use std::path::PathBuf; + use std::time::SystemTime; + + let notifier = ReloadNotifier::new(); + + // Send event + let event = rust_loadtest::config_hot_reload::ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: YamlConfig::default(), + valid: true, + error: None, + }; + + notifier.notify(event.clone()); + + // Receive event + let received = notifier.try_recv(); + assert!(received.is_some()); + + let received_event = received.unwrap(); + assert!(received_event.is_success()); + assert!(received_event.valid); + assert!(received_event.error.is_none()); + + // Should be empty again + assert!(notifier.try_recv().is_none()); + + println!("✅ ReloadNotifier send/receive works"); +} + +#[test] +fn test_reload_notifier_multiple_events() { + use rust_loadtest::yaml_config::YamlConfig; + use std::path::PathBuf; + use std::time::SystemTime; + + let notifier = ReloadNotifier::new(); + + // Send multiple events + for i in 0..3 { + let event = rust_loadtest::config_hot_reload::ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from(format!("test{}.yaml", i)), + config: YamlConfig::default(), + valid: true, + error: None, + }; + notifier.notify(event); + } + + // Receive all events + for _ in 0..3 { + let received = notifier.try_recv(); + assert!(received.is_some()); + } + + // Should be empty + assert!(notifier.try_recv().is_none()); + + println!("✅ ReloadNotifier handles multiple events"); +} + +#[test] +fn test_config_watcher_creation_file_not_found() { + let notifier = Arc::new(ReloadNotifier::new()); + let result = ConfigWatcher::new("nonexistent.yaml", notifier); + + assert!(result.is_err()); + match result.unwrap_err() { + ConfigWatcherError::FileNotFound(path) => { + assert_eq!(path.to_str().unwrap(), 
"nonexistent.yaml"); + } + _ => panic!("Expected FileNotFound error"), + } + + println!("✅ ConfigWatcher rejects nonexistent files"); +} + +#[test] +fn test_config_watcher_creation_success() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + let watcher = ConfigWatcher::new(&config_path, notifier); + + assert!(watcher.is_ok()); + let watcher = watcher.unwrap(); + assert_eq!(watcher.file_path(), config_path.as_path()); + assert!(!watcher.is_running()); + + println!("✅ ConfigWatcher creation succeeds with valid file"); +} + +#[test] +fn test_config_watcher_with_config() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let hot_reload_config = HotReloadConfig::new(&config_path).with_debounce_ms(1000); + let notifier = Arc::new(ReloadNotifier::new()); + + let watcher = ConfigWatcher::with_config(hot_reload_config, notifier); + assert!(watcher.is_ok()); + + println!("✅ ConfigWatcher with custom config works"); +} + +#[test] +fn test_config_watcher_disabled() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let hot_reload_config = HotReloadConfig::new(&config_path).disable(); + let notifier = Arc::new(ReloadNotifier::new()); + + let mut watcher = ConfigWatcher::with_config(hot_reload_config, notifier).unwrap(); + + // Start should succeed but not actually watch + let result = watcher.start(); + assert!(result.is_ok()); + assert!(!watcher.is_running()); // Not running because disabled + + println!("✅ ConfigWatcher respects disabled flag"); +} + +#[test] +fn test_config_watcher_start_stop() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + 
fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + let mut watcher = ConfigWatcher::new(&config_path, notifier).unwrap(); + + // Start watcher + let result = watcher.start(); + assert!(result.is_ok()); + assert!(watcher.is_running()); + + // Stop watcher + let result = watcher.stop(); + assert!(result.is_ok()); + assert!(!watcher.is_running()); + + println!("✅ ConfigWatcher start/stop works"); +} + +#[test] +fn test_config_watcher_file_change_detection() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + let notifier_clone = notifier.clone(); + let mut watcher = ConfigWatcher::new(&config_path, notifier).unwrap(); + + // Start watcher + watcher.start().unwrap(); + + // Give watcher time to initialize + thread::sleep(Duration::from_millis(100)); + + // Modify file + fs::write(&config_path, create_updated_config()).unwrap(); + + // Wait for change detection + thread::sleep(Duration::from_millis(1000)); + + // Check for reload event + let event = notifier_clone.try_recv(); + assert!(event.is_some(), "Should receive reload event"); + + let event = event.unwrap(); + assert!(event.is_success(), "Reload should succeed"); + assert_eq!(event.config.config.base_url, "https://updated.com"); + assert_eq!(event.config.config.workers, 20); + + println!("✅ ConfigWatcher detects file changes"); +} + +#[test] +fn test_config_watcher_invalid_config_handling() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + let notifier_clone = notifier.clone(); + let mut watcher = ConfigWatcher::new(&config_path, notifier).unwrap(); + + // Start watcher + watcher.start().unwrap(); + thread::sleep(Duration::from_millis(100)); 
+ + // Write invalid config + fs::write(&config_path, create_invalid_config()).unwrap(); + + // Wait for change detection + thread::sleep(Duration::from_millis(1000)); + + // Check for reload event + let event = notifier_clone.try_recv(); + assert!( + event.is_some(), + "Should receive reload event even for invalid config" + ); + + let event = event.unwrap(); + assert!(!event.is_success(), "Reload should fail for invalid config"); + assert!(event.error.is_some()); + + println!("✅ ConfigWatcher handles invalid config gracefully"); +} + +#[test] +fn test_config_watcher_debouncing() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + // Short debounce for testing + let hot_reload_config = HotReloadConfig::new(&config_path).with_debounce_ms(300); + let notifier = Arc::new(ReloadNotifier::new()); + let notifier_clone = notifier.clone(); + let mut watcher = ConfigWatcher::with_config(hot_reload_config, notifier).unwrap(); + + watcher.start().unwrap(); + thread::sleep(Duration::from_millis(100)); + + // Make rapid changes + for i in 0..3 { + let config = format!( + r#" +version: "1.0" +config: + baseUrl: "https://test{}.com" + duration: "5m" + workers: 10 +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/test" +"#, + i + ); + fs::write(&config_path, config).unwrap(); + thread::sleep(Duration::from_millis(50)); // Rapid changes + } + + // Wait for debounce + processing + thread::sleep(Duration::from_millis(800)); + + // Should only get one or two events (debounced) + let mut event_count = 0; + while notifier_clone.try_recv().is_some() { + event_count += 1; + } + + // Due to debouncing, should be fewer than 3 events + assert!( + event_count < 3, + "Expected fewer than 3 events due to debouncing, got {}", + event_count + ); + + println!( + "✅ ConfigWatcher debounces rapid changes (got {} events)", + event_count + 
); +} + +#[test] +fn test_config_watcher_multiple_changes() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + let notifier_clone = notifier.clone(); + let mut watcher = ConfigWatcher::new(&config_path, notifier).unwrap(); + + watcher.start().unwrap(); + thread::sleep(Duration::from_millis(100)); + + // First change + fs::write(&config_path, create_updated_config()).unwrap(); + thread::sleep(Duration::from_millis(700)); + + // Second change (after debounce) + fs::write(&config_path, create_test_config()).unwrap(); + thread::sleep(Duration::from_millis(700)); + + // Should get two events + let event1 = notifier_clone.try_recv(); + assert!(event1.is_some()); + assert_eq!( + event1.unwrap().config.config.base_url, + "https://updated.com" + ); + + let event2 = notifier_clone.try_recv(); + assert!(event2.is_some()); + assert_eq!(event2.unwrap().config.config.base_url, "https://test.com"); + + println!("✅ ConfigWatcher handles multiple distinct changes"); +} + +#[test] +fn test_reload_event_is_success() { + use rust_loadtest::config_hot_reload::ReloadEvent; + use rust_loadtest::yaml_config::YamlConfig; + use std::path::PathBuf; + use std::time::SystemTime; + + let success = ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: YamlConfig::default(), + valid: true, + error: None, + }; + assert!(success.is_success()); + + let failed_validation = ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: YamlConfig::default(), + valid: false, + error: Some("Validation failed".to_string()), + }; + assert!(!failed_validation.is_success()); + + let with_error = ReloadEvent { + timestamp: SystemTime::now(), + file_path: PathBuf::from("test.yaml"), + config: YamlConfig::default(), + valid: true, + error: Some("Some error".to_string()), + }; + 
assert!(!with_error.is_success()); + + println!("✅ ReloadEvent.is_success() works correctly"); +} + +#[test] +fn test_config_watcher_drop_stops_watching() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("test.yaml"); + fs::write(&config_path, create_test_config()).unwrap(); + + let notifier = Arc::new(ReloadNotifier::new()); + { + let mut watcher = ConfigWatcher::new(&config_path, notifier.clone()).unwrap(); + watcher.start().unwrap(); + assert!(watcher.is_running()); + // Watcher dropped here + } + + // Change file after drop + thread::sleep(Duration::from_millis(100)); + fs::write(&config_path, create_updated_config()).unwrap(); + thread::sleep(Duration::from_millis(700)); + + // Should not receive event (watcher was dropped) + let event = notifier.try_recv(); + assert!(event.is_none(), "Should not receive event after drop"); + + println!("✅ ConfigWatcher stops watching when dropped"); +} + +#[test] +fn test_yaml_config_default() { + use rust_loadtest::yaml_config::YamlConfig; + + let config = YamlConfig::default(); + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://example.com"); + assert_eq!(config.config.workers, 10); + assert_eq!(config.scenarios.len(), 0); + + println!("✅ YamlConfig::default() works"); +} diff --git a/tests/config_merge_tests.rs b/tests/config_merge_tests.rs new file mode 100644 index 0000000..d76054d --- /dev/null +++ b/tests/config_merge_tests.rs @@ -0,0 +1,377 @@ +//! Integration tests for configuration merging (Issue #39). +//! +//! These tests validate configuration precedence: env > yaml > defaults. 
+ +use rust_loadtest::config_merge::{ConfigDefaults, ConfigMerger, ConfigPrecedence}; +use std::env; +use std::time::Duration; + +#[test] +fn test_default_values() { + let defaults = ConfigDefaults::new(); + + assert_eq!(defaults.workers, 10); + assert_eq!(defaults.timeout, Duration::from_secs(30)); + assert!(!defaults.skip_tls_verify); + assert_eq!(defaults.scenario_weight, 1.0); + assert_eq!(defaults.load_model, "concurrent"); + + // Test static methods too + assert_eq!(ConfigDefaults::workers(), 10); + assert_eq!(ConfigDefaults::timeout(), Duration::from_secs(30)); + assert!(!ConfigDefaults::skip_tls_verify()); + assert_eq!(ConfigDefaults::scenario_weight(), 1.0); + assert_eq!(ConfigDefaults::load_model(), "concurrent"); + + println!("✅ Default values are correct"); +} + +#[test] +fn test_workers_precedence_default() { + // No YAML, no env -> use default + let result = ConfigMerger::merge_workers(None, "WORKERS_TEST_1"); + assert_eq!(result, 10); + + println!("✅ Workers use default when not specified"); +} + +#[test] +fn test_workers_precedence_yaml() { + // YAML provided, no env -> use YAML + let result = ConfigMerger::merge_workers(Some(50), "WORKERS_TEST_2"); + assert_eq!(result, 50); + + println!("✅ Workers use YAML value when provided"); +} + +#[test] +fn test_workers_precedence_env_override() { + // YAML=50, ENV=100 -> use ENV + env::set_var("WORKERS_TEST_3", "100"); + let result = ConfigMerger::merge_workers(Some(50), "WORKERS_TEST_3"); + assert_eq!(result, 100); + env::remove_var("WORKERS_TEST_3"); + + println!("✅ Environment variable overrides YAML for workers"); +} + +#[test] +fn test_workers_precedence_full_chain() { + // Test all three: default < yaml < env + + // 1. Default only + let result = ConfigMerger::merge_workers(None, "WORKERS_CHAIN_1"); + assert_eq!(result, 10, "Should use default"); + + // 2. 
YAML overrides default + let result = ConfigMerger::merge_workers(Some(50), "WORKERS_CHAIN_2"); + assert_eq!(result, 50, "Should use YAML"); + + // 3. Env overrides YAML and default + env::set_var("WORKERS_CHAIN_3", "100"); + let result = ConfigMerger::merge_workers(Some(50), "WORKERS_CHAIN_3"); + assert_eq!(result, 100, "Should use env"); + env::remove_var("WORKERS_CHAIN_3"); + + println!("✅ Workers precedence chain works: env > yaml > default"); +} + +#[test] +fn test_timeout_precedence() { + // Default + let result = ConfigMerger::merge_timeout(None, "TIMEOUT_TEST_1"); + assert_eq!(result, Duration::from_secs(30)); + + // YAML + let result = ConfigMerger::merge_timeout(Some(Duration::from_secs(60)), "TIMEOUT_TEST_2"); + assert_eq!(result, Duration::from_secs(60)); + + // Env override + env::set_var("TIMEOUT_TEST_3", "90s"); + let result = ConfigMerger::merge_timeout(Some(Duration::from_secs(60)), "TIMEOUT_TEST_3"); + assert_eq!(result, Duration::from_secs(90)); + env::remove_var("TIMEOUT_TEST_3"); + + println!("✅ Timeout precedence works: env > yaml > default"); +} + +#[test] +fn test_skip_tls_verify_precedence() { + // Default + let result = ConfigMerger::merge_skip_tls_verify(None, "TLS_TEST_1"); + assert!(!result); + + // YAML + let result = ConfigMerger::merge_skip_tls_verify(Some(true), "TLS_TEST_2"); + assert!(result); + + // Env override with "true" + env::set_var("TLS_TEST_3", "true"); + let result = ConfigMerger::merge_skip_tls_verify(Some(false), "TLS_TEST_3"); + assert!(result); + env::remove_var("TLS_TEST_3"); + + // Env override with "false" + env::set_var("TLS_TEST_4", "false"); + let result = ConfigMerger::merge_skip_tls_verify(Some(true), "TLS_TEST_4"); + assert!(!result); + env::remove_var("TLS_TEST_4"); + + println!("✅ Skip TLS verify precedence works"); +} + +#[test] +fn test_scenario_weight_precedence() { + // Default + let result = ConfigMerger::merge_scenario_weight(None); + assert_eq!(result, 1.0); + + // YAML + let result = 
ConfigMerger::merge_scenario_weight(Some(2.5)); + assert_eq!(result, 2.5); + + println!("✅ Scenario weight uses YAML or default"); +} + +#[test] +fn test_string_precedence() { + // Default only + let result = ConfigMerger::merge_string(None, "STRING_TEST_1", "default".to_string()); + assert_eq!(result, "default"); + + // YAML overrides default + let result = ConfigMerger::merge_string( + Some("yaml".to_string()), + "STRING_TEST_2", + "default".to_string(), + ); + assert_eq!(result, "yaml"); + + // Env overrides YAML and default + env::set_var("STRING_TEST_3", "env"); + let result = ConfigMerger::merge_string( + Some("yaml".to_string()), + "STRING_TEST_3", + "default".to_string(), + ); + assert_eq!(result, "env"); + env::remove_var("STRING_TEST_3"); + + println!("✅ String precedence works: env > yaml > default"); +} + +#[test] +fn test_optional_string_precedence() { + // No value + let result = ConfigMerger::merge_optional_string(None, "OPT_STRING_TEST_1"); + assert_eq!(result, None); + + // YAML only + let result = ConfigMerger::merge_optional_string(Some("yaml".to_string()), "OPT_STRING_TEST_2"); + assert_eq!(result, Some("yaml".to_string())); + + // Env overrides YAML + env::set_var("OPT_STRING_TEST_3", "env"); + let result = ConfigMerger::merge_optional_string(Some("yaml".to_string()), "OPT_STRING_TEST_3"); + assert_eq!(result, Some("env".to_string())); + env::remove_var("OPT_STRING_TEST_3"); + + println!("✅ Optional string precedence works: env > yaml"); +} + +#[test] +fn test_rps_precedence() { + // No value + let result = ConfigMerger::merge_rps(None, "RPS_TEST_1"); + assert_eq!(result, None); + + // YAML only + let result = ConfigMerger::merge_rps(Some(100.0), "RPS_TEST_2"); + assert_eq!(result, Some(100.0)); + + // Env overrides YAML + env::set_var("RPS_TEST_3", "200.5"); + let result = ConfigMerger::merge_rps(Some(100.0), "RPS_TEST_3"); + assert_eq!(result, Some(200.5)); + env::remove_var("RPS_TEST_3"); + + println!("✅ RPS precedence works: env > yaml"); 
+} + +#[test] +fn test_env_invalid_value_fallback() { + // Invalid env value should fall back to YAML or default + env::set_var("ENV_INVALID_1", "not-a-number"); + let result = ConfigMerger::merge_workers(Some(50), "ENV_INVALID_1"); + assert_eq!(result, 50, "Should fall back to YAML when env is invalid"); + env::remove_var("ENV_INVALID_1"); + + env::set_var("ENV_INVALID_2", "not-a-number"); + let result = ConfigMerger::merge_workers(None, "ENV_INVALID_2"); + assert_eq!( + result, 10, + "Should fall back to default when env is invalid" + ); + env::remove_var("ENV_INVALID_2"); + + println!("✅ Invalid env values fall back to YAML or default"); +} + +#[test] +fn test_env_empty_value_fallback() { + // Empty env value should fall back to YAML or default + env::set_var("ENV_EMPTY_1", ""); + let result = ConfigMerger::merge_string( + Some("yaml".to_string()), + "ENV_EMPTY_1", + "default".to_string(), + ); + assert_eq!(result, "yaml", "Empty env should use YAML"); + env::remove_var("ENV_EMPTY_1"); + + env::set_var("ENV_EMPTY_2", ""); + let result = ConfigMerger::merge_string(None, "ENV_EMPTY_2", "default".to_string()); + assert_eq!(result, "default", "Empty env should use default"); + env::remove_var("ENV_EMPTY_2"); + + println!("✅ Empty env values fall back to YAML or default"); +} + +#[test] +fn test_multiple_fields_precedence() { + // Set multiple env vars + env::set_var("MULTI_WORKERS", "100"); + env::set_var("MULTI_TIMEOUT", "90s"); + env::set_var("MULTI_TLS", "true"); + + // All should use env values + let workers = ConfigMerger::merge_workers(Some(50), "MULTI_WORKERS"); + let timeout = ConfigMerger::merge_timeout(Some(Duration::from_secs(60)), "MULTI_TIMEOUT"); + let tls = ConfigMerger::merge_skip_tls_verify(Some(false), "MULTI_TLS"); + + assert_eq!(workers, 100); + assert_eq!(timeout, Duration::from_secs(90)); + assert!(tls); + + // Clean up + env::remove_var("MULTI_WORKERS"); + env::remove_var("MULTI_TIMEOUT"); + env::remove_var("MULTI_TLS"); + + println!("✅ 
Multiple fields respect env precedence independently"); +} + +#[test] +fn test_precedence_documentation() { + let docs = ConfigPrecedence::documentation(); + + assert!(!docs.is_empty()); + assert!(docs.contains("Precedence")); + assert!(docs.contains("Environment Variables")); + assert!(docs.contains("YAML Configuration File")); + assert!(docs.contains("Default Values")); + assert!(docs.contains("workers: 10")); + assert!(docs.contains("timeout: 30s")); + + println!("✅ Precedence documentation is comprehensive"); + println!(" Documentation length: {} chars", docs.len()); +} + +#[test] +fn test_timeout_duration_formats() { + // Test various duration formats via env + env::set_var("TIMEOUT_FMT_1", "30s"); + let result = ConfigMerger::merge_timeout(None, "TIMEOUT_FMT_1"); + assert_eq!(result, Duration::from_secs(30)); + env::remove_var("TIMEOUT_FMT_1"); + + env::set_var("TIMEOUT_FMT_2", "5m"); + let result = ConfigMerger::merge_timeout(None, "TIMEOUT_FMT_2"); + assert_eq!(result, Duration::from_secs(300)); + env::remove_var("TIMEOUT_FMT_2"); + + env::set_var("TIMEOUT_FMT_3", "2h"); + let result = ConfigMerger::merge_timeout(None, "TIMEOUT_FMT_3"); + assert_eq!(result, Duration::from_secs(7200)); + env::remove_var("TIMEOUT_FMT_3"); + + println!("✅ Timeout duration formats work with env override"); +} + +#[test] +fn test_precedence_isolation() { + // Test that different fields don't interfere with each other + env::set_var("ISOLATION_WORKERS", "100"); + // Don't set ISOLATION_TIMEOUT + + let workers = ConfigMerger::merge_workers(Some(50), "ISOLATION_WORKERS"); + let timeout = ConfigMerger::merge_timeout(Some(Duration::from_secs(60)), "ISOLATION_TIMEOUT"); + + assert_eq!(workers, 100, "Workers should use env"); + assert_eq!(timeout, Duration::from_secs(60), "Timeout should use YAML"); + + env::remove_var("ISOLATION_WORKERS"); + + println!("✅ Field precedence is independent and isolated"); +} + +#[test] +fn test_case_sensitivity_boolean() { + // Test boolean env var case 
insensitivity + env::set_var("BOOL_TEST_1", "TRUE"); + assert!(ConfigMerger::merge_skip_tls_verify(None, "BOOL_TEST_1")); + env::remove_var("BOOL_TEST_1"); + + env::set_var("BOOL_TEST_2", "True"); + assert!(ConfigMerger::merge_skip_tls_verify(None, "BOOL_TEST_2")); + env::remove_var("BOOL_TEST_2"); + + env::set_var("BOOL_TEST_3", "true"); + assert!(ConfigMerger::merge_skip_tls_verify(None, "BOOL_TEST_3")); + env::remove_var("BOOL_TEST_3"); + + env::set_var("BOOL_TEST_4", "false"); + assert!(!ConfigMerger::merge_skip_tls_verify(None, "BOOL_TEST_4")); + env::remove_var("BOOL_TEST_4"); + + println!("✅ Boolean env vars are case insensitive"); +} + +#[test] +fn test_full_precedence_scenario() { + // Simulate a realistic scenario with all three sources + println!("\n=== Testing Full Precedence Scenario ==="); + + // Defaults (implicit) + println!("1. Defaults: workers=10, timeout=30s, tls=false"); + + // YAML config (simulated) + let yaml_workers = Some(50); + let yaml_timeout = Some(Duration::from_secs(60)); + let yaml_tls = Some(false); + println!("2. YAML: workers=50, timeout=60s, tls=false"); + + // Environment overrides (for some fields) + env::set_var("FULL_WORKERS", "100"); + // No env for timeout - should use YAML + // No env for tls - should use YAML + println!("3. Environment: workers=100"); + + // Resolve with precedence + let final_workers = ConfigMerger::merge_workers(yaml_workers, "FULL_WORKERS"); + let final_timeout = ConfigMerger::merge_timeout(yaml_timeout, "FULL_TIMEOUT"); + let final_tls = ConfigMerger::merge_skip_tls_verify(yaml_tls, "FULL_TLS"); + + println!("\n4. 
Final values:"); + println!(" workers: {} (from env)", final_workers); + println!(" timeout: {}s (from YAML)", final_timeout.as_secs()); + println!(" tls: {} (from YAML)", final_tls); + + assert_eq!(final_workers, 100, "Workers from env"); + assert_eq!(final_timeout, Duration::from_secs(60), "Timeout from YAML"); + assert!(!final_tls, "TLS from YAML"); + + env::remove_var("FULL_WORKERS"); + + println!("✅ Full precedence scenario works correctly"); +} diff --git a/tests/config_validation_tests.rs b/tests/config_validation_tests.rs new file mode 100644 index 0000000..3a7f0b8 --- /dev/null +++ b/tests/config_validation_tests.rs @@ -0,0 +1,569 @@ +//! Integration tests for config validation (Issue #38). +//! +//! These tests validate the enhanced validation system with detailed error messages. + +use rust_loadtest::config_validation::{ + ConfigSchema, DurationValidator, HttpMethodValidator, LoadModelValidator, RangeValidator, + UrlValidator, ValidationContext, +}; +use rust_loadtest::yaml_config::YamlConfig; + +#[test] +fn test_url_validator_valid_urls() { + assert!(UrlValidator::validate("https://example.com").is_ok()); + assert!(UrlValidator::validate("http://localhost").is_ok()); + assert!(UrlValidator::validate("https://api.example.com/v1").is_ok()); + assert!(UrlValidator::validate("http://192.168.1.1:8080").is_ok()); + + println!("✅ Valid URLs pass validation"); +} + +#[test] +fn test_url_validator_invalid_urls() { + assert!(UrlValidator::validate("").is_err()); + assert!(UrlValidator::validate("example.com").is_err()); + assert!(UrlValidator::validate("ftp://example.com").is_err()); + assert!(UrlValidator::validate("https://example .com").is_err()); + + println!("✅ Invalid URLs are rejected"); +} + +#[test] +fn test_duration_validator_valid_formats() { + assert!(DurationValidator::validate("1s").is_ok()); + assert!(DurationValidator::validate("30s").is_ok()); + assert!(DurationValidator::validate("5m").is_ok()); + 
assert!(DurationValidator::validate("2h").is_ok()); + assert!(DurationValidator::validate("1d").is_ok()); + + println!("✅ Valid duration formats pass validation"); +} + +#[test] +fn test_duration_validator_invalid_formats() { + assert!(DurationValidator::validate("invalid").is_err()); + assert!(DurationValidator::validate("30").is_err()); // missing unit + assert!(DurationValidator::validate("abc").is_err()); + + println!("✅ Invalid duration formats are rejected"); +} + +#[test] +fn test_duration_validator_positive() { + assert!(DurationValidator::validate_positive("1s").is_ok()); + assert!(DurationValidator::validate_positive("5m").is_ok()); + assert!(DurationValidator::validate_positive("0s").is_err()); + + println!("✅ Zero duration is rejected when positive required"); +} + +#[test] +fn test_range_validator_u64() { + assert!(RangeValidator::validate_u64(50, 1, 100, "test").is_ok()); + assert!(RangeValidator::validate_u64(1, 1, 100, "test").is_ok()); + assert!(RangeValidator::validate_u64(100, 1, 100, "test").is_ok()); + assert!(RangeValidator::validate_u64(0, 1, 100, "test").is_err()); + assert!(RangeValidator::validate_u64(101, 1, 100, "test").is_err()); + + println!("✅ Range validation for u64 works"); +} + +#[test] +fn test_range_validator_f64() { + assert!(RangeValidator::validate_f64(50.0, 1.0, 100.0, "test").is_ok()); + assert!(RangeValidator::validate_f64(0.5, 1.0, 100.0, "test").is_err()); + assert!(RangeValidator::validate_f64(100.5, 1.0, 100.0, "test").is_err()); + + println!("✅ Range validation for f64 works"); +} + +#[test] +fn test_range_validator_positive() { + assert!(RangeValidator::validate_positive_u64(1, "test").is_ok()); + assert!(RangeValidator::validate_positive_u64(100, "test").is_ok()); + assert!(RangeValidator::validate_positive_u64(0, "test").is_err()); + + assert!(RangeValidator::validate_positive_f64(0.1, "test").is_ok()); + assert!(RangeValidator::validate_positive_f64(100.0, "test").is_ok()); + 
assert!(RangeValidator::validate_positive_f64(0.0, "test").is_err()); + assert!(RangeValidator::validate_positive_f64(-1.0, "test").is_err()); + + println!("✅ Positive value validation works"); +} + +#[test] +fn test_http_method_validator() { + // Valid methods + assert!(HttpMethodValidator::validate("GET").is_ok()); + assert!(HttpMethodValidator::validate("POST").is_ok()); + assert!(HttpMethodValidator::validate("PUT").is_ok()); + assert!(HttpMethodValidator::validate("PATCH").is_ok()); + assert!(HttpMethodValidator::validate("DELETE").is_ok()); + assert!(HttpMethodValidator::validate("HEAD").is_ok()); + assert!(HttpMethodValidator::validate("OPTIONS").is_ok()); + + // Case insensitive + assert!(HttpMethodValidator::validate("get").is_ok()); + assert!(HttpMethodValidator::validate("Post").is_ok()); + + // Invalid methods + assert!(HttpMethodValidator::validate("INVALID").is_err()); + assert!(HttpMethodValidator::validate("CONNECT").is_err()); + + println!("✅ HTTP method validation works"); +} + +#[test] +fn test_load_model_validator_rps() { + assert!(LoadModelValidator::validate_rps(1.0).is_ok()); + assert!(LoadModelValidator::validate_rps(100.0).is_ok()); + assert!(LoadModelValidator::validate_rps(0.1).is_ok()); + + assert!(LoadModelValidator::validate_rps(0.0).is_err()); + assert!(LoadModelValidator::validate_rps(-10.0).is_err()); + + println!("✅ RPS load model validation works"); +} + +#[test] +fn test_load_model_validator_ramp() { + assert!(LoadModelValidator::validate_ramp(10.0, 100.0).is_ok()); + assert!(LoadModelValidator::validate_ramp(0.1, 100.0).is_ok()); + + assert!(LoadModelValidator::validate_ramp(100.0, 10.0).is_err()); + assert!(LoadModelValidator::validate_ramp(50.0, 50.0).is_err()); + assert!(LoadModelValidator::validate_ramp(0.0, 100.0).is_err()); + + println!("✅ Ramp load model validation works"); +} + +#[test] +fn test_load_model_validator_daily_traffic() { + assert!(LoadModelValidator::validate_daily_traffic(10.0, 50.0, 100.0).is_ok()); + 
assert!(LoadModelValidator::validate_daily_traffic(1.0, 10.0, 100.0).is_ok()); + + assert!(LoadModelValidator::validate_daily_traffic(100.0, 50.0, 10.0).is_err()); + assert!(LoadModelValidator::validate_daily_traffic(10.0, 10.0, 100.0).is_err()); + assert!(LoadModelValidator::validate_daily_traffic(10.0, 50.0, 50.0).is_err()); + + println!("✅ Daily traffic load model validation works"); +} + +#[test] +fn test_validation_context() { + let mut ctx = ValidationContext::new(); + + ctx.enter("config"); + assert_eq!(ctx.current_path(), "config"); + + ctx.enter("baseUrl"); + assert_eq!(ctx.current_path(), "config.baseUrl"); + + ctx.field_error("Invalid URL".to_string()); + assert!(ctx.has_errors()); + assert_eq!(ctx.errors().len(), 1); + + ctx.exit(); + ctx.exit(); + + println!("✅ Validation context tracks field paths"); +} + +#[test] +fn test_validation_context_multiple_errors() { + let mut ctx = ValidationContext::new(); + + ctx.enter("config"); + ctx.enter("baseUrl"); + ctx.field_error("Invalid URL".to_string()); + ctx.exit(); + + ctx.enter("workers"); + ctx.field_error("Invalid worker count".to_string()); + ctx.exit(); + ctx.exit(); + + assert_eq!(ctx.errors().len(), 2); + + let result = ctx.into_result(); + assert!(result.is_err()); + + println!("✅ Validation context collects multiple errors"); +} + +#[test] +fn test_yaml_validation_invalid_version() { + let yaml = r#" +version: "2.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("version")); + assert!(err.contains("2.0")); + + println!("✅ Invalid version caught by enhanced validation"); +} + +#[test] +fn test_yaml_validation_invalid_url() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "not-a-url" + duration: "1m" +load: + model: "concurrent" 
+scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("baseUrl") || err.contains("URL")); + + println!("✅ Invalid base URL caught by enhanced validation"); +} + +#[test] +fn test_yaml_validation_zero_workers() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + workers: 0 + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("workers") || err.contains("greater than 0")); + + println!("✅ Zero workers caught by enhanced validation"); +} + +#[test] +fn test_yaml_validation_invalid_http_method() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "INVALID" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("method") || err.contains("INVALID")); + + println!("✅ Invalid HTTP method caught by enhanced validation"); +} + +#[test] +fn test_yaml_validation_empty_path() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("path") || err.contains("empty")); + + println!("✅ Empty request path caught by enhanced validation"); +} + +#[test] +fn test_yaml_validation_invalid_rps() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" 
+ duration: "1m" +load: + model: "rps" + target: 0 +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("load") || err.contains("target") || err.contains("0")); + + println!("✅ Zero RPS caught by enhanced validation"); +} + +#[test] +fn test_yaml_validation_invalid_ramp() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "ramp" + min: 100 + max: 10 + rampDuration: "30s" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("load") || err.contains("min") || err.contains("max")); + + println!("✅ Invalid ramp configuration caught"); +} + +#[test] +fn test_yaml_validation_empty_scenario_name() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("name") || err.contains("empty")); + + println!("✅ Empty scenario name caught"); +} + +#[test] +fn test_yaml_validation_negative_weight() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + weight: 0 + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("weight") || err.contains("0")); + + println!("✅ Zero/negative weight caught"); +} + +#[test] +fn test_yaml_validation_too_many_workers() { + let yaml = r#" 
+version: "1.0" +config: + baseUrl: "https://test.com" + workers: 20000 + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + assert!(err.contains("workers") || err.contains("10000")); + + println!("✅ Excessive worker count caught"); +} + +#[test] +fn test_json_schema_generation() { + let schema = ConfigSchema::to_json_schema(); + + assert!(schema.is_object()); + assert!(schema.get("$schema").is_some()); + assert!(schema.get("title").is_some()); + assert!(schema.get("properties").is_some()); + + let properties = schema.get("properties").unwrap(); + assert!(properties.get("version").is_some()); + assert!(properties.get("config").is_some()); + assert!(properties.get("load").is_some()); + assert!(properties.get("scenarios").is_some()); + + println!("✅ JSON Schema generation works"); +} + +#[test] +fn test_json_schema_export() { + let schema_str = ConfigSchema::export_json_schema(); + + assert!(!schema_str.is_empty()); + assert!(schema_str.contains("\"$schema\"")); + assert!(schema_str.contains("\"version\"")); + assert!(schema_str.contains("\"config\"")); + assert!(schema_str.contains("\"baseUrl\"")); + assert!(schema_str.contains("\"workers\"")); + + println!("✅ JSON Schema export produces valid JSON"); + println!(" Schema length: {} bytes", schema_str.len()); +} + +#[test] +fn test_yaml_validation_multiple_errors() { + let yaml = r#" +version: "2.0" +config: + baseUrl: "invalid-url" + workers: 0 + duration: "1m" +load: + model: "rps" + target: -10 +scenarios: + - name: "" + weight: 0 + steps: + - request: + method: "INVALID" + path: "" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err().to_string(); + // Should contain multiple error mentions + assert!(err.len() > 100); // Multiple errors make for a long message 
+ + println!("✅ Multiple validation errors are collected"); + println!(" Error message length: {} chars", err.len()); +} + +#[test] +fn test_yaml_validation_valid_complex_config() { + let yaml = r#" +version: "1.0" +metadata: + name: "Valid Complex Test" + author: "test@example.com" +config: + baseUrl: "https://api.example.com" + workers: 50 + duration: "10m" + timeout: "30s" +load: + model: "ramp" + min: 10 + max: 100 + rampDuration: "5m" +scenarios: + - name: "Heavy Traffic" + weight: 70 + steps: + - name: "GET Request" + request: + method: "GET" + path: "/api/test" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "2s" + + - name: "Light Traffic" + weight: 30 + steps: + - request: + method: "POST" + path: "/api/data" + body: '{"test": true}' +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_ok()); + + println!("✅ Valid complex config passes all validations"); +} diff --git a/tests/config_version_tests.rs b/tests/config_version_tests.rs new file mode 100644 index 0000000..c97c8eb --- /dev/null +++ b/tests/config_version_tests.rs @@ -0,0 +1,542 @@ +//! Integration tests for config versioning (Issue #41). +//! +//! These tests validate version parsing, validation, compatibility checking, +//! and the migration framework. 
+ +use rust_loadtest::config_version::{ + Migration, MigrationRegistry, Version, VersionChecker, VersionError, VersionInfo, +}; +use std::str::FromStr; + +#[test] +fn test_version_parsing_valid() { + let version = Version::from_str("1.0").unwrap(); + assert_eq!(version.major, 1); + assert_eq!(version.minor, 0); + + let version = Version::from_str("2.5").unwrap(); + assert_eq!(version.major, 2); + assert_eq!(version.minor, 5); + + let version = Version::from_str("10.99").unwrap(); + assert_eq!(version.major, 10); + assert_eq!(version.minor, 99); + + println!("✅ Valid version parsing works"); +} + +#[test] +fn test_version_parsing_invalid() { + assert!(Version::from_str("1").is_err()); + assert!(Version::from_str("1.0.0").is_err()); + assert!(Version::from_str("invalid").is_err()); + assert!(Version::from_str("1.x").is_err()); + assert!(Version::from_str("x.0").is_err()); + assert!(Version::from_str("").is_err()); + assert!(Version::from_str("1.").is_err()); + assert!(Version::from_str(".0").is_err()); + + println!("✅ Invalid version parsing is rejected"); +} + +#[test] +fn test_version_display() { + assert_eq!(Version::new(1, 0).to_string(), "1.0"); + assert_eq!(Version::new(2, 5).to_string(), "2.5"); + assert_eq!(Version::new(10, 99).to_string(), "10.99"); + + println!("✅ Version display formatting works"); +} + +#[test] +fn test_version_equality() { + assert_eq!(Version::new(1, 0), Version::new(1, 0)); + assert_eq!(Version::new(2, 5), Version::new(2, 5)); + assert_ne!(Version::new(1, 0), Version::new(1, 1)); + assert_ne!(Version::new(1, 0), Version::new(2, 0)); + + println!("✅ Version equality comparison works"); +} + +#[test] +fn test_version_ordering() { + // Minor version comparison + assert!(Version::new(1, 0) < Version::new(1, 1)); + assert!(Version::new(1, 5) < Version::new(1, 6)); + assert!(Version::new(1, 9) < Version::new(1, 10)); + + // Major version comparison + assert!(Version::new(1, 0) < Version::new(2, 0)); + assert!(Version::new(1, 9) < 
Version::new(2, 0)); + assert!(Version::new(2, 5) < Version::new(3, 0)); + + // Greater than + assert!(Version::new(1, 1) > Version::new(1, 0)); + assert!(Version::new(2, 0) > Version::new(1, 9)); + assert!(Version::new(3, 0) > Version::new(2, 99)); + + println!("✅ Version ordering comparison works"); +} + +#[test] +fn test_version_constants() { + assert_eq!(Version::CURRENT, Version::new(1, 0)); + assert_eq!(Version::MINIMUM_SUPPORTED, Version::new(1, 0)); + assert_eq!(Version::MAXIMUM_SUPPORTED, Version::new(1, 0)); + + println!("✅ Version constants are correct"); +} + +#[test] +fn test_version_is_supported() { + // Current version should be supported + assert!(Version::CURRENT.is_supported()); + assert!(Version::new(1, 0).is_supported()); + + // Future versions not yet supported + assert!(!Version::new(2, 0).is_supported()); + assert!(!Version::new(1, 1).is_supported()); + + // Old versions not supported + assert!(!Version::new(0, 9).is_supported()); + assert!(!Version::new(0, 1).is_supported()); + + println!("✅ Version support detection works"); +} + +#[test] +fn test_version_needs_migration() { + // Current version doesn't need migration + assert!(!Version::CURRENT.needs_migration()); + assert!(!Version::new(1, 0).needs_migration()); + + // Future versions would need migration (once we have multiple versions) + // For now, only 1.0 exists, so no migrations needed yet + + println!("✅ Version migration detection works"); +} + +#[test] +fn test_version_supported_list() { + let versions = Version::supported_versions(); + assert!(!versions.is_empty()); + assert!(versions.contains(&Version::new(1, 0))); + + let version_string = Version::supported_versions_string(); + assert!(version_string.contains("1.0")); + + println!("✅ Supported versions list is correct"); +} + +#[test] +fn test_version_checker_validate_supported() { + let result = VersionChecker::validate(&Version::new(1, 0)); + assert!(result.is_ok()); + + println!("✅ Supported version passes validation"); +} 

/// A version below `MINIMUM_SUPPORTED` must be refused with a structured
/// error carrying both the offending and the minimum version strings.
#[test]
fn test_version_checker_validate_too_old() {
    let outcome = VersionChecker::validate(&Version::new(0, 5));
    assert!(outcome.is_err());

    if let VersionError::VersionTooOld { current, minimum } = outcome.unwrap_err() {
        assert_eq!(current, "0.5");
        assert_eq!(minimum, "1.0");
    } else {
        panic!("Expected VersionTooOld error");
    }

    println!("✅ Too old version is rejected with correct error");
}

/// A version above `MAXIMUM_SUPPORTED` must be refused with a structured
/// error carrying both the offending and the maximum version strings.
#[test]
fn test_version_checker_validate_too_new() {
    let outcome = VersionChecker::validate(&Version::new(99, 0));
    assert!(outcome.is_err());

    if let VersionError::VersionTooNew { current, maximum } = outcome.unwrap_err() {
        assert_eq!(current, "99.0");
        assert_eq!(maximum, "1.0");
    } else {
        panic!("Expected VersionTooNew error");
    }

    println!("✅ Too new version is rejected with correct error");
}

/// "1.0" is both well-formed and inside the supported range, so the
/// combined parse-and-validate path must succeed.
#[test]
fn test_version_checker_parse_and_validate_valid() {
    let parsed = VersionChecker::parse_and_validate("1.0").unwrap();
    assert_eq!(Version::new(1, 0), parsed);

    println!("✅ Parse and validate works for valid version");
}

/// A string that is not `<major>.<minor>` fails at the parsing stage and
/// echoes the offending input back inside the error.
#[test]
fn test_version_checker_parse_and_validate_invalid_format() {
    let outcome = VersionChecker::parse_and_validate("invalid");
    assert!(outcome.is_err());

    if let VersionError::InvalidFormat(msg) = outcome.unwrap_err() {
        assert_eq!(msg, "invalid");
    } else {
        panic!("Expected InvalidFormat error");
    }

    println!("✅ Parse and validate rejects invalid format");
}

#[test]
fn test_version_checker_parse_and_validate_unsupported() {
    let result = VersionChecker::parse_and_validate("2.0");
    assert!(result.is_err());

    match result.unwrap_err() {
        VersionError::VersionTooNew { ..
} => { + // Expected + } + _ => panic!("Expected VersionTooNew error"), + } + + println!("✅ Parse and validate rejects unsupported version"); +} + +#[test] +fn test_version_checker_check_compatibility_current() { + let result = VersionChecker::check_compatibility(&Version::CURRENT); + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); // No migration needed for current version + + println!("✅ Compatibility check for current version succeeds"); +} + +#[test] +fn test_version_checker_check_compatibility_unsupported() { + let result = VersionChecker::check_compatibility(&Version::new(99, 0)); + assert!(result.is_err()); + + println!("✅ Compatibility check for unsupported version fails"); +} + +#[test] +fn test_migration_registry_empty() { + let registry = MigrationRegistry::new(); + + let migration = registry.find_migration(&Version::new(1, 0), &Version::new(2, 0)); + assert!(migration.is_none()); + + println!("✅ Empty migration registry has no migrations"); +} + +#[test] +fn test_migration_registry_default() { + let registry = MigrationRegistry::default_migrations(); + + // Currently no migrations exist, but registry should be valid + let migration = registry.find_migration(&Version::new(1, 0), &Version::new(2, 0)); + assert!(migration.is_none()); + + println!("✅ Default migration registry is valid"); +} + +#[test] +fn test_migration_registry_migrate_same_version() { + let registry = MigrationRegistry::default_migrations(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = registry + .migrate(yaml, &Version::new(1, 0), &Version::new(1, 0)) + .unwrap(); + + assert_eq!(result, yaml); + + println!("✅ Migrating same version returns unchanged YAML"); +} + +#[test] +fn test_migration_registry_register() { + struct DummyMigration; + + impl Migration for DummyMigration { + fn from_version(&self) 
-> Version { + Version::new(1, 0) + } + + fn to_version(&self) -> Version { + Version::new(2, 0) + } + + fn description(&self) -> &str { + "Test migration" + } + + fn migrate(&self, yaml: &str) -> Result { + Ok(yaml.replace("1.0", "2.0")) + } + } + + let mut registry = MigrationRegistry::new(); + registry.register(Box::new(DummyMigration)); + + let migration = registry.find_migration(&Version::new(1, 0), &Version::new(2, 0)); + assert!(migration.is_some()); + assert_eq!(migration.unwrap().description(), "Test migration"); + + println!("✅ Migration registration works"); +} + +#[test] +fn test_migration_registry_apply_migration() { + struct TestMigration; + + impl Migration for TestMigration { + fn from_version(&self) -> Version { + Version::new(1, 0) + } + + fn to_version(&self) -> Version { + Version::new(1, 1) + } + + fn description(&self) -> &str { + "Add new field" + } + + fn migrate(&self, yaml: &str) -> Result { + // Simple test: replace version string + Ok(yaml.replace("version: \"1.0\"", "version: \"1.1\"")) + } + } + + let mut registry = MigrationRegistry::new(); + registry.register(Box::new(TestMigration)); + + let yaml = "version: \"1.0\""; + let result = registry + .migrate(yaml, &Version::new(1, 0), &Version::new(1, 1)) + .unwrap(); + + assert!(result.contains("version: \"1.1\"")); + + println!("✅ Migration application works"); +} + +#[test] +fn test_version_error_display() { + let err = VersionError::InvalidFormat("1.0.0".to_string()); + assert!(err.to_string().contains("Invalid version format")); + assert!(err.to_string().contains("1.0.0")); + + let err = VersionError::VersionTooOld { + current: "0.5".to_string(), + minimum: "1.0".to_string(), + }; + assert!(err.to_string().contains("too old")); + assert!(err.to_string().contains("0.5")); + assert!(err.to_string().contains("1.0")); + + let err = VersionError::VersionTooNew { + current: "99.0".to_string(), + maximum: "1.0".to_string(), + }; + assert!(err.to_string().contains("too new")); + 
assert!(err.to_string().contains("99.0")); + + println!("✅ Version error display messages are helpful"); +} + +#[test] +fn test_version_info_current() { + let version = VersionInfo::current(); + assert_eq!(version, Version::new(1, 0)); + + println!("✅ VersionInfo returns current version"); +} + +#[test] +fn test_version_info_supported_range() { + let min = VersionInfo::minimum_supported(); + let max = VersionInfo::maximum_supported(); + + assert_eq!(min, Version::new(1, 0)); + assert_eq!(max, Version::new(1, 0)); + assert!(min <= max); + + println!("✅ VersionInfo returns supported range"); +} + +#[test] +fn test_version_info_string() { + let info = VersionInfo::info_string(); + + assert!(info.contains("Current")); + assert!(info.contains("1.0")); + assert!(info.contains("Minimum Supported")); + assert!(info.contains("Maximum Supported")); + assert!(info.contains("Supported Versions")); + + println!("✅ VersionInfo string contains all information"); +} + +#[test] +fn test_version_error_equality() { + let err1 = VersionError::InvalidFormat("test".to_string()); + let err2 = VersionError::InvalidFormat("test".to_string()); + let err3 = VersionError::InvalidFormat("other".to_string()); + + assert_eq!(err1, err2); + assert_ne!(err1, err3); + + println!("✅ VersionError equality comparison works"); +} + +#[test] +fn test_version_roundtrip() { + let version = Version::new(2, 5); + let version_str = version.to_string(); + let parsed = Version::from_str(&version_str).unwrap(); + assert_eq!(version, parsed); + + println!("✅ Version roundtrip (to_string -> from_str) works"); +} + +#[test] +fn test_version_with_yaml_config() { + use rust_loadtest::yaml_config::YamlConfig; + + // Valid version should work + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_ok()); + + 
println!("✅ Version 1.0 works with YamlConfig"); +} + +#[test] +fn test_unsupported_version_with_yaml_config() { + use rust_loadtest::yaml_config::YamlConfig; + + // Unsupported version should fail + let yaml = r#" +version: "2.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err(); + assert!(err.to_string().contains("2.0")); + assert!(err.to_string().contains("too new")); + + println!("✅ Unsupported version 2.0 is rejected by YamlConfig"); +} + +#[test] +fn test_invalid_version_format_with_yaml_config() { + use rust_loadtest::yaml_config::YamlConfig; + + // Invalid version format should fail + let yaml = r#" +version: "1.0.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let result = YamlConfig::from_str(yaml); + assert!(result.is_err()); + + let err = result.unwrap_err(); + assert!(err.to_string().contains("Invalid version format")); + + println!("✅ Invalid version format is rejected by YamlConfig"); +} + +#[test] +fn test_future_version_scenario() { + // Scenario: When we release version 2.0 in the future + // Version 2.0 config should not be loadable with current code + + let version_2_0 = Version::new(2, 0); + assert!(!version_2_0.is_supported()); + assert!(VersionChecker::validate(&version_2_0).is_err()); + + println!("✅ Future version 2.0 is correctly rejected"); +} + +#[test] +fn test_version_comparison_comprehensive() { + let versions = [ + Version::new(0, 9), + Version::new(1, 0), + Version::new(1, 1), + Version::new(1, 9), + Version::new(2, 0), + Version::new(2, 1), + Version::new(10, 0), + ]; + + for i in 0..versions.len() { + for j in i + 1..versions.len() { + assert!( + versions[i] < versions[j], + "{} 
should be less than {}", + versions[i], + versions[j] + ); + } + } + + println!("✅ Comprehensive version comparison works"); +} diff --git a/tests/connection_pool_tests.rs b/tests/connection_pool_tests.rs new file mode 100644 index 0000000..48644a2 --- /dev/null +++ b/tests/connection_pool_tests.rs @@ -0,0 +1,410 @@ +//! Integration tests for connection pool statistics (Issue #36). +//! +//! These tests validate connection pool configuration and statistics tracking. + +use rust_loadtest::connection_pool::{ + ConnectionStats, PoolConfig, PoolStatsTracker, GLOBAL_POOL_STATS, +}; +use std::time::Duration; + +#[test] +fn test_pool_config_default() { + let config = PoolConfig::default(); + + assert_eq!(config.max_idle_per_host, 32); + assert_eq!(config.idle_timeout, Duration::from_secs(90)); + assert_eq!(config.tcp_keepalive, Some(Duration::from_secs(60))); + + println!("✅ Pool configuration defaults work"); +} + +#[test] +fn test_pool_config_builder_pattern() { + let config = PoolConfig::new() + .with_max_idle_per_host(64) + .with_idle_timeout(Duration::from_secs(120)) + .with_tcp_keepalive(Some(Duration::from_secs(30))); + + assert_eq!(config.max_idle_per_host, 64); + assert_eq!(config.idle_timeout, Duration::from_secs(120)); + assert_eq!(config.tcp_keepalive, Some(Duration::from_secs(30))); + + println!("✅ Pool configuration builder pattern works"); +} + +#[test] +fn test_pool_config_disable_keepalive() { + let config = PoolConfig::new().with_tcp_keepalive(None); + + assert_eq!(config.tcp_keepalive, None); + + println!("✅ TCP keepalive can be disabled"); +} + +#[test] +fn test_connection_stats_empty() { + let stats = ConnectionStats::default(); + + assert_eq!(stats.total_requests, 0); + assert_eq!(stats.likely_new_connections, 0); + assert_eq!(stats.likely_reused_connections, 0); + assert_eq!(stats.reuse_rate(), 0.0); + assert_eq!(stats.new_connection_rate(), 0.0); + assert!(stats.duration().is_none()); + + println!("✅ Empty connection stats handled correctly"); +} + 
+#[test] +fn test_connection_stats_calculations() { + let stats = ConnectionStats { + total_requests: 100, + likely_new_connections: 20, + likely_reused_connections: 80, + first_request: Some(std::time::Instant::now()), + last_request: Some(std::time::Instant::now()), + }; + + assert_eq!(stats.reuse_rate(), 80.0); + assert_eq!(stats.new_connection_rate(), 20.0); + + let formatted = stats.format(); + assert!(formatted.contains("Total: 100")); + assert!(formatted.contains("Reused: 80")); + assert!(formatted.contains("80.0%")); + assert!(formatted.contains("New: 20")); + assert!(formatted.contains("20.0%")); + + println!("✅ Connection stats calculations work"); + println!(" {}", formatted); +} + +#[test] +fn test_pool_stats_tracker_fast_requests() { + let tracker = PoolStatsTracker::new(100); + + // Simulate 10 fast requests (reused connections) + for _ in 0..10 { + tracker.record_request(30); // 30ms - very fast + } + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 10); + assert_eq!(stats.likely_reused_connections, 10); + assert_eq!(stats.likely_new_connections, 0); + assert_eq!(stats.reuse_rate(), 100.0); + + println!("✅ Fast requests classified as reused connections"); + println!(" {}", stats.format()); +} + +#[test] +fn test_pool_stats_tracker_slow_requests() { + let tracker = PoolStatsTracker::new(100); + + // Simulate 10 slow requests (new connections with TLS handshake) + for _ in 0..10 { + tracker.record_request(150); // 150ms - includes TLS handshake + } + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 10); + assert_eq!(stats.likely_reused_connections, 0); + assert_eq!(stats.likely_new_connections, 10); + assert_eq!(stats.new_connection_rate(), 100.0); + + println!("✅ Slow requests classified as new connections"); + println!(" {}", stats.format()); +} + +#[test] +fn test_pool_stats_tracker_mixed_patterns() { + let tracker = PoolStatsTracker::new(100); + + // Simulate realistic mixed pattern + tracker.record_request(150); 
// New connection (slow) + tracker.record_request(25); // Reused (fast) + tracker.record_request(30); // Reused (fast) + tracker.record_request(120); // New connection (slow) + tracker.record_request(20); // Reused (fast) + tracker.record_request(35); // Reused (fast) + tracker.record_request(110); // New connection (slow) + tracker.record_request(28); // Reused (fast) + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 8); + assert_eq!(stats.likely_reused_connections, 5); + assert_eq!(stats.likely_new_connections, 3); + assert_eq!(stats.reuse_rate(), 62.5); + assert_eq!(stats.new_connection_rate(), 37.5); + + println!("✅ Mixed request patterns tracked correctly"); + println!(" {}", stats.format()); +} + +#[test] +fn test_pool_stats_tracker_custom_threshold() { + let tracker = PoolStatsTracker::new(200); // Higher threshold + + tracker.record_request(150); // Under threshold - reused + tracker.record_request(180); // Under threshold - reused + tracker.record_request(210); // Over threshold - new + tracker.record_request(250); // Over threshold - new + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 4); + assert_eq!(stats.likely_reused_connections, 2); + assert_eq!(stats.likely_new_connections, 2); + + println!("✅ Custom threshold works correctly"); + println!(" {}", stats.format()); +} + +#[test] +fn test_pool_stats_tracker_reset() { + let tracker = PoolStatsTracker::new(100); + + // Record some requests + tracker.record_request(50); + tracker.record_request(150); + tracker.record_request(30); + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 3); + + // Reset + tracker.reset(); + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 0); + assert_eq!(stats.likely_reused_connections, 0); + assert_eq!(stats.likely_new_connections, 0); + + println!("✅ Pool stats tracker reset works"); +} + +#[test] +fn test_pool_stats_timing_accuracy() { + let tracker = PoolStatsTracker::new(100); + + 
tracker.record_request(50); + + // Wait a known duration + std::thread::sleep(Duration::from_millis(100)); + + tracker.record_request(50); + + let stats = tracker.stats(); + let duration = stats.duration().unwrap(); + + // Duration should be at least 100ms but less than 200ms + assert!(duration >= Duration::from_millis(100)); + assert!(duration < Duration::from_millis(200)); + + println!("✅ Timing accuracy validated"); + println!(" Duration: {:?}", duration); +} + +#[test] +fn test_connection_stats_duration_calculation() { + use std::time::Instant; + + let start = Instant::now(); + std::thread::sleep(Duration::from_millis(50)); + let end = Instant::now(); + + let stats = ConnectionStats { + total_requests: 10, + likely_new_connections: 2, + likely_reused_connections: 8, + first_request: Some(start), + last_request: Some(end), + }; + + let duration = stats.duration().unwrap(); + assert!(duration >= Duration::from_millis(50)); + assert!(duration < Duration::from_millis(100)); + + println!("✅ Duration calculation works"); + println!(" Duration: {:.3}s", duration.as_secs_f64()); +} + +#[test] +fn test_pool_stats_high_reuse_scenario() { + let tracker = PoolStatsTracker::new(100); + + // Simulate high connection reuse (ideal scenario) + // First request is slow (new connection) + tracker.record_request(150); + + // Following 99 requests are fast (reused) + for _ in 0..99 { + tracker.record_request(30); + } + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 100); + assert_eq!(stats.likely_reused_connections, 99); + assert_eq!(stats.likely_new_connections, 1); + assert_eq!(stats.reuse_rate(), 99.0); + + println!("✅ High reuse scenario validated"); + println!(" {}", stats.format()); +} + +#[test] +fn test_pool_stats_concurrent_access() { + use std::sync::Arc; + use std::thread; + + let tracker = Arc::new(PoolStatsTracker::new(100)); + let mut handles = vec![]; + + // Spawn 5 threads, each recording 20 requests + for thread_id in 0..5 { + let tracker_clone = 
Arc::clone(&tracker); + let handle = thread::spawn(move || { + for i in 0..20 { + // Alternate between fast and slow requests + if (thread_id + i) % 3 == 0 { + tracker_clone.record_request(150); // Slow (new) + } else { + tracker_clone.record_request(30); // Fast (reused) + } + } + }); + handles.push(handle); + } + + // Wait for all threads + for handle in handles { + handle.join().unwrap(); + } + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 100); // 5 threads * 20 requests + + println!("✅ Concurrent access handled correctly"); + println!(" {}", stats.format()); +} + +#[test] +fn test_pool_stats_boundary_values() { + let tracker = PoolStatsTracker::new(100); + + // Test exact threshold + tracker.record_request(99); // Just below threshold - reused + tracker.record_request(100); // Exactly at threshold - new + tracker.record_request(101); // Just above threshold - new + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 3); + assert_eq!(stats.likely_reused_connections, 1); + assert_eq!(stats.likely_new_connections, 2); + + println!("✅ Boundary values handled correctly"); +} + +#[test] +fn test_pool_stats_zero_latency() { + let tracker = PoolStatsTracker::new(100); + + // Edge case: zero latency (shouldn't happen in practice) + tracker.record_request(0); + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 1); + assert_eq!(stats.likely_reused_connections, 1); // Zero is below threshold + + println!("✅ Zero latency handled correctly"); +} + +#[test] +fn test_pool_stats_extreme_latency() { + let tracker = PoolStatsTracker::new(100); + + // Edge case: very high latency (network issues) + tracker.record_request(5000); // 5 seconds - definitely new connection or error + + let stats = tracker.stats(); + assert_eq!(stats.total_requests, 1); + assert_eq!(stats.likely_new_connections, 1); + + println!("✅ Extreme latency handled correctly"); +} + +#[test] +fn test_global_pool_stats_singleton() { + // Note: GLOBAL_POOL_STATS is 
shared across tests, so we just verify it exists + // and can be called without testing specific values + + let stats = GLOBAL_POOL_STATS.stats(); + + // Should be able to get stats (may have data from other tests) + // Just verify we can access it without panicking + let _ = stats.total_requests; + + println!("✅ Global pool stats singleton accessible"); +} + +#[test] +fn test_pool_config_apply_to_builder() { + let config = PoolConfig::new() + .with_max_idle_per_host(64) + .with_idle_timeout(Duration::from_secs(120)) + .with_tcp_keepalive(Some(Duration::from_secs(30))); + + // Create a reqwest client builder + let builder = reqwest::Client::builder(); + + // Apply pool config (this should not panic) + let _builder = config.apply_to_builder(builder); + + println!("✅ Pool config can be applied to reqwest ClientBuilder"); +} + +#[tokio::test] +async fn test_pool_with_real_client() { + let config = PoolConfig::new() + .with_max_idle_per_host(10) + .with_idle_timeout(Duration::from_secs(30)); + + let builder = reqwest::Client::builder(); + let builder = config.apply_to_builder(builder); + + let client = builder.build().expect("Failed to build client"); + + // Just verify we can create a client with pool config + // We won't make actual requests in unit tests + assert!(client.get("http://example.com").build().is_ok()); + + println!("✅ Real HTTP client with pool config works"); +} + +#[test] +fn test_connection_stats_format_variations() { + // Test different percentage scenarios + let test_cases = vec![ + (100, 0, 100), // 100% reuse + (100, 100, 0), // 0% reuse (all new) + (100, 50, 50), // 50/50 + (100, 75, 25), // 75% reuse + ]; + + for (total, new, reused) in test_cases { + let stats = ConnectionStats { + total_requests: total, + likely_new_connections: new, + likely_reused_connections: reused, + first_request: Some(std::time::Instant::now()), + last_request: Some(std::time::Instant::now()), + }; + + let formatted = stats.format(); + 
assert!(formatted.contains(&format!("Total: {}", total))); + assert!(formatted.contains(&format!("New: {}", new))); + assert!(formatted.contains(&format!("Reused: {}", reused))); + } + + println!("✅ Connection stats formatting works for all scenarios"); +} diff --git a/tests/cookie_session_tests.rs b/tests/cookie_session_tests.rs new file mode 100644 index 0000000..acabf00 --- /dev/null +++ b/tests/cookie_session_tests.rs @@ -0,0 +1,438 @@ +//! Integration tests for cookie and session management (#28). +//! +//! These tests validate that cookies are automatically handled across +//! requests within a scenario, enabling session-based authentication. + +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{ + Extractor, RequestConfig, Scenario, ScenarioContext, Step, ThinkTime, VariableExtraction, +}; +use std::collections::HashMap; +use std::time::Duration; + +// E-commerce test API - not accessible in CI +const BASE_URL: &str = "https://ecom.edge.baugus-lab.com"; + +/// Create a cookie-enabled HTTP client for testing +fn create_cookie_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) // Enable automatic cookie management + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[tokio::test] +#[ignore] // Requires ecom.edge.baugus-lab.com +async fn test_cookies_persist_across_steps() { + // Test that cookies set in one step are sent in subsequent steps + let scenario = Scenario { + name: "Cookie Persistence Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Login (sets cookies)".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/auth/login".to_string(), + body: Some( + r#"{ + "email": "test@example.com", + "password": "password123" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: vec![], + 
assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(100))), + }, + Step { + name: "Access Protected Resource (uses cookies)".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/users/me".to_string(), + body: None, + headers: HashMap::new(), // No manual auth header needed - cookies handle it + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_cookie_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // If cookies work, both steps should succeed + // Step 1: Login sets session cookie + // Step 2: Uses session cookie automatically + println!("\nCookie Persistence Test:"); + println!( + " Step 1 (Login): {}", + if result.steps[0].success { + "✓" + } else { + "✗" + } + ); + if result.steps.len() > 1 { + println!( + " Step 2 (Protected): {}", + if result.steps[1].success { + "✓" + } else { + "✗" + } + ); + } +} + +#[tokio::test] +#[ignore] // Requires ecom.edge.baugus-lab.com +async fn test_auth_flow_with_token_and_cookies() { + // Test a realistic auth flow that combines token extraction and cookies + let scenario = Scenario { + name: "Auth Flow with Token and Cookies".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Register User".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/auth/register".to_string(), + body: Some( + r#"{ + "email": "user-${timestamp}@example.com", + "password": "SecurePass123!", + "name": "Test User" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: vec![ + // Extract token from response + VariableExtraction { + name: "auth_token".to_string(), + extractor: Extractor::JsonPath("$.token".to_string()), + }, + ], + 
assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Access Profile with Token".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/users/me".to_string(), + body: None, + headers: { + let mut headers = HashMap::new(); + // Use extracted token in Authorization header + headers.insert( + "Authorization".to_string(), + "Bearer ${auth_token}".to_string(), + ); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_cookie_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + println!("\nAuth Flow Test:"); + println!( + " Registration: {}", + if result.steps[0].success { + "✓" + } else { + "✗" + } + ); + + // Token should be extracted + let token = context.get_variable("auth_token"); + println!( + " Token extracted: {}", + if token.is_some() { "✓" } else { "✗" } + ); + + if result.steps.len() > 1 { + println!( + " Profile access: {}", + if result.steps[1].success { + "✓" + } else { + "✗" + } + ); + } +} + +#[tokio::test] +#[ignore] // Requires ecom.edge.baugus-lab.com +async fn test_cookie_isolation_between_clients() { + // Test that different client instances have isolated cookies + let scenario = Scenario { + name: "Login Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Login".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/auth/register".to_string(), + body: Some( + r#"{ + "email": "user-${timestamp}@example.com", + "password": "password123", + "name": "Test User" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + // Create two separate 
cookie-enabled clients + let client1 = create_cookie_client(); + let client2 = create_cookie_client(); + + let executor1 = ScenarioExecutor::new(BASE_URL.to_string(), client1); + let executor2 = ScenarioExecutor::new(BASE_URL.to_string(), client2); + + let mut context1 = ScenarioContext::new(); + let mut context2 = ScenarioContext::new(); + + // Execute scenarios with different clients + let result1 = executor1.execute(&scenario, &mut context1).await; + let result2 = executor2.execute(&scenario, &mut context2).await; + + println!("\nCookie Isolation Test:"); + println!(" Client 1: {}", if result1.success { "✓" } else { "✗" }); + println!(" Client 2: {}", if result2.success { "✓" } else { "✗" }); + + // Both should succeed independently (cookies are isolated) + assert!( + result1.success || result2.success, + "At least one should succeed" + ); +} + +#[tokio::test] +#[ignore] // Requires ecom.edge.baugus-lab.com +async fn test_shopping_flow_with_session() { + // Realistic e-commerce flow using session cookies + let scenario = Scenario { + name: "Shopping with Session".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Browse Products".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/products?limit=3".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![VariableExtraction { + name: "product_id".to_string(), + extractor: Extractor::JsonPath("$.products[0].id".to_string()), + }], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Register and Login".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/auth/register".to_string(), + body: Some( + r#"{ + "email": "shopper-${timestamp}@example.com", + "password": "Shop123!", + "name": "Shopper" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + 
extractions: vec![VariableExtraction { + name: "token".to_string(), + extractor: Extractor::JsonPath("$.token".to_string()), + }], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Add to Cart (with auth)".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/cart/items".to_string(), + body: Some( + r#"{ + "product_id": "${product_id}", + "quantity": 2 + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers.insert("Authorization".to_string(), "Bearer ${token}".to_string()); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "View Cart (session maintained)".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/cart".to_string(), + body: None, + headers: { + let mut headers = HashMap::new(); + headers.insert("Authorization".to_string(), "Bearer ${token}".to_string()); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_cookie_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + println!("\nShopping Flow with Session:"); + println!(" Success: {}", result.success); + println!( + " Steps completed: {}/{}", + result.steps_completed, + result.steps.len() + ); + + for (idx, step) in result.steps.iter().enumerate() { + println!( + " Step {}: {} - {}", + idx + 1, + step.step_name, + if step.success { "✓" } else { "✗" } + ); + } +} + +#[tokio::test] +#[ignore] // Requires ecom.edge.baugus-lab.com +async fn test_client_without_cookies_fails_session() { + // Demonstrate that without cookies, session-based auth fails + let scenario = Scenario { + name: 
"No Cookie Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Login".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/auth/register".to_string(), + body: Some( + r#"{ + "email": "nocookie-${timestamp}@example.com", + "password": "Test123!", + "name": "No Cookie User" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + // Client WITHOUT cookies + let client_no_cookies = reqwest::Client::builder() + .timeout(Duration::from_secs(30)) + .build() + .unwrap(); + + // Client WITH cookies + let client_with_cookies = create_cookie_client(); + + let executor_no_cookies = ScenarioExecutor::new(BASE_URL.to_string(), client_no_cookies); + let executor_with_cookies = ScenarioExecutor::new(BASE_URL.to_string(), client_with_cookies); + + let mut context_no_cookies = ScenarioContext::new(); + let mut context_with_cookies = ScenarioContext::new(); + + let result_no_cookies = executor_no_cookies + .execute(&scenario, &mut context_no_cookies) + .await; + let result_with_cookies = executor_with_cookies + .execute(&scenario, &mut context_with_cookies) + .await; + + println!("\nCookie Enabled Comparison:"); + println!( + " Without cookies: {}", + if result_no_cookies.success { + "✓" + } else { + "✗" + } + ); + println!( + " With cookies: {}", + if result_with_cookies.success { + "✓" + } else { + "✗" + } + ); +} diff --git a/tests/csv_data_driven_tests.rs b/tests/csv_data_driven_tests.rs new file mode 100644 index 0000000..99bf47c --- /dev/null +++ b/tests/csv_data_driven_tests.rs @@ -0,0 +1,362 @@ +//! Integration tests for CSV data-driven testing (Issue #31). +//! +//! These tests validate that CSV data can be loaded, distributed across +//! virtual users, and used for variable substitution in scenarios. 
+
+use rust_loadtest::data_source::CsvDataSource;
+use rust_loadtest::executor::ScenarioExecutor;
+use rust_loadtest::scenario::{Assertion, RequestConfig, Scenario, ScenarioContext, Step};
+use std::collections::HashMap;
+use std::time::Duration;
+use tempfile::NamedTempFile;
+
+const BASE_URL: &str = "https://httpbin.org";
+
+fn create_test_client() -> reqwest::Client {
+    reqwest::Client::builder()
+        .cookie_store(true)
+        .timeout(Duration::from_secs(30))
+        .build()
+        .expect("Failed to create HTTP client")
+}
+
+#[test]
+fn test_csv_load_from_string() {
+    let csv = "username,password,email\nuser1,pass1,user1@test.com\nuser2,pass2,user2@test.com";
+    let ds = CsvDataSource::from_string(csv).unwrap();
+
+    assert_eq!(ds.row_count(), 2);
+    assert_eq!(ds.headers(), &["username", "password", "email"]);
+
+    let row1 = ds.next_row().unwrap();
+    assert_eq!(row1.get("username").unwrap(), "user1");
+    assert_eq!(row1.get("password").unwrap(), "pass1");
+
+    println!("✅ CSV loading from string works");
+}
+
+#[test]
+fn test_csv_load_from_file() {
+    // Create temporary CSV file
+    let csv_content =
+        "product_id,name,price\n101,Widget,19.99\n102,Gadget,29.99\n103,Doohickey,39.99";
+
+    let mut temp_file = NamedTempFile::new().unwrap();
+    use std::io::Write;
+    temp_file.write_all(csv_content.as_bytes()).unwrap();
+    temp_file.flush().unwrap();
+
+    let ds = CsvDataSource::from_file(temp_file.path()).unwrap();
+
+    assert_eq!(ds.row_count(), 3);
+    assert_eq!(ds.headers(), &["product_id", "name", "price"]);
+
+    let row = ds.next_row().unwrap();
+    assert_eq!(row.get("product_id").unwrap(), "101");
+    assert_eq!(row.get("name").unwrap(), "Widget");
+    assert_eq!(row.get("price").unwrap(), "19.99");
+
+    println!("✅ CSV loading from file works");
+}
+
+#[test]
+fn test_csv_round_robin_distribution() {
+    let csv = "user_id,role\n1,admin\n2,user\n3,guest";
+    let ds = CsvDataSource::from_string(csv).unwrap();
+
+    // Get 6 rows (2 full cycles through 3 users)
+    let ids: Vec<String> = (0..6)
+        .map(|_| 
ds.next_row().unwrap().get("user_id").unwrap().clone()) + .collect(); + + assert_eq!(ids, vec!["1", "2", "3", "1", "2", "3"]); + + println!("✅ Round-robin distribution works"); +} + +#[test] +fn test_csv_reset() { + let csv = "id,value\n1,a\n2,b\n3,c"; + let ds = CsvDataSource::from_string(csv).unwrap(); + + ds.next_row().unwrap(); + ds.next_row().unwrap(); + + ds.reset(); + + let row = ds.next_row().unwrap(); + assert_eq!(row.get("id").unwrap(), "1"); + + println!("✅ CSV reset works"); +} + +#[test] +fn test_context_load_data_row() { + let csv = "username,api_key,region\ntestuser,abc123,us-west"; + let ds = CsvDataSource::from_string(csv).unwrap(); + let row = ds.next_row().unwrap(); + + let mut context = ScenarioContext::new(); + context.load_data_row(&row); + + assert_eq!( + context.get_variable("username"), + Some(&"testuser".to_string()) + ); + assert_eq!(context.get_variable("api_key"), Some(&"abc123".to_string())); + assert_eq!(context.get_variable("region"), Some(&"us-west".to_string())); + + println!("✅ Context loads data row correctly"); +} + +#[test] +fn test_variable_substitution_from_csv() { + let csv = "user_id,product_id,quantity\n42,SKU-999,5"; + let ds = CsvDataSource::from_string(csv).unwrap(); + let row = ds.next_row().unwrap(); + + let mut context = ScenarioContext::new(); + context.load_data_row(&row); + + let path = context + .substitute_variables("/users/${user_id}/cart?product=${product_id}&qty=${quantity}"); + assert_eq!(path, "/users/42/cart?product=SKU-999&qty=5"); + + println!("✅ Variable substitution from CSV works"); +} + +#[tokio::test] +async fn test_scenario_with_csv_data() { + let csv = "username,email\ntestuser1,test1@example.com\ntestuser2,test2@example.com"; + let ds = CsvDataSource::from_string(csv).unwrap(); + + let scenario = Scenario { + name: "CSV Data Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Request with CSV data".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: 
"/post".to_string(), + body: Some(r#"{"username": "${username}", "email": "${email}"}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + // Execute scenario twice with different data rows + for i in 0..2 { + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + let mut context = ScenarioContext::new(); + let row = ds.next_row().unwrap(); + context.load_data_row(&row); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.steps[0].status_code.is_some()); + println!( + " Execution {} completed with status {:?}", + i + 1, + result.steps[0].status_code + ); + } + + println!("✅ Scenario with CSV data works"); +} + +#[tokio::test] +async fn test_multiple_users_different_data() { + let csv = "username,password\nuser1,pass1\nuser2,pass2\nuser3,pass3"; + let ds = CsvDataSource::from_string(csv).unwrap(); + + let scenario = Scenario { + name: "Multi-User Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Login with user data".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), // Simple GET endpoint + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }], + }; + + // Simulate 3 virtual users, each getting different data + let mut users_data = Vec::new(); + + for i in 0..3 { + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + let mut context = ScenarioContext::new(); + let row = ds.next_row().unwrap(); + let username = row.get("username").unwrap().clone(); + users_data.push(username.clone()); + + context.load_data_row(&row); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, 
"Virtual user {} should succeed", i + 1); + println!(" Virtual user {} used data: {}", i + 1, username); + } + + // Verify each user got different data + assert_eq!(users_data, vec!["user1", "user2", "user3"]); + + println!("✅ Multiple users with different data works"); +} + +#[tokio::test] +async fn test_realistic_user_pool() { + // Simulate a realistic user pool with credentials + let user_csv = r#"username,password,email,role +alice,alice123,alice@company.com,admin +bob,bob456,bob@company.com,user +carol,carol789,carol@company.com,user +dave,dave012,dave@company.com,manager"#; + + let ds = CsvDataSource::from_string(user_csv).unwrap(); + + let scenario = Scenario { + name: "User Pool Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Health Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }, + Step { + name: "Check Status".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + // Simulate 8 virtual users (2 full cycles through 4 users) + for i in 0..8 { + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + let mut context = ScenarioContext::new(); + let row = ds.next_row().unwrap(); + let username = row.get("username").unwrap(); + let role = row.get("role").unwrap(); + + context.load_data_row(&row); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "User {} should succeed", username); + println!(" VU {} as {} (role: {})", i + 1, username, role); + } + + println!("✅ Realistic user pool test works"); +} + +#[test] +fn test_csv_with_special_characters() { + let csv = 
r#"username,password,notes +user1,p@ss!123,"Has special chars" +user2,"pass,with,comma","Multi, line, value" +user3,simple,Normal"#; + + let ds = CsvDataSource::from_string(csv).unwrap(); + + let row1 = ds.next_row().unwrap(); + assert_eq!(row1.get("password").unwrap(), "p@ss!123"); + + let row2 = ds.next_row().unwrap(); + assert_eq!(row2.get("password").unwrap(), "pass,with,comma"); + + println!("✅ CSV with special characters works"); +} + +#[test] +fn test_empty_csv_error() { + let empty_csv = "username,password\n"; + let result = CsvDataSource::from_string(empty_csv); + + assert!(result.is_err()); + println!("✅ Empty CSV properly returns error"); +} + +#[test] +fn test_csv_concurrent_access() { + use std::sync::Arc; + use std::thread; + + let csv = "id,value\n1,a\n2,b\n3,c\n4,d\n5,e"; + let ds = Arc::new(CsvDataSource::from_string(csv).unwrap()); + + let mut handles = vec![]; + + // Spawn 10 threads, each getting 3 rows + for thread_id in 0..10 { + let ds_clone = Arc::clone(&ds); + let handle = thread::spawn(move || { + let mut ids = Vec::new(); + for _ in 0..3 { + let row = ds_clone.next_row().unwrap(); + ids.push(row.get("id").unwrap().clone()); + } + (thread_id, ids) + }); + handles.push(handle); + } + + let mut all_ids = Vec::new(); + for handle in handles { + let (thread_id, ids) = handle.join().unwrap(); + println!(" Thread {} got IDs: {:?}", thread_id, ids); + all_ids.extend(ids); + } + + // Should have distributed 30 rows total (10 threads * 3 rows each) + assert_eq!(all_ids.len(), 30); + + println!("✅ Concurrent CSV access works correctly"); +} + +#[test] +fn test_csv_builder() { + let csv = "a,b,c\n1,2,3"; + + let ds = rust_loadtest::data_source::CsvDataSourceBuilder::new() + .content(csv) + .build() + .unwrap(); + + assert_eq!(ds.row_count(), 1); + println!("✅ CSV builder works"); +} diff --git a/tests/env_override_tests.rs b/tests/env_override_tests.rs new file mode 100644 index 0000000..7c46f61 --- /dev/null +++ b/tests/env_override_tests.rs @@ -0,0 
+1,662 @@ +//! Integration tests for environment variable overrides (Issue #40). +//! +//! These tests validate that environment variables can override YAML config values +//! according to precedence: env > yaml > defaults. + +use rust_loadtest::config::Config; +use rust_loadtest::load_models::LoadModel; +use rust_loadtest::yaml_config::YamlConfig; +use serial_test::serial; +use std::env; +use std::time::Duration; + +/// Clear all env vars that could affect config parsing. +/// Must be called at the start of every test to prevent leakage +/// from other tests (execution order is not guaranteed). +fn clean_env() { + for var in [ + "TARGET_URL", + "NUM_CONCURRENT_TASKS", + "REQUEST_TIMEOUT", + "TEST_DURATION", + "SKIP_TLS_VERIFY", + "CUSTOM_HEADERS", + "LOAD_MODEL_TYPE", + "TARGET_RPS", + "MIN_RPS", + "MAX_RPS", + "RAMP_DURATION", + "DAILY_MIN_RPS", + "DAILY_MID_RPS", + "DAILY_MAX_RPS", + "DAILY_CYCLE_DURATION", + ] { + env::remove_var(var); + } +} + +#[test] +#[serial] +fn test_no_env_override_uses_yaml_values() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.example.com" + workers: 50 + timeout: "60s" + duration: "10m" + skipTlsVerify: true +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.target_url, "https://yaml.example.com"); + assert_eq!(config.num_concurrent_tasks, 50); + assert_eq!(config.test_duration, Duration::from_secs(600)); // 10m + assert!(config.skip_tls_verify); + + println!("✅ YAML values used when no env overrides"); +} + +#[test] +#[serial] +fn test_env_overrides_base_url() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.example.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" 
+"#; + + env::set_var("TARGET_URL", "https://env.example.com"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.target_url, "https://env.example.com"); + + env::remove_var("TARGET_URL"); + + println!("✅ TARGET_URL env var overrides YAML baseUrl"); +} + +#[test] +#[serial] +fn test_env_overrides_workers() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + workers: 50 + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("NUM_CONCURRENT_TASKS", "100"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.num_concurrent_tasks, 100); + + env::remove_var("NUM_CONCURRENT_TASKS"); + + println!("✅ NUM_CONCURRENT_TASKS env var overrides YAML workers"); +} + +#[test] +#[serial] +fn test_env_overrides_timeout() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + timeout: "30s" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("REQUEST_TIMEOUT", "90s"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let _config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + // Note: timeout is currently not stored in Config struct, but test validates parsing works + // The timeout is used in client config creation + + env::remove_var("REQUEST_TIMEOUT"); + + println!("✅ REQUEST_TIMEOUT env var overrides YAML timeout"); +} + +#[test] +#[serial] +fn test_env_overrides_test_duration() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: 
"/" +"#; + + env::set_var("TEST_DURATION", "30m"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.test_duration, Duration::from_secs(1800)); // 30m + + env::remove_var("TEST_DURATION"); + + println!("✅ TEST_DURATION env var overrides YAML duration"); +} + +#[test] +#[serial] +fn test_env_overrides_skip_tls_verify() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" + skipTlsVerify: false +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("SKIP_TLS_VERIFY", "true"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert!(config.skip_tls_verify); + + env::remove_var("SKIP_TLS_VERIFY"); + + println!("✅ SKIP_TLS_VERIFY env var overrides YAML skipTlsVerify"); +} + +#[test] +#[serial] +fn test_env_overrides_custom_headers() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" + customHeaders: "X-YAML-Header:yaml-value" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("CUSTOM_HEADERS", "X-ENV-Header:env-value"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.custom_headers.unwrap(), "X-ENV-Header:env-value"); + + env::remove_var("CUSTOM_HEADERS"); + + println!("✅ CUSTOM_HEADERS env var overrides YAML customHeaders"); +} + +#[test] +#[serial] +fn test_env_overrides_rps_target() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "rps" + target: 100 +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + 
env::set_var("TARGET_RPS", "500"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + match config.load_model { + LoadModel::Rps { target_rps } => { + assert_eq!(target_rps, 500.0); + } + _ => panic!("Expected RPS load model"), + } + + env::remove_var("TARGET_RPS"); + + println!("✅ TARGET_RPS env var overrides YAML load.target"); +} + +#[test] +#[serial] +fn test_env_overrides_ramp_params() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "ramp" + min: 10 + max: 100 + rampDuration: "2m" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("MIN_RPS", "50"); + env::set_var("MAX_RPS", "500"); + env::set_var("RAMP_DURATION", "10m"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + match config.load_model { + LoadModel::RampRps { + min_rps, + max_rps, + ramp_duration, + } => { + assert_eq!(min_rps, 50.0); + assert_eq!(max_rps, 500.0); + assert_eq!(ramp_duration, Duration::from_secs(600)); // 10m + } + _ => panic!("Expected RampRps load model"), + } + + env::remove_var("MIN_RPS"); + env::remove_var("MAX_RPS"); + env::remove_var("RAMP_DURATION"); + + println!("✅ MIN_RPS, MAX_RPS, RAMP_DURATION env vars override YAML ramp params"); +} + +#[test] +#[serial] +fn test_env_overrides_load_model_entirely() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("LOAD_MODEL_TYPE", "Rps"); + env::set_var("TARGET_RPS", "200"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + match config.load_model { + LoadModel::Rps { 
target_rps } => { + assert_eq!(target_rps, 200.0); + } + _ => panic!("Expected RPS load model"), + } + + env::remove_var("LOAD_MODEL_TYPE"); + env::remove_var("TARGET_RPS"); + + println!("✅ LOAD_MODEL_TYPE env var completely overrides YAML load model"); +} + +#[test] +#[serial] +fn test_multiple_env_overrides_together() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.com" + workers: 10 + timeout: "30s" + duration: "5m" + skipTlsVerify: false +load: + model: "rps" + target: 50 +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("TARGET_URL", "https://env.com"); + env::set_var("NUM_CONCURRENT_TASKS", "100"); + env::set_var("TEST_DURATION", "30m"); + env::set_var("SKIP_TLS_VERIFY", "true"); + env::set_var("TARGET_RPS", "500"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.target_url, "https://env.com"); + assert_eq!(config.num_concurrent_tasks, 100); + assert_eq!(config.test_duration, Duration::from_secs(1800)); // 30m + assert!(config.skip_tls_verify); + + match config.load_model { + LoadModel::Rps { target_rps } => { + assert_eq!(target_rps, 500.0); + } + _ => panic!("Expected RPS load model"), + } + + env::remove_var("TARGET_URL"); + env::remove_var("NUM_CONCURRENT_TASKS"); + env::remove_var("TEST_DURATION"); + env::remove_var("SKIP_TLS_VERIFY"); + env::remove_var("TARGET_RPS"); + + println!("✅ Multiple env vars can override YAML values independently"); +} + +#[test] +#[serial] +fn test_partial_env_overrides() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.com" + workers: 50 + timeout: "60s" + duration: "10m" + skipTlsVerify: true +load: + model: "rps" + target: 100 +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + // Only override some fields + env::set_var("NUM_CONCURRENT_TASKS", "200"); + 
env::set_var("TARGET_RPS", "500"); + // Don't set TARGET_URL, TEST_DURATION, SKIP_TLS_VERIFY + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + // Overridden by env + assert_eq!(config.num_concurrent_tasks, 200); + match config.load_model { + LoadModel::Rps { target_rps } => { + assert_eq!(target_rps, 500.0); + } + _ => panic!("Expected RPS load model"), + } + + // Not overridden, should use YAML values + assert_eq!(config.target_url, "https://yaml.com"); + assert_eq!(config.test_duration, Duration::from_secs(600)); // 10m + assert!(config.skip_tls_verify); + + env::remove_var("NUM_CONCURRENT_TASKS"); + env::remove_var("TARGET_RPS"); + + println!("✅ Partial env overrides work correctly"); +} + +#[test] +#[serial] +fn test_env_override_with_yaml_defaults() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" + # workers and timeout will use YAML defaults (10 and 30s) +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("NUM_CONCURRENT_TASKS", "75"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + // Env override + assert_eq!(config.num_concurrent_tasks, 75); + + // YAML default (workers defaults to 10 in YAML) + // Test that we can load without error + + env::remove_var("NUM_CONCURRENT_TASKS"); + + println!("✅ Env overrides work with YAML default values"); +} + +#[test] +#[serial] +fn test_env_override_precedence_chain() { + clean_env(); + // Test full precedence: env > yaml > default + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.com" + workers: 50 # YAML overrides default (10) + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + 
env::set_var("NUM_CONCURRENT_TASKS", "100"); // ENV overrides YAML (50) and default (10) + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + assert_eq!(config.num_concurrent_tasks, 100); // From ENV + + env::remove_var("NUM_CONCURRENT_TASKS"); + + // Now without env, should use YAML value + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + assert_eq!(config.num_concurrent_tasks, 50); // From YAML + + println!("✅ Full precedence chain works: env > yaml > default"); +} + +#[test] +#[serial] +fn test_invalid_env_override_falls_back_to_yaml() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.com" + workers: 50 + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("NUM_CONCURRENT_TASKS", "invalid-number"); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + // Invalid env var should fall back to YAML value + assert_eq!(config.num_concurrent_tasks, 50); + + env::remove_var("NUM_CONCURRENT_TASKS"); + + println!("✅ Invalid env var falls back to YAML value"); +} + +#[test] +#[serial] +fn test_empty_env_override_falls_back_to_yaml() { + clean_env(); + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://yaml.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + env::set_var("TARGET_URL", ""); + + let yaml_config = YamlConfig::from_str(yaml).unwrap(); + let config = Config::from_yaml_with_env_overrides(&yaml_config).unwrap(); + + // Empty env var should fall back to YAML value + assert_eq!(config.target_url, "https://yaml.com"); + + env::remove_var("TARGET_URL"); + + println!("✅ Empty env var falls back to YAML value"); +} + +#[test] +#[serial] +fn 
test_env_override_documentation() { + clean_env(); + // This test documents the environment variable mapping + let mappings = vec![ + ("TARGET_URL", "config.baseUrl"), + ("NUM_CONCURRENT_TASKS", "config.workers"), + ("REQUEST_TIMEOUT", "config.timeout"), + ("TEST_DURATION", "config.duration"), + ("SKIP_TLS_VERIFY", "config.skipTlsVerify"), + ("CUSTOM_HEADERS", "config.customHeaders"), + ("LOAD_MODEL_TYPE", "load.model"), + ("TARGET_RPS", "load.target (RPS model)"), + ("MIN_RPS", "load.min (Ramp model)"), + ("MAX_RPS", "load.max (Ramp model)"), + ("RAMP_DURATION", "load.rampDuration (Ramp model)"), + ("DAILY_MIN_RPS", "load.min (DailyTraffic model)"), + ("DAILY_MID_RPS", "load.mid (DailyTraffic model)"), + ("DAILY_MAX_RPS", "load.max (DailyTraffic model)"), + ( + "DAILY_CYCLE_DURATION", + "load.cycleDuration (DailyTraffic model)", + ), + ]; + + println!("\n=== Environment Variable Override Mapping ==="); + println!("Precedence: env > yaml > default\n"); + for (env_var, yaml_path) in mappings { + println!(" {} → {}", env_var, yaml_path); + } + println!("===========================================\n"); + + println!("✅ Environment variable override mapping documented"); +} diff --git a/tests/error_categorization_tests.rs b/tests/error_categorization_tests.rs new file mode 100644 index 0000000..be21941 --- /dev/null +++ b/tests/error_categorization_tests.rs @@ -0,0 +1,385 @@ +//! Integration tests for error categorization (Issue #34). +//! +//! These tests validate that errors are properly categorized into +//! client errors, server errors, network errors, timeouts, and TLS errors. 
+ +use rust_loadtest::errors::{categorize_status_code, CategorizedError, ErrorCategory}; +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{Assertion, RequestConfig, Scenario, ScenarioContext, Step}; +use std::collections::HashMap; +use std::time::Duration; + +const BASE_URL: &str = "https://httpbin.org"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[test] +fn test_categorize_2xx_success() { + assert_eq!(ErrorCategory::from_status_code(200), None); + assert_eq!(ErrorCategory::from_status_code(201), None); + assert_eq!(ErrorCategory::from_status_code(204), None); + println!("✅ 2xx codes not categorized as errors"); +} + +#[test] +fn test_categorize_3xx_redirection() { + assert_eq!(ErrorCategory::from_status_code(301), None); + assert_eq!(ErrorCategory::from_status_code(302), None); + assert_eq!(ErrorCategory::from_status_code(304), None); + println!("✅ 3xx codes not categorized as errors"); +} + +#[test] +fn test_categorize_4xx_client_errors() { + assert_eq!( + ErrorCategory::from_status_code(400), + Some(ErrorCategory::ClientError) + ); + assert_eq!( + ErrorCategory::from_status_code(401), + Some(ErrorCategory::ClientError) + ); + assert_eq!( + ErrorCategory::from_status_code(403), + Some(ErrorCategory::ClientError) + ); + assert_eq!( + ErrorCategory::from_status_code(404), + Some(ErrorCategory::ClientError) + ); + assert_eq!( + ErrorCategory::from_status_code(429), + Some(ErrorCategory::ClientError) + ); + + println!("✅ 4xx codes categorized as client errors"); +} + +#[test] +fn test_categorize_5xx_server_errors() { + assert_eq!( + ErrorCategory::from_status_code(500), + Some(ErrorCategory::ServerError) + ); + assert_eq!( + ErrorCategory::from_status_code(502), + Some(ErrorCategory::ServerError) + ); + assert_eq!( + ErrorCategory::from_status_code(503), + Some(ErrorCategory::ServerError) + 
); + assert_eq!( + ErrorCategory::from_status_code(504), + Some(ErrorCategory::ServerError) + ); + + println!("✅ 5xx codes categorized as server errors"); +} + +#[test] +fn test_error_category_labels() { + assert_eq!(ErrorCategory::ClientError.label(), "client_error"); + assert_eq!(ErrorCategory::ServerError.label(), "server_error"); + assert_eq!(ErrorCategory::NetworkError.label(), "network_error"); + assert_eq!(ErrorCategory::TimeoutError.label(), "timeout_error"); + assert_eq!(ErrorCategory::TlsError.label(), "tls_error"); + assert_eq!(ErrorCategory::OtherError.label(), "other_error"); + + println!("✅ Error category labels correct"); +} + +#[test] +fn test_error_category_descriptions() { + assert!(ErrorCategory::ClientError.description().contains("4xx")); + assert!(ErrorCategory::ServerError.description().contains("5xx")); + assert!(ErrorCategory::NetworkError + .description() + .contains("Network")); + assert!(ErrorCategory::TimeoutError + .description() + .contains("Timeout")); + assert!(ErrorCategory::TlsError.description().contains("TLS")); + + println!("✅ Error category descriptions correct"); +} + +#[test] +fn test_categorized_error_from_status() { + let err = CategorizedError::from_status( + 404, + "Not Found".to_string(), + Some("/api/missing".to_string()), + ) + .unwrap(); + + assert_eq!(err.category, ErrorCategory::ClientError); + assert_eq!(err.status_code, Some(404)); + assert_eq!(err.message, "Not Found"); + assert_eq!(err.endpoint, Some("/api/missing".to_string())); + + println!("✅ CategorizedError from status works"); +} + +#[test] +fn test_categorized_error_display() { + let err = CategorizedError::new( + ErrorCategory::ServerError, + "Service temporarily unavailable".to_string(), + ); + + let display = format!("{}", err); + assert!(display.contains("server_error")); + assert!(display.contains("Service temporarily unavailable")); + + println!("✅ CategorizedError display formatting works"); +} + +#[test] +fn test_all_error_categories() { + let 
categories = ErrorCategory::all(); + + assert_eq!(categories.len(), 6); + assert!(categories.contains(&ErrorCategory::ClientError)); + assert!(categories.contains(&ErrorCategory::ServerError)); + assert!(categories.contains(&ErrorCategory::NetworkError)); + assert!(categories.contains(&ErrorCategory::TimeoutError)); + assert!(categories.contains(&ErrorCategory::TlsError)); + assert!(categories.contains(&ErrorCategory::OtherError)); + + println!("✅ All error categories enumerated"); +} + +#[test] +fn test_status_code_names() { + assert_eq!(categorize_status_code(200), "OK"); + assert_eq!(categorize_status_code(404), "Not Found"); + assert_eq!(categorize_status_code(500), "Internal Server Error"); + assert_eq!(categorize_status_code(503), "Service Unavailable"); + assert_eq!(categorize_status_code(429), "Too Many Requests"); + + println!("✅ Status code name mapping works"); +} + +#[tokio::test] +async fn test_404_error_categorization() { + let scenario = Scenario { + name: "404 Error Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Request non-existent endpoint".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/this-endpoint-does-not-exist-12345".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // Request should "succeed" (no network error) but return 404 + assert_eq!(result.steps[0].status_code, Some(404)); + + // Error should be categorized as ClientError + if let Some(category) = ErrorCategory::from_status_code(404) { + assert_eq!(category, ErrorCategory::ClientError); + } + + println!("✅ 404 error properly categorized as client error"); +} + +#[tokio::test] +async fn test_timeout_error_categorization() { + let 
scenario = Scenario { + name: "Timeout Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Request with very short timeout".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/health".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + // Create client with extremely short timeout to force timeout + let client = reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_micros(1)) // 1 microsecond - guaranteed to timeout + .build() + .expect("Failed to create client"); + + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // Should fail due to timeout + assert!(!result.success); + assert!(result.steps[0].error.is_some()); + + println!("✅ Timeout error detected (may be categorized as timeout or network)"); +} + +#[tokio::test] +async fn test_network_error_categorization() { + let scenario = Scenario { + name: "Network Error Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Request to invalid host".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/health".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + // Use invalid base URL to trigger network error + let executor = ScenarioExecutor::new( + "https://invalid-host-that-does-not-exist-12345.com".to_string(), + client, + ); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // Should fail due to DNS/network error + assert!(!result.success); + assert!(result.steps[0].error.is_some()); + assert_eq!(result.steps[0].status_code, None); + + println!("✅ Network error detected for invalid host"); +} + +#[tokio::test] +async fn 
test_mixed_error_types_in_scenario() { + let scenario = Scenario { + name: "Mixed Errors Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Success".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }, + Step { + name: "404 Client Error".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status/404".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // First step succeeds + assert!(result.steps[0].success); + assert_eq!(result.steps[0].status_code, Some(200)); + + // Second step completes but returns 404 + if result.steps.len() > 1 { + assert_eq!(result.steps[1].status_code, Some(404)); + + let category = ErrorCategory::from_status_code(404).unwrap(); + assert_eq!(category, ErrorCategory::ClientError); + } + + println!("✅ Mixed success and error types handled correctly"); +} + +#[test] +fn test_error_category_equality() { + assert_eq!(ErrorCategory::ClientError, ErrorCategory::ClientError); + assert_ne!(ErrorCategory::ClientError, ErrorCategory::ServerError); + assert_ne!(ErrorCategory::NetworkError, ErrorCategory::TimeoutError); + + println!("✅ Error category equality works"); +} + +#[test] +fn test_error_category_hash() { + use std::collections::HashMap; + + let mut map = HashMap::new(); + map.insert(ErrorCategory::ClientError, 10); + map.insert(ErrorCategory::ServerError, 20); + + assert_eq!(map.get(&ErrorCategory::ClientError), Some(&10)); + assert_eq!(map.get(&ErrorCategory::ServerError), Some(&20)); + + println!("✅ Error 
category can be used as HashMap key"); +} + +#[test] +fn test_categorized_error_with_endpoint() { + let err = CategorizedError::from_status( + 503, + "Service Unavailable".to_string(), + Some("/api/critical".to_string()), + ) + .unwrap(); + + assert_eq!(err.category, ErrorCategory::ServerError); + assert_eq!(err.endpoint, Some("/api/critical".to_string())); + + println!("✅ CategorizedError includes endpoint information"); +} + +#[test] +fn test_categorized_error_new() { + let err = CategorizedError::new( + ErrorCategory::TlsError, + "Certificate verification failed".to_string(), + ); + + assert_eq!(err.category, ErrorCategory::TlsError); + assert_eq!(err.status_code, None); + assert!(err.message.contains("Certificate")); + + println!("✅ CategorizedError::new works"); +} diff --git a/tests/http_methods_tests.rs b/tests/http_methods_tests.rs new file mode 100644 index 0000000..600b350 --- /dev/null +++ b/tests/http_methods_tests.rs @@ -0,0 +1,555 @@ +//! Integration tests for all HTTP methods (Issue #32). +//! +//! These tests validate that GET, POST, PUT, PATCH, DELETE, HEAD, and OPTIONS +//! methods work correctly in both single requests and multi-step scenarios. 
+ +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{RequestConfig, Scenario, ScenarioContext, Step}; +use std::collections::HashMap; +use std::time::Duration; + +const BASE_URL: &str = "https://httpbin.org"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[tokio::test] +async fn test_get_request() { + let scenario = Scenario { + name: "GET Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "GET /get".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "GET request should succeed"); + assert_eq!(result.steps[0].status_code, Some(200)); + + println!("✅ GET request works"); +} + +#[tokio::test] +async fn test_post_request() { + let scenario = Scenario { + name: "POST Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "POST /post".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/post".to_string(), + body: Some(r#"{"test": "data"}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "POST request should succeed"); 
+ assert!(result.steps[0].status_code.is_some()); + + println!("✅ POST request works"); +} + +#[tokio::test] +async fn test_put_request() { + let scenario = Scenario { + name: "PUT Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "PUT /put".to_string(), + request: RequestConfig { + method: "PUT".to_string(), + path: "/put".to_string(), + body: Some(r#"{"update": "data"}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // PUT may return 2xx/3xx or 4xx depending on endpoint implementation + assert!(result.steps[0].status_code.is_some()); + + println!( + "✅ PUT request works (status: {:?})", + result.steps[0].status_code + ); +} + +#[tokio::test] +async fn test_patch_request() { + let scenario = Scenario { + name: "PATCH Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "PATCH /patch".to_string(), + request: RequestConfig { + method: "PATCH".to_string(), + path: "/patch".to_string(), + body: Some(r#"{"patch": "data"}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // PATCH may return 2xx/3xx or 4xx depending on endpoint implementation + assert!(result.steps[0].status_code.is_some()); + + println!( + "✅ PATCH request works (status: {:?})", + 
result.steps[0].status_code + ); +} + +#[tokio::test] +async fn test_delete_request() { + let scenario = Scenario { + name: "DELETE Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "DELETE /delete".to_string(), + request: RequestConfig { + method: "DELETE".to_string(), + path: "/delete".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // DELETE may return 2xx/3xx or 4xx depending on endpoint implementation + assert!(result.steps[0].status_code.is_some()); + + println!( + "✅ DELETE request works (status: {:?})", + result.steps[0].status_code + ); +} + +#[tokio::test] +async fn test_head_request() { + let scenario = Scenario { + name: "HEAD Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "HEAD /get".to_string(), + request: RequestConfig { + method: "HEAD".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // HEAD should return same status as GET but no body + assert!(result.success, "HEAD request should succeed"); + assert!(result.steps[0].status_code.is_some()); + + println!( + "✅ HEAD request works (status: {:?})", + result.steps[0].status_code + ); +} + +#[tokio::test] +async fn test_options_request() { + let scenario = Scenario { + name: "OPTIONS Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "OPTIONS /get".to_string(), + request: RequestConfig { + method: 
"OPTIONS".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // OPTIONS typically returns 200 or 204 with Allow header + assert!(result.steps[0].status_code.is_some()); + + println!( + "✅ OPTIONS request works (status: {:?})", + result.steps[0].status_code + ); +} + +#[tokio::test] +async fn test_mixed_methods_scenario() { + let scenario = Scenario { + name: "Mixed HTTP Methods".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "GET health".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "POST status".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/post".to_string(), + body: Some(r#"{"action": "check"}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "PUT status".to_string(), + request: RequestConfig { + method: "PUT".to_string(), + path: "/put".to_string(), + body: Some(r#"{"action": "update"}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "HEAD health".to_string(), + request: RequestConfig { + method: "HEAD".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: 
None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // All steps should execute (some may fail depending on API implementation) + assert!(result.steps.len() >= 2, "Should execute multiple steps"); + assert!(result.steps[0].success, "GET should succeed"); + assert!( + result.steps[3].success || result.steps.len() == 4, + "HEAD should execute" + ); + + println!("✅ Mixed methods scenario works"); + println!(" Steps executed: {}", result.steps.len()); + for (i, step) in result.steps.iter().enumerate() { + println!( + " Step {}: {} (status: {:?})", + i + 1, + step.step_name, + step.status_code + ); + } +} + +#[tokio::test] +async fn test_case_insensitive_methods() { + // Test that methods are case-insensitive + let test_cases: Vec<(&str, &str)> = vec![ + ("get", "/get"), + ("Get", "/get"), + ("GET", "/get"), + ("post", "/post"), + ("Post", "/post"), + ("POST", "/post"), + ]; + + for (method, path) in test_cases { + let scenario = Scenario { + name: format!("Case Test: {}", method), + weight: 1.0, + steps: vec![Step { + name: format!("{} request", method), + request: RequestConfig { + method: method.to_string(), + path: path.to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "{} should work (case-insensitive)", method); + } + + println!("✅ HTTP methods are case-insensitive"); +} + +#[tokio::test] +async fn test_rest_crud_flow() { + // Simulate a realistic REST CRUD flow + let scenario = Scenario { + name: "REST CRUD Flow".to_string(), + weight: 1.0, + steps: 
vec![ + Step { + name: "1. GET - Read all".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "2. POST - Create".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/post".to_string(), + body: Some(r#"{"name": "Test Item", "price": 99.99}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "3. PUT - Update full".to_string(), + request: RequestConfig { + method: "PUT".to_string(), + path: "/put".to_string(), + body: Some( + r#"{"name": "Updated Item", "price": 149.99, "stock": 10}"#.to_string(), + ), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "4. PATCH - Partial update".to_string(), + request: RequestConfig { + method: "PATCH".to_string(), + path: "/patch".to_string(), + body: Some(r#"{"price": 129.99}"#.to_string()), + headers: { + let mut h = HashMap::new(); + h.insert("Content-Type".to_string(), "application/json".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "5. HEAD - Check existence".to_string(), + request: RequestConfig { + method: "HEAD".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "6. 
DELETE - Remove".to_string(), + request: RequestConfig { + method: "DELETE".to_string(), + path: "/delete".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + println!("✅ REST CRUD flow executed"); + println!(" Total steps: {}", result.steps.len()); + for step in result.steps.iter() { + println!(" {} - Status: {:?}", step.step_name, step.status_code); + } + + // At least GET should work + assert!(result.steps[0].success, "GET should succeed"); +} + +#[tokio::test] +async fn test_options_cors_preflight() { + // Test OPTIONS for CORS preflight + let scenario = Scenario { + name: "CORS Preflight".to_string(), + weight: 1.0, + steps: vec![Step { + name: "OPTIONS preflight".to_string(), + request: RequestConfig { + method: "OPTIONS".to_string(), + path: "/get".to_string(), + body: None, + headers: { + let mut h = HashMap::new(); + h.insert( + "Access-Control-Request-Method".to_string(), + "POST".to_string(), + ); + h.insert( + "Access-Control-Request-Headers".to_string(), + "Content-Type".to_string(), + ); + h.insert("Origin".to_string(), "https://example.com".to_string()); + h + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.steps[0].status_code.is_some()); + + println!( + "✅ OPTIONS CORS preflight works (status: {:?})", + result.steps[0].status_code + ); +} diff --git a/tests/integration_test.rs b/tests/integration_test.rs index 678b317..c59d861 100644 --- a/tests/integration_test.rs +++ 
b/tests/integration_test.rs @@ -57,6 +57,8 @@ async fn worker_sends_get_requests() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -94,6 +96,8 @@ async fn worker_sends_post_requests() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -127,6 +131,8 @@ async fn worker_sends_json_post_body() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -159,6 +165,8 @@ async fn worker_tracks_200_status_codes() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -195,6 +203,8 @@ async fn worker_tracks_404_status_codes() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -231,6 +241,8 @@ async fn worker_tracks_500_status_codes() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -269,6 +281,8 @@ async fn worker_records_request_duration() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -305,6 +319,8 @@ async fn 
concurrent_requests_returns_to_zero_after_worker_finishes() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); @@ -337,6 +353,8 @@ async fn worker_handles_connection_error_gracefully() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::builder() @@ -377,6 +395,8 @@ async fn worker_respects_rps_rate_limit() { test_duration: Duration::from_secs(3), load_model: LoadModel::Rps { target_rps: 5.0 }, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let start = Instant::now(); @@ -414,6 +434,8 @@ async fn worker_stops_after_test_duration() { test_duration: Duration::from_secs(2), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let start = Instant::now(); @@ -458,6 +480,8 @@ async fn worker_handles_slow_responses() { test_duration: Duration::from_secs(3), load_model: LoadModel::Concurrent, num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, }; let client = reqwest::Client::new(); diff --git a/tests/multi_scenario_tests.rs b/tests/multi_scenario_tests.rs new file mode 100644 index 0000000..41bacae --- /dev/null +++ b/tests/multi_scenario_tests.rs @@ -0,0 +1,521 @@ +//! Integration tests for multi-scenario execution (Issue #43). +//! +//! These tests validate: +//! - Weighted scenario selection +//! - Round-robin distribution +//! - Per-scenario metrics tracking +//! 
- Multi-scenario YAML loading

use rust_loadtest::multi_scenario::{RoundRobinDistributor, ScenarioMetrics, ScenarioSelector};
use rust_loadtest::scenario::Scenario;
use rust_loadtest::yaml_config::YamlConfig;
use std::collections::HashMap;

/// Builds the canonical 80/15/5 weighted scenario fixture shared by these tests.
fn create_test_scenarios() -> Vec<Scenario> {
    vec![
        Scenario {
            name: "Read Operations".to_string(),
            weight: 80.0,
            steps: vec![],
        },
        Scenario {
            name: "Write Operations".to_string(),
            weight: 15.0,
            steps: vec![],
        },
        Scenario {
            name: "Delete Operations".to_string(),
            weight: 5.0,
            steps: vec![],
        },
    ]
}

#[test]
fn test_scenario_selector_basic() {
    let scenarios = create_test_scenarios();
    let selector = ScenarioSelector::new(scenarios);

    assert_eq!(selector.scenario_count(), 3);
    assert_eq!(selector.total_weight(), 100.0);

    println!("✅ ScenarioSelector basic functionality works");
}

#[test]
fn test_scenario_selector_single_selection() {
    let scenarios = create_test_scenarios();
    let selector = ScenarioSelector::new(scenarios);

    let selected = selector.select();
    assert!(
        selected.name == "Read Operations"
            || selected.name == "Write Operations"
            || selected.name == "Delete Operations"
    );

    println!("✅ ScenarioSelector can select a scenario");
}

#[test]
fn test_scenario_selector_weighted_distribution() {
    let scenarios = create_test_scenarios();
    let selector = ScenarioSelector::new(scenarios);

    let iterations = 10000;
    // NOTE(review): the generic parameters were stripped in transit; restored as
    // HashMap<String, usize> — confirm against the original source.
    let mut counts: HashMap<String, usize> = HashMap::new();

    for _ in 0..iterations {
        let scenario = selector.select();
        *counts.entry(scenario.name.clone()).or_insert(0) += 1;
    }

    let read_count = counts.get("Read Operations").unwrap();
    let write_count = counts.get("Write Operations").unwrap();
    let delete_count = counts.get("Delete Operations").unwrap();

    // Calculate percentages
    let read_pct = *read_count as f64 / iterations as f64;
    let write_pct = *write_count as f64 / iterations as f64;
    let delete_pct = *delete_count as f64 / iterations
as f64; + + // Check within 5% margin + assert!( + (read_pct - 0.80).abs() < 0.05, + "Read: expected ~80%, got {:.1}%", + read_pct * 100.0 + ); + assert!( + (write_pct - 0.15).abs() < 0.05, + "Write: expected ~15%, got {:.1}%", + write_pct * 100.0 + ); + assert!( + (delete_pct - 0.05).abs() < 0.05, + "Delete: expected ~5%, got {:.1}%", + delete_pct * 100.0 + ); + + println!("✅ Weighted distribution is correct:"); + println!(" Read: {:.1}% (expected 80%)", read_pct * 100.0); + println!(" Write: {:.1}% (expected 15%)", write_pct * 100.0); + println!(" Delete: {:.1}% (expected 5%)", delete_pct * 100.0); +} + +#[test] +fn test_scenario_selector_probabilities() { + let scenarios = create_test_scenarios(); + let selector = ScenarioSelector::new(scenarios); + + let probs = selector.probabilities(); + + assert_eq!(probs.len(), 3); + assert_eq!(probs[0].0, "Read Operations"); + assert!((probs[0].1 - 0.80).abs() < 0.001); + assert_eq!(probs[1].0, "Write Operations"); + assert!((probs[1].1 - 0.15).abs() < 0.001); + assert_eq!(probs[2].0, "Delete Operations"); + assert!((probs[2].1 - 0.05).abs() < 0.001); + + println!("✅ Probability calculation works"); +} + +#[test] +fn test_scenario_selector_equal_weights() { + let scenarios = vec![ + Scenario { + name: "S1".to_string(), + weight: 1.0, + steps: vec![], + }, + Scenario { + name: "S2".to_string(), + weight: 1.0, + steps: vec![], + }, + Scenario { + name: "S3".to_string(), + weight: 1.0, + steps: vec![], + }, + ]; + + let selector = ScenarioSelector::new(scenarios); + + let iterations = 9000; + let mut counts: HashMap = HashMap::new(); + + for _ in 0..iterations { + let scenario = selector.select(); + *counts.entry(scenario.name.clone()).or_insert(0) += 1; + } + + // Each should be ~33% (within 5%) + for (name, count) in &counts { + let pct = *count as f64 / iterations as f64; + assert!( + (pct - 0.333).abs() < 0.05, + "{}: expected ~33%, got {:.1}%", + name, + pct * 100.0 + ); + } + + println!("✅ Equal weight distribution 
works"); +} + +#[test] +fn test_scenario_selector_extreme_weights() { + let scenarios = vec![ + Scenario { + name: "Dominant".to_string(), + weight: 99.0, + steps: vec![], + }, + Scenario { + name: "Rare".to_string(), + weight: 1.0, + steps: vec![], + }, + ]; + + let selector = ScenarioSelector::new(scenarios); + + let iterations = 10000; + let mut counts: HashMap = HashMap::new(); + + for _ in 0..iterations { + let scenario = selector.select(); + *counts.entry(scenario.name.clone()).or_insert(0) += 1; + } + + let dominant_pct = *counts.get("Dominant").unwrap() as f64 / iterations as f64; + let rare_pct = *counts.get("Rare").unwrap() as f64 / iterations as f64; + + assert!((dominant_pct - 0.99).abs() < 0.02); + assert!((rare_pct - 0.01).abs() < 0.02); + + println!("✅ Extreme weight distribution works (99:1)"); +} + +#[test] +#[should_panic(expected = "empty scenarios list")] +fn test_scenario_selector_empty_list() { + ScenarioSelector::new(vec![]); +} + +#[test] +#[should_panic(expected = "negative weight")] +fn test_scenario_selector_negative_weight() { + let scenarios = vec![Scenario { + name: "Invalid".to_string(), + weight: -5.0, + steps: vec![], + }]; + ScenarioSelector::new(scenarios); +} + +#[test] +#[should_panic(expected = "zero weight")] +fn test_scenario_selector_zero_weight() { + let scenarios = vec![Scenario { + name: "Invalid".to_string(), + weight: 0.0, + steps: vec![], + }]; + ScenarioSelector::new(scenarios); +} + +#[test] +fn test_round_robin_distributor_basic() { + let scenarios = create_test_scenarios(); + let distributor = RoundRobinDistributor::new(scenarios); + + assert_eq!(distributor.scenario_count(), 3); + + println!("✅ RoundRobinDistributor basic functionality works"); +} + +#[test] +fn test_round_robin_distributor_sequence() { + let scenarios = create_test_scenarios(); + let distributor = RoundRobinDistributor::new(scenarios); + + let s1 = distributor.next(); + let s2 = distributor.next(); + let s3 = distributor.next(); + let s4 = 
distributor.next(); + let s5 = distributor.next(); + let s6 = distributor.next(); + + assert_eq!(s1.name, "Read Operations"); + assert_eq!(s2.name, "Write Operations"); + assert_eq!(s3.name, "Delete Operations"); + assert_eq!(s4.name, "Read Operations"); // Cycle + assert_eq!(s5.name, "Write Operations"); + assert_eq!(s6.name, "Delete Operations"); + + println!("✅ RoundRobinDistributor cycles through scenarios correctly"); +} + +#[test] +fn test_round_robin_distributor_even_distribution() { + let scenarios = create_test_scenarios(); + let distributor = RoundRobinDistributor::new(scenarios); + + let iterations = 9000; // Multiple of 3 + let mut counts: HashMap = HashMap::new(); + + for _ in 0..iterations { + let scenario = distributor.next(); + *counts.entry(scenario.name.clone()).or_insert(0) += 1; + } + + // Each should get exactly 3000 iterations (33.33%) + assert_eq!(*counts.get("Read Operations").unwrap(), 3000); + assert_eq!(*counts.get("Write Operations").unwrap(), 3000); + assert_eq!(*counts.get("Delete Operations").unwrap(), 3000); + + println!("✅ RoundRobinDistributor provides even distribution"); +} + +#[test] +#[should_panic(expected = "empty scenarios list")] +fn test_round_robin_distributor_empty_list() { + RoundRobinDistributor::new(vec![]); +} + +#[test] +fn test_scenario_metrics_initialization() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + metrics.initialize_scenarios(&scenarios); + + for scenario in &scenarios { + assert_eq!(metrics.get_executions(&scenario.name), 0); + assert_eq!(metrics.get_successes(&scenario.name), 0); + assert_eq!(metrics.get_failures(&scenario.name), 0); + } + + println!("✅ ScenarioMetrics initialization works"); +} + +#[test] +fn test_scenario_metrics_recording() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + metrics.initialize_scenarios(&scenarios); + + metrics.record_execution("Read Operations", true, 100); + 
metrics.record_execution("Read Operations", true, 200); + metrics.record_execution("Read Operations", false, 150); + + assert_eq!(metrics.get_executions("Read Operations"), 3); + assert_eq!(metrics.get_successes("Read Operations"), 2); + assert_eq!(metrics.get_failures("Read Operations"), 1); + assert_eq!(metrics.get_total_time_ms("Read Operations"), 450); + + println!("✅ ScenarioMetrics recording works"); +} + +#[test] +fn test_scenario_metrics_calculations() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + metrics.initialize_scenarios(&scenarios); + + metrics.record_execution("Write Operations", true, 100); + metrics.record_execution("Write Operations", true, 200); + metrics.record_execution("Write Operations", true, 300); + metrics.record_execution("Write Operations", false, 400); + + assert_eq!(metrics.get_average_time_ms("Write Operations"), 250.0); + assert_eq!(metrics.get_success_rate("Write Operations"), 0.75); + + println!("✅ ScenarioMetrics calculations (average, success rate) work"); +} + +#[test] +fn test_scenario_metrics_summary() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + metrics.initialize_scenarios(&scenarios); + + metrics.record_execution("Read Operations", true, 100); + metrics.record_execution("Write Operations", true, 200); + metrics.record_execution("Delete Operations", false, 150); + + let summary = metrics.summary(); + assert_eq!(summary.scenarios.len(), 3); + + // Find each scenario in summary + let read_summary = summary + .scenarios + .iter() + .find(|s| s.name == "Read Operations") + .unwrap(); + assert_eq!(read_summary.executions, 1); + assert_eq!(read_summary.successes, 1); + assert_eq!(read_summary.average_time_ms, 100.0); + + println!("✅ ScenarioMetrics summary generation works"); +} + +#[test] +fn test_scenario_metrics_zero_executions() { + let scenarios = create_test_scenarios(); + let mut metrics = ScenarioMetrics::new(); + 
metrics.initialize_scenarios(&scenarios); + + // Don't record any executions + assert_eq!(metrics.get_average_time_ms("Read Operations"), 0.0); + assert_eq!(metrics.get_success_rate("Read Operations"), 0.0); + + println!("✅ ScenarioMetrics handles zero executions correctly"); +} + +#[test] +fn test_yaml_multiple_scenarios_loading() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Read API" + weight: 70 + steps: + - request: + method: "GET" + path: "/api/read" + + - name: "Write API" + weight: 20 + steps: + - request: + method: "POST" + path: "/api/write" + + - name: "Delete API" + weight: 10 + steps: + - request: + method: "DELETE" + path: "/api/delete" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios.len(), 3); + assert_eq!(scenarios[0].name, "Read API"); + assert_eq!(scenarios[0].weight, 70.0); + assert_eq!(scenarios[1].name, "Write API"); + assert_eq!(scenarios[1].weight, 20.0); + assert_eq!(scenarios[2].name, "Delete API"); + assert_eq!(scenarios[2].weight, 10.0); + + println!("✅ YAML loading of multiple weighted scenarios works"); +} + +#[test] +fn test_yaml_scenarios_with_selector() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Heavy" + weight: 80 + steps: + - request: + method: "GET" + path: "/heavy" + + - name: "Light" + weight: 20 + steps: + - request: + method: "GET" + path: "/light" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + let selector = ScenarioSelector::new(scenarios); + + let iterations = 10000; + let mut counts: HashMap = HashMap::new(); + + for _ in 0..iterations { + let scenario = selector.select(); + *counts.entry(scenario.name.clone()).or_insert(0) += 1; + } + + let heavy_pct = 
*counts.get("Heavy").unwrap() as f64 / iterations as f64;
    let light_pct = *counts.get("Light").unwrap() as f64 / iterations as f64;

    assert!((heavy_pct - 0.80).abs() < 0.05);
    assert!((light_pct - 0.20).abs() < 0.05);

    println!("✅ YAML-loaded scenarios work with ScenarioSelector");
}

#[test]
fn test_integration_selector_with_metrics() {
    let scenarios = create_test_scenarios();
    let selector = ScenarioSelector::new(scenarios.clone());
    let mut metrics = ScenarioMetrics::new();
    metrics.initialize_scenarios(&scenarios);

    // Simulate 100 scenario executions
    for _ in 0..100 {
        let scenario = selector.select();
        // NOTE(review): the turbofish generics were stripped in transit; restored
        // as random::<bool>() / random::<u64>() — confirm against the original.
        let success = rand::random::<bool>();
        let duration_ms = rand::random::<u64>() % 1000;
        metrics.record_execution(&scenario.name, success, duration_ms);
    }

    let summary = metrics.summary();
    let total_executions: u64 = summary.scenarios.iter().map(|s| s.executions).sum();
    assert_eq!(total_executions, 100);

    println!("✅ Integration: Selector + Metrics works");
}

#[test]
fn test_scenario_selector_get_methods() {
    let scenarios = create_test_scenarios();
    let selector = ScenarioSelector::new(scenarios);

    assert!(selector.get_scenario(0).is_some());
    assert!(selector.get_scenario(1).is_some());
    assert!(selector.get_scenario(2).is_some());
    assert!(selector.get_scenario(3).is_none());

    let all_scenarios = selector.scenarios();
    assert_eq!(all_scenarios.len(), 3);

    println!("✅ ScenarioSelector get methods work");
}

#[test]
fn test_round_robin_get_methods() {
    let scenarios = create_test_scenarios();
    let distributor = RoundRobinDistributor::new(scenarios);

    assert!(distributor.get_scenario(0).is_some());
    assert!(distributor.get_scenario(2).is_some());
    assert!(distributor.get_scenario(3).is_none());

    let all_scenarios = distributor.scenarios();
    assert_eq!(all_scenarios.len(), 3);

    println!("✅ RoundRobinDistributor get methods work");
}
diff --git a/tests/per_scenario_throughput_tests.rs
b/tests/per_scenario_throughput_tests.rs new file mode 100644 index 0000000..1400fe6 --- /dev/null +++ b/tests/per_scenario_throughput_tests.rs @@ -0,0 +1,339 @@ +//! Integration tests for per-scenario throughput tracking (Issue #35). +//! +//! These tests validate that throughput (requests per second) is tracked +//! separately for each scenario type, enabling performance comparison. + +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{RequestConfig, Scenario, ScenarioContext, Step}; +use rust_loadtest::throughput::{format_throughput_table, ThroughputTracker}; +use std::collections::HashMap; +use std::time::Duration; + +const BASE_URL: &str = "https://httpbin.org"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[test] +fn test_throughput_tracker_basic() { + let tracker = ThroughputTracker::new(); + + tracker.record("scenario1", Duration::from_millis(100)); + tracker.record("scenario1", Duration::from_millis(150)); + tracker.record("scenario2", Duration::from_millis(200)); + + let stats1 = tracker.stats("scenario1").unwrap(); + assert_eq!(stats1.total_count, 2); + assert_eq!(stats1.avg_time_ms, 125.0); + + let stats2 = tracker.stats("scenario2").unwrap(); + assert_eq!(stats2.total_count, 1); + assert_eq!(stats2.avg_time_ms, 200.0); + + println!("✅ Throughput tracker basic functionality works"); +} + +#[test] +fn test_throughput_tracker_rps_calculation() { + let tracker = ThroughputTracker::new(); + + // Record 10 requests + for _ in 0..10 { + tracker.record("test", Duration::from_millis(50)); + } + + // Wait a bit to ensure time has passed + std::thread::sleep(Duration::from_millis(100)); + + let stats = tracker.stats("test").unwrap(); + assert_eq!(stats.total_count, 10); + assert!(stats.rps > 0.0, "RPS should be greater than 0"); + assert!(stats.duration.as_millis() >= 100); + + println!("✅ 
RPS calculation works (RPS: {:.2})", stats.rps); +} + +#[test] +fn test_throughput_tracker_multiple_scenarios() { + let tracker = ThroughputTracker::new(); + + tracker.record("fast", Duration::from_millis(10)); + tracker.record("fast", Duration::from_millis(20)); + tracker.record("medium", Duration::from_millis(100)); + tracker.record("slow", Duration::from_millis(500)); + + let all_stats = tracker.all_stats(); + assert_eq!(all_stats.len(), 3); + + // Should be sorted by name + assert_eq!(all_stats[0].scenario_name, "fast"); + assert_eq!(all_stats[1].scenario_name, "medium"); + assert_eq!(all_stats[2].scenario_name, "slow"); + + println!("✅ Multiple scenarios tracked correctly"); +} + +#[test] +fn test_throughput_stats_formatting() { + let tracker = ThroughputTracker::new(); + + tracker.record("TestScenario", Duration::from_millis(100)); + + let stats = tracker.stats("TestScenario").unwrap(); + let formatted = stats.format(); + + assert!(formatted.contains("TestScenario")); + assert!(formatted.contains("requests")); + assert!(formatted.contains("RPS")); + + println!("✅ Throughput stats formatting works"); + println!(" {}", formatted); +} + +#[test] +fn test_throughput_table_formatting() { + let tracker = ThroughputTracker::new(); + + tracker.record("Scenario A", Duration::from_millis(50)); + tracker.record("Scenario B", Duration::from_millis(100)); + tracker.record("Scenario C", Duration::from_millis(150)); + + let all_stats = tracker.all_stats(); + let table = format_throughput_table(&all_stats); + + assert!(table.contains("Scenario")); + assert!(table.contains("Requests")); + assert!(table.contains("RPS")); + assert!(table.contains("Scenario A")); + assert!(table.contains("Scenario B")); + + println!("✅ Throughput table formatting works"); + println!("{}", table); +} + +#[test] +fn test_total_throughput() { + let tracker = ThroughputTracker::new(); + + // Record requests across multiple scenarios + for _ in 0..5 { + tracker.record("scenario1", 
Duration::from_millis(50)); + } + for _ in 0..3 { + tracker.record("scenario2", Duration::from_millis(75)); + } + + std::thread::sleep(Duration::from_millis(50)); + + let total_rps = tracker.total_throughput(); + assert!(total_rps > 0.0, "Total RPS should be greater than 0"); + + println!( + "✅ Total throughput calculation works (Total RPS: {:.2})", + total_rps + ); +} + +#[test] +fn test_throughput_reset() { + let tracker = ThroughputTracker::new(); + + tracker.record("test", Duration::from_millis(100)); + assert!(tracker.stats("test").is_some()); + + tracker.reset(); + assert!(tracker.stats("test").is_none()); + + println!("✅ Throughput tracker reset works"); +} + +#[tokio::test] +async fn test_scenario_throughput_tracking() { + let tracker = ThroughputTracker::new(); + + let scenario = Scenario { + name: "Throughput Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Fast Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + // Execute scenario 5 times + for _ in 0..5 { + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + assert!(result.success); + + // Record throughput + tracker.record(&scenario.name, Duration::from_millis(result.total_time_ms)); + } + + let stats = tracker.stats(&scenario.name).unwrap(); + assert_eq!(stats.total_count, 5); + assert!(stats.rps > 0.0); + + println!("✅ Scenario throughput tracking works"); + println!(" {}", stats.format()); +} + +#[tokio::test] +async fn test_multiple_scenarios_different_throughput() { + let tracker = ThroughputTracker::new(); + + let fast_scenario = Scenario { + name: "Fast Scenario".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Health 
Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let slow_scenario = Scenario { + name: "Slow Scenario".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "First Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "Delayed Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/delay/1".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + // Execute fast scenario 3 times + for _ in 0..3 { + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&fast_scenario, &mut context).await; + tracker.record( + &fast_scenario.name, + Duration::from_millis(result.total_time_ms), + ); + } + + // Execute slow scenario 2 times + for _ in 0..2 { + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&slow_scenario, &mut context).await; + tracker.record( + &slow_scenario.name, + Duration::from_millis(result.total_time_ms), + ); + } + + let fast_stats = tracker.stats(&fast_scenario.name).unwrap(); + let slow_stats = tracker.stats(&slow_scenario.name).unwrap(); + + assert_eq!(fast_stats.total_count, 3); + assert_eq!(slow_stats.total_count, 2); + + // Fast scenario should have lower average time + assert!( + fast_stats.avg_time_ms < slow_stats.avg_time_ms, + "Fast scenario ({:.2}ms) should be faster than slow scenario ({:.2}ms)", + 
fast_stats.avg_time_ms, + slow_stats.avg_time_ms + ); + + println!("✅ Multiple scenarios tracked with different throughput"); + println!(" Fast: {}", fast_stats.format()); + println!(" Slow: {}", slow_stats.format()); +} + +#[test] +fn test_throughput_tracker_concurrent_access() { + use std::sync::Arc; + use std::thread; + + let tracker = Arc::new(ThroughputTracker::new()); + let mut handles = vec![]; + + // Spawn 5 threads, each recording 10 requests + for thread_id in 0..5 { + let tracker_clone = Arc::clone(&tracker); + let handle = thread::spawn(move || { + for _ in 0..10 { + tracker_clone.record( + &format!("scenario{}", thread_id % 2), + Duration::from_millis(50), + ); + } + }); + handles.push(handle); + } + + for handle in handles { + handle.join().unwrap(); + } + + // Should have recorded 50 total requests across 2 scenarios + let all_stats = tracker.all_stats(); + let total_count: u64 = all_stats.iter().map(|s| s.total_count).sum(); + assert_eq!(total_count, 50); + + println!("✅ Concurrent access to throughput tracker works"); +} + +#[test] +fn test_empty_throughput_tracker() { + let tracker = ThroughputTracker::new(); + + assert!(tracker.stats("nonexistent").is_none()); + assert_eq!(tracker.all_stats().len(), 0); + + let table = format_throughput_table(&tracker.all_stats()); + assert!(table.contains("No throughput data")); + + println!("✅ Empty throughput tracker handled correctly"); +} diff --git a/tests/percentile_tracking_tests.rs b/tests/percentile_tracking_tests.rs new file mode 100644 index 0000000..9a8c596 --- /dev/null +++ b/tests/percentile_tracking_tests.rs @@ -0,0 +1,392 @@ +//! Integration tests for percentile latency tracking (Issue #33). +//! +//! These tests validate that percentile calculations are accurate and that +//! latencies are properly tracked across requests, scenarios, and steps. 
+ +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::percentiles::{ + MultiLabelPercentileTracker, PercentileTracker, GLOBAL_SCENARIO_PERCENTILES, + GLOBAL_STEP_PERCENTILES, +}; +use rust_loadtest::scenario::{RequestConfig, Scenario, ScenarioContext, Step}; +use std::collections::HashMap; +use std::time::Duration; + +const BASE_URL: &str = "https://httpbin.org"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[test] +fn test_percentile_tracker_basic() { + let tracker = PercentileTracker::new(); + + // Record latencies: 10ms, 20ms, 30ms, 40ms, 50ms, 60ms, 70ms, 80ms, 90ms, 100ms + for i in 1..=10 { + tracker.record_ms(i * 10); + } + + let stats = tracker.stats().expect("Should have stats"); + + assert_eq!(stats.count, 10); + // HdrHistogram has internal precision rounding, so use approximate checks + assert!( + stats.min >= 9_900 && stats.min <= 10_100, + "min {}μs should be around 10000μs", + stats.min + ); + assert!( + stats.max >= 99_900 && stats.max <= 100_500, + "max {}μs should be around 100000μs", + stats.max + ); + + // P50 should be around 50ms + assert!( + stats.p50 >= 45_000 && stats.p50 <= 55_000, + "P50 {}μs should be around 50000μs", + stats.p50 + ); + + // P90 should be around 90ms + assert!( + stats.p90 >= 85_000 && stats.p90 <= 95_000, + "P90 {}μs should be around 90000μs", + stats.p90 + ); + + // P99 should be around 100ms (or close to max) + assert!( + stats.p99 >= 95_000 && stats.p99 <= 105_000, + "P99 {}μs should be around 100000μs", + stats.p99 + ); + + println!("✅ Basic percentile tracking works correctly"); + println!(" {}", stats.format()); +} + +#[test] +fn test_percentile_tracker_large_dataset() { + let tracker = PercentileTracker::new(); + + // Record 1000 samples from 1ms to 1000ms + for i in 1..=1000 { + tracker.record_ms(i); + } + + let stats = tracker.stats().expect("Should 
have stats"); + + assert_eq!(stats.count, 1000); + + // For uniform distribution: + // P50 should be around 500ms + assert!( + stats.p50 >= 480_000 && stats.p50 <= 520_000, + "P50 {}μs should be around 500000μs", + stats.p50 + ); + + // P90 should be around 900ms + assert!( + stats.p90 >= 880_000 && stats.p90 <= 920_000, + "P90 {}μs should be around 900000μs", + stats.p90 + ); + + // P95 should be around 950ms + assert!( + stats.p95 >= 930_000 && stats.p95 <= 970_000, + "P95 {}μs should be around 950000μs", + stats.p95 + ); + + // P99 should be around 990ms + assert!( + stats.p99 >= 970_000 && stats.p99 <= 1_010_000, + "P99 {}μs should be around 990000μs", + stats.p99 + ); + + println!("✅ Large dataset percentile tracking accurate"); + println!(" {}", stats.format()); +} + +#[test] +fn test_percentile_tracker_skewed_distribution() { + let tracker = PercentileTracker::new(); + + // Record 90 fast requests (10ms) and 10 slow requests (1000ms) + for _ in 0..90 { + tracker.record_ms(10); + } + for _ in 0..10 { + tracker.record_ms(1000); + } + + let stats = tracker.stats().expect("Should have stats"); + + assert_eq!(stats.count, 100); + + // P50 should be 10ms (median is in the fast group) + assert!( + stats.p50 <= 15_000, + "P50 {}μs should be around 10000μs", + stats.p50 + ); + + // P90 should still be 10ms (90th percentile is last fast request) + assert!( + stats.p90 <= 15_000, + "P90 {}μs should be around 10000μs", + stats.p90 + ); + + // P95 should be 1000ms (now in the slow group) + assert!( + stats.p95 >= 900_000, + "P95 {}μs should be around 1000000μs", + stats.p95 + ); + + // P99 should be 1000ms + assert!( + stats.p99 >= 900_000, + "P99 {}μs should be around 1000000μs", + stats.p99 + ); + + println!("✅ Skewed distribution percentiles correct"); + println!(" {}", stats.format()); + println!( + " Shows P90 at {}ms and P95 at {}ms", + stats.p90 as f64 / 1000.0, + stats.p95 as f64 / 1000.0 + ); +} + +#[test] +fn test_multi_label_tracker() { + let tracker = 
MultiLabelPercentileTracker::new(); + + // Record different latencies for different endpoints + tracker.record("/api/fast", 10); + tracker.record("/api/fast", 20); + tracker.record("/api/fast", 15); + + tracker.record("/api/slow", 100); + tracker.record("/api/slow", 200); + tracker.record("/api/slow", 150); + + let fast_stats = tracker.stats("/api/fast").expect("Should have fast stats"); + let slow_stats = tracker.stats("/api/slow").expect("Should have slow stats"); + + assert_eq!(fast_stats.count, 3); + assert_eq!(slow_stats.count, 3); + + // Fast endpoint should have low latencies + assert!(fast_stats.max < 30_000, "Fast max should be under 30ms"); + + // Slow endpoint should have high latencies + assert!(slow_stats.min > 90_000, "Slow min should be over 90ms"); + + println!("✅ Multi-label tracking separates endpoints correctly"); + println!(" Fast endpoint: {}", fast_stats.format()); + println!(" Slow endpoint: {}", slow_stats.format()); +} + +#[test] +fn test_multi_label_all_stats() { + let tracker = MultiLabelPercentileTracker::new(); + + tracker.record("endpoint1", 10); + tracker.record("endpoint2", 20); + tracker.record("endpoint3", 30); + + let all_stats = tracker.all_stats(); + + assert_eq!(all_stats.len(), 3); + assert!(all_stats.contains_key("endpoint1")); + assert!(all_stats.contains_key("endpoint2")); + assert!(all_stats.contains_key("endpoint3")); + + println!("✅ all_stats() returns all tracked labels"); +} + +#[tokio::test] +async fn test_scenario_percentile_tracking() { + let scenario = Scenario { + name: "Percentile Test Scenario".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Health Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "Status Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: 
None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + // Execute scenario multiple times + for _ in 0..5 { + let mut context = ScenarioContext::new(); + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success); + + // Manually record for testing (in production, worker.rs does this) + GLOBAL_SCENARIO_PERCENTILES.record(&scenario.name, result.total_time_ms); + + for step in &result.steps { + let label = format!("{}:{}", scenario.name, step.step_name); + GLOBAL_STEP_PERCENTILES.record(&label, step.response_time_ms); + } + } + + // Verify we have stats + let scenario_stats = GLOBAL_SCENARIO_PERCENTILES + .stats(&scenario.name) + .expect("Should have scenario stats"); + + assert_eq!(scenario_stats.count, 5, "Should have 5 scenario executions"); + + let health_label = format!("{}:Health Check", scenario.name); + let health_stats = GLOBAL_STEP_PERCENTILES + .stats(&health_label) + .expect("Should have health step stats"); + + assert_eq!(health_stats.count, 5, "Should have 5 health check steps"); + + println!("✅ Scenario percentile tracking works"); + println!(" Scenario: {}", scenario_stats.format()); + println!(" Health step: {}", health_stats.format()); +} + +#[test] +fn test_percentile_tracker_reset() { + let tracker = PercentileTracker::new(); + + tracker.record_ms(100); + tracker.record_ms(200); + assert!(tracker.stats().is_some()); + + tracker.reset(); + assert!( + tracker.stats().is_none(), + "Stats should be None after reset" + ); + + println!("✅ Tracker reset works correctly"); +} + +#[test] +fn test_percentile_stats_format() { + let tracker = PercentileTracker::new(); + + // Record some values + for i in 1..=100 { + tracker.record_ms(i); + } + + let stats = tracker.stats().expect("Should have stats"); + let formatted = stats.format(); + + // Should contain 
all the key metrics
+    assert!(formatted.contains("count="));
+    assert!(formatted.contains("min="));
+    assert!(formatted.contains("max="));
+    assert!(formatted.contains("mean="));
+    assert!(formatted.contains("p50="));
+    assert!(formatted.contains("p90="));
+    assert!(formatted.contains("p95="));
+    assert!(formatted.contains("p99="));
+    assert!(formatted.contains("p99.9="));
+
+    println!("✅ Stats formatting includes all percentiles");
+    println!(" {}", formatted);
+}
+
+#[tokio::test]
+async fn test_realistic_latency_distribution() {
+    // Simulate realistic API latencies: mostly fast with occasional slow requests
+    let tracker = PercentileTracker::new();
+
+    // 80% of requests are fast (10-50ms)
+    for _ in 0..80 {
+        let latency = 10 + (rand::random::<u64>() % 40);
+        tracker.record_ms(latency);
+    }
+
+    // 15% are medium (50-200ms)
+    for _ in 0..15 {
+        let latency = 50 + (rand::random::<u64>() % 150);
+        tracker.record_ms(latency);
+    }
+
+    // 5% are slow (200-1000ms)
+    for _ in 0..5 {
+        let latency = 200 + (rand::random::<u64>() % 800);
+        tracker.record_ms(latency);
+    }
+
+    let stats = tracker.stats().expect("Should have stats");
+
+    assert_eq!(stats.count, 100);
+
+    // P50 should be in the fast range
+    assert!(
+        stats.p50 < 100_000,
+        "P50 {}μs should be under 100ms",
+        stats.p50
+    );
+
+    // P90 should be in the medium range or below
+    assert!(
+        stats.p90 < 300_000,
+        "P90 {}μs should be under 300ms",
+        stats.p90
+    );
+
+    // P99 should catch the slow requests
+    assert!(
+        stats.p99 >= 200_000,
+        "P99 {}μs should be at least 200ms",
+        stats.p99
+    );
+
+    println!("✅ Realistic latency distribution captured correctly");
+    println!(" {}", stats.format());
+    println!(
+        " P50 at {:.2}ms, P90 at {:.2}ms, P99 at {:.2}ms",
+        stats.p50 as f64 / 1000.0,
+        stats.p90 as f64 / 1000.0,
+        stats.p99 as f64 / 1000.0
+    );
+}
diff --git a/tests/scenario_integration_tests.rs b/tests/scenario_integration_tests.rs
new file mode 100644
index 0000000..dcb8f21
--- /dev/null
+++
b/tests/scenario_integration_tests.rs @@ -0,0 +1,394 @@ +//! Integration tests for multi-step scenario execution. +//! +//! These tests run against httpbin.org to validate scenario execution. +//! +//! Run with: cargo test --test scenario_integration_tests + +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{ + Assertion, RequestConfig, Scenario, ScenarioContext, Step, ThinkTime, +}; +use std::collections::HashMap; +use std::time::Duration; + +const BASE_URL: &str = "https://httpbin.org"; + +/// Create a basic HTTP client for testing +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[tokio::test] +async fn test_health_check_scenario() { + let scenario = Scenario { + name: "Health Check".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Check Health".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Health check scenario should succeed"); + assert_eq!(result.steps.len(), 1); + assert_eq!(result.steps[0].status_code, Some(200)); +} + +#[tokio::test] +async fn test_product_browsing_scenario() { + let scenario = Scenario { + name: "Product Browsing".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "List Items".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: 
Some(ThinkTime::Fixed(Duration::from_millis(100))), + }, + Step { + name: "Get Item Details".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Product browsing scenario should succeed"); + assert_eq!(result.steps_completed, 2); + assert_eq!(result.steps.len(), 2); + + // Verify both steps succeeded + for step in &result.steps { + assert!(step.success, "Step '{}' should succeed", step.step_name); + assert_eq!(step.status_code, Some(200)); + } +} + +#[tokio::test] +async fn test_variable_substitution() { + let mut context = ScenarioContext::new(); + + // Simulate extracting a product ID (this will be done by #27) + context.set_variable("product_id".to_string(), "prod-123".to_string()); + + let scenario = Scenario { + name: "Variable Substitution Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Get Product with Variable".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get?product=${product_id}".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + let result = executor.execute(&scenario, &mut context).await; + + // The request should have been made to /products/prod-123 + // If variable substitution works, we'll get a response + assert!( + result.steps[0].status_code.is_some(), + "Should have received a response" + ); +} + +#[tokio::test] +async fn test_multi_step_with_delays() { + let 
scenario = Scenario { + name: "Multi-Step with Think Times".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Step 1".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(200))), + }, + Step { + name: "Step 2".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(200))), + }, + Step { + name: "Step 3".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let start = std::time::Instant::now(); + let result = executor.execute(&scenario, &mut context).await; + let duration = start.elapsed(); + + assert!(result.success, "Multi-step scenario should succeed"); + assert_eq!(result.steps_completed, 3); + + // Should take at least 400ms (200ms + 200ms think times) + assert!( + duration.as_millis() >= 400, + "Scenario should respect think times (took {}ms, expected >= 400ms)", + duration.as_millis() + ); +} + +#[tokio::test] +async fn test_scenario_failure_handling() { + let scenario = Scenario { + name: "Failure Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Valid Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }, + Step { + name: "Invalid 
Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/status/404".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![Assertion::StatusCode(200)], + think_time: None, + }, + Step { + name: "Should Not Execute".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // Scenario should fail on step 2 + assert!(!result.success, "Scenario should fail"); + assert_eq!(result.steps_completed, 1, "Should complete only 1 step"); + assert_eq!( + result.failed_at_step, + Some(1), + "Should fail at step 1 (index 1)" + ); + assert_eq!(result.steps.len(), 2, "Should have 2 step results"); + + // Step 1 should succeed + assert!(result.steps[0].success); + assert_eq!(result.steps[0].status_code, Some(200)); + + // Step 2 should fail with 404 + assert!(!result.steps[1].success); + assert_eq!(result.steps[1].status_code, Some(404)); +} + +#[tokio::test] +async fn test_timestamp_variable() { + let scenario = Scenario { + name: "Timestamp Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Request with Timestamp".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: { + let mut headers = HashMap::new(); + // Test timestamp in headers + headers.insert("X-Request-ID".to_string(), "req-${timestamp}".to_string()); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = 
ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // Timestamp substitution should work, request should succeed + assert!(result.success, "Scenario with timestamp should succeed"); + assert_eq!(result.steps[0].status_code, Some(200)); +} + +#[tokio::test] +async fn test_post_request_with_json_body() { + let scenario = Scenario { + name: "POST Request Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Post JSON Data".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/post".to_string(), + body: Some( + r#"{ + "email": "loadtest-${timestamp}@example.com", + "password": "TestPass123!", + "name": "Test User" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // POST should work (200 OK from httpbin) + assert!( + result.steps[0].success, + "POST should succeed, got status: {:?}", + result.steps[0].status_code + ); +} + +#[tokio::test] +async fn test_scenario_context_isolation() { + // Test that each scenario execution has isolated context + let scenario = Scenario { + name: "Context Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Simple Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + // Execute scenario twice with different contexts + let mut context1 = 
ScenarioContext::new(); + context1.set_variable("test".to_string(), "value1".to_string()); + + let mut context2 = ScenarioContext::new(); + context2.set_variable("test".to_string(), "value2".to_string()); + + let result1 = executor.execute(&scenario, &mut context1).await; + let result2 = executor.execute(&scenario, &mut context2).await; + + // Both should succeed + assert!(result1.success); + assert!(result2.success); + + // Contexts should maintain their separate variables + assert_eq!(context1.get_variable("test"), Some(&"value1".to_string())); + assert_eq!(context2.get_variable("test"), Some(&"value2".to_string())); +} diff --git a/tests/scenario_worker_tests.rs b/tests/scenario_worker_tests.rs new file mode 100644 index 0000000..b1b5bde --- /dev/null +++ b/tests/scenario_worker_tests.rs @@ -0,0 +1,156 @@ +//! Unit tests for scenario worker functionality. +//! +//! These tests validate that the scenario worker correctly executes scenarios +//! according to load models and respects timing constraints. 
+ +use rust_loadtest::load_models::LoadModel; +use rust_loadtest::scenario::{RequestConfig, Scenario, Step, ThinkTime}; +use rust_loadtest::worker::{run_scenario_worker, ScenarioWorkerConfig}; +use std::collections::HashMap; +use std::time::Duration; +use tokio::time::Instant; + +#[tokio::test] +async fn test_scenario_worker_respects_duration() { + let scenario = Scenario { + name: "Test Scenario".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Health Check".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + let config = ScenarioWorkerConfig { + task_id: 1, + base_url: "https://httpbin.org".to_string(), + scenario, + test_duration: Duration::from_secs(2), + load_model: LoadModel::Rps { target_rps: 1.0 }, + num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, + }; + + let client = reqwest::Client::new(); + let start_time = Instant::now(); + + // Run worker + let worker_start = Instant::now(); + run_scenario_worker(client, config, start_time).await; + let worker_duration = worker_start.elapsed(); + + // Worker should stop after ~2 seconds + assert!( + worker_duration.as_secs() >= 2 && worker_duration.as_secs() <= 3, + "Worker should run for approximately 2 seconds, ran for {}s", + worker_duration.as_secs() + ); +} + +#[tokio::test] +async fn test_scenario_worker_constant_load() { + let scenario = Scenario { + name: "Constant Load Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Quick Request".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }], + }; + + // Run at 2 scenarios per second for 3 seconds + // Should execute approximately 6 scenarios + let config = 
ScenarioWorkerConfig { + task_id: 1, + base_url: "https://httpbin.org".to_string(), + scenario, + test_duration: Duration::from_secs(3), + load_model: LoadModel::Rps { target_rps: 2.0 }, + num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, + }; + + let client = reqwest::Client::new(); + let start_time = Instant::now(); + + run_scenario_worker(client, config, start_time).await; + + // Just verify it completes without panicking + // Actual scenario count would need metrics tracking to verify +} + +#[tokio::test] +async fn test_scenario_worker_with_think_time() { + let scenario = Scenario { + name: "Think Time Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Step 1".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Step 2".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let config = ScenarioWorkerConfig { + task_id: 1, + base_url: "https://httpbin.org".to_string(), + scenario, + test_duration: Duration::from_secs(2), + load_model: LoadModel::Rps { target_rps: 0.5 }, // 1 scenario every 2 seconds + num_concurrent_tasks: 1, + percentile_tracking_enabled: true, + percentile_sampling_rate: 100, + }; + + let client = reqwest::Client::new(); + let start_time = Instant::now(); + + let worker_start = Instant::now(); + run_scenario_worker(client, config, start_time).await; + let worker_duration = worker_start.elapsed(); + + // Should take at least 2 seconds (test duration) + assert!( + worker_duration.as_secs() >= 2, + "Worker should run for at least 2 seconds" + ); +} diff --git a/tests/scenario_yaml_tests.rs 
b/tests/scenario_yaml_tests.rs new file mode 100644 index 0000000..c8ce591 --- /dev/null +++ b/tests/scenario_yaml_tests.rs @@ -0,0 +1,695 @@ +//! Integration tests for scenario YAML definitions (Issue #42). +//! +//! These tests validate enhanced scenario features in YAML including: +//! - Data file support (CSV, JSON) +//! - Random think time +//! - Scenario-level configuration overrides +//! - Multiple scenarios with weighting +//! - Complex multi-step scenarios + +use rust_loadtest::scenario::ThinkTime; +use rust_loadtest::yaml_config::YamlConfig; +use std::time::Duration; + +#[test] +fn test_basic_scenario() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Basic Scenario" + steps: + - request: + method: "GET" + path: "/" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios.len(), 1); + assert_eq!(scenarios[0].name, "Basic Scenario"); + assert_eq!(scenarios[0].weight, 1.0); // Default weight + assert_eq!(scenarios[0].steps.len(), 1); + + println!("✅ Basic scenario parsing works"); +} + +#[test] +fn test_multiple_scenarios_with_weight() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Heavy Traffic Scenario" + weight: 70 + steps: + - request: + method: "GET" + path: "/api/v1/popular" + + - name: "Light Traffic Scenario" + weight: 30 + steps: + - request: + method: "GET" + path: "/api/v1/details" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios.len(), 2); + assert_eq!(scenarios[0].name, "Heavy Traffic Scenario"); + assert_eq!(scenarios[0].weight, 70.0); + assert_eq!(scenarios[1].name, "Light Traffic Scenario"); + assert_eq!(scenarios[1].weight, 30.0); + + println!("✅ Multiple scenarios with weighting work"); +} + 
+#[test] +fn test_scenario_with_fixed_think_time() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Scenario with Think Time" + steps: + - name: "Step 1" + request: + method: "GET" + path: "/page1" + thinkTime: "3s" + + - name: "Step 2" + request: + method: "GET" + path: "/page2" + thinkTime: "5s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios[0].steps.len(), 2); + + // Check Step 1 think time + let step1_think_time = scenarios[0].steps[0].think_time.as_ref().unwrap(); + match step1_think_time { + ThinkTime::Fixed(duration) => { + assert_eq!(*duration, Duration::from_secs(3)); + } + _ => panic!("Expected Fixed think time"), + } + + // Check Step 2 think time + let step2_think_time = scenarios[0].steps[1].think_time.as_ref().unwrap(); + match step2_think_time { + ThinkTime::Fixed(duration) => { + assert_eq!(*duration, Duration::from_secs(5)); + } + _ => panic!("Expected Fixed think time"), + } + + println!("✅ Fixed think time works"); +} + +#[test] +fn test_scenario_with_random_think_time() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Scenario with Random Think Time" + steps: + - name: "Browse" + request: + method: "GET" + path: "/browse" + thinkTime: + min: "2s" + max: "5s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + let think_time = scenarios[0].steps[0].think_time.as_ref().unwrap(); + match think_time { + ThinkTime::Random { min, max } => { + assert_eq!(*min, Duration::from_secs(2)); + assert_eq!(*max, Duration::from_secs(5)); + } + _ => panic!("Expected Random think time"), + } + + println!("✅ Random think time works"); +} + +#[test] +fn test_multi_step_scenario() { + let yaml = r#" +version: "1.0" +config: + baseUrl: 
"https://api.example.com" + duration: "10m" +load: + model: "rps" + target: 100 +scenarios: + - name: "E-commerce Flow" + weight: 1.0 + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "2s" + + - name: "Search" + request: + method: "GET" + path: "/search?q=laptop" + extract: + - type: "jsonPath" + name: "productId" + jsonPath: "$.products[0].id" + thinkTime: "3s" + + - name: "Product Details" + request: + method: "GET" + path: "/products/${productId}" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "5s" + + - name: "Add to Cart" + request: + method: "POST" + path: "/cart" + body: '{"productId": "${productId}", "quantity": 1}' + assertions: + - type: "statusCode" + expected: 201 +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios[0].steps.len(), 4); + assert_eq!(scenarios[0].steps[0].name, "Homepage"); + assert_eq!(scenarios[0].steps[1].name, "Search"); + assert_eq!(scenarios[0].steps[2].name, "Product Details"); + assert_eq!(scenarios[0].steps[3].name, "Add to Cart"); + + // Validate extraction in step 2 + assert_eq!(scenarios[0].steps[1].extractions.len(), 1); + + // Validate assertions + assert_eq!(scenarios[0].steps[0].assertions.len(), 1); + assert_eq!(scenarios[0].steps[2].assertions.len(), 1); + assert_eq!(scenarios[0].steps[3].assertions.len(), 1); + + println!("✅ Multi-step scenario with extractions and assertions works"); +} + +#[test] +fn test_scenario_with_data_file_csv() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Data-Driven Test" + dataFile: + path: "./testdata/users.csv" + format: "csv" + strategy: "sequential" + steps: + - request: + method: "POST" + path: "/login" + body: '{"username": "${username}", "password": "${password}"}' +"#; + + let config = 
YamlConfig::from_str(yaml).unwrap(); + + // Validate data file configuration + assert!(config.scenarios[0].data_file.is_some()); + + let data_file = config.scenarios[0].data_file.as_ref().unwrap(); + assert_eq!(data_file.path, "./testdata/users.csv"); + assert_eq!(data_file.format, "csv"); + assert_eq!(data_file.strategy, "sequential"); + + println!("✅ Data file configuration (CSV) works"); +} + +#[test] +fn test_scenario_with_data_file_json() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "JSON Data-Driven Test" + dataFile: + path: "./testdata/products.json" + format: "json" + strategy: "random" + steps: + - request: + method: "GET" + path: "/products/${productId}" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + + let data_file = config.scenarios[0].data_file.as_ref().unwrap(); + assert_eq!(data_file.path, "./testdata/products.json"); + assert_eq!(data_file.format, "json"); + assert_eq!(data_file.strategy, "random"); + + println!("✅ Data file configuration (JSON) works"); +} + +#[test] +fn test_scenario_with_config_overrides() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + timeout: "30s" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Slow API Scenario" + config: + timeout: "120s" + retryCount: 3 + retryDelay: "5s" + steps: + - request: + method: "GET" + path: "/slow-endpoint" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + + // Validate scenario config overrides + let scenario_config = &config.scenarios[0].config; + assert!(scenario_config.timeout.is_some()); + assert_eq!(scenario_config.retry_count, Some(3)); + assert!(scenario_config.retry_delay.is_some()); + + println!("✅ Scenario-level config overrides work"); +} + +#[test] +fn test_scenario_with_extractors() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + 
- name: "Test Extractors" + steps: + - name: "Get User" + request: + method: "GET" + path: "/user/123" + extract: + - type: "jsonPath" + name: "userId" + jsonPath: "$.id" + - type: "jsonPath" + name: "userName" + jsonPath: "$.name" + - type: "header" + name: "authToken" + header: "X-Auth-Token" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios[0].steps[0].extractions.len(), 3); + + println!("✅ Multiple extractors per step work"); +} + +#[test] +fn test_scenario_with_multiple_assertions() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test Assertions" + steps: + - name: "API Call" + request: + method: "POST" + path: "/api/data" + body: '{"test": true}' + assertions: + - type: "statusCode" + expected: 201 + - type: "responseTime" + max: "1s" + - type: "bodyContains" + text: "success" + - type: "jsonPath" + path: "$.status" + expected: "ok" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios[0].steps[0].assertions.len(), 4); + + println!("✅ Multiple assertions per step work"); +} + +#[test] +fn test_scenario_with_headers_and_query_params() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Test Headers and Query Params" + steps: + - request: + method: "GET" + path: "/api/search" + queryParams: + q: "laptop" + limit: "10" + sort: "price" + headers: + Authorization: "Bearer ${token}" + X-Custom-Header: "test-value" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + // Validate request path includes query params + assert!(scenarios[0].steps[0].request.path.contains("?")); + assert!(scenarios[0].steps[0].request.path.contains("q=laptop")); + 
assert!(scenarios[0].steps[0].request.path.contains("limit=10")); + + // Validate headers + assert_eq!(scenarios[0].steps[0].request.headers.len(), 2); + + println!("✅ Headers and query parameters work"); +} + +#[test] +fn test_weighted_scenario_distribution() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Read Operations" + weight: 80 + steps: + - request: + method: "GET" + path: "/api/read" + + - name: "Write Operations" + weight: 15 + steps: + - request: + method: "POST" + path: "/api/write" + + - name: "Delete Operations" + weight: 5 + steps: + - request: + method: "DELETE" + path: "/api/delete" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios.len(), 3); + + let total_weight: f64 = scenarios.iter().map(|s| s.weight).sum(); + assert_eq!(total_weight, 100.0); + + // Verify percentages + assert_eq!(scenarios[0].weight / total_weight, 0.80); // 80% + assert_eq!(scenarios[1].weight / total_weight, 0.15); // 15% + assert_eq!(scenarios[2].weight / total_weight, 0.05); // 5% + + println!("✅ Weighted scenario distribution works"); +} + +#[test] +fn test_scenario_with_no_think_time() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - name: "Fast Scenario" + steps: + - request: + method: "GET" + path: "/fast" + - request: + method: "GET" + path: "/fast2" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert!(scenarios[0].steps[0].think_time.is_none()); + assert!(scenarios[0].steps[1].think_time.is_none()); + + println!("✅ Scenarios without think time work"); +} + +#[test] +fn test_scenario_data_file_defaults() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "5m" +load: + model: "concurrent" +scenarios: + - 
name: "Test Defaults" + dataFile: + path: "./data.csv" + steps: + - request: + method: "GET" + path: "/" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + + let data_file = config.scenarios[0].data_file.as_ref().unwrap(); + assert_eq!(data_file.format, "csv"); // Default format + assert_eq!(data_file.strategy, "sequential"); // Default strategy + + println!("✅ Data file defaults work"); +} + +#[test] +fn test_complex_real_world_scenario() { + let yaml = r#" +version: "1.0" +metadata: + name: "E-commerce Load Test" + description: "Realistic user shopping flow" + author: "test@example.com" +config: + baseUrl: "https://shop.example.com" + workers: 50 + timeout: "30s" + duration: "30m" +load: + model: "ramp" + min: 10 + max: 200 + rampDuration: "10m" +scenarios: + - name: "Browse and Purchase" + weight: 70 + config: + timeout: "60s" + retryCount: 2 + retryDelay: "3s" + dataFile: + path: "./users.csv" + format: "csv" + strategy: "cycle" + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + assertions: + - type: "statusCode" + expected: 200 + - type: "responseTime" + max: "1s" + thinkTime: + min: "1s" + max: "3s" + + - name: "Login" + request: + method: "POST" + path: "/api/auth/login" + body: '{"email": "${email}", "password": "${password}"}' + headers: + Content-Type: "application/json" + extract: + - type: "jsonPath" + name: "authToken" + jsonPath: "$.token" + assertions: + - type: "statusCode" + expected: 200 + thinkTime: "2s" + + - name: "Search Products" + request: + method: "GET" + path: "/api/products/search" + queryParams: + q: "laptop" + limit: "20" + headers: + Authorization: "Bearer ${authToken}" + extract: + - type: "jsonPath" + name: "productId" + jsonPath: "$.results[0].id" + thinkTime: + min: "2s" + max: "5s" + + - name: "View Product" + request: + method: "GET" + path: "/api/products/${productId}" + headers: + Authorization: "Bearer ${authToken}" + assertions: + - type: "statusCode" + expected: 200 + - type: "bodyContains" + 
text: "price" + thinkTime: "4s" + + - name: "Add to Cart" + request: + method: "POST" + path: "/api/cart/items" + body: '{"productId": "${productId}", "quantity": 1}' + headers: + Authorization: "Bearer ${authToken}" + Content-Type: "application/json" + assertions: + - type: "statusCode" + expected: 201 + thinkTime: "2s" + + - name: "Quick Browse" + weight: 30 + steps: + - name: "Homepage" + request: + method: "GET" + path: "/" + thinkTime: "1s" + + - name: "Category Page" + request: + method: "GET" + path: "/category/electronics" + thinkTime: + min: "2s" + max: "4s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + // Validate overall structure + assert_eq!(scenarios.len(), 2); + assert_eq!(scenarios[0].name, "Browse and Purchase"); + assert_eq!(scenarios[0].steps.len(), 5); + assert_eq!(scenarios[1].name, "Quick Browse"); + assert_eq!(scenarios[1].steps.len(), 2); + + // Validate weighting + assert_eq!(scenarios[0].weight, 70.0); + assert_eq!(scenarios[1].weight, 30.0); + + // Validate data file + assert!(config.scenarios[0].data_file.is_some()); + + // Validate config overrides + assert!(config.scenarios[0].config.timeout.is_some()); + assert_eq!(config.scenarios[0].config.retry_count, Some(2)); + + println!("✅ Complex real-world scenario works"); +} diff --git a/tests/think_time_tests.rs b/tests/think_time_tests.rs new file mode 100644 index 0000000..7307750 --- /dev/null +++ b/tests/think_time_tests.rs @@ -0,0 +1,390 @@ +//! Integration tests for think times and configurable delays (#29). +//! +//! These tests validate that think times: +//! - Add delays between steps +//! - Support both fixed and random delays +//! 
- Do NOT count towards request latency metrics + +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{RequestConfig, Scenario, ScenarioContext, Step, ThinkTime}; +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +const BASE_URL: &str = "https://httpbin.org"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .cookie_store(true) + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[tokio::test] +async fn test_fixed_think_time() { + let scenario = Scenario { + name: "Fixed Think Time Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Step 1".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Step 2".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let start = Instant::now(); + let result = executor.execute(&scenario, &mut context).await; + let total_duration = start.elapsed(); + + assert!(result.success, "Scenario should succeed"); + assert_eq!(result.steps_completed, 2); + + // Total time should include the 500ms think time + assert!( + total_duration.as_millis() >= 500, + "Total duration {}ms should be at least 500ms (think time)", + total_duration.as_millis() + ); + + // But individual request metrics should NOT include think time + // Step 1 latency should be much less than 500ms + assert!( + result.steps[0].response_time_ms < 500, + "Step 1 latency {}ms should not include 500ms think 
time", + result.steps[0].response_time_ms + ); + + println!("\nFixed Think Time Test:"); + println!(" Total duration: {}ms", total_duration.as_millis()); + println!( + " Step 1 latency: {}ms (excludes think time)", + result.steps[0].response_time_ms + ); + println!(" Step 2 latency: {}ms", result.steps[1].response_time_ms); + println!(" ✅ Think time does NOT count towards request latency"); +} + +#[tokio::test] +async fn test_random_think_time() { + let scenario = Scenario { + name: "Random Think Time Test".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Request with Random Delay".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Random { + min: Duration::from_millis(200), + max: Duration::from_millis(800), + }), + }, + Step { + name: "Next Step".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + + // Run multiple times to test randomness + let mut durations = Vec::new(); + + for _ in 0..5 { + let mut context = ScenarioContext::new(); + let start = Instant::now(); + let result = executor.execute(&scenario, &mut context).await; + let total_duration = start.elapsed(); + + assert!(result.success); + durations.push(total_duration.as_millis()); + + // Should take at least 200ms (min think time) + assert!( + total_duration.as_millis() >= 200, + "Duration {}ms should be at least 200ms", + total_duration.as_millis() + ); + } + + println!("\nRandom Think Time Test (200-800ms):"); + println!(" Run 1: {}ms", durations[0]); + println!(" Run 2: {}ms", durations[1]); + println!(" Run 3: {}ms", durations[2]); + println!(" 
Run 4: {}ms", durations[3]); + println!(" Run 5: {}ms", durations[4]); + + // Check that durations vary (randomness working) + let all_same = durations.windows(2).all(|w| w[0] == w[1]); + assert!(!all_same, "Durations should vary due to random think time"); + + println!(" ✅ Think times are random and vary between runs"); +} + +#[tokio::test] +async fn test_multiple_think_times() { + let scenario = Scenario { + name: "Multiple Think Times".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Step 1".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(100))), + }, + Step { + name: "Step 2".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(200))), + }, + Step { + name: "Step 3".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(300))), + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let start = Instant::now(); + let result = executor.execute(&scenario, &mut context).await; + let total_duration = start.elapsed(); + + assert!(result.success); + assert_eq!(result.steps_completed, 3); + + // Should take at least 600ms (100 + 200 + 300) + assert!( + total_duration.as_millis() >= 600, + "Total duration {}ms should be at least 600ms (cumulative think time)", + total_duration.as_millis() + ); + + println!("\nMultiple Think Times Test:"); + println!( + " Total duration: 
{}ms (includes 600ms think time)", + total_duration.as_millis() + ); + println!( + " Step 1: {}ms + 100ms think", + result.steps[0].response_time_ms + ); + println!( + " Step 2: {}ms + 200ms think", + result.steps[1].response_time_ms + ); + println!( + " Step 3: {}ms + 300ms think", + result.steps[2].response_time_ms + ); + println!(" ✅ Multiple think times accumulate correctly"); +} + +#[tokio::test] +async fn test_no_think_time() { + let scenario = Scenario { + name: "No Think Time".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Fast Step 1".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + Step { + name: "Fast Step 2".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let start = Instant::now(); + let result = executor.execute(&scenario, &mut context).await; + let total_duration = start.elapsed(); + + assert!(result.success); + + // Should be fast with no think time (under 1 second) + assert!( + total_duration.as_millis() < 1000, + "Without think time, should complete quickly ({}ms)", + total_duration.as_millis() + ); + + println!("\nNo Think Time Test:"); + println!(" Total duration: {}ms", total_duration.as_millis()); + println!(" ✅ No delays when think_time is None"); +} + +#[tokio::test] +async fn test_realistic_user_behavior() { + // Simulate realistic e-commerce browsing with varied think times + let scenario = Scenario { + name: "Realistic User Behavior".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Land on homepage".to_string(), + request: 
RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Random { + min: Duration::from_secs(1), + max: Duration::from_secs(3), + }), // Read homepage content + }, + Step { + name: "Browse products".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Random { + min: Duration::from_secs(2), + max: Duration::from_secs(5), + }), // Browse product list + }, + Step { + name: "View product details".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: Some(ThinkTime::Random { + min: Duration::from_secs(3), + max: Duration::from_secs(10), + }), // Read product description, reviews + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let start = Instant::now(); + let result = executor.execute(&scenario, &mut context).await; + let total_duration = start.elapsed(); + + assert!(result.success); + + // Should take at least 6 seconds (1+2+3 minimum think times) + assert!( + total_duration.as_secs() >= 6, + "Realistic flow should take at least 6s, took {}s", + total_duration.as_secs() + ); + + println!("\nRealistic User Behavior Test:"); + println!(" Total duration: {:.1}s", total_duration.as_secs_f64()); + println!( + " Step 1 (homepage): {}ms + 1-3s think", + result.steps[0].response_time_ms + ); + println!( + " Step 2 (browse): {}ms + 2-5s think", + result.steps[1].response_time_ms + ); + println!( + " Step 3 (details): {}ms + 3-10s think", + result.steps[2].response_time_ms + ); + println!(" ✅ Realistic user delays 
applied"); +} diff --git a/tests/variable_extraction_tests.rs b/tests/variable_extraction_tests.rs new file mode 100644 index 0000000..199f8c3 --- /dev/null +++ b/tests/variable_extraction_tests.rs @@ -0,0 +1,384 @@ +//! Integration tests for variable extraction (#27). +//! +//! These tests validate JSONPath, Regex, Header, and Cookie extraction +//! from HTTP responses against httpbin.org. + +use rust_loadtest::executor::ScenarioExecutor; +use rust_loadtest::scenario::{ + Extractor, RequestConfig, Scenario, ScenarioContext, Step, ThinkTime, VariableExtraction, +}; +use std::collections::HashMap; +use std::time::Duration; + +const BASE_URL: &str = "https://httpbin.org"; + +fn create_test_client() -> reqwest::Client { + reqwest::Client::builder() + .timeout(Duration::from_secs(30)) + .build() + .expect("Failed to create HTTP client") +} + +#[tokio::test] +async fn test_jsonpath_extraction_from_products() { + // httpbin /json returns {"slideshow": {"author": "...", "title": "...", ...}} + let scenario = Scenario { + name: "JSONPath Extraction Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Get JSON and Extract Fields".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![ + VariableExtraction { + name: "author".to_string(), + extractor: Extractor::JsonPath("$.slideshow.author".to_string()), + }, + VariableExtraction { + name: "title".to_string(), + extractor: Extractor::JsonPath("$.slideshow.title".to_string()), + }, + ], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Scenario should succeed"); + + // Verify variables were extracted + assert!( + context.get_variable("author").is_some(), + 
"Should extract author" + ); + assert!( + context.get_variable("title").is_some(), + "Should extract title" + ); + + println!("Extracted author: {:?}", context.get_variable("author")); + println!("Extracted title: {:?}", context.get_variable("title")); +} + +#[tokio::test] +async fn test_extraction_and_reuse_in_next_step() { + // Extract the origin IP from /get and reuse it as a query param in the next step + let scenario = Scenario { + name: "Extract and Reuse".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Get Origin IP".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![VariableExtraction { + name: "origin_ip".to_string(), + extractor: Extractor::JsonPath("$.origin".to_string()), + }], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(100))), + }, + Step { + name: "Use Extracted Value".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get?origin=${origin_ip}".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Both steps should succeed"); + assert_eq!(result.steps_completed, 2, "Should complete both steps"); + + // Verify origin IP was extracted + let origin_ip = context.get_variable("origin_ip"); + assert!(origin_ip.is_some(), "Should extract origin IP"); + + println!("Extracted and reused origin_ip: {:?}", origin_ip); + + // Both steps should have succeeded + assert!(result.steps[0].success, "First step should succeed"); + assert!( + result.steps[1].success, + "Second step (using extracted var) should succeed" + ); +} + +#[tokio::test] +async fn 
test_header_extraction() { + let scenario = Scenario { + name: "Header Extraction Test".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Get Response with Headers".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![VariableExtraction { + name: "content_type".to_string(), + extractor: Extractor::Header("content-type".to_string()), + }], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Should succeed"); + + // Content-type header should be extracted + let content_type = context.get_variable("content_type"); + assert!(content_type.is_some(), "Should extract content-type header"); + + if let Some(ct) = content_type { + println!("Extracted content-type: {}", ct); + assert!( + ct.contains("json") || ct.contains("text"), + "Content-type should be a valid MIME type" + ); + } +} + +#[tokio::test] +async fn test_multiple_extractions_in_single_step() { + // httpbin /json returns {"slideshow": {"author": "...", "date": "...", "title": "...", ...}} + let scenario = Scenario { + name: "Multiple Extractions".to_string(), + weight: 1.0, + steps: vec![Step { + name: "Get JSON with Multiple Extractions".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![ + VariableExtraction { + name: "author".to_string(), + extractor: Extractor::JsonPath("$.slideshow.author".to_string()), + }, + VariableExtraction { + name: "title".to_string(), + extractor: Extractor::JsonPath("$.slideshow.title".to_string()), + }, + VariableExtraction { + name: "content_type".to_string(), + extractor: 
Extractor::Header("content-type".to_string()), + }, + ], + assertions: vec![], + think_time: None, + }], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + assert!(result.success, "Should succeed"); + + // Verify all extractions worked + assert!( + context.get_variable("author").is_some(), + "Should extract author" + ); + assert!( + context.get_variable("title").is_some(), + "Should extract title" + ); + assert!( + context.get_variable("content_type").is_some(), + "Should extract content_type" + ); + + println!("Extracted variables:"); + println!(" author: {:?}", context.get_variable("author")); + println!(" title: {:?}", context.get_variable("title")); + println!(" content_type: {:?}", context.get_variable("content_type")); +} + +#[tokio::test] +async fn test_shopping_flow_with_extraction() { + // Realistic multi-step flow using variable extraction with httpbin + let scenario = Scenario { + name: "Multi-Step Flow with Extraction".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Get JSON Data".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![VariableExtraction { + name: "author".to_string(), + extractor: Extractor::JsonPath("$.slideshow.author".to_string()), + }], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Post Data with Extracted Value".to_string(), + request: RequestConfig { + method: "POST".to_string(), + path: "/post".to_string(), + body: Some( + r#"{ + "author": "${author}", + "timestamp": "${timestamp}" + }"# + .to_string(), + ), + headers: { + let mut headers = HashMap::new(); + headers.insert("Content-Type".to_string(), "application/json".to_string()); + headers + }, + }, + extractions: 
vec![VariableExtraction { + name: "post_url".to_string(), + extractor: Extractor::JsonPath("$.url".to_string()), + }], + assertions: vec![], + think_time: Some(ThinkTime::Fixed(Duration::from_millis(500))), + }, + Step { + name: "Final GET".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![VariableExtraction { + name: "final_origin".to_string(), + extractor: Extractor::JsonPath("$.origin".to_string()), + }], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // All steps should succeed + assert!(result.success, "Multi-step flow should succeed"); + assert_eq!(result.steps_completed, 3); + + // Verify extractions + assert!(context.get_variable("author").is_some()); + assert!(context.get_variable("post_url").is_some()); + assert!(context.get_variable("final_origin").is_some()); + + println!("\nMulti-Step Flow Extracted Variables:"); + println!(" author: {:?}", context.get_variable("author")); + println!(" post_url: {:?}", context.get_variable("post_url")); + println!(" final_origin: {:?}", context.get_variable("final_origin")); +} + +#[tokio::test] +async fn test_extraction_failure_doesnt_stop_scenario() { + // Test that failed extraction doesn't stop the scenario + let scenario = Scenario { + name: "Partial Extraction Failure".to_string(), + weight: 1.0, + steps: vec![ + Step { + name: "Step with Mixed Extractions".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/json".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![ + VariableExtraction { + name: "author".to_string(), + extractor: Extractor::JsonPath("$.slideshow.author".to_string()), + }, + VariableExtraction { + name: 
"nonexistent".to_string(), + extractor: Extractor::JsonPath("$.does.not.exist".to_string()), + }, + ], + assertions: vec![], + think_time: None, + }, + Step { + name: "Next Step".to_string(), + request: RequestConfig { + method: "GET".to_string(), + path: "/get".to_string(), + body: None, + headers: HashMap::new(), + }, + extractions: vec![], + assertions: vec![], + think_time: None, + }, + ], + }; + + let client = create_test_client(); + let executor = ScenarioExecutor::new(BASE_URL.to_string(), client); + let mut context = ScenarioContext::new(); + + let result = executor.execute(&scenario, &mut context).await; + + // Scenario should still succeed + assert!( + result.success, + "Scenario should succeed even with failed extraction" + ); + assert_eq!(result.steps_completed, 2); + + // author should be extracted + assert!(context.get_variable("author").is_some()); + + // nonexistent should NOT be in context (extraction failed) + assert!(context.get_variable("nonexistent").is_none()); +} diff --git a/tests/yaml_config_tests.rs b/tests/yaml_config_tests.rs new file mode 100644 index 0000000..8346e7a --- /dev/null +++ b/tests/yaml_config_tests.rs @@ -0,0 +1,733 @@ +//! Integration tests for YAML configuration (Issue #37). +//! +//! These tests validate YAML config file parsing, validation, and conversion. 
+ +use rust_loadtest::yaml_config::{YamlConfig, YamlConfigError}; +use std::fs; +use tempfile::NamedTempFile; + +#[test] +fn test_simple_yaml_config() { + let yaml = r#" +version: "1.0" +metadata: + name: "Simple Test" + description: "Basic API test" +config: + baseUrl: "https://api.example.com" + workers: 10 + duration: "5m" +load: + model: "rps" + target: 100 +scenarios: + - name: "Health Check" + steps: + - request: + method: "GET" + path: "/health" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + + assert_eq!(config.version, "1.0"); + assert_eq!(config.metadata.name, Some("Simple Test".to_string())); + assert_eq!(config.config.base_url, "https://api.example.com"); + assert_eq!(config.config.workers, 10); + assert_eq!(config.scenarios.len(), 1); + + println!("✅ Simple YAML config parses correctly"); +} + +#[test] +fn test_yaml_config_from_file() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let temp_file = NamedTempFile::new().unwrap(); + fs::write(temp_file.path(), yaml).unwrap(); + + let config = YamlConfig::from_file(temp_file.path()).unwrap(); + assert_eq!(config.version, "1.0"); + assert_eq!(config.config.base_url, "https://test.com"); + + println!("✅ YAML config loads from file"); +} + +#[test] +fn test_yaml_duration_formats() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "30s" + timeout: 15 +load: + model: "concurrent" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" + thinkTime: "2s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + + let duration = config.config.duration.to_std_duration().unwrap(); + assert_eq!(duration.as_secs(), 30); + + let timeout = config.config.timeout.to_std_duration().unwrap(); + assert_eq!(timeout.as_secs(), 15); + + let scenarios = config.to_scenarios().unwrap(); + let 
think_time = scenarios[0].steps[0].think_time.as_ref().unwrap(); + match think_time { + rust_loadtest::scenario::ThinkTime::Fixed(d) => assert_eq!(d.as_secs(), 2), + _ => panic!("Expected fixed think time"), + } + + println!("✅ Duration formats (seconds and strings) work"); +} + +#[test] +fn test_yaml_load_models() { + // Test RPS model + let yaml_rps = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "rps" + target: 50 +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let config = YamlConfig::from_str(yaml_rps).unwrap(); + let load_model = config.load.to_load_model().unwrap(); + match load_model { + rust_loadtest::load_models::LoadModel::Rps { target_rps } => { + assert_eq!(target_rps, 50.0); + } + _ => panic!("Expected RPS load model"), + } + + // Test Ramp model + let yaml_ramp = r#" +version: "1.0" +config: + baseUrl: "https://test.com" + duration: "1m" +load: + model: "ramp" + min: 10 + max: 100 + rampDuration: "30s" +scenarios: + - name: "Test" + steps: + - request: + method: "GET" + path: "/" +"#; + + let config = YamlConfig::from_str(yaml_ramp).unwrap(); + let load_model = config.load.to_load_model().unwrap(); + match load_model { + rust_loadtest::load_models::LoadModel::RampRps { + min_rps, + max_rps, + ramp_duration, + } => { + assert_eq!(min_rps, 10.0); + assert_eq!(max_rps, 100.0); + assert_eq!(ramp_duration.as_secs(), 30); + } + _ => panic!("Expected Ramp load model"), + } + + println!("✅ All load model types parse correctly"); +} + +#[test] +fn test_yaml_scenarios_with_assertions() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "API Test" + weight: 1.5 + steps: + - name: "Create Resource" + request: + method: "POST" + path: "/api/resource" + body: '{"name": "test"}' + assertions: + - type: "statusCode" + expected: 201 + - type: "jsonPath" + path: "$.id" + - type: 
"responseTime" + max: "1s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios.len(), 1); + assert_eq!(scenarios[0].name, "API Test"); + assert_eq!(scenarios[0].weight, 1.5); + assert_eq!(scenarios[0].steps.len(), 1); + assert_eq!(scenarios[0].steps[0].assertions.len(), 3); + + println!("✅ Scenarios with assertions convert correctly"); +} + +#[test] +fn test_yaml_scenarios_with_extractors() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://shop.example.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Shopping Flow" + steps: + - name: "Search Products" + request: + method: "GET" + path: "/api/search?q=laptop" + extract: + - type: "jsonPath" + name: "productId" + jsonPath: "$.products[0].id" + - type: "header" + name: "sessionToken" + header: "X-Session-Token" + thinkTime: "2s" + + - name: "View Product" + request: + method: "GET" + path: "/products/${productId}" + thinkTime: "3s" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = config.to_scenarios().unwrap(); + + assert_eq!(scenarios[0].steps.len(), 2); + assert_eq!(scenarios[0].steps[0].extractions.len(), 2); + + // Check extractor types + let extraction = &scenarios[0].steps[0].extractions[0]; + assert_eq!(extraction.name, "productId"); + match &extraction.extractor { + rust_loadtest::scenario::Extractor::JsonPath(path) => { + assert_eq!(path, "$.products[0].id"); + } + _ => panic!("Expected JsonPath extractor"), + } + + println!("✅ Scenarios with extractors convert correctly"); +} + +#[test] +fn test_yaml_query_params() { + let yaml = r#" +version: "1.0" +config: + baseUrl: "https://api.example.com" + duration: "1m" +load: + model: "concurrent" +scenarios: + - name: "Search" + steps: + - request: + method: "GET" + path: "/search" + queryParams: + q: "laptop" + limit: "20" + sort: "price" +"#; + + let config = YamlConfig::from_str(yaml).unwrap(); + let scenarios = 
config.to_scenarios().unwrap();

    // Query params should be serialized into the request path's query string.
    let path = &scenarios[0].steps[0].request.path;
    assert!(path.contains("?"));
    assert!(path.contains("q=laptop"));
    assert!(path.contains("limit=20"));
    assert!(path.contains("sort=price"));

    println!("✅ Query parameters are appended to path");
}

/// Per-step `headers` maps must survive conversion to runtime scenarios.
#[test]
fn test_yaml_custom_headers() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://api.example.com"
  duration: "1m"
  customHeaders: "Authorization: Bearer token123"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request:
          method: "GET"
          path: "/api/data"
          headers:
            X-Custom-Header: "value"
            Content-Type: "application/json"
"#;

    let config = YamlConfig::from_str(yaml).unwrap();
    let scenarios = config.to_scenarios().unwrap();

    let headers = &scenarios[0].steps[0].request.headers;
    assert_eq!(headers.get("X-Custom-Header"), Some(&"value".to_string()));
    assert_eq!(
        headers.get("Content-Type"),
        Some(&"application/json".to_string())
    );

    println!("✅ Custom headers work correctly");
}

/// A config declaring `version: "2.0"` must be rejected at parse/validate time
/// with a `Validation` error that mentions the version problem.
#[test]
fn test_validation_unsupported_version() {
    let yaml = r#"
version: "2.0"
config:
  baseUrl: "https://test.com"
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request:
          method: "GET"
          path: "/"
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_err());

    match result.unwrap_err() {
        YamlConfigError::Validation(msg) => {
            // Accept any of the phrasings the validator may use, as long as
            // the message clearly points at the version field.
            assert!(
                msg.contains("version")
                    && (msg.contains("too new")
                        || msg.contains("2.0")
                        || msg.contains("Unsupported")),
                "Expected version validation message, got: {}",
                msg
            );
            println!("✅ Unsupported version rejected: {}", msg);
        }
        _ => panic!("Expected validation error"),
    }
}

/// `baseUrl` values that are not http(s) URLs must fail validation.
#[test]
fn test_validation_invalid_url() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "not-a-url"
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request:
          method: "GET"
          path: "/"
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_err());

    match result.unwrap_err() {
        YamlConfigError::Validation(msg) => {
            assert!(
                msg.contains("baseUrl") || msg.contains("http") || msg.contains("URL"),
                "Expected URL validation message, got: {}",
                msg
            );
            println!("✅ Invalid URL rejected: {}", msg);
        }
        _ => panic!("Expected validation error"),
    }
}

/// `workers: 0` is meaningless for a load test and must be rejected.
#[test]
fn test_validation_zero_workers() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  workers: 0
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request:
          method: "GET"
          path: "/"
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_err());

    match result.unwrap_err() {
        YamlConfigError::Validation(msg) => {
            assert!(
                msg.contains("workers"),
                "Expected workers validation message, got: {}",
                msg
            );
            println!("✅ Zero workers rejected: {}", msg);
        }
        _ => panic!("Expected validation error"),
    }
}

/// An empty `scenarios` list must be rejected — there is nothing to run.
#[test]
fn test_validation_no_scenarios() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  duration: "1m"
load:
  model: "concurrent"
scenarios: []
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_err());

    match result.unwrap_err() {
        YamlConfigError::Validation(msg) => {
            assert!(msg.contains("At least one scenario"));
            println!("✅ Empty scenarios rejected: {}", msg);
        }
        _ => panic!("Expected validation error"),
    }
}

/// A scenario with an empty `steps` list must be rejected.
#[test]
fn test_validation_empty_scenario_steps() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Empty Scenario"
    steps: []
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_err());

    match result.unwrap_err() {
        YamlConfigError::Validation(msg) => {
            assert!(msg.contains("must have at least one step"));
            println!("✅ Empty scenario steps rejected: {}", msg);
        }
        _ => panic!("Expected validation error"),
    }
}

/// A bad duration string parses (it's just a string field) but must fail
/// when converted to a `std::time::Duration`.
#[test]
fn test_validation_invalid_duration_format() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  duration: "invalid"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request:
          method: "GET"
          path: "/"
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_ok()); // Parse succeeds

    // The error only surfaces when the duration string is converted.
    let config = result.unwrap();
    let duration_result = config.config.duration.to_std_duration();
    assert!(duration_result.is_err());

    println!("✅ Invalid duration format detected during conversion");
}

/// Scenario `weight` values are carried through to the runtime scenarios as floats.
#[test]
fn test_multiple_scenarios_different_weights() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Heavy Traffic"
    weight: 70
    steps:
      - request:
          method: "GET"
          path: "/api/heavy"

  - name: "Light Traffic"
    weight: 30
    steps:
      - request:
          method: "GET"
          path: "/api/light"
"#;

    let config = YamlConfig::from_str(yaml).unwrap();
    let scenarios = config.to_scenarios().unwrap();

    assert_eq!(scenarios.len(), 2);
    assert_eq!(scenarios[0].weight, 70.0);
    assert_eq!(scenarios[1].weight, 30.0);

    println!("✅ Multiple scenarios with different weights work");
}

/// End-to-end parse of a realistic config exercising every feature at once:
/// metadata, ramp load model, weighted scenarios, extraction (`${var}`
/// interpolation), assertions, think times, and custom bodies/headers.
#[test]
fn test_complex_ecommerce_scenario() {
    let yaml = r#"
version: "1.0"
metadata:
  name: "E-commerce Load Test"
  description: "Full shopping flow"
  author: "test@example.com"
  tags: ["production", "critical"]
config:
  baseUrl: "https://shop.example.com"
  workers: 50
  duration: "10m"
  timeout: "30s"
  skipTlsVerify: false
load:
  model: "ramp"
  min: 10
  max: 100
  rampDuration: "2m"
scenarios:
  - name: "Browse and Purchase"
    weight: 70
    steps:
      - name: "Homepage"
        request:
          method: "GET"
          path: "/"
        assertions:
          - type: "statusCode"
            expected: 200
          - type: "responseTime"
            max: "1s"
        thinkTime: "2s"

      - name: "Search"
        request:
          method: "GET"
          path: "/search"
          queryParams:
            q: "laptop"
            limit: "20"
        extract:
          - type: "jsonPath"
            name: "productId"
            jsonPath: "$.products[0].id"
          - type: "jsonPath"
            name: "productPrice"
            jsonPath: "$.products[0].price"
        assertions:
          - type: "statusCode"
            expected: 200
          - type: "jsonPath"
            path: "$.products"
        thinkTime: "3s"

      - name: "View Product"
        request:
          method: "GET"
          path: "/products/${productId}"
        assertions:
          - type: "statusCode"
            expected: 200
          - type: "bodyContains"
            text: "Add to Cart"
        thinkTime: "5s"

      - name: "Add to Cart"
        request:
          method: "POST"
          path: "/api/cart"
          headers:
            Content-Type: "application/json"
          body: '{"productId": "${productId}", "quantity": 1}'
        extract:
          - type: "jsonPath"
            name: "cartId"
            jsonPath: "$.cartId"
        assertions:
          - type: "statusCode"
            expected: 201
          - type: "jsonPath"
            path: "$.cartId"
        thinkTime: "1s"

  - name: "Quick Browse"
    weight: 30
    steps:
      - request:
          method: "GET"
          path: "/"
      - request:
          method: "GET"
          path: "/products/featured"
"#;

    let config = YamlConfig::from_str(yaml).unwrap();

    // Validate metadata
    assert_eq!(
        config.metadata.name,
        Some("E-commerce Load Test".to_string())
    );
    assert_eq!(config.metadata.tags.len(), 2);

    // Validate config
    assert_eq!(config.config.workers, 50);
    assert!(!config.config.skip_tls_verify);

    // Validate load model: "ramp" must map to RampRps with min/max carried over.
    let load_model = config.load.to_load_model().unwrap();
    match load_model {
        rust_loadtest::load_models::LoadModel::RampRps {
            min_rps, max_rps, ..
        } => {
            assert_eq!(min_rps, 10.0);
            assert_eq!(max_rps, 100.0);
        }
        _ => panic!("Expected RampRps model"),
    }

    // Validate scenarios
    let scenarios = config.to_scenarios().unwrap();
    assert_eq!(scenarios.len(), 2);
    assert_eq!(scenarios[0].steps.len(), 4);
    assert_eq!(scenarios[1].steps.len(), 2);

    println!("✅ Complex e-commerce scenario parses completely");
}

/// Omitted optional fields must fall back to documented defaults:
/// workers = 10, timeout = 30s, scenario weight = 1.0.
#[test]
fn test_default_values() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request:
          method: "GET"
          path: "/"
"#;

    let config = YamlConfig::from_str(yaml).unwrap();

    // Default workers should be 10
    assert_eq!(config.config.workers, 10);

    // Default timeout should be 30 seconds
    let timeout = config.config.timeout.to_std_duration().unwrap();
    assert_eq!(timeout.as_secs(), 30);

    // Default scenario weight should be 1.0
    assert_eq!(config.scenarios[0].weight, 1.0);

    println!("✅ Default values are applied correctly");
}

/// Malformed YAML (a scalar `request` followed by a deeper-indented mapping)
/// must surface as a `YamlParse` error with a non-empty message.
#[test]
fn test_parse_error_helpful_message() {
    let yaml = r#"
version: "1.0"
config:
  baseUrl: "https://test.com"
  duration: "1m"
load:
  model: "concurrent"
scenarios:
  - name: "Test"
    steps:
      - request
          method: "GET" # Missing colon after 'request'
          path: "/"
"#;

    let result = YamlConfig::from_str(yaml);
    assert!(result.is_err());

    match result.unwrap_err() {
        YamlConfigError::YamlParse(e) => {
            let error_msg = e.to_string();
            assert!(!error_msg.is_empty());
            println!("✅ Parse error provides message: {}", error_msg);
        }
        _ => panic!("Expected YAML parse error"),
    }
}