diff --git a/.github/workflows/ci.json b/.github/workflows/ci.json index e5450c12..1981b60a 100644 --- a/.github/workflows/ci.json +++ b/.github/workflows/ci.json @@ -43,6 +43,9 @@ { "run": "npm run derive:registry" }, + { + "run": "rm -rf artifacts" + }, { "run": "npm run generate:summary", "env": { @@ -174,10 +177,13 @@ { "run": "npm install" }, + { + "run": "rm -rf artifacts" + }, { "uses": "actions/download-artifact@v4", "with": { - "path": "./downloaded" + "path": "./artifacts" } }, { @@ -186,10 +192,10 @@ { "run": "npm run generate:summary", "env": { - "TEST_RESULTS_GLOBS": "downloaded/test-results/**/*junit*.xml\ndownloaded/pester-junit-*/pester-junit.xml\n", + "TEST_RESULTS_GLOBS": "artifacts/test-results/**/*junit*.xml\nartifacts/pester-junit-*/pester-junit.xml\n", "REQ_MAPPING_FILE": "requirements.json", "DISPATCHER_REGISTRY": "dispatchers.json", - "EVIDENCE_DIR": "downloaded/evidence" + "EVIDENCE_DIR": "artifacts/evidence" } }, { diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0dc9356..e892012b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,6 +23,7 @@ jobs: - run: npm run link:check - run: npm run test:ci - run: npm run derive:registry + - run: rm -rf artifacts - run: npm run generate:summary env: TEST_RESULTS_GLOBS: test-results/*junit*.xml @@ -142,16 +143,17 @@ jobs: node-version: 24 - run: npm run check:node - run: npm install + - run: rm -rf artifacts - uses: actions/download-artifact@v4 with: - path: ./downloaded + path: ./artifacts - run: npm run derive:registry - run: npm run generate:summary env: TEST_RESULTS_GLOBS: | - downloaded/test-results/**/*junit*.xml - downloaded/pester-junit-*/pester-junit.xml + artifacts/test-results/**/*junit*.xml + artifacts/pester-junit-*/pester-junit.xml REQ_MAPPING_FILE: requirements.json DISPATCHER_REGISTRY: dispatchers.json - EVIDENCE_DIR: downloaded/evidence + EVIDENCE_DIR: artifacts/evidence - run: npx tsx scripts/print-pester-traceability.ts >> 
"$GITHUB_STEP_SUMMARY" diff --git a/AGENTS.md b/AGENTS.md index f98f4c1f..81a9a53c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -31,6 +31,6 @@ - Run `npm install` to ensure Node dependencies are available. - Run `npm test`. - Run `npm run lint:md` to lint Markdown files. -- Run `npx --yes markdown-link-check -q -c .markdown-link-check.json README.md $(find docs scripts -name '*.md')` to verify links. +- Run `npx --yes markdown-link-check -c .markdown-link-check.json README.md $(find docs scripts -name '*.md')` to verify links and ensure failures are visible. - Run `actionlint` to validate GitHub Actions workflows. - Run `pwsh -NoLogo -Command "$cfg = New-PesterConfiguration; $cfg.Run.Path = './tests/pester'; $cfg.TestResult.Enabled = $false; Invoke-Pester -Configuration $cfg"` and ensure all tests pass (XML output is intentionally disabled). diff --git a/README.md b/README.md index d33ca36f..168de0cd 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ npm install npm test ``` -For CI, `npm run test:ci` emits a JUnit XML report that [scripts/generate-ci-summary.ts](scripts/generate-ci-summary.ts) parses to build requirement traceability files in OS‑specific subdirectories (e.g., `artifacts/windows`, `artifacts/linux`) based on the `RUNNER_OS` environment variable. +For CI, `npm run test:ci` emits a JUnit XML report that [scripts/generate-ci-summary.ts](scripts/generate-ci-summary.ts) parses to build requirement traceability files in OS‑specific subdirectories (e.g., `artifacts/windows`, `artifacts/linux`) based on the `RUNNER_OS` environment variable. The summary script searches `artifacts/` by default; set `TEST_RESULTS_GLOBS` if your reports are elsewhere. Pester tests cover the dispatcher and helper modules. See [docs/testing-pester.md](docs/testing-pester.md) for guidelines on using the canonical argument helper and adding new tests. 
Run them with: diff --git a/docs/architecture.md b/docs/architecture.md index c655d95a..c223a11f 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -6,6 +6,8 @@ The Open Source LabVIEW Actions project exposes multiple LabVIEW CI/CD steps thr `Invoke-OSAction.ps1` routes incoming requests to the appropriate adapter script. The dispatcher discovers available actions, forwards command-line arguments, and preserves exit codes. +![Dispatcher to adapter to action flow](images/architecture.svg) + ## Adapter scripts Each action lives in a `scripts/` folder. These PowerShell scripts implement the build or test work and are invoked by the dispatcher with the JSON arguments supplied by the GitHub Action. diff --git a/docs/contributing-docs.md b/docs/contributing-docs.md index a0461487..c9b36a7a 100644 --- a/docs/contributing-docs.md +++ b/docs/contributing-docs.md @@ -55,7 +55,11 @@ MkDocs serves the site at by default. The server automa ## JUnit integration -The CI pipeline collects JUnit XML output from both Node and PowerShell tests. `scripts/generate-ci-summary.ts` parses these files to build the requirement traceability report. Use `npm run test:ci` to produce the Node JUnit report when verifying documentation updates. +The CI pipeline collects JUnit XML output from both Node and PowerShell tests. `scripts/generate-ci-summary.ts` parses these files to build the requirement traceability report. Use `npm run test:ci` to produce the Node JUnit report when verifying documentation updates. 
By default, the summary script only searches `artifacts/` for JUnit XML files; if your results are elsewhere, pass a glob via `TEST_RESULTS_GLOBS`, for example: + +```bash +TEST_RESULTS_GLOBS='test-results/*junit*.xml' npm run generate:summary +``` ### Pester properties diff --git a/docs/images/architecture.svg b/docs/images/architecture.svg new file mode 100644 index 00000000..9250cda2 --- /dev/null +++ b/docs/images/architecture.svg @@ -0,0 +1,43 @@ + + + + + + +%3 + + + +Dispatcher + +Dispatcher + + + +Adapter + +Adapter + + + +Dispatcher->Adapter + + + + + +Action + +Action + + + +Adapter->Action + + + + + diff --git a/docs/index.md b/docs/index.md index e3b2466d..821a840a 100644 --- a/docs/index.md +++ b/docs/index.md @@ -39,4 +39,20 @@ Open Source LabVIEW Actions unifies LabVIEW CI/CD scripts behind a single PowerS | Workflow | Purpose | | --- | --- | -| [run-pester-tests](workflows/run-pester-tests.md) | Run Pester tests in a target repository. | +| [add-token-to-labview](workflows/add-token-to-labview.md) | Add a custom library path token to the LabVIEW INI file so LabVIEW can locate project libraries. | +| [apply-vipc](workflows/apply-vipc.md) | Apply a VI Package Configuration (.vipc) file to a specific LabVIEW installation using g-cli. | +| [build-lvlibp](workflows/build-lvlibp.md) | Build a LabVIEW project’s build specification into a Packed Project Library (.lvlibp). | +| [build-vi-package](workflows/build-vi-package.md) | Update VIPB display information and build a VI package using g-cli. | +| [build](workflows/build.md) | Automate building the LabVIEW Icon Editor project, including cleaning, building libraries, and packaging. | +| [close-labview](workflows/close-labview.md) | Gracefully close a running LabVIEW instance via g-cli. | +| [generate-release-notes](workflows/generate-release-notes.md) | Generate release notes from the git history and write them to a markdown file. 
| +| [missing-in-project](workflows/missing-in-project.md) | Check that all files in a LabVIEW project are present by scanning for items missing from the `.lvproj`. | +| [modify-vipb-display-info](workflows/modify-vipb-display-info.md) | Update display information in a VIPB file and rebuild the VI package. | +| [prepare-labview-source](workflows/prepare-labview-source.md) | Run PrepareIESource.vi via g-cli to unzip components and configure LabVIEW for building. | +| [rename-file](workflows/rename-file.md) | Rename a file if it exists. | +| [restore-setup-lv-source](workflows/restore-setup-lv-source.md) | Restore the LabVIEW source setup by unzipping the LabVIEW Icon API and removing the INI token. | +| [revert-development-mode](workflows/revert-development-mode.md) | Restore the repository from development mode by restoring packaged sources and closing LabVIEW. | +| [run-pester-tests](workflows/run-pester-tests.md) | Run PowerShell Pester tests in a repository. | +| [run-unit-tests](workflows/run-unit-tests.md) | Run LabVIEW unit tests via the LabVIEW Unit Test Framework CLI and report pass/fail/error using standard exit codes. | +| [set-development-mode](workflows/set-development-mode.md) | Configure the repository for development mode by removing packed libraries, adding tokens, preparing sources, and closing LabVIEW. | +| [setup-mkdocs](workflows/setup-mkdocs.md) | Install a pinned MkDocs with caching. | diff --git a/docs/workflows/add-token-to-labview.md b/docs/workflows/add-token-to-labview.md new file mode 100644 index 00000000..a5683a74 --- /dev/null +++ b/docs/workflows/add-token-to-labview.md @@ -0,0 +1,49 @@ +# add-token-to-labview workflow + +## Purpose + +Dispatch the [add-token-to-labview](../actions/add-token-to-labview.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. 
Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. | + +## Example + +```yaml +name: add-token-to-labview +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + add-token-to-labview: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run add-token-to-labview + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName add-token-to-labview -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/apply-vipc.md b/docs/workflows/apply-vipc.md new file mode 100644 index 00000000..8ab84db1 --- /dev/null +++ b/docs/workflows/apply-vipc.md @@ -0,0 +1,49 @@ +# apply-vipc workflow + +## Purpose + +Dispatch the [apply-vipc](../actions/apply-vipc.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: apply-vipc +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + apply-vipc: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run apply-vipc + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName apply-vipc -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/build-lvlibp.md b/docs/workflows/build-lvlibp.md new file mode 100644 index 00000000..e7a4e827 --- /dev/null +++ b/docs/workflows/build-lvlibp.md @@ -0,0 +1,49 @@ +# build-lvlibp workflow + +## Purpose + +Dispatch the [build-lvlibp](../actions/build-lvlibp.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: build-lvlibp +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + build-lvlibp: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run build-lvlibp + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName build-lvlibp -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/build-vi-package.md b/docs/workflows/build-vi-package.md new file mode 100644 index 00000000..e5ed5147 --- /dev/null +++ b/docs/workflows/build-vi-package.md @@ -0,0 +1,49 @@ +# build-vi-package workflow + +## Purpose + +Dispatch the [build-vi-package](../actions/build-vi-package.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: build-vi-package +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + build-vi-package: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run build-vi-package + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName build-vi-package -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/build.md b/docs/workflows/build.md new file mode 100644 index 00000000..0788c141 --- /dev/null +++ b/docs/workflows/build.md @@ -0,0 +1,49 @@ +# build workflow + +## Purpose + +Dispatch the [build](../actions/build.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: build +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run build + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName build -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/close-labview.md b/docs/workflows/close-labview.md new file mode 100644 index 00000000..b552d2be --- /dev/null +++ b/docs/workflows/close-labview.md @@ -0,0 +1,49 @@ +# close-labview workflow + +## Purpose + +Dispatch the [close-labview](../actions/close-labview.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: close-labview +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + close-labview: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run close-labview + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName close-labview -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/generate-release-notes.md b/docs/workflows/generate-release-notes.md new file mode 100644 index 00000000..462d9b3a --- /dev/null +++ b/docs/workflows/generate-release-notes.md @@ -0,0 +1,49 @@ +# generate-release-notes workflow + +## Purpose + +Dispatch the [generate-release-notes](../actions/generate-release-notes.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: generate-release-notes +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + generate-release-notes: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run generate-release-notes + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName generate-release-notes -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/missing-in-project.md b/docs/workflows/missing-in-project.md new file mode 100644 index 00000000..a50fa93e --- /dev/null +++ b/docs/workflows/missing-in-project.md @@ -0,0 +1,49 @@ +# missing-in-project workflow + +## Purpose + +Dispatch the [missing-in-project](../actions/missing-in-project.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: missing-in-project +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + missing-in-project: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run missing-in-project + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName missing-in-project -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/modify-vipb-display-info.md b/docs/workflows/modify-vipb-display-info.md new file mode 100644 index 00000000..b30f8394 --- /dev/null +++ b/docs/workflows/modify-vipb-display-info.md @@ -0,0 +1,49 @@ +# modify-vipb-display-info workflow + +## Purpose + +Dispatch the [modify-vipb-display-info](../actions/modify-vipb-display-info.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: modify-vipb-display-info +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + modify-vipb-display-info: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run modify-vipb-display-info + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName modify-vipb-display-info -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/prepare-labview-source.md b/docs/workflows/prepare-labview-source.md new file mode 100644 index 00000000..3950ba70 --- /dev/null +++ b/docs/workflows/prepare-labview-source.md @@ -0,0 +1,49 @@ +# prepare-labview-source workflow + +## Purpose + +Dispatch the [prepare-labview-source](../actions/prepare-labview-source.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: prepare-labview-source +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + prepare-labview-source: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run prepare-labview-source + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName prepare-labview-source -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/rename-file.md b/docs/workflows/rename-file.md new file mode 100644 index 00000000..779af7a2 --- /dev/null +++ b/docs/workflows/rename-file.md @@ -0,0 +1,49 @@ +# rename-file workflow + +## Purpose + +Dispatch the [rename-file](../actions/rename-file.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: rename-file +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + rename-file: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run rename-file + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName rename-file -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/restore-setup-lv-source.md b/docs/workflows/restore-setup-lv-source.md new file mode 100644 index 00000000..bdbce0dc --- /dev/null +++ b/docs/workflows/restore-setup-lv-source.md @@ -0,0 +1,49 @@ +# restore-setup-lv-source workflow + +## Purpose + +Dispatch the [restore-setup-lv-source](../actions/restore-setup-lv-source.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: restore-setup-lv-source +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + restore-setup-lv-source: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run restore-setup-lv-source + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName restore-setup-lv-source -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/revert-development-mode.md b/docs/workflows/revert-development-mode.md new file mode 100644 index 00000000..be78cfba --- /dev/null +++ b/docs/workflows/revert-development-mode.md @@ -0,0 +1,49 @@ +# revert-development-mode workflow + +## Purpose + +Dispatch the [revert-development-mode](../actions/revert-development-mode.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: revert-development-mode +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + revert-development-mode: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run revert-development-mode + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName revert-development-mode -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/run-pester-tests.md b/docs/workflows/run-pester-tests.md index 08bc4fab..f6074c9d 100644 --- a/docs/workflows/run-pester-tests.md +++ b/docs/workflows/run-pester-tests.md @@ -2,37 +2,35 @@ ## Purpose -Run Pester tests in a target repository by dispatching the [`run-pester-tests`](../actions/run-pester-tests.md) action through `Invoke-OSAction.ps1`. +Dispatch the [run-pester-tests](../actions/run-pester-tests.md) action to a target repository through `Invoke-OSAction.ps1`. ## Inputs | Input | Description | | --- | --- | -| `repository` | Repository in `owner/repo` format to test. | +| `repository` | Repository in `owner/repo` format to operate on. | | `ref` | Branch or tag to check out. Defaults to `main`. | ## Required secrets | Secret | Description | | --- | --- | -| `REPO_TOKEN` | Personal access token with permission to read the target repository. Used by `actions/checkout` when accessing another repository. | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| ## Example ```yaml -name: Run Pester tests - +name: run-pester-tests on: workflow_dispatch: inputs: repository: - description: 'owner/repo of the repository to test' + description: 'owner/repo of the repository to target' required: true ref: description: 'Branch or tag to check out' required: false default: 'main' - jobs: run-pester-tests: runs-on: ubuntu-latest @@ -45,7 +43,7 @@ jobs: ref: ${{ inputs.ref }} path: target token: ${{ secrets.REPO_TOKEN }} - - name: Run Pester tests + - name: Run run-pester-tests shell: pwsh run: ./actions/Invoke-OSAction.ps1 -ActionName run-pester-tests -WorkingDirectory "${{ github.workspace }}/target" ``` diff --git a/docs/workflows/run-unit-tests.md b/docs/workflows/run-unit-tests.md new file mode 100644 index 00000000..6c732e35 --- /dev/null +++ b/docs/workflows/run-unit-tests.md @@ -0,0 +1,49 @@ +# run-unit-tests workflow + +## Purpose + +Dispatch the [run-unit-tests](../actions/run-unit-tests.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: run-unit-tests +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + run-unit-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run run-unit-tests + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName run-unit-tests -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/set-development-mode.md b/docs/workflows/set-development-mode.md new file mode 100644 index 00000000..e7af20d1 --- /dev/null +++ b/docs/workflows/set-development-mode.md @@ -0,0 +1,49 @@ +# set-development-mode workflow + +## Purpose + +Dispatch the [set-development-mode](../actions/set-development-mode.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: set-development-mode +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + set-development-mode: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run set-development-mode + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName set-development-mode -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/docs/workflows/setup-mkdocs.md b/docs/workflows/setup-mkdocs.md new file mode 100644 index 00000000..5953b2ee --- /dev/null +++ b/docs/workflows/setup-mkdocs.md @@ -0,0 +1,49 @@ +# setup-mkdocs workflow + +## Purpose + +Dispatch the [setup-mkdocs](../actions/setup-mkdocs.md) action to a target repository through `Invoke-OSAction.ps1`. + +## Inputs + +| Input | Description | +| --- | --- | +| `repository` | Repository in `owner/repo` format to operate on. | +| `ref` | Branch or tag to check out. Defaults to `main`. | + +## Required secrets + +| Secret | Description | +| --- | --- | +| `REPO_TOKEN` | Personal access token with permission to read the target repository. 
| + +## Example + +```yaml +name: setup-mkdocs +on: + workflow_dispatch: + inputs: + repository: + description: 'owner/repo of the repository to target' + required: true + ref: + description: 'Branch or tag to check out' + required: false + default: 'main' +jobs: + setup-mkdocs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Checkout target repository + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repository }} + ref: ${{ inputs.ref }} + path: target + token: ${{ secrets.REPO_TOKEN }} + - name: Run setup-mkdocs + shell: pwsh + run: ./actions/Invoke-OSAction.ps1 -ActionName setup-mkdocs -WorkingDirectory "${{ github.workspace }}/target" +``` diff --git a/mkdocs.yml b/mkdocs.yml index 0905958a..55143cfa 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,6 +1,39 @@ site_name: Open Source LabVIEW Actions -site_url: https://open-source-actions.github.io/open-source-actions/ +site_url: https://labview-community-ci-cd.github.io/open-source/ +repo_url: https://github.com/LabVIEW-Community-CI-CD/open-source +repo_name: open-source +edit_uri: edit/actions/docs/ +site_description: Actions for automating LabVIEW workflows +site_author: Open Source Actions Team + +strict: true +watch: + - docs + - scripts docs_dir: docs + +theme: + name: material + palette: + - scheme: default + toggle: + icon: material/weather-night + name: Switch to dark mode + - scheme: slate + toggle: + icon: material/weather-sunny + name: Switch to light mode + features: + - navigation.tabs + - content.code.copy + - toc.integrate + +markdown_extensions: + - admonition + - def_list + - toc + - pymdownx.superfences + - pymdownx.highlight nav: - Home: - Overview: index.md @@ -17,18 +50,45 @@ nav: - Requirements: requirements.md - Testing with Pester: testing-pester.md - Actions: - - Add Token to LabVIEW: actions/add-token-to-labview.md - - Apply VIPC: actions/apply-vipc.md - - Build: actions/build.md - - Build LVLIBP: actions/build-lvlibp.md - - Build VI Package: 
actions/build-vi-package.md - - Close LabVIEW: actions/close-labview.md - - Generate Release Notes: actions/generate-release-notes.md - - Missing in Project: actions/missing-in-project.md - - Modify VIPB Display Info: actions/modify-vipb-display-info.md - - Prepare LabVIEW Source: actions/prepare-labview-source.md - - Rename File: actions/rename-file.md - - Restore Setup LV Source: actions/restore-setup-lv-source.md - - Revert Development Mode: actions/revert-development-mode.md - - Run Unit Tests: actions/run-unit-tests.md - - Set Development Mode: actions/set-development-mode.md + - Setup: + - Add Token to LabVIEW: actions/add-token-to-labview.md + - Apply VIPC: actions/apply-vipc.md + - Close LabVIEW: actions/close-labview.md + - Missing in Project: actions/missing-in-project.md + - Prepare LabVIEW Source: actions/prepare-labview-source.md + - Rename File: actions/rename-file.md + - Restore Setup LV Source: actions/restore-setup-lv-source.md + - Set Development Mode: actions/set-development-mode.md + - Revert Development Mode: actions/revert-development-mode.md + - Build: + - Build: actions/build.md + - Build LVLIBP: actions/build-lvlibp.md + - Build VI Package: actions/build-vi-package.md + - Generate Release Notes: actions/generate-release-notes.md + - Modify VIPB Display Info: actions/modify-vipb-display-info.md + - Testing: + - Run Unit Tests: actions/run-unit-tests.md + - Workflows: + - Add Token to LabVIEW: workflows/add-token-to-labview.md + - Apply VIPC: workflows/apply-vipc.md + - Build LVLIBP: workflows/build-lvlibp.md + - Build VI Package: workflows/build-vi-package.md + - Build: workflows/build.md + - Close LabVIEW: workflows/close-labview.md + - Generate Release Notes: workflows/generate-release-notes.md + - Missing in Project: workflows/missing-in-project.md + - Modify VIPB Display Info: workflows/modify-vipb-display-info.md + - Prepare LabVIEW Source: workflows/prepare-labview-source.md + - Rename File: workflows/rename-file.md + - Restore Setup 
LV Source: workflows/restore-setup-lv-source.md + - Revert Development Mode: workflows/revert-development-mode.md + - Run Pester Tests: workflows/run-pester-tests.md + - Run Unit Tests: workflows/run-unit-tests.md + - Set Development Mode: workflows/set-development-mode.md + - Setup MkDocs: workflows/setup-mkdocs.md +plugins: + - search + - autorefs + - redirects: + redirect_maps: + old-page.md: new-page.md diff --git a/scripts/__tests__/fixtures/downloaded/pester-junit-sample/pester-junit.xml b/scripts/__tests__/fixtures/artifacts/pester-junit-sample/pester-junit.xml similarity index 100% rename from scripts/__tests__/fixtures/downloaded/pester-junit-sample/pester-junit.xml rename to scripts/__tests__/fixtures/artifacts/pester-junit-sample/pester-junit.xml diff --git a/scripts/__tests__/fixtures/multiple-artifacts/artifacts/pester-junit-1/pester-junit.xml b/scripts/__tests__/fixtures/multiple-artifacts/artifacts/pester-junit-1/pester-junit.xml new file mode 100644 index 00000000..12ae44d9 --- /dev/null +++ b/scripts/__tests__/fixtures/multiple-artifacts/artifacts/pester-junit-1/pester-junit.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/scripts/__tests__/fixtures/multiple-artifacts/artifacts/pester-junit-2/pester-junit.xml b/scripts/__tests__/fixtures/multiple-artifacts/artifacts/pester-junit-2/pester-junit.xml new file mode 100644 index 00000000..3dbd00c7 --- /dev/null +++ b/scripts/__tests__/fixtures/multiple-artifacts/artifacts/pester-junit-2/pester-junit.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/scripts/__tests__/fixtures/no-artifacts/.gitkeep b/scripts/__tests__/fixtures/no-artifacts/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/scripts/__tests__/generate-ci-summary.test.js b/scripts/__tests__/generate-ci-summary.test.js index 1d50a1a2..6a88b3b4 100644 --- a/scripts/__tests__/generate-ci-summary.test.js +++ b/scripts/__tests__/generate-ci-summary.test.js @@ -6,17 +6,23 @@ import os from 'node:os'; import { fileURLToPath } from 
'node:url'; import { execFile } from 'node:child_process'; import { promisify } from 'node:util'; -import { collectTestCases, loadRequirements, mapToRequirements, groupToMarkdown, buildSummary } from '../generate-ci-summary.ts'; +import { collectTestCases } from '../summary/tests.ts'; +import { loadRequirements, mapToRequirements } from '../summary/requirements.ts'; +import { groupToMarkdown, summaryToMarkdown, requirementsSummaryToMarkdown, buildSummary, computeStatusCounts } from '../summary/index.ts'; import { writeErrorSummary } from '../error-handler.ts'; const fileUrl = new URL('../generate-ci-summary.ts', import.meta.url); +const summaryUrl = new URL('../summary/index.ts', import.meta.url); +const requirementsUrl = new URL('../summary/requirements.ts', import.meta.url); test('generate-ci-summary features', async () => { const content = await fs.readFile(fileUrl, 'utf8'); + const summaryContent = await fs.readFile(summaryUrl, 'utf8'); + const reqContent = await fs.readFile(requirementsUrl, 'utf8'); assert.match(content, /TEST_RESULTS_GLOBS/); - assert.match(content, //); - assert.match(content, /
/); - assert.match(content, /\*\*\/\*junit\*\.xml/); + assert.match(reqContent, //); + assert.match(summaryContent, /
/); + assert.match(content, /artifacts\/\*\*\/\*junit\*\.xml/); }); test('writeErrorSummary skips summary file for non-Error throws', async () => { @@ -78,6 +84,32 @@ test('loadRequirements logs warning on invalid JSON', async () => { assert.match(warned, /Failed to load requirements mapping/); }); +test('loadRequirements warns and skips invalid entries', async () => { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), 'req-')); + const req = { + requirements: [ + { id: 'REQ-1', tests: ['good'] }, + { tests: ['missing id'] }, + { id: 'REQ-3', tests: 'not-array' }, + ], + }; + const reqPath = path.join(dir, 'req.json'); + await fs.writeFile(reqPath, JSON.stringify(req)); + let warned = ''; + const origWarn = console.warn; + console.warn = (msg) => { + warned += String(msg); + }; + const { map, meta } = await loadRequirements(reqPath); + console.warn = origWarn; + await fs.rm(dir, { recursive: true, force: true }); + assert.match(warned, /Invalid requirement entry/); + assert.strictEqual('good' in map, true); + assert.strictEqual('missing id' in map, false); + assert.strictEqual('not-array' in map, false); + assert.deepEqual(Object.keys(meta), ['REQ-1']); +}); + test('collectTestCases uses machine-name property for owner', async () => { const dir = await fs.mkdtemp(path.join(os.tmpdir(), 'owner-')); const xmlProp = ``; @@ -113,6 +145,16 @@ test('collectTestCases uses evidence property and falls back to directory scan', await fs.rm(dir, { recursive: true, force: true }); }); +test('collectTestCases captures requirement property', async () => { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), 'reqprop-')); + const xml = ``; + const xmlPath = path.join(dir, 'junit.xml'); + await fs.writeFile(xmlPath, xml); + const tests = await collectTestCases([xmlPath], dir, 'linux'); + assert.deepStrictEqual(tests[0].requirements, ['REQ-123']); + await fs.rm(dir, { recursive: true, force: true }); +}); + test('groupToMarkdown omits numeric identifiers', () => { const groups = 
[{ id: 'REQ-XYZ', @@ -128,6 +170,67 @@ test('groupToMarkdown omits numeric identifiers', () => { assert.match(md, /\| REQ-XYZ \| beta \| Failed \|/); }); +test('groupToMarkdown supports optional limit for truncation', () => { + const groups = [{ + id: 'REQ-XYZ', + tests: [ + { id: 'a', name: 'alpha', status: 'Passed', duration: 0, requirements: [] }, + { id: 'b', name: 'beta', status: 'Failed', duration: 0, requirements: [] }, + { id: 'c', name: 'gamma', status: 'Skipped', duration: 0, requirements: [] }, + ], + }]; + const truncated = groupToMarkdown(groups, 2); + assert.match(truncated, /Truncated/); + assert.strictEqual(truncated.includes('gamma'), false); + + const full = groupToMarkdown(groups); + assert.doesNotMatch(full, /Truncated/); + assert.ok(full.includes('gamma')); +}); + +test('requirementsSummaryToMarkdown escapes pipes in description', () => { + const groups = [ + { id: 'REQ-1', description: 'Alpha | Beta', tests: [] }, + ]; + const md = requirementsSummaryToMarkdown(groups); + assert.ok(md.includes('| Requirement ID | Description | Owner | Total Tests | Passed | Failed | Skipped | Pass Rate (%) |')); + assert.ok(md.includes('| REQ-1 | Alpha \\| Beta | | 0 | 0 | 0 | 0 | 0.00 |')); +}); + +test('computeStatusCounts tallies test statuses', () => { + const tests = [ + { id: '1', name: 'a', status: 'Passed', duration: 0, requirements: [] }, + { id: '2', name: 'b', status: 'Failed', duration: 0, requirements: [] }, + { id: '3', name: 'c', status: 'Skipped', duration: 0, requirements: [] }, + { id: '4', name: 'd', status: 'Passed', duration: 0, requirements: [] }, + ]; + const counts = computeStatusCounts(tests); + assert.deepEqual(counts, { total: 4, passed: 2, failed: 1, skipped: 1 }); +}); + +test('summaryToMarkdown sorts OS alphabetically and escapes special characters', () => { + const totals = { + overall: { passed: 2, failed: 0, skipped: 0, duration: 3, rate: 100 }, + byOs: { + 'win|dos': { passed: 1, failed: 0, skipped: 0, duration: 1, rate: 100 
}, + linux: { passed: 1, failed: 0, skipped: 0, duration: 2, rate: 100 }, + }, + }; + const md = summaryToMarkdown(totals); + assert.match(md, /\| OS \| Passed \| Failed \| Skipped \| Duration \(s\) \| Pass Rate \(%\) \|/); + assert.ok(md.includes('| win\\|dos | 1 | 0 | 0 | 1.00 | 100.00 |')); + const linuxIdx = md.indexOf('| linux |'); + const winIdx = md.indexOf('| win\\|dos |'); + assert.ok(linuxIdx > -1 && winIdx > linuxIdx); +}); + +test('summaryToMarkdown handles no tests', () => { + const totals = { overall: { passed: 0, failed: 0, skipped: 0, duration: 0, rate: 0 }, byOs: {} }; + const md = summaryToMarkdown(totals); + assert.ok(md.includes('| overall | 0 | 0 | 0 | 0.00 | 0.00 |')); + assert.strictEqual(md.includes('| linux |'), false); +}); + test('buildSummary splits totals by OS', () => { const groups = [{ id: 'REQ-1', @@ -177,6 +280,58 @@ test('writes outputs to OS-specific directory', async () => { await fs.rm('artifacts', { recursive: true, force: true }); }); +test('skips invalid JUnit files and still generates summary', async () => { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), 'badjunit-')); + const goodXml = ''; + const badXml = ''; + const goodPath = path.join(dir, 'good.xml'); + const badPath = path.join(dir, 'bad.xml'); + await fs.writeFile(goodPath, goodXml); + await fs.writeFile(badPath, badXml); + + await fs.rm('artifacts', { recursive: true, force: true }); + + const env = { + ...process.env, + TEST_RESULTS_GLOBS: `${goodPath} ${badPath}`, + EVIDENCE_DIR: dir, + RUNNER_OS: 'Linux', + }; + + const { stderr } = await execFileP('node_modules/.bin/tsx', ['scripts/generate-ci-summary.ts'], { env }); + + const outDir = path.join('artifacts', 'linux'); + const summary = await fs.readFile(path.join(outDir, 'summary.md'), 'utf8'); + assert.match(summary, /\| linux \| 1 \| 0 \| 0 \|/); + const trace = await fs.readFile(path.join(outDir, 'traceability.md'), 'utf8'); + assert.match(trace, /good/); + assert.strictEqual(trace.includes('bad'), 
false); + assert.match(stderr, /Failed to parse JUnit file/); + + await fs.rm(dir, { recursive: true, force: true }); + await fs.rm('artifacts', { recursive: true, force: true }); +}); + +test('ignores stale JUnit files outside artifacts path', async () => { + await fs.rm('artifacts', { recursive: true, force: true }); + const freshDir = path.join('artifacts', 'current'); + await fs.mkdir(freshDir, { recursive: true }); + const freshXml = ''; + await fs.writeFile(path.join(freshDir, 'junit.xml'), freshXml); + const stalePath = path.join('stale-junit.xml'); + await fs.writeFile(stalePath, ''); + + const env = { ...process.env, RUNNER_OS: 'Linux' }; + await execFileP('node_modules/.bin/tsx', ['scripts/generate-ci-summary.ts'], { env }); + + const trace = await fs.readFile(path.join('artifacts', 'linux', 'traceability.md'), 'utf8'); + assert.match(trace, /fresh/); + assert.strictEqual(trace.includes('stale'), false); + + await fs.rm('artifacts', { recursive: true, force: true }); + await fs.rm(stalePath, { force: true }); +}); + test('partitions requirement groups by runner_type', async () => { const dir = await fs.mkdtemp(path.join(os.tmpdir(), 'partition-')); const junitPath = path.join(dir, 'junit.xml'); diff --git a/scripts/__tests__/markdown.test.js b/scripts/__tests__/markdown.test.js new file mode 100644 index 00000000..bcaff920 --- /dev/null +++ b/scripts/__tests__/markdown.test.js @@ -0,0 +1,14 @@ +import test from 'node:test'; +import assert from 'node:assert/strict'; +import { escapeMarkdown } from '../utils/markdown.ts'; + +test('escapeMarkdown escapes special characters', () => { + const input = '`*_[]|'; + const expected = '\\`\\*\\_\\[\\]\\|'; + assert.strictEqual(escapeMarkdown(input), expected); +}); + +test('escapeMarkdown leaves plain text untouched', () => { + const input = 'plain text'; + assert.strictEqual(escapeMarkdown(input), input); +}); diff --git a/scripts/__tests__/print-pester-traceability.test.js 
b/scripts/__tests__/print-pester-traceability.test.js index 52a4c21e..236e44ef 100644 --- a/scripts/__tests__/print-pester-traceability.test.js +++ b/scripts/__tests__/print-pester-traceability.test.js @@ -9,12 +9,12 @@ const execFileP = promisify(execFile); const fixtureDir = fileURLToPath(new URL('./fixtures', import.meta.url)); const rootDir = fileURLToPath(new URL('../..', import.meta.url)); +const scriptFile = path.join(rootDir, 'scripts/print-pester-traceability.ts'); test('groups owners and includes requirements and evidence', async () => { const env = { ...process.env, RUNNER_OS: 'Linux' }; const tsxPath = path.join(rootDir, 'node_modules/.bin/tsx'); - const scriptPath = '../../print-pester-traceability.ts'; - const { stdout } = await execFileP(tsxPath, [scriptPath], { cwd: fixtureDir, env }); + const { stdout } = await execFileP(tsxPath, [scriptFile], { cwd: fixtureDir, env }); // ensure details sections for each owner assert.match(stdout, /
alice<\/summary>/); @@ -33,3 +33,26 @@ test('groups owners and includes requirements and evidence', async () => { assert.match(aliceSection, /Gamma \| REQ-789 \| Passed \| \[link\]\(http:\/\/example.com\/gamma.log\)/); assert.match(bobSection, /Beta \| REQ-456 \| Passed \| \[link\]\(http:\/\/example.com\/beta.log\)/); }); + +test('fails when no JUnit files are found', async () => { + const env = { ...process.env, RUNNER_OS: 'Linux' }; + const tsxPath = path.join(rootDir, 'node_modules/.bin/tsx'); + const cwd = path.join(fixtureDir, 'no-artifacts'); + await assert.rejects( + execFileP(tsxPath, [scriptFile], { cwd, env }), + (err) => { + assert.equal(err.code, 1); + assert.match(err.stderr, /No JUnit files found/); + return true; + } + ); +}); + +test('uses latest artifact directory when multiple are present', async () => { + const env = { ...process.env, RUNNER_OS: 'Linux' }; + const tsxPath = path.join(rootDir, 'node_modules/.bin/tsx'); + const cwd = path.join(fixtureDir, 'multiple-artifacts'); + const { stdout } = await execFileP(tsxPath, [scriptFile], { cwd, env }); + assert.match(stdout, /
dave<\/summary>/); + assert.doesNotMatch(stdout, /
carol<\/summary>/); +}); diff --git a/scripts/generate-ci-summary.ts b/scripts/generate-ci-summary.ts index df3d2413..cc74742b 100644 --- a/scripts/generate-ci-summary.ts +++ b/scripts/generate-ci-summary.ts @@ -2,362 +2,21 @@ import fs from 'fs/promises'; import { constants as fsConstants } from 'fs'; import path from 'path'; -import { fileURLToPath, pathToFileURL } from 'url'; +import { pathToFileURL } from 'url'; import { glob } from 'glob'; -import { parseStringPromise } from 'xml2js'; -import yaml from 'js-yaml'; import { writeErrorSummary } from './error-handler.ts'; - -interface TestCase { - id: string; - name: string; - className?: string; - status: 'Passed' | 'Failed' | 'Skipped'; - duration: number; - owner?: string; - evidence?: string; - requirements: string[]; - os?: string; -} - -interface RequirementGroup { - id: string; - description?: string; - owner?: string; - runner_label?: string; - runner_type?: string; - skip_dry_run?: boolean; - tests: TestCase[]; -} - -function normalizeTestId(id: string): string { - return id.toLowerCase().replace(/::/g, '-').replace(/\s+/g, '-'); -} - -function redact(text: string): string { - return text.replace(/[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+/g, ''); -} - -export async function loadRequirements(mappingFile: string) { - try { - const raw = await fs.readFile(mappingFile, 'utf8'); - const parsed = JSON.parse(raw); - const defaults: Record = parsed.runners || parsed.defaults || {}; - const map: Record = {}; - const meta: Record = {}; - if (Array.isArray(parsed.requirements)) { - for (const r of parsed.requirements) { - const def = (r.runner && defaults[r.runner]) || {}; - const owner = r.owner ?? def.owner; - const runner_label = r.runner_label ?? def.runner_label; - const runner_type = r.runner_type ?? def.runner_type; - const skip_dry_run = r.skip_dry_run ?? 
def.skip_dry_run; - meta[r.id] = { description: r.description, owner, runner_label, runner_type, skip_dry_run }; - if (Array.isArray(r.tests)) { - for (const t of r.tests) { - const key = t.toLowerCase(); - if (!map[key]) map[key] = { requirements: [], owner }; - map[key].requirements.push(r.id); - } - } - } - } - return { map, meta }; - } catch (err) { - const msg = err instanceof Error ? err.message : String(err); - console.warn(`Failed to load requirements mapping from ${mappingFile}: ${msg}`); - await writeErrorSummary(err); - return { map: {}, meta: {} }; - } -} - -export async function collectTestCases(files: string[], evidenceDir: string, os?: string): Promise { - const evidenceFiles = await fs.readdir(evidenceDir).catch(() => []); - const tests: TestCase[] = []; - const osType = (os ?? process.env.RUNNER_OS ?? 'unknown').toLowerCase(); - for (const file of files) { - const xml = await fs.readFile(file, 'utf8'); - const data = await parseStringPromise(xml, { explicitArray: true, mergeAttrs: true }); - const suites: any[] = []; - if (data.testsuite) suites.push(data.testsuite); - if (data.testsuites) { - if (Array.isArray(data.testsuites.testsuite)) suites.push(...data.testsuites.testsuite); - else if (data.testsuites.testsuite) suites.push(data.testsuites.testsuite); - } - const collect = (obj: any) => { - if (!obj) return; - if (Array.isArray(obj.testcase)) { - for (const tc of obj.testcase) { - const name = tc.name?.[0] ?? 'unknown'; - const className = tc.classname?.[0]; - const id = normalizeTestId(name); - let status: 'Passed' | 'Failed' | 'Skipped' = 'Passed'; - if (tc.failure || tc.error) status = 'Failed'; - else if (tc.skipped) status = 'Skipped'; - const duration = parseFloat(tc.time?.[0] ?? 
'0'); - const test: TestCase = { id, name, className, status, duration, requirements: [], os: osType }; - const props = tc.properties?.[0]?.property; - if (Array.isArray(props)) { - const findProp = (n: string) => - props.find((p: any) => p.name?.[0]?.toLowerCase() === n); - const ownerProp = findProp('owner') ?? findProp('machine-name'); - const ownerVal = ownerProp?.value?.[0] ?? ownerProp?._; - if (ownerVal) test.owner = ownerVal; - const evidenceProp = props.find((p: any) => - ['evidence', 'attachment', 'ci_link'].includes((p.name?.[0] ?? '').toLowerCase()) - ); - const evidenceVal = evidenceProp?.value?.[0] ?? evidenceProp?._; - if (evidenceVal) test.evidence = evidenceVal; - for (const p of props) { - if (p.name?.[0]?.toLowerCase() === 'requirement') { - const val = p.value?.[0] ?? p._; - if (typeof val === 'string') test.requirements.push(val.toUpperCase()); - } - } - } - if (!test.evidence) { - const evidence = evidenceFiles.find((f) => f.startsWith(id) || f.startsWith(id + '.')); - if (evidence) test.evidence = path.join('evidence', evidence); - } - if (!test.owner) { - const ownerMatch = name.match(/\[Owner:([^\]]+)\]/i); - if (ownerMatch) test.owner = ownerMatch[1]; - } - tests.push(test); - } - } - if (Array.isArray(obj.testsuite)) { - for (const s of obj.testsuite) collect(s); - } - }; - for (const s of suites) collect(s); - } - return tests; -} - -export function mapToRequirements( - tests: TestCase[], - mapping: Record, - meta: Record -): RequirementGroup[] { - const groups: Map = new Map(); - for (const test of tests) { - const stripAnnotations = (s: string) => s.replace(/\[[^\]]+\]/g, '').trim(); - const nameKey = stripAnnotations(test.name).toLowerCase(); - const classKey = test.className ? stripAnnotations(test.className).toLowerCase() : undefined; - const mapped = mapping[nameKey] || (classKey ? mapping[classKey] : undefined); - const reqs = mapped ? 
mapped.requirements : test.requirements; - if (mapped && mapped.owner) test.owner = mapped.owner; - if (!test.owner) { - for (const r of reqs) { - if (meta[r]?.owner) { - test.owner = meta[r].owner; - break; - } - } - } - const targetReqs = reqs.length ? reqs : ['Unmapped']; - for (const reqId of targetReqs) { - if (!groups.has(reqId)) { - groups.set(reqId, { - id: reqId, - description: meta[reqId]?.description, - owner: meta[reqId]?.owner, - runner_label: meta[reqId]?.runner_label, - runner_type: meta[reqId]?.runner_type, - skip_dry_run: meta[reqId]?.skip_dry_run, - tests: [], - }); - } - groups.get(reqId)!.tests.push(test); - } - } - const statusRank: Record = { Failed: 0, Passed: 1, Skipped: 2 }; - const sorted = Array.from(groups.values()).sort((a, b) => a.id.localeCompare(b.id, undefined, { numeric: true })); - for (const g of sorted) { - g.tests.sort((a, b) => { - const diff = statusRank[a.status] - statusRank[b.status]; - if (diff !== 0) return diff; - return a.name.localeCompare(b.name); - }); - } - return sorted; -} - -export function buildSummary(groups: RequirementGroup[]) { - const overall = { passed: 0, failed: 0, skipped: 0, duration: 0, rate: 0 }; - const byOs: Record = {}; - for (const g of groups) { - for (const t of g.tests) { - const os = t.os || 'unknown'; - if (!byOs[os]) byOs[os] = { passed: 0, failed: 0, skipped: 0, duration: 0, rate: 0 }; - overall.duration += t.duration; - byOs[os].duration += t.duration; - if (t.status === 'Passed') { - overall.passed++; byOs[os].passed++; - } else if (t.status === 'Failed') { - overall.failed++; byOs[os].failed++; - } else { - overall.skipped++; byOs[os].skipped++; - } - } - } - overall.rate = overall.passed + overall.failed === 0 ? 0 : (overall.passed / (overall.passed + overall.failed)) * 100; - for (const os of Object.keys(byOs)) { - const t = byOs[os]; - t.rate = t.passed + t.failed === 0 ? 
0 : (t.passed / (t.passed + t.failed)) * 100; - } - return { overall, byOs }; -} - -export function summaryToMarkdown(totals: { overall: { passed: number; failed: number; skipped: number; duration: number; rate: number }; byOs: Record }) { - const lines = [ - '### Test Summary', - '| OS | Passed | Failed | Skipped | Duration (s) | Pass Rate (%) |', - '| --- | --- | --- | --- | --- | --- |', - `| overall | ${totals.overall.passed} | ${totals.overall.failed} | ${totals.overall.skipped} | ${totals.overall.duration.toFixed(2)} | ${totals.overall.rate.toFixed(2)} |`, - ]; - for (const os of Object.keys(totals.byOs).sort()) { - const t = totals.byOs[os]; - lines.push(`| ${os} | ${t.passed} | ${t.failed} | ${t.skipped} | ${t.duration.toFixed(2)} | ${t.rate.toFixed(2)} |`); - } - return lines.join('\n'); -} - -export function requirementsSummaryToMarkdown(groups: RequirementGroup[]) { - const lines = [ - '### Requirement Summary', - '| Requirement ID | Description | Owner | Total Tests | Passed | Failed | Skipped | Pass Rate (%) |', - '| --- | --- | --- | --- | --- | --- | --- | --- |', - ]; - for (const g of groups) { - const total = g.tests.length; - const passed = g.tests.filter((t) => t.status === 'Passed').length; - const failed = g.tests.filter((t) => t.status === 'Failed').length; - const skipped = g.tests.filter((t) => t.status === 'Skipped').length; - const rate = passed + failed === 0 ? 0 : (passed / (passed + failed)) * 100; - lines.push(`| ${g.id} | ${g.description ?? ''} | ${g.owner ?? 
''} | ${total} | ${passed} | ${failed} | ${skipped} | ${rate.toFixed(2)} |`); - } - return lines.join('\n'); -} - -export function requirementTestsToMarkdown(groups: RequirementGroup[]) { - const lines = [ - '### Requirement Testcases', - '| Requirement ID | Test ID | Status |', - '| --- | --- | --- |', - ]; - for (const g of groups) { - for (const t of g.tests) { - lines.push(`| ${g.id} | ${t.name} | ${t.status} |`); - } - } - return lines.join('\n'); -} - -export function groupToMarkdown(groups: RequirementGroup[], limit?: number) { - const lines: string[] = []; - let remaining = limit ?? Infinity; - for (const g of groups) { - const total = g.tests.length; - const passedCount = g.tests.filter((t) => t.status === 'Passed').length; - const pct = total === 0 ? 0 : Math.round((passedCount / total) * 100); - const header = `${g.id} (${pct}% passed)`; - const table = [ - '| Requirement | Test ID | Status | Duration (s) | Owner | Evidence |', - '| --- | --- | --- | --- | --- | --- |', - ]; - for (const t of g.tests) { - if (remaining <= 0) break; - const evidence = t.evidence ? `[link](${t.evidence})` : ''; - table.push( - `| ${g.id} | ${t.name} | ${t.status} | ${t.duration.toFixed(3)} | ${t.owner ?? g.owner ?? ''} | ${evidence} |`, - ); - remaining--; - } - const content = table.join('\n'); - if (g.tests.length > 5) { - lines.push(`
${header}\n\n${content}\n\n
`); - } else { - lines.push(`#### ${header}\n\n${content}`); - } - if (remaining <= 0) break; - } - if (limit && remaining <= 0) lines.push('\n_Truncated. See traceability.md for full details._'); - return lines.join('\n\n'); -} - -async function generateActionDocs(dispatcherRegistryFile: string, wrapperDirs: string[]) { - const actionParams: any[] = []; - - let registry: any = null; - try { - const ext = path.extname(dispatcherRegistryFile); - if (ext === '.json') { - registry = JSON.parse(await fs.readFile(dispatcherRegistryFile, 'utf8')); - } else { - const mod = await import(pathToFileURL(path.resolve(dispatcherRegistryFile)).href); - registry = mod.default ?? mod; - } - } catch { - registry = null; - } - - const wrappers: Record = {}; - for (const dir of wrapperDirs) { - const p = path.join(dir, 'action.yml'); - try { - const y = yaml.load(await fs.readFile(p, 'utf8')) as any; - const params = Object.entries(y.inputs || {}).map(([n, inf]: any) => ({ - name: n, - description: inf.description || '', - required: inf.required === true, - default: inf.default ?? 
'', - type: inf.type || 'string', - })); - wrappers[dir] = params; - } catch { - continue; - } - } - - const docs = { action: actionParams, dispatcher: registry, wrappers }; - const lines: string[] = ['### Parameters', '| Name | Type | Required | Default | Description |', '| --- | --- | --- | --- | --- |']; - for (const p of actionParams) { - lines.push(`| ${p.name} | ${p.type} | ${p.required} | ${p.default} | ${p.description} |`); - } - if (registry) { - lines.push('\n### Dispatcher Functions'); - const fnNames = Object.keys(registry).sort(); - for (const fn of fnNames) { - const info = registry[fn]; - lines.push(`\n#### ${fn}`); - if (info.description) lines.push(info.description); - const tbl = ['| Parameter | Type | Required | Default | Description |', '| --- | --- | --- | --- | --- |']; - const paramNames = Object.keys(info.parameters || {}).sort(); - for (const pn of paramNames) { - const p = info.parameters[pn]; - tbl.push(`| ${pn} | ${p.type} | ${p.required} | ${p.default ?? ''} | ${p.description ?? 
''} |`); - } - lines.push(tbl.join('\n')); - lines.push('\n```powershell'); - lines.push(`pwsh ./actions/Invoke-OSAction.ps1 -ActionName ${fn} -ArgsJson '{}'`); - lines.push('```'); - } - } - if (Object.keys(wrappers).length) { - lines.push('\n### Wrapper Actions'); - for (const [dir, params] of Object.entries(wrappers)) { - lines.push(`\n#### ${dir}`); - const tbl = ['| Name | Type | Required | Default | Description |', '| --- | --- | --- | --- | --- |']; - for (const p of params) { - tbl.push(`| ${p.name} | ${p.type} | ${p.required} | ${p.default} | ${p.description} |`); - } - lines.push(tbl.join('\n')); - } - } - return { docs, markdown: lines.join('\n') }; -} +import { + buildSummary, + summaryToMarkdown, + requirementsSummaryToMarkdown, + requirementTestsToMarkdown, + groupToMarkdown, + TestCase, + RequirementGroup, +} from './summary/index.ts'; +import { generateActionDocs } from './summary/generate-action-docs.ts'; +import { collectTestCases } from './summary/tests.ts'; +import { loadRequirements, mapToRequirements, redact } from './summary/requirements.ts'; async function main() { const mappingFile = process.env.REQ_MAPPING_FILE || 'requirements.json'; @@ -376,7 +35,7 @@ async function main() { } junitFiles = Array.from(found); } else { - const single = process.env.TEST_RESULTS_GLOB || '**/*junit*.xml'; + const single = process.env.TEST_RESULTS_GLOB || 'artifacts/**/*junit*.xml'; junitFiles = await glob(single, { nodir: true }); } let tests: TestCase[] = []; @@ -447,4 +106,3 @@ if (import.meta.url === pathToFileURL(process.argv[1] ?? 
'').href) { process.exit(1); }); } - diff --git a/scripts/junit-parser.ts b/scripts/junit-parser.ts index b365ee5a..2b75b245 100644 --- a/scripts/junit-parser.ts +++ b/scripts/junit-parser.ts @@ -9,6 +9,7 @@ export interface JUnitTestCase { skippedMessage?: string; requirements: string[]; attributes: Record; + properties: Record; } export interface JUnitTestSuite { @@ -67,9 +68,22 @@ export async function parseJUnit(xml: string): Promise { if (tc.failure || tc.error) status = 'Failed'; else if (tc.skipped) status = 'Skipped'; const skippedMessage = tc.skipped?.message ?? tc.skipped?._; + const props: Record = {}; + const propList = tc.properties?.property; + const propItems = Array.isArray(propList) ? propList : propList ? [propList] : []; const reqMatches = [...name.matchAll(/\[(REQ-\d+)\]/gi)].map((m) => m[1].toUpperCase()); - const requirements = Array.from(new Set(reqMatches)); - return { name, status, classname, assertions, time, skippedMessage, requirements, attributes: tcAttrs }; + const reqSet = new Set(reqMatches); + for (const p of propItems) { + if (p.name && (p.value || p._)) { + const val = p.value ?? p._ ?? 
''; + props[p.name] = val; + if (p.name.toLowerCase() === 'requirement') { + reqSet.add(val.toUpperCase()); + } + } + } + const requirements = Array.from(reqSet); + return { name, status, classname, assertions, time, skippedMessage, requirements, attributes: tcAttrs, properties: props }; }); return { attributes: suiteAttrs, properties: props, testcases }; }); diff --git a/scripts/print-pester-traceability.ts b/scripts/print-pester-traceability.ts index 36953d7e..3189814e 100644 --- a/scripts/print-pester-traceability.ts +++ b/scripts/print-pester-traceability.ts @@ -1,10 +1,27 @@ #!/usr/bin/env tsx import path from 'path'; import { glob } from 'glob'; -import { collectTestCases } from './generate-ci-summary.ts'; +import { collectTestCases } from './summary/tests.ts'; async function main() { - const junitFiles = await glob('downloaded/pester-junit-*/pester-junit.xml'); + const overrideDir = process.env.PESTER_JUNIT_PATH; + let junitFiles: string[] = []; + if (overrideDir) { + junitFiles = await glob(path.join(overrideDir, 'pester-junit.xml')); + } else { + const matches = await glob('artifacts/pester-junit-*/pester-junit.xml'); + if (matches.length > 0) { + const latestDir = matches + .map((f) => path.dirname(f)) + .sort() + .pop()!; + junitFiles = matches.filter((f) => path.dirname(f) === latestDir); + } + } + if (junitFiles.length === 0) { + console.warn('No JUnit files found'); + process.exit(1); + } const tests = []; for (const file of junitFiles) { const dir = path.dirname(file); diff --git a/scripts/summary/generate-action-docs.ts b/scripts/summary/generate-action-docs.ts new file mode 100644 index 00000000..cb22b172 --- /dev/null +++ b/scripts/summary/generate-action-docs.ts @@ -0,0 +1,77 @@ +import fs from 'fs/promises'; +import path from 'path'; +import { pathToFileURL } from 'url'; +import yaml from 'js-yaml'; + +export async function generateActionDocs(dispatcherRegistryFile: string, wrapperDirs: string[]) { + const actionParams: any[] = []; + + let 
registry: any = null; + try { + const ext = path.extname(dispatcherRegistryFile); + if (ext === '.json') { + registry = JSON.parse(await fs.readFile(dispatcherRegistryFile, 'utf8')); + } else { + const mod = await import(pathToFileURL(path.resolve(dispatcherRegistryFile)).href); + registry = mod.default ?? mod; + } + } catch { + registry = null; + } + + const wrappers: Record = {}; + for (const dir of wrapperDirs) { + const p = path.join(dir, 'action.yml'); + try { + const y = yaml.load(await fs.readFile(p, 'utf8')) as any; + const params = Object.entries(y.inputs || {}).map(([n, inf]: any) => ({ + name: n, + description: inf.description || '', + required: inf.required === true, + default: inf.default ?? '', + type: inf.type || 'string', + })); + wrappers[dir] = params; + } catch { + continue; + } + } + + const docs = { action: actionParams, dispatcher: registry, wrappers }; + const lines: string[] = ['### Parameters', '| Name | Type | Required | Default | Description |', '| --- | --- | --- | --- | --- |']; + for (const p of actionParams) { + lines.push(`| ${p.name} | ${p.type} | ${p.required} | ${p.default} | ${p.description} |`); + } + if (registry) { + lines.push('\n### Dispatcher Functions'); + const fnNames = Object.keys(registry).sort(); + for (const fn of fnNames) { + const info = registry[fn]; + lines.push(`\n#### ${fn}`); + if (info.description) lines.push(info.description); + const tbl = ['| Parameter | Type | Required | Default | Description |', '| --- | --- | --- | --- | --- |']; + const paramNames = Object.keys(info.parameters || {}).sort(); + for (const pn of paramNames) { + const p = info.parameters[pn]; + tbl.push(`| ${pn} | ${p.type} | ${p.required} | ${p.default ?? ''} | ${p.description ?? 
''} |`); + } + lines.push(tbl.join('\n')); + lines.push('\n```powershell'); + lines.push(`pwsh ./actions/Invoke-OSAction.ps1 -ActionName ${fn} -ArgsJson '{}'`); + lines.push('```'); + } + } + if (Object.keys(wrappers).length) { + lines.push('\n### Wrapper Actions'); + for (const [dir, params] of Object.entries(wrappers)) { + lines.push(`\n#### ${dir}`); + const tbl = ['| Name | Type | Required | Default | Description |', '| --- | --- | --- | --- | --- |']; + for (const p of params) { + tbl.push(`| ${p.name} | ${p.type} | ${p.required} | ${p.default} | ${p.description} |`); + } + lines.push(tbl.join('\n')); + } + } + return { docs, markdown: lines.join('\n') }; +} + diff --git a/scripts/summary/index.ts b/scripts/summary/index.ts new file mode 100644 index 00000000..c5bfb011 --- /dev/null +++ b/scripts/summary/index.ts @@ -0,0 +1,145 @@ +export interface TestCase { + id: string; + name: string; + className?: string; + status: 'Passed' | 'Failed' | 'Skipped'; + duration: number; + owner?: string; + evidence?: string; + requirements: string[]; + os?: string; +} + +export interface RequirementGroup { + id: string; + description?: string; + owner?: string; + runner_label?: string; + runner_type?: string; + skip_dry_run?: boolean; + tests: TestCase[]; +} + +export function computeStatusCounts(tests: TestCase[]) { + const counts = { total: tests.length, passed: 0, failed: 0, skipped: 0 }; + for (const t of tests) { + if (t.status === 'Passed') counts.passed++; + else if (t.status === 'Failed') counts.failed++; + else counts.skipped++; + } + return counts; +} + +export function buildSummary(groups: RequirementGroup[]) { + const overall = { passed: 0, failed: 0, skipped: 0, duration: 0, rate: 0 }; + const byOs: Record = {}; + for (const g of groups) { + for (const t of g.tests) { + const os = t.os || 'unknown'; + if (!byOs[os]) byOs[os] = { passed: 0, failed: 0, skipped: 0, duration: 0, rate: 0 }; + overall.duration += t.duration; + byOs[os].duration += t.duration; + if 
(t.status === 'Passed') { overall.passed++; byOs[os].passed++; } + else if (t.status === 'Failed') { overall.failed++; byOs[os].failed++; } + else { overall.skipped++; byOs[os].skipped++; } + } + } + overall.rate = overall.passed + overall.failed === 0 ? 0 : (overall.passed / (overall.passed + overall.failed)) * 100; + for (const os of Object.keys(byOs)) { + const t = byOs[os]; + t.rate = t.passed + t.failed === 0 ? 0 : (t.passed / (t.passed + t.failed)) * 100; + } + return { overall, byOs }; +} + +export function summaryToMarkdown(totals: { overall: { passed: number; failed: number; skipped: number; duration: number; rate: number }; byOs: Record }) { + const header = ['OS', 'Passed', 'Failed', 'Skipped', 'Duration (s)', 'Pass Rate (%)']; + const rows = [[ + 'overall', + `${totals.overall.passed}`, + `${totals.overall.failed}`, + `${totals.overall.skipped}`, + totals.overall.duration.toFixed(2), + totals.overall.rate.toFixed(2), + ]]; + for (const os of Object.keys(totals.byOs).sort()) { + const t = totals.byOs[os]; + rows.push([ + os, + `${t.passed}`, + `${t.failed}`, + `${t.skipped}`, + t.duration.toFixed(2), + t.rate.toFixed(2), + ]); + } + return ['### Test Summary', buildTable(header, rows)].join('\n'); +} + +export function requirementsSummaryToMarkdown(groups: RequirementGroup[]) { + const header = ['Requirement ID', 'Description', 'Owner', 'Total Tests', 'Passed', 'Failed', 'Skipped', 'Pass Rate (%)']; + const rows: string[][] = []; + for (const g of groups) { + const { total, passed, failed, skipped } = computeStatusCounts(g.tests); + const rate = passed + failed === 0 ? 0 : (passed / (passed + failed)) * 100; + rows.push([ + g.id, + g.description ?? '', + g.owner ?? 
'', + `${total}`, + `${passed}`, + `${failed}`, + `${skipped}`, + rate.toFixed(2), + ]); + } + return ['### Requirement Summary', buildTable(header, rows)].join('\n'); +} + +export function requirementTestsToMarkdown(groups: RequirementGroup[]) { + const header = ['Requirement ID', 'Test ID', 'Status']; + const rows: string[][] = []; + for (const g of groups) { + for (const t of g.tests) { + rows.push([g.id, t.name, t.status]); + } + } + return ['### Requirement Testcases', buildTable(header, rows)].join('\n'); +} + +export function groupToMarkdown(groups: RequirementGroup[], limit?: number) { + const lines: string[] = []; + let remaining = limit ?? Infinity; + for (const g of groups) { + const { total, passed } = computeStatusCounts(g.tests); + const pct = total === 0 ? 0 : Math.round((passed / total) * 100); + const heading = `${g.id} (${pct}% passed)`; + const tblHeader = ['Requirement', 'Test ID', 'Status', 'Duration (s)', 'Owner', 'Evidence']; + const rows: string[][] = []; + for (const t of g.tests) { + if (remaining <= 0) break; + const evidence = t.evidence ? `[link](${t.evidence})` : ''; + rows.push([ + g.id, + t.name, + t.status, + t.duration.toFixed(3), + t.owner ?? g.owner ?? '', + evidence, + ]); + remaining--; + } + const content = buildTable(tblHeader, rows); + if (g.tests.length > 5) { + lines.push(`
<details><summary>${heading}</summary>\n\n${content}\n\n</details>
`); + } else { + lines.push(`#### ${heading}\n\n${content}`); + } + if (remaining <= 0) break; + } + if (limit && remaining <= 0) lines.push('\n_Truncated. See traceability.md for full details._'); + return lines.join('\n\n'); +} + +import { buildTable } from '../utils/markdown.ts'; + diff --git a/scripts/summary/requirements.ts b/scripts/summary/requirements.ts new file mode 100644 index 00000000..9b7e7838 --- /dev/null +++ b/scripts/summary/requirements.ts @@ -0,0 +1,149 @@ +import fs from 'fs/promises'; +import { writeErrorSummary } from '../error-handler.ts'; +import { TestCase, RequirementGroup } from './index.ts'; + +export interface RunnerDefaults { + owner?: string; + runner_label?: string; + runner_type?: string; + skip_dry_run?: boolean; +} + +export interface RequirementEntry extends RunnerDefaults { + id: string; + description?: string; + runner?: string; + tests: string[]; +} + +interface RequirementsFile { + runners?: Record; + defaults?: Record; + requirements?: unknown; +} + +export function redact(text: string): string { + return text.replace(/[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+/g, ''); +} + +function isRunnerDefaults(value: unknown): value is RunnerDefaults { + if (!value || typeof value !== 'object') return false; + const v = value as Record; + if ('owner' in v && typeof v.owner !== 'string') return false; + if ('runner_label' in v && typeof v.runner_label !== 'string') return false; + if ('runner_type' in v && typeof v.runner_type !== 'string') return false; + if ('skip_dry_run' in v && typeof v.skip_dry_run !== 'boolean') return false; + return true; +} + +function isRequirementEntry(value: unknown): value is RequirementEntry { + if (!value || typeof value !== 'object') return false; + const v = value as Record; + if (typeof v.id !== 'string') return false; + if (!Array.isArray(v.tests) || !v.tests.every((t) => typeof t === 'string')) return false; + if ('description' in v && typeof v.description !== 'string') return false; + if ('runner' in v && 
typeof v.runner !== 'string') return false; + if ('owner' in v && typeof v.owner !== 'string') return false; + if ('runner_label' in v && typeof v.runner_label !== 'string') return false; + if ('runner_type' in v && typeof v.runner_type !== 'string') return false; + if ('skip_dry_run' in v && typeof v.skip_dry_run !== 'boolean') return false; + return true; +} + +export async function loadRequirements(mappingFile: string) { + try { + const raw = await fs.readFile(mappingFile, 'utf8'); + const parsed: RequirementsFile = JSON.parse(raw); + + const defaults: Record = {}; + const rawDefs = (parsed.runners || parsed.defaults) ?? {}; + if (rawDefs && typeof rawDefs === 'object') { + for (const [name, val] of Object.entries(rawDefs)) { + if (isRunnerDefaults(val)) { + defaults[name] = val; + } else { + console.warn(`Invalid runner defaults for ${name}`); + } + } + } + + const map: Record = {}; + const meta: Record = {}; + + if (Array.isArray(parsed.requirements)) { + for (const r of parsed.requirements) { + if (!isRequirementEntry(r)) { + console.warn(`Invalid requirement entry: ${JSON.stringify(r)}`); + continue; + } + const def = (r.runner && defaults[r.runner]) || {}; + const owner = r.owner ?? def.owner; + const runner_label = r.runner_label ?? def.runner_label; + const runner_type = r.runner_type ?? def.runner_type; + const skip_dry_run = r.skip_dry_run ?? def.skip_dry_run; + meta[r.id] = { description: r.description, owner, runner_label, runner_type, skip_dry_run }; + for (const t of r.tests) { + const key = t.toLowerCase(); + if (!map[key]) map[key] = { requirements: [], owner }; + map[key].requirements.push(r.id); + } + } + } + return { map, meta }; + } catch (err) { + const msg = err instanceof Error ? 
err.message : String(err); + console.warn(`Failed to load requirements mapping from ${mappingFile}: ${msg}`); + await writeErrorSummary(err); + return { map: {}, meta: {} }; + } +} + +export function mapToRequirements( + tests: TestCase[], + mapping: Record, + meta: Record, +): RequirementGroup[] { + const groups: Map = new Map(); + for (const test of tests) { + const stripAnnotations = (s: string) => s.replace(/\[[^\]]+\]/g, '').trim(); + const nameKey = stripAnnotations(test.name).toLowerCase(); + const classKey = test.className ? stripAnnotations(test.className).toLowerCase() : undefined; + const mapped = mapping[nameKey] || (classKey ? mapping[classKey] : undefined); + const reqs = mapped ? mapped.requirements : test.requirements; + if (mapped && mapped.owner) test.owner = mapped.owner; + if (!test.owner) { + for (const r of reqs) { + if (meta[r]?.owner) { + test.owner = meta[r].owner; + break; + } + } + } + const targetReqs = reqs.length ? reqs : ['Unmapped']; + for (const reqId of targetReqs) { + if (!groups.has(reqId)) { + groups.set(reqId, { + id: reqId, + description: meta[reqId]?.description, + owner: meta[reqId]?.owner, + runner_label: meta[reqId]?.runner_label, + runner_type: meta[reqId]?.runner_type, + skip_dry_run: meta[reqId]?.skip_dry_run, + tests: [], + }); + } + groups.get(reqId)!.tests.push(test); + } + } + const statusRank: Record = { Failed: 0, Passed: 1, Skipped: 2 }; + const sorted = Array.from(groups.values()).sort((a, b) => a.id.localeCompare(b.id, undefined, { numeric: true })); + for (const g of sorted) { + g.tests.sort((a, b) => { + const diff = statusRank[a.status] - statusRank[b.status]; + if (diff !== 0) return diff; + return a.name.localeCompare(b.name); + }); + } + return sorted; +} + diff --git a/scripts/summary/tests.ts b/scripts/summary/tests.ts new file mode 100644 index 00000000..77f5d132 --- /dev/null +++ b/scripts/summary/tests.ts @@ -0,0 +1,54 @@ +import fs from 'fs/promises'; +import path from 'path'; +import { TestCase } 
from './index.ts'; +import { parseJUnit } from '../junit-parser.ts'; + +export function normalizeTestId(id: string): string { + return id.toLowerCase().replace(/::/g, '-').replace(/\s+/g, '-'); +} + +export async function collectTestCases(files: string[], evidenceDir: string, os?: string): Promise { + const evidenceFiles = await fs.readdir(evidenceDir).catch(() => []); + const tests: TestCase[] = []; + const osType = (os ?? process.env.RUNNER_OS ?? 'unknown').toLowerCase(); + for (const file of files) { + let report; + try { + const xml = await fs.readFile(file, 'utf8'); + report = await parseJUnit(xml); + } catch (err) { + console.warn('Failed to parse JUnit file:', file, err); + continue; + } + for (const suite of report.suites) { + for (const tc of suite.testcases) { + const id = normalizeTestId(tc.name); + const test: TestCase = { + id, + name: tc.name, + className: tc.classname, + status: tc.status, + duration: tc.time, + requirements: [...tc.requirements], + os: osType, + }; + const props = tc.properties; + const ownerVal = props['owner'] ?? props['machine-name']; + if (ownerVal) test.owner = ownerVal; + const evidenceVal = props['evidence'] ?? props['attachment'] ?? 
props['ci_link']; + if (evidenceVal) test.evidence = evidenceVal; + if (!test.evidence) { + const evidence = evidenceFiles.find((f) => f.startsWith(id) || f.startsWith(id + '.')); + if (evidence) test.evidence = path.join('evidence', evidence); + } + if (!test.owner) { + const ownerMatch = tc.name.match(/\[Owner:([^\]]+)\]/i); + if (ownerMatch) test.owner = ownerMatch[1]; + } + tests.push(test); + } + } + } + return tests; +} + diff --git a/scripts/utils/markdown.ts b/scripts/utils/markdown.ts new file mode 100644 index 00000000..988bdc74 --- /dev/null +++ b/scripts/utils/markdown.ts @@ -0,0 +1,10 @@ +export function escapeMarkdown(text: string): string { + return text.replace(/[|`*_\[\]]/g, '\\$&'); +} + +export function buildTable(header: string[], rows: string[][]): string { + const head = `| ${header.map(escapeMarkdown).join(' | ')} |`; + const sep = `| ${header.map(() => '---').join(' | ')} |`; + const body = rows.map(r => `| ${r.map(c => escapeMarkdown(c)).join(' | ')} |`); + return [head, sep, ...body].join('\n'); +} diff --git a/setup-mkdocs/action.yml b/setup-mkdocs/action.yml index b5e4f6fc..c510668d 100644 --- a/setup-mkdocs/action.yml +++ b/setup-mkdocs/action.yml @@ -15,4 +15,4 @@ runs: ${{ runner.os }}-pip-mkdocs- - name: Install MkDocs shell: bash - run: pip install mkdocs==1.5.3 + run: pip install mkdocs==1.5.3 mkdocs-material pymdown-extensions mkdocs-autorefs mkdocs-redirects diff --git a/tests/pester/PathRestoration.Actions.Tests.ps1 b/tests/pester/PathRestoration.Actions.Tests.ps1 index 97462006..9e62f59e 100644 --- a/tests/pester/PathRestoration.Actions.Tests.ps1 +++ b/tests/pester/PathRestoration.Actions.Tests.ps1 @@ -5,7 +5,7 @@ $ErrorActionPreference = 'Stop' $repoRoot = (Resolve-Path (Join-Path $PSScriptRoot '..' 
'..')).Path Import-Module (Join-Path $repoRoot 'actions' 'OpenSourceActions.psm1') -Force -Describe 'Adapters restore PATH' -Skip { +Describe 'Adapters restore PATH' { $meta = @{ requirement = 'REQ-000' Owner = 'DevTools' @@ -21,30 +21,29 @@ Describe 'Adapters restore PATH' -Skip { } $cases = @( - @{ Func='Invoke-AddTokenToLabVIEW'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','add-token-to-labview','AddTokenToLabVIEW.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.' } }, - @{ Func='Invoke-ApplyVIPC'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','apply-vipc','ApplyVIPC.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; VIP_LVVersion='2021'; SupportedBitness='64'; RelativePath='.'; VIPCPath='dummy.vipc' } }, - @{ Func='Invoke-BuildViPackage'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','build-vi-package','build_vip.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; LabVIEWMinorRevision='2021'; RelativePath='.'; VIPBPath='dummy.vipb'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc'; DisplayInformationJSON='{}' } }, - @{ Func='Invoke-Build'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','build','Build.ps1'); Args=@{ RelativePath='.'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc'; LabVIEWMinorRevision='2021'; CompanyName='Co'; AuthorName='Auth' } }, - @{ Func='Invoke-BuildLvlibp'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','build-lvlibp','Build_lvlibp.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.'; LabVIEW_Project='Proj'; Build_Spec='Spec'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc' } }, - @{ Func='Invoke-CloseLabVIEW'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','close-labview','Close_LabVIEW.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64' } }, - @{ Func='Invoke-GenerateReleaseNotes'; 
Script=[System.IO.Path]::Combine($repoRoot,'scripts','generate-release-notes','GenerateReleaseNotes.ps1'); Args=@{ OutputPath='notes.md' } }, - @{ Func='Invoke-MissingInProject'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','missing-in-project','Invoke-MissingInProjectCLI.ps1'); Args=@{ LVVersion='2021'; SupportedBitness='64'; ProjectFile='Proj.lvproj' } }, - @{ Func='Invoke-ModifyVIPBDisplayInfo'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','modify-vipb-display-info','ModifyVIPBDisplayInfo.ps1'); Args=@{ SupportedBitness='64'; RelativePath='.'; VIPBPath='dummy.vipb'; MinimumSupportedLVVersion='2021'; LabVIEWMinorRevision='2021'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc'; DisplayInformationJSON='{}' } }, - @{ Func='Invoke-PrepareLabVIEWSource'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','prepare-labview-source','Prepare_LabVIEW_source.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.'; LabVIEW_Project='Proj'; Build_Spec='Spec' } }, - @{ Func='Invoke-RenameFile'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','rename-file','Rename-file.ps1'); Args=@{ CurrentFilename='a'; NewFilename='b' } }, - @{ Func='Invoke-RestoreSetupLVSource'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','restore-setup-lv-source','RestoreSetupLVSource.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.'; LabVIEW_Project='Proj'; Build_Spec='Spec' } }, - @{ Func='Invoke-RevertDevelopmentMode'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','revert-development-mode','RevertDevelopmentMode.ps1'); Args=@{ RelativePath='.' 
} }, - @{ Func='Invoke-RunUnitTests'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','run-unit-tests','RunUnitTests.ps1'); Args=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64' } }, - @{ Func='Invoke-SetDevelopmentMode'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','set-development-mode','Set_Development_Mode.ps1'); Args=@{ RelativePath='.' } } + @{ Func='Invoke-AddTokenToLabVIEW'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','add-token-to-labview','AddTokenToLabVIEW.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.' } }, + @{ Func='Invoke-ApplyVIPC'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','apply-vipc','ApplyVIPC.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; VIP_LVVersion='2021'; SupportedBitness='64'; RelativePath='.'; VIPCPath='dummy.vipc' } }, + @{ Func='Invoke-BuildViPackage'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','build-vi-package','build_vip.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; LabVIEWMinorRevision='2021'; RelativePath='.'; VIPBPath='dummy.vipb'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc'; DisplayInformationJSON='{}' } }, + @{ Func='Invoke-Build'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','build','Build.ps1'); Arguments=@{ RelativePath='.'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc'; LabVIEWMinorRevision='2021'; CompanyName='Co'; AuthorName='Auth' } }, + @{ Func='Invoke-BuildLvlibp'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','build-lvlibp','Build_lvlibp.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.'; LabVIEW_Project='Proj'; Build_Spec='Spec'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc' } }, + @{ Func='Invoke-CloseLabVIEW'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','close-labview','Close_LabVIEW.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64' } }, + @{ Func='Invoke-GenerateReleaseNotes'; 
Script=[System.IO.Path]::Combine($repoRoot,'scripts','generate-release-notes','GenerateReleaseNotes.ps1'); Arguments=@{ OutputPath='notes.md' } }, + @{ Func='Invoke-MissingInProject'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','missing-in-project','Invoke-MissingInProjectCLI.ps1'); Arguments=@{ LVVersion='2021'; SupportedBitness='64'; ProjectFile='Proj.lvproj' } }, + @{ Func='Invoke-ModifyVIPBDisplayInfo'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','modify-vipb-display-info','ModifyVIPBDisplayInfo.ps1'); Arguments=@{ SupportedBitness='64'; RelativePath='.'; VIPBPath='dummy.vipb'; MinimumSupportedLVVersion='2021'; LabVIEWMinorRevision='2021'; Major=1; Minor=0; Patch=0; Build=1; Commit='abc'; DisplayInformationJSON='{}' } }, + @{ Func='Invoke-PrepareLabVIEWSource'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','prepare-labview-source','Prepare_LabVIEW_source.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.'; LabVIEW_Project='Proj'; Build_Spec='Spec' } }, + @{ Func='Invoke-RenameFile'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','rename-file','Rename-file.ps1'); Arguments=@{ CurrentFilename='a'; NewFilename='b' } }, + @{ Func='Invoke-RestoreSetupLVSource'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','restore-setup-lv-source','RestoreSetupLVSource.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64'; RelativePath='.'; LabVIEW_Project='Proj'; Build_Spec='Spec' } }, + @{ Func='Invoke-RevertDevelopmentMode'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','revert-development-mode','RevertDevelopmentMode.ps1'); Arguments=@{ RelativePath='.' 
} }, + @{ Func='Invoke-RunUnitTests'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','run-unit-tests','RunUnitTests.ps1'); Arguments=@{ MinimumSupportedLVVersion='2021'; SupportedBitness='64' } }, + @{ Func='Invoke-SetDevelopmentMode'; Script=[System.IO.Path]::Combine($repoRoot,'scripts','set-development-mode','Set_Development_Mode.ps1'); Arguments=@{ RelativePath='.' } } ) - foreach ($case in $cases) { - $caseCopy = $case - It "restores PATH after $($caseCopy.Func)" { - $originalPath = $env:PATH - & $caseCopy.Func @($caseCopy.Args) -DryRun -gcliPath $script:gcliPath | Out-Null - $env:PATH | Should -Be $originalPath - } + It "restores PATH after <Func>" -TestCases $cases { + param($Func, $Script, $Arguments) + $originalPath = $env:PATH + $params = $Arguments + & $Func @params -DryRun -gcliPath $script:gcliPath | Out-Null + $env:PATH | Should -Be $originalPath + } }