Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@

All notable changes to this project will be documented in this file.

## [1.52.0] (12/01/2026)
This update adds parallel test execution for multiple template handles when the `--status` flag is used. Previously, status-check test runs for multiple handles executed sequentially; they now run in parallel, significantly reducing overall execution time. This change only affects the `silverfin run-test` command when multiple handles are combined with the `--status` flag.

## [1.51.0] (08/01/2026)

This update should have no user impact whatsoever.
Expand Down
44 changes: 33 additions & 11 deletions bin/cli.js
Original file line number Diff line number Diff line change
Expand Up @@ -453,16 +453,16 @@ program
.command("run-test")
.description("Run Liquid Tests for a reconciliation template from a YAML file")
.requiredOption("-f, --firm <firm-id>", "Specify the firm to be used", firmIdDefault)
.option("-h, --handle <handle>", "Specify the reconciliation to be used (mandatory)")
.option("-at, --account-template <name>", "Specify the account template to be used (mandatory)")
.option("-h, --handle <handle...>", "Specify one or more reconciliations to be used (mandatory)")
.option("-at, --account-template <name...>", "Specify one or more account templates to be used (mandatory)")
.option("-t, --test <test-name>", "Specify the name of the test to be run (optional)", "")
.option("--html-input", "Get a static html of the input-view of the template generated with the Liquid Test data (optional)", false)
.option("--html-preview", "Get a static html of the export-view of the template generated with the Liquid Test data (optional)", false)
.option("--preview-only", "Skip the checking of the results of the Liquid Test in case you only want to generate a preview template (optional)", false)
.option("--status", "Only return the status of the test runs as PASSED/FAILED (optional)", false)
.option("-p, --pattern <pattern>", "Run all tests that match this pattern (optional)", "")

.action((options) => {
.action(async (options) => {
if (!options.handle && !options.accountTemplate) {
consola.error("You need to specify either a reconciliation handle or an account template");
process.exit(1);
Expand All @@ -474,17 +474,39 @@ program
}

const templateType = options.handle ? "reconciliationText" : "accountTemplate";
const templateName = options.handle ? options.handle : options.accountTemplate;
let templateName = options.handle ? options.handle : options.accountTemplate;

// Support pipe-separated values: if single string contains pipes, split it
if (templateName.length === 1 && typeof templateName[0] === 'string' && templateName[0].includes('|')) {
templateName = templateName[0].split('|').map(name => name.trim()).filter(name => name.length > 0);
}

if (!templateName || templateName.length === 0) {
consola.error("You need to provide at least one handle or account template name");
process.exit(1);
}

// Block multiple handles/templates without --status
if (templateName.length > 1 && !options.status) {
consola.error("Multiple handles/templates are only allowed when used with the --status flag");
process.exit(1);
}

if (options.status) {
liquidTestRunner.runTestsStatusOnly(options.firm, templateType, templateName, options.test, options.pattern);
} else {
if (options.previewOnly && !options.htmlInput && !options.htmlPreview) {
consola.info(`When using "--preview-only" you need to specify at least one of the following options: "--html-input", "--html-preview"`);
process.exit(1);
}
liquidTestRunner.runTestsWithOutput(options.firm, templateType, templateName, options.test, options.previewOnly, options.htmlInput, options.htmlPreview, options.pattern);
// Status mode: allow multiple, pass array of template names
await liquidTestRunner.runTestsStatusOnly(options.firm, templateType, templateName, options.test, options.pattern);
return;
}

// Non-status mode: always run a single template, pass string handle/name
const singleTemplateName = templateName[0];

if (options.previewOnly && !options.htmlInput && !options.htmlPreview) {
consola.info(`When using "--preview-only" you need to specify at least one of the following options: "--html-input", "--html-preview"`);
process.exit(1);
}

await liquidTestRunner.runTestsWithOutput(options.firm, templateType, singleTemplateName, options.test, options.previewOnly, options.htmlInput, options.htmlPreview, options.pattern);
});

// Create Liquid Test
Expand Down
60 changes: 42 additions & 18 deletions lib/liquidTestRunner.js
Original file line number Diff line number Diff line change
Expand Up @@ -469,33 +469,57 @@ async function runTestsWithOutput(firmId, templateType, handle, testName = "", p

// RETURN (AND LOG) ONLY PASSED OR FAILED
// CAN BE USED BY GITHUB ACTIONS
/**
 * Runs the Liquid Tests for one or more template handles and reports only a
 * per-handle PASSED/FAILED status (plus the names of any failed tests).
 * Intended for CI usage (e.g. GitHub Actions).
 *
 * @param {Number|String} firmId - Firm the tests are run against.
 * @param {String} templateType - Either "reconciliationText" or "accountTemplate"; anything else aborts the process.
 * @param {String|String[]} handles - One handle, or an array of handles to be run in parallel.
 * @param {String} [testName=""] - Optional single test to run (empty string runs all tests).
 * @param {String} [pattern=""] - Optional pattern to select tests (empty string runs all tests).
 * @returns {Promise<String>} "PASSED" when every handle passed, otherwise "FAILED".
 */
async function runTestsStatusOnly(firmId, templateType, handles, testName = "", pattern = "") {
  if (templateType !== "reconciliationText" && templateType !== "accountTemplate") {
    consola.error(`Template type is missing or invalid`);
    process.exit(1);
  }

  // Backward compatibility: callers that still pass a single handle string
  // (the previous signature) behave the same as a one-element array.
  const handleList = Array.isArray(handles) ? handles : [handles];

  // Runs the tests for a single handle and returns its status summary.
  const runSingleHandle = async (singleHandle) => {
    let status = "FAILED";
    const failedTestNames = [];
    const testResult = await runTests(firmId, templateType, singleHandle, testName, false, "none", pattern);

    // NOTE(review): a falsy testResult (missing config/test file or empty
    // YAML) is treated as PASSED here — confirm this is intended, since it
    // can silently mask misconfigured handles in CI pipelines.
    if (!testResult) {
      status = "PASSED";
    } else {
      const testRun = testResult?.testRun;
      if (testRun?.status === "completed") {
        if (checkAllTestsErrorsPresent(testRun.tests) === false) {
          status = "PASSED";
        } else {
          // Collect the names of the individual failing tests in a stable
          // (sorted) order. Callback parameter is named `name` to avoid
          // shadowing this function's `testName` parameter.
          Object.keys(testRun.tests)
            .sort()
            .forEach((name) => {
              if (checkTestErrorsPresent(name, testRun.tests)) {
                failedTestNames.push(name);
              }
            });
        }
      }
    }

    // One status line per handle; failed test names are indented below it.
    consola.log(`${singleHandle}: ${status}`);
    failedTestNames.forEach((name) => {
      consola.log(`  ${name}: FAILED`);
    });

    return { handle: singleHandle, status, failedTestNames };
  };

  // Run all handles in parallel and aggregate into a single overall status.
  const results = await Promise.all(handleList.map(runSingleHandle));

  return results.every((result) => result.status === "PASSED") ? "PASSED" : "FAILED";
}

module.exports = {
Expand Down
22 changes: 11 additions & 11 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "silverfin-cli",
"version": "1.51.0",
"version": "1.52.0",
"description": "Command line tool for Silverfin template development",
"main": "index.js",
"license": "MIT",
Expand Down