Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@

All notable changes to this project will be documented in this file.

## [1.52.1] (21/01/2026)
In this update we fixed a bug in the `silverfin run-test -p "string pattern" -h template_handle` command. Previously, when running tests filtered by a pattern, a matching test could reference a YAML alias whose anchor was defined in a different section that the filter removed, causing the run to fail. This has now been resolved.

## [1.52.0] (12/01/2026)
This update improves test execution performance when running tests with status checks across multiple template handles.
Tests are now run in parallel for multiple handles when using the `--status` flag, significantly reducing the overall execution time. Previously, tests with status checks for multiple handles would run sequentially, but now they leverage parallel processing for better efficiency. This change only affects the `silverfin run-test` command when both multiple handles and the status flag are used together.
Expand Down
122 changes: 121 additions & 1 deletion lib/liquidTestRunner.js
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,103 @@ function findTestRows(testContent) {
return indexes;
}

/**
* Find all anchor definitions in the YAML content.
* Returns a map of anchor names to their line ranges (start, end).
*/
function findAnchorDefinitions(testContent) {
  const sourceLines = testContent.split("\n");
  const anchors = {};

  // Parse the whole document once; a generous alias budget avoids the parser
  // aborting on test files that reuse anchors heavily.
  const doc = yaml.parseDocument(testContent, { maxAliasCount: 10000 });

  // Map a character offset to the 0-based line containing it. A line "owns"
  // every offset up to and including its terminating newline; an offset past
  // the end of the content falls back to line 0 (matches prior behavior).
  const offsetToLine = (offset) => {
    let runningOffset = 0;
    for (let i = 0; i < sourceLines.length; i += 1) {
      if (runningOffset + sourceLines[i].length >= offset) {
        return i;
      }
      runningOffset += sourceLines[i].length + 1; // +1 for the "\n" removed by split
    }
    return 0;
  };

  // Walk every node and record where each anchor definition starts.
  yaml.visit(doc, {
    Node(_key, node) {
      if (!node.anchor || !node.range) return;
      anchors[node.anchor] = { startLine: offsetToLine(node.range[0]), node };
    },
  });

  return anchors;
}

/**
* Find all alias references in a given YAML content segment.
* Returns an array of alias names that are referenced.
*/
/**
 * Find all alias references in a given YAML content segment.
 * An alias reference looks like `*anchor_name`. Returns the distinct names
 * in order of first appearance.
 */
function findAliasReferences(segment) {
  const seen = new Set();
  for (const match of segment.matchAll(/\*([a-zA-Z_][a-zA-Z0-9_]*)/g)) {
    seen.add(match[1]);
  }
  return [...seen];
}

/**
* Extract anchor definition blocks from the original content.
* Returns the YAML content for each anchor that needs to be preserved.
*/
/**
 * Extract anchor definition blocks from the original content.
 * Returns the YAML content for each test block that defines one of the
 * requested anchors, so aliases in filtered content still resolve. Whole
 * test blocks (not bare anchor snippets) are returned because partial
 * snippets break the YAML structure expected by the backend.
 *
 * @param {string} testContent - Full, unfiltered YAML test content.
 * @param {string[]} anchorNames - Anchor names whose definitions must be preserved.
 * @param {Object<string, number>} testIndexes - Map of test name to its starting line index.
 * @returns {string} Deduplicated anchor-bearing blocks joined by blank lines, or "" if none requested.
 */
function extractAnchorBlocks(testContent, anchorNames, testIndexes) {
  if (anchorNames.length === 0) {
    return "";
  }

  const anchors = findAnchorDefinitions(testContent);
  const lines = testContent.split("\n");

  // Test start positions sorted ascending so the containing test for a given
  // line can be found with a single forward scan that stops early.
  const testPositions = Object.entries(testIndexes)
    .map(([name, index]) => ({ name, index }))
    .sort((a, b) => a.index - b.index);

  const anchorBlocks = [];

  for (const anchorName of anchorNames) {
    const anchor = anchors[anchorName];
    if (!anchor) continue; // anchor not defined anywhere; nothing to preserve

    const anchorLine = anchor.startLine;

    // The test containing the anchor is the last one starting at or before
    // the anchor's line. Defaults to the whole file when no test precedes the
    // anchor (e.g. the anchor sits in a leading section).
    let testStart = 0;
    let testEnd = lines.length;

    for (let i = 0; i < testPositions.length; i++) {
      // Positions are sorted, so later entries cannot contain the anchor.
      if (testPositions[i].index > anchorLine) break;
      testStart = testPositions[i].index;
      testEnd = i + 1 < testPositions.length ? testPositions[i + 1].index : lines.length;
    }

    const block = lines.slice(testStart, testEnd).join("\n").trimEnd();
    anchorBlocks.push(block);
  }

  // Several anchors may live in the same test block; keep each block once.
  const uniqueBlocks = [...new Set(anchorBlocks)];
  return uniqueBlocks.join("\n\n");
}

function filterTestsByPattern(testContent, pattern, testIndexes) {
const indexes = testIndexes || findTestRows(testContent);
const matchingTests = Object.keys(indexes).filter((testName) => testName.includes(pattern));
Expand Down Expand Up @@ -73,9 +170,32 @@ function filterTestsByPattern(testContent, pattern, testIndexes) {
segments.push(segment);
});

const filteredContent = segments.join("\n\n").trim();
let filteredContent = segments.join("\n\n").trim();
const orderedMatchingTests = orderedTests.filter((test) => matchingSet.has(test.name)).map((test) => test.name);

// Check for alias references in the filtered content and add missing anchor definitions
const aliasReferences = findAliasReferences(filteredContent);
if (aliasReferences.length > 0) {
// Find anchors that are defined in the filtered content
const definedAnchors = new Set();
const anchorPattern = /&([a-zA-Z_][a-zA-Z0-9_]*)/g;
let match;
while ((match = anchorPattern.exec(filteredContent)) !== null) {
definedAnchors.add(match[1]);
}

// Find aliases that reference anchors not in the filtered content
const missingAnchors = aliasReferences.filter((alias) => !definedAnchors.has(alias));

if (missingAnchors.length > 0) {
// Extract the test blocks containing the missing anchors
const anchorBlocks = extractAnchorBlocks(testContent, missingAnchors, indexes);
if (anchorBlocks) {
filteredContent = anchorBlocks + "\n\n" + filteredContent;
}
Comment on lines +176 to +195
Copy link

@coderabbitai coderabbitai bot Jan 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

find . -type f -name "liquidTestRunner.js" | head -20

Repository: silverfin/silverfin-cli

Length of output: 92


🏁 Script executed:

cd lib && wc -l liquidTestRunner.js

Repository: silverfin/silverfin-cli

Length of output: 90


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '170,200p'

Repository: silverfin/silverfin-cli

Length of output: 1477


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '195,250p'

Repository: silverfin/silverfin-cli

Length of output: 1947


🏁 Script executed:

ast-grep --pattern 'function extractAnchorBlocks($_) { $$$}'

Repository: silverfin/silverfin-cli

Length of output: 49


🏁 Script executed:

rg -n "extractAnchorBlocks" lib/liquidTestRunner.js

Repository: silverfin/silverfin-cli

Length of output: 229


🏁 Script executed:

rg -n "function extractAnchorBlocks" lib/

Repository: silverfin/silverfin-cli

Length of output: 163


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '84,150p'

Repository: silverfin/silverfin-cli

Length of output: 2495


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '1,83p' | tail -40

Repository: silverfin/silverfin-cli

Length of output: 1474


🏁 Script executed:

rg -n "filterTestsByPattern" lib/liquidTestRunner.js

Repository: silverfin/silverfin-cli

Length of output: 286


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '250,350p'

Repository: silverfin/silverfin-cli

Length of output: 4065


🏁 Script executed:

rg -n "lineAdjustments" lib/liquidTestRunner.js

Repository: silverfin/silverfin-cli

Length of output: 849


🏁 Script executed:

rg -n "test_line" lib/liquidTestRunner.js

Repository: silverfin/silverfin-cli

Length of output: 124


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '294,310p'

Repository: silverfin/silverfin-cli

Length of output: 596


🏁 Script executed:

rg -n "SF\." lib/liquidTestRunner.js | head -20

Repository: silverfin/silverfin-cli

Length of output: 336


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '420,470p'

Repository: silverfin/silverfin-cli

Length of output: 2497


🏁 Script executed:

rg -n "pattern" lib/liquidTestRunner.js | head -30

Repository: silverfin/silverfin-cli

Length of output: 1497


🏁 Script executed:

rg -n "createTestRun\|createPreviewRun" lib/ -A 5

Repository: silverfin/silverfin-cli

Length of output: 49


🏁 Script executed:

fd "*.js" lib/ -o | xargs grep -l "createTestRun\|createPreviewRun" | head -5

Repository: silverfin/silverfin-cli

Length of output: 183


🏁 Script executed:

find lib -name "*.js" -type f | head -20

Repository: silverfin/silverfin-cli

Length of output: 570


🏁 Script executed:

rg -n "createTestRun" lib/

Repository: silverfin/silverfin-cli

Length of output: 306


🏁 Script executed:

cat -n lib/api/sfApi.js | sed -n '407,450p'

Repository: silverfin/silverfin-cli

Length of output: 1784


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '550,565p'

Repository: silverfin/silverfin-cli

Length of output: 774


🏁 Script executed:

cat -n lib/liquidTestRunner.js | sed -n '116,125p'

Repository: silverfin/silverfin-cli

Length of output: 420


Pattern filtering is undermined by prepending full test blocks containing unmatched tests.

The extractAnchorBlocks function (line 117) returns entire test blocks, not just anchor definitions. These blocks come from tests that don't match the filter pattern and are prepended to filteredContent (line 194). Since no pattern information is passed to the backend API (lines 294-298 send only template, tests, and mode), the server executes all test blocks in the YAML—including those injected for anchor resolution. This reintroduces excluded tests and defeats the filtering guarantee.

Additionally, lineAdjustments only contains mappings for orderedMatchingTests (lines 202-208), so tests from injected anchor blocks lack proper line tracking, potentially causing incorrect error reporting.

Consider instead inlining referenced anchor nodes into matching tests or implementing server-side test filtering by name.

🤖 Prompt for AI Agents
In `@lib/liquidTestRunner.js` around lines 176 - 195, The current logic prepends
whole test blocks from extractAnchorBlocks into filteredContent, which
reintroduces excluded tests and breaks line tracking; change extractAnchorBlocks
(and its call site) to return only the anchor node definitions (or directly
inline the resolved anchor nodes into the matching tests found by
findAliasReferences) rather than full test blocks pulled from
testContent/indexes, and when you insert those anchor snippets update
lineAdjustments (which currently only maps orderedMatchingTests) to include
mappings for any added lines so error locations remain correct; alternatively,
if you prefer server-side enforcement, modify the payload sent to the backend to
include the filtering pattern or explicit test names instead of sending all
tests so the server will not execute injected blocks.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I tried implementing this but I'm afraid it's not doable for the way we run liquid tests.

When filtering tests, we only added small anchor snippets back into the YAML. These snippets were often incomplete/indented, which broke the YAML structure. The backend then saw missing data (like periods) and failed validation.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@delphine-demeulenaere, thank you for trying the suggested approach and sharing the findings. That's a valid technical constraint—if extracting only the anchor definitions breaks the YAML structure and causes backend validation failures, then preserving the full test blocks is the pragmatic solution.

Given that the backend expects valid YAML with complete test structures, the current implementation makes sense. While it does mean that some non-matching tests (those containing the anchors) will be included when pattern filtering is active, that's preferable to broken YAML or validation errors.

Are you comfortable proceeding with the current approach, or would you like to explore any alternative solutions (such as server-side pattern filtering, if that's feasible)?


🧠 Learnings used
Learnt from: BenjaminLangenakenSF
Repo: silverfin/silverfin-cli PR: 235
File: lib/utils/fsUtils.js:359-410
Timestamp: 2026-01-20T16:12:48.884Z
Learning: In the Silverfin CLI codebase, liquid test YAML filenames should always match the template handle (e.g., for a template with handle "my_template", the test file should be named "my_template_liquid_test.yml").

}
}

const lineAdjustments = {};
if (filteredContent) {
const filteredIndexes = findTestRows(filteredContent);
Expand Down
Loading