Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
138 changes: 138 additions & 0 deletions .github/workflows/known_good_update.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
# *******************************************************************************
# Copyright (c) 2026 Contributors to the Eclipse Foundation
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# SPDX-License-Identifier: Apache-2.0
# *******************************************************************************

name: update known_good to the latest HEAD
on:
  # Manual trigger for testing and ad-hoc runs.
  workflow_dispatch:
  # To be removed after testing; we only want to trigger on the feature branch.
  push:
    branches:
      - feature/update-known_good-to-latest-mains
  schedule:
    # Every night at 01:30 UTC. Scheduled runs always execute on the
    # repository's default branch.
    - cron: '30 1 * * *'
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Make it run every hour and simply stop if there is no diff in known_good. Maybe we should add a manual trigger for now, for testing?


jobs:
  # Computes an updated known_good.json pointing at the latest commits of all
  # module branches and publishes it as an artifact for downstream jobs.
  preparation:
    name: Preparation
    runs-on: ubuntu-latest
    outputs:
      # Name of the uploaded artifact containing the updated known_good JSON.
      known_good_artifact: ${{ steps.set_known_good.outputs.known_good_artifact }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4.2.2
      # Step name fixed: this workflow updates to the latest module HEADs,
      # it does not use a PR commit.
      - name: Create updated known_good.json with latest commits
        id: set_known_good
        run: |
          echo "Testing reference integration repository itself - updating to latest commits"
          echo "::group::get latest commits from module branches"
          python3 scripts/known_good/update_module_latest.py --output known_good.updated.json
          cat known_good.updated.json
          echo "::endgroup::"

          # Output only the artifact name for downstream jobs
          echo "known_good_artifact=known_good.updated.json" >> "$GITHUB_OUTPUT"
        env:
          # Fall back to the default workflow token when no dedicated
          # read token is configured.
          GITHUB_TOKEN: ${{ secrets.REPO_READ_TOKEN != '' && secrets.REPO_READ_TOKEN || github.token }}
      - name: Show updated known_good.json
        run: |
          echo "Updated known_good.json:"
          cat known_good.updated.json
      - name: Upload updated known_good.json artifact
        uses: actions/upload-artifact@v4
        with:
          name: known_good.updated.json
          path: known_good.updated.json
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These workflows should call the existing workflows that re-run on a PR with a changed known_good.json. Probably you should trigger all existing workflows, and they should have a conditional step to change the known_good. We don't want to repeat workflows from the PR but simply run them.


docs:
name: Generate Documentation
runs-on: ubuntu-latest
needs: preparation
steps:
- name: not implemented
run: echo "Documentation generation not yet implemented here."

integration-test:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not needed or ? as said, the required jobs are jobs from PR

name: Integration Testing (${{ matrix.config }})
needs: preparation
strategy:
fail-fast: false
matrix:
config:
- x86_64-linux
# - arm64-linux
# Add more configs here as needed
# - arm64-qnx8_0
# - x86_64-qnx8_0
uses: ./.github/workflows/reusable_integration-build.yml
secrets: inherit
with:
known_good_artifact: ${{ needs.preparation.outputs.known_good_artifact }}
config: ${{ matrix.config }}
repo_runner_labels: ${{ vars.REPO_RUNNER_LABELS != '' && vars.REPO_RUNNER_LABELS || 'ubuntu-latest' }}
target_branch: ${{ github.ref }}

summary:
name: Publish Summary
runs-on: ubuntu-latest
needs: [integration-test, docs]
if: always()
steps:
- name: Checkout repository
uses: actions/checkout@v4.2.2
# get all artefacts from integration-test job with name bazel-build-logs-*
- name: Download Integration Test Artifacts
uses: actions/download-artifact@v4
with:
pattern: bazel-build-logs-*
path: _logs/integration_test_logs
merge-multiple: true
- name: Publish Integration Test Summary
run: |
ls -l _logs/integration_test_logs || true
python3 scripts/publish_integration_summary.py \
--integration-result "${{ needs.integration-test.result }}" \
--docs-result "${{ needs.docs.result }}" \
--logs-dir "_logs/integration_test_logs" \
>> "$GITHUB_STEP_SUMMARY"

update_known_good:
name: Update Known Good
needs: [summary, preparation, docs, integration-test]
#only in all previous steps succeeded, otherwise we might update to a known_good that is not actually good
if: needs.preparation.result == 'success' && needs.docs.result == 'success' && needs.integration-test.result == 'success'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4.2.2
- name: Download known_good artifact
uses: actions/download-artifact@v4
with:
name: ${{ needs.preparation.outputs.known_good_artifact }}
- name: update known_good.json
run: cp "${{ needs.preparation.outputs.known_good_artifact }}" known_good.json
- name: Show updated known_good.json
run: |
echo "Updated known_good.json to be committed:"
cat known_good.json
- name: update score_modules.MODULE.bazel
run: |
python3 scripts/known_good/update_module_from_known_good.py --known known_good.json
cat score_modules.MODULE.bazel
env:
GITHUB_TOKEN: ${{ secrets.REPO_READ_TOKEN != '' && secrets.REPO_READ_TOKEN || github.token }}
- name: Commit and push known_good.json
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add known_good.json score_modules.MODULE.bazel
git commit -m "Update known_good.json" || echo "No changes to commit"
git push origin HEAD:${{ github.ref }}
14 changes: 7 additions & 7 deletions .github/workflows/reusable_integration-build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,8 @@ on:
description: 'GitHub token with read access to the score modules. Defaults to github.token'
required: false
inputs:
known_good:
description: 'Content of the known_good.json file to use for the integration test.'
known_good_artifact:
description: 'Name of the uploaded artifact containing the known_good JSON file.'
required: true
type: string
config:
Expand Down Expand Up @@ -102,13 +102,13 @@ jobs:
disk-cache: ${{ inputs.config }}
# Share repository cache between workflows.
repository-cache: true
- name: Download known_good artifact
uses: actions/download-artifact@v4
with:
name: ${{ inputs.known_good_artifact }}
- name: Update known good commits
run: |
echo "::group::write known_good.json from input"
# write the known_good.json from input
cat > known_good.updated.json <<'EOF'
${{ inputs.known_good }}
EOF
echo "::group::known_good.updated.json"
cat known_good.updated.json
echo "::endgroup::"

Expand Down
10 changes: 4 additions & 6 deletions .github/workflows/reusable_smoke-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ jobs:
name: Preparation
runs-on: ubuntu-latest
outputs:
known_good_updated: ${{ steps.set_known_good.outputs.known_good_updated }}
known_good_artifact: ${{ steps.set_known_good.outputs.known_good_artifact }}
steps:
- name: Checkout repository
uses: actions/checkout@v4.2.2
Expand All @@ -107,10 +107,8 @@ jobs:
echo "::endgroup::"
fi

# Output the content as a JSON string
echo "known_good_updated<<EOF" >> $GITHUB_OUTPUT
cat known_good.updated.json >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
# Output only the artifact name for downstream jobs
echo "known_good_artifact=known_good.updated.json" >> $GITHUB_OUTPUT
env:
GITHUB_TOKEN: ${{ secrets.REPO_READ_TOKEN != '' && secrets.REPO_READ_TOKEN || github.token }}
- name: Show updated known_good.json
Expand Down Expand Up @@ -146,7 +144,7 @@ jobs:
uses: ./.github/workflows/reusable_integration-build.yml
secrets: inherit
with:
known_good: ${{ needs.preparation.outputs.known_good_updated }}
known_good_artifact: ${{ needs.preparation.outputs.known_good_artifact }}
config: ${{ matrix.config }}
repo_runner_labels: ${{ inputs.repo_runner_labels }}
target_branch: ${{ inputs.target_branch }}
Expand Down
29 changes: 14 additions & 15 deletions scripts/known_good/known_good_to_workspace_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,22 +29,21 @@ def main():
except ValueError as e:
raise SystemExit(f"ERROR: {e}")

modules = list(known_good.modules.values())

gita_metadata = []
for module in modules:
if not module.repo:
raise RuntimeError(f"Module {module.name}: repo must not be empty")

# if no hash is given, use branch
hash_value = module.hash if module.hash else module.branch

# workspace_path is not available in known_good.json, default to name of repository
workspace_path = module.name

# gita format: {url},{name},{path},{prop['type']},{repo_flags},{branch}
row = [module.repo, module.name, workspace_path, "", "", hash_value]
gita_metadata.append(row)
for group_modules in known_good.modules.values():
for module in group_modules.values():
if not module.repo:
raise RuntimeError(f"Module {module.name}: repo must not be empty")

# if no hash is given, use branch
hash_value = module.hash if module.hash else module.branch

# workspace_path is not available in known_good.json, default to name of repository
workspace_path = module.name

# gita format: {url},{name},{path},{prop['type']},{repo_flags},{branch}
row = [module.repo, module.name, workspace_path, "", "", hash_value]
gita_metadata.append(row)

with open(args.gita_workspace, "w", newline="") as f:
writer = csv.writer(f)
Expand Down
7 changes: 0 additions & 7 deletions scripts/known_good/models/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ class Module:
bazel_patches: list[str] | None = None
metadata: Metadata = field(default_factory=Metadata)
branch: str = "main"
pin_version: bool = False

@classmethod
def from_dict(cls, name: str, module_data: Dict[str, Any]) -> Module:
Expand All @@ -86,8 +85,6 @@ def from_dict(cls, name: str, module_data: Dict[str, Any]) -> Module:
}
If not present, uses default Metadata values.
- branch (str, optional): Git branch name (default: main)
- pin_version (bool, optional): If true, module hash is not updated
to latest HEAD by update scripts (default: false)

Returns:
Module instance
Expand Down Expand Up @@ -122,7 +119,6 @@ def from_dict(cls, name: str, module_data: Dict[str, Any]) -> Module:
metadata = Metadata()

branch = module_data.get("branch", "main")
pin_version = module_data.get("pin_version", False)

return cls(
name=name,
Expand All @@ -132,7 +128,6 @@ def from_dict(cls, name: str, module_data: Dict[str, Any]) -> Module:
bazel_patches=bazel_patches if bazel_patches else None,
metadata=metadata,
branch=branch,
pin_version=pin_version,
)

@classmethod
Expand Down Expand Up @@ -193,6 +188,4 @@ def to_dict(self) -> Dict[str, Any]:
result["bazel_patches"] = self.bazel_patches
if self.branch and self.branch != "main":
result["branch"] = self.branch
if self.pin_version:
result["pin_version"] = True
return result
7 changes: 6 additions & 1 deletion scripts/known_good/override_known_good_repo.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,8 +138,13 @@ def apply_overrides(known_good: KnownGood, repo_overrides: List[str]) -> KnownGo
Returns:
Updated KnownGood instance
"""
# Flatten grouped modules into a single dict for override lookup
flat_modules: Dict[str, Module] = {}
for group_modules in known_good.modules.values():
flat_modules.update(group_modules)

# Parse and apply overrides
overrides_applied = parse_and_apply_overrides(known_good.modules, repo_overrides)
overrides_applied = parse_and_apply_overrides(flat_modules, repo_overrides)

if overrides_applied == 0:
logging.warning("No overrides were applied to any modules")
Expand Down
1 change: 0 additions & 1 deletion scripts/known_good/update_module_from_known_good.py
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,6 @@ def main() -> None:
raise SystemExit(f"ERROR: {e}")
except ValueError as e:
raise SystemExit(f"ERROR: {e}")

if not known_good.modules:
raise SystemExit("No modules found in known_good.json")

Expand Down
60 changes: 33 additions & 27 deletions scripts/known_good/update_module_latest.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,38 +131,44 @@ def main(argv: list[str]) -> int:
if args.no_gh and shutil.which("gh") is not None:
print("INFO: --no-gh specified; ignoring installed 'gh' CLI", file=sys.stderr)

for mod in known_good.modules.values():
if mod.pin_version:
print(f"{mod.name}: pinned, skipping")
continue

try:
branch = mod.branch if mod.branch else args.branch
if use_gh:
latest = fetch_latest_commit_gh(mod.owner_repo, branch)
else:
latest = fetch_latest_commit(mod.owner_repo, branch, token)

old_hash = mod.hash
if latest != old_hash:
mod.hash = latest
mod.version = None # Clear version when hash changes
if mod.version:
print(f"{mod.name}: {mod.version} -> {latest[:8]} (branch {branch})")
for group_name, group_modules in known_good.modules.items():
for mod in group_modules.values():
if not mod.branch is None:
print(f"{mod.name}: pinned, skipping")
continue

# Skip modules without a repo URL
if not mod.repo:
print(f"{mod.name}: no repo URL, skipping")
continue

try:
branch = mod.branch if mod.branch else args.branch
if use_gh:
latest = fetch_latest_commit_gh(mod.owner_repo, branch)
else:
latest = fetch_latest_commit(mod.owner_repo, branch, token)

old_hash = mod.hash
if latest != old_hash:
old_version = mod.version
mod.hash = latest
mod.version = None # Clear version when hash changes
if old_version:
print(f"{mod.name}: {old_version} -> {latest[:8]} (branch {branch})")
else:
print(f"{mod.name}: {old_hash[:8]} -> {latest[:8]} (branch {branch})")
else:
print(f"{mod.name}: {old_hash[:8]} -> {latest[:8]} (branch {branch})")
else:
print(f"{mod.name}: {old_hash[:8]} (no update)")
except Exception as e: # noqa: BLE001
failures += 1
print(f"ERROR {mod.name}: {e}", file=sys.stderr)
if args.fail_fast:
break
print(f"{mod.name}: {old_hash[:8]} (no update)")
except Exception as e: # noqa: BLE001
failures += 1
print(f"ERROR {mod.name}: {e}", file=sys.stderr)
if args.fail_fast:
break

if args.output:
try:
known_good.write(Path(args.output))
print(f"Updated JSON written to {args.output}")
except OSError as e:
print(f"ERROR: Failed writing output file: {e}", file=sys.stderr)
return 3
Expand Down