Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
121 changes: 62 additions & 59 deletions .github/workflows/self-optimize.yml
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,7 @@ jobs:
echo "## Unused Code Detection" > /tmp/unused-code-report.md
echo "" >> /tmp/unused-code-report.md

# Install ts-prune for unused export detection
npm install --no-save ts-prune
# Use ts-prune from devDependencies (already installed via npm ci)

# Detect unused exports
echo "### Unused Exports" >> /tmp/unused-code-report.md
Expand All @@ -101,8 +100,7 @@ jobs:
echo "## Code Complexity Analysis" > /tmp/complexity-report.md
echo "" >> /tmp/complexity-report.md

# Install complexity analysis tool
npm install --no-save eslint-plugin-complexity
# Use eslint-plugin-complexity from devDependencies (already installed via npm ci)
Copy link

Copilot AI Jan 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comment states that eslint-plugin-complexity is from devDependencies, but this package does not exist on npm. ESLint includes complexity analysis as built-in rules (complexity, max-depth, max-lines-per-function, max-nested-callbacks), so no additional plugin is needed. The workflow will fail when it tries to use the non-existent plugin at line 111 where it specifies "plugins": ["complexity"].

Copilot uses AI. Check for mistakes.

# Run complexity analysis
echo "Analyzing cyclomatic complexity..." >> /tmp/complexity-report.md
Expand Down Expand Up @@ -237,7 +235,12 @@ jobs:
echo "" >> /tmp/risky-code-report.md
fi

echo "risky_patterns_found=true" >> $GITHUB_OUTPUT
# Set risky_patterns_found only if any issues detected
if [[ $EVAL_COUNT -gt 0 ]] || [[ $ANY_COUNT -gt 100 ]] || [[ $TODO_COUNT -gt 0 ]] || [[ $CONSOLE_COUNT -gt 0 ]] || [[ $KEY_COUNT -gt 0 ]]; then
echo "risky_patterns_found=true" >> $GITHUB_OUTPUT
else
echo "risky_patterns_found=false" >> $GITHUB_OUTPUT
fi

- name: Commit automated fixes
id: commit-fixes
Expand All @@ -255,7 +258,14 @@ jobs:

[skip ci]" || echo "No changes to commit"

git push origin ${{ github.event.pull_request.head.ref }} || echo "Push failed"
# Push to PR branch with proper error handling
# Note: If push fails due to conflicts, fail the job and post a comment
if ! git push origin ${{ github.event.pull_request.head.ref }}; then
echo "push_failed=true" >> $GITHUB_OUTPUT
echo "❌ Push to PR branch failed. This may be due to concurrent updates on the branch." >&2
exit 1
fi
echo "push_failed=false" >> $GITHUB_OUTPUT

- name: Generate comprehensive PR comment
id: generate-comment
Expand All @@ -264,51 +274,41 @@ jobs:
echo "" >> /tmp/pr-comment.md
echo "This PR has been analyzed for code quality, security, and optimization opportunities." >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md

# Add each report section
cat /tmp/eslint-report.md >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
# Add concise summary only
echo "### 📊 Quick Summary" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md

cat /tmp/unused-code-report.md >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
# Count issues from reports
UNUSED_COUNT=$(grep -c "used in module" /tmp/unused-exports.txt 2>/dev/null || echo "0")
TODO_COUNT=$(grep -r "TODO\|FIXME" src/ --include="*.ts" 2>/dev/null | wc -l || echo "0")
CONSOLE_COUNT=$(grep -r "console.log" src/ --include="*.ts" --exclude="*logger*" 2>/dev/null | wc -l || echo "0")

cat /tmp/complexity-report.md >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
echo "- 🔍 **Unused Exports**: $UNUSED_COUNT found" >> /tmp/pr-comment.md
echo "- 📝 **TODO/FIXME Comments**: $TODO_COUNT found" >> /tmp/pr-comment.md
echo "- ⚠️ **console.log() Usage**: $CONSOLE_COUNT instances" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md

cat /tmp/coverage-report.md >> /tmp/pr-comment.md
echo "### 📦 Detailed Reports" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
echo "Full analysis reports are available as workflow artifacts:" >> /tmp/pr-comment.md
echo "- Download artifacts from the **Actions** tab to view detailed reports" >> /tmp/pr-comment.md
echo "- Reports include: ESLint fixes, unused code analysis, complexity metrics, coverage gaps, and risky patterns" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md

cat /tmp/risky-code-report.md >> /tmp/pr-comment.md
echo "### ✅ Actions Taken" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
echo "- Automated ESLint fixes applied where safe" >> /tmp/pr-comment.md
echo "- Code formatted according to style guide" >> /tmp/pr-comment.md
echo "- Inline comments added for manual review items" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md

# Add summary
echo "" >> /tmp/pr-comment.md
echo "### 📊 Summary" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "- ✅ Automated fixes have been applied where safe" >> /tmp/pr-comment.md
echo "- 📝 Review the reports above for manual attention items" >> /tmp/pr-comment.md
echo "- 🔍 Check inline comments for specific recommendations" >> /tmp/pr-comment.md
echo "- ⚠️ Address any flagged security or complexity issues" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "### Next Steps" >> /tmp/pr-comment.md
echo "### 📋 Next Steps" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "1. Review automated changes committed by this workflow" >> /tmp/pr-comment.md
echo "2. Address any flagged security or complexity issues" >> /tmp/pr-comment.md
echo "3. Consider refactoring high-complexity functions" >> /tmp/pr-comment.md
echo "4. Add tests for low-coverage areas" >> /tmp/pr-comment.md
echo "5. Remove or document TODO/FIXME items" >> /tmp/pr-comment.md
echo "2. Address any flagged security or complexity issues in inline comments" >> /tmp/pr-comment.md
echo "3. Download and review detailed reports from workflow artifacts" >> /tmp/pr-comment.md
echo "4. Consider refactoring high-complexity functions" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
echo "---" >> /tmp/pr-comment.md
echo "" >> /tmp/pr-comment.md
Expand Down Expand Up @@ -365,7 +365,8 @@ jobs:
.split('\n')
.filter(f => f.endsWith('.ts') || f.endsWith('.tsx'));

const comments = [];
// Use a Map to deduplicate comments by file:line
const commentsMap = new Map();

// Parse complexity issues
try {
Expand All @@ -378,54 +379,56 @@ jobs:

for (const message of result.messages) {
if (message.ruleId && (message.ruleId.includes('complexity') || message.ruleId.includes('max-'))) {
comments.push({
path: file,
line: message.line,
body: `⚠️ **${message.ruleId}**: ${message.message}\n\n**Suggestion:** Consider refactoring this function to reduce complexity and improve maintainability.`
});
const key = `${file}:${message.line}`;
const existing = commentsMap.get(key) || { path: file, line: message.line, issues: [] };
existing.issues.push(`⚠️ **${message.ruleId}**: ${message.message}`);
commentsMap.set(key, existing);
}
}
}
} catch (e) {
console.log('No complexity issues to comment on');
}

// Add comments for TODO/FIXME
// Add comments for TODO/FIXME, console.log, eval()
for (const file of changedFiles) {
try {
const content = fs.readFileSync(file, 'utf8');
const lines = content.split('\n');

lines.forEach((line, index) => {
const lineNum = index + 1;
const key = `${file}:${lineNum}`;
const existing = commentsMap.get(key) || { path: file, line: lineNum, issues: [] };

if (line.includes('TODO') || line.includes('FIXME')) {
comments.push({
path: file,
line: index + 1,
body: '📝 **Technical Debt Detected**: This TODO/FIXME should be addressed before merging to production.\n\n**Action Required:** Either resolve the issue or create a tracking issue.'
});
existing.issues.push('📝 **Technical Debt**: TODO/FIXME should be addressed before merging to production.');
}

if (line.includes('console.log') && !file.includes('logger')) {
comments.push({
path: file,
line: index + 1,
body: '⚠️ **Logging Issue**: Using console.log in production code.\n\n**Recommendation:** Replace with proper logger utility from `src/utils/logger.ts`.'
});
existing.issues.push('⚠️ **Logging Issue**: Using console.log in production code. Replace with proper logger.');
}

if (line.includes('eval(')) {
comments.push({
path: file,
line: index + 1,
body: '🚨 **Security Risk**: eval() is dangerous and should be avoided.\n\n**Action Required:** Refactor to use safer alternatives. This is a critical security issue.'
});
existing.issues.push('🚨 **Security Risk**: eval() is dangerous and should be avoided. This is a critical security issue.');
}

if (existing.issues.length > 0) {
commentsMap.set(key, existing);
}
});
} catch (e) {
console.log(`Could not analyze file: ${file}`);
}
Comment on lines 420 to 422
Copy link

Copilot AI Jan 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The error handling at lines 420-422 silently logs errors when files cannot be analyzed, but continues processing. This is good for robustness. However, the error message "Could not analyze file:" could be more informative about why the file couldn't be analyzed (e.g., file doesn't exist, permission denied, etc.). Consider logging the actual error message: `console.log(`Could not analyze file: ${file} - ${e.message}`)`

Copilot uses AI. Check for mistakes.
}

// Convert Map to array and format comments
const comments = Array.from(commentsMap.values()).map(c => ({
path: c.path,
line: c.line,
body: c.issues.join('\n\n')
}));

// Post inline comments (max 50 to avoid rate limits)
const limitedComments = comments.slice(0, 50);

Expand Down
3 changes: 3 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -81,9 +81,12 @@
"@typescript-eslint/eslint-plugin": "^6.13.1",
"@typescript-eslint/parser": "^6.13.1",
"eslint": "^8.54.0",
"eslint-plugin-complexity": "^1.0.2",
"jest": "^29.7.0",
"jscpd": "^4.0.5",
"ts-jest": "^29.1.1",
"ts-node": "^10.9.1",
"ts-prune": "^0.10.3",
"typescript": "^5.3.2"
}
}
2 changes: 0 additions & 2 deletions scripts/analyze-coverage-gaps.js
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@

const fs = require('fs');
const path = require('path');
const { execSync } = require('child_process');

const OUTPUT_DIR = '/tmp/coverage-analysis';

Expand Down Expand Up @@ -138,7 +137,6 @@ function extractFunctions(filePath) {
* Generate test template for a file
Copy link

Copilot AI Jan 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

While the unused import execSync has been correctly removed from line 12, the JSDoc comment at lines 137-138 stating "Generate test template for a file" remains minimal. Since the relativePath variable was also removed, it would be helpful to confirm in the commit message or code review whether that variable was truly unused or whether functionality is missing. The function generateTestTemplate appears to work correctly without it, but this should be verified.

Suggested change
* Generate test template for a file
* Generate a Jest test file template for the given TypeScript source file.
*
* The template imports exported functions from the module identified by the
* basename of {@link filePath} and creates basic test scaffolding for them.
*
* @param {string} filePath Absolute or project-relative path to the .ts file under test.
* @returns {string} A Jest test file template for the specified source file.

Copilot uses AI. Check for mistakes.
*/
function generateTestTemplate(filePath) {
const relativePath = path.relative(process.cwd(), filePath);
const fileName = path.basename(filePath, '.ts');
const functions = extractFunctions(filePath);

Expand Down
33 changes: 9 additions & 24 deletions scripts/analyze-dead-code.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# Dead Code Detection and Analysis Script
# This script identifies unused exports, unreachable code, and other dead code patterns

set -e
set -euo pipefail # Exit on error, undefined variables, and pipeline failures

OUTPUT_DIR="/tmp/dead-code-analysis"
mkdir -p "$OUTPUT_DIR"
Expand All @@ -14,10 +14,7 @@ echo "🔍 Starting Dead Code Analysis..."
analyze_unused_exports() {
echo "Analyzing unused exports..."

if ! command -v ts-prune &> /dev/null; then
npm install --no-save ts-prune
fi

# Use ts-prune from devDependencies (no dynamic install)
npx ts-prune --error > "$OUTPUT_DIR/unused-exports.txt" 2>&1 || true

UNUSED_COUNT=$(grep -c "used in module" "$OUTPUT_DIR/unused-exports.txt" 2>/dev/null || echo "0")
Expand All @@ -42,32 +39,20 @@ detect_unreachable_code() {
find_unused_imports() {
echo "Finding unused imports..."

# This is a simple heuristic - more sophisticated tools exist
find src/ -name "*.ts" -type f | while read -r file; do
# Extract imports
grep "^import.*from" "$file" | sed "s/import.*{\(.*\)}.*/\1/" | tr ',' '\n' | while read -r import; do
clean_import=$(echo "$import" | xargs)
if [[ -n "$clean_import" ]]; then
# Check if imported item is used in file
if ! grep -q "$clean_import" "$file" | grep -v "^import"; then
echo "$file: Potentially unused import: $clean_import"
fi
fi
done
done > "$OUTPUT_DIR/unused-imports.txt" 2>&1 || true
# Use ts-prune for proper AST-based unused import detection
# This replaces the fragile grep-based heuristics
echo "Using ts-prune for unused export detection (see unused-exports.txt)" > "$OUTPUT_DIR/unused-imports.txt"
echo "Note: ts-prune identifies unused exports; ESLint's no-unused-vars handles unused imports" >> "$OUTPUT_DIR/unused-imports.txt"

UNUSED_IMPORT_COUNT=$(wc -l < "$OUTPUT_DIR/unused-imports.txt" 2>/dev/null || echo "0")
echo "Found $UNUSED_IMPORT_COUNT potentially unused imports"
UNUSED_IMPORT_COUNT=0
echo "Unused imports are handled by ESLint no-unused-vars rule"
}

# Function to detect duplicate code
detect_duplicate_code() {
echo "Detecting code duplication..."

if ! command -v jscpd &> /dev/null; then
npm install --no-save jscpd
fi

# Use jscpd from devDependencies (no dynamic install)
npx jscpd src/ --format json --output "$OUTPUT_DIR" --min-lines 10 --min-tokens 50 2>&1 || true

if [[ -f "$OUTPUT_DIR/jscpd-report.json" ]]; then
Expand Down
2 changes: 1 addition & 1 deletion scripts/validate-dev-branch.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# Dev Branch Validation Script
# Validates that the dev branch is production-ready and fully synced

set -e # Exit on error
set -euo pipefail # Exit on error, undefined variables, and pipeline failures

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔍 Dev Branch Production Readiness Validation"
Expand Down
Loading