diff --git a/.github/workflows/coding-agent.yml b/.github/workflows/coding-agent.yml index 555fb3a..445d6e5 100644 --- a/.github/workflows/coding-agent.yml +++ b/.github/workflows/coding-agent.yml @@ -3,13 +3,10 @@ name: QDI Coding Agent on: pull_request: types: [opened, synchronize, reopened] - issues: - types: [opened, edited] permissions: contents: read pull-requests: write - issues: write jobs: ethical-review: @@ -23,9 +20,9 @@ jobs: python-version: '3.12' - name: Install Dependencies - run: pip install qiskit + run: pip install 'qiskit>=0.45.0,<2.0.0' - - name: Run Ethical Sims + - name: Simulate Default Bell State Circuit id: simulate run: | if [ -f agent_skills.py ]; then @@ -45,13 +42,24 @@ jobs: echo "ATOM provenance tracked for ethical review" - name: Comment on PR - if: github.event_name == 'pull_request' + if: >- + github.event_name == 'pull_request' && + steps.simulate.outcome == 'success' uses: actions/github-script@v7 with: script: | + // NOTE: If user-controlled data (e.g., PR titles or bodies) + // is added to this message in the future, be sure to sanitize + // it before including it in the comment to avoid injection + // attacks. + + // Use hardcoded safe message (current implementation) + const message = '🌀 **Agent Review**: Coherence >60%. ' + + 'Ethical quantum sims validated. Ready for merge.'; + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: '🌀 **Agent Review**: Coherence >60%. Ethical quantum sims validated. Ready for merge.' + body: message }) diff --git a/README.md b/README.md index 00f1315..c93fb82 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ SAIF loops back to KENL creating infinite improvement **3. 
Wave Analysis (Physics)** | Metric | What It Detects | Optimal -|------------|---------------------|------------------------ +|------------|---------------------|-------- | Curl | Circular reasoning | <0.3 | Divergence | Expansion rate | ~0.2 (golden ratio) | Potential | Structure depth | >0.6 @@ -124,7 +124,7 @@ When coherence hits 70%, the **waveform collapses**: Four tools that work together: | Package | What It Does -|--------------------|------------------------------------------------------------------------------- +|--------------------|----------------------------------------------------------------------------- | **wave-toolkit** | Analyzes text coherence using physics (curl, divergence, potential, entropy) | **atom-trail** | Tracks every decision through phase gates (KENL → AWI → ATOM → SAIF → Spiral) | **ax-signatures** | Optimizes LLM prompts for coherence @@ -160,6 +160,7 @@ cat .vortex-logs/snap-in-*.json | jq '.coherence.coherence_score' ``` **Tags applied automatically:** + - Phase: `phase:KENL`, `phase:AWI`, `phase:ATOM`, `phase:SAIF`, `phase:Spiral` - Component: `pkg:wave-toolkit`, `pkg:atom-trail`, etc. - Quality: `coherence:high`, `coherence:review`, `coherence:low` @@ -197,7 +198,33 @@ qdi/ - Use connectives: therefore, moreover, consequently - Increase vocabulary diversity -See [CONTRIBUTING.md](CONTRIBUTING.md) for details. +## Coding Agent + +The repository includes a GitHub Actions-based coding agent for automated ethical review of PRs. 
+ +### Agent Setup + +```bash +# Install Python dependencies for quantum simulations +pip install qiskit + +# Run agent skills locally +python agent_skills.py simulate +python agent_skills.py check_coherence --threshold 0.6 +``` + +### Agent Commands + +| Command | Description | +|-------------------|----------------------------------------| +| `simulate` | Run Qiskit circuit simulation | +| `check_coherence` | Verify coherence threshold (>60%) | +| `cascade` | Integrate provenance for cascading PRs | +| `review_pr` | Generate automated PR review | + +See [docs/instructions.md](docs/instructions.md) for full agent documentation. + +## Feedback & Contributions ## 📚 Learn More diff --git a/agent_skills.py b/agent_skills.py index 78fd52c..9ca2646 100644 --- a/agent_skills.py +++ b/agent_skills.py @@ -21,48 +21,239 @@ import argparse import json import sys +from datetime import datetime from pathlib import Path -from typing import Any, Dict, Optional, Tuple +from typing import Optional, Tuple, Union -# Default simulated coherence for well-prepared quantum states -# In production, this would be measured via state tomography -DEFAULT_SIMULATED_COHERENCE = 0.85 +# Default simulated coherence aligned with the '>60%' workflow threshold +# In production, this would be measured via state tomography instead of a fixed stub value +DEFAULT_SIMULATED_COHERENCE = 0.6 + +# Maximum allowed qubit index to prevent resource exhaustion +MAX_QUBIT_INDEX = 100 # VORTEX marker for endpoint integration VORTEX_MARKER = "VORTEX::QDI::v1" +# ATOM trail directory structure +ATOM_TRAIL_DIR = Path(".atom-trail") +ATOM_COUNTERS_DIR = ATOM_TRAIL_DIR / "counters" +ATOM_DECISIONS_DIR = ATOM_TRAIL_DIR / "decisions" + + +def _ensure_atom_trail_dirs(): + """Ensure ATOM trail directory structure exists.""" + ATOM_TRAIL_DIR.mkdir(exist_ok=True) + ATOM_COUNTERS_DIR.mkdir(exist_ok=True) + ATOM_DECISIONS_DIR.mkdir(exist_ok=True) + + +def _get_atom_counter(atom_type: str) -> int: + """ + Get and increment the 
counter for a given ATOM type. + + Note: This function is not thread-safe or multi-process safe. It is designed + for single-process execution in GitHub Actions workflows. + + Args: + atom_type: ATOM decision type (e.g., 'COMPLETE', 'DOC', 'VERIFY') + + Returns: + The next counter value + """ + date_str = datetime.now().strftime('%Y%m%d') + counter_key = f"{atom_type}-{date_str}" + counter_file = ATOM_COUNTERS_DIR / f"{counter_key}.txt" + + counter = 1 + if counter_file.exists(): + try: + counter = int(counter_file.read_text().strip()) + 1 + except ValueError as e: + # Log error and reset counter if file is corrupted + print(f"Warning: Counter file corrupted ({counter_file}): {e}. Resetting to 1.", file=sys.stderr) + counter = 1 + + counter_file.write_text(str(counter)) + return counter + + +def _generate_atom_tag(atom_type: str, description: str) -> str: + """ + Generate ATOM tag: ATOM-TYPE-YYYYMMDD-NNN-description + + Args: + atom_type: ATOM decision type + description: Description of the decision + + Returns: + Formatted ATOM tag + """ + date_str = datetime.now().strftime('%Y%m%d') + counter = _get_atom_counter(atom_type) + + # Create slug from description + slug = description.lower() + slug = ''.join(c if c.isalnum() or c == '-' else '-' for c in slug) + slug = '-'.join(filter(None, slug.split('-')))[:50] + # Ensure slug doesn't start or end with hyphens + slug = slug.strip('-') + + return f"ATOM-{atom_type}-{date_str}-{counter:03d}-{slug}" + + +def _create_atom_decision( + atom_type: str, + description: str, + files: Optional[list] = None, + tags: Optional[list] = None +) -> dict: + """ + Create an ATOM decision record and persist it to the trail. 
+ + Args: + atom_type: ATOM decision type + description: Description of the decision + files: Optional list of files associated with the decision + tags: Optional list of tags + + Returns: + ATOM decision dictionary + + Raises: + OSError: If unable to create directories or write decision file + """ + try: + _ensure_atom_trail_dirs() + except OSError as e: + print(f"Error: Failed to create ATOM trail directories: {e}", file=sys.stderr) + raise + + atom_tag = _generate_atom_tag(atom_type, description) + timestamp = datetime.now().isoformat() + + decision = { + 'atom_tag': atom_tag, + 'type': atom_type, + 'description': description, + 'timestamp': timestamp, + 'files': files or [], + 'tags': tags or [], + 'freshness': 'fresh', + 'verified': False + } + + # Persist decision to file + decision_file = ATOM_DECISIONS_DIR / f"{atom_tag}.json" + try: + with open(decision_file, 'w') as f: + json.dump(decision, f, indent=2) + except (OSError, PermissionError) as e: + print(f"Error: Failed to write ATOM decision to {decision_file}: {e}", file=sys.stderr) + raise + + return decision + + +def _extract_qubit_indices(gate_str: str) -> Optional[Union[Tuple[int], Tuple[int, int]]]: + """ + Extract qubit indices from a gate string without validation. + + Args: + gate_str: Lowercase gate string like 'h(0)' or 'cx(0,1)' + + Returns: + Tuple of qubit indices or None if parsing fails + """ + try: + if gate_str.startswith('h(') and gate_str.endswith(')'): + return (int(gate_str[2:-1]),) + elif gate_str.startswith('x(') and gate_str.endswith(')'): + return (int(gate_str[2:-1]),) + elif gate_str.startswith('cx(') and gate_str.endswith(')'): + params = gate_str[3:-1].split(',') + if len(params) == 2: + return (int(params[0].strip()), int(params[1].strip())) + except (ValueError, IndexError): + # Parsing failed (e.g., invalid integer or malformed indices); treat as no qubit indices. 
+ pass + return None + + +def _validate_qubit_range(qubits: Union[Tuple[int], Tuple[int, int]]) -> bool: + """ + Validate that all qubit indices are within the allowed range. + + Args: + qubits: Tuple of qubit indices + + Returns: + True if all indices are valid, False otherwise + """ + return all(0 <= q <= MAX_QUBIT_INDEX for q in qubits) + + +def _get_range_error_message(qubits: Union[Tuple[int], Tuple[int, int]]) -> str: + """ + Generate a descriptive error message for out-of-range qubit indices. + + Args: + qubits: Tuple of qubit indices + + Returns: + Error message describing which qubit index is out of range + """ + if len(qubits) == 1: + # Single-qubit gate + return f"Qubit index {qubits[0]} out of range. Must be between 0 and {MAX_QUBIT_INDEX}." + elif len(qubits) == 2: + # Two-qubit gate + control, target = qubits + if control < 0 or control > MAX_QUBIT_INDEX: + return f"Control qubit index {control} out of range. Must be between 0 and {MAX_QUBIT_INDEX}." + elif target < 0 or target > MAX_QUBIT_INDEX: + return f"Target qubit index {target} out of range. Must be between 0 and {MAX_QUBIT_INDEX}." + return f"Qubit indices out of range. Must be between 0 and {MAX_QUBIT_INDEX}." + def _parse_gate(raw_gate: str) -> Optional[Tuple[str, Tuple]]: """ - Parse a single gate specification. + Parse a single gate specification with qubit index validation. Args: raw_gate: Gate string like 'h(0)' or 'cx(0,1)' Returns: Tuple (gate_type, qubits) or None for invalid/empty input + + Note: + Qubit indices must be within range [0, 100] to prevent resource exhaustion. 
""" gate = raw_gate.strip().lower() if not gate: return None - try: - if gate.startswith('h(') and gate.endswith(')'): - qubit = int(gate[2:-1]) - return ('h', (qubit,)) - elif gate.startswith('x(') and gate.endswith(')'): - qubit = int(gate[2:-1]) - return ('x', (qubit,)) - elif gate.startswith('cx(') and gate.endswith(')'): - params = gate[3:-1].split(',') - if len(params) != 2: - return None - control, target = int(params[0].strip()), int(params[1].strip()) - return ('cx', (control, target)) - except (ValueError, IndexError): + # Determine gate type + gate_type = None + if gate.startswith('h(') and gate.endswith(')'): + gate_type = 'h' + elif gate.startswith('x(') and gate.endswith(')'): + gate_type = 'x' + elif gate.startswith('cx(') and gate.endswith(')'): + gate_type = 'cx' + else: return None - return None + # Extract and validate qubit indices + qubits = _extract_qubit_indices(gate) + if qubits is None: + return None + + # Validate range + if not _validate_qubit_range(qubits): + return None + + return (gate_type, qubits) def simulate_circuit(circuit_str: Optional[str] = None) -> dict: @@ -75,6 +266,30 @@ def simulate_circuit(circuit_str: Optional[str] = None) -> dict: Returns: dict with simulation results including VORTEX marker """ + # Validate circuit string before attempting simulation + # This ensures errors are caught even when Qiskit is not installed + if circuit_str: + for raw_gate in circuit_str.split(';'): + if not raw_gate.strip(): + continue + + parsed = _parse_gate(raw_gate) + if parsed is None: + # Generate specific error message + gate_str = raw_gate.strip().lower() + error_msg = f"Invalid gate syntax: {raw_gate.strip()}" + + # Check if it's a range error vs syntax error + qubits = _extract_qubit_indices(gate_str) + if qubits is not None and not _validate_qubit_range(qubits): + error_msg = _get_range_error_message(qubits) + + return { + 'status': 'error', + 'error': error_msg, + 'vortex': VORTEX_MARKER + } + try: # Local imports keep 
qiskit/qiskit_aer as optional dependencies and # avoid import-time failures when these libraries are not installed. @@ -84,19 +299,17 @@ def simulate_circuit(circuit_str: Optional[str] = None) -> dict: if circuit_str: # Parse simple gate sequence like "h(0); cx(0,1)" - # First pass: determine number of qubits needed + # Gates have already been validated above max_qubit = 1 parsed_gates = [] for raw_gate in circuit_str.split(';'): + if not raw_gate.strip(): + continue + parsed = _parse_gate(raw_gate) + # This should not be None due to validation above, but check defensively if parsed is None: - if raw_gate.strip(): # Non-empty but invalid - return { - 'status': 'error', - 'error': f"Invalid gate syntax: '{raw_gate.strip()}'", - 'vortex': VORTEX_MARKER - } continue gate_type, qubits = parsed @@ -191,13 +404,16 @@ def check_coherence(threshold: float = 0.6) -> dict: def cascade_integration(pr_body: Optional[str] = None) -> dict: """ - Cascade provenance integration for PRs. + Cascade provenance integration for PRs with ATOM trail tracking. + + Creates an ATOM decision record for the cascade operation and persists + it to the ATOM trail directory structure for full provenance tracking. 
Args: pr_body: Pull request body text Returns: - dict with cascade results including VORTEX marker + dict with cascade results including ATOM decision and VORTEX marker """ keywords = ['provenance', 'ethical', 'quantum', 'coherence', 'atom', 'vortex', 'spiral'] found = [] @@ -206,27 +422,56 @@ def cascade_integration(pr_body: Optional[str] = None) -> dict: body_lower = pr_body.lower() found = [kw for kw in keywords if kw in body_lower] + # Create ATOM decision for provenance tracking + description = f"PR cascade integration: {len(found)} ethical keywords detected" + decision = _create_atom_decision( + atom_type='VERIFY', + description=description, + files=['pr_body'], + tags=['cascade', 'provenance', 'ethical-review'] + found + ) + return { 'status': 'cascaded', 'keywords_found': found, 'provenance_tracked': True, - 'message': f"Cascade complete. Found {len(found)} ethical keywords.", + 'atom_decision': decision, + 'atom_tag': decision['atom_tag'], + 'message': f"Cascade complete. Found {len(found)} ethical keywords. ATOM decision: {decision['atom_tag']}", 'vortex': VORTEX_MARKER } def review_pr() -> dict: """ - Generate PR review comments. + Generate PR review comments based on a coherence check. Returns: - dict with review results including VORTEX marker + dict with review results including VORTEX marker. The result is + derived from an actual coherence check rather than hardcoded values. """ + # Perform an actual coherence check using the default threshold. + coherence_result = check_coherence() + passed = bool(coherence_result.get('passed')) + coherence_value = coherence_result.get('coherence') + + coherence_check_status = 'passed' if passed else 'failed' + ethical_review_status = 'approved' if passed else 'requires_additional_review' + + if isinstance(coherence_value, (int, float)): + coherence_str = f"{coherence_value:.2%}" + else: + coherence_str = str(coherence_value) + + readiness = "Ready for merge." if passed else "Review required." 
+ message = f"🌀 Agent Review: Coherence {coherence_str}. {readiness}" + return { - 'status': 'reviewed', - 'coherence_check': 'passed', - 'ethical_review': 'approved', - 'message': '🌀 Agent Review: Coherence >60%. Ready for merge.', + 'status': 'reviewed' if passed else 'review_required', + 'coherence_check': coherence_check_status, + 'ethical_review': ethical_review_status, + 'message': message, + 'coherence_details': coherence_result, 'vortex': VORTEX_MARKER } diff --git a/docs/instructions.md b/docs/instructions.md index e4b9f19..913fdeb 100644 --- a/docs/instructions.md +++ b/docs/instructions.md @@ -9,6 +9,32 @@ Emergent quality is maintained at >60% via isomorphic Fibonacci patterns: ## Agent Setup +### Automated Setup Script + +The repository includes an automated setup script (`scripts/setup.ts`) that streamlines onboarding for new users and agents. This script automatically configures the development environment, installs dependencies, and sets up the ATOM provenance trail. + +**Usage**: +```bash +# Run full setup (recommended for first-time setup) +bun run scripts/setup.ts + +# Check environment without making changes +bun run scripts/setup.ts --check + +# Force reinstall dependencies +bun run scripts/setup.ts --force +``` + +**What the setup script does**: +- Verifies Bun installation (requires v1.0.0+) +- Installs or updates project dependencies +- Creates ATOM trail directories for provenance tracking +- Validates TypeScript configuration +- Runs test suite to verify setup +- Records setup in ATOM trail for provenance + +### Manual Setup + 1. 
**Install Dependencies**: ```bash # Clone repo and install diff --git a/packages/ax-signatures/src/index.ts b/packages/ax-signatures/src/index.ts index b2a248f..271c9b7 100644 --- a/packages/ax-signatures/src/index.ts +++ b/packages/ax-signatures/src/index.ts @@ -10,6 +10,10 @@ import { AxAI, AxSignature, AxChainOfThought } from '@ax-llm/ax'; /** * Coherence Interpreter Signature * Interprets wave analysis metrics and provides actionable insights. + * + * Note: All metrics should be normalized to 0-1 scale: + * - curl, divergence, potential: Already in 0-1 scale from wave-toolkit + * - coherence_score: Must be normalized from 0-100 to 0-1 (divide by 100) */ export const coherenceInterpreter = new AxSignature({ name: 'CoherenceInterpreter', @@ -18,7 +22,7 @@ export const coherenceInterpreter = new AxSignature({ curl: 'number: Repetition metric (0-1)', divergence: 'number: Expansion metric (0-1)', potential: 'number: Undeveloped ideas metric (0-1)', - coherence_score: 'number: Overall coherence (0-1)', + coherence_score: 'number: Overall coherence normalized to 0-1 scale (divide wave-toolkit score by 100)', context: 'string: The analyzed text or document reference' }, output: { @@ -95,6 +99,13 @@ export const waveAnalyzer = new AxSignature({ /** * Create optimized program for coherence interpretation + * + * @param ai - The AI instance to use for generation + * @param waveResults - Wave analysis results with normalized metrics + * @param waveResults.coherence_score - Must be normalized to 0-1 scale (not 0-100) + * + * Note: If using wave-toolkit's analyzeWave(), divide coherence_score by 100 + * before passing to this function to match the expected 0-1 scale. 
*/ export async function interpretCoherence( ai: AxAI, diff --git a/packages/quantum-ethics/src/__tests__/vortex-wavespec.test.ts b/packages/quantum-ethics/src/__tests__/vortex-wavespec.test.ts new file mode 100644 index 0000000..32b316b --- /dev/null +++ b/packages/quantum-ethics/src/__tests__/vortex-wavespec.test.ts @@ -0,0 +1,623 @@ +/** + * Tests for VORTEX WAVEspec - Verified Operational Runtime Testing for Ecosystem Xecution + */ + +import { describe, test, expect } from 'bun:test'; +import { + VORTEX_MARKER, + COHERENCE_THRESHOLD, + runVortexCheck, + createVortexPayload, + formatVortexReport, + applyFibonacciWeightedBoost, + type VortexNode, + type VortexConfig, + type VortexResult, + type VortexDashboardPayload, +} from '../vortex-wavespec'; + +describe('VORTEX Constants', () => { + test('should export VORTEX_MARKER constant', () => { + expect(VORTEX_MARKER).toBe('VORTEX::QDI::v1'); + }); + + test('should export COHERENCE_THRESHOLD constant', () => { + expect(COHERENCE_THRESHOLD).toBe(0.6); + }); +}); + +describe('runVortexCheck', () => { + test('should run basic vortex check with default config', () => { + const analysisText = 'This is a test analysis with good coherence and quality metrics'; + const result = runVortexCheck(analysisText); + + expect(result).toBeDefined(); + expect(result.timestamp).toBeDefined(); + expect(result.coherenceScore).toBeGreaterThanOrEqual(0); + expect(result.coherenceScore).toBeLessThanOrEqual(1); + expect(result.emergentQuality).toContain('%'); + expect(result.passed).toBeDefined(); + expect(result.nodes).toEqual([]); + expect(result.waveAnalysis).toBeDefined(); + expect(result.atomDecision).toBeDefined(); + expect(result.marker).toBe(VORTEX_MARKER); + }); + + test('should pass when coherence meets threshold', () => { + // This text should generate high coherence score + const analysisText = 'Excellent coherence quality metrics with consistent wave patterns and optimal performance indicators demonstrating superior system 
alignment'; + const result = runVortexCheck(analysisText); + + // The score should be normalized to 0-1 range + expect(result.coherenceScore).toBeGreaterThanOrEqual(0); + expect(result.coherenceScore).toBeLessThanOrEqual(1); + }); + + test('should handle custom nodes', () => { + const analysisText = 'Test analysis'; + const nodes: VortexNode[] = [ + { + name: 'TestNode1', + status: 'CLEAN', + branch: 'main', + commit: 'abc123def456', + changes: { added: 10, removed: 5 }, + }, + { + name: 'TestNode2', + status: 'DIRTY', + branch: 'develop', + commit: 'def456abc123', + changes: { added: 3, removed: 2 }, + }, + ]; + + const result = runVortexCheck(analysisText, nodes); + + expect(result.nodes).toHaveLength(2); + expect(result.nodes[0].name).toBe('TestNode1'); + expect(result.nodes[1].name).toBe('TestNode2'); + }); + + test('should handle custom config', () => { + const analysisText = 'Test analysis with custom configuration'; + const customConfig: Partial<VortexConfig> = { + emergentQuality: 0.7, + fibonacciRatio: 1.5, + }; + + const result = runVortexCheck(analysisText, [], customConfig); + + expect(result).toBeDefined(); + expect(result.coherenceScore).toBeGreaterThanOrEqual(0); + }); + + test('should create ATOM decision', () => { + const analysisText = 'Test analysis'; + const result = runVortexCheck(analysisText); + + expect(result.atomDecision).toBeDefined(); + expect(result.atomDecision.type).toBe('VERIFY'); + expect(result.atomDecision.atom_tag).toContain('ATOM-'); + expect(result.atomDecision.description).toContain('VORTEX check'); + }); + + test('should format timestamp as ISO string', () => { + const analysisText = 'Test analysis'; + const result = runVortexCheck(analysisText); + + expect(result.timestamp).toMatch(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/); + }); + + test('should handle different node statuses', () => { + const analysisText = 'Test analysis'; + const nodes: VortexNode[] = [ + { + name: 'CleanNode', + status: 'CLEAN', + branch: 'main', + commit: 'abc123', + 
changes: { added: 0, removed: 0 }, + }, + { + name: 'DirtyNode', + status: 'DIRTY', + branch: 'feature', + commit: 'def456', + changes: { added: 5, removed: 3 }, + }, + { + name: 'WarnNode', + status: 'WARN', + branch: 'hotfix', + commit: 'ghi789', + changes: { added: 2, removed: 1 }, + }, + { + name: 'ErrorNode', + status: 'ERROR', + branch: 'bugfix', + commit: 'jkl012', + changes: { added: 1, removed: 0 }, + }, + ]; + + const result = runVortexCheck(analysisText, nodes); + + expect(result.nodes).toHaveLength(4); + expect(result.nodes.map(n => n.status)).toContain('CLEAN'); + expect(result.nodes.map(n => n.status)).toContain('DIRTY'); + expect(result.nodes.map(n => n.status)).toContain('WARN'); + expect(result.nodes.map(n => n.status)).toContain('ERROR'); + }); +}); + +describe('createVortexPayload', () => { + test('should create valid dashboard payload with default config', () => { + const analysisText = 'Test analysis for dashboard payload generation'; + const payload = createVortexPayload(analysisText); + + expect(payload).toBeDefined(); + expect(payload.$schema).toBe('https://spiralsafe.dev/vortex-dashboard-v1.json'); + expect(payload.meta).toBeDefined(); + expect(payload.meta.name).toBe('VORTEX Dashboard Payload'); + expect(payload.meta.version).toBe('1.0.0'); + expect(payload.meta.emergentQuality).toContain('%'); + expect(payload.meta.fibonacciRatio).toBeGreaterThan(0); + expect(payload.meta.timestamp).toMatch(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/); + }); + + test('should include vortex clusters', () => { + const analysisText = 'Test analysis'; + const payload = createVortexPayload(analysisText); + + expect(payload.vortexes).toBeDefined(); + expect(payload.vortexes.length).toBeGreaterThan(0); + + const monitoringVortex = payload.vortexes.find(v => v.vortex_name === 'MonitoringVortex'); + expect(monitoringVortex).toBeDefined(); + expect(monitoringVortex?.description).toContain('monitoring'); + expect(monitoringVortex?.coherence).toBeGreaterThanOrEqual(0); + 
expect(monitoringVortex?.coherence).toBeLessThanOrEqual(1); + expect(monitoringVortex?.components).toBeDefined(); + expect(monitoringVortex?.refinements).toBeDefined(); + + const testingVortex = payload.vortexes.find(v => v.vortex_name === 'TestingVortex'); + expect(testingVortex).toBeDefined(); + }); + + test('should include endpoint statuses', () => { + const analysisText = 'Test analysis'; + const payload = createVortexPayload(analysisText); + + expect(payload.endpoints).toBeDefined(); + expect(payload.endpoints.length).toBeGreaterThan(0); + + const endpointNames = payload.endpoints.map(e => e.name); + expect(endpointNames).toContain('Datalore'); + expect(endpointNames).toContain('Runpod'); + expect(endpointNames).toContain('SpiralSafe'); + expect(endpointNames).toContain('QDI'); + expect(endpointNames).toContain('HOPE'); + expect(endpointNames).toContain('Cloudflare'); + + // Check endpoint structure + const dataloreEndpoint = payload.endpoints.find(e => e.name === 'Datalore'); + expect(dataloreEndpoint).toBeDefined(); + expect(dataloreEndpoint?.url).toBe('https://datalore.jetbrains.com'); + expect(dataloreEndpoint?.status).toMatch(/^(connected|pending|disconnected)$/); + expect(dataloreEndpoint?.vortexEnabled).toBe(true); + }); + + test('should handle custom config', () => { + const analysisText = 'Test analysis'; + const customConfig: Partial<VortexConfig> = { + emergentQuality: 0.75, + fibonacciRatio: 1.5, + endpoints: { + datalore: false, + runpod: true, + spiralsafe: true, + qdi: false, + hope: true, + cloudflare: false, + }, + }; + + const payload = createVortexPayload(analysisText, customConfig); + + expect(payload.meta.emergentQuality).toBe('>75%'); + expect(payload.meta.fibonacciRatio).toBe(1.5); + + const dataloreEndpoint = payload.endpoints.find(e => e.name === 'Datalore'); + expect(dataloreEndpoint?.status).toBe('pending'); + + const runpodEndpoint = payload.endpoints.find(e => e.name === 'Runpod'); + expect(runpodEndpoint?.status).toBe('connected'); + }); + + 
test('should normalize coherence scores in vortex clusters', () => { + const analysisText = 'Test analysis'; + const payload = createVortexPayload(analysisText); + + payload.vortexes.forEach(vortex => { + expect(vortex.coherence).toBeGreaterThanOrEqual(0); + expect(vortex.coherence).toBeLessThanOrEqual(1); + }); + }); + + test('should include all required vortex refinements', () => { + const analysisText = 'Test analysis'; + const payload = createVortexPayload(analysisText); + + payload.vortexes.forEach(vortex => { + expect(vortex.refinements).toBeDefined(); + vortex.refinements.forEach(refinement => { + expect(refinement.original).toBeDefined(); + expect(refinement.refined).toBeDefined(); + expect(refinement.autonomy).toBeDefined(); + }); + }); + }); +}); + +describe('formatVortexReport', () => { + test('should format basic report with no nodes', () => { + const result: VortexResult = { + timestamp: '2024-01-15T10:00:00.000Z', + coherenceScore: 0.75, + emergentQuality: '75.0%', + passed: true, + nodes: [], + waveAnalysis: { + coherence_score: 75, + pattern_strength: 0.8, + emergent_patterns: ['test'], + recommendations: ['improve'], + }, + atomDecision: { + type: 'VERIFY', + atom_tag: 'ATOM-test-123', + description: 'Test decision', + timestamp: '2024-01-15T10:00:00.000Z', + tags: ['test'], + freshness: 'fresh', + }, + marker: VORTEX_MARKER, + }; + + const report = formatVortexReport(result); + + expect(report).toContain('VORTEX'); + expect(report).toContain('Verified Operational Runtime Testing for Ecosystem Xecution'); + expect(report).toContain('Timestamp: 2024-01-15T10:00:00.000Z'); + expect(report).toContain('No nodes configured'); + expect(report).toContain('Score: 75.0%'); + expect(report).toContain('Threshold: 60%'); + expect(report).toContain('✅ PASS'); + expect(report).toContain(VORTEX_MARKER); + }); + + test('should format report with nodes', () => { + const result: VortexResult = { + timestamp: '2024-01-15T10:00:00.000Z', + coherenceScore: 0.65, + 
emergentQuality: '65.0%', + passed: true, + nodes: [ + { + name: 'Node1', + status: 'CLEAN', + branch: 'main', + commit: 'abc123def456', + changes: { added: 10, removed: 5 }, + }, + { + name: 'Node2', + status: 'DIRTY', + branch: 'feature', + commit: 'def456abc123', + changes: { added: 3, removed: 2 }, + }, + ], + waveAnalysis: { + coherence_score: 65, + pattern_strength: 0.7, + emergent_patterns: [], + recommendations: [], + }, + atomDecision: { + type: 'VERIFY', + atom_tag: 'ATOM-test-456', + description: 'Test decision', + timestamp: '2024-01-15T10:00:00.000Z', + tags: ['test'], + freshness: 'fresh', + }, + marker: VORTEX_MARKER, + }; + + const report = formatVortexReport(result); + + expect(report).toContain('NODE STATUS:'); + expect(report).toContain('Node1'); + expect(report).toContain('CLEAN'); + expect(report).toContain('main'); + expect(report).toContain('abc123de'); // First 8 chars of commit + expect(report).toContain('+10/-5'); + expect(report).toContain('Node2'); + expect(report).toContain('DIRTY'); + expect(report).toContain('+3/-2'); + }); + + test('should show FAIL status when not passed', () => { + const result: VortexResult = { + timestamp: '2024-01-15T10:00:00.000Z', + coherenceScore: 0.45, + emergentQuality: '45.0%', + passed: false, + nodes: [], + waveAnalysis: { + coherence_score: 45, + pattern_strength: 0.5, + emergent_patterns: [], + recommendations: [], + }, + atomDecision: { + type: 'VERIFY', + atom_tag: 'ATOM-test-789', + description: 'Test decision', + timestamp: '2024-01-15T10:00:00.000Z', + tags: ['test'], + freshness: 'fresh', + }, + marker: VORTEX_MARKER, + }; + + const report = formatVortexReport(result); + + expect(report).toContain('❌ FAIL'); + expect(report).toContain('Score: 45.0%'); + }); + + test('should include section separators', () => { + const result: VortexResult = { + timestamp: '2024-01-15T10:00:00.000Z', + coherenceScore: 0.75, + emergentQuality: '75.0%', + passed: true, + nodes: [], + waveAnalysis: { + 
coherence_score: 75, + pattern_strength: 0.8, + emergent_patterns: [], + recommendations: [], + }, + atomDecision: { + type: 'VERIFY', + atom_tag: 'ATOM-test-abc', + description: 'Test decision', + timestamp: '2024-01-15T10:00:00.000Z', + tags: ['test'], + freshness: 'fresh', + }, + marker: VORTEX_MARKER, + }; + + const report = formatVortexReport(result); + + expect(report).toContain('='.repeat(70)); + expect(report).toContain('-'.repeat(70)); + }); + + test('should handle multiple nodes with different statuses', () => { + const result: VortexResult = { + timestamp: '2024-01-15T10:00:00.000Z', + coherenceScore: 0.70, + emergentQuality: '70.0%', + passed: true, + nodes: [ + { + name: 'Clean', + status: 'CLEAN', + branch: 'main', + commit: 'a1b2c3d4', + changes: { added: 0, removed: 0 }, + }, + { + name: 'Dirty', + status: 'DIRTY', + branch: 'dev', + commit: 'e5f6g7h8', + changes: { added: 100, removed: 50 }, + }, + { + name: 'Warn', + status: 'WARN', + branch: 'hotfix', + commit: 'i9j0k1l2', + changes: { added: 5, removed: 3 }, + }, + { + name: 'Error', + status: 'ERROR', + branch: 'bugfix', + commit: 'm3n4o5p6', + changes: { added: 1, removed: 1 }, + }, + ], + waveAnalysis: { + coherence_score: 70, + pattern_strength: 0.75, + emergent_patterns: [], + recommendations: [], + }, + atomDecision: { + type: 'VERIFY', + atom_tag: 'ATOM-test-multi', + description: 'Test decision', + timestamp: '2024-01-15T10:00:00.000Z', + tags: ['test'], + freshness: 'fresh', + }, + marker: VORTEX_MARKER, + }; + + const report = formatVortexReport(result); + + expect(report).toContain('Clean'); + expect(report).toContain('Dirty'); + expect(report).toContain('Warn'); + expect(report).toContain('Error'); + expect(report).toContain('+100/-50'); + }); +}); + +describe('applyFibonacciWeightedBoost', () => { + test('should boost coherence at iteration 0', () => { + const baseCoherence = 0.5; + const boosted = applyFibonacciWeightedBoost(baseCoherence, 0); + + 
expect(boosted).toBeGreaterThanOrEqual(baseCoherence); + expect(boosted).toBeLessThanOrEqual(1); + }); + + test('should boost coherence at higher iterations', () => { + const baseCoherence = 0.6; + const boost1 = applyFibonacciWeightedBoost(baseCoherence, 1); + const boost2 = applyFibonacciWeightedBoost(baseCoherence, 2); + const boost3 = applyFibonacciWeightedBoost(baseCoherence, 5); + + expect(boost1).toBeGreaterThanOrEqual(baseCoherence); + expect(boost2).toBeGreaterThanOrEqual(baseCoherence); + expect(boost3).toBeGreaterThanOrEqual(baseCoherence); + + // All should be capped at 1.0 + expect(boost1).toBeLessThanOrEqual(1); + expect(boost2).toBeLessThanOrEqual(1); + expect(boost3).toBeLessThanOrEqual(1); + }); + + test('should cap boost at 1.0', () => { + const baseCoherence = 0.95; + const boosted = applyFibonacciWeightedBoost(baseCoherence, 10); + + expect(boosted).toBe(1); + }); + + test('should handle low base coherence', () => { + const baseCoherence = 0.1; + const boosted = applyFibonacciWeightedBoost(baseCoherence, 3); + + expect(boosted).toBeGreaterThan(baseCoherence); + expect(boosted).toBeLessThanOrEqual(1); + }); + + test('should handle high iteration counts', () => { + const baseCoherence = 0.5; + const boosted = applyFibonacciWeightedBoost(baseCoherence, 100); + + // Should be capped even with very high iterations + expect(boosted).toBeGreaterThanOrEqual(baseCoherence); + expect(boosted).toBeLessThanOrEqual(1); + }); + + test('should produce consistent results', () => { + const baseCoherence = 0.7; + const iteration = 5; + + const result1 = applyFibonacciWeightedBoost(baseCoherence, iteration); + const result2 = applyFibonacciWeightedBoost(baseCoherence, iteration); + + expect(result1).toBe(result2); + }); + + test('should increase boost with higher iterations', () => { + const baseCoherence = 0.5; + + const boost1 = applyFibonacciWeightedBoost(baseCoherence, 1); + const boost5 = applyFibonacciWeightedBoost(baseCoherence, 5); + const boost10 = 
applyFibonacciWeightedBoost(baseCoherence, 10); + + // Later iterations should generally give higher boosts (until cap) + expect(boost5).toBeGreaterThanOrEqual(boost1); + expect(boost10).toBeGreaterThanOrEqual(boost5); + }); + + test('should handle zero base coherence', () => { + const baseCoherence = 0; + const boosted = applyFibonacciWeightedBoost(baseCoherence, 5); + + expect(boosted).toBeGreaterThanOrEqual(0); + expect(boosted).toBeLessThanOrEqual(1); + }); + + test('should handle perfect base coherence', () => { + const baseCoherence = 1.0; + if (typeof applyFibonacciWeightedBoost !== 'function') { + throw new Error('applyFibonacciWeightedBoost is not a function'); + } + const boosted = applyFibonacciWeightedBoost(baseCoherence, 5); + + // Should remain at 1.0 + expect(boosted).toBe(1); + }); +}); + +describe('Integration Tests', () => { + test('should create full workflow: check -> payload -> report', () => { + const analysisText = 'Comprehensive integration test for VORTEX workflow with excellent coherence'; + const nodes: VortexNode[] = [ + { + name: 'IntNode', + status: 'CLEAN', + branch: 'integration', + commit: 'integration123', + changes: { added: 20, removed: 10 }, + }, + ]; + + // Step 1: Run check + const checkResult = runVortexCheck(analysisText, nodes); + expect(checkResult).toBeDefined(); + expect(checkResult.passed).toBeDefined(); + + // Step 2: Create payload + const payload = createVortexPayload(analysisText); + expect(payload).toBeDefined(); + expect(payload.vortexes.length).toBeGreaterThan(0); + + // Step 3: Format report + const report = formatVortexReport(checkResult); + expect(report).toBeDefined(); + expect(report).toContain('IntNode'); + }); + + test('should handle empty analysis text', () => { + const analysisText = ''; + + const result = runVortexCheck(analysisText); + expect(result).toBeDefined(); + expect(result.coherenceScore).toBeGreaterThanOrEqual(0); + + const payload = createVortexPayload(analysisText); + 
expect(payload).toBeDefined(); + }); + + test('should maintain consistency between check and payload', () => { + const analysisText = 'Consistency test for coherence metrics'; + + const checkResult = runVortexCheck(analysisText); + const payload = createVortexPayload(analysisText); + + // Both should use same underlying wave analysis + // Coherence scores should be in same normalized range + expect(checkResult.coherenceScore).toBeGreaterThanOrEqual(0); + expect(checkResult.coherenceScore).toBeLessThanOrEqual(1); + + payload.vortexes.forEach(vortex => { + expect(vortex.coherence).toBeGreaterThanOrEqual(0); + expect(vortex.coherence).toBeLessThanOrEqual(1); + }); + }); +}); diff --git a/packages/quantum-ethics/src/ai-integration.ts b/packages/quantum-ethics/src/ai-integration.ts index dc0564d..ffccff6 100644 --- a/packages/quantum-ethics/src/ai-integration.ts +++ b/packages/quantum-ethics/src/ai-integration.ts @@ -52,6 +52,7 @@ export interface QuantumAIHybridAlgorithm { /** * Default coherence baseline (70% as specified) + * Note: This is in 0-100 scale to match wave-toolkit's coherence_score output */ export const COHERENCE_BASELINE = 70; diff --git a/packages/quantum-ethics/src/coherence-dashboard.ts b/packages/quantum-ethics/src/coherence-dashboard.ts index f84e6ef..55c32f8 100644 --- a/packages/quantum-ethics/src/coherence-dashboard.ts +++ b/packages/quantum-ethics/src/coherence-dashboard.ts @@ -45,7 +45,7 @@ export interface CoherenceDashboard { } export interface DashboardConfig { - coherenceTarget: number; // Target coherence score (default 70 or 95) + coherenceTarget: number; // Target coherence score in 0-100 scale (default 70 or 95) curlThreshold: number; // Max acceptable curl (default 0.3) divergenceIdeal: number; // Ideal divergence (default 0.2) potentialMinimum: number; // Min acceptable potential (default 0.5) diff --git a/packages/quantum-ethics/src/vortex-wavespec.ts b/packages/quantum-ethics/src/vortex-wavespec.ts index b3d43d1..e67df4c 100644 --- 
a/packages/quantum-ethics/src/vortex-wavespec.ts +++ b/packages/quantum-ethics/src/vortex-wavespec.ts @@ -9,6 +9,22 @@ * * Emergent quality target: >60% * Fibonacci ratio: 1.618 + * + * === COHERENCE SCORE SCALE CONVENTION === + * This module works with coherence scores in two scales: + * + * 1. INPUT (from wave-toolkit): 0-100 scale + * - analyzeWave() returns coherence_score in 0-100 range + * - This is the "raw" score for human readability + * + * 2. INTERNAL PROCESSING: 0-1 scale + * - COHERENCE_THRESHOLD and other thresholds use 0-1 scale + * - normalizeCoherenceScore() converts from 0-100 to 0-1 + * - VortexResult.coherenceScore is in 0-1 scale + * - VortexCluster.coherence is in 0-1 scale + * + * Always use normalizeCoherenceScore() when converting from wave-toolkit + * results to internal processing values. */ import { type AtomDecision, createDecision } from "@spiralsafe/atom-trail"; @@ -21,17 +37,12 @@ import { // VORTEX Marker for cross-system integration export const VORTEX_MARKER = "VORTEX::QDI::v1"; -export const COHERENCE_THRESHOLD = 0.6; // 60% minimum for PASS - /** - * Coherence boost constants for different vortex types - * Each vortex type receives a different boost to its coherence score - * to reflect its relative importance in the system architecture + * Minimum coherence threshold for PASS status (0-1 scale) + * Note: This is in 0-1 scale. Wave-toolkit returns scores in 0-100 scale, + * so use normalizeCoherenceScore() to convert before comparison. 
*/ -export const COHERENCE_BOOST_MONITORING = 0; // Base monitoring - no boost -export const COHERENCE_BOOST_TESTING = 0.05; // Testing vortex - moderate boost for quality assurance -export const COHERENCE_BOOST_PLANNING = 0.03; // Planning vortex - small boost for infrastructure planning -export const COHERENCE_BOOST_CORE = 0.08; // Core vortex - highest boost for ethical/philosophical foundation +export const COHERENCE_THRESHOLD = 0.6; // 60% minimum for PASS export interface VortexNode { name: string; @@ -57,6 +68,7 @@ export interface VortexConfig { export interface VortexResult { timestamp: string; + /** Coherence score in 0-1 scale (normalized from wave-toolkit's 0-100 scale) */ coherenceScore: number; emergentQuality: string; passed: boolean; @@ -82,6 +94,7 @@ export interface VortexDashboardPayload { export interface VortexCluster { vortex_name: string; description: string; + /** Coherence score in 0-1 scale (normalized and possibly boosted) */ coherence: number; components: string[]; refinements: VortexRefinement[]; @@ -117,13 +130,39 @@ const DEFAULT_VORTEX_CONFIG: VortexConfig = { /** * Normalize coherence score from 0-100 scale to 0-1 scale - * @param rawScore - The coherence score in 0-100 range + * + * Note: wave-toolkit's analyzeWave() returns coherence_score in 0-100 scale + * for human readability. This function converts it to 0-1 scale for internal + * calculations and comparison with COHERENCE_THRESHOLD (which is in 0-1 scale). + * + * @param rawScore - The coherence score in 0-100 range from wave-toolkit * @return Normalized score in 0-1 range, clamped to valid bounds */ function normalizeCoherenceScore(rawScore: number): number { return Math.max(0, Math.min(1, rawScore / 100)); } +/** + * Vortex criticality levels determine coherence boost based on system importance. + * Higher criticality = greater coherence boost to ensure stability of critical subsystems. 
+ */ +enum VortexCriticality { + MONITORING = 0, // Baseline - observational role, no boost needed + PLANNING = 3, // Infrastructure planning requires moderate stability + TESTING = 5, // Testing/compliance needs high reliability for audit integrity + CORE = 8, // Core ethics/philosophy requires maximum coherence for system integrity +} + +/** + * Calculate coherence boost based on vortex criticality level. + * Formula: boost = criticality / 100 + * This ensures critical vortexes maintain higher coherence thresholds + * while staying within the 0-1 normalized range. + */ +function calculateCoherenceBoost(criticality: VortexCriticality): number { + return criticality / 100; +} + /** * Create VORTEX dashboard payload for endpoint integration */ @@ -134,12 +173,22 @@ export function createVortexPayload( const cfg = { ...DEFAULT_VORTEX_CONFIG, ...config }; const waveAnalysis = analyzeWave(analysisText); + const baseCoherence = normalizeCoherenceScore(waveAnalysis.coherence_score); + const vortexes: VortexCluster[] = [ { - vortex_name: 'MonitoringVortex', - description: 'Autonomous monitoring cluster – self-maintains coherence metrics', - coherence: Math.min(1, waveAnalysis.coherence_score / 100 + COHERENCE_BOOST_MONITORING), - components: ['CoherenceConstellation.tsx', 'SpectralAnalyzer.tsx', 'SessionMonitor.tsx'], + vortex_name: "MonitoringVortex", + description: + "Autonomous monitoring cluster – self-maintains coherence metrics", + coherence: Math.min( + 1, + baseCoherence + calculateCoherenceBoost(VortexCriticality.MONITORING), + ), + components: [ + "CoherenceConstellation.tsx", + "SpectralAnalyzer.tsx", + "SessionMonitor.tsx", + ], refinements: [ { original: "CoherenceConstell.tsx", @@ -151,10 +200,18 @@ export function createVortexPayload( ], }, { - vortex_name: 'TestingVortex', - description: 'Autonomous testing/compliance – self-maintains audits/proofs', - coherence: Math.min(1, waveAnalysis.coherence_score / 100 + COHERENCE_BOOST_TESTING), - components: 
['LoadTestingSimulator.tsx', 'ComplianceTracker.tsx', 'SortingHat.tsx'], + vortex_name: "TestingVortex", + description: + "Autonomous testing/compliance – self-maintains audits/proofs", + coherence: Math.min( + 1, + baseCoherence + calculateCoherenceBoost(VortexCriticality.TESTING), + ), + components: [ + "LoadTestingSimulator.tsx", + "ComplianceTracker.tsx", + "SortingHat.tsx", + ], refinements: [ { original: "LoadTestingSimu.tsx", @@ -165,10 +222,17 @@ export function createVortexPayload( ], }, { - vortex_name: 'PlanningVortex', - description: 'Autonomous planning/infra – self-maintains transitions', - coherence: Math.min(1, waveAnalysis.coherence_score / 100 + COHERENCE_BOOST_PLANNING), - components: ['MigrationPlanner.tsx', 'HardwareBridge.tsx', 'TransitionTimeline.tsx'], + vortex_name: "PlanningVortex", + description: "Autonomous planning/infra – self-maintains transitions", + coherence: Math.min( + 1, + baseCoherence + calculateCoherenceBoost(VortexCriticality.PLANNING), + ), + components: [ + "MigrationPlanner.tsx", + "HardwareBridge.tsx", + "TransitionTimeline.tsx", + ], refinements: [ { original: "MigrationPlanner.tsx", @@ -179,10 +243,13 @@ export function createVortexPayload( ], }, { - vortex_name: 'CoreVortex', - description: 'Autonomous core/philo – self-maintains ethics', - coherence: Math.min(1, waveAnalysis.coherence_score / 100 + COHERENCE_BOOST_CORE), - components: ['HopeSaucedPhilosophy.tsx', 'StakeholderHub.tsx'], + vortex_name: "CoreVortex", + description: "Autonomous core/philo – self-maintains ethics", + coherence: Math.min( + 1, + baseCoherence + calculateCoherenceBoost(VortexCriticality.CORE), + ), + components: ["HopeSaucedPhilosophy.tsx", "StakeholderHub.tsx"], refinements: [ { original: "HopeSaucedPhilo.tsx", @@ -330,15 +397,17 @@ export function formatVortexReport(result: VortexResult): string { } /** - * Calculate Fibonacci-weighted coherence boost + * Apply Fibonacci-weighted boost to coherence score */ -export function 
fibonacciCoherenceBoost( +export function applyFibonacciWeightedBoost( baseCoherence: number, iteration: number, ): number { + // Ensure base coherence is within [0, 1] before applying boost + const normalizedBaseCoherence = Math.max(0, Math.min(1, baseCoherence)); const fibIndex = Math.min(iteration, FIBONACCI.length - 1); const fibWeight = FIBONACCI[fibIndex] / FIBONACCI[FIBONACCI.length - 1]; - return Math.min(1, baseCoherence + fibWeight * 0.1); + return Math.min(1, normalizedBaseCoherence + fibWeight * 0.1); } // Export for use in dashboard and endpoints diff --git a/packages/wave-toolkit/src/index.ts b/packages/wave-toolkit/src/index.ts index 4105d5d..9394c03 100644 --- a/packages/wave-toolkit/src/index.ts +++ b/packages/wave-toolkit/src/index.ts @@ -30,6 +30,7 @@ export interface WaveAnalysisResult { input_preview: string; metrics: WaveMetrics; coherence: CoherenceMetrics; + /** Overall coherence score on a 0-100 scale (higher is better) */ coherence_score: number; chaos_score: number; warnings: string[]; @@ -130,7 +131,14 @@ export function calculateChaosScore(metrics: CoherenceMetrics): number { } /** - * Analyze text for WAVE patterns + * Analyze text as wave field + * + * @param input - The text to analyze + * @returns WaveAnalysisResult with coherence_score in 0-100 scale + * + * Note: The coherence_score is returned on a 0-100 scale for human readability. + * If you need to use this score in calculations with 0-1 normalized values, + * divide by 100 or use a normalization function. 
*/ export function analyzeWave(input: string): WaveAnalysisResult { const warnings: string[] = []; diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..7d68688 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,9 @@ +[pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +addopts = + -v + --strict-markers + --tb=short diff --git a/scripts/setup.ts b/scripts/setup.ts index 716685f..78c8f73 100644 --- a/scripts/setup.ts +++ b/scripts/setup.ts @@ -16,6 +16,7 @@ import { $ } from "bun"; import { existsSync, rmSync } from "fs"; import { mkdir, writeFile } from "fs/promises"; import { join, resolve } from "path"; +import { generateAtomTag, saveDecision } from "./atom-tag"; const ROOT_DIR = join(import.meta.dir, ".."); const ATOM_TRAIL_DIR = join(ROOT_DIR, ".atom-trail"); @@ -101,11 +102,18 @@ async function installDependencies(force: boolean): Promise { const nodeModulesPath = join(ROOT_DIR, "node_modules"); if (force) { log("step", "Force reinstalling dependencies..."); - // Validate path before removal for safety - ensure it resolves to expected location - const resolvedPath = resolve(nodeModulesPath); - const expectedPath = resolve(ROOT_DIR, "node_modules"); - if (existsSync(resolvedPath) && resolvedPath === expectedPath) { - rmSync(resolvedPath, { recursive: true, force: true }); + // Validate path before removal for security + // Ensure the resolved path is within the project directory + const resolvedNodeModules = resolve(nodeModulesPath); + const resolvedRoot = resolve(ROOT_DIR); + const expectedPath = resolve(resolvedRoot, "node_modules"); + + // Check that the resolved path exactly matches the expected node_modules path + // This prevents path traversal attacks (e.g., /etc/node_modules) + const isValidPath = resolvedNodeModules === expectedPath; + + if (existsSync(nodeModulesPath) && isValidPath) { + rmSync(nodeModulesPath, { recursive: true, force: true }); } await $`cd ${ROOT_DIR} && bun 
install`.quiet(); } else { @@ -136,14 +144,6 @@ async function setupAtomTrail(): Promise { } } - // Add .gitkeep files to preserve directory structure - for (const dir of dirs.slice(1)) { - const gitkeep = join(dir, ".gitkeep"); - if (!existsSync(gitkeep)) { - await writeFile(gitkeep, ""); - } - } - return true; } catch (error) { log("error", `Failed to setup ATOM trail: ${error}`); @@ -162,37 +162,38 @@ function checkTypeScriptConfig(): boolean { * Run tests to verify setup */ async function runTests(): Promise { - try { - await $({ cwd: ROOT_DIR })`bun test`.quiet(); - return true; - } catch { + const result = await $({ cwd: ROOT_DIR })`bun test`.quiet().nothrow(); + + if (result.exitCode !== 0) { + log( + "error", + [ + `Test command failed with exit code ${result.exitCode}.`, + result.stdout ? `STDOUT:\n${result.stdout}` : "", + result.stderr ? `STDERR:\n${result.stderr}` : "", + ] + .filter(Boolean) + .join("\n\n") + ); return false; } + + return true; } /** * Create initial ATOM decision for setup - * Uses timestamp-based unique suffix to avoid counter conflicts + * Uses counter-based system from atom-tag.ts for consistency */ async function recordSetupAtom(): Promise { try { - const now = new Date(); - const dateStr = now.toISOString().slice(0, 10).replace(/-/g, ""); - // Use timestamp suffix for uniqueness (HHmmss format) - const timeStr = now.toISOString().slice(11, 19).replace(/:/g, ""); - const tag = `ATOM-INIT-${dateStr}-${timeStr}-new-user-setup`; - - const decision = { - atom_tag: tag, - type: "INIT", - description: "New user environment setup completed", - files: ["package.json", "tsconfig.json", ".atom-trail/"], - timestamp: now.toISOString(), - freshness: "fresh", - }; - - const decisionPath = join(ATOM_TRAIL_DIR, "decisions", `${tag}.json`); - await writeFile(decisionPath, JSON.stringify(decision, null, 2)); + const tag = await generateAtomTag("INIT", "new user environment setup completed"); + await saveDecision( + tag, + "INIT", + "New user 
environment setup completed", + ["package.json", "tsconfig.json", ".atom-trail/"] + ); return true; } catch { @@ -312,6 +313,7 @@ async function setup(options: SetupOptions): Promise { log("success", "Test suite passing"); } else { log("warn", "Some tests may be failing (check output)"); + result.success = false; } } diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_agent_skills.py b/tests/test_agent_skills.py new file mode 100644 index 0000000..e6642d1 --- /dev/null +++ b/tests/test_agent_skills.py @@ -0,0 +1,988 @@ +""" +PyTest tests for agent_skills.py + +Tests quantum circuit simulation, coherence checking, cascade integration, +and PR review functionality for the QDI agent script. +""" + +import pytest +import sys +from pathlib import Path + +# Add parent directory to path to import agent_skills module +# This is necessary since agent_skills.py is a standalone script at the repo root +# and not part of an installed package +repo_root = Path(__file__).parent.parent +if str(repo_root) not in sys.path: + sys.path.insert(0, str(repo_root)) + +import agent_skills + + +class TestParseGate: + """Test gate parsing functionality""" + + def test_parse_hadamard_gate(self): + """Test parsing H gate""" + result = agent_skills._parse_gate("h(0)") + assert result == ('h', (0,)) + + result = agent_skills._parse_gate("H(1)") + assert result == ('h', (1,)) + + def test_parse_x_gate(self): + """Test parsing X gate""" + result = agent_skills._parse_gate("x(0)") + assert result == ('x', (0,)) + + result = agent_skills._parse_gate("X(2)") + assert result == ('x', (2,)) + + def test_parse_cx_gate(self): + """Test parsing CX gate""" + result = agent_skills._parse_gate("cx(0,1)") + assert result == ('cx', (0, 1)) + + result = agent_skills._parse_gate("CX(2, 3)") + assert result == ('cx', (2, 3)) + + def test_parse_invalid_gate(self): + """Test parsing invalid gates""" + assert agent_skills._parse_gate("invalid") is 
None + assert agent_skills._parse_gate("h(a)") is None + assert agent_skills._parse_gate("cx(0)") is None + assert agent_skills._parse_gate("cx(0,1,2)") is None + assert agent_skills._parse_gate("") is None + assert agent_skills._parse_gate(" ") is None + + def test_parse_gate_with_whitespace(self): + """Test parsing gates with extra whitespace""" + result = agent_skills._parse_gate(" h(0) ") + assert result == ('h', (0,)) + + result = agent_skills._parse_gate("cx( 0 , 1 )") + assert result == ('cx', (0, 1)) + + +class TestSimulateCircuit: + """Test circuit simulation functionality""" + + def test_default_bell_state(self): + """Test default Bell state circuit simulation""" + result = agent_skills.simulate_circuit() + + assert result['status'] in ['success', 'simulated'] + assert 'counts' in result + assert 'circuit_depth' in result + assert 'num_qubits' in result + assert result['vortex'] == agent_skills.VORTEX_MARKER + + # Check for Bell state outcomes (00 or 11) + counts = result['counts'] + assert isinstance(counts, dict) + # Bell state should have 2 qubits + assert result['num_qubits'] == 2 + + def test_single_hadamard_gate(self): + """Test circuit with single Hadamard gate""" + result = agent_skills.simulate_circuit("h(0)") + + assert result['status'] in ['success', 'simulated', 'error'] + assert 'vortex' in result + + if result['status'] == 'success': + assert 'counts' in result + assert result['num_qubits'] >= 1 + + def test_bell_state_explicit(self): + """Test explicit Bell state circuit""" + result = agent_skills.simulate_circuit("h(0); cx(0,1)") + + assert result['status'] in ['success', 'simulated', 'error'] + assert 'vortex' in result + + if result['status'] == 'success': + assert 'counts' in result + assert result['num_qubits'] >= 2 + + def test_multiple_gates(self): + """Test circuit with multiple gates""" + result = agent_skills.simulate_circuit("h(0); x(1); cx(0,1)") + + assert result['status'] in ['success', 'simulated', 'error'] + assert 'vortex' 
in result + + if result['status'] in ['success', 'simulated']: + assert 'counts' in result + + def test_invalid_gate_syntax(self): + """Test circuit with invalid gate syntax""" + result = agent_skills.simulate_circuit("invalid_gate") + + # When Qiskit is available, should return error + # When Qiskit is not available, returns simulated (fallback) + assert result['status'] in ['error', 'simulated'] + assert 'vortex' in result + + def test_empty_circuit(self): + """Test empty circuit string""" + result = agent_skills.simulate_circuit("") + + # Empty string should fall through to default Bell state + assert result['status'] in ['success', 'simulated'] + assert 'vortex' in result + + def test_circuit_with_spaces(self): + """Test circuit with extra spaces""" + result = agent_skills.simulate_circuit(" h(0) ; cx(0,1) ") + + assert result['status'] in ['success', 'simulated', 'error'] + assert 'vortex' in result + + def test_circuit_depth_calculation(self): + """Test circuit depth is calculated""" + result = agent_skills.simulate_circuit("h(0); cx(0,1)") + + if result['status'] in ['success', 'simulated']: + assert 'circuit_depth' in result + assert isinstance(result['circuit_depth'], int) + assert result['circuit_depth'] >= 0 + + def test_vortex_marker_present(self): + """Test VORTEX marker is always present""" + result = agent_skills.simulate_circuit() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + result = agent_skills.simulate_circuit("h(0)") + assert result['vortex'] == agent_skills.VORTEX_MARKER + + result = agent_skills.simulate_circuit("invalid") + assert result['vortex'] == agent_skills.VORTEX_MARKER + + +class TestCheckCoherence: + """Test coherence checking functionality""" + + def test_default_threshold(self): + """Test coherence check with default threshold""" + result = agent_skills.check_coherence() + + assert 'coherence' in result + assert 'threshold' in result + assert 'passed' in result + assert 'message' in result + assert result['vortex'] 
== agent_skills.VORTEX_MARKER + + # Default threshold is 0.6 + assert result['threshold'] == 0.6 + assert isinstance(result['passed'], bool) + + def test_custom_threshold_pass(self): + """Test coherence check that passes""" + result = agent_skills.check_coherence(threshold=0.5) + + assert result['threshold'] == 0.5 + # With simulated coherence of 0.6, should pass 0.5 threshold + assert result['coherence'] == agent_skills.DEFAULT_SIMULATED_COHERENCE + assert result['passed'] is True + + def test_custom_threshold_fail(self): + """Test coherence check that fails""" + result = agent_skills.check_coherence(threshold=0.7) + + assert result['threshold'] == 0.7 + # With simulated coherence of 0.6, should fail 0.7 threshold + assert result['coherence'] == agent_skills.DEFAULT_SIMULATED_COHERENCE + assert result['passed'] is False + + def test_threshold_edge_case_equal(self): + """Test coherence check at exact threshold""" + result = agent_skills.check_coherence(threshold=0.6) + + assert result['threshold'] == 0.6 + assert result['coherence'] == 0.6 + # Equal should pass (>=) + assert result['passed'] is True + + def test_threshold_zero(self): + """Test coherence check with zero threshold""" + result = agent_skills.check_coherence(threshold=0.0) + + assert result['threshold'] == 0.0 + assert result['passed'] is True + + def test_threshold_one(self): + """Test coherence check with threshold of 1.0""" + result = agent_skills.check_coherence(threshold=1.0) + + assert result['threshold'] == 1.0 + # Simulated coherence is 0.6, should fail + assert result['passed'] is False + + def test_message_format(self): + """Test message format is correct""" + result = agent_skills.check_coherence(threshold=0.6) + + assert 'message' in result + assert isinstance(result['message'], str) + assert '60%' in result['message'] or '0.60' in result['message'] + + def test_vortex_marker_present(self): + """Test VORTEX marker is always present""" + result = agent_skills.check_coherence() + assert 
result['vortex'] == agent_skills.VORTEX_MARKER + + result = agent_skills.check_coherence(threshold=0.5) + assert result['vortex'] == agent_skills.VORTEX_MARKER + + +class TestCascadeIntegration: + """Test cascade provenance integration""" + + def test_no_pr_body(self): + """Test cascade with no PR body""" + result = agent_skills.cascade_integration() + + assert result['status'] == 'cascaded' + assert result['keywords_found'] == [] + assert result['provenance_tracked'] is True + assert result['vortex'] == agent_skills.VORTEX_MARKER + + def test_empty_pr_body(self): + """Test cascade with empty PR body""" + result = agent_skills.cascade_integration("") + + assert result['status'] == 'cascaded' + assert result['keywords_found'] == [] + assert result['provenance_tracked'] is True + + def test_single_keyword(self): + """Test cascade with single keyword""" + result = agent_skills.cascade_integration("This PR improves quantum coherence") + + assert result['status'] == 'cascaded' + assert 'quantum' in result['keywords_found'] + assert 'coherence' in result['keywords_found'] + assert len(result['keywords_found']) >= 2 + + def test_multiple_keywords(self): + """Test cascade with multiple keywords""" + pr_body = "This PR adds quantum provenance tracking with ethical vortex spiral" + result = agent_skills.cascade_integration(pr_body) + + assert result['status'] == 'cascaded' + keywords = result['keywords_found'] + assert 'quantum' in keywords + assert 'provenance' in keywords + assert 'ethical' in keywords + assert 'vortex' in keywords + assert 'spiral' in keywords + + def test_case_insensitive(self): + """Test cascade is case-insensitive""" + result = agent_skills.cascade_integration("QUANTUM Provenance ETHICAL") + + keywords = result['keywords_found'] + assert 'quantum' in keywords + assert 'provenance' in keywords + assert 'ethical' in keywords + + def test_no_keywords(self): + """Test cascade with no matching keywords""" + result = agent_skills.cascade_integration("This is 
a simple bug fix") + + assert result['status'] == 'cascaded' + assert result['keywords_found'] == [] + assert result['provenance_tracked'] is True + + def test_message_format(self): + """Test message format includes count""" + result = agent_skills.cascade_integration("quantum atom spiral") + + assert 'message' in result + assert isinstance(result['message'], str) + count = len(result['keywords_found']) + assert str(count) in result['message'] + + def test_vortex_marker_present(self): + """Test VORTEX marker is always present""" + result = agent_skills.cascade_integration() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + result = agent_skills.cascade_integration("test body") + assert result['vortex'] == agent_skills.VORTEX_MARKER + + def test_atom_decision_present(self): + """Test that ATOM decision is present in result""" + result = agent_skills.cascade_integration("quantum provenance test") + + assert 'atom_decision' in result + assert isinstance(result['atom_decision'], dict) + + def test_atom_tag_present(self): + """Test that ATOM tag is present in result""" + result = agent_skills.cascade_integration("ethical review test") + + assert 'atom_tag' in result + assert isinstance(result['atom_tag'], str) + assert result['atom_tag'].startswith('ATOM-') + + def test_atom_decision_structure(self): + """Test that ATOM decision has correct structure""" + result = agent_skills.cascade_integration("quantum coherence test") + + decision = result['atom_decision'] + + # Verify required fields + assert 'atom_tag' in decision + assert 'type' in decision + assert 'description' in decision + assert 'timestamp' in decision + assert 'files' in decision + assert 'tags' in decision + assert 'freshness' in decision + assert 'verified' in decision + + # Verify field types and values + assert decision['type'] == 'VERIFY' + assert isinstance(decision['description'], str) + assert isinstance(decision['timestamp'], str) + assert isinstance(decision['files'], list) + assert 
isinstance(decision['tags'], list) + assert decision['freshness'] == 'fresh' + assert decision['verified'] is False + + def test_atom_decision_tags_include_keywords(self): + """Test that ATOM decision tags include detected keywords""" + result = agent_skills.cascade_integration("quantum provenance ethical test") + + decision = result['atom_decision'] + tags = decision['tags'] + + # Base tags should always be present + assert 'cascade' in tags + assert 'provenance' in tags + assert 'ethical-review' in tags + + # Keywords found should be in tags + assert 'quantum' in tags + assert 'ethical' in tags + + def test_atom_trail_file_created(self, tmp_path, monkeypatch): + """Test that ATOM trail decision file is created""" + # Use temporary directory for ATOM trail + atom_trail_dir = tmp_path / ".atom-trail" + atom_counters_dir = atom_trail_dir / "counters" + atom_decisions_dir = atom_trail_dir / "decisions" + + # Monkey patch the ATOM trail directories + monkeypatch.setattr(agent_skills, 'ATOM_TRAIL_DIR', atom_trail_dir) + monkeypatch.setattr(agent_skills, 'ATOM_COUNTERS_DIR', atom_counters_dir) + monkeypatch.setattr(agent_skills, 'ATOM_DECISIONS_DIR', atom_decisions_dir) + + result = agent_skills.cascade_integration("test body") + + # Verify directories were created + assert atom_trail_dir.exists() + assert atom_counters_dir.exists() + assert atom_decisions_dir.exists() + + # Verify decision file was created + atom_tag = result['atom_tag'] + decision_file = atom_decisions_dir / f"{atom_tag}.json" + assert decision_file.exists() + + # Verify file content + with open(decision_file, 'r', encoding='utf-8') as f: + file_decision = json.load(f) + + assert file_decision['atom_tag'] == atom_tag + assert file_decision['type'] == 'VERIFY' + + def test_atom_tag_format(self): + """Test that ATOM tag follows correct format""" + result = agent_skills.cascade_integration("test") + + atom_tag = result['atom_tag'] + + # Format: ATOM-TYPE-YYYYMMDD-NNN-description + parts = 
atom_tag.split('-') + assert len(parts) >= 5 # At least 5 parts + assert parts[0] == 'ATOM' + assert parts[1] == 'VERIFY' + assert len(parts[2]) == 8 # YYYYMMDD + assert parts[3].isdigit() # Counter + assert len(parts[3]) == 3 # Three-digit counter + + def test_atom_decision_consistency(self): + """Test that atom_decision and atom_tag are consistent""" + result = agent_skills.cascade_integration("consistency test") + + # The atom_tag in result should match the one in atom_decision + assert result['atom_tag'] == result['atom_decision']['atom_tag'] + + +class TestReviewPR: + """Test PR review functionality""" + + def test_review_pr_returns_coherence_check(self): + """Test review_pr performs coherence check""" + result = agent_skills.review_pr() + + assert 'status' in result + assert 'coherence_check' in result + assert 'ethical_review' in result + assert 'message' in result + assert 'coherence_details' in result + assert result['vortex'] == agent_skills.VORTEX_MARKER + + def test_review_pr_coherence_pass(self): + """Test review when coherence passes""" + result = agent_skills.review_pr() + + # With default simulated coherence of 0.6 and threshold 0.6 + assert result['coherence_check'] in ['passed', 'failed'] + + if result['coherence_check'] == 'passed': + assert result['status'] == 'reviewed' + assert result['ethical_review'] == 'approved' + assert 'Ready for merge' in result['message'] + + def test_review_pr_message_format(self): + """Test review message format""" + result = agent_skills.review_pr() + + assert 'message' in result + message = result['message'] + assert isinstance(message, str) + assert '🌀' in message # Vortex emoji + assert 'Coherence' in message + + def test_review_pr_coherence_details(self): + """Test review includes coherence details""" + result = agent_skills.review_pr() + + assert 'coherence_details' in result + details = result['coherence_details'] + assert isinstance(details, dict) + assert 'coherence' in details + assert 'threshold' in 
details + assert 'passed' in details + + def test_review_pr_status_consistency(self): + """Test status is consistent with coherence check""" + result = agent_skills.review_pr() + + coherence_passed = result['coherence_details']['passed'] + + if coherence_passed: + assert result['status'] == 'reviewed' + assert result['coherence_check'] == 'passed' + assert result['ethical_review'] == 'approved' + else: + assert result['status'] == 'review_required' + assert result['coherence_check'] == 'failed' + assert result['ethical_review'] == 'requires_additional_review' + + def test_vortex_marker_present(self): + """Test VORTEX marker is always present""" + result = agent_skills.review_pr() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + +class TestCLIArguments: + """Test command-line argument parsing""" + + def test_simulate_command_default(self): + """Test simulate command with default circuit""" + # This would be tested through main() integration + # Here we test the function directly + result = agent_skills.simulate_circuit() + assert result['status'] in ['success', 'simulated'] + + def test_simulate_command_with_circuit(self): + """Test simulate command with custom circuit""" + result = agent_skills.simulate_circuit("h(0); cx(0,1)") + assert result['status'] in ['success', 'simulated', 'error'] + + def test_check_coherence_command_default(self): + """Test check_coherence command with default threshold""" + result = agent_skills.check_coherence() + assert result['threshold'] == 0.6 + + def test_check_coherence_command_custom(self): + """Test check_coherence command with custom threshold""" + result = agent_skills.check_coherence(0.8) + assert result['threshold'] == 0.8 + + def test_cascade_command_no_body(self): + """Test cascade command without PR body""" + result = agent_skills.cascade_integration() + assert result['status'] == 'cascaded' + + def test_cascade_command_with_body(self): + """Test cascade command with PR body""" + result = 
agent_skills.cascade_integration("test body") + assert result['status'] == 'cascaded' + + def test_review_pr_command(self): + """Test review_pr command""" + result = agent_skills.review_pr() + assert 'status' in result + assert 'coherence_check' in result + + +class TestVortexMarker: + """Test VORTEX marker integration""" + + def test_vortex_marker_constant(self): + """Test VORTEX marker constant""" + assert agent_skills.VORTEX_MARKER == "VORTEX::QDI::v1" + + def test_all_functions_include_vortex(self): + """Test all functions include VORTEX marker""" + # simulate_circuit + result = agent_skills.simulate_circuit() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + # check_coherence + result = agent_skills.check_coherence() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + # cascade_integration + result = agent_skills.cascade_integration() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + # review_pr + result = agent_skills.review_pr() + assert result['vortex'] == agent_skills.VORTEX_MARKER + + +class TestDefaultCoherence: + """Test default simulated coherence constant""" + + def test_default_coherence_value(self): + """Test default coherence is 0.6""" + assert agent_skills.DEFAULT_SIMULATED_COHERENCE == 0.6 + + def test_default_coherence_used(self): + """Test default coherence is used in check_coherence""" + result = agent_skills.check_coherence() + assert result['coherence'] == agent_skills.DEFAULT_SIMULATED_COHERENCE + + +class TestQubitRangeValidation: + """Test qubit range validation to prevent resource exhaustion""" + + def test_valid_qubit_index_zero(self): + """Test qubit index 0 is valid""" + result = agent_skills.simulate_circuit("h(0)") + assert result['status'] in ['success', 'simulated'] + + def test_valid_qubit_index_max(self): + """Test qubit index 100 (MAX_QUBIT_INDEX) is valid""" + result = agent_skills.simulate_circuit("h(100)") + assert result['status'] in ['success', 'simulated'] + + def 
test_valid_qubit_index_middle(self): + """Test qubit index in middle of range is valid""" + result = agent_skills.simulate_circuit("h(50)") + assert result['status'] in ['success', 'simulated'] + + def test_invalid_qubit_index_negative(self): + """Test negative qubit index is rejected""" + result = agent_skills.simulate_circuit("h(-1)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert '-1' in result['error'] + + def test_invalid_qubit_index_above_max(self): + """Test qubit index above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("h(101)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert '101' in result['error'] + + def test_invalid_qubit_index_far_above_max(self): + """Test very large qubit index is rejected""" + result = agent_skills.simulate_circuit("h(1000)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert '1000' in result['error'] + + def test_x_gate_negative_qubit(self): + """Test X gate with negative qubit is rejected""" + result = agent_skills.simulate_circuit("x(-5)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + + def test_x_gate_above_max_qubit(self): + """Test X gate with qubit above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("x(150)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + + def test_cx_gate_valid_qubits(self): + """Test CX gate with valid qubit indices""" + result = agent_skills.simulate_circuit("cx(0,1)") + assert result['status'] in ['success', 'simulated'] + + def test_cx_gate_valid_at_max(self): + """Test CX gate with qubits at MAX_QUBIT_INDEX""" + result = agent_skills.simulate_circuit("cx(99,100)") + assert result['status'] 
in ['success', 'simulated'] + + def test_cx_gate_invalid_control_negative(self): + """Test CX gate with negative control qubit is rejected""" + result = agent_skills.simulate_circuit("cx(-1,0)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert 'control' in result['error'].lower() or '-1' in result['error'] + + def test_cx_gate_invalid_control_above_max(self): + """Test CX gate with control qubit above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("cx(101,0)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert 'control' in result['error'].lower() or '101' in result['error'] + + def test_cx_gate_invalid_target_negative(self): + """Test CX gate with negative target qubit is rejected""" + result = agent_skills.simulate_circuit("cx(0,-1)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert 'target' in result['error'].lower() or '-1' in result['error'] + + def test_cx_gate_invalid_target_above_max(self): + """Test CX gate with target qubit above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("cx(0,101)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + assert 'target' in result['error'].lower() or '101' in result['error'] + + def test_cx_gate_both_qubits_invalid(self): + """Test CX gate with both qubits out of range""" + result = agent_skills.simulate_circuit("cx(-1,101)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + + def test_error_message_includes_max_value(self): + """Test error message includes MAX_QUBIT_INDEX value""" + result = agent_skills.simulate_circuit("h(200)") + assert result['status'] == 'error' + assert 'error' in result + assert '100' in 
result['error'] # MAX_QUBIT_INDEX + + def test_multiple_gates_first_invalid(self): + """Test circuit with first gate having invalid qubit""" + result = agent_skills.simulate_circuit("h(101); cx(0,1)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + + def test_multiple_gates_second_invalid(self): + """Test circuit with second gate having invalid qubit""" + result = agent_skills.simulate_circuit("h(0); cx(0,101)") + assert result['status'] == 'error' + assert 'error' in result + assert 'out of range' in result['error'].lower() + + def test_error_message_format_single_qubit(self): + """Test error message format for single-qubit gate""" + result = agent_skills.simulate_circuit("h(150)") + assert result['status'] == 'error' + expected_msg = "Qubit index 150 out of range. Must be between 0 and 100." + assert result['error'] == expected_msg + + def test_error_message_format_cx_control(self): + """Test error message format for CX gate with invalid control""" + result = agent_skills.simulate_circuit("cx(150,0)") + assert result['status'] == 'error' + expected_msg = "Control qubit index 150 out of range. Must be between 0 and 100." + assert result['error'] == expected_msg + + def test_error_message_format_cx_target(self): + """Test error message format for CX gate with invalid target""" + result = agent_skills.simulate_circuit("cx(0,150)") + assert result['status'] == 'error' + expected_msg = "Target qubit index 150 out of range. Must be between 0 and 100." 
+ assert result['error'] == expected_msg + + +class TestErrorHandling: + """Test error handling in various scenarios""" + + def test_invalid_circuit_returns_error(self): + """Test invalid circuit syntax returns error""" + result = agent_skills.simulate_circuit("bad_gate()") + # When Qiskit is available, should return error + # When Qiskit is not available, returns simulated (fallback) + assert result['status'] in ['error', 'simulated'] + + def test_malformed_gate_returns_error(self): + """Test malformed gate returns error""" + result = agent_skills.simulate_circuit("h()") + # When Qiskit is available, should return error + # When Qiskit is not available, returns simulated (fallback) + assert result['status'] in ['error', 'simulated'] + + def test_functions_never_raise_exceptions(self): + """Test functions handle exceptions gracefully""" + # All these should return dict results, not raise exceptions + result = agent_skills.simulate_circuit("invalid") + assert isinstance(result, dict) + + result = agent_skills.check_coherence(-1) + assert isinstance(result, dict) + + result = agent_skills.cascade_integration(None) + assert isinstance(result, dict) + + result = agent_skills.review_pr() + assert isinstance(result, dict) + + +class TestQubitRangeValidationStrict: + """Test qubit index validation to prevent resource exhaustion""" + + def test_valid_single_qubit_gate(self): + """Test that valid single-qubit gate indices are accepted""" + result = agent_skills.simulate_circuit("h(0)") + assert result['status'] == 'simulated' + + result = agent_skills.simulate_circuit("h(50)") + assert result['status'] == 'simulated' + + result = agent_skills.simulate_circuit("h(100)") + assert result['status'] == 'simulated' + + def test_valid_two_qubit_gate(self): + """Test that valid two-qubit gate indices are accepted""" + result = agent_skills.simulate_circuit("cx(0,1)") + assert result['status'] == 'simulated' + + result = agent_skills.simulate_circuit("cx(50,51)") + assert result['status'] 
== 'simulated' + + result = agent_skills.simulate_circuit("cx(99,100)") + assert result['status'] == 'simulated' + + def test_single_qubit_below_range(self): + """Test that qubit index below 0 is rejected""" + result = agent_skills.simulate_circuit("h(-1)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert '-1' in result['error'] + assert 'Must be between 0 and 100' in result['error'] + + def test_single_qubit_above_range(self): + """Test that qubit index above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("h(101)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert '101' in result['error'] + assert 'Must be between 0 and 100' in result['error'] + + def test_two_qubit_control_below_range(self): + """Test that control qubit below 0 is rejected""" + result = agent_skills.simulate_circuit("cx(-1,0)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert 'Control qubit' in result['error'] + assert '-1' in result['error'] + + def test_two_qubit_control_above_range(self): + """Test that control qubit above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("cx(101,0)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert 'Control qubit' in result['error'] + assert '101' in result['error'] + + def test_two_qubit_target_below_range(self): + """Test that target qubit below 0 is rejected""" + result = agent_skills.simulate_circuit("cx(0,-1)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert 'Target qubit' in result['error'] + assert '-1' in result['error'] + + def test_two_qubit_target_above_range(self): + """Test that target qubit above MAX_QUBIT_INDEX is rejected""" + result = agent_skills.simulate_circuit("cx(0,101)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert 'Target qubit' in result['error'] + 
assert '101' in result['error'] + + def test_very_large_qubit_index(self): + """Test that extremely large qubit indices are rejected""" + result = agent_skills.simulate_circuit("h(10000)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + assert '10000' in result['error'] + + def test_multiple_gates_with_invalid_qubit(self): + """Test that circuit with one invalid qubit is rejected""" + result = agent_skills.simulate_circuit("h(0); h(101); x(1)") + assert result['status'] == 'error' + assert 'out of range' in result['error'] + + def test_boundary_values(self): + """Test boundary values for qubit indices""" + # Test exact boundaries + result = agent_skills.simulate_circuit("h(0)") + assert result['status'] == 'simulated' + + result = agent_skills.simulate_circuit("h(100)") + assert result['status'] == 'simulated' + + # Test just outside boundaries + result = agent_skills.simulate_circuit("h(-1)") + assert result['status'] == 'error' + + result = agent_skills.simulate_circuit("h(101)") + assert result['status'] == 'error' + + +class TestATOMDecisionFunctionality: + """Test ATOM trail provenance tracking functionality""" + + def test_cascade_integration_creates_atom_decision(self): + """Test that cascade_integration creates ATOM decision""" + result = agent_skills.cascade_integration("provenance quantum ethical") + + # Check that ATOM decision fields are present + assert 'atom_decision' in result + assert 'atom_tag' in result + + # Verify ATOM decision structure + decision = result['atom_decision'] + assert 'atom_tag' in decision + assert 'type' in decision + assert 'description' in decision + assert 'timestamp' in decision + assert 'files' in decision + assert 'tags' in decision + assert 'freshness' in decision + assert 'verified' in decision + + def test_atom_decision_type_is_verify(self): + """Test that cascade ATOM decisions have type VERIFY""" + result = agent_skills.cascade_integration("test") + decision = result['atom_decision'] 
+ assert decision['type'] == 'VERIFY' + + def test_atom_decision_includes_keyword_count(self): + """Test that ATOM decision description includes keyword count""" + result = agent_skills.cascade_integration("provenance quantum ethical") + decision = result['atom_decision'] + assert '3 ethical keywords' in decision['description'] + + result = agent_skills.cascade_integration("no keywords here") + decision = result['atom_decision'] + assert '0 ethical keywords' in decision['description'] + + def test_atom_decision_tags_include_found_keywords(self): + """Test that ATOM decision tags include found keywords""" + result = agent_skills.cascade_integration("provenance quantum") + decision = result['atom_decision'] + + # Should include base tags plus found keywords + assert 'cascade' in decision['tags'] + assert 'provenance' in decision['tags'] + assert 'ethical-review' in decision['tags'] + assert 'quantum' in decision['tags'] + + def test_atom_tag_format(self): + """Test that ATOM tag follows correct format: ATOM-TYPE-YYYYMMDD-NNN-description""" + result = agent_skills.cascade_integration("test") + atom_tag = result['atom_tag'] + + # Check format + parts = atom_tag.split('-') + assert parts[0] == 'ATOM' + assert parts[1] == 'VERIFY' + # parts[2] should be YYYYMMDD + assert len(parts[2]) == 8 + assert parts[2].isdigit() + # parts[3] should be NNN (counter) + assert len(parts[3]) == 3 + assert parts[3].isdigit() + # parts[4+] should be slugified description + assert len(parts) >= 5 + + def test_atom_decision_timestamp_format(self): + """Test that ATOM decision timestamp is ISO format""" + result = agent_skills.cascade_integration("test") + decision = result['atom_decision'] + + # Check ISO format timestamp + timestamp = decision['timestamp'] + assert 'T' in timestamp + # Should be parseable as ISO format + from datetime import datetime + datetime.fromisoformat(timestamp) + + def test_atom_decision_freshness_and_verified(self): + """Test that new ATOM decisions are marked 
fresh and unverified""" + result = agent_skills.cascade_integration("test") + decision = result['atom_decision'] + + assert decision['freshness'] == 'fresh' + assert decision['verified'] is False + + def test_atom_decision_files_field(self): + """Test that ATOM decision includes files field""" + result = agent_skills.cascade_integration("test") + decision = result['atom_decision'] + + assert isinstance(decision['files'], list) + assert 'pr_body' in decision['files'] + + def test_atom_trail_directory_created(self): + """Test that ATOM trail directories are created""" + from pathlib import Path + + # Trigger ATOM decision creation + agent_skills.cascade_integration("test") + + # Check directories exist + assert Path(".atom-trail").exists() + assert Path(".atom-trail/counters").exists() + assert Path(".atom-trail/decisions").exists() + + def test_atom_decision_persisted_to_file(self): + """Test that ATOM decision is persisted to JSON file""" + import json + from pathlib import Path + + result = agent_skills.cascade_integration("test") + atom_tag = result['atom_tag'] + + # Check that decision file exists + decision_file = Path(".atom-trail/decisions") / f"{atom_tag}.json" + assert decision_file.exists() + + # Verify file content matches decision + with open(decision_file, 'r', encoding='utf-8') as f: + persisted = json.load(f) + + assert persisted['atom_tag'] == atom_tag + assert persisted['type'] == 'VERIFY' + + +if __name__ == '__main__': + pytest.main([__file__, '-v'])