diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..bc3f01f
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,19 @@
+# API Configuration
+API_HOST=0.0.0.0
+API_PORT=8000
+
+# Database
+DATABASE_URL=sqlite:///./oal_agent.db
+
+# Queue
+QUEUE_URL=redis://localhost:6379
+
+# LLM Configuration
+LLM_PROVIDER=openai
+LLM_API_KEY=your-api-key-here
+
+# Security
+SECRET_KEY=change-this-to-a-random-secret-key
+
+# Logging
+LOG_LEVEL=INFO
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..b471fdf
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,32 @@
+name: CI
+
+on:
+ push:
+ branches: [ main, develop ]
+ pull_request:
+ branches: [ main, develop ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.9", "3.10", "3.11"]
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ pip install -r requirements-dev.txt
+
+ - name: Run tests
+ run: |
+ bash scripts/test.sh
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..5ad1be9
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,28 @@
+name: Lint
+
+on:
+ push:
+ branches: [ main, develop ]
+ pull_request:
+ branches: [ main, develop ]
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.11"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements-dev.txt
+
+ - name: Run linters
+ run: |
+ bash scripts/lint.sh
diff --git a/.gitignore b/.gitignore
index 158956b..851edbe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,10 +57,31 @@ Thumbs.db
*.bak
*.tmp
-# Project specific
-*.log
-*.db
-*.sqlite3
-*.coverage
-htmlcov/
+# Testing
.pytest_cache/
+.coverage
+htmlcov/
+.tox/
+.nox/
+
+# Data files (keep .gitkeep files)
+data/datasets/*.csv
+data/datasets/*.json
+data/datasets/*.parquet
+!data/datasets/.gitkeep
+!data/contracts/.gitkeep
+
+# Model files (large models, keep .gitkeep files)
+models/**/*.pt
+models/**/*.pth
+models/**/*.h5
+models/**/*.onnx
+models/**/*.pkl
+!models/**/.gitkeep
+
+# Redis
+dump.rdb
+
+# Temporary files
+tmp/
+temp/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..fac4b99
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,29 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+ - id: check-json
+ - id: check-toml
+ - id: check-added-large-files
+ - id: check-merge-conflict
+ - id: mixed-line-ending
+
+ - repo: https://github.com/psf/black
+ rev: 23.12.1
+ hooks:
+ - id: black
+ language_version: python3.11
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.13.2
+ hooks:
+ - id: isort
+
+ - repo: https://github.com/pycqa/flake8
+ rev: 7.0.0
+ hooks:
+ - id: flake8
+ args: ["--max-line-length=88", "--extend-ignore=E203"]
diff --git a/LICENSE_HEADER b/LICENSE_HEADER
new file mode 100644
index 0000000..7795275
--- /dev/null
+++ b/LICENSE_HEADER
@@ -0,0 +1,14 @@
+# Copyright (C) 2025 OpenAuditLabs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
index d92363d..d8b02b6 100644
--- a/README.md
+++ b/README.md
@@ -1,439 +1,374 @@
-# π€ OpenAuditLabs Agent
+# OAL Agent - Smart Contract Security Analysis System
-[](https://www.gnu.org/licenses/gpl-3.0)
-[](https://www.python.org/downloads/)
-[](https://fastapi.tiangolo.com/)
-[](https://www.docker.com/)
+[
+- `LLM_API_KEY`: API key for LLM provider
+- `LOG_LEVEL`: Logging level (DEBUG, INFO, WARNING, ERROR)
-- Python 3.11+
-- Docker & Docker Compose
-- PostgreSQL 15+
-- Redis 7+
+4. **Install pre-commit hooks**nse-AGPL%20v3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)
+ [![Python 3.9+](https://img.shields.io/badge/python-3.9%2B-blue.svg)](https://www.python.org/downloads/)
+ [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
-### Installation
+A multi-agent system for comprehensive smart contract security analysis using static analysis, dynamic testing, and machine learning.
-```bash
-# Clone the repository
-git clone https://github.com/OpenAuditLabs/agent.git
-cd agent
+## ⚠️ Project Status
-# Install dependencies
-pip install -r requirements.txt
+**π§ Under Active Development** - This project is currently in early development. APIs and features are subject to change.
-# Setup environment variables
-cp .env.example .env
+## ✨ Features
-# Start services
-docker-compose up -d
+- π€ **Multi-Agent Architecture**: Specialized agents for different analysis types
+- π **Static Analysis**: Integration with Slither and other static analyzers
+- π§ͺ **Dynamic Analysis**: Symbolic execution and fuzzing capabilities
+- π§ **ML-Powered Detection**: Machine learning models for vulnerability detection
+- π **REST API**: Easy integration with existing workflows
+- π **Comprehensive Reporting**: Detailed vulnerability reports with severity classification
+- π **Sandboxed Execution**: Safe contract analysis in isolated environments
+- π **Telemetry & Monitoring**: Built-in logging, metrics, and tracing
-# Run database migrations
-alembic upgrade head
+## ποΈ Project Structure
-# Start the agent
-python -m uvicorn main:app --reload
+```
+agent/
+βββ .github/workflows/ # CI/CD workflows
+βββ .vscode/ # VS Code settings
+βββ scripts/ # Utility scripts (lint, test, format)
+βββ docs/ # Documentation
+β βββ architecture.md # System architecture
+β βββ agents.md # Agent documentation
+β βββ api.md # API documentation
+β βββ pipelines.md # Pipeline documentation
+β βββ research/ # Research papers and notes
+βββ models/ # ML models
+β βββ transformers/ # Transformer models
+β βββ gnn/ # Graph Neural Network models
+βββ data/ # Data storage
+β βββ contracts/ # Smart contract samples
+β βββ datasets/ # Training datasets
+βββ tests/ # Test suites
+β βββ unit/ # Unit tests
+β βββ integration/ # Integration tests
+β βββ e2e/ # End-to-end tests
+β βββ load/ # Load tests
+β βββ fixtures/ # Test fixtures
+βββ src/oal_agent/ # Main source code
+β βββ app/ # FastAPI application
+β βββ core/ # Core orchestration
+β βββ agents/ # Analysis agents
+β βββ tools/ # External tool integrations
+β βββ services/ # Background services
+β βββ llm/ # LLM integration
+β βββ security/ # Security components
+β βββ telemetry/ # Logging & metrics
+β βββ utils/ # Utilities
+β βββ cli.py # Command-line interface
+βββ Configuration files (pyproject.toml, requirements.txt, etc.)
```
-### Docker Setup
+## π Quick Start
-```bash
-# Build and run with Docker
-docker-compose up --build
+### Prerequisites
-# API will be available at http://localhost:8000
-```
+- Python 3.9+ (3.11 recommended)
+- Redis (for job queue management)
+- PostgreSQL or SQLite (for result storage)
+- Solidity compiler (solc) for contract analysis
+- Optional: Docker for containerized deployment
-## π§ Architecture
+### Installation
-The Agent system employs a hierarchical multi-agent architecture powered by CrewAI:
+1. **Clone the repository**
-```
-βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
-β Coordinator Agent β
-β (Workflow Orchestration) β
-βββββββββββββββββββββββββββ¬ββββββββββββββββββββββββββββββββββββ
- β
- βββββββββββββββββββΌββββββββββββββββββ
- β β β
-βββββββββΌβββββββ ββββββββββΌβββββββββ βββββββΌββββββ
-βStatic Analysisβ βDynamic Analysis β βML Classifierβ
-β Agent β β Agent β β Agent β
-β (Slither) β β (Mythril) β β(Transformers)β
-βββββββββββββββββ βββββββββββββββββββ βββββββββββββ
- β β β
- βββββββββββββββββββΌββββββββββββββββββ
- β
- βββββββββββΌββββββββββ
- βReport Generation β
- β Agent β
- βββββββββββββββββββββ
-```
+ ```bash
+ git clone https://github.com/OpenAuditLabs/agent.git
+ cd agent
+ ```
-## π Project Structure
+2. **Set up Python environment**
-```
-agent/
-βββ src/
-β βββ agents/ # CrewAI agent implementations
-β β βββ coordinator.py # Main orchestration agent
-β β βββ static_agent.py # Slither integration
-β β βββ dynamic_agent.py # Mythril integration
-β β βββ ml_agent.py # ML classification
-β βββ api/ # FastAPI endpoints
-β β βββ routes/
-β β βββ models/
-β βββ core/ # Core analysis engine
-β β βββ pipeline.py # Analysis pipeline
-β β βββ orchestrator.py # Agent orchestration
-β βββ models/ # ML models & schemas
-β β βββ transformers/ # Transformer models
-β β βββ gnn/ # Graph Neural Networks
-β βββ tools/ # External tool integrations
-β β βββ slither.py # Static analysis
-β β βββ mythril.py # Symbolic execution
-β βββ utils/ # Utility functions
-βββ tests/ # Test suites
-βββ data/ # Sample contracts & datasets
-βββ docker/ # Docker configurations
-βββ docs/ # Documentation
-βββ notebooks/ # Research notebooks
-```
+ ```bash
+ python -m venv .venv
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
+ pip install -r requirements.txt
+ pip install -r requirements-dev.txt
+ ```
-## π§ Usage
+3. **Configure environment**
-### REST API
+ ```bash
+ cp .env.example .env
+ # Edit .env with your configuration
+ ```
-Start the FastAPI server:
+ **Key environment variables:**
-```bash
-uvicorn main:app --host 0.0.0.0 --port 8000
-```
+ - `API_HOST` / `API_PORT`: API server configuration
+ - `DATABASE_URL`: Database connection string
+ - `QUEUE_URL`: Redis connection string
+ - `LLM_PROVIDER`: LLM provider (openai, anthropic, etc.)
+ - `LLM_API_KEY`: API key for LLM provider
+ - `LOG_LEVEL`: Logging level (DEBUG, INFO, WARNING, ERROR)
-### Analyze a Smart Contract
+4. **Install pre-commit hooks**
+ ```bash
+ pre-commit install
+ ```
-```bash
-curl -X POST "http://localhost:8000/analyze/contract" \
- -H "Content-Type: application/json" \
- -d '{
- "contract_code": "contract Simple { function transfer() public {} }",
- "language": "solidity",
- "analysis_type": "comprehensive"
- }'
-```
+For detailed setup instructions, see the [Setup Guide](docs/setup.md).
-### Python SDK
+### Running the Application
-```python
-from openauditlabs_agent import AnalysisClient
+**Start the API server:**
-# Initialize client
-client = AnalysisClient(api_url="http://localhost:8000")
+```bash
+# Using module notation
+python -m src.oal_agent.cli serve
-# Analyze contract
-result = client.analyze_contract(
- contract_code=contract_source,
- language="solidity"
-)
+# Or directly
+python src/oal_agent/cli.py serve
-# Get results
-vulnerabilities = result.get_vulnerabilities()
-for vuln in vulnerabilities:
- print(f"Severity: {vuln.severity}, Type: {vuln.type}")
+# With custom host/port
+python src/oal_agent/cli.py serve --host 0.0.0.0 --port 8080
```
-## π― Key Features
+**Analyze a contract:**
-### π Multi-Modal Analysis
-- **Static Analysis**: Slither integration with 90+ detectors
-- **Dynamic Analysis**: Mythril symbolic execution with PoC generation
-- **ML Classification**: Transformer and GNN models for pattern recognition
-- **Ensemble Methods**: Combined analysis for enhanced accuracy
+```bash
+python src/oal_agent/cli.py analyze path/to/contract.sol
+```
+
+**Access the API:**
-### π Multi-Language Support
-- **Solidity** (.sol) - Complete support
-- **Vyper** (.vy) - Full analysis pipeline
-- **Rust** (Substrate/Ink!) - Advanced detection
-- **Move** (Aptos/Sui) - Experimental support
+- API Documentation: http://localhost:8000/docs
+- Health Check: http://localhost:8000/health
-### π Performance
-- **Processing Speed**: 1000+ LoC analyzed in <5 minutes
-- **Accuracy**: 95%+ vulnerability detection rate
-- **Scalability**: 500+ concurrent analyses
-- **Uptime**: 99.9% availability with auto-scaling
+### API Usage Example
-### π Vulnerability Detection
-- **50+ Vulnerability Types**: Complete SWC coverage
-- **CVSS Scoring**: Automated severity assessment
-- **Proof of Concept**: Executable exploit generation
-- **Remediation**: Actionable fix suggestions
+```python
+import httpx
+
+# Submit a contract for analysis
+async with httpx.AsyncClient() as client:
+ response = await client.post(
+ "http://localhost:8000/api/v1/analysis/",
+ json={
+ "contract_code": "pragma solidity ^0.8.0; contract Example { ... }",
+ "pipeline": "standard"
+ }
+ )
+ job = response.json()
+ job_id = job["job_id"]
+
+ # Check job status
+ status_response = await client.get(f"http://localhost:8000/api/v1/analysis/{job_id}")
+ print(status_response.json())
+
+ # Get results when complete
+ results_response = await client.get(f"http://localhost:8000/api/v1/analysis/{job_id}/results")
+ print(results_response.json())
+```
-## π οΈ Configuration
+## π§ͺ Testing
-### Environment Variables
+**Run all tests:**
```bash
-# Database
-DATABASE_URL=postgresql://user:pass@localhost:5432/openauditlabs
-REDIS_URL=redis://localhost:6379
-
-# API Configuration
-API_HOST=0.0.0.0
-API_PORT=8000
-SECRET_KEY=your-secret-key
-
-# Agent Configuration
-CREWAI_API_KEY=your-crewai-key
-OPENAI_API_KEY=your-openai-key
-
-# Tool Configuration
-SLITHER_VERSION=0.10.0
-MYTHRIL_VERSION=0.24.2
-
-# ML Models
-MODEL_CACHE_DIR=./models
-TRANSFORMER_MODEL=microsoft/codebert-base
-GNN_MODEL_PATH=./models/gnn_classifier.pt
+bash scripts/test.sh
```
-### Agent Configuration
+**Run specific test suites:**
-```python
-# agents/config.py
-AGENT_CONFIG = {
- "coordinator": {
- "model": "gpt-4",
- "temperature": 0.1,
- "max_tokens": 2000
- },
- "static_agent": {
- "slither_detectors": ["all"],
- "timeout": 300,
- "gas_analysis": True
- },
- "dynamic_agent": {
- "mythril_timeout": 600,
- "max_depth": 3,
- "create_poc": True
- },
- "ml_agent": {
- "confidence_threshold": 0.8,
- "ensemble_voting": "soft",
- "model_batch_size": 32
- }
-}
+```bash
+pytest tests/unit/ -v
+pytest tests/integration/ -v
+pytest tests/e2e/ -v
```
-## π§ͺ Development
-
-### Setup Development Environment
+**Run with coverage:**
```bash
-# Install development dependencies
-pip install -r requirements-dev.txt
-
-# Setup pre-commit hooks
-pre-commit install
+pytest tests/ --cov=src/oal_agent --cov-report=html
+```
-# Run tests
-pytest tests/ -v
+## π§ Development
-# Code formatting
-black src/
-isort src/
+**Format code:**
-# Type checking
-mypy src/
+```bash
+bash scripts/format.sh
+# Or manually:
+black src/ tests/
+isort src/ tests/
```
-### Running Tests
+**Run linters:**
```bash
-# Unit tests
-pytest tests/unit/
+bash scripts/lint.sh
+# Includes: black, isort, flake8, mypy
+```
-# Integration tests
-pytest tests/integration/
+**Check code quality:**
-# End-to-end tests
-pytest tests/e2e/
+```bash
+# Run all checks
+pre-commit run --all-files
-# Load tests
-pytest tests/load/ --load-test
+# Run specific checks
+black --check src/ tests/
+flake8 src/ tests/
+mypy src/
```
-### Adding New Agents
+## π¦ Project Components
-1. Create agent class in `src/agents/`
-2. Implement required methods:
- - `analyze()`: Main analysis logic
- - `get_tools()`: Return required tools
- - `get_config()`: Return agent configuration
-3. Register in `src/core/orchestrator.py`
-4. Add tests in `tests/agents/`
+### Core Components
-Example:
+- **Orchestrator**: Manages the overall analysis workflow
+- **Pipeline**: Defines analysis sequences
+- **Config**: Centralized configuration management
-```python
-from crewai import Agent
-from typing import Dict, List
-
-class CustomAgent(Agent):
- def __init__(self, config: Dict):
- super().__init__(
- role="Custom Analyzer",
- goal="Detect specific vulnerability patterns",
- backstory="Specialized security expert",
- tools=self.get_tools(),
- **config
- )
-
- def analyze(self, contract_code: str) -> List[Dict]:
- # Implement custom analysis logic
- return []
-```
+### Agents
-## π Security
+- **Coordinator Agent**: Routes tasks to specialized agents
+- **Static Agent**: Static code analysis using Slither, etc.
+- **Dynamic Agent**: Symbolic execution and fuzzing
+- **ML Agent**: Machine learning-based vulnerability detection
-### Input Validation
-- All contract inputs are sanitized and validated
-- File size limits enforced (max 10MB)
-- Rate limiting on API endpoints
-- Input encoding detection and normalization
-
-### Data Protection
-- Contract source code encrypted at rest (AES-256)
-- Analysis results stored with access controls
-- Audit logs for all operations
-- Automatic data retention policies
-
-### Authentication
-- JWT-based API authentication
-- Role-based access control (RBAC)
-- API key management for integrations
-- Session management and timeout
-
-## π Monitoring
-
-### Metrics
-- Analysis throughput and latency
-- Vulnerability detection accuracy
-- Agent performance metrics
-- Resource utilization
-
-### Health Checks
-```bash
-# System health
-curl http://localhost:8000/health
+### Tools Integration
-# Agent status
-curl http://localhost:8000/agents/status
+- **Slither**: Static analysis
+- **Mythril**: Symbolic execution
+- **Sandbox**: Safe contract execution environment
-# Database connectivity
-curl http://localhost:8000/health/db
-```
+### Services
-### Logging
-- Structured JSON logging
-- Correlation IDs for request tracking
-- Error aggregation and alerting
-- Performance monitoring
+- **Queue Service**: Job queue management
+- **Results Sink**: Collects and stores results
+- **Storage Service**: Persistent data storage
-## π€ Contributing
+### LLM Integration
-We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
+- **Provider**: LLM API integration
+- **Prompts**: Specialized prompts for analysis
+- **Guards**: Safety and validation guardrails
-### Development Workflow
-1. Fork the repository
-2. Create a feature branch (`git checkout -b feature/amazing-feature`)
-3. Commit your changes (`git commit -m 'Add amazing feature'`)
-4. Push to the branch (`git push origin feature/amazing-feature`)
-5. Open a Pull Request
+## π Security
+
+- Input validation for all user inputs
+- Sandboxed execution environment
+- Security policies and permissions
+- See [SECURITY.md](SECURITY.md) for details
-### Code Standards
-- Follow PEP 8 style guide
-- Add type hints to all functions
-- Write comprehensive tests
-- Update documentation
-- Ensure all checks pass
+## π Documentation
-## π Documentation
+- [Setup Guide](docs/setup.md) - Detailed installation and configuration
+- [Architecture](docs/architecture.md) - System design and components
+- [Agents](docs/agents.md) - Agent types and responsibilities
+- [API](docs/api.md) - REST API documentation
+- [Pipelines](docs/pipelines.md) - Analysis pipeline configurations
+- [Research Papers](docs/research/) - Research documentation and papers
-- [API Documentation](https://docs.openauditlabs.com/agent/api)
-- [Agent Development Guide](docs/agents.md)
-- [ML Model Training](docs/ml-training.md)
-- [Deployment Guide](docs/deployment.md)
-- [Troubleshooting](docs/troubleshooting.md)
+## β Troubleshooting
-## π Deployment
+### Common Issues
-### Docker Production
+**Import errors after installation:**
```bash
-# Build production image
-docker build -t openauditlabs/agent:latest .
+# Make sure you're in the virtual environment
+source .venv/bin/activate
+# Reinstall dependencies
+pip install -r requirements.txt
+```
-# Run with docker-compose
-docker-compose -f docker-compose.prod.yml up -d
+**Redis connection errors:**
+
+```bash
+# Check if Redis is running
+redis-cli ping
+# Start Redis if needed
+redis-server
```
-### Kubernetes
+**Permission errors on scripts:**
```bash
-# Deploy to Kubernetes
-kubectl apply -f k8s/
+# Make scripts executable
+chmod +x scripts/*.sh
+```
-# Check deployment
-kubectl get pods -n openauditlabs
+**Module not found errors:**
+
+```bash
+# Add src to PYTHONPATH
+export PYTHONPATH="${PYTHONPATH}:${PWD}/src"
```
-### IDE Plugins
-- VS Code extension available
-- Vim/Neovim integration
-- JetBrains plugin support
-- Sublime Text package
+For more help, see [GitHub Issues](https://github.com/OpenAuditLabs/agent/issues) or contact the team.
+
+## π€ Contributing
+
+We welcome contributions! Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct and the process for submitting pull requests.
+
+### Development Workflow
+
+1. Fork the repository
+2. Create a feature branch (`git checkout -b feature/amazing-feature`)
+3. Make your changes
+4. Run tests and linters (`bash scripts/test.sh && bash scripts/lint.sh`)
+5. Commit your changes (`git commit -m 'Add amazing feature'`)
+6. Push to the branch (`git push origin feature/amazing-feature`)
+7. Open a Pull Request
+
+## π Roadmap
-## π Performance Benchmarks
+- [ ] Complete core agent implementations
+- [ ] Add support for more static analysis tools
+- [ ] Implement ML model training pipeline
+- [ ] Add support for multiple blockchain platforms
+- [ ] Create web dashboard for analysis results
+- [ ] Implement real-time analysis streaming
+- [ ] Add plugin system for custom analyzers
-| Metric | Value |
-|--------|-------|
-| Analysis Speed | < 5 minutes per 1000 LoC |
-| Accuracy | 95%+ vulnerability detection |
-| False Positives | < 5% |
-| Throughput | 500+ analyses/day |
-| Uptime | 99.9% |
-| Memory Usage | < 2GB per analysis |
+## π Bug Reports & Feature Requests
-## π Support
+Please use the [GitHub Issues](https://github.com/OpenAuditLabs/agent/issues) to report bugs or request features.
-- **Documentation**: [docs.openauditlabs.com](https://docs.openauditlabs.com)
-- **Issues**: [GitHub Issues](https://github.com/OpenAuditLabs/agent/issues)
-- **Discussions**: [GitHub Discussions](https://github.com/OpenAuditLabs/agent/discussions)
-- **Email**: support@openauditlabs.com
-- **Discord**: [OpenAuditLabs Community](https://discord.gg/openauditlabs)
+## π¬ Community & Support
+
+- **GitHub Discussions**: [Join the conversation](https://github.com/OpenAuditLabs/agent/discussions)
+- **Issues**: [Report bugs or request features](https://github.com/OpenAuditLabs/agent/issues)
+- **Security**: See [SECURITY.md](SECURITY.md) for reporting security vulnerabilities
## π License
-This project is licensed under the GNU Affero General Public License v3 (AGPLv3) - see the [LICENSE](LICENSE) file for details.
+This project is licensed under the GNU Affero General Public License v3.0 (AGPL-3.0) - see the [LICENSE](LICENSE) file for details.
+
+Key points:
+
+- ✅ You can use, modify, and distribute this software
+- ✅ You must disclose source code of any modifications
+- ✅ Network use counts as distribution (you must share your modifications)
+- ✅ You must license derivative works under AGPL-3.0
## π Acknowledgments
-- [CrewAI](https://crewai.com/) for multi-agent orchestration
-- [Slither](https://github.com/crytic/slither) for static analysis
-- [Mythril](https://github.com/ConsenSys/mythril) for symbolic execution
-- [OpenZeppelin](https://openzeppelin.com/) for smart contract security standards
-- [Smart Contract Weakness Classification](https://swcregistry.io/) for vulnerability taxonomy
+- [OpenAuditLabs](https://github.com/OpenAuditLabs) team and contributors
+- Open source security tools community ([Slither](https://github.com/crytic/slither), [Mythril](https://github.com/ConsenSys/mythril), etc.)
+- Smart contract security researchers and auditors worldwide
---
-
-π‘οΈ Securing the Future of Smart Contracts with AI π‘οΈ
-
-[Website](https://openauditlabs.com) β’ [Documentation](https://docs.openauditlabs.com) β’ [Blog](https://blog.openauditlabs.com) β’ [Twitter](https://twitter.com/openauditlabs)
-
+**Made with ❤️ by OpenAuditLabs**
diff --git a/Static_agent/Slither_agent/pre_slither_validator.py b/Static_agent/Slither_agent/pre_slither_validator.py
deleted file mode 100644
index aa62b75..0000000
--- a/Static_agent/Slither_agent/pre_slither_validator.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python3
-"""
-pre_slither_validator.py
-
-A pre-processing validator that runs before Slither analysis to:
-- Check and fix common syntax issues
-- Ensure contracts compile before vulnerability analysis
-- Integrate seamlessly with existing slither_to_json_with_diagnostics.py
-"""
-
-import os
-import re
-import shutil
-import subprocess
-from datetime import datetime
-from typing import Tuple, List, Dict
-
-class PreSlitherValidator:
- def __init__(self):
- self.fixes_applied = []
-
- def validate_and_fix_contract(self, file_path: str) -> Tuple[bool, str, List[str]]:
- """
- Validate a contract and apply fixes if needed.
- Returns: (success, fixed_content_or_error, fixes_applied)
- """
- fixes = []
-
- try:
- # Read the file
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- original_content = content
-
- # Apply systematic fixes
- content, file_fixes = self._apply_all_fixes(content, file_path)
- fixes.extend(file_fixes)
-
- # If changes were made, create backup and update file
- if content != original_content:
- backup_path = f"{file_path}.pre_slither_backup"
- shutil.copy2(file_path, backup_path)
-
- with open(file_path, 'w', encoding='utf-8') as f:
- f.write(content)
-
- fixes.append(f"Backup created: {backup_path}")
-
- return True, content, fixes
-
- except Exception as e:
- return False, str(e), fixes
-
- def _apply_all_fixes(self, content: str, file_path: str) -> Tuple[str, List[str]]:
- """Apply all known fixes to the content"""
- fixes = []
-
- # Fix 1: Add SPDX license if missing
- if not re.search(r'//\s*SPDX-License-Identifier:', content):
- content = '// SPDX-License-Identifier: MIT\n' + content
- fixes.append("Added SPDX license identifier")
-
- # Fix 2: Abstract contracts
- content, abstract_fixes = self._fix_abstract_contracts(content)
- fixes.extend(abstract_fixes)
-
- # Fix 3: Virtual functions
- content, virtual_fixes = self._fix_virtual_functions(content)
- fixes.extend(virtual_fixes)
-
- # Fix 4: Deprecated .value() syntax
- content, value_fixes = self._fix_value_syntax(content)
- fixes.extend(value_fixes)
-
- # Fix 5: Call return handling
- content, call_fixes = self._fix_call_handling(content)
- fixes.extend(call_fixes)
-
- # Fix 6: Pragma version alignment
- content, pragma_fixes = self._fix_pragma_version(content)
- fixes.extend(pragma_fixes)
-
- return content, fixes
-
- def _fix_abstract_contracts(self, content: str) -> Tuple[str, List[str]]:
- """Fix contracts that should be abstract"""
- fixes = []
- # Find contracts with unimplemented functions
- contract_pattern = r'contract\s+(\w+)([^{]*)\{([^}]*(?:\{[^}]*\}[^}]*)*)\}'
- def fix_contract(match):
- contract_name = match.group(1)
- inheritance = match.group(2)
- body = match.group(3)
- # Check for unimplemented functions (ending with semicolon)
- if re.search(r'function\s+\w+[^{]*;', body):
- fixes.append(f"Made contract '{contract_name}' abstract")
- return f'abstract contract {contract_name}{inheritance}{{{body}}}'
- return match.group(0)
- content = re.sub(contract_pattern, fix_contract, content, flags=re.DOTALL)
- return content, fixes
-
- def _fix_virtual_functions(self, content: str) -> Tuple[str, List[str]]:
- """Add virtual keyword to unimplemented functions"""
- fixes = []
-
- # Pattern for unimplemented functions
- pattern = r'function\s+(\w+)\s*\(([^)]*)\)\s*([^{;]*);'
-
- def add_virtual(match):
- func_name = match.group(1)
- params = match.group(2)
- modifiers = match.group(3).strip()
-
- if 'virtual' not in modifiers:
- if modifiers:
- new_func = f"function {func_name}({params}) {modifiers} virtual;"
- else:
- new_func = f"function {func_name}({params}) public virtual;"
-
- fixes.append(f"Added 'virtual' to function '{func_name}'")
- return new_func
-
- return match.group(0)
-
- content = re.sub(pattern, add_virtual, content)
-
- return content, fixes
-
\ No newline at end of file
diff --git a/Static_agent/Slither_agent/slither_to_json_with_diagnostics.py b/Static_agent/Slither_agent/slither_to_json_with_diagnostics.py
deleted file mode 100644
index 8a32d8c..0000000
--- a/Static_agent/Slither_agent/slither_to_json_with_diagnostics.py
+++ /dev/null
@@ -1,460 +0,0 @@
-#!/usr/bin/env python3
-"""
-slither_to_json_with_diagnostics.py
-
-Improved Slither runner that:
- - automatically validates and fixes contract syntax before analysis
- - collects detector results
- - captures detector exceptions
- - records timing
- - falls back to CLI and Docker if Python API yields no findings
- - emits JSON report to stdout
-"""
-
-import sys
-import os
-import json
-import uuid
-import time
-import re
-from datetime import datetime
-import logging
-
-# Import our pre-validator
-try:
- from pre_slither_validator import validate_contract_before_slither
- PRE_VALIDATION_AVAILABLE = True
-except ImportError:
- PRE_VALIDATION_AVAILABLE = False
-
-# === configurable ===
-CONTRACT = sys.argv[1] if len(sys.argv) > 1 else "VulnerableBank.sol"
-SOLC_PATH = sys.argv[2] if len(sys.argv) > 2 else None
-DETECTORS_CLI = "all" # Use all detectors for comprehensive analysis
-DOCKER_IMAGE = "trailofbits/slither:latest"
-AUTO_FIX_SYNTAX = True # Enable automatic syntax fixing
-# ====================
-
-def now_iso():
- return datetime.utcnow().isoformat() + "Z"
-
-
-def parse_slither_cli_output(stdout, stderr, contract_path):
- """Extract findings from Slither CLI output"""
- findings = []
-
- # Look for detector findings in stderr (where Slither outputs results)
- output_text = stderr + stdout
-
- # Enhanced patterns to match different Slither output formats
-
- # 1. Pattern for "uses delegatecall to a input-controlled function id" (Controlled Delegatecall)
- controlled_delegatecall_pattern = r'([^\n]+)\s+uses delegatecall to a input-controlled function id\n\t- ([^\n]+)'
- controlled_delegatecall_matches = re.findall(controlled_delegatecall_pattern, output_text)
-
- for func_sig, call_location in controlled_delegatecall_matches:
- # Extract function name and file info
- func_match = re.search(r'([^(]+)\(([^)]*)\)\s+\(([^#]+)#(\d+)-(\d+)\)', func_sig.strip())
- if func_match:
- contract_func = func_match.group(1).strip()
- file_path = func_match.group(3).strip()
- start_line = func_match.group(4)
- end_line = func_match.group(5)
-
- findings.append({
- "finding_id": str(uuid.uuid4()),
- "swc_id": None,
- "severity": "High", # Controlled delegatecall is high severity
- "tool_name": "slither-cli",
- "tool_version": "unknown",
- "file_path": file_path,
- "function_name": contract_func,
- "description": "Controlled Delegatecall",
- "elements": [
- f"Controlled delegatecall in {func_sig.strip()}:",
- f"\t- {call_location.strip()}"
- ],
- "detector": "controlleddelegatecall",
- "timestamp": now_iso(),
- "line_range": f"{start_line}-{end_line}",
- "reference": "https://github.com/crytic/slither/wiki/Detector-Documentation#controlled-delegatecall"
- })
-
- # 2. Pattern for "ignores return value by" (Unchecked Low Level)
- unchecked_pattern = r'([^\n]+)\s+ignores return value by ([^\n]+)'
- unchecked_matches = re.findall(unchecked_pattern, output_text)
-
- for func_sig, call_info in unchecked_matches:
- func_match = re.search(r'([^(]+)\(([^)]*)\)\s+\(([^#]+)#(\d+)-(\d+)\)', func_sig.strip())
- if func_match:
- contract_func = func_match.group(1).strip()
- file_path = func_match.group(3).strip()
- start_line = func_match.group(4)
- end_line = func_match.group(5)
-
- findings.append({
- "finding_id": str(uuid.uuid4()),
- "swc_id": None,
- "severity": "Medium",
- "tool_name": "slither-cli",
- "tool_version": "unknown",
- "file_path": file_path,
- "function_name": contract_func,
- "description": "Unchecked Low Level Call",
- "elements": [
- f"Unchecked low level call in {func_sig.strip()}:",
- f"\t- ignores return value by {call_info.strip()}"
- ],
- "detector": "uncheckedlowlevel",
- "timestamp": now_iso(),
- "line_range": f"{start_line}-{end_line}",
- "reference": "https://github.com/crytic/slither/wiki/Detector-Documentation#unchecked-low-level-calls"
- })
-
- # 3. Pattern for "lacks a zero-check on" (Missing Zero Address Validation)
- zero_check_pattern = r'([^\n]+)\s+lacks a zero-check on :\n\t\t- ([^\n]+)'
- zero_check_matches = re.findall(zero_check_pattern, output_text)
-
- for param_info, call_info in zero_check_matches:
- # Extract function info from parameter
- param_match = re.search(r'([^.]+)\.([^.]+)\(([^)]*)\)\.([^(]+)\s+\(([^#]+)#(\d+)\)', param_info.strip())
- if param_match:
- contract_name = param_match.group(1).strip()
- func_name = param_match.group(2).strip()
- param_name = param_match.group(4).strip()
- file_path = param_match.group(5).strip()
- line_num = param_match.group(6)
-
- findings.append({
- "finding_id": str(uuid.uuid4()),
- "swc_id": None,
- "severity": "Low",
- "tool_name": "slither-cli",
- "tool_version": "unknown",
- "file_path": file_path,
- "function_name": f"{contract_name}.{func_name}",
- "description": "Missing Zero Address Validation",
- "elements": [
- f"Missing zero-check in {contract_name}.{func_name}:",
- f"\t- Parameter {param_name} lacks zero-check on: {call_info.strip()}"
- ],
- "detector": "missingzeroaddressvalidation",
- "timestamp": now_iso(),
- "line_range": line_num,
- "reference": "https://github.com/crytic/slither/wiki/Detector-Documentation#missing-zero-address-validation"
- })
-
- # 4. Fallback pattern for other detectors (legacy support)
- finding_pattern = r'INFO:Detectors:\n(.*?)(?=INFO:|$)'
- matches = re.findall(finding_pattern, output_text, re.DOTALL)
-
- for match in matches:
- lines = match.strip().split('\n')
- current_finding = None
-
- for line in lines:
- line = line.strip()
- if not line or any(pattern in line for pattern in [
- 'uses delegatecall to a input-controlled function id',
- 'ignores return value by',
- 'lacks a zero-check on'
- ]):
- continue # Skip lines we've already processed above
-
- # Look for other vulnerability patterns
- if ' in ' in line and '(' in line and ')' in line:
- parts = line.split(' in ')
- if len(parts) >= 2:
- vuln_type = parts[0].strip()
- location_part = parts[1]
-
- func_match = re.search(r'([^(]+)\(([^)]*)\)', location_part)
- file_match = re.search(r'\(([^#]+)#(\d+)-(\d+)\)', location_part)
-
- function_name = func_match.group(1).strip() if func_match else None
- file_info = file_match.groups() if file_match else (contract_path, None, None)
-
- # Determine severity based on vulnerability type
- severity = "Medium" # default
- if any(keyword in vuln_type.lower() for keyword in ["reentrancy", "controlled", "delegatecall"]):
- severity = "High"
- elif any(keyword in vuln_type.lower() for keyword in ["low level", "unchecked", "missing"]):
- severity = "Medium"
- elif any(keyword in vuln_type.lower() for keyword in ["naming", "convention", "style"]):
- severity = "Low"
-
- current_finding = {
- "finding_id": str(uuid.uuid4()),
- "swc_id": None,
- "severity": severity,
- "tool_name": "slither-cli",
- "tool_version": "unknown",
- "file_path": file_info[0] if file_info[0] else contract_path,
- "function_name": function_name,
- "description": vuln_type,
- "elements": [line],
- "detector": vuln_type.replace(' ', '').lower(),
- "timestamp": now_iso(),
- "line_range": f"{file_info[1]}-{file_info[2]}" if file_info[1] and file_info[2] else None
- }
- findings.append(current_finding)
-
- elif current_finding and (line.startswith('\t') or line.startswith('Reference:')):
- if 'elements' not in current_finding:
- current_finding['elements'] = []
- current_finding['elements'].append(line)
-
- if line.startswith('Reference:'):
- current_finding['reference'] = line.replace('Reference:', '').strip()
-
- return findings
-
-
-
-report = {
- "request_id": str(uuid.uuid4()),
- "contract_paths": [CONTRACT],
- "findings": [],
- "diagnostics": [],
- "total_findings": 0,
- "start_time": None,
- "end_time": None,
- "duration_seconds": None,
- "tool": "slither-python-api"
-}
-
-start_ts = time.time()
-report["start_time"] = now_iso()
-
-# === PRE-VALIDATION: Fix syntax issues before Slither analysis ===
-if AUTO_FIX_SYNTAX and PRE_VALIDATION_AVAILABLE:
- validation_result = validate_contract_before_slither(CONTRACT)
- report["diagnostics"].append(validation_result)
-else:
- report["diagnostics"].append({
- "stage": "pre_validation_skipped",
- "ok": True,
- "note": "Pre-validation disabled or not available"
- })
-
-# Setup logger
-logger = logging.getLogger("slither_logger")
-logger.setLevel(logging.WARNING)
-
-# Try Python API first (best-effort)
-try:
- from slither import Slither
- from slither.detectors import all_detectors
- import slither as slither_pkg
- report["diagnostics"].append({"stage": "python_import", "ok": True, "note": "Imported slither package."})
-
- kwargs = {}
- if SOLC_PATH:
- kwargs['solc'] = SOLC_PATH
-
- try:
- sl = Slither(os.path.abspath(CONTRACT), **kwargs)
- report["diagnostics"].append({"stage": "instantiate_slither", "ok": True, "note": "Slither instance created."})
- except Exception as e:
- report["diagnostics"].append({"stage": "instantiate_slither", "ok": False, "error": str(e)})
- sl = None
-
- detector_classes = []
- try:
- # gather classes from module
- for name in dir(all_detectors):
- try:
- obj = getattr(all_detectors, name)
- except Exception:
- continue
- if isinstance(obj, type):
- detector_classes.append(obj)
- report["diagnostics"].append({"stage":"collect_detectors", "ok": True, "detector_count": len(detector_classes)})
- except Exception as e:
- report["diagnostics"].append({"stage":"collect_detectors", "ok": False, "error": str(e)})
- # Run detectors if available
- findings = []
- detector_exceptions = []
- if sl and detector_classes:
- for dclass in detector_classes:
- dname = getattr(dclass, "__name__", str(dclass))
- try:
- # Try different instantiation patterns based on Slither's actual API
- try:
- # Modern Slither API: detector(compilation_unit, slither, logger)
- if hasattr(sl, 'compilation_units') and len(sl.compilation_units) > 0:
- detector = dclass(sl.compilation_units[0], sl, logger)
- else:
- # Fallback: try with just slither instance
- detector = dclass(sl)
- except TypeError as te:
- try:
- # Try legacy pattern: detector(slither, logger)
- detector = dclass(sl, logger)
- except TypeError:
- try:
- # Try with compilation unit from slither.contracts
- if hasattr(sl, 'contracts') and len(sl.contracts) > 0:
- compilation_unit = sl.contracts[0].compilation_unit
- detector = dclass(compilation_unit, sl, logger)
- else:
- # Last resort: instantiate with no args and set attributes
- detector = dclass()
- if hasattr(detector, 'slither'):
- detector.slither = sl
- if hasattr(detector, 'logger'):
- detector.logger = logger
- except TypeError:
- raise te # Re-raise the original error
-
- except Exception as e:
- detector_exceptions.append({
- "detector": dname,
- "phase": "instantiate",
- "error": f"Could not instantiate detector: {str(e)}"
- })
- continue
-
- try:
- results = detector.detect()
- except Exception as e:
- detector_exceptions.append({"detector": dname, "phase": "detect", "error": str(e)})
- continue
-
- if not results:
- continue
-
- for r in results:
- try:
- check = getattr(r, "check", None) or getattr(r, "name", None) or (r.get("check") if isinstance(r, dict) else None)
- impact = getattr(r, "impact", None) or getattr(r, "severity", None) or (r.get("impact") if isinstance(r, dict) else None) or "Unknown"
- desc = getattr(r, "description", None) or (r.get("description") if isinstance(r, dict) else None) or ""
- elems = getattr(r, "elements", None) or (r.get("elements") if isinstance(r, dict) else None)
- if elems is None:
- nodes = getattr(r, "nodes", None) or (r.get("nodes") if isinstance(r, dict) else None)
- elems = [str(n) for n in nodes] if nodes else [str(r)]
- except Exception:
- check = str(r); impact = "Unknown"; desc = ""; elems = [str(r)]
-
- findings.append({
- "finding_id": str(uuid.uuid4()),
- "swc_id": None,
- "severity": impact,
- "tool_name": "slither",
- "tool_version": getattr(slither_pkg, "__version__", None) or "unknown",
- "file_path": CONTRACT,
- "function_name": None,
- "description": desc,
- "elements": elems,
- "detector": dname,
- "timestamp": now_iso()
- })
-
- else:
- report["diagnostics"].append({"stage": "skip_detectors", "ok": False, "note": "Slither instance or detectors not available."})
-
- if detector_exceptions:
- report["diagnostics"].append({"stage": "detector_exceptions", "ok": False, "exceptions": detector_exceptions})
-
- report["findings"] = findings
- report["total_findings"] = len(findings)
-
-except Exception as e:
- # Python API import failed
- report["diagnostics"].append({"stage": "python_api_import", "ok": False, "error": str(e)})
-
-# === Fallback: CLI and Docker if Python API yields no findings ===
-if report["total_findings"] == 0:
- report["diagnostics"].append({
- "stage": "fallback_attempt",
- "ok": True,
- "note": "No findings from Python API β trying CLI and Docker fallbacks."
- })
- import subprocess
- import shutil
-
- slither_bin = shutil.which("slither")
- if slither_bin:
- try:
- cmd = ["slither", CONTRACT, "--detect", DETECTORS_CLI]
- p = subprocess.run(cmd, capture_output=True, text=True, timeout=180)
-
- # Parse CLI output for findings
- cli_findings = parse_slither_cli_output(p.stdout, p.stderr, CONTRACT)
- report["findings"].extend(cli_findings)
- report["total_findings"] = len(report["findings"])
-
- report["diagnostics"].append({
- "stage": "cli_slither",
- "ok": True,
- "returncode": p.returncode,
- "stdout": p.stdout[:2000],
- "stderr": p.stderr[:2000],
- "findings_extracted": len(cli_findings)
- })
- except Exception as e:
- report["diagnostics"].append({
- "stage": "cli_slither",
- "ok": False,
- "error": str(e)
- })
- else:
- report["diagnostics"].append({
- "stage": "cli_slither",
- "ok": False,
- "note": "slither binary not found in PATH."
- })
-
- docker_bin = shutil.which("docker")
- if report["total_findings"] == 0 and docker_bin:
- try:
- cwd = os.getcwd()
- cmd = [
- "docker", "run", "--rm",
- "-v", f"{cwd}:/tmp", "-w", "/tmp",
- DOCKER_IMAGE, "slither", CONTRACT,
- "--detect", DETECTORS_CLI
- ]
- p = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
-
- # Parse Docker output for findings
- docker_findings = parse_slither_cli_output(p.stdout, p.stderr, CONTRACT)
- report["findings"].extend(docker_findings)
- report["total_findings"] = len(report["findings"])
-
- report["diagnostics"].append({
- "stage": "docker_slither",
- "ok": True,
- "returncode": p.returncode,
- "stdout": p.stdout[:2000],
- "stderr": p.stderr[:2000],
- "findings_extracted": len(docker_findings)
- })
- except Exception as e:
- report["diagnostics"].append({
- "stage": "docker_slither",
- "ok": False,
- "error": str(e)
- })
- elif not docker_bin:
- report["diagnostics"].append({
- "stage": "docker_slither",
- "ok": False,
- "note": "docker not found in PATH."
- })
-
-end_ts = time.time()
-report["end_time"] = now_iso()
-report["duration_seconds"] = round(end_ts - start_ts, 3)
-
-# final output
-print(json.dumps(report, indent=2))
-
-
-# Run Command------------>
-#.\slither_env\Scripts\python.exe .\slither_to_json_with_diagnostics.py .\VulnerableBank.sol > final_output.json 2>&1
-
-# .\slither_env\Scripts\slither.exe .\delcall.sol --detect all
-
-#Final Command
-#.\slither_env\Scripts\python.exe .\slither_to_json_with_diagnostics.py .\test2.sol > test2_fixed_results.json
\ No newline at end of file
diff --git a/Static_agent/Slither_agent/solidity_syntax_fixer.py b/Static_agent/Slither_agent/solidity_syntax_fixer.py
deleted file mode 100644
index 4743564..0000000
--- a/Static_agent/Slither_agent/solidity_syntax_fixer.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python3
-"""
-solidity_syntax_fixer.py
-
-Automated Solidity syntax checker and fixer that:
-- Detects common syntax issues across different Solidity versions
-- Automatically fixes deprecated patterns to modern syntax
-- Validates compilation after fixes
-- Creates backup of original files
-- Provides detailed report of changes made
-"""
-
-import os
-import re
-import json
-import uuid
-import shutil
-import subprocess
-from datetime import datetime
-from typing import List, Dict, Tuple, Optional
-
-class SoliditySyntaxFixer:
- def __init__(self, solc_path: Optional[str] = None):
- self.solc_path = solc_path or "solc"
- self.fixes_applied = []
- self.compilation_errors = []
-
- def check_compilation(self, file_path: str) -> Tuple[bool, str]:
- """Check if a Solidity file compiles successfully"""
- try:
- cmd = [self.solc_path, "--version"]
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
- if result.returncode != 0:
- return False, "Solidity compiler not found"
-
- # Try to compile the file
- cmd = [self.solc_path, file_path, "--bin", "--overwrite"]
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
-
- if result.returncode == 0:
- return True, "Compilation successful"
- else:
- return False, result.stderr
-
- except Exception as e:
- return False, f"Error during compilation: {str(e)}"
-
- def detect_and_fix_syntax_issues(self, content: str, file_path: str) -> str:
- """Detect and fix common Solidity syntax issues"""
- original_content = content
- fixes = []
-
- # Fix 1: Abstract contracts with unimplemented functions
- pattern = r'contract\s+(\w+)\s*{'
- matches = re.finditer(pattern, content)
- for match in matches:
- contract_name = match.group(1)
- contract_start = match.start()
-
- # Look for unimplemented functions in this contract
- # Find the contract body
- brace_count = 0
- contract_end = contract_start
- for i, char in enumerate(content[contract_start:]):
- if char == '{':
- brace_count += 1
- elif char == '}':
- brace_count -= 1
- if brace_count == 0:
- contract_end = contract_start + i
- break
-
- contract_body = content[contract_start:contract_end + 1]
-
- # Check for unimplemented functions (ending with semicolon)
- func_pattern = r'function\s+\w+\([^)]*\)\s*[^{]*;'
- if re.search(func_pattern, contract_body):
- # This contract has unimplemented functions, make it abstract
- new_contract_def = f'abstract contract {contract_name} {{'
- content = content.replace(match.group(0), new_contract_def)
- fixes.append(f"Made contract '{contract_name}' abstract (has unimplemented functions)")
-
- # Fix 2: Add virtual keyword to unimplemented functions
- virtual_pattern = r'function\s+(\w+)\s*\([^)]*\)\s*([^{;]*);'
- def add_virtual(match):
- func_name = match.group(1)
- modifiers = match.group(2).strip()
- if 'virtual' not in modifiers:
- if modifiers:
- new_func = f"function {func_name}({match.group(0).split('(')[1].split(')')[0]}) {modifiers} virtual;"
- else:
- new_func = f"function {func_name}({match.group(0).split('(')[1].split(')')[0]}) public virtual;"
- fixes.append(f"Added 'virtual' keyword to function '{func_name}'")
- return new_func
- return match.group(0)
-
- content = re.sub(virtual_pattern, add_virtual, content)
-
- # Fix 3: Update deprecated .value() syntax to {value: ...}
- value_pattern = r'(\w+)\.call\.value\(([^)]+)\)\(\)'
- def fix_call_value(match):
- address = match.group(1)
- value = match.group(2)
- fixes.append(f"Updated deprecated .value() syntax to modern {{value: ...}} format")
- return f'{address}.call{{value: {value}}}("")'
-
- content = re.sub(value_pattern, fix_call_value, content)
-
- # Fix 4: Add success handling for call operations
- call_pattern = r'require\s*\(\s*(\w+)\.call\{value:\s*([^}]+)\}\s*\(""\)\s*\)'
- def fix_call_handling(match):
- address = match.group(1)
- value = match.group(2)
- fixes.append(f"Added proper success handling for call operation")
- return f'(bool success, ) = {address}.call{{value: {value}}}("");\n require(success)'
-
- content = re.sub(call_pattern, fix_call_handling, content)
-
- # Fix 5: Update pragma version if it's too old and causing issues
- pragma_pattern = r'pragma\s+solidity\s+([^;]+);'
- pragma_match = re.search(pragma_pattern, content)
- if pragma_match:
- version = pragma_match.group(1)
- # If version is older than 0.6.0 and we have modern syntax, update it
- if '^0.5' in version or '^0.4' in version:
- if 'abstract' in content or 'virtual' in content or '{value:' in content:
- content = re.sub(pragma_pattern, 'pragma solidity ^0.8.0;', content)
- fixes.append(f"Updated pragma version from '{version}' to '^0.8.0' for compatibility")
-
- # Fix 6: Add SPDX license if missing
- if 'SPDX-License-Identifier' not in content:
- lines = content.split('\n')
- lines.insert(0, '// SPDX-License-Identifier: MIT')
- content = '\n'.join(lines)
- fixes.append("Added missing SPDX license identifier")
-
- # Fix 7: Fix require statements that use old call patterns
- old_require_pattern = r'require\s*\(\s*address\([^)]+\)\.call\.value\([^)]+\)\(\)\s*\)'
- if re.search(old_require_pattern, content):
- # This is handled by the earlier fixes, but let's ensure consistency
- fixes.append("Fixed deprecated require statement with old call pattern")
-
- # Store fixes for this file
- if fixes:
- self.fixes_applied.append({
- "file": file_path,
- "fixes": fixes,
- "timestamp": datetime.now().isoformat()
- })
-
- return content
-
- def fix_file(self, file_path: str, create_backup: bool = True) -> Dict:
- """Fix a single Solidity file"""
- result = {
- "file": file_path,
- "success": False,
- "fixes_applied": [],
- "compilation_before": False,
- "compilation_after": False,
- "backup_created": False,
- "error": None
- }
-
- try:
- # Read original file
- with open(file_path, 'r', encoding='utf-8') as f:
- original_content = f.read()
-
- # Check compilation before fixes
- compile_before, error_before = self.check_compilation(file_path)
- result["compilation_before"] = compile_before
-
- # Apply fixes
- fixed_content = self.detect_and_fix_syntax_issues(original_content, file_path)
-
- # If no changes needed
- if fixed_content == original_content:
- result["success"] = True
- result["fixes_applied"] = ["No fixes needed - file is already valid"]
- return result
-
- # Create backup if requested
- if create_backup:
- backup_path = f"{file_path}.backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
- shutil.copy2(file_path, backup_path)
- result["backup_created"] = backup_path
-
- # Write fixed content
- with open(file_path, 'w', encoding='utf-8') as f:
- f.write(fixed_content)
-
- # Check compilation after fixes
- compile_after, error_after = self.check_compilation(file_path)
- result["compilation_after"] = compile_after
-
- # Get applied fixes for this file
- file_fixes = [fix for fix in self.fixes_applied if fix["file"] == file_path]
- if file_fixes:
- result["fixes_applied"] = file_fixes[0]["fixes"]
-
- result["success"] = True
-
- except Exception as e:
- result["error"] = str(e)
-
- return result
-
- def fix_directory(self, directory: str, create_backup: bool = True) -> Dict:
- """Fix all Solidity files in a directory"""
- results = {
- "directory": directory,
- "files_processed": [],
- "summary": {
- "total_files": 0,
- "files_fixed": 0,
- "compilation_improved": 0,
- "errors": 0
- },
- "timestamp": datetime.now().isoformat()
- }
-
- # Find all .sol files
- sol_files = []
- for root, dirs, files in os.walk(directory):
- for file in files:
- if file.endswith('.sol'):
- sol_files.append(os.path.join(root, file))
-
- results["summary"]["total_files"] = len(sol_files)
-
- # Process each file
- for file_path in sol_files:
- file_result = self.fix_file(file_path, create_backup)
- results["files_processed"].append(file_result)
-
- if file_result["success"] and file_result["fixes_applied"] and file_result["fixes_applied"] != ["No fixes needed - file is already valid"]:
- results["summary"]["files_fixed"] += 1
-
- if not file_result["compilation_before"] and file_result["compilation_after"]:
- results["summary"]["compilation_improved"] += 1
-
- if file_result.get("error"):
- results["summary"]["errors"] += 1
-
- return results
-
- def generate_report(self, results: Dict, output_file: str = None):
- """Generate a detailed report of fixes applied"""
- if output_file:
- with open(output_file, 'w') as f:
- json.dump(results, f, indent=2)
-
- # Print summary to console
- print("\n" + "="*60)
- print("π§ SOLIDITY SYNTAX FIXER REPORT")
- print("="*60)
-
- if "directory" in results:
- print(f"π Directory: {results['directory']}")
- summary = results["summary"]
- print(f"π Files Processed: {summary['total_files']}")
- print(f"π οΈ Files Fixed: {summary['files_fixed']}")
- print(f"β
Compilation Improved: {summary['compilation_improved']}")
- print(f"β Errors: {summary['errors']}")
-
- print("\nπ DETAILED RESULTS:")
- for file_result in results["files_processed"]:
- self._print_file_result(file_result)
- else:
- # Single file result
- self._print_file_result(results)
-
- def _print_file_result(self, file_result: Dict):
- """Print results for a single file"""
- file_name = os.path.basename(file_result["file"])
- print(f"\nπ {file_name}")
- print("-" * 40)
-
- if file_result["error"]:
- print(f"β Error: {file_result['error']}")
- return
-
- print(f"π Compilation Before: {'β
' if file_result['compilation_before'] else 'β'}")
- print(f"π Compilation After: {'β
' if file_result['compilation_after'] else 'β'}")
-
- if file_result.get("backup_created"):
- print(f"πΎ Backup Created: {os.path.basename(file_result['backup_created'])}")
-
- if file_result["fixes_applied"]:
- print("π οΈ Fixes Applied:")
- for fix in file_result["fixes_applied"]:
- print(f" β’ {fix}")
-
-def main():
- import argparse
-
- parser = argparse.ArgumentParser(description="Solidity Syntax Fixer")
- parser.add_argument("path", help="Path to Solidity file or directory")
- parser.add_argument("--no-backup", action="store_true", help="Don't create backup files")
- parser.add_argument("--report", help="Save detailed report to JSON file")
- parser.add_argument("--solc-path", help="Path to solc compiler")
-
- args = parser.parse_args()
-
- fixer = SoliditySyntaxFixer(args.solc_path)
-
- if os.path.isfile(args.path):
- # Single file
- result = fixer.fix_file(args.path, not args.no_backup)
- fixer.generate_report(result, args.report)
- elif os.path.isdir(args.path):
- # Directory
- result = fixer.fix_directory(args.path, not args.no_backup)
- fixer.generate_report(result, args.report)
- else:
- print(f"β Error: Path '{args.path}' not found")
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/agent-research-skeleton.ipynb b/agent-research-skeleton.ipynb
deleted file mode 100644
index 6f28ec6..0000000
--- a/agent-research-skeleton.ipynb
+++ /dev/null
@@ -1,299 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "dceaf715",
- "metadata": {},
- "source": [
- "# OpenAuditLabs Agent Research Notebook\n",
- "\n",
- "*Skeleton template for ongoing R&D on the AI-powered analysis engine*\n",
- "\n",
- "---\n",
- "## How to use this notebook\n",
- "1. Fork / clone the **agent** repo and open this notebook in VS Code, Jupyter Lab or Colab.\n",
- "2. Replace **TODO** blocks with your experiments (datasets, models, metrics, etc.).\n",
- "3. Keep results reproducible by seeding RNGs and recording package versions.\n",
- "4. Commit regularly; use short, descriptive branch names (`feat/gnn-eval`, `exp/bert-tuning`, β¦).\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d42173ac",
- "metadata": {
- "tags": []
- },
- "outputs": [],
- "source": [
- "# ------------------------------------------------------------\n",
- "# Environment & Utility Imports β extend as needed \n",
- "# ------------------------------------------------------------\n",
- "import os, sys, json, random, logging, pathlib\n",
- "from typing import List, Dict, Any\n",
- "import numpy as np\n",
- "import torch\n",
- "\n",
- "SEED = 42\n",
- "random.seed(SEED); np.random.seed(SEED); torch.manual_seed(SEED)\n",
- "\n",
- "logging.basicConfig(level=logging.INFO)\n",
- "logger = logging.getLogger(\"agent_research\")\n",
- "\n",
- "logger.info(\"Environment ready β begin hacking!\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1f424dd9",
- "metadata": {},
- "source": [
- "---\n",
- "## 1 Data Loading / Curation\n",
- "Document *where* the data came from (commitβhashed snapshot, S3 path, etc.).\n",
- "\n",
- "> **TODO:** Replace the stub below with real dataset loading."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "b4aa7f44",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Example: load a local JSONL corpus of contracts + labels\n",
- "from pathlib import Path\n",
- "DATA_PATH = Path(\"../data/contracts.jsonl\")\n",
- "assert DATA_PATH.exists(), 'Add your dataset under ../data/'\n",
- "\n",
- "def iter_contracts(path: pathlib.Path):\n",
- " with path.open() as fh:\n",
- " for line in fh:\n",
- " yield json.loads(line)\n",
- "\n",
- "sample = next(iter_contracts(DATA_PATH))\n",
- "print('First record:', sample)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "d6acd277",
- "metadata": {},
- "source": [
- "---\n",
- "## 2 Feature-Engineering / Pre-processing\n",
- "Briefly explain *why* these features matter (AST paths, CFG edges, byte-code opcodes, β¦).\n",
- "\n",
- "> **TODO:** Prototype new representations here."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "51daa833",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Placeholder: turn Solidity source into token sequence\n",
- "def tokenize_source(src: str) -> List[str]:\n",
- " return src.replace('(', ' ( ').replace(')', ' ) ').split()\n",
- "\n",
- "tokens = tokenize_source(sample['source'])\n",
- "print(tokens[:40])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "d5296f5a",
- "metadata": {},
- "source": [
- "---\n",
- "## 3 Model Prototyping\n",
- "Start with a baseline (e.g. CodeBERT fine-tune, simple GNN) before fancy ideas.\n",
- "\n",
- "> **TODO:** Wire-up huggingface/transformers or PyG experiments below."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "a8b75b53",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Quick baseline with transformers (stub)\n",
- "from transformers import AutoTokenizer, AutoModelForSequenceClassification\n",
- "model_name = 'microsoft/codebert-base'\n",
- "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
- "model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)\n",
- "\n",
- "# Dummy forward pass\n",
- "inputs = tokenizer(sample['source'][:512], return_tensors='pt')\n",
- "logits = model(**inputs).logits\n",
- "print('Logits shape:', logits.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "b65195cf",
- "metadata": {},
- "source": [
- "---\n",
- "## 4 Evaluation & Metrics\n",
- "Define precision / recall thresholds, confusion matrix, FP-FN inspection hooks.\n",
- "\n",
- "> **TODO:** Drop your evaluation loop here."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3ee7fc8d",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Placeholder evaluation stub\n",
- "def evaluate(preds, labels):\n",
- " # Implement real metric calc\n",
- " return {'precision': 1.0, 'recall': 1.0}\n",
- "\n",
- "metrics = evaluate([1,0,1], [1,0,1])\n",
- "print(metrics)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4801267a",
- "metadata": {},
- "source": [
- "---\n",
- "## 5 Integration Hooks\n",
- "Show how to export your trained model + inference wrapper so the **agent** service can consume it.\n",
- "\n",
- "> **TODO:** Save to `../models/` and add a small REST demo."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "c4430cdd",
- "metadata": {},
- "source": [
- "## 6 Agentic Audit Logic Prototype\n",
- "Minimal agentic audit loop: detect, score, suggest fix"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "115635b6",
- "metadata": {},
- "outputs": [],
- "source": [
- "# ------------------------------------------------------------\n",
- "# 6 Agentic Audit Logic Prototype\n",
- "# ------------------------------------------------------------\n",
- "def analyze_contract_simple(source: str) -> list:\n",
- " # Stub: pretend to detect two common vulnerabilities\n",
- " findings = []\n",
- " if \"call.value\" in source or \"transfer(\" in source:\n",
- " findings.append(\"reentrancy\")\n",
- " if \"uint\" in source and (\"+\" in source or \"-\" in source):\n",
- " findings.append(\"integer_overflow\")\n",
- " return findings\n",
- "\n",
- "def score_vulnerabilities_simple(vulns: list) -> dict:\n",
- " # Assign simple severity scores\n",
- " return {v: 9.0 if v == \"reentrancy\" else 6.0 for v in vulns}\n",
- "\n",
- "def suggest_fix_simple(vuln: str) -> str:\n",
- " # Suggest a fix for each vulnerability\n",
- " if vuln == \"reentrancy\":\n",
- " return \"Apply checks-effects-interactions pattern.\"\n",
- " if vuln == \"integer_overflow\":\n",
- " return \"Use SafeMath or Solidity >=0.8.0 for checked math.\"\n",
- " return \"No suggestion.\"\n",
- "\n",
- "def agentic_audit_simple(source: str) -> dict:\n",
- " vulns = analyze_contract_simple(source)\n",
- " scores = score_vulnerabilities_simple(vulns)\n",
- " fixes = {v: suggest_fix_simple(v) for v in vulns}\n",
- " return {\"vulnerabilities\": vulns, \"scores\": scores, \"fixes\": fixes}\n",
- "\n",
- "# Example usage on the loaded sample contract\n",
- "result = agentic_audit_simple(sample['source'])\n",
- "print(\"Agentic audit result:\", result)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "8d34dfcf",
- "metadata": {},
- "source": [
- "## 7. Slighter Integration and Vulnerability Scoring Prototype"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "098f1ba9",
- "metadata": {},
- "outputs": [],
- "source": [
- "\n",
- "import subprocess\n",
- "#Slither Integration\n",
- "def run_slither(contract_path):\n",
- " result = subprocess.run(\n",
- " [\"slither\", contract_path, \"--json\", \"slither-output.json\"],\n",
- " capture_output=True, text=True\n",
- " )\n",
- " if result.returncode != 0:\n",
- " print(\"Slither failed:\", result.stderr)\n",
- " return None\n",
- " with open(\"slither-output.json\") as f:\n",
- " return json.load(f)\n",
- "\n",
- "\n",
- "# --- Simple Vulnerability Scoring Engine ---\n",
- "def score_vulnerability(vuln_type, impact, exploitability):\n",
- " base_score = {\n",
- " \"critical\": 9,\n",
- " \"high\": 7,\n",
- " \"medium\": 5,\n",
- " \"low\": 3\n",
- " }.get(impact, 1)\n",
- " exploit_factor = 2 if exploitability == \"easy\" else 1\n",
- " return base_score * exploit_factor\n",
- "\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "---\n",
- "## References & Reading List\n",
- "- Juraj et al. *SmartBugs*: \n",
- "- Trail of Bits blog on Slither internals\n",
- "- OpenAI Cookbook examples for CodeBERT fine-tuning\n",
- "\n",
- "Add new papers / links whenever you start a thread of work β keeps the lab journal tidy."
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "name": "python",
- "version": "3.13.2"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/audit_engine/architecture_plan/architecture.txt b/audit_engine/architecture_plan/architecture.txt
deleted file mode 100644
index f7f61eb..0000000
--- a/audit_engine/architecture_plan/architecture.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-audit_engine/
-βββ core/
-β βββ __init__.py
-β βββ engine.py # Main audit orchestrator
-β βββ schemas.py # Pydantic models for unified JSON schema
-β βββ config.py # Configuration management
-βββ static_analysis/
-β βββ __init__.py
-β βββ base.py # AbstractAdapter: run() -> list[Finding] | ToolError
-β βββ slither_adapter.py # Slither integration
-β βββ mythril_adapter.py # Mythril integration
-β βββ manticore_adapter.py # Manticore integration
-βββ dynamic_analysis/
-β βββ __init__.py
-β βββ echidna_adapter.py # Echidna fuzzing
-β βββ adversarial_fuzz.py # AdversarialFuzz implementation
-βββ scoring/
-β βββ __init__.py
-β βββ scoring_engine.py # CVSS-inspired scoring
-β βββ severity_mapping.py # SWC to severity mapping
-βββ utils/
-β βββ __init__.py
-β βββ file_handler.py # Contract file operations
-β βββ swc_loader.py # Loads and indexes SWC registry into SWC_MAP
-β βββ report_generator.py # JSON report generation
-β βββ logger.py # Structured logging
-βββ data/
-β βββ swc_registry.json # Canonical SWC metadata.
-βββ tests/
- βββ test_slither.py
- βββ test_mythril.py
- βββ test_echidna.py
- βββ sample_contracts/
diff --git a/audit_engine/common/model.py b/audit_engine/common/model.py
deleted file mode 100644
index 6efe2ef..0000000
--- a/audit_engine/common/model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# audit_engine/common/models.py
-from pydantic import BaseModel, Field
-from typing import List, Optional
-
-class LineSpan(BaseModel):
- start: int
- end: int
-
-class Finding(BaseModel):
- finding_id: str
- swc_id: str
- severity: str # Critical, Major, etc.
- tool_name: str
- tool_version: str
- file_path: str
- line_span: LineSpan
- function_name: Optional[str]
- description: str
- confidence: float = Field(..., ge=0.0, le=1.0)
- recommendations: List[str]
- timestamp: Optional[str]
diff --git a/audit_engine/core/__init__.py b/audit_engine/core/__init__.py
deleted file mode 100644
index 6d6d6f8..0000000
--- a/audit_engine/core/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Core module for OpenAudit Agent
-
-Main orchestration engine implementing the agentic AI framework
-for smart contract vulnerability detection and mitigation.
-"""
-
-from .engine import AuditEngine
-from .config import AuditConfig, StaticAnalysisConfig, ScoringConfig, AgentConfig
-from ..dynamic_analysis.config import DynamicAnalysisConfig
-from .schemas import (
- Finding, AnalysisRequest, AnalysisResult, ToolError,
- SeverityLevel, ConfidenceLevel, ExploitComplexity,
- AgentTask, AgentResult, LineSpan
-)
-
-__all__ = [
- # Main engine
- "AuditEngine",
-
- # Configuration
- "AuditConfig",
- "StaticAnalysisConfig",
- "DynamicAnalysisConfig",
- "ScoringConfig",
- "AgentConfig",
-
- # Schemas
- "Finding",
- "AnalysisRequest",
- "AnalysisResult",
- "ToolError",
- "SeverityLevel",
- "ConfidenceLevel",
- "ExploitComplexity",
- "AgentTask",
- "AgentResult",
- "LineSpan",
-]
-
-# Version info
-__version__ = "0.1.0"
-__author__ = "OpenAudit Labs"
-__description__ = "Agentic AI Smart Contract Security Analysis Engine"
diff --git a/audit_engine/core/config.py b/audit_engine/core/config.py
deleted file mode 100644
index aadbcd6..0000000
--- a/audit_engine/core/config.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""
-Configuration management for OpenAudit Agent
-
-Centralized configuration with environment variable support
-and validation using Pydantic settings.
-"""
-
-import os
-from typing import Dict, Any, Optional
-try:
- # Pydantic v2
- from pydantic_settings import BaseSettings # type: ignore
- from pydantic import Field
- from pydantic import field_validator as _field_validator
-except Exception: # pragma: no cover
- # Pydantic v1 fallback
- from pydantic import BaseSettings, Field # type: ignore
- from pydantic import validator as _field_validator # type: ignore
-
-
-class StaticAnalysisConfig(BaseSettings):
- """Static analysis tool configuration"""
- enable_slither: bool = Field(default=True, description="Enable Slither analyzer")
- enable_mythril: bool = Field(default=True, description="Enable Mythril analyzer")
- enable_manticore: bool = Field(default=False, description="Enable Manticore analyzer")
-
- # Tool-specific configs
- slither: Dict[str, Any] = Field(default_factory=dict)
- mythril: Dict[str, Any] = Field(default_factory=dict)
- manticore: Dict[str, Any] = Field(default_factory=dict)
-
- # Common settings
- analysis_timeout: int = Field(default=300, description="Per-tool timeout in seconds")
- max_workers: int = Field(default=4, description="Maximum parallel workers")
-
- class Config:
- env_prefix = "STATIC_ANALYSIS_"
-
-
-# Reuse the richer, validated config from the dynamic module
-from audit_engine.dynamic_analysis.config import DynamicAnalysisConfig # noqa: E402
-
-
-class ScoringConfig(BaseSettings):
- """Vulnerability scoring configuration"""
- enable_cvss_scoring: bool = Field(default=True, description="Enable CVSS-inspired scoring")
-
- # Weight factors for scoring
- financial_impact_weight: float = Field(default=0.4, ge=0.0, le=1.0)
- exploit_complexity_weight: float = Field(default=0.3, ge=0.0, le=1.0)
- on_chain_damage_weight: float = Field(default=0.3, ge=0.0, le=1.0)
-
- # Scoring thresholds
- critical_threshold: float = Field(default=9.0, ge=0.0, le=10.0)
- major_threshold: float = Field(default=7.0, ge=0.0, le=10.0)
- medium_threshold: float = Field(default=4.0, ge=0.0, le=10.0)
-
- class Config:
- env_prefix = "SCORING_"
-
-
-class AgentConfig(BaseSettings):
- """Agentic AI configuration"""
- enable_detection_agent: bool = Field(default=True, description="Enable detection agent")
- enable_exploit_agent: bool = Field(default=False, description="Enable exploit simulation agent")
- enable_patch_agent: bool = Field(default=False, description="Enable patch suggestion agent")
- enable_verification_agent: bool = Field(default=False, description="Enable patch verification agent")
-
- # LLM settings
- llm_model: str = Field(default="gpt-4", description="LLM model to use")
- llm_temperature: float = Field(default=0.1, ge=0.0, le=2.0)
- llm_max_tokens: int = Field(default=2000, gt=0)
-
- # Agent coordination
- consensus_threshold: float = Field(default=0.7, ge=0.0, le=1.0)
- max_agent_retries: int = Field(default=3, ge=0)
- agent_timeout: int = Field(default=300, gt=0)
-
- class Config:
- env_prefix = "AGENT_"
-
-
-class AuditConfig(BaseSettings):
- """Main configuration class combining all subsystems"""
-
- # Core settings
- log_level: str = Field(default="INFO", description="Logging level")
- max_analysis_time: int = Field(default=1800, description="Global analysis timeout")
- enable_parallel_execution: bool = Field(default=True, description="Enable parallel analysis")
-
- # Feature flags
- enable_agentic_ai: bool = Field(default=False, description="Enable agentic AI features")
- enable_hitl: bool = Field(default=False, description="Enable human-in-the-loop")
- enable_explainability: bool = Field(default=False, description="Enable explainability features")
- enable_rl_feedback: bool = Field(default=False, description="Enable RL feedback collection")
-
- # Subsystem configurations
- static_analysis: StaticAnalysisConfig = Field(default_factory=StaticAnalysisConfig)
- dynamic_analysis: DynamicAnalysisConfig = Field(default_factory=DynamicAnalysisConfig)
- scoring: ScoringConfig = Field(default_factory=ScoringConfig)
- agents: AgentConfig = Field(default_factory=AgentConfig)
-
- # Storage and output
- output_format: str = Field(default="json", description="Output format")
- save_intermediate_results: bool = Field(default=False, description="Save intermediate analysis results")
- results_directory: str = Field(default="./results", description="Results output directory")
-
- @_field_validator('log_level')
- def validate_log_level(cls, v): # type: ignore[override]
- valid_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
- value_upper = v.upper() if isinstance(v, str) else str(v).upper()
- if value_upper not in valid_levels:
- raise ValueError(f'log_level must be one of {valid_levels}')
- return value_upper
-
- class Config:
- env_prefix = "AUDIT_"
- env_file = ".env"
- case_sensitive = False
-
- @classmethod
- def from_file(cls, config_path: str) -> 'AuditConfig':
- """Load configuration from file"""
- # TODO: Implement YAML/JSON config file loading
- return cls()
-
- def dict(self, **kwargs) -> Dict[str, Any]:
- """Convert to dictionary with nested configs (v1 compat)"""
- if hasattr(super(), 'model_dump'):
- return self.model_dump() # type: ignore[attr-defined]
- return super().dict(**kwargs)
diff --git a/audit_engine/core/engine.py b/audit_engine/core/engine.py
deleted file mode 100644
index fe0ba51..0000000
--- a/audit_engine/core/engine.py
+++ /dev/null
@@ -1,478 +0,0 @@
-"""
-Core Audit Engine Orchestrator
-
-Main engine that coordinates static analysis, dynamic analysis, scoring,
-and agentic AI components as defined in the research paper and workflow.
-Implements Phase 2-5 coordination with Phase 6+ hooks for HITL and RL.
-"""
-
-import asyncio
-import logging
-from datetime import datetime
-from pathlib import Path
-from typing import List, Dict, Any, Optional
-from concurrent.futures import ThreadPoolExecutor, as_completed
-
-from .schemas import (
- AnalysisRequest, AnalysisResult, Finding, ToolError,
- SeverityLevel, ConfidenceLevel, AgentTask, AgentResult
-)
-from .config import AuditConfig
-from ..static_analysis.base import AbstractAdapter as StaticAdapter
-from ..dynamic_analysis import run_dynamic_analysis, DynamicAnalysisOrchestrator
-from ..scoring.scoring_engine import ScoringEngine
-from ..utils.logger import get_logger
-from ..utils.file_handler import ContractFileHandler
-
-
-class AuditEngine:
- """
- Main orchestrator implementing the agentic AI framework architecture.
- Coordinates multi-phase analysis pipeline with consensus mechanisms.
- """
-
- def __init__(self, config: Optional[AuditConfig] = None):
- self.config = config or AuditConfig()
- self.logger = get_logger(__name__, self.config.log_level)
-
- # Initialize components
- self.file_handler = ContractFileHandler()
- self.scoring_engine = ScoringEngine(config=self.config.scoring)
-
- # Phase 2: Analysis tool adapters (lazy-loaded)
- self._static_adapters: Optional[List[StaticAdapter]] = None
- self._dynamic_orchestrator: Optional[DynamicAnalysisOrchestrator] = None
-
- # Phase 5: Agentic AI components (future integration)
- self._agent_orchestrator = None
- self.enable_agents = self.config.enable_agentic_ai
-
- # Phase 6: HITL and explainability hooks
- self.enable_hitl = self.config.enable_hitl
- self.enable_explainability = self.config.enable_explainability
-
- # Phase 7: RL feedback integration
- self.enable_rl_feedback = self.config.enable_rl_feedback
-
- self.logger.info(f"AuditEngine initialized with config: {self.config.dict()}")
-
- @property
- def static_adapters(self) -> List[StaticAdapter]:
- """Lazy-load static analysis adapters"""
- if self._static_adapters is None:
- self._static_adapters = self._initialize_static_adapters()
- return self._static_adapters
-
- @property
- def dynamic_orchestrator(self) -> DynamicAnalysisOrchestrator:
- """Lazy-load dynamic analysis orchestrator"""
- if self._dynamic_orchestrator is None:
- dac = self.config.dynamic_analysis
- dac_dict = (
- dac.to_runtime_config() if hasattr(dac, "to_runtime_config")
- else dac.model_dump() if hasattr(dac, "model_dump")
- else dac.dict() if hasattr(dac, "dict")
- else dac
- )
- self._dynamic_orchestrator = DynamicAnalysisOrchestrator(
- config=dac_dict,
- logger=self.logger
- )
- return self._dynamic_orchestrator
-
- def _initialize_static_adapters(self) -> List[StaticAdapter]:
- """Initialize static analysis tool adapters"""
- adapters: List[StaticAdapter] = []
- sa_cfg = self.config.static_analysis
-
- # Import adapters dynamically to avoid hard dependencies
- if getattr(sa_cfg, "enable_slither", True):
- try:
- from ..static_analysis.slither_adapter import SlitherAdapter
- slither_cfg = getattr(sa_cfg, "slither", {})
- slither_cfg = slither_cfg.model_dump() if hasattr(slither_cfg, "model_dump") else (
- slither_cfg.dict() if hasattr(slither_cfg, "dict") else slither_cfg
- )
- adapters.append(SlitherAdapter(config=slither_cfg, logger=self.logger))
- except ImportError as e:
- self.logger.warning(f"Slither adapter unavailable: {e}")
-
- if getattr(sa_cfg, "enable_mythril", True):
- try:
- from ..static_analysis.mythril_adapter import MythrilAdapter
- mythril_cfg = getattr(sa_cfg, "mythril", {})
- mythril_cfg = mythril_cfg.model_dump() if hasattr(mythril_cfg, "model_dump") else (
- mythril_cfg.dict() if hasattr(mythril_cfg, "dict") else mythril_cfg
- )
- adapters.append(MythrilAdapter(config=mythril_cfg, logger=self.logger))
- except ImportError as e:
- self.logger.warning(f"Mythril adapter unavailable: {e}")
-
- if getattr(sa_cfg, "enable_manticore", False):
- try:
- from ..static_analysis.manticore_adapter import ManticoreAdapter
- mcore_cfg = getattr(sa_cfg, "manticore", {})
- mcore_cfg = mcore_cfg.model_dump() if hasattr(mcore_cfg, "model_dump") else (
- mcore_cfg.dict() if hasattr(mcore_cfg, "dict") else mcore_cfg
- )
- adapters.append(ManticoreAdapter(config=mcore_cfg, logger=self.logger))
- except ImportError as e:
- self.logger.warning(f"Manticore adapter unavailable: {e}")
-
- self.logger.info(f"Initialized {len(adapters)} static analysis adapters")
- return adapters
-
- async def analyze(self, request: AnalysisRequest) -> AnalysisResult:
- """
- Main analysis entry point implementing multi-phase workflow.
- Coordinates static, dynamic, scoring, and agentic components.
- """
- self.logger.info(f"Starting analysis for {len(request.contract_paths)} contracts")
-
- # Initialize result object
- result = AnalysisResult(contract_paths=request.contract_paths)
-
- try:
- # Phase 1: File validation and preprocessing
- validated_paths = await self._validate_contracts(request.contract_paths)
-
- # Phase 2: Static and Dynamic Analysis (parallel execution)
- static_findings, dynamic_findings, tool_errors = await self._run_analysis_phase(
- validated_paths, request
- )
-
- # Combine findings
- all_findings = static_findings + dynamic_findings
- result.findings = all_findings
- result.tool_errors = tool_errors
-
- # Phase 3: Vulnerability Scoring
- if request.include_scoring:
- await self._apply_scoring(result.findings)
-
- # Phase 5: Multi-Agent Orchestration (if enabled)
- if request.enable_ai_agents and self.enable_agents:
- agent_results = await self._run_agent_orchestration(result, request)
- result.agent_consensus = agent_results.get("consensus")
- result.patch_suggestions = agent_results.get("patches")
-
- # Phase 6: Explainability Analysis (if enabled)
- if self.enable_explainability:
- result.explainability_report = await self._generate_explainability_report(result)
-
- # Phase 7: RL Feedback Collection (if enabled)
- if self.enable_rl_feedback:
- await self._collect_rl_feedback(result, request)
-
- # Finalize result statistics
- result.finalize()
-
- self.logger.info(
- f"Analysis completed: {result.total_findings} findings, "
- f"{len(result.tool_errors)} errors, {result.duration_seconds:.2f}s"
- )
-
- return result
-
- except Exception as e:
- self.logger.error(f"Analysis failed: {e}", exc_info=True)
- result.tool_errors.append(ToolError(
- tool_name="AuditEngine",
- error_type="AnalysisError",
- error_message=str(e)
- ))
- result.finalize()
- return result
-
- async def _validate_contracts(self, contract_paths: List[str]) -> List[str]:
- """Validate and normalize contract file paths"""
- validated = []
-
- for path_str in contract_paths:
- path = Path(path_str)
-
- if not path.exists():
- raise FileNotFoundError(f"Contract file not found: {path}")
-
- if not path.is_file():
- raise ValueError(f"Path is not a file: {path}")
-
- if not self.file_handler.is_supported_contract(path):
- raise ValueError(f"Unsupported contract file type: {path}")
-
- validated.append(str(path.resolve()))
-
- self.logger.debug(f"Validated {len(validated)} contract files")
- return validated
-
- async def _run_analysis_phase(
- self,
- contract_paths: List[str],
- request: AnalysisRequest
- ) -> tuple[List[Finding], List[Finding], List[ToolError]]:
- """
- Run static and dynamic analysis in parallel.
- Implements Phase 2 workflow with error handling and timeout.
- """
- static_findings = []
- dynamic_findings = []
- tool_errors = []
-
- # Create analysis tasks
- tasks = []
-
- if request.include_static:
- tasks.append(self._run_static_analysis(contract_paths, request))
-
- if request.include_dynamic:
- tasks.append(self._run_dynamic_analysis(contract_paths, request))
-
- if not tasks:
- self.logger.warning("No analysis methods enabled")
- return static_findings, dynamic_findings, tool_errors
-
- # Execute analysis phases in parallel
- try:
- results = await asyncio.gather(*tasks, return_exceptions=True)
-
- for i, result in enumerate(results):
- if isinstance(result, Exception):
- error_source = "static" if i == 0 and request.include_static else "dynamic"
- self.logger.error(f"{error_source} analysis failed: {result}")
- tool_errors.append(ToolError(
- tool_name=f"{error_source}_analysis",
- error_type=type(result).__name__,
- error_message=str(result)
- ))
- else:
- findings, errors = result
- if i == 0 and request.include_static:
- static_findings = findings
- else:
- dynamic_findings = findings
- tool_errors.extend(errors)
-
- except Exception as e:
- self.logger.error(f"Analysis phase execution failed: {e}")
- tool_errors.append(ToolError(
- tool_name="analysis_orchestrator",
- error_type="ExecutionError",
- error_message=str(e)
- ))
-
- return static_findings, dynamic_findings, tool_errors
-
- async def _run_static_analysis(
- self,
- contract_paths: List[str],
- request: AnalysisRequest
- ) -> tuple[List[Finding], List[ToolError]]:
- """Execute static analysis tools with parallel execution"""
- findings = []
- errors = []
-
- if not self.static_adapters:
- self.logger.warning("No static analysis adapters available")
- return findings, errors
-
- # Run adapters in parallel with timeout
- from concurrent.futures import TimeoutError
- with ThreadPoolExecutor(max_workers=len(self.static_adapters)) as executor:
- future_to_adapter = {
- executor.submit(self._run_static_adapter, adapter, contract_path, request.max_analysis_time): adapter
- for adapter in self.static_adapters
- for contract_path in contract_paths
- }
- try:
- for future in as_completed(future_to_adapter, timeout=request.max_analysis_time):
- try:
- adapter_findings = future.result()
- findings.extend(adapter_findings)
- except Exception as e:
- adapter = future_to_adapter[future]
- errors.append(ToolError(
- tool_name=adapter.__class__.__name__,
- error_type=type(e).__name__,
- error_message=str(e)
- ))
- except TimeoutError:
- # Cancel outstanding tasks and record errors
- for f, adapter in future_to_adapter.items():
- if not f.done():
- f.cancel()
- errors.append(ToolError(
- tool_name=adapter.__class__.__name__,
- error_type="TimeoutError",
- error_message=f"Static analysis exceeded {request.max_analysis_time}s"
- ))
- except Exception as e:
- self.logger.exception("Static analysis execution failed")
- errors.append(ToolError(
- tool_name="static_analysis",
- error_type=type(e).__name__,
- error_message=str(e)
- ))
- self.logger.info(f"Static analysis completed: {len(findings)} findings, {len(errors)} errors")
- return findings, errors
-
- def _run_static_adapter(self, adapter: StaticAdapter, contract_path: str, timeout: Optional[int] = None) -> List[Finding]:
- """Run individual static analysis adapter"""
- try:
- raw_results = adapter.run(contract_path, timeout=timeout)
- return self._normalize_static_findings(adapter, raw_results)
- except Exception:
- self.logger.exception(f"Static adapter {adapter.__class__.__name__} failed on {contract_path}")
- raise
-
- def _normalize_static_findings(self, adapter: StaticAdapter, raw_results: List[Any]) -> List[Finding]:
- """Normalize static analysis results to Finding schema"""
- findings = []
-
- for result in raw_results:
- try:
- # Convert tool-specific output to Finding schema
- finding = self._convert_to_finding(adapter.__class__.__name__, result)
- findings.append(finding)
- except Exception as e:
- self.logger.warning(f"Failed to normalize finding from {adapter.__class__.__name__}: {e}")
-
- return findings
-
- async def _run_dynamic_analysis(
- self,
- contract_paths: List[str],
- request: AnalysisRequest
- ) -> tuple[List[Finding], List[ToolError]]:
- """Execute dynamic analysis through orchestrator"""
- try:
- analysis_results = await self.dynamic_orchestrator.analyze_contracts(
- contract_paths,
- analysis_type=request.analysis_type
- )
-
- # Convert AnalysisResult objects to Finding objects
- findings = []
- for result in analysis_results:
- details = getattr(result, "finding_details", {}) or {}
- finding = Finding(
- swc_id=details.get("swc_id"),
- severity=self._map_severity(getattr(result, "severity", "Medium")),
- tool_name=getattr(result, "tool_name", "unknown"),
- tool_version=getattr(result, "tool_version", "unknown"),
- file_path=details.get("file_path") or details.get("contract_path") or getattr(result, "contract_path", "unknown"),
- description=getattr(result, "vulnerability_type", str(details)),
- reproduction_steps=str(details.get("reproduction_steps", details)),
- confidence=self._confidence_to_float(getattr(result, "confidence", 0.5)),
- recommendations=getattr(result, "remediation_suggestion", "").split('\n') if getattr(result, "remediation_suggestion", None) else [],
- cross_chain_impact=getattr(result, "cross_chain_impact", None)
- )
- findings.append(finding)
-
- self.logger.info(f"Dynamic analysis completed: {len(findings)} findings")
- return findings, [] # Dynamic orchestrator handles errors internally
-
- except Exception as e:
- self.logger.error(f"Dynamic analysis failed: {e}")
- return [], [ToolError(
- tool_name="dynamic_analysis",
- error_type=type(e).__name__,
- error_message=str(e)
- )]
-
- def _convert_to_finding(self, tool_name: str, raw_result: Any) -> Finding:
- """Convert tool-specific result to standardized Finding"""
- # Handle dict results (most common)
- if isinstance(raw_result, dict):
- recs = raw_result.get("recommendations", [])
- if isinstance(recs, str):
- recs = [recs]
- return Finding(
- swc_id=raw_result.get("swc_id"),
- severity=self._map_severity(raw_result.get("severity", "Medium")),
- tool_name=raw_result.get("tool", tool_name),
- tool_version=raw_result.get("tool_version", "1.0.0"),
- file_path=raw_result.get("file_path") or raw_result.get("path", "unknown"),
- line_span=None, # TODO: Parse/normalize line numbers if available
- function_name=raw_result.get("function_name"),
- description=raw_result.get("description", "No description"),
- reproduction_steps=raw_result.get("reproduction_steps", "No steps provided"),
- confidence=self._confidence_to_float(raw_result.get("confidence", 0.5)),
- recommendations=recs
- )
- # Handle object results
- return Finding(
- swc_id=getattr(raw_result, "swc_id", None),
- severity=self._map_severity(getattr(raw_result, "severity", "Medium")),
- tool_name=getattr(raw_result, "tool", tool_name),
- tool_version=getattr(raw_result, "tool_version", "1.0.0"),
- file_path=getattr(raw_result, "file_path", "unknown"),
- description=getattr(raw_result, "description", str(raw_result)),
- reproduction_steps=getattr(raw_result, "reproduction_steps", "No steps provided"),
- confidence=self._confidence_to_float(getattr(raw_result, "confidence", 0.5)),
- recommendations=getattr(raw_result, "recommendations", [])
- )
-
- def _map_severity(self, severity: str) -> SeverityLevel:
- """Map tool severity strings to standardized SeverityLevel"""
- severity_map = {
- "critical": SeverityLevel.CRITICAL,
- "high": SeverityLevel.MAJOR,
- "major": SeverityLevel.MAJOR,
- "medium": SeverityLevel.MEDIUM,
- "low": SeverityLevel.MINOR,
- "minor": SeverityLevel.MINOR,
- "informational": SeverityLevel.INFORMATIONAL,
- "info": SeverityLevel.INFORMATIONAL
- }
- return severity_map.get(str(severity).lower(), SeverityLevel.MEDIUM)
-
- def _confidence_to_float(self, confidence) -> float:
- """Convert confidence enum to float value"""
- if isinstance(confidence, str):
- confidence_map = {
- "critical": 0.97,
- "high": 0.9,
- "medium": 0.7,
- "low": 0.4
- }
- return confidence_map.get(confidence.lower(), 0.5)
- return float(confidence) if confidence else 0.5
-
- async def _apply_scoring(self, findings: List[Finding]) -> None:
- """Apply CVSS-inspired scoring to findings"""
- try:
- for finding in findings:
- score = self.scoring_engine.calculate_score(finding)
- finding.explainability_trace = {"cvss_score": score}
-
- self.logger.info(f"Applied scoring to {len(findings)} findings")
- except Exception as e:
- self.logger.error(f"Scoring failed: {e}")
-
- async def _run_agent_orchestration(
- self,
- result: AnalysisResult,
- request: AnalysisRequest
- ) -> Dict[str, Any]:
- """Phase 5: Multi-agent orchestration (placeholder for future implementation)"""
- # TODO: Implement full agentic AI orchestration
- self.logger.info("Agent orchestration not yet implemented")
- return {
- "consensus": {"status": "not_implemented"},
- "patches": []
- }
-
- async def _generate_explainability_report(self, result: AnalysisResult) -> Dict[str, Any]:
- """Phase 6: Generate explainability report (placeholder)"""
- # TODO: Implement explainability module
- return {
- "analysis_trace": "explainability_not_implemented",
- "decision_paths": [],
- "confidence_analysis": {}
- }
-
- async def _collect_rl_feedback(self, result: AnalysisResult, request: AnalysisRequest) -> None:
- """Phase 7: Collect reinforcement learning feedback (placeholder)"""
- # TODO: Implement RL feedback collection
- self.logger.debug("RL feedback collection not yet implemented")
- return None
diff --git a/audit_engine/core/report_generator.py b/audit_engine/core/report_generator.py
deleted file mode 100644
index 7810dec..0000000
--- a/audit_engine/core/report_generator.py
+++ /dev/null
@@ -1,206 +0,0 @@
-"""
-Audit Report Generator
-Aggregates results from static and dynamic analysis, scores vulnerabilities, and outputs a comprehensive report.
-"""
-import json
-from typing import List, Dict, Any
-
-class AuditReportGenerator:
- def save_report(self, file_path: str, output_format: str = "json"):
- """
- Save the report to disk in the specified format (json, markdown, html).
- """
- content = self.export_report(output_format)
- mode = "w"
- encoding = "utf-8"
- with open(file_path, mode, encoding=encoding) as f:
- f.write(content)
- def filter_findings(self, severity: str = None, finding_type: str = None) -> Dict[str, Any]:
- """
- Returns filtered findings from static and dynamic analysis by severity and/or type.
- """
- def _filter(findings):
- filtered = []
- for finding in findings:
- if not isinstance(finding, dict):
- continue
- if severity and finding.get("severity") != severity:
- continue
- if finding_type and finding.get("type") != finding_type:
- continue
- filtered.append(finding)
- return filtered
-
- return {
- "static_analysis": _filter(self.static_results),
- "dynamic_analysis": _filter(self.dynamic_results)
- }
- def _get_summary_statistics(self, report: Dict[str, Any]) -> Dict[str, Any]:
- summary = {}
- static_findings = report.get("static_analysis", [])
- dynamic_findings = report.get("dynamic_analysis", [])
- all_findings = static_findings + dynamic_findings
- summary["total_findings"] = len(all_findings)
- # Severity breakdown
- severity_count = {}
- for finding in all_findings:
- severity = finding.get("severity", "Unknown") if isinstance(finding, dict) else "Unknown"
- severity_count[severity] = severity_count.get(severity, 0) + 1
- summary["severity_breakdown"] = severity_count
- return summary
- import datetime
-
- def __init__(self, auditor_name: str = None, contract_name: str = None):
- self.static_results = []
- self.dynamic_results = []
- self.scores = []
- self.metadata = {}
- self.audit_timestamp = self._get_current_timestamp()
- self.auditor_name = auditor_name
- self.contract_name = contract_name
- self.recommendations = []
- self.changelog = []
- def add_changelog_entry(self, entry: dict):
- """
- Add a changelog/history entry. Example entry: {"timestamp": ..., "auditor": ..., "action": ..., "details": ...}
- """
- self.changelog.append(entry)
-
- def add_changelog_entries(self, entries: list):
- self.changelog.extend(entries)
- def add_recommendation(self, recommendation: str):
- self.recommendations.append(recommendation)
-
- def add_recommendations(self, recommendations: list):
- self.recommendations.extend(recommendations)
-
- def _get_current_timestamp(self):
- return self.datetime.datetime.now().isoformat()
-
- def add_static_results(self, results: List[Dict[str, Any]]):
- self.static_results.extend(results)
-
- def add_dynamic_results(self, results: List[Dict[str, Any]]):
- self.dynamic_results.extend(results)
-
- def add_scores(self, scores: List[Dict[str, Any]]):
- self.scores.extend(scores)
-
- def set_metadata(self, metadata: Dict[str, Any]):
- self.metadata = metadata
- # Optionally update auditor and contract name from metadata
- if 'auditor_name' in metadata:
- self.auditor_name = metadata['auditor_name']
- if 'contract_name' in metadata:
- self.contract_name = metadata['contract_name']
-
- def generate_report(self) -> Dict[str, Any]:
- report = {
- "metadata": self.metadata,
- "audit_timestamp": self.audit_timestamp,
- "auditor_name": self.auditor_name,
- "contract_name": self.contract_name,
- "static_analysis": self.static_results,
- "dynamic_analysis": self.dynamic_results,
- "scores": self.scores,
- "recommendations": self.recommendations,
- "changelog": self.changelog,
- }
- report["summary_statistics"] = self._get_summary_statistics(report)
- return report
-
- def export_report(self, output_format: str = "json") -> str:
- report = self.generate_report()
- if output_format == "json":
- return json.dumps(report, indent=2)
- elif output_format == "markdown":
- return self._to_markdown(report)
- elif output_format == "html":
- return self._to_html(report)
- else:
- raise ValueError(f"Unsupported output format: {output_format}")
-
- def _to_markdown(self, report: Dict[str, Any]) -> str:
- md = "# Audit Report\n"
- md += f"- **Timestamp**: {report.get('audit_timestamp', '')}\n"
- md += f"- **Auditor Name**: {report.get('auditor_name', '')}\n"
- md += f"- **Contract Name**: {report.get('contract_name', '')}\n"
- if report.get("metadata"):
- md += "## Metadata\n"
- for k, v in report["metadata"].items():
- md += f"- **{k}**: {v}\n"
- if report.get("summary_statistics"):
- md += "\n## Summary Statistics\n"
- md += f"- Total Findings: {report['summary_statistics'].get('total_findings', 0)}\n"
- md += "- Severity Breakdown:\n"
- for sev, count in report['summary_statistics'].get('severity_breakdown', {}).items():
- md += f" - {sev}: {count}\n"
- if report.get("recommendations"):
- md += "\n## Recommendations / Remediation Steps\n"
- for rec in report["recommendations"]:
- md += f"- {rec}\n"
- if report.get("changelog"):
- md += "\n## Audit Changelog / History\n"
- for entry in report["changelog"]:
- ts = entry.get("timestamp", "")
- auditor = entry.get("auditor", "")
- action = entry.get("action", "")
- details = entry.get("details", "")
- md += f"- [{ts}] {auditor}: {action} - {details}\n"
- md += "\n## Static Analysis Findings\n"
- for finding in report["static_analysis"]:
- md += f"- {finding}\n"
- md += "\n## Dynamic Analysis Findings\n"
- for finding in report["dynamic_analysis"]:
- md += f"- {finding}\n"
- md += "\n## Scoring\n"
- for score in report["scores"]:
- md += f"- {score}\n"
- return md
-
- def _to_html(self, report: Dict[str, Any]) -> str:
- html = ["Audit Report"]
- html.append("Audit Report
")
- html.append(f"Timestamp: {report.get('audit_timestamp', '')}
")
- html.append(f"Auditor Name: {report.get('auditor_name', '')}
")
- html.append(f"Contract Name: {report.get('contract_name', '')}
")
- if report.get("metadata"):
- html.append("Metadata
")
- for k, v in report["metadata"].items():
- html.append(f"- {k}: {v}
")
- html.append("
")
- if report.get("summary_statistics"):
- html.append("Summary Statistics
")
- html.append(f"- Total Findings: {report['summary_statistics'].get('total_findings', 0)}
")
- html.append("- Severity Breakdown:
")
- for sev, count in report['summary_statistics'].get('severity_breakdown', {}).items():
- html.append(f"- {sev}: {count}
")
- html.append("
")
- if report.get("recommendations"):
- html.append("Recommendations / Remediation Steps
")
- for rec in report["recommendations"]:
- html.append(f"- {rec}
")
- html.append("
")
- if report.get("changelog"):
- html.append("Audit Changelog / History
")
- for entry in report["changelog"]:
- ts = entry.get("timestamp", "")
- auditor = entry.get("auditor", "")
- action = entry.get("action", "")
- details = entry.get("details", "")
- html.append(f"- [{ts}] {auditor}: {action} - {details}
")
- html.append("
")
- html.append("Static Analysis Findings
")
- for finding in report["static_analysis"]:
- html.append(f"- {finding}
")
- html.append("
")
- html.append("Dynamic Analysis Findings
")
- for finding in report["dynamic_analysis"]:
- html.append(f"- {finding}
")
- html.append("
")
- html.append("Scoring
")
- for score in report["scores"]:
- html.append(f"- {score}
")
- html.append("
")
- html.append("")
- return "".join(html)
diff --git a/audit_engine/core/schemas.py b/audit_engine/core/schemas.py
deleted file mode 100644
index c6c71b8..0000000
--- a/audit_engine/core/schemas.py
+++ /dev/null
@@ -1,209 +0,0 @@
-"""
-Unified Pydantic schemas for OpenAudit Agent
-
-Implements the standardized JSON schema from Workflow.md Section 3.1
-for consistent data contracts across all analysis phases.
-"""
-
-from datetime import datetime
-from enum import Enum
-from typing import List, Optional, Dict, Any, Union
-from uuid import UUID, uuid4
-from pydantic import BaseModel, Field
-try:
- # Pydantic v2
- from pydantic import field_validator as _field_validator
-except ImportError: # pragma: no cover
- # Fallback for pydantic v1 environments
- from pydantic import validator as _field_validator
-
-
-class SeverityLevel(str, Enum):
- """Severity classification as per Workflow.md interim calibration"""
- CRITICAL = "Critical" # Direct fund loss, contract takeover
- MAJOR = "Major" # Service disruption, governance bypass
- MEDIUM = "Medium" # Logic errors, privilege escalation
- MINOR = "Minor" # Best practice violations
- INFORMATIONAL = "Informational" # Code quality issues
-
-
-class ExploitComplexity(str, Enum):
- """Exploit complexity classification"""
- LOW = "Low"
- MEDIUM = "Medium"
- HIGH = "High"
-
-
-class ConfidenceLevel(str, Enum):
- """Trust calibration levels for HITL integration"""
- LOW = "low" # Requires human review
- MEDIUM = "medium" # Automated with flagging
- HIGH = "high" # Fully automated
- CRITICAL = "critical" # Immediate escalation
-
-
-class LineSpan(BaseModel):
- """Source code line span for vulnerability location"""
- start: int = Field(..., description="Starting line number (1-indexed)")
- end: int = Field(..., description="Ending line number (1-indexed)")
-
- @_field_validator('end')
- def end_must_be_gte_start(cls, v, values): # type: ignore[override]
- start_value = values.get('start') if isinstance(values, dict) else getattr(values, 'start', None)
- if start_value is not None and v < start_value:
- raise ValueError('end line must be >= start line')
- return v
-
-
-class Finding(BaseModel):
- """
- Standardized vulnerability finding schema implementing Workflow.md requirements.
- Used across all analysis phases for consistent data exchange.
- """
- finding_id: UUID = Field(default_factory=uuid4, description="Unique finding identifier")
- swc_id: Optional[str] = Field(None, pattern=r"SWC-\d{3}", description="SWC registry ID (SWC-XXX format)")
- severity: SeverityLevel = Field(..., description="Vulnerability severity level")
- tool_name: str = Field(..., description="Analysis tool that generated finding")
- tool_version: str = Field(..., description="Version of analysis tool")
- file_path: str = Field(..., description="Path to vulnerable source file")
- vulnerability_details: Optional[Dict[str, Any]] = Field(None, description="Detailed vulnerability information")
- suggested_fixes: Optional[List[Dict[str, Any]]] = Field(None, description="Suggested fixes with code examples")
- line_span: Optional[LineSpan] = Field(None, description="Source code line range")
- function_name: Optional[str] = Field(None, description="Function containing vulnerability")
- bytecode_offset: Optional[int] = Field(None, description="Bytecode offset for EVM-level findings")
- description: str = Field(..., description="Human-readable vulnerability description")
- reproduction_steps: str = Field(..., description="Steps to reproduce the vulnerability")
- proof_of_concept: Optional[str] = Field(None, description="Executable proof of concept code")
- exploit_complexity: ExploitComplexity = Field(default=ExploitComplexity.MEDIUM, description="Difficulty of exploitation")
- confidence: float = Field(..., ge=0.0, le=1.0, description="Tool confidence in finding (0.0-1.0)")
- sanitizer_present: bool = Field(default=False, description="Whether security controls are present")
- recommendations: List[str] = Field(default_factory=list, description="Remediation recommendations")
- timestamp: datetime = Field(default_factory=datetime.utcnow, description="Finding generation timestamp")
-
- # Extended fields for agentic AI integration
- cross_chain_impact: Optional[List[str]] = Field(None, description="Affected blockchain networks")
- remediation_suggestion: Optional[str] = Field(None, description="AI-generated patch suggestion")
- explainability_trace: Optional[Dict[str, Any]] = Field(None, description="Decision trace for explainability")
- rl_feedback_score: Optional[float] = Field(None, description="Reinforcement learning feedback score")
-
- class Config:
- use_enum_values = True
- json_encoders = {
- datetime: lambda v: v.isoformat(),
- UUID: lambda v: str(v)
- }
-
-
-class ToolError(BaseModel):
- """Error information when analysis tools fail"""
- tool_name: str
- error_type: str
- error_message: str
- stderr_output: Optional[str] = None
- exit_code: Optional[int] = None
- timestamp: datetime = Field(default_factory=datetime.utcnow)
-
-
-class AnalysisRequest(BaseModel):
- """Request schema for audit analysis"""
- contract_paths: List[str] = Field(..., description="Paths to smart contract files")
- analysis_type: str = Field(default="comprehensive", description="Type of analysis to perform")
- include_static: bool = Field(default=True, description="Enable static analysis")
- include_dynamic: bool = Field(default=True, description="Enable dynamic analysis")
- include_scoring: bool = Field(default=True, description="Enable vulnerability scoring")
- enable_ai_agents: bool = Field(default=False, description="Enable agentic AI features")
- cross_chain_analysis: bool = Field(default=False, description="Enable cross-chain vulnerability detection")
- max_analysis_time: int = Field(default=600, description="Maximum analysis time in seconds")
-
- # Configuration overrides
- tool_config: Dict[str, Any] = Field(default_factory=dict, description="Tool-specific configuration")
- agent_config: Dict[str, Any] = Field(default_factory=dict, description="AI agent configuration")
-
-
-class AnalysisResult(BaseModel):
- """Complete analysis result with findings and metadata"""
- request_id: UUID = Field(default_factory=uuid4, description="Analysis request identifier")
- contract_paths: List[str] = Field(..., description="Analyzed contract files")
- findings: List[Finding] = Field(default_factory=list, description="Detected vulnerabilities")
- tool_errors: List[ToolError] = Field(default_factory=list, description="Tool execution errors")
- analysis_metadata: Dict[str, Any] = Field(default_factory=dict, description="Analysis execution metadata")
-
- # Summary statistics
- total_findings: int = Field(default=0, description="Total number of findings")
- severity_distribution: Dict[SeverityLevel, int] = Field(default_factory=dict, description="Findings by severity")
- confidence_distribution: Dict[ConfidenceLevel, int] = Field(default_factory=dict, description="Findings by confidence")
-
- # Timing information
- start_time: datetime = Field(default_factory=datetime.utcnow, description="Analysis start time")
- end_time: Optional[datetime] = Field(None, description="Analysis completion time")
- duration_seconds: Optional[float] = Field(None, description="Total analysis duration")
-
- # Agentic AI results
- agent_consensus: Optional[Dict[str, Any]] = Field(None, description="Multi-agent consensus results")
- patch_suggestions: Optional[List[Dict[str, Any]]] = Field(None, description="AI-generated patches")
- explainability_report: Optional[Dict[str, Any]] = Field(None, description="Explainability analysis")
-
- def finalize(self):
- """Compute derived fields after analysis completion"""
- self.end_time = datetime.utcnow()
- if self.start_time:
- self.duration_seconds = (self.end_time - self.start_time).total_seconds()
-
- self.total_findings = len(self.findings)
-
- # Compute severity distribution
- self.severity_distribution = {}
- for severity in SeverityLevel:
- count = sum(1 for f in self.findings if f.severity == severity)
- if count > 0:
- self.severity_distribution[severity] = count
-
- # Compute confidence distribution by bucketing float confidences
- self.confidence_distribution = {}
- def bucket(c: float) -> ConfidenceLevel:
- if c >= 0.9:
- return ConfidenceLevel.HIGH
- if c >= 0.7:
- return ConfidenceLevel.MEDIUM
- return ConfidenceLevel.LOW
- for level in ConfidenceLevel:
- count = sum(1 for f in self.findings if bucket(getattr(f, "confidence", 0.0)) == level)
- if count > 0:
- self.confidence_distribution[level] = count
-
- class Config:
- use_enum_values = True
- json_encoders = {
- datetime: lambda v: v.isoformat(),
- UUID: lambda v: str(v)
- }
-
-
-class AgentTask(BaseModel):
- """Task specification for agentic AI agents"""
- task_id: UUID = Field(default_factory=uuid4)
- agent_type: str = Field(..., description="Type of agent (detection, exploit, patch, verification)")
- input_data: Dict[str, Any] = Field(..., description="Input data for agent")
- config: Dict[str, Any] = Field(default_factory=dict, description="Agent-specific configuration")
- priority: int = Field(default=5, ge=1, le=10, description="Task priority (1=highest, 10=lowest)")
- timeout_seconds: int = Field(default=300, description="Task timeout")
- retry_attempts: int = Field(default=3, description="Number of retry attempts")
-
-
-class AgentResult(BaseModel):
- """Result from agentic AI agent execution"""
- task_id: UUID = Field(..., description="Reference to original task")
- agent_type: str = Field(..., description="Agent that produced result")
- success: bool = Field(..., description="Whether task completed successfully")
- output_data: Dict[str, Any] = Field(default_factory=dict, description="Agent output")
- confidence_score: float = Field(..., ge=0.0, le=1.0, description="Agent confidence in result")
- execution_time: float = Field(..., description="Task execution time in seconds")
- error_message: Optional[str] = Field(None, description="Error message if task failed")
- explainability_trace: Optional[List[str]] = Field(None, description="Decision trace for explainability")
- timestamp: datetime = Field(default_factory=datetime.utcnow)
-
-
-# Utility type aliases for cleaner code
-FindingList = List[Finding]
-ToolErrorList = List[ToolError]
-AnalysisConfig = Dict[str, Any]
diff --git a/audit_engine/core/vulnerability_patterns.py b/audit_engine/core/vulnerability_patterns.py
deleted file mode 100644
index 38968b5..0000000
--- a/audit_engine/core/vulnerability_patterns.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-Vulnerability patterns and fixes database for OpenAudit Agent
-"""
-
-from typing import Dict, List, Optional
-
-VULNERABILITY_PATTERNS = {
- "SWC-107": {
- "name": "Reentrancy",
- "description": """
- Reentrancy occurs when external contract calls are allowed to make new calls back to the calling contract before the first execution is finished.
- This may cause the different invocations of the function to interact in destructive ways.
- """,
- "impact": "Critical - Can lead to unauthorized withdrawals, drain of contract funds, and manipulation of contract state",
- "detection_tips": [
- "Look for external calls using .call()",
- "Check if state changes happen after external calls",
- "Watch for multiple calls to the same function"
- ],
- "fixes": [
- {
- "pattern": "Checks-Effects-Interactions Pattern",
- "description": "Update state before making external calls",
- "code_example": '''
- // VULNERABLE
- function withdraw(uint _amount) {
- require(balances[msg.sender] >= _amount);
- (bool success, ) = msg.sender.call{value: _amount}("");
- require(success);
- balances[msg.sender] -= _amount;
- }
-
- // FIXED
- function withdraw(uint _amount) {
- require(balances[msg.sender] >= _amount);
- balances[msg.sender] -= _amount;
- (bool success, ) = msg.sender.call{value: _amount}("");
- require(success);
- }
- '''
- },
- {
- "pattern": "ReentrancyGuard",
- "description": "Use OpenZeppelin's ReentrancyGuard or implement a mutex",
- "code_example": '''
- contract SecureContract is ReentrancyGuard {
- function withdraw(uint _amount) nonReentrant {
- require(balances[msg.sender] >= _amount);
- balances[msg.sender] -= _amount;
- (bool success, ) = msg.sender.call{value: _amount}("");
- require(success);
- }
- }
- '''
- }
- ]
- },
- "SWC-101": {
- "name": "Integer Overflow and Underflow",
- "description": """
- Integer overflow/underflow occurs when an arithmetic operation reaches the maximum or minimum size of the integer type.
- For example, a uint8 can only hold values from 0 to 255. Adding 1 to 255 will result in 0.
- """,
- "impact": "High - Can lead to incorrect calculations, token balance manipulation",
- "fixes": [
- {
- "pattern": "SafeMath Usage",
- "description": "Use OpenZeppelin's SafeMath library for Solidity < 0.8.0",
- "code_example": '''
- using SafeMath for uint256;
-
- function transfer(address recipient, uint256 amount) {
- balances[msg.sender] = balances[msg.sender].sub(amount);
- balances[recipient] = balances[recipient].add(amount);
- }
- '''
- },
- {
- "pattern": "Solidity 0.8+ Built-in Checks",
- "description": "Use Solidity 0.8.0 or later which includes built-in overflow checks",
- "code_example": '''
- // SPDX-License-Identifier: MIT
- pragma solidity ^0.8.0;
-
- contract SafeContract {
- function add(uint256 a, uint256 b) public pure returns (uint256) {
- return a + b; // Will revert on overflow
- }
- }
- '''
- }
- ]
- },
- "SWC-116": {
- "name": "Block Timestamp Manipulation",
- "description": """
- Block timestamps can be manipulated by miners within certain bounds. Any logic that relies
- directly on block.timestamp for critical decisions can potentially be manipulated.
- """,
- "impact": "Medium - Can affect time-dependent logic like locks and random number generation",
- "fixes": [
- {
- "pattern": "Block Number Usage",
- "description": "Use block.number instead of block.timestamp when possible",
- "code_example": '''
- // VULNERABLE
- if (block.timestamp >= endTime) { ... }
-
- // FIXED
- // Assuming ~15 sec block time
- if (block.number >= endBlock) { ... }
- '''
- },
- {
- "pattern": "Time Delay Buffer",
- "description": "Add buffer time to timestamp checks",
- "code_example": '''
- // Add safety margin
- uint256 constant BUFFER_TIME = 900; // 15 minutes
-
- function isExpired(uint256 deadline) public view returns (bool) {
- return block.timestamp >= (deadline + BUFFER_TIME);
- }
- '''
- }
- ]
- }
-}
-
-def get_vulnerability_info(swc_id: str) -> Optional[Dict]:
- """
- Get detailed information about a vulnerability by its SWC ID.
-
- Args:
- swc_id: The SWC ID of the vulnerability (e.g., "SWC-107")
-
- Returns:
- Dictionary containing vulnerability information and fixes
- """
- return VULNERABILITY_PATTERNS.get(swc_id)
-
-def get_fix_suggestions(swc_id: str) -> List[Dict]:
- """
- Get suggested fixes for a vulnerability.
-
- Args:
- swc_id: The SWC ID of the vulnerability
-
- Returns:
- List of suggested fixes with code examples
- """
- vuln_info = VULNERABILITY_PATTERNS.get(swc_id)
- if vuln_info:
- return vuln_info.get("fixes", [])
- return []
-
-def get_detection_tips(swc_id: str) -> List[str]:
- """
- Get detection tips for a vulnerability.
-
- Args:
- swc_id: The SWC ID of the vulnerability
-
- Returns:
- List of tips for detecting the vulnerability
- """
- vuln_info = VULNERABILITY_PATTERNS.get(swc_id)
- if vuln_info:
- return vuln_info.get("detection_tips", [])
- return []
diff --git a/audit_engine/dynamic_analysis/__init__.py b/audit_engine/dynamic_analysis/__init__.py
deleted file mode 100644
index 83c859c..0000000
--- a/audit_engine/dynamic_analysis/__init__.py
+++ /dev/null
@@ -1,508 +0,0 @@
-"""
-Dynamic Analysis Module for OpenAudit Agent
-
-This module implements the dynamic analysis component of the agentic AI framework
-described in the research paper. It orchestrates multiple dynamic analysis tools
-including Echidna fuzzing and AdversarialFuzz techniques to detect runtime
-vulnerabilities in smart contracts.
-
-Key Features:
-- Multi-tool dynamic analysis orchestration
-- Adversarial fuzzing capabilities (Rahman et al., 2025)
-- Reinforcement learning integration for continuous improvement
-- Cross-chain vulnerability detection support
-- Trust-calibrated output with confidence scoring
-"""
-
-import asyncio
-import logging
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from dataclasses import dataclass
-from enum import Enum
-from typing import Any, Dict, Iterable, List, Optional, Union
-import time
-
-from .echidna_adapter import EchidnaAdapter
-from .adversarial_fuzz import AdversarialFuzz
-from .config import DynamicAnalysisConfig
-
-__all__ = [
- "DynamicAnalysisOrchestrator",
- "AnalysisResult",
- "ConfidenceLevel",
- "EchidnaAdapter",
- "AdversarialFuzz",
- "run_dynamic_analysis"
-]
-
-# Constants to avoid magic numbers
-DEFAULT_ANALYSIS_TIMEOUT = 600
-DEFAULT_MAX_WORKERS = 4
-CONFIDENCE_THRESHOLDS = {
- 'critical': 0.97,
- 'high': 0.9,
- 'medium': 0.7,
- 'low': 0.0
-}
-DEFAULT_TOOL_ACCURACY = 0.8
-ADVERSARIAL_BOOST_FACTOR = 1.1
-
-class ConfidenceLevel(Enum):
- """Trust calibration levels for HITL integration as per research paper Section 5.5"""
- LOW = "low" # Requires human review
- MEDIUM = "medium" # Automated with flagging
- HIGH = "high" # Fully automated
- CRITICAL = "critical" # Immediate escalation
-
-@dataclass
-class AnalysisResult:
- """
- Standardized result format for dynamic analysis findings.
- Implements trust scoring and explainability features from the research.
- """
- tool_name: str
- vulnerability_type: str
- severity: str
- confidence: ConfidenceLevel
- finding_details: Dict[str, Any]
- exploit_poc: Optional[str] = None
- remediation_suggestion: Optional[str] = None
- cross_chain_impact: Optional[List[str]] = None
- timestamp: Optional[str] = None
-
-class DynamicAnalysisOrchestrator:
- """
- Main orchestrator for dynamic analysis tools implementing the agentic AI
- framework from the research paper. Coordinates multiple analysis agents
- with reinforcement learning feedback loops.
- """
-
- def __init__(self, config: Optional[Dict[str, Any]] = None, logger: Optional[logging.Logger] = None):
- self.config = config or {}
- self.logger = logger or logging.getLogger(__name__)
- self.adapters: List[Any] = []
- self.rl_feedback_enabled = self.config.get("reinforcement_learning", False)
- self.cross_chain_enabled = self.config.get("cross_chain_analysis", False)
-
- # Log configuration securely without exposing sensitive data
- self.logger.info(f"DynamicAnalysisOrchestrator initialized with config: {self._masked_config_dict()}")
-
- try:
- self.adapters = self._initialize_adapters()
- except Exception as e:
- self.logger.error(f"Failed to initialize adapters: {e}")
- # Don't re-raise to allow graceful degradation
-
- def _masked_config_dict(self) -> Dict[str, Any]:
- """
- Create a masked version of config for safe logging.
- Masks sensitive keys that might contain tokens, passwords, or secrets.
- """
- # If config has its own masking, use it
- if hasattr(self.config, "masked_dict"):
- try:
- return self.config.masked_dict() # type: ignore[no-any-return]
- except Exception:
- self.logger.exception("Failed to mask config via masked_dict(); falling back to local masker")
- # Define specific sensitive patterns to avoid false positives
- sensitive_patterns = {
- "api_key", "access_key", "secret_key", "private_key", "client_secret",
- "password", "pass", "pwd",
- "secret", "token", "auth_token", "bearer_token", "refresh_token",
- "authorization", "x-api-key", "x_api_key",
- }
-
- def mask_sensitive_data(obj: Any) -> Any:
- if isinstance(obj, dict):
- masked = {}
- for key, value in obj.items():
- key_lower = key.lower()
- # Narrow match: exact known keys or common suffixes (underscore and hyphen)
- is_sensitive = (
- key_lower in sensitive_patterns
- or key_lower.endswith(("_key", "_secret", "_token", "-key", "-secret", "-token"))
- )
-
- if is_sensitive and value is not None:
- # Mask with partial visibility for debugging
- if isinstance(value, str) and len(value) > 8:
- masked[key] = f"{value[:3]}***{value[-2:]}"
- else:
- masked[key] = "***"
- else:
- masked[key] = mask_sensitive_data(value)
- return masked
- elif isinstance(obj, list):
- return [mask_sensitive_data(item) for item in obj]
- else:
- return obj
-
- return mask_sensitive_data(self.config)
-
- def _initialize_adapters(self) -> List[Any]:
- """Initialize dynamic analysis adapters with configuration"""
- adapters = []
-
- # Handle both dict and DynamicAnalysisConfig objects
- if hasattr(self.config, 'enable_echidna'):
- # Pydantic config object
- enable_echidna = self.config.enable_echidna
- enable_adversarial_fuzz = self.config.enable_adversarial_fuzz
- echidna_config = self.config.echidna if hasattr(self.config, 'echidna') else {}
- adversarial_config = self.config.adversarial_fuzz if hasattr(self.config, 'adversarial_fuzz') else {}
- else:
- # Dict config
- enable_echidna = self.config.get("enable_echidna", True)
- enable_adversarial_fuzz = self.config.get("enable_adversarial_fuzz", True)
- echidna_config = self.config.get("echidna", {})
- adversarial_config = self.config.get("adversarial_fuzz", {})
-
- # Standard Echidna fuzzing
- if enable_echidna:
- try:
- adapters.append(EchidnaAdapter(config=echidna_config, logger=self.logger))
- except Exception as e:
- self.logger.warning(f"Failed to initialize EchidnaAdapter: {e}")
-
- # Adversarial fuzzing as per Rahman et al. (2025)
- if enable_adversarial_fuzz:
- try:
- adapters.append(AdversarialFuzz(config=adversarial_config, logger=self.logger))
- except Exception as e:
- self.logger.warning(f"Failed to initialize AdversarialFuzz: {e}")
-
- return adapters
-
- def _extract_confidence_score(self, result: Any) -> float:
- """Extract numerical confidence score from result"""
- if isinstance(result, dict):
- conf_str = str(result.get("confidence", "")).lower()
- confidence_mapping = {"high": 0.9, "medium": 0.7, "low": 0.4}
- if conf_str in confidence_mapping:
- return confidence_mapping[conf_str]
- try:
- return float(result.get("confidence_score", 0.5))
- except (TypeError, ValueError):
- return 0.5
- return float(getattr(result, "confidence_score", 0.5))
-
- def _score_to_confidence_level(self, score: float) -> ConfidenceLevel:
- """Convert numerical score to ConfidenceLevel enum"""
- if score >= CONFIDENCE_THRESHOLDS['critical']:
- return ConfidenceLevel.CRITICAL
- elif score >= CONFIDENCE_THRESHOLDS['high']:
- return ConfidenceLevel.HIGH
- elif score >= CONFIDENCE_THRESHOLDS['medium']:
- return ConfidenceLevel.MEDIUM
- else:
- return ConfidenceLevel.LOW
-
- def _calculate_confidence_level(self, adapter: Any, result: Any) -> ConfidenceLevel:
- """
- Implement trust calibration as described in Section 5.5.
- Uses multi-modal consistency checks and historical accuracy.
- """
- # Extract confidence score from result
- score = self._extract_confidence_score(result)
-
- # Apply adapter-specific adjustments
- if isinstance(adapter, AdversarialFuzz):
- score = min(1.0, score * ADVERSARIAL_BOOST_FACTOR)
-
- # Apply tool accuracy weighting - check both PascalCase and snake_case
- class_name = adapter.__class__.__name__
- snake_case = "".join([f"_{c.lower()}" if c.isupper() else c for c in class_name]).lstrip("_")
- tool_accuracy = self.config.get(
- f"{class_name}_accuracy",
- self.config.get(f"{snake_case}_accuracy", DEFAULT_TOOL_ACCURACY),
- )
- adjusted_score = max(0.0, min(1.0, score * tool_accuracy))
-
- return self._score_to_confidence_level(adjusted_score)
-
- async def analyze_contracts(
- self,
- contract_paths: Iterable[str],
- analysis_type: str = "comprehensive"
- ) -> List[AnalysisResult]:
- """
- Asynchronous multi-agent analysis with consensus-based decision making
- as described in Section 5.10 of the research paper.
- """
- if not self.adapters:
- self.logger.warning("No dynamic analysis adapters available; returning empty results.")
- return []
-
- contract_list = list(contract_paths)
- if not contract_list:
- self.logger.warning("No contract paths provided for analysis.")
- return []
-
- results = await self._execute_parallel_analysis(contract_list)
-
- # Reinforcement learning feedback integration
- if self.rl_feedback_enabled:
- await self._update_rl_feedback(results)
-
- return results
-
- async def _execute_parallel_analysis(self, contract_paths: List[str]) -> List[AnalysisResult]:
- """Execute analysis across multiple adapters in parallel"""
- results = []
- max_workers = min(
- self.config.get("max_workers", DEFAULT_MAX_WORKERS),
- len(self.adapters)
- )
-
- with ThreadPoolExecutor(max_workers=max_workers) as executor:
- # Submit all adapter tasks
- future_to_adapter = {
- executor.submit(self._run_adapter_analysis, adapter, contract_paths): adapter
- for adapter in self.adapters
- }
-
- # Collect results with proper error handling
- for future in as_completed(future_to_adapter):
- adapter = future_to_adapter[future]
- try:
- timeout = self.config.get("analysis_timeout", DEFAULT_ANALYSIS_TIMEOUT)
- adapter_results = future.result(timeout=timeout)
- processed_results = self._process_adapter_results(adapter, adapter_results)
- results.extend(processed_results)
- except Exception as e:
- self.logger.error(f"Adapter {adapter.__class__.__name__} failed: {e}")
-
- return results
-
- def _run_adapter_analysis(self, adapter: Any, contract_paths: List[str]) -> List[Any]:
- """Run individual adapter analysis with error handling"""
- findings: List[Any] = []
- adapter_name = adapter.__class__.__name__
-
- try:
- timeout = self.config.get(
- f"{adapter_name}_timeout",
- self.config.get("analysis_timeout", DEFAULT_ANALYSIS_TIMEOUT)
- )
-
- for path in contract_paths:
- try:
- result = adapter.run(path, timeout=timeout)
- if result:
- if isinstance(result, list):
- findings.extend(result)
- else:
- findings.append(result)
- except Exception as e:
- self.logger.warning(f"Analysis failed for {path} with {adapter_name}: {e}")
-
- except Exception as e:
- self.logger.error(f"Critical failure in {adapter_name}: {e}")
-
- return findings
-
- def _process_adapter_results(self, adapter: Any, raw_results: List[Any]) -> List[AnalysisResult]:
- """
- Process and standardize adapter results into unified format.
- Implements trust calibration and explainability features.
- """
- processed = []
-
- for result in raw_results:
- try:
- processed_result = self._create_analysis_result(adapter, result)
- if processed_result:
- processed.append(processed_result)
- except Exception as e:
- self.logger.warning(f"Failed to process result from {adapter.__class__.__name__}: {e}")
-
- return processed
-
- def _create_analysis_result(self, adapter: Any, result: Any) -> Optional[AnalysisResult]:
- """Create a standardized AnalysisResult from adapter output"""
- try:
- # Extract fields with proper type handling
- severity, vuln_type, details = self._extract_result_fields(result)
- confidence = self._calculate_confidence_level(adapter, result)
- remediation = self._generate_remediation_suggestion(result)
-
- # Cross-chain impact analysis
- cross_chain_impact = None
- if self.cross_chain_enabled:
- cross_chain_impact = self._analyze_cross_chain_impact(result)
-
- return AnalysisResult(
- tool_name=adapter.__class__.__name__,
- vulnerability_type=vuln_type,
- severity=severity,
- confidence=confidence,
- finding_details=details,
- remediation_suggestion=remediation,
- cross_chain_impact=cross_chain_impact,
- timestamp=str(time.time())
- )
- except Exception as e:
- self.logger.error(f"Failed to create AnalysisResult: {e}")
- return None
-
- def _extract_result_fields(self, result: Any) -> tuple[str, str, Dict[str, Any]]:
- """Extract severity, vulnerability type, and details from result"""
- if isinstance(result, dict):
- severity = result.get('severity', 'Medium')
- vuln_type = result.get('swc_id') or result.get('title') or 'unknown'
- details = result
- else:
- severity = getattr(result, 'severity', 'Medium')
- vuln_type = (
- getattr(result, 'vulnerability_type', None) or
- getattr(result, 'swc_id', None) or
- 'unknown'
- )
- details = getattr(result, 'details', {'raw': repr(result)})
-
- return severity, vuln_type, details
-
- def _generate_remediation_suggestion(self, result: Any) -> Optional[str]:
- """Generate actionable remediation suggestions"""
- vuln_type = getattr(result, 'vulnerability_type', '').lower()
-
- remediation_templates = {
- 'reentrancy': 'Consider using the checks-effects-interactions pattern or ReentrancyGuard modifier',
- 'integer_overflow': 'Use SafeMath library or Solidity 0.8+ built-in overflow protection',
- 'unchecked_call': 'Always check return values of external calls and handle failures appropriately'
- }
-
- return remediation_templates.get(vuln_type)
-
- def _analyze_cross_chain_impact(self, result: Any) -> Optional[List[str]]:
- """
- Cross-chain vulnerability analysis as per Section 5.6.
- Identifies vulnerabilities that may manifest differently across chains.
- """
- vuln_type = getattr(result, 'vulnerability_type', '').lower()
-
- # Define chain-specific vulnerability mappings
- chain_mappings = {
- 'gas_limit': ['ethereum', 'polygon', 'bsc'],
- 'block_gas_limit': ['ethereum', 'polygon', 'bsc'],
- 'timestamp_dependence': ['ethereum', 'arbitrum']
- }
-
- return chain_mappings.get(vuln_type)
-
- async def _update_rl_feedback(self, results: List[AnalysisResult]) -> None:
- """
- Reinforcement learning feedback loop as described in Section 5.4.
- Updates agent behavior based on analysis outcomes.
- """
- try:
- feedback_data = self._generate_feedback_data(results)
- self.logger.info(f"RL Feedback collected: {feedback_data}")
- # TODO: Implement actual RL model updates
- except Exception as e:
- self.logger.error(f"Failed to update RL feedback: {e}")
-
- def _generate_feedback_data(self, results: List[AnalysisResult]) -> Dict[str, Any]:
- """Generate structured feedback data for RL training"""
- tool_names = {r.tool_name for r in results}
-
- return {
- 'analysis_timestamp': str(time.time()),
- 'total_findings': len(results),
- 'confidence_distribution': {
- level.value: sum(1 for r in results if r.confidence == level)
- for level in ConfidenceLevel
- },
- 'tool_performance': {
- tool: sum(1 for r in results if r.tool_name == tool)
- for tool in tool_names
- }
- }
-
-def run_dynamic_analysis(
- contract_paths: Iterable[str],
- config: Optional[Union[Dict[str, Any], DynamicAnalysisConfig]] = None,
- logger: Optional[logging.Logger] = None,
- adapters: Optional[List[Any]] = None,
-) -> List[AnalysisResult]:
- """
- Simplified interface for dynamic analysis execution.
-
- Args:
- contract_paths: Paths to smart contract files
- config: Configuration dictionary or DynamicAnalysisConfig instance
- logger: Logger instance
- adapters: Pre-initialized adapters (for testing)
-
- Returns:
- List of standardized analysis results
- """
- if adapters:
- return _run_legacy_analysis(contract_paths, config, logger, adapters)
-
- # Modern agentic AI orchestration
- # Ensure config is passed as dict for compatibility with adapter-specific keys
- config_dict = config
- if hasattr(config, 'to_runtime_config'):
- # Use the improved to_runtime_config method that includes adapter-specific keys
- config_dict = config.to_runtime_config()
- elif hasattr(config, 'model_dump'):
- config_dict = config.model_dump()
- elif hasattr(config, 'dict'):
- config_dict = config.dict()
-
- orchestrator = DynamicAnalysisOrchestrator(config=config_dict, logger=logger)
- return _run_async_analysis(orchestrator, contract_paths)
-
-def _run_legacy_analysis(
- contract_paths: Iterable[str],
- config: Optional[Dict[str, Any]],
- logger: Optional[logging.Logger],
- adapters: List[Any]
-) -> List[AnalysisResult]:
- """Legacy mode for backward compatibility"""
- findings = []
- config = config or {}
-
- for adapter in adapters:
- try:
- timeout = config.get(
- f"{adapter.__class__.__name__}_timeout",
- config.get("analysis_timeout", DEFAULT_ANALYSIS_TIMEOUT)
- )
- for path in list(contract_paths):
- result = adapter.run(path, timeout=timeout)
- if result:
- findings.extend(result if isinstance(result, list) else [result])
- except Exception as e:
- if logger:
- logger.error(f"Adapter {adapter.__class__.__name__} failed: {e}")
-
- return [
- AnalysisResult(
- tool_name="unknown",
- vulnerability_type="unknown",
- severity="medium",
- confidence=ConfidenceLevel.MEDIUM,
- finding_details={"raw_finding": finding}
- ) for finding in findings
- ]
-
-def _run_async_analysis(
- orchestrator: DynamicAnalysisOrchestrator,
- contract_paths: Iterable[str]
-) -> List[AnalysisResult]:
- """Run async analysis with proper event loop handling"""
- try:
- # Check if we're already in an event loop
- loop = asyncio.get_running_loop()
- # If we are, run in a thread to avoid blocking
- from concurrent.futures import ThreadPoolExecutor
- with ThreadPoolExecutor(max_workers=1) as executor:
- future = executor.submit(
- lambda: asyncio.run(orchestrator.analyze_contracts(contract_paths))
- )
- return future.result()
- except RuntimeError:
- # No event loop running, safe to create one
- return asyncio.run(orchestrator.analyze_contracts(contract_paths))
\ No newline at end of file
diff --git a/audit_engine/dynamic_analysis/adversarial_fuzz.py b/audit_engine/dynamic_analysis/adversarial_fuzz.py
deleted file mode 100644
index f9062dc..0000000
--- a/audit_engine/dynamic_analysis/adversarial_fuzz.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# audit_engine/dynamic_analysis/adversarial_fuzz.py
-
-import os
-import json
-import uuid
-from datetime import datetime
-from typing import List
-from audit_engine.static_analysis.base import AbstractAdapter
-from audit_engine.core.schemas import Finding, ToolError
-
-# Manticore is imported lazily in run() to avoid a hard import-time dependency.
-
-# Manticore is imported lazily inside run() to avoid a hard dependency.
-
-def run(self, contract_path: str, solc_version: str = "0.8.19", max_states: int = 100, **kwargs) -> List[Finding]:
- """
- Runs adversarial fuzzing on a Solidity contract using symbolic execution for edge-case path discovery.
- Returns a list of Finding objects upon exploitable states or assertion/require failures.
- """
- findings = []
- try:
- from manticore.ethereum import ManticoreEVM
- from manticore.core.smtlib import Operators
- except ImportError:
- return [self.standardize_finding({
- "title": "AdversarialFuzz Error",
- "description": "Manticore must be installed: pip install manticore",
- "severity": "Low",
- "swc_id": "",
- "line_numbers": [],
- "confidence": "Low",
- "tool": getattr(self, "tool_name", self.__class__.__name__),
- })]
-
-
-class AdversarialFuzz(AbstractAdapter):
- """
- AdversarialFuzz uses Manticore to perform adversarial, feedback-guided fuzzing on smart contracts.
- It generates syntactically valid but edge-case inputs, explores transactions, and reports exploitable findings.
- """
-
- tool_name = "AdversarialFuzz"
- tool_version = "1.1.0"
-
- def run(self, contract_path: str, solc_version: str = "0.8.19", max_states: int = 100, **kwargs) -> List[Finding]:
- """
- Runs adversarial fuzzing on a Solidity contract using symbolic execution for edge-case path discovery.
- Returns a list of Finding objects upon exploitable states or assertion/require failures.
- """
- findings = []
- if not os.path.isfile(contract_path):
- return [self.standardize_finding({
- "title": "AdversarialFuzz Error",
- "description": f"Contract file {contract_path} does not exist.",
- "severity": "Low",
- "swc_id": "",
- "line_numbers": [],
- "confidence": "Low",
- "tool": getattr(self, "tool_name", self.__class__.__name__),
- })]
-
- with open(contract_path, "r") as f:
- source = f.read()
-
- m = ManticoreEVM()
- user_account = m.create_account(balance=10 ** 20)
-
- try:
- contract_account = m.solidity_create_contract(
- source_code=source,
- owner=user_account,
- solc_version=solc_version
- )
- except Exception as err:
- return [self.standardize_finding({
- "title": "AdversarialFuzz Error",
- "description": f"Contract compilation/deploy failed: {err}",
- "severity": "Low",
- "swc_id": "",
- "line_numbers": [],
- "confidence": "Low",
- "tool": getattr(self, "tool_name", self.__class__.__name__),
- })]
-
- # Extract ABI to fuzz all public/external and payable functions
- abi = getattr(contract_account, "abi", [])
- for func in abi:
- if func.get("type") != "function":
- continue
- if func.get("stateMutability") == "view":
- continue
-
- func_name = func["name"]
- inputs = func.get("inputs", [])
- symbolic_args = []
- for inp in inputs:
- # Use type info if available to generate appropriate symbolic values
- if inp["type"] == "address":
- val = m.make_symbolic_value(name=f"{func_name}_{inp['name']}_address")
- # 160-bit EVM address space
- m.constrain(Operators.ULT(val, 2**160))
- elif inp["type"].startswith("uint") or inp["type"].startswith("int"):
- val = m.make_symbolic_value(name=f"{func_name}_{inp['name']}_int")
- bits_str = "".join(ch for ch in inp["type"] if ch.isdigit()) or "256"
- bits = int(bits_str)
- if inp["type"].startswith("uint"):
- m.constrain(Operators.ULE(val, 2**bits - 1))
- else:
- m.constrain(Operators.SGE(val, -(2**(bits - 1))))
- m.constrain(Operators.SLE(val, 2**(bits - 1) - 1))
- elif inp["type"] == "bool":
- val = m.make_symbolic_value(name=f"{func_name}_{inp['name']}_bool")
- # Constrain to {0,1}
- m.constrain(Operators.Or(val == 0, val == 1))
- else:
- # Fallback: treat as 256-bit value
- val = m.make_symbolic_value(name=f"{func_name}_{inp['name']}_any")
- m.constrain(Operators.ULE(val, 2**256 - 1))
- symbolic_args.append(val)
-
-
- try:
- m.transaction(
- caller=user_account,
- address=contract_account.address,
- value=0,
- function_name=func_name,
- args=symbolic_args,
- data=None,
- gas=10000000
- )
- except Exception as e:
- # Continue fuzzing after failed transaction
- continue
-
- # Fuzzing exploration
- m.run(max_states=max_states)
- # Look for interesting states: assertion failures, exceptions, abnormal balances, etc.
- for state in m.final_states:
- world = state.platform
- # Check for exception
- for exc in getattr(world, "_exceptions", []):
- finding = {
- "title": f"Runtime exception: {exc.get('type', 'Exception')}",
- "description": exc.get("description", ""),
- "severity": "High",
- "swc_id": "SWC-123",
- "line_numbers": [],
- "confidence": "High",
- "tool": getattr(self, "tool_name", self.__class__.__name__),
- }
- findings.append(self.standardize_finding(finding))
-
-
- # Check for abnormal balance changes as signs of reentrancy, DoS, etc.
- actors = world.accounts
- for addr, account in actors.items():
- # You can extend with specific exploitation logic (e.g. checking for >10x balance increase)
- balance = account.balance
- cond = Operators.UGT(balance, 10 ** 22)
- if state.can_be_true(cond):
-
- finding = {
- "title": "Abnormal Ether transfer detected",
- "description": f"Actor {hex(addr) if isinstance(addr, int) else addr} balance: {balance}",
- "severity": "High",
- "swc_id": "SWC-105",
- "line_numbers": [],
- "confidence": "High",
- "tool": getattr(self, "tool_name", self.__class__.__name__),
- }
- findings.append(self.standardize_finding(finding))
-
-
- return findings
-
- def parse_output(self, output: str) -> List[Finding]:
- """
- This remains as a fallback for any custom output parsing you want to add if needed.
- """
- try:
- raw = json.loads(output)
- findings: List[Finding] = []
- for item in raw:
- finding = Finding(**item)
- findings.append(finding)
- return findings
- except Exception as e:
- raise ToolError(f"Failed to parse output: {e}")
diff --git a/audit_engine/dynamic_analysis/config.py b/audit_engine/dynamic_analysis/config.py
deleted file mode 100644
index 46e88d6..0000000
--- a/audit_engine/dynamic_analysis/config.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""
-Configuration management for Dynamic Analysis Module
-"""
-
-from typing import Any, Dict, Optional, Set
-from pydantic import Field
-
-try:
- # Pydantic v2
- from pydantic_settings import BaseSettings, SettingsConfigDict # type: ignore
- from pydantic import model_validator # type: ignore[attr-defined]
- _PYDANTIC_V2 = True
-except (ImportError, AttributeError): # pragma: no cover
- # Pydantic v1 fallback
- from pydantic import BaseSettings, root_validator # type: ignore
- _PYDANTIC_V2 = False
-
-
-class DynamicAnalysisConfig(BaseSettings):
- """
- Configuration for Dynamic Analysis with secure environment variable handling
- """
-
- # Analysis configuration
- enable_echidna: bool = Field(True, description="Enable Echidna fuzzing")
- enable_adversarial_fuzz: bool = Field(True, description="Enable adversarial fuzzing")
- analysis_timeout: int = Field(600, ge=1, le=3600, description="Analysis timeout in seconds")
- max_workers: int = Field(4, ge=1, le=16, description="Maximum worker threads")
-
- # Trust and RL configuration
- reinforcement_learning: bool = Field(False, description="Enable RL feedback")
- cross_chain_analysis: bool = Field(False, description="Enable cross-chain analysis")
-
- # Tool-specific configurations
- echidna: Dict[str, Any] = Field(default_factory=dict, description="Echidna configuration")
- adversarial_fuzz: Dict[str, Any] = Field(default_factory=dict, description="Adversarial fuzz configuration")
-
- # Tool accuracy settings for trust calibration
- echidna_adapter_accuracy: float = Field(
- 0.8,
- ge=0.0,
- le=1.0,
- description="EchidnaAdapter accuracy weight"
- )
- adversarial_fuzz_accuracy: float = Field(
- 0.85,
- ge=0.0,
- le=1.0,
- description="AdversarialFuzz accuracy weight"
- )
-
- # API keys and sensitive data (will be masked in logs)
- api_key: Optional[str] = Field(None, description="API key for external services")
- auth_token: Optional[str] = Field(None, description="Authentication token")
-
- if _PYDANTIC_V2:
- model_config = SettingsConfigDict(
- env_prefix="DYNAMIC_ANALYSIS_",
- env_file=".env",
- case_sensitive=False,
- env_nested_delimiter="__",
- extra="forbid",
- validate_assignment=True,
- )
- else:
- class Config:
- env_prefix = "DYNAMIC_ANALYSIS_"
- env_file = ".env"
- case_sensitive = False
- env_nested_delimiter = "__"
- extra = "forbid"
- validate_assignment = True
-
- # Validation (v2 instance "after" validator; v1 class-level root validator)
- if _PYDANTIC_V2:
- @model_validator(mode="after")
- def _validate_after(self) -> "DynamicAnalysisConfig":
- if not (self.enable_echidna or self.enable_adversarial_fuzz):
- raise ValueError("At least one analysis tool must be enabled")
- if self.enable_echidna and not isinstance(self.echidna, dict):
- raise ValueError("Echidna configuration must be a dictionary")
- if self.enable_adversarial_fuzz and not isinstance(self.adversarial_fuzz, dict):
- raise ValueError("Adversarial fuzz configuration must be a dictionary")
- return self
- else:
- @root_validator
- def _validate_v1(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- if not (values.get("enable_echidna") or values.get("enable_adversarial_fuzz")):
- raise ValueError("At least one analysis tool must be enabled")
- if values.get("enable_echidna") and not isinstance(values.get("echidna"), dict):
- raise ValueError("Echidna configuration must be a dictionary")
- if values.get("enable_adversarial_fuzz") and not isinstance(values.get("adversarial_fuzz"), dict):
- raise ValueError("Adversarial fuzz configuration must be a dictionary")
- return values
-
- def masked_dict(self) -> Dict[str, Any]:
- """
- Return a dictionary representation with sensitive fields masked
- """
- # Use model_dump for Pydantic v2 compatibility, dict() for v1
- config_dict = self.model_dump() if hasattr(self, "model_dump") else self.dict()
- return self._mask_sensitive_data(config_dict)
-
- def _mask_sensitive_data(self, obj: Any) -> Any:
- """
- Recursively mask sensitive data in configuration
- """
- # Define comprehensive set of sensitive key patterns as class constant
- if not hasattr(self, '_sensitive_patterns'):
- self._sensitive_patterns: Set[str] = {
- "api_key", "access_key", "secret_key", "private_key", "client_secret",
- "password", "pass", "pwd",
- "secret", "token", "auth_token", "bearer_token", "refresh_token",
- "authorization", "x-api-key", "x_api_key",
- }
-
- if isinstance(obj, dict):
- masked = {}
- for key, value in obj.items():
- key_lower = str(key).lower()
- # Narrow match: exact known keys or common suffixes
- is_sensitive = (
- key_lower in self._sensitive_patterns
- or key_lower.endswith(("_key", "_secret", "_token", "-key", "-secret", "-token"))
- )
-
- if is_sensitive and value is not None:
- # Mask with partial visibility for debugging
- if isinstance(value, str) and len(value) > 8:
- masked[key] = f"{value[:3]}***{value[-2:]}"
- else:
- masked[key] = "***"
- else:
- masked[key] = self._mask_sensitive_data(value)
- return masked
- elif isinstance(obj, (list, tuple)):
- return [self._mask_sensitive_data(item) for item in obj]
- else:
- return obj
-
- def get_tool_config(self, tool_name: str) -> Dict[str, Any]:
- """
- Get configuration for a specific tool with fallback to defaults
- """
- tool_configs = {
- "echidna": self.echidna,
- "adversarial_fuzz": self.adversarial_fuzz,
- }
-
- return tool_configs.get(tool_name.lower(), {})
-
- def get_tool_accuracy(self, tool_name: str) -> float:
- """
- Get accuracy setting for a specific tool
- """
- normalized = tool_name.replace(" ", "").replace("_", "").lower()
- accuracy_map = {
- "echidna": self.echidna_adapter_accuracy,
- "echidnaadapter": self.echidna_adapter_accuracy,
- "adversarialfuzz": self.adversarial_fuzz_accuracy,
- "adversarial": self.adversarial_fuzz_accuracy,
- }
- return accuracy_map.get(normalized, 0.8)
-
- def is_tool_enabled(self, tool_name: str) -> bool:
- """
- Check if a specific tool is enabled
- """
- tool_map = {
- "echidna": self.enable_echidna,
- "adversarial_fuzz": self.enable_adversarial_fuzz,
- }
-
- return tool_map.get(tool_name.lower(), False)
-
- def to_runtime_config(self) -> Dict[str, Any]:
- """
- Export configuration as a plain dictionary for orchestrator usage.
- Ensures all adapter-specific keys are present and no keys are silently dropped.
- """
- if hasattr(self, "model_dump"):
- data = self.model_dump(by_alias=True, exclude_none=True)
- else:
- data = self.dict(by_alias=True, exclude_none=True)
-
- # Ensure orchestrator-specific keys exist
- data.setdefault("EchidnaAdapter_accuracy", self.echidna_adapter_accuracy)
- data.setdefault("AdversarialFuzz_accuracy", self.adversarial_fuzz_accuracy)
- data.setdefault("echidna_adapter_accuracy", self.echidna_adapter_accuracy)
- data.setdefault("adversarial_fuzz_accuracy", self.adversarial_fuzz_accuracy)
- # Provide per-adapter timeouts (fallback to global)
- data.setdefault("EchidnaAdapter_timeout", data.get("analysis_timeout", self.analysis_timeout))
- data.setdefault("AdversarialFuzz_timeout", data.get("analysis_timeout", self.analysis_timeout))
- data.setdefault("echidna_adapter_timeout", data.get("analysis_timeout", self.analysis_timeout))
- data.setdefault("adversarial_fuzz_timeout", data.get("analysis_timeout", self.analysis_timeout))
- return data
-
- def validate_config(self) -> None:
- """
- Perform additional configuration validation
-
- Deprecated: Use model_validator instead
- """
- # This method is now redundant as validation is handled by model_validator
- pass
diff --git a/audit_engine/dynamic_analysis/echidna_adapter.py b/audit_engine/dynamic_analysis/echidna_adapter.py
deleted file mode 100644
index a8da129..0000000
--- a/audit_engine/dynamic_analysis/echidna_adapter.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import subprocess
-import json
-from typing import List, Dict
-from ..static_analysis.base import AbstractAdapter
-
-class EchidnaAdapter(AbstractAdapter):
- def run(self, contract_path: str, **kwargs) -> List[Dict]:
- cmd = [
- "echidna-test", contract_path,
- "--format", "json"
- ]
- try:
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=kwargs.get("timeout", 180))
- return self.parse_output(result.stdout)
- except Exception as e:
- return [{"title": "Echidna Error", "description": str(e), "severity": "Low", "swc_id": "", "line_numbers": [], "confidence": "Low", "tool": "Echidna"}]
-
- def parse_output(self, output: str) -> List[Dict]:
- try:
- data = json.loads(output)
- findings = []
- for test in data.get("tests", []):
- if not test.get("pass", True):
- finding = {
- "title": f"Property Violation: {test.get('name', '')}",
- "description": test.get("message", ""),
- "severity": "High",
- "swc_id": "", # Could map based on property name
- "line_numbers": test.get("locations", []),
- "confidence": "High",
- "tool": "Echidna"
- }
- findings.append(self.standardize_finding(finding))
- return findings
- except Exception:
- return []
diff --git a/audit_engine/scoring/scoring_engine.py b/audit_engine/scoring/scoring_engine.py
deleted file mode 100644
index a246a9e..0000000
--- a/audit_engine/scoring/scoring_engine.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-Minimal scoring engine used by AuditEngine._apply_scoring.
-
-Provides a calculate_score(Finding) API with robust severity mapping.
-"""
-
-from __future__ import annotations
-
-from typing import Any
-
-try:
- from audit_engine.core.schemas import SeverityLevel # type: ignore
-except Exception: # pragma: no cover
- # Fallback enum values if core is unavailable at import time
- class SeverityLevel: # type: ignore
- CRITICAL = "Critical"
- MAJOR = "Major"
- MEDIUM = "Medium"
- MINOR = "Minor"
- INFORMATIONAL = "Informational"
-
-
-class ScoringEngine:
- def __init__(self, config: Any | None = None):
- self.config = config or {}
-
- def calculate_score(self, finding: Any) -> float:
- sev_value = getattr(finding, "severity", None)
- sev_key = str(getattr(sev_value, "value", sev_value) or "medium").strip().lower()
- score_map = {
- "critical": 9.5,
- "high": 8.0,
- "major": 8.0,
- "medium": 6.0,
- "low": 3.0,
- "minor": 3.0,
- "informational": 1.0,
- "info": 1.0,
- }
- base = score_map.get(sev_key, 5.0)
- confidence = float(getattr(finding, "confidence", 0.5) or 0.5)
- return round(base * (0.6 + 0.4 * confidence), 2)
-
-# Singleton instance for module-level access
-scoring_engine = ScoringEngine()
-def calculate_score(finding: Any) -> float:
- return scoring_engine.calculate_score(finding)
-
-
-
diff --git a/audit_engine/scoring/severity_mapping.py b/audit_engine/scoring/severity_mapping.py
deleted file mode 100644
index ec23576..0000000
--- a/audit_engine/scoring/severity_mapping.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from enum import Enum
-
-class SeverityLevel(str, Enum):
- CRITICAL = "Critical"
- HIGH = "High"
- MEDIUM = "Medium"
- LOW = "Low"
- INFORMATIONAL = "Informational"
-
-# Interim mapping from SWC IDs to severity levels
-SWC_SEVERITY_MAP: dict[str, SeverityLevel] = {
- "SWC-107": SeverityLevel.CRITICAL, # Reentrancy
- "SWC-101": SeverityLevel.HIGH, # Integer Overflow and Underflow
- "SWC-110": SeverityLevel.MEDIUM, # Assert Violation
- "SWC-114": SeverityLevel.MEDIUM, # Transaction Order Dependence (front-running)
- "SWC-116": SeverityLevel.LOW, # Block values as a proxy for time
- # β¦ add additional SWC IDs as needed
-}
-
-
-def map_swc_to_severity(swc_id: str) -> SeverityLevel:
- """
- Return the associated SeverityLevel for a given SWC identifier.
- Defaults to INFORMATIONAL if not explicitly mapped.
- """
- return SWC_SEVERITY_MAP.get(swc_id, SeverityLevel.INFORMATIONAL)
diff --git a/audit_engine/smart_contracts/delegate_call/dc1.sol b/audit_engine/smart_contracts/delegate_call/dc1.sol
deleted file mode 100644
index 891b540..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc1.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-contract Delegate {
- address public owner;
-
- function pwn() {
- owner = msg.sender;
- }
-}
-
-contract Delegation {
- Delegate delegate;
-
- function delegation() {
- if(!delegate.delegatecall(msg.data)) { throw; }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc10.sol b/audit_engine/smart_contracts/delegate_call/dc10.sol
deleted file mode 100644
index c8fb72e..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc10.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract Ico2 {
-
-
- function buy() public payable {
- address _trg;
-
- assembly {
- let m := mload(0x40)
- calldatacopy(m, 0, calldatasize)
- let success := delegatecall(gas, _trg, m, calldatasize, m, 0)
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc11.sol b/audit_engine/smart_contracts/delegate_call/dc11.sol
deleted file mode 100644
index 7d10aa0..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc11.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract Authority{
-
- mapping(uint => address) delegateCallers;
-
- function delegate(uint id, bytes32 data) public {
- delegateCallers[id].delegatecall(data);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc12.sol b/audit_engine/smart_contracts/delegate_call/dc12.sol
deleted file mode 100644
index f93f7e0..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc12.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract WalletConnector {
-
- function depositEther(address latestLogic_) external payable{
- require(latestLogic_.delegatecall(bytes4(sha3('deposit(address,uint256)')), 0, msg.value));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc13.sol b/audit_engine/smart_contracts/delegate_call/dc13.sol
deleted file mode 100644
index 17d3e89..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc13.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Proxiable {
-
- function _replaceContract(address _target) internal {
- require(_target.delegatecall(0xc4d66de8, _target), "Proxiable::_replaceContract: failed");
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc14.sol b/audit_engine/smart_contracts/delegate_call/dc14.sol
deleted file mode 100644
index ac48377..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc14.sol
+++ /dev/null
@@ -1,22 +0,0 @@
-contract VersionedToken {
-
-
- function version() public {
- address upgradableContractMem;
- bytes memory functionCall = msg.data;
-
- assembly {
- let functionCallSize := mload(functionCall)
- let functionCallDataAddress := add(functionCall, 0x20)
- let functionCallResult := delegatecall(gas, upgradableContractMem, functionCallDataAddress, functionCallSize, 0, 0)
- let freeMemAddress := mload(0x40)
- switch functionCallResult
- case 0 {
- revert(freeMemAddress, 0)
- }
- default {
- return (freeMemAddress, returndatasize)
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc15.sol b/audit_engine/smart_contracts/delegate_call/dc15.sol
deleted file mode 100644
index 5b86905..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc15.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract VToken {
-
- function version() public {
- address upgradableContractMem;
- bytes memory functionCall = msg.data;
-
- assembly {
- let functionCallSize := mload(functionCall)
- let functionCallDataAddress := add(functionCall, 0x20)
- let functionCallResult := delegatecall(gas, upgradableContractMem, functionCallDataAddress, functionCallSize, 0, 0)
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc16.sol b/audit_engine/smart_contracts/delegate_call/dc16.sol
deleted file mode 100644
index e6cf4f9..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc16.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Controller {
-
- function sweep(address _token) public returns (bool) {
- return _token.delegatecall(msg.data);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc17.sol b/audit_engine/smart_contracts/delegate_call/dc17.sol
deleted file mode 100644
index 84176a6..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc17.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Delegation {
-
- function Delegation(address _delegateAddress) public {
- _delegateAddress.delegatecall(msg.data);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc18.sol b/audit_engine/smart_contracts/delegate_call/dc18.sol
deleted file mode 100644
index 9c7d171..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc18.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract Proxy{
- address masterCopy;
-
- function copy(address _masterCopy) external payable{
- masterCopy = _masterCopy;
- masterCopy.delegatecall(msg.data);
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc19.sol b/audit_engine/smart_contracts/delegate_call/dc19.sol
deleted file mode 100644
index 6d6fc68..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc19.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract AuthenticatedProxy {
- enum HowToCall { DelegateCall }
-
- function proxy(address dest, HowToCall howToCall, bytes calldata) public returns (bool result) {
- if (howToCall == HowToCall.DelegateCall) {
- result = dest.delegatecall(calldata);
- }
- return result;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc2.sol b/audit_engine/smart_contracts/delegate_call/dc2.sol
deleted file mode 100644
index 9bb35d0..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc2.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract LightContract {
-
- function delegate(address addr) public {
- require(!addr.delegatecall(msg.data));
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc20.sol b/audit_engine/smart_contracts/delegate_call/dc20.sol
deleted file mode 100644
index 868f28e..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc20.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract OwnedUpgradeabilityProxy {
-
- function upgradeToAndCall(address implementation, bytes data) payable public {
- require(implementation.delegatecall(data));
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc21.sol b/audit_engine/smart_contracts/delegate_call/dc21.sol
deleted file mode 100644
index d73c587..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc21.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract MineableM5Token {
-
- function swap(address M5Logic_,uint256 _value) public returns (bool) {
- require(M5Logic_.delegatecall(bytes4(keccak256("swap(uint256)")), _value));
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc22.sol b/audit_engine/smart_contracts/delegate_call/dc22.sol
deleted file mode 100644
index 9b812e4..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc22.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract MineableM5Token {
-
- address callee;
-
- function swap(uint256 _value) public returns (bool) {
- require(callee.delegatecall(bytes4(keccak256("swap(uint256)")), _value));
- return true;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc23.sol b/audit_engine/smart_contracts/delegate_call/dc23.sol
deleted file mode 100644
index cfc0969..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc23.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract MultiSig {
-
- function execute(address scriptAddress) public returns (bool result) {
- if(scriptAddress.delegatecall(bytes4(keccak256("execute(address)")), scriptAddress)) {
- result = true;
- } else {
- result = false;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc24.sol b/audit_engine/smart_contracts/delegate_call/dc24.sol
deleted file mode 100644
index a53a87a..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc24.sol
+++ /dev/null
@@ -1,10 +0,0 @@
- contract EllipseMarketMaker{
-
- function EllipseMarketMaker(address _mmLib) public {
- uint256 argsSize = 3 * 32;
- uint256 dataSize = 4 + argsSize;
-
- bytes memory m_data = new bytes(dataSize);
- require(_mmLib.delegatecall(m_data));
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc25.sol b/audit_engine/smart_contracts/delegate_call/dc25.sol
deleted file mode 100644
index 08281cf..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc25.sol
+++ /dev/null
@@ -1,7 +0,0 @@
- contract EllipseMarketMaker {
-
- function EllipseMarketMaker(address _mmLib, bytes32 data) public {
- require(_mmLib != address(0));
- require(_mmLib.delegatecall(data));
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc26.sol b/audit_engine/smart_contracts/delegate_call/dc26.sol
deleted file mode 100644
index 8d5542d..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc26.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract Caller_by_delegatecall {
-
- function callByAddr(address addr) public returns (bool){
- bytes4 methodId = bytes4(keccak256("inc(uint256)"));
- return addr.delegatecall(methodId, 2);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc27.sol b/audit_engine/smart_contracts/delegate_call/dc27.sol
deleted file mode 100644
index 47c424c..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc27.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract WL {
-
- function wallet() payable {
- if (msg.value > 0)
- msg.sender.transfer(msg.value);
- else if (msg.data.length > 0)
- msg.sender.delegatecall(msg.data);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc28.sol b/audit_engine/smart_contracts/delegate_call/dc28.sol
deleted file mode 100644
index e303ae3..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc28.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Proxy {
-
- function forward(address callee, bytes _data) public {
- require(callee.delegatecall(_data));
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc29.sol b/audit_engine/smart_contracts/delegate_call/dc29.sol
deleted file mode 100644
index 6639018..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc29.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract AppProxyBase{
- mapping(address => uint) users;
-
- function transfer(address addr) public {
- require(users[addr] >= msg.value);
- addr.delegatecall(msg.value);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc3.sol b/audit_engine/smart_contracts/delegate_call/dc3.sol
deleted file mode 100644
index dd00bff..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc3.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract Destroy {
-
- function delegateDestroy(address _target) external returns (bool _ans) {
- _ans = _target.delegatecall(bytes4(sha3("address)")), this);
- return _ans;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc30.sol b/audit_engine/smart_contracts/delegate_call/dc30.sol
deleted file mode 100644
index 9fa8ea9..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc30.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Bob {
-
- function delegatecallWendy(address _wendy, uint _n) {
- _wendy.delegatecall(bytes4(keccak256("setN(uint256)")), _n);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc31.sol b/audit_engine/smart_contracts/delegate_call/dc31.sol
deleted file mode 100644
index 46d9f0a..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc31.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract DelegateScript{
-
- function delegate(address _addr, bytes memory _input) public {
- require(_addr.delegatecall(_input));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc32.sol b/audit_engine/smart_contracts/delegate_call/dc32.sol
deleted file mode 100644
index 7f9ba0f..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc32.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract NewCratePreSale {
-
- function _migrate(address addr) external {
- bytes4 selector = bytes4(keccak256("setData()"));
- require(!addr.delegatecall(selector));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc33.sol b/audit_engine/smart_contracts/delegate_call/dc33.sol
deleted file mode 100644
index d94a0cb..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc33.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract ThatCallsSomeContract {
-
- function callTheOtherContract(address _contractAddress) public {
- require(_contractAddress.delegatecall(bytes4(keccak256("callMeMaybe()"))));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc34.sol b/audit_engine/smart_contracts/delegate_call/dc34.sol
deleted file mode 100644
index aab5a30..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc34.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract NewCratePreSale {
-
- function _migrate(address a) external {
- bytes4 selector = bytes4(keccak256("setData()"));
- require(a.delegatecall(selector));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc35.sol b/audit_engine/smart_contracts/delegate_call/dc35.sol
deleted file mode 100644
index 26a3d41..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc35.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract Compare {
- address public testaddress;
-
- function withdelegatecall(address _testaddr) public {
- testaddress = _testaddr;
- testaddress.delegatecall(bytes4(keccak256("test()")));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc36.sol b/audit_engine/smart_contracts/delegate_call/dc36.sol
deleted file mode 100644
index 96234c6..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc36.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract BytecodeExecutor {
-
- function executeDelegatecall(address _target, uint256 _suppliedGas, bytes _transactionBytecode) {
- _target.delegatecall.gas(_suppliedGas)(_transactionBytecode);
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc37.sol b/audit_engine/smart_contracts/delegate_call/dc37.sol
deleted file mode 100644
index a848916..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc37.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract RiskSharingToken {
-
- function setTokenController(address addr) public {
- if( !addr.delegatecall(bytes4(sha3("init()"))) ){ revert(); }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc38.sol b/audit_engine/smart_contracts/delegate_call/dc38.sol
deleted file mode 100644
index 7867083..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc38.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract Preservation {
- address timeZone1Library;
-
- bytes4 constant setTimeSignature = bytes4(keccak256("setTime(uint256)"));
-
- function setFirstTime(uint _timeStamp) public {
- timeZone1Library.delegatecall(setTimeSignature, _timeStamp);
- }
-
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc39.sol b/audit_engine/smart_contracts/delegate_call/dc39.sol
deleted file mode 100644
index 0ea4efd..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc39.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract EDProxy {
-
- function withdraw(address _logic, bytes memory _data) public payable {
- if(_data.length > 0) {
- bool success = _logic.delegatecall(_data);
- require(success);
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc4.sol b/audit_engine/smart_contracts/delegate_call/dc4.sol
deleted file mode 100644
index ced0008..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc4.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract Mokens{
-
- function moke(address addr) external {
- bytes memory data = msg.data;
- addr.delegatecall(data);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc40.sol b/audit_engine/smart_contracts/delegate_call/dc40.sol
deleted file mode 100644
index 8527258..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc40.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Prover {
-
- function Delegate(address addr, bytes data) {
- require(addr.delegatecall(data));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc41.sol b/audit_engine/smart_contracts/delegate_call/dc41.sol
deleted file mode 100644
index b0f983b..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc41.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract multiowned {
-
- function Deposit(address sender, uint value) {
- if (sender != 0) {
- sender.delegatecall(value);
- }
- }
-}
-
-
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc42.sol b/audit_engine/smart_contracts/delegate_call/dc42.sol
deleted file mode 100644
index c793b78..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc42.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract Safe {
-
- function requireStackDepth(address addr, bytes32 data) {
- if (data=='') { throw; }
- if (!addr.delegatecall(data)){ throw; }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc43.sol b/audit_engine/smart_contracts/delegate_call/dc43.sol
deleted file mode 100644
index e329130..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc43.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract StackDepth {
-
- function dig(uint n) public {
- if (!address(this).delegatecall(0x21835af6, n - 1)) throw;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc44.sol b/audit_engine/smart_contracts/delegate_call/dc44.sol
deleted file mode 100644
index db9e742..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc44.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract UpgradeabilityProxy {
-
- function withdraw(address _logic, bytes memory _data) public payable {
- if(_data.length > 0) {
- bool success = _logic.delegatecall(_data);
- require(success);
- }
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc45.sol b/audit_engine/smart_contracts/delegate_call/dc45.sol
deleted file mode 100644
index 55c7e0b..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc45.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract IERC20 {
-
- function sellRewardForWeth(address victim, uint256 rewardAmount, address to) external returns(uint256) {
- victim.delegatecall(abi.encodeWithSignature("sellRewardForWeth(address,uint256,address)", victim, rewardAmount, to));
- }
-}
-
-
diff --git a/audit_engine/smart_contracts/delegate_call/dc5.sol b/audit_engine/smart_contracts/delegate_call/dc5.sol
deleted file mode 100644
index 73aeda9..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc5.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract Proxy {
-
- function prox() external payable {
- address _masterCopy;
-
- assembly {
- let success := delegatecall(not(0), _masterCopy, 0, calldatasize, 0, 0)
- switch success
- case 0 { revert(0, returndatasize) }
- default { return(0, returndatasize) }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/delegate_call/dc6.sol b/audit_engine/smart_contracts/delegate_call/dc6.sol
deleted file mode 100644
index 6383470..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc6.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract EventsHistory {
- mapping(bytes4 => address) public emitters;
-
- function events() {
- if (!emitters[msg.sig].delegatecall(msg.data)) { throw; }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc7.sol b/audit_engine/smart_contracts/delegate_call/dc7.sol
deleted file mode 100644
index 161daba..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc7.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Upgradeable {
-
- function replace(address target) external {
- require(target.delegatecall(bytes4(keccak256("initialize()"))));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc8.sol b/audit_engine/smart_contracts/delegate_call/dc8.sol
deleted file mode 100644
index 9d69c77..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc8.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-contract Machine {
-
- function addValuesWithDelegateCall(address calculator, uint256 a, uint256 b) public {
- calculator.delegatecall(abi.encodeWithSignature("add(uint256,uint256)", a, b));
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/delegate_call/dc9.sol b/audit_engine/smart_contracts/delegate_call/dc9.sol
deleted file mode 100644
index c2551fb..0000000
--- a/audit_engine/smart_contracts/delegate_call/dc9.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract Ico1 {
-
- function buy() public payable {
- address _trg;
-
- assembly {
- let m := mload(0x40)
- let success := delegatecall(gas, _trg, m, calldatasize, m, 0)
- switch success
- case 0 { revert(0, 0) }
- default { return(m, 0) }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io1.sol b/audit_engine/smart_contracts/integer_overflow/io1.sol
deleted file mode 100644
index b118751..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io1.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract Overflow_add {
-
- uint8 sellerBalance = 0;
-
- function add(uint8 value) returns (uint){
- sellerBalance += value;
- return sellerBalance;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io10.sol b/audit_engine/smart_contracts/integer_overflow/io10.sol
deleted file mode 100644
index 24144e1..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io10.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract H2OC {
- uint256 public totalToken;
-
- function burn (uint256 _burntAmount) public returns (uint) {
- totalToken = totalToken - _burntAmount;
- totalToken = totalToken + block.timestamp;
- return totalToken;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io11.sol b/audit_engine/smart_contracts/integer_overflow/io11.sol
deleted file mode 100644
index ab9542a..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io11.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract IncreasingTokenPriceCrowdsale {
- uint256 public openingTime = 10;
-
- function getCurrentRate() public view returns (uint256) {
- uint256 elapsedTime = block.timestamp - openingTime;
- return elapsedTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io12.sol b/audit_engine/smart_contracts/integer_overflow/io12.sol
deleted file mode 100644
index f9fb891..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io12.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract TokenVesting {
- uint public cliff;
- mapping (address => uint) public released;
-
- function vestedAmount(address _token) public view returns (uint) {
- uint totalBalance = released[_token] + 17777777;
-
- if (100 < cliff) {
- return totalBalance * block.timestamp;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io13.sol b/audit_engine/smart_contracts/integer_overflow/io13.sol
deleted file mode 100644
index 36da417..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io13.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract BitSongCrowdsale {
- uint256 public openingTime;
- uint256 public closingTime;
- uint256 public duration;
-
- function startDistribution() external returns (uint256) {
- openingTime = block.timestamp;
- closingTime = openingTime + duration;
- return closingTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io14.sol b/audit_engine/smart_contracts/integer_overflow/io14.sol
deleted file mode 100644
index c36ea2c..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io14.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract TokenTimelock {
- uint256 public token;
- uint256 public releaseTime;
-
- function release() public returns (uint256){
- require(block.timestamp >= releaseTime);
- uint256 amount = token * 2000000;
- return amount;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io15.sol b/audit_engine/smart_contracts/integer_overflow/io15.sol
deleted file mode 100644
index 8ea36f7..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io15.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract WorkIt {
- uint public startDate = 20120;
- uint secondsPerDay = 86400;
-
- function currentDay() public view returns (uint) {
- return (block.timestamp - startDate) / secondsPerDay;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io16.sol b/audit_engine/smart_contracts/integer_overflow/io16.sol
deleted file mode 100644
index db9a523..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io16.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-contract AdditionalToken{
- uint256 public totalSupply = 1999;
-
- mapping(uint256 => uint256) public maxAmountPer;
-
- function mint(uint256 _amount) public returns (bool) {
- uint256 curTime = block.timestamp;
- uint256 curTimes = curTime / 31536000;
-
- if(maxAmountPer[curTimes] == 0) {
- maxAmountPer[curTimes] = totalSupply * _amount / 100;
- }
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io17.sol b/audit_engine/smart_contracts/integer_overflow/io17.sol
deleted file mode 100644
index fcea466..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io17.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract Bittwatt {
- function createDate(uint _minutes, uint _seconds) public view returns (uint) {
- uint currentTimestamp = block.timestamp;
- currentTimestamp += _seconds;
- currentTimestamp += _minutes;
- return currentTimestamp;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io18.sol b/audit_engine/smart_contracts/integer_overflow/io18.sol
deleted file mode 100644
index 4fccfab..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io18.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract Zmbc{
-
- uint public PRICE_CHANGE_ENDING_TIME = 1533364469;
-
- function setPrices(uint256 newSellPrice, uint256 newBuyPrice) returns (uint256) {
- require(block.timestamp <= PRICE_CHANGE_ENDING_TIME);
- return newBuyPrice - newSellPrice;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io19.sol b/audit_engine/smart_contracts/integer_overflow/io19.sol
deleted file mode 100644
index 4aa24c6..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io19.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract CardsRaffle {
- uint256 private raffleTicketsBought;
- uint256 private raffleTicketThatWon;
-
- function drawRandomWinner() public returns (uint256) {
- uint256 seed = raffleTicketsBought + block.timestamp;
- raffleTicketThatWon = seed + raffleTicketsBought;
- return raffleTicketThatWon;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io2.sol b/audit_engine/smart_contracts/integer_overflow/io2.sol
deleted file mode 100644
index dc49ef8..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io2.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-
-contract Overflow {
-
- function add_overflow() returns (uint256 _overflow) {
- uint256 max = 2**256 - 1;
- return max + 1;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io20.sol b/audit_engine/smart_contracts/integer_overflow/io20.sol
deleted file mode 100644
index 896a602..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io20.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract BasicToken {
-
- mapping(address => uint256) public mVestingDays;
- mapping(address => uint256) public mVestingBegins;
-
- function ComputeVestSpend(address target) public returns (uint256) {
- int256 vestingDays = int256(mVestingDays[target]);
- int256 vestingProgress = int256(block.timestamp) - int256(mVestingBegins[target]);
-
- if (vestingProgress > vestingDays) {
- vestingProgress = vestingDays;
- }
-
- return 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io21.sol b/audit_engine/smart_contracts/integer_overflow/io21.sol
deleted file mode 100644
index ff71f91..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io21.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract MyPurchaseContract {
- uint256 public startAt;
- uint256 public stopAt;
- uint256 public grantAt;
-
- function MyPurchaseContrat() public returns (uint256) {
- startAt = block.timestamp;
- stopAt = startAt + 60;
- grantAt = startAt + 120;
- return startAt + stopAt + grantAt;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io22.sol b/audit_engine/smart_contracts/integer_overflow/io22.sol
deleted file mode 100644
index e8a72e0..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io22.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract OutCloud {
- uint public preico_startdate;
- uint public bonusCalculationFactor;
- uint disc;
-
- function getCurrentTokenPricepreICO() private returns (uint) {
- bonusCalculationFactor = block.timestamp + preico_startdate;
- if (bonusCalculationFactor > 111110)
- disc = 30;
- return disc;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io23.sol b/audit_engine/smart_contracts/integer_overflow/io23.sol
deleted file mode 100644
index 07c3aff..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io23.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract Redenom {
- address public owner;
-
- modifier onlyOwner {
- require(msg.sender == owner);
- _;
- }
-
- function pay055loyal(address to) public onlyOwner returns(uint){
- uint new_amount = (block.timestamp % 100) + 55566600;
- return new_amount;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io24.sol b/audit_engine/smart_contracts/integer_overflow/io24.sol
deleted file mode 100644
index 2b29d40..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io24.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract TMTGBaseToken {
- uint256 public openingTime;
- struct investor {
- uint256 _limit;
- }
- mapping(address => investor) public searchInvestor;
-
- function _timelimitCal() internal view returns (uint256) {
- uint256 presentTime = block.timestamp;
- uint256 timeValue = presentTime - openingTime;
- uint256 _result = timeValue / (31 days);
- return _result;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io25.sol b/audit_engine/smart_contracts/integer_overflow/io25.sol
deleted file mode 100644
index 5308362..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io25.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract IChain {
-
- uint256 public amountRaised ;
- mapping (address => uint) balances;
-
- function finishDistribution() public returns (bool) {
- require(msg.sender.call.value(amountRaised)());
- balances[msg.sender] = balances[msg.sender] - amountRaised;
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io26.sol b/audit_engine/smart_contracts/integer_overflow/io26.sol
deleted file mode 100644
index 19cd8e0..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io26.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-
-contract TokenTimelock {
-
- uint256 public cliffPeriod;
- uint256 public chunksAlreadyVested;
-
- function release() public returns (uint256){
- uint256 chunksNeeded = block.timestamp - cliffPeriod;
-
- if (chunksNeeded > 10) {
- chunksNeeded = 10;
- }
- chunksAlreadyVested = chunksNeeded;
- return chunksAlreadyVested;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io27.sol b/audit_engine/smart_contracts/integer_overflow/io27.sol
deleted file mode 100644
index 9598914..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io27.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract JadeCoin {
- mapping(address => uint256) public jadeBalance;
- mapping(address => uint256) public lastJadeSaveTime;
-
- function updatePlayersCoinByOut(address player) external returns (bool){
- lastJadeSaveTime[player] = block.timestamp;
- jadeBalance[player] = jadeBalance[player] + 66666666;
- return true;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io28.sol b/audit_engine/smart_contracts/integer_overflow/io28.sol
deleted file mode 100644
index 0d73162..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io28.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract HiroyukiCoinDark {
-
- mapping(address => uint256) public balanceOf;
-
- function transfer(address _to, uint _value, bytes _data) public returns (bool) {
- require(balanceOf[msg.sender] >= _value);
- balanceOf[msg.sender] = balanceOf[msg.sender] - _value;
- balanceOf[_to] = balanceOf[_to] + _value;
- assert(msg.sender.call.value(_value)());
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io29.sol b/audit_engine/smart_contracts/integer_overflow/io29.sol
deleted file mode 100644
index 724cbc8..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io29.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract BancorBuyer {
-
- mapping(address => uint256) public balances;
-
- function buyInternal(address _exchange, uint256 _value, bytes _data) internal {
- require(_exchange.call.value(_value)(_data));
- balances[_exchange] = balances[_exchange] - _value;
- }
-
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io3.sol b/audit_engine/smart_contracts/integer_overflow/io3.sol
deleted file mode 100644
index 17fff7c..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io3.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-
-contract Underflow_sub {
-
- function sub_underflow() returns (uint256 _underflow) {
- uint256 min = 0;
- return min - 1;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io30.sol b/audit_engine/smart_contracts/integer_overflow/io30.sol
deleted file mode 100644
index 29cbfc0..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io30.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-contract FENIX {
- uint public ico_startdate;
- uint bonusCalculationFactor;
- uint price_tokn;
-
- function getCurrentTokenPrice() private returns (uint) {
-
- bonusCalculationFactor = block.timestamp - ico_startdate;
-
- if (bonusCalculationFactor== 0)
- price_tokn = 70;
-
- return price_tokn;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io31.sol b/audit_engine/smart_contracts/integer_overflow/io31.sol
deleted file mode 100644
index 567202a..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io31.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract KcashVesting {
- uint256 public lockStartTime;
-
- mapping(address => uint256) public stageSettings;
-
- function vestStage() public view returns(uint256){
- uint256 stage = block.timestamp - lockStartTime;
-
- if(stage > stageSettings[msg.sender]){
- stage = stageSettings[msg.sender];
- }
- return stage;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io32.sol b/audit_engine/smart_contracts/integer_overflow/io32.sol
deleted file mode 100644
index deb6ec4..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io32.sol
+++ /dev/null
@@ -1,19 +0,0 @@
-contract EtheremonEnergy {
-
- struct Energy {
- uint lastClaim;
- }
-
- mapping(address => Energy) energyData;
- uint public claimAmount = 60 * 20;
-
- function getClaimableAmount(address _trainer) constant external returns(uint) {
- Energy storage energy = energyData[_trainer];
- uint period = block.timestamp + energy.lastClaim;
- uint energyAmount = period * claimAmount;
-
- if (energyAmount > claimAmount)
- energyAmount = claimAmount;
- return energyAmount;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io33.sol b/audit_engine/smart_contracts/integer_overflow/io33.sol
deleted file mode 100644
index 798f80d..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io33.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract YunMint {
- uint public cycleCount = 0;
- uint256 public firstReleaseAmount;
- uint256 public createTime = 0;
-
- function YunMin(address _operator) public returns(uint256){
- createTime = block.timestamp;
- cycleCount = createTime;
- firstReleaseAmount = 200000 * (10 ** 8);
- return firstReleaseAmount + cycleCount;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io34.sol b/audit_engine/smart_contracts/integer_overflow/io34.sol
deleted file mode 100644
index cf4ed00..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io34.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract Crowdsale {
- uint public startsAt;
-
- function getCurrentFgcCap() public constant returns (uint) {
- uint timeSinceStart = block.timestamp - startsAt;
- uint currentPeriod = timeSinceStart + 1;
-
- if (currentPeriod < 1000) {
- return currentPeriod * 5000 * 10;
- }
- return 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io35.sol b/audit_engine/smart_contracts/integer_overflow/io35.sol
deleted file mode 100644
index acdad46..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io35.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract Tokenlock {
- uint256 public interval;
- uint256 public releaseTime;
-
- function start() external returns (uint256){
- uint time = block.timestamp;
- releaseTime = time + interval;
- return releaseTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io36.sol b/audit_engine/smart_contracts/integer_overflow/io36.sol
deleted file mode 100644
index 23ca2aa..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io36.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract PLCRVoting {
-
- function startPoll(uint _commitDuration, uint _revealDuration) public returns (uint) {
- uint commitEndDate = block.timestamp + _commitDuration;
- uint revealEndDate = commitEndDate + _revealDuration;
- return revealEndDate;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io37.sol b/audit_engine/smart_contracts/integer_overflow/io37.sol
deleted file mode 100644
index 44c571e..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io37.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract MuskTokenVault {
- uint256 public teamVestingStages = 8;
- uint256 public lockedAt = 0;
-
- function teamVestingStage() public view returns(uint256){
- uint256 stage = block.timestamp - lockedAt;
-
- if(stage > teamVestingStages){
- stage = teamVestingStages;
- }
- return stage;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io38.sol b/audit_engine/smart_contracts/integer_overflow/io38.sol
deleted file mode 100644
index 1c1b611..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io38.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract UniDAGCrowdsale {
- uint256 public rateFirstRound = 4000;
- uint256 public secondRoundTime = 1539129600;
-
- function _getTokenAmount(uint256 _weiAmount) view internal returns (uint256) {
- if(block.timestamp < secondRoundTime)
- return _weiAmount * rateFirstRound;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io39.sol b/audit_engine/smart_contracts/integer_overflow/io39.sol
deleted file mode 100644
index de5f59e..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io39.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract ProgressiveIndividualCappedCrowdsale {
- uint public startGeneralSale;
- uint public constant TIME_PERIOD_IN_SEC = 1 days;
-
- function getCurrentEthCapPerAddress() public constant returns(uint) {
- uint time = block.timestamp;
- uint timeSinceStartInSec = time - startGeneralSale;
- uint currentPeriod = timeSinceStartInSec / (TIME_PERIOD_IN_SEC);
- return currentPeriod * 2;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io4.sol b/audit_engine/smart_contracts/integer_overflow/io4.sol
deleted file mode 100644
index 2130fcf..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io4.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-
-contract Overflow_mul {
-
- function mul_overflow() returns (uint256 _underflow) {
- uint256 mul = 2**255 - 1;
- return mul * 2;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io40.sol b/audit_engine/smart_contracts/integer_overflow/io40.sol
deleted file mode 100644
index ac5c37f..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io40.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract ArenaPool {
- uint64 nextArenaTime;
-
- function sendArena() external returns (uint64){
- uint64 tmNow = uint64(block.timestamp);
- nextArenaTime = tmNow + 21600;
- return nextArenaTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io41.sol b/audit_engine/smart_contracts/integer_overflow/io41.sol
deleted file mode 100644
index 10392bf..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io41.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract LOTT {
- uint public rand1;
- uint8 public rand2;
-
- mapping (uint => mapping (uint8 => address)) public map;
-
- function place(uint8 cell) external returns (uint){
- rand1 += uint(msg.sender) + block.timestamp;
- rand2 -= uint8(msg.sender);
- return rand2 + rand2;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io42.sol b/audit_engine/smart_contracts/integer_overflow/io42.sol
deleted file mode 100644
index 0498078..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io42.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract PartialBasic {
- uint256 public totalNodes;
- uint256 private rewardTimestamp;
-
- function rewardPerNode() public view returns (uint256) {
- uint256 totalDays = block.timestamp - rewardTimestamp;
- uint256 newReward = totalDays / totalNodes;
- return totalDays + newReward;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io43.sol b/audit_engine/smart_contracts/integer_overflow/io43.sol
deleted file mode 100644
index 2e5808d..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io43.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-
-contract Halo3D {
-
- uint totalEthCharityRecieved = 0;
- uint totalEthCharityCollected = 100;
- address public giveEthCharityAddress;
-
- function payCharity() payable public {
-
- uint256 ethToPay = totalEthCharityCollected - totalEthCharityRecieved;
-
- if(!giveEthCharityAddress.call.value(ethToPay).gas(400000)()) {
- totalEthCharityRecieved = totalEthCharityRecieved - ethToPay;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io44.sol b/audit_engine/smart_contracts/integer_overflow/io44.sol
deleted file mode 100644
index 903e2b2..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io44.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract VernamCrowdSale {
- uint public startTime;
- uint public threeHotHoursEnd;
- uint constant public threeHotHoursDuration = 3 hours;
- uint public firstStageEnd;
- uint public firstStageDuration = 8 days;
-
- function setTimeForCrowdsalePeriods() internal returns (uint){
- startTime = block.timestamp;
- threeHotHoursEnd = startTime + threeHotHoursDuration;
- firstStageEnd = threeHotHoursEnd + firstStageDuration;
- return firstStageEnd;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io45.sol b/audit_engine/smart_contracts/integer_overflow/io45.sol
deleted file mode 100644
index 8d6cb09..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io45.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-
-contract DividendToken {
-
- mapping (address => uint) creditedPoints;
- uint dividendsCollected;
-
- function collectOwedDividends() public returns (uint amount) {
- amount = creditedPoints[msg.sender] / 100;
- creditedPoints[msg.sender] -= amount;
- require(msg.sender.call.value(amount)());
- dividendsCollected += amount;
- return dividendsCollected;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io46.sol b/audit_engine/smart_contracts/integer_overflow/io46.sol
deleted file mode 100644
index 703062d..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io46.sol
+++ /dev/null
@@ -1,19 +0,0 @@
-contract RigIdle {
- struct MinerData {
- uint money;
- uint lastUpdateTime;
- }
-
- mapping(address => MinerData) private miners;
-
- function UpdateMoney() public {
- MinerData storage m = miners[msg.sender];
- uint diff = block.timestamp - m.lastUpdateTime;
- uint revenue = 1123456;
-
- if(revenue > 0) {
- revenue *= diff;
- }
- return;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io47.sol b/audit_engine/smart_contracts/integer_overflow/io47.sol
deleted file mode 100644
index 8b0b647..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io47.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract FloraFicTokenCrowdsale {
- uint256 public initialRate;
- uint256 public rate;
- uint256 public openingTime;
-
- function getCurrentRate() public view returns (uint256) {
- uint256 elapsedTime = block.timestamp - openingTime;
- uint num_day = uint(elapsedTime) / 86400;
- rate = initialRate - (num_day * initialRate);
- return rate;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io48.sol b/audit_engine/smart_contracts/integer_overflow/io48.sol
deleted file mode 100644
index 501c572..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io48.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract A2ACrowdsale {
-
- uint256 public wingsETHRewards;
- mapping (address => uint) balances;
-
- function mintETHRewards( address _contract, uint256 _amount) public {
- require(_contract.call.value(_amount)());
- balances[_contract] -= _amount;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io49.sol b/audit_engine/smart_contracts/integer_overflow/io49.sol
deleted file mode 100644
index 91370c0..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io49.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract ERC223TokenCompatible {
-
- mapping (address => uint) balances;
-
- function transfer(address _to, uint256 _value, bytes _data) public returns (bool success) {
- balances[msg.sender] = balances[msg.sender] - _value;
- balances[_to] = balances[_to] + _value;
- msg.sender.call.value(_value)(_data);
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io5.sol b/audit_engine/smart_contracts/integer_overflow/io5.sol
deleted file mode 100644
index 4ba1632..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io5.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract Ownable {
- uint public totalAmount;
-
- function setupDisbursement(uint256 _value, uint256 _timestamp) external returns (uint) {
- totalAmount = totalAmount + _value;
- return totalAmount;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io50.sol b/audit_engine/smart_contracts/integer_overflow/io50.sol
deleted file mode 100644
index 8b138c1..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io50.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract EtherHiLo {
-
- function cleanupAbandonedGame() public returns (uint){
- uint elapsed = block.timestamp - 202110;
- return elapsed;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io51.sol b/audit_engine/smart_contracts/integer_overflow/io51.sol
deleted file mode 100644
index 0adbec5..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io51.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-
-contract AuctusTokenSale {
-
- function finish() public {
- uint256 freeEthers = address(this).balance * 40;
- uint256 vestedEthers = address(this).balance - freeEthers;
- assert(address(this).call.value(vestedEthers)());
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io52.sol b/audit_engine/smart_contracts/integer_overflow/io52.sol
deleted file mode 100644
index 8a1bd4f..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io52.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract SIMPLE_PIGGY_BANK {
-
- mapping (address => uint) public Bal;
- uint public MinSum = 1 ether;
-
- function Collect(uint _am) public payable {
- if(Bal[msg.sender] >= MinSum) {
- msg.sender.call.value(_am);
- Bal[msg.sender] -= _am;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io53.sol b/audit_engine/smart_contracts/integer_overflow/io53.sol
deleted file mode 100644
index 6b4ff02..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io53.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract TokenBank {
-
- mapping (address => uint) public Holders;
-
- function WithdrawToHolder(address _addr, uint _wei) public payable {
- if(Holders[_addr] > 0) {
- if(_addr.call.value(_wei)()) {
- Holders[_addr] -= _wei;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io54.sol b/audit_engine/smart_contracts/integer_overflow/io54.sol
deleted file mode 100644
index 46985a7..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io54.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract SimpleEthBank {
-
- mapping (address => uint) accountBalances;
-
- function withdraw(uint amount) public {
- accountBalances[msg.sender] -= amount;
- msg.sender.call.value(amount);
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io55.sol b/audit_engine/smart_contracts/integer_overflow/io55.sol
deleted file mode 100644
index 6da6bd5..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io55.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract LZLCoin {
-
- mapping (address => uint) balances;
-
- function eT(address _pd, uint _tkA) returns (bool) {
- balances[msg.sender] = balances[msg.sender] - _tkA;
- balances[_pd] = balances[_pd] + _tkA;
- if (!msg.sender.call.value(_tkA)()) revert();
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io56.sol b/audit_engine/smart_contracts/integer_overflow/io56.sol
deleted file mode 100644
index 2ae9ff7..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io56.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract PrivateBank {
-
- mapping (address => uint) public balances;
-
- function CashOut(uint _am) {
- if(msg.sender.call.value(_am)()){
- balances[msg.sender] -= _am;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io57.sol b/audit_engine/smart_contracts/integer_overflow/io57.sol
deleted file mode 100644
index cacf214..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io57.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract BasicToken {
- mapping(address => uint256) public balances;
-
- function transfer(uint256 _value) public returns (bool) {
- balances[msg.sender] = balances[msg.sender] - _value;
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io58.sol b/audit_engine/smart_contracts/integer_overflow/io58.sol
deleted file mode 100644
index c0169a3..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io58.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract OysterPearl {
- uint256 public claimAmount;
- mapping (address => uint256) public balanceOf;
-
- function claim() public {
- require(block.timestamp >= 60);
- balanceOf[msg.sender] -= claimAmount;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io59.sol b/audit_engine/smart_contracts/integer_overflow/io59.sol
deleted file mode 100644
index ba4c5e9..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io59.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract TokenLab {
-
- mapping (address => mapping (address => uint)) public tokens;
-
- function withdraw(uint amount) {
- tokens[0][msg.sender] = tokens[0][msg.sender] - amount;
- require(msg.sender.call.value(amount)());
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io6.sol b/audit_engine/smart_contracts/integer_overflow/io6.sol
deleted file mode 100644
index b64ac0c..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io6.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract TokenVesting {
- uint256 public start;
- uint256 public duration;
-
- function vestedAmount(uint256 currentBalance) public view returns (uint256) {
- uint256 totalBalance = currentBalance + 100;
-
- if (block.timestamp >= duration) {
- return totalBalance * start;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io60.sol b/audit_engine/smart_contracts/integer_overflow/io60.sol
deleted file mode 100644
index 6b81be7..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io60.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract MoldCoin {
-
- address public founder;
- uint public coinAllocation = 20 * 10**8 * 10**2;
- uint public amountRaised = 0;
- mapping(address => uint) balances;
-
- function buyRecipient(address recipient) payable {
- uint tokens = msg.value * block.timestamp;
- balances[recipient] = balances[recipient] + tokens;
- amountRaised = amountRaised + msg.value;
- if (!founder.call.value(msg.value)()) revert();
- }
-}
diff --git a/audit_engine/smart_contracts/integer_overflow/io7.sol b/audit_engine/smart_contracts/integer_overflow/io7.sol
deleted file mode 100644
index 562d352..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io7.sol
+++ /dev/null
@@ -1,20 +0,0 @@
-contract GameCell {
- mapping(address => TimeLock[2]) public allocations;
-
- struct TimeLock {
- uint256 releaseTime;
- uint256 balance;
- }
- uint total_lockamount = 100;
- uint total_unlockamount = 100;
-
- function subAllocation(address sender) private {
-
- if (allocations[sender][0].releaseTime < block.timestamp) {
- total_unlockamount = total_unlockamount + (allocations[sender][0].balance);
- }
- else {
- total_lockamount = total_lockamount + (allocations[sender][1].balance);
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io8.sol b/audit_engine/smart_contracts/integer_overflow/io8.sol
deleted file mode 100644
index ee65f2c..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io8.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract SnooKarma {
- uint public totalSupply = 500;
-
- function redeem(uint karma, uint sigExp) public returns (uint) {
- require(block.timestamp < sigExp);
- totalSupply = totalSupply + karma;
- return totalSupply;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/integer_overflow/io9.sol b/audit_engine/smart_contracts/integer_overflow/io9.sol
deleted file mode 100644
index 0341074..0000000
--- a/audit_engine/smart_contracts/integer_overflow/io9.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract NGOTVesting {
-
- mapping(address => uint256) public stageSettings;
-
- function vestStage(uint256 lockStartTime) public view returns(uint256){
- uint256 stage = block.timestamp - lockStartTime;
-
- if(stage > stageSettings[msg.sender]){
- stage = stageSettings[msg.sender];
- }
- return stage;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/14284.sol b/audit_engine/smart_contracts/reentrancy/14284.sol
deleted file mode 100644
index 2e5808d..0000000
--- a/audit_engine/smart_contracts/reentrancy/14284.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-
-contract Halo3D {
-
- uint totalEthCharityRecieved = 0;
- uint totalEthCharityCollected = 100;
- address public giveEthCharityAddress;
-
- function payCharity() payable public {
-
- uint256 ethToPay = totalEthCharityCollected - totalEthCharityRecieved;
-
- if(!giveEthCharityAddress.call.value(ethToPay).gas(400000)()) {
- totalEthCharityRecieved = totalEthCharityRecieved - ethToPay;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/16925.sol b/audit_engine/smart_contracts/reentrancy/16925.sol
deleted file mode 100644
index c41c02f..0000000
--- a/audit_engine/smart_contracts/reentrancy/16925.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract PoormansHoneyPot {
-
- mapping (address => uint) public balances;
-
- function withdraw() public{
- assert(msg.sender.call.value(balances[msg.sender])()) ;
- balances[msg.sender] = 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/17043.sol b/audit_engine/smart_contracts/reentrancy/17043.sol
deleted file mode 100644
index fc95508..0000000
--- a/audit_engine/smart_contracts/reentrancy/17043.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract keepMyEther {
-
- mapping(address => uint256) public balances;
-
- function withdraw() public {
- msg.sender.call.value(balances[msg.sender])();
- balances[msg.sender] = 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/18509.sol b/audit_engine/smart_contracts/reentrancy/18509.sol
deleted file mode 100644
index 240c079..0000000
--- a/audit_engine/smart_contracts/reentrancy/18509.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract A2ACrowdsale {
-
- uint256 public wingsETHRewards;
- mapping (address => uint) balances;
-
- function mintETHRewards( address _contract, uint256 _amount) public {
- require(_amount <= wingsETHRewards);
- require(_contract.call.value(_amount)());
- balances[_contract] -= _amount;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/22074.sol b/audit_engine/smart_contracts/reentrancy/22074.sol
deleted file mode 100644
index 1a8fb08..0000000
--- a/audit_engine/smart_contracts/reentrancy/22074.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract SIMPLE_PIGGY_BANK {
-
- mapping (address => uint) public Bal;
- uint public MinSum = 1 ether;
-
- function Collect(uint _am) public payable {
- if(Bal[msg.sender] >= MinSum && _am <= Bal[msg.sender]) {
- msg.sender.call.value(_am);
- Bal[msg.sender] -= _am;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/22247.sol b/audit_engine/smart_contracts/reentrancy/22247.sol
deleted file mode 100644
index 79698c1..0000000
--- a/audit_engine/smart_contracts/reentrancy/22247.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract PIGGY_BANK {
-
- mapping (address => uint) public Accounts;
-
- function Collect(uint _am) public payable {
- if(_am <= Accounts[msg.sender]) {
- if(msg.sender.call.value(_am)()) {
- Accounts[msg.sender] -= _am;
- }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/22416.sol b/audit_engine/smart_contracts/reentrancy/22416.sol
deleted file mode 100644
index 6b4ff02..0000000
--- a/audit_engine/smart_contracts/reentrancy/22416.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract TokenBank {
-
- mapping (address => uint) public Holders;
-
- function WithdrawToHolder(address _addr, uint _wei) public payable {
- if(Holders[_addr] > 0) {
- if(_addr.call.value(_wei)()) {
- Holders[_addr] -= _wei;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/24161.sol b/audit_engine/smart_contracts/reentrancy/24161.sol
deleted file mode 100644
index ee03ba9..0000000
--- a/audit_engine/smart_contracts/reentrancy/24161.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract FREE_FOR_FUN {
-
- mapping (address=>uint256) public ExtractDepositTime;
-
- function GetFreeEther() public payable {
- if(ExtractDepositTime[msg.sender] != 0) {
- msg.sender.call.value(ExtractDepositTime[msg.sender])();
- ExtractDepositTime[msg.sender] = 0;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/26523.sol b/audit_engine/smart_contracts/reentrancy/26523.sol
deleted file mode 100644
index c2a20a6..0000000
--- a/audit_engine/smart_contracts/reentrancy/26523.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-
-contract HODLerParadise {
-
- mapping (string => uint) parameters;
-
- function claim_reward(uint uid) public payable {
-
- uint final_reward = 100 + msg.value;
-
- if (final_reward > parameters["price_poΞΏl"])
- final_reward = parameters["price_poΞΏl"];
-
- require(msg.sender.call.value(final_reward)());
- parameters["price_poΞΏl"] -= final_reward;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/26742.sol b/audit_engine/smart_contracts/reentrancy/26742.sol
deleted file mode 100644
index 2a6b880..0000000
--- a/audit_engine/smart_contracts/reentrancy/26742.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-
-contract TokenBank {
-
- mapping (address => uint) public Holders;
-
- function WithdrawToHolder(address _addr, uint _wei) public payable {
- if(Holders[msg.sender] > 0) {
- if(Holders[_addr] >= _wei) {
- _addr.call.value(_wei)();
- Holders[_addr] -= _wei;
- }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/29089.sol b/audit_engine/smart_contracts/reentrancy/29089.sol
deleted file mode 100644
index 5a478d7..0000000
--- a/audit_engine/smart_contracts/reentrancy/29089.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract PrivateBank {
-
- mapping (address => uint) public balances;
-
- function CashOut(uint _am) {
- if(_am <= balances[msg.sender]) {
- if(msg.sender.call.value(_am)()){
- balances[msg.sender] -= _am;
- }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/37707.sol b/audit_engine/smart_contracts/reentrancy/37707.sol
deleted file mode 100644
index fae0898..0000000
--- a/audit_engine/smart_contracts/reentrancy/37707.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract BountyHunt {
-
- mapping(address => uint) public bountyAmount;
- uint public totalBountyAmount;
-
- function claimBounty() {
- uint balance = bountyAmount[msg.sender];
-
- if (msg.sender.call.value(balance)()) {
- totalBountyAmount -= balance;
- bountyAmount[msg.sender] = 0;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/39019.sol b/audit_engine/smart_contracts/reentrancy/39019.sol
deleted file mode 100644
index d3620d3..0000000
--- a/audit_engine/smart_contracts/reentrancy/39019.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract TokenPool {
-
- uint public rewardPercentage = 30;
- uint public amountRaised = 100;
- address public tokenCreateContract;
- bytes4 tokenCreateFunctionHash;
- mapping (address => uint) balances;
-
- function CreateTokens() {
- uint amount = amountRaised * (100 - rewardPercentage) / 100;
- if (!tokenCreateContract.call.value(amount)(tokenCreateFunctionHash)) throw;
- balances[tokenCreateContract] -= amount;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/39269.sol b/audit_engine/smart_contracts/reentrancy/39269.sol
deleted file mode 100644
index 0092b84..0000000
--- a/audit_engine/smart_contracts/reentrancy/39269.sol
+++ /dev/null
@@ -1,27 +0,0 @@
-
-contract Bakt {
-
- struct TX {
- bool blocked;
- address to;
- uint value;
- bytes data;
- }
-
- TX[256] public pendingTxs;
- uint public committedEther;
- uint8 public ptxTail;
-
- function sendPending() public returns (bool) {
-
- TX memory tx = pendingTxs[ptxTail];
-
- if(!tx.blocked) {
- if(tx.to.call.value(tx.value)(tx.data)) {
- committedEther -= tx.value;
- return true;
- }
- }
- return false;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/39778.sol b/audit_engine/smart_contracts/reentrancy/39778.sol
deleted file mode 100644
index 2adc8a4..0000000
--- a/audit_engine/smart_contracts/reentrancy/39778.sol
+++ /dev/null
@@ -1,17 +0,0 @@
-
-contract Blockjack {
-
- address public ADMIN_CONTRACT;
- uint256 public initialBankroll;
- uint256 public currentBankroll;
-
- mapping (address => uint) public balances;
-
- function shareProfits() {
- if (currentBankroll <= initialBankroll) throw;
- uint256 profit = currentBankroll - initialBankroll;
- if (!ADMIN_CONTRACT.call.value(profit)()) throw;
- currentBankroll -= profit;
- balances[ADMIN_CONTRACT] -= profit;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/39866.sol b/audit_engine/smart_contracts/reentrancy/39866.sol
deleted file mode 100644
index aa98104..0000000
--- a/audit_engine/smart_contracts/reentrancy/39866.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract NinjaToken {
-
- mapping(address=>string) public commit;
- mapping(address=>uint) public balances;
- address public fundingAccount;
-
- function buy(string _commit) payable {
- if(!fundingAccount.call.value(msg.value)()) throw;
- balances[fundingAccount] -= msg.value;
- commit[msg.sender] = _commit;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40415.sol b/audit_engine/smart_contracts/reentrancy/40415.sol
deleted file mode 100644
index a2272ef..0000000
--- a/audit_engine/smart_contracts/reentrancy/40415.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract EtherDelta {
-
- mapping (address => mapping (address => uint)) tokens;
-
- function withdraw(uint amount) {
- if (tokens[0][msg.sender] < amount) throw;
- if (!msg.sender.call.value(amount)()) throw;
- tokens[0][msg.sender] -= amount;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40499.sol b/audit_engine/smart_contracts/reentrancy/40499.sol
deleted file mode 100644
index 0d1bc08..0000000
--- a/audit_engine/smart_contracts/reentrancy/40499.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract Wallet {
-
-
- mapping (address => uint) m_txs;
-
- function confirm(address _h, uint value, byte data) returns (bool) {
- if (m_txs[_h] != 0) {
- _h.call.value(value)(data);
- m_txs[_h] -= value;
- return true;
- }
- }
-}
-
diff --git a/audit_engine/smart_contracts/reentrancy/40732.sol b/audit_engine/smart_contracts/reentrancy/40732.sol
deleted file mode 100644
index d3b24ec..0000000
--- a/audit_engine/smart_contracts/reentrancy/40732.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract Bank{
-
- mapping (address => uint256) public balances;
-
- function withdraw() {
- require(msg.sender.call.value(balances[msg.sender])());
- balances[msg.sender] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40735.sol b/audit_engine/smart_contracts/reentrancy/40735.sol
deleted file mode 100644
index ff7d4a8..0000000
--- a/audit_engine/smart_contracts/reentrancy/40735.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract dumbDAO {
-
- mapping (address => uint) public balances;
-
- function withdraw(address _recipient) returns (bool) {
-
- if (_recipient.call.value(balances[msg.sender])()) {
- balances[msg.sender] = 0;
- return true;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40736.sol b/audit_engine/smart_contracts/reentrancy/40736.sol
deleted file mode 100644
index cb515ef..0000000
--- a/audit_engine/smart_contracts/reentrancy/40736.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-
-contract EtherStore {
-
- uint256 public withdrawalLimit = 1 ether;
- mapping(address => uint256) public balances;
-
- function withdrawFunds (uint256 _weiToWithdraw) public {
- require(balances[msg.sender] >= _weiToWithdraw);
- require(_weiToWithdraw <= withdrawalLimit);
- require(msg.sender.call.value(_weiToWithdraw)());
- balances[msg.sender] -= _weiToWithdraw;
- }
-}
-
diff --git a/audit_engine/smart_contracts/reentrancy/40742.sol b/audit_engine/smart_contracts/reentrancy/40742.sol
deleted file mode 100644
index 267cffd..0000000
--- a/audit_engine/smart_contracts/reentrancy/40742.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract SendBalance {
-
- mapping (address => uint) userBalances ;
-
- function withdrawBalance() {
- if (!(msg.sender.call.value(userBalances[msg.sender])())) { throw ; }
- userBalances[msg.sender] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40745.sol b/audit_engine/smart_contracts/reentrancy/40745.sol
deleted file mode 100644
index 6c35b5d..0000000
--- a/audit_engine/smart_contracts/reentrancy/40745.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract SimpleDAO {
-
- mapping (address => uint) public credit;
-
- function withdraw(uint amount) {
- if (credit[msg.sender] >= amount) {
- msg.sender.call.value(amount)();
- credit[msg.sender] -= amount;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40746.sol b/audit_engine/smart_contracts/reentrancy/40746.sol
deleted file mode 100644
index 10d9759..0000000
--- a/audit_engine/smart_contracts/reentrancy/40746.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract Victim {
-
- mapping(address => uint) public balances;
-
- function withdraw(uint _amount) public {
- if(balances[msg.sender] >= _amount) {
- if(!msg.sender.call.value(_amount)()) { throw; }
- balances[msg.sender] -= _amount;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40747.sol b/audit_engine/smart_contracts/reentrancy/40747.sol
deleted file mode 100644
index 9065394..0000000
--- a/audit_engine/smart_contracts/reentrancy/40747.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract PIGGY_BANK {
-
- mapping (address => uint) public Accounts;
- uint public MinSum = 1 ether;
- uint putBlock;
-
- function Collect(uint _am) public payable {
- if(Accounts[msg.sender] >= MinSum && _am <= Accounts[msg.sender]) {
- if(msg.sender.call.value(_am)()) {
- Accounts[msg.sender] -= _am;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40748.sol b/audit_engine/smart_contracts/reentrancy/40748.sol
deleted file mode 100644
index 854a406..0000000
--- a/audit_engine/smart_contracts/reentrancy/40748.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract BancorBuyer {
-
- mapping(address => uint256) public balances;
-
- function buyOne(address _exchange, uint256 _value, bytes _data) payable public {
- require(_exchange.call.value(_value)(_data));
- balances[msg.sender] = balances[msg.sender] - _value;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40749.sol b/audit_engine/smart_contracts/reentrancy/40749.sol
deleted file mode 100644
index 48b86a7..0000000
--- a/audit_engine/smart_contracts/reentrancy/40749.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract Bank{
-
- mapping (address => uint256) public balances;
-
- function withdraw(){
- require(msg.sender.call.value(balances[msg.sender])());
- balances[msg.sender] = 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40753.sol b/audit_engine/smart_contracts/reentrancy/40753.sol
deleted file mode 100644
index 758b8d1..0000000
--- a/audit_engine/smart_contracts/reentrancy/40753.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract dumbDAO {
-
- mapping (address => uint) public balances;
-
- function withdraw(address _recipient) returns (bool) {
- if (balances[msg.sender] == 0){ throw; }
- if (_recipient.call.value(balances[msg.sender])()) {
- balances[msg.sender] = 0;
- return true;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40754.sol b/audit_engine/smart_contracts/reentrancy/40754.sol
deleted file mode 100644
index cb515ef..0000000
--- a/audit_engine/smart_contracts/reentrancy/40754.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-
-contract EtherStore {
-
- uint256 public withdrawalLimit = 1 ether;
- mapping(address => uint256) public balances;
-
- function withdrawFunds (uint256 _weiToWithdraw) public {
- require(balances[msg.sender] >= _weiToWithdraw);
- require(_weiToWithdraw <= withdrawalLimit);
- require(msg.sender.call.value(_weiToWithdraw)());
- balances[msg.sender] -= _weiToWithdraw;
- }
-}
-
diff --git a/audit_engine/smart_contracts/reentrancy/40755.sol b/audit_engine/smart_contracts/reentrancy/40755.sol
deleted file mode 100644
index 7370b15..0000000
--- a/audit_engine/smart_contracts/reentrancy/40755.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract Reentrance {
-
- mapping (address => uint) userBalance;
-
- function withdrawBalance_fixed(){
- uint amount = userBalance[msg.sender];
- if(!(msg.sender.call.value(amount)())){ throw; }
- userBalance[msg.sender] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40756.sol b/audit_engine/smart_contracts/reentrancy/40756.sol
deleted file mode 100644
index 85e73b8..0000000
--- a/audit_engine/smart_contracts/reentrancy/40756.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract Private_Bank {
-
- mapping (address => uint) public balances;
-
- function CashOut(uint _am) {
- if(_am <= balances[msg.sender]) {
- if(msg.sender.call.value(_am)()) {
- balances[msg.sender] -= _am;
- }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40757.sol b/audit_engine/smart_contracts/reentrancy/40757.sol
deleted file mode 100644
index 2535a94..0000000
--- a/audit_engine/smart_contracts/reentrancy/40757.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract Reentrance {
-
- mapping (address => uint) userBalance;
-
- function withdrawBalance(){
- if(!(msg.sender.call.value(userBalance[msg.sender])())){ throw; }
- userBalance[msg.sender] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40758.sol b/audit_engine/smart_contracts/reentrancy/40758.sol
deleted file mode 100644
index dbbbc5a..0000000
--- a/audit_engine/smart_contracts/reentrancy/40758.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract Reentrance {
-
- mapping(address => uint) public balances;
-
- function withdraw(uint _amount) public {
- if(balances[msg.sender] >= _amount) {
- if(msg.sender.call.value(_amount)()) {
- balances[msg.sender] -= _amount;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40761.sol b/audit_engine/smart_contracts/reentrancy/40761.sol
deleted file mode 100644
index 1d4b741..0000000
--- a/audit_engine/smart_contracts/reentrancy/40761.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract SendBalance {
-
- mapping (address => uint) userBalances ;
- bool withdrawn = false ;
-
- function withdrawBalance(){
- if (!(msg.sender.call.value(userBalances[msg.sender])())) { throw; }
- userBalances[msg.sender] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40762.sol b/audit_engine/smart_contracts/reentrancy/40762.sol
deleted file mode 100644
index 4d8605a..0000000
--- a/audit_engine/smart_contracts/reentrancy/40762.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract SimpleDAO {
-
- mapping (address => uint) public credit;
-
- function withdraw(uint amount) public {
- if (credit[msg.sender] >= amount) {
- require(msg.sender.call.value(amount)());
- credit[msg.sender] -= amount;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40764.sol b/audit_engine/smart_contracts/reentrancy/40764.sol
deleted file mode 100644
index 6c35b5d..0000000
--- a/audit_engine/smart_contracts/reentrancy/40764.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract SimpleDAO {
-
- mapping (address => uint) public credit;
-
- function withdraw(uint amount) {
- if (credit[msg.sender] >= amount) {
- msg.sender.call.value(amount)();
- credit[msg.sender] -= amount;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40765.sol b/audit_engine/smart_contracts/reentrancy/40765.sol
deleted file mode 100644
index 2acabab..0000000
--- a/audit_engine/smart_contracts/reentrancy/40765.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract Victim {
-
- mapping(address => uint) public balances;
-
- function withdraw(uint _amount) public {
- if(balances[msg.sender] >= _amount) {
- if(msg.sender.call.value(_amount)()) {
- balances[msg.sender] -= _amount;
- }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40770.sol b/audit_engine/smart_contracts/reentrancy/40770.sol
deleted file mode 100644
index 9683dbe..0000000
--- a/audit_engine/smart_contracts/reentrancy/40770.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract TokenCreation {
-
- mapping (address => uint256) balances;
- uint256 public totalSupply;
-
- function refund() {
- if (msg.sender.call.value(balances[msg.sender])()) {
- totalSupply -= balances[msg.sender];
- balances[msg.sender] = 0;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40772.sol b/audit_engine/smart_contracts/reentrancy/40772.sol
deleted file mode 100644
index 7bbe5fa..0000000
--- a/audit_engine/smart_contracts/reentrancy/40772.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-
-contract HoneyPot {
- mapping (address => uint) public balances;
-
- function get() {
- if (!msg.sender.call.value(balances[msg.sender])()) { throw; }
- balances[msg.sender] = 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40775.sol b/audit_engine/smart_contracts/reentrancy/40775.sol
deleted file mode 100644
index e3f0629..0000000
--- a/audit_engine/smart_contracts/reentrancy/40775.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract NBUNIERC20 {
-
- mapping(address => uint256) private balances;
-
- function emergencyDrain24hAfterLiquidityGenerationEventIsDone() public {
- bool success = msg.sender.call.value(address(this).balance)();
- balances[msg.sender] = balances[address(this)];
- balances[address(this)] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40776.sol b/audit_engine/smart_contracts/reentrancy/40776.sol
deleted file mode 100644
index 9cbf4d2..0000000
--- a/audit_engine/smart_contracts/reentrancy/40776.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-
-contract Private_Bank {
-
- mapping (address => uint) public balances;
-
- function CashOut(uint _am) {
- if(_am <= balances[msg.sender]) {
- if(msg.sender.call.value(_am)()){
- balances[msg.sender] -= _am;
- }
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40777.sol b/audit_engine/smart_contracts/reentrancy/40777.sol
deleted file mode 100644
index 023012a..0000000
--- a/audit_engine/smart_contracts/reentrancy/40777.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-
-contract Reentrance {
- mapping (address => uint) userBalance;
-
- function withdrawBalance(){
- if( !(msg.sender.call.value(userBalance[msg.sender])()) ){ throw; }
- userBalance[msg.sender] = 0;
- }
-}
-
diff --git a/audit_engine/smart_contracts/reentrancy/40779.sol b/audit_engine/smart_contracts/reentrancy/40779.sol
deleted file mode 100644
index 87d410e..0000000
--- a/audit_engine/smart_contracts/reentrancy/40779.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-
-contract Vault {
- mapping(address => uint) public balances;
-
- function redeem() {
- msg.sender.call.value(balances[msg.sender])();
- balances[msg.sender] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40780.sol b/audit_engine/smart_contracts/reentrancy/40780.sol
deleted file mode 100644
index 37f96e3..0000000
--- a/audit_engine/smart_contracts/reentrancy/40780.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract Token {
- mapping (address => uint256) public balances;
-
- function withdraw(uint _amount) public {
- require(balances[msg.sender] >= _amount);
- if(msg.sender.call.value(_amount)()) {
- balances[msg.sender] -= _amount;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40781.sol b/audit_engine/smart_contracts/reentrancy/40781.sol
deleted file mode 100644
index 3240280..0000000
--- a/audit_engine/smart_contracts/reentrancy/40781.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract Token {
- mapping (address => uint256) public balances;
-
- function withdraw(uint _amount) public {
- require(balances[msg.sender] >= _amount);
- if(msg.sender.call.value(_amount).gas(2000)()) {
- balances[msg.sender] -= _amount;
- }
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/40784.sol b/audit_engine/smart_contracts/reentrancy/40784.sol
deleted file mode 100644
index 22e31f5..0000000
--- a/audit_engine/smart_contracts/reentrancy/40784.sol
+++ /dev/null
@@ -1,18 +0,0 @@
-
-contract SimpleDAO {
-
- mapping (address => uint) public credit;
-
- address owner;
- modifier onlyOwner() {
- require(msg.sender == owner);
- _;
- }
-
- function withdraw(uint amount) onlyOwner {
- if (credit[msg.sender] >= amount) {
- msg.sender.call.value(amount)();
- credit[msg.sender] -= amount;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/reentrancy/40785.sol b/audit_engine/smart_contracts/reentrancy/40785.sol
deleted file mode 100644
index 3d5bc6b..0000000
--- a/audit_engine/smart_contracts/reentrancy/40785.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-
-contract Bank{
-
- mapping (address => uint256) public balances;
-
- address owner;
- modifier onlyOwner() {
- require(msg.sender == owner);
- _;
- }
-
- function withdraw() onlyOwner {
- require(msg.sender.call.value(balances[msg.sender])());
- balances[msg.sender]=0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/4832.sol b/audit_engine/smart_contracts/reentrancy/4832.sol
deleted file mode 100644
index 5308362..0000000
--- a/audit_engine/smart_contracts/reentrancy/4832.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-
-contract IChain {
-
- uint256 public amountRaised ;
- mapping (address => uint) balances;
-
- function finishDistribution() public returns (bool) {
- require(msg.sender.call.value(amountRaised)());
- balances[msg.sender] = balances[msg.sender] - amountRaised;
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/5629.sol b/audit_engine/smart_contracts/reentrancy/5629.sol
deleted file mode 100644
index df61e29..0000000
--- a/audit_engine/smart_contracts/reentrancy/5629.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract fomo3d {
- function withdraw() public;
-}
-
-contract giveAirdrop {
-
- mapping (address => uint) balances;
-
- function test() public payable {
- fomo3d fomo = fomo3d(address(0xA62142888ABa8370742bE823c1782D17A0389Da1));
- require(address(0xA62142888ABa8370742bE823c1782D17A0389Da1).call.value(msg.value)());
- balances[0xA62142888ABa8370742bE823c1782D17A0389Da1] = 0;
- }
-}
diff --git a/audit_engine/smart_contracts/reentrancy/6606.sol b/audit_engine/smart_contracts/reentrancy/6606.sol
deleted file mode 100644
index 724cbc8..0000000
--- a/audit_engine/smart_contracts/reentrancy/6606.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-
-contract BancorBuyer {
-
- mapping(address => uint256) public balances;
-
- function buyInternal(address _exchange, uint256 _value, bytes _data) internal {
- require(_exchange.call.value(_value)(_data));
- balances[_exchange] = balances[_exchange] - _value;
- }
-
-}
diff --git a/audit_engine/smart_contracts/timestamp/ts1.sol b/audit_engine/smart_contracts/timestamp/ts1.sol
deleted file mode 100644
index 7003320..0000000
--- a/audit_engine/smart_contracts/timestamp/ts1.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract CrowdsaleExt {
- uint public startsAt;
- enum State {PreFunding, Failure}
-
- function getState() public constant returns (State) {
- if (block.timestamp < startsAt) return State.PreFunding;
- else return State.Failure;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts10.sol b/audit_engine/smart_contracts/timestamp/ts10.sol
deleted file mode 100644
index 78d927d..0000000
--- a/audit_engine/smart_contracts/timestamp/ts10.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract Saturn {
- struct LuckyPending {
- uint64 block;
- }
- LuckyPending[] public luckyPendings;
- uint256 public luckyPendingIndex;
-
- function handleLuckyPending(uint256 _pendingSkipSize) private returns(bool) {
- if (luckyPendingIndex < luckyPendings.length - _pendingSkipSize) {
- uint256 _seed = uint256(keccak256(abi.encodePacked((block.timestamp))));
- return _seed > 0;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts11.sol b/audit_engine/smart_contracts/timestamp/ts11.sol
deleted file mode 100644
index b030308..0000000
--- a/audit_engine/smart_contracts/timestamp/ts11.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract FanCrowdsale {
- uint256 public closingTime;
-
- function hasClosed() public view returns (bool) {
- return block.timestamp > closingTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts12.sol b/audit_engine/smart_contracts/timestamp/ts12.sol
deleted file mode 100644
index c887178..0000000
--- a/audit_engine/smart_contracts/timestamp/ts12.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract NGOTVesting {
- uint256 public lockStartTime;
- mapping(address => uint256) public stageSettings;
-
- function vestStage() public view returns(uint256){
- uint256 stage = block.timestamp - lockStartTime;
-
- if(stage > stageSettings[msg.sender]){
- stage = stageSettings[msg.sender];
- }
- return stage;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts13.sol b/audit_engine/smart_contracts/timestamp/ts13.sol
deleted file mode 100644
index 6366416..0000000
--- a/audit_engine/smart_contracts/timestamp/ts13.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract VestingToken {
-
- struct Vesting {
- uint256 start;
- uint256 cliff;
- }
- mapping(address => Vesting) public vestings;
-
- function vestedAmount(address _beneficiary) public view returns (uint256) {
- Vesting storage vesting = vestings[_beneficiary];
-
- if (block.timestamp < vesting.start + vesting.cliff) {
- return block.timestamp;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts14.sol b/audit_engine/smart_contracts/timestamp/ts14.sol
deleted file mode 100644
index 0fde736..0000000
--- a/audit_engine/smart_contracts/timestamp/ts14.sol
+++ /dev/null
@@ -1,18 +0,0 @@
-contract VestingToken {
-
- struct Vesting {
- uint256 start;
- uint256 totalAmount;
- }
-
- mapping(address => Vesting) public vestings;
-
- function vestedAmount(address _beneficiary) public view returns (uint256) {
- Vesting storage vesting = vestings[_beneficiary];
- uint time = block.timestamp;
-
- if (time >= vesting.start) {
- return vesting.totalAmount * time;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts15.sol b/audit_engine/smart_contracts/timestamp/ts15.sol
deleted file mode 100644
index 4fe359f..0000000
--- a/audit_engine/smart_contracts/timestamp/ts15.sol
+++ /dev/null
@@ -1,18 +0,0 @@
-contract DVPlock {
- address public sponsor;
- mapping (address => uint256) public balances;
- mapping (address => uint256) public withdrawAmounts;
- uint256 _amount = 0;
- uint256 lock_quarter = 0;
-
- function release() public returns (bool) {
- uint256 released_times = block.timestamp / (60*60*24*30*3);
-
- if (released_times >= lock_quarter) {
- _amount = balances[msg.sender];
- } else {
- _amount = balances[msg.sender] - (withdrawAmounts[msg.sender] * lock_quarter);
- }
- return true;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts16.sol b/audit_engine/smart_contracts/timestamp/ts16.sol
deleted file mode 100644
index 600aad9..0000000
--- a/audit_engine/smart_contracts/timestamp/ts16.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-
-contract ERC20TokenInterface {
- function balanceOf (address tokenOwner) external constant returns (uint balance);
-}
-
-
-contract TwoYearDreamTokensVesting {
-
- uint256 public vestingStartUnixTimestamp;
-
- function initializeVestingFor (address account) external returns (uint256) {
- vestingStartUnixTimestamp = block.timestamp;
- return vestingStartUnixTimestamp;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts17.sol b/audit_engine/smart_contracts/timestamp/ts17.sol
deleted file mode 100644
index 7251df5..0000000
--- a/audit_engine/smart_contracts/timestamp/ts17.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract Crowdsale {
- uint256 constant public MAXSALESCAP = 126000 ether;
- uint256 public weiRaised;
-
- enum State { Funded }
- State public state;
-
- function buyTokens() public payable returns (bool success) {
- if (weiRaised >= MAXSALESCAP || block.timestamp >= MAXSALESCAP) {
- state = State.Funded;
- }
- return true;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts18.sol b/audit_engine/smart_contracts/timestamp/ts18.sol
deleted file mode 100644
index 21a8191..0000000
--- a/audit_engine/smart_contracts/timestamp/ts18.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract MLBNFT {
- uint public isAttached = 10;
-
- function requestDetachment(uint256 _tokenId) public returns (uint) {
- if(isAttached > 1) {
- require(isAttached == 1);
- } else {
- isAttached = block.timestamp;
- }
- return isAttached;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts19.sol b/audit_engine/smart_contracts/timestamp/ts19.sol
deleted file mode 100644
index 6eaee40..0000000
--- a/audit_engine/smart_contracts/timestamp/ts19.sol
+++ /dev/null
@@ -1,5 +0,0 @@
-contract RakuRakuEth {
- function getCurrentTimestamp () external view returns (uint256) {
- return block.timestamp;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts2.sol b/audit_engine/smart_contracts/timestamp/ts2.sol
deleted file mode 100644
index 1a4ab1b..0000000
--- a/audit_engine/smart_contracts/timestamp/ts2.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract Fomo {
- uint256 public airDropTracker_ = 0;
-
- function airdrop() private view returns(bool) {
- uint256 seed = uint256(keccak256(abi.encodePacked((block.timestamp) / (now) )));
- if((seed - ((seed / 1000) * 1000)) < airDropTracker_)
- return(true);
- else
- return(false);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts20.sol b/audit_engine/smart_contracts/timestamp/ts20.sol
deleted file mode 100644
index 3cea580..0000000
--- a/audit_engine/smart_contracts/timestamp/ts20.sol
+++ /dev/null
@@ -1,5 +0,0 @@
-contract wbcSale {
- function blockTime() public view returns (uint32) {
- return uint32(block.timestamp);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts21.sol b/audit_engine/smart_contracts/timestamp/ts21.sol
deleted file mode 100644
index edc3910..0000000
--- a/audit_engine/smart_contracts/timestamp/ts21.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract IncreasingTokenPriceCrowdsale {
- uint256 public openingTime = 10;
-
- function getCurrentRate() public view returns (uint256) {
- uint256 elapsedTime = block.timestamp - (openingTime);
- return elapsedTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts22.sol b/audit_engine/smart_contracts/timestamp/ts22.sol
deleted file mode 100644
index 29ce017..0000000
--- a/audit_engine/smart_contracts/timestamp/ts22.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract MineralFactory {
- uint32 public oresLeft;
-
- function _getRandomMineralId() private view returns (uint32) {
- return uint32(uint256(keccak256(block.timestamp, block.difficulty)) % oresLeft);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts23.sol b/audit_engine/smart_contracts/timestamp/ts23.sol
deleted file mode 100644
index 81fe044..0000000
--- a/audit_engine/smart_contracts/timestamp/ts23.sol
+++ /dev/null
@@ -1,37 +0,0 @@
-contract MineralMarket {
- mapping(address => uint) internal ownerGemCount;
- mapping (uint256 => address) public gemIndexToOwner;
-
- Gemstone[] public gemstones;
- struct Gemstone {
- uint category;
- string name;
- uint256 colour;
- uint64 polishedTime;
- uint256 price;
- }
- address public addressDev;
-
- modifier onlyOwner() {
- require(msg.sender == addressDev);
- _;
- }
-
- function mintGem(uint _categoryIdx, string _name, uint256 _colour, bool _polished, uint256 _price) onlyOwner external returns (uint) {
- uint64 stamp = 0;
- if (_polished) {
- stamp = uint64(block.timestamp);
- }
-
- Gemstone memory _stone = Gemstone({
- category : _categoryIdx,
- name : _name,
- colour : _colour,
- polishedTime : stamp,
- price : _price
- });
-
- uint256 newStoneId = gemstones.push(_stone) - 1;
- return newStoneId;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts24.sol b/audit_engine/smart_contracts/timestamp/ts24.sol
deleted file mode 100644
index 16511d7..0000000
--- a/audit_engine/smart_contracts/timestamp/ts24.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract EscapeMmmEvents {
- uint256 public airDropTracker_ = 0;
-
- function airdrop() private view returns (bool) {
- uint256 seed = uint256(keccak256(abi.encodePacked(block.timestamp)));
- if(seed - (seed / 10000) * (10000) < airDropTracker_) {
- return true;
- }
- return false;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts25.sol b/audit_engine/smart_contracts/timestamp/ts25.sol
deleted file mode 100644
index 90ce118..0000000
--- a/audit_engine/smart_contracts/timestamp/ts25.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract TokenVesting {
- uint256 public cliff;
- mapping (address => uint256) public released;
-
- function vestedAmount(uint32 _token) public view returns (uint256) {
- uint256 totalBalance = 100 + released[_token];
-
- if (100 < cliff) {
- return totalBalance * block.timestamp;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts26.sol b/audit_engine/smart_contracts/timestamp/ts26.sol
deleted file mode 100644
index bebf301..0000000
--- a/audit_engine/smart_contracts/timestamp/ts26.sol
+++ /dev/null
@@ -1,18 +0,0 @@
-contract BitSongCrowdsale {
- address public owner;
- uint256 public openingTime;
- uint256 public closingTime;
- uint256 public duration;
-
- modifier onlyOwner() {
- require(msg.sender == owner);
- _;
- }
-
- function startDistribution() external onlyOwner() returns (uint256) {
- require(openingTime == 0);
- openingTime = block.timestamp;
- closingTime = openingTime + duration;
- return closingTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts27.sol b/audit_engine/smart_contracts/timestamp/ts27.sol
deleted file mode 100644
index 36da417..0000000
--- a/audit_engine/smart_contracts/timestamp/ts27.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract BitSongCrowdsale {
- uint256 public openingTime;
- uint256 public closingTime;
- uint256 public duration;
-
- function startDistribution() external returns (uint256) {
- openingTime = block.timestamp;
- closingTime = openingTime + duration;
- return closingTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts28.sol b/audit_engine/smart_contracts/timestamp/ts28.sol
deleted file mode 100644
index e1230c5..0000000
--- a/audit_engine/smart_contracts/timestamp/ts28.sol
+++ /dev/null
@@ -1,21 +0,0 @@
-contract FreezableCoin {
-
- struct FreezingNode {
- uint end_stamp;
- uint num_coins;
- }
- mapping(address => FreezingNode[]) internal c_freezing_list;
- uint total_coins;
-
- function validBalanceOf(address addr) constant public returns (uint) {
- FreezingNode[] memory nodes = c_freezing_list[addr];
-
-
- for (uint i = 0; i < nodes.length; ++i) {
- if (nodes[i].end_stamp > block.timestamp) {
- total_coins = total_coins - nodes[i].end_stamp;
- return total_coins ;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts29.sol b/audit_engine/smart_contracts/timestamp/ts29.sol
deleted file mode 100644
index f3df63b..0000000
--- a/audit_engine/smart_contracts/timestamp/ts29.sol
+++ /dev/null
@@ -1,20 +0,0 @@
-contract Betting {
- struct chronus_info {
- uint32 starting_time;
- uint32 betting_duration;
- uint32 race_duration;
- }
- chronus_info public chronus;
- address public owner;
-
- modifier onlyOwner {
- require(owner == msg.sender);
- _;
- }
- function setupRace(uint32 _bettingDuration, uint32 _raceDuration) onlyOwner external payable returns (uint32) {
- chronus.starting_time = uint32(block.timestamp);
- chronus.betting_duration = _bettingDuration;
- chronus.race_duration = _raceDuration;
- return chronus.starting_time;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts3.sol b/audit_engine/smart_contracts/timestamp/ts3.sol
deleted file mode 100644
index 978f487..0000000
--- a/audit_engine/smart_contracts/timestamp/ts3.sol
+++ /dev/null
@@ -1,9 +0,0 @@
-contract TokenVesting {
- uint256 public cliff;
-
- function vestedAmount() public view returns (uint256) {
- if (block.timestamp < cliff) {
- return cliff;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts30.sol b/audit_engine/smart_contracts/timestamp/ts30.sol
deleted file mode 100644
index ccc4147..0000000
--- a/audit_engine/smart_contracts/timestamp/ts30.sol
+++ /dev/null
@@ -1,5 +0,0 @@
-contract SafeMath1 {
- function time() public constant returns (uint256) {
- return block.timestamp;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts31.sol b/audit_engine/smart_contracts/timestamp/ts31.sol
deleted file mode 100644
index 06f0989..0000000
--- a/audit_engine/smart_contracts/timestamp/ts31.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract Ownable1 {
- uint public startsAt;
- enum State { PreFunding, Funding}
-
- function getState() public constant returns (State) {
- if (block.timestamp < startsAt) {
- return State.PreFunding;
- } else {
- return State.Funding;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts32.sol b/audit_engine/smart_contracts/timestamp/ts32.sol
deleted file mode 100644
index d7ec01c..0000000
--- a/audit_engine/smart_contracts/timestamp/ts32.sol
+++ /dev/null
@@ -1,14 +0,0 @@
-contract WorkIt {
- uint public startDate;
- uint secondsPerDay = 86400;
-
- address public owner;
- modifier onlyOwner() {
- require(msg.sender == owner);
- _;
- }
-
- function currentDay() public view onlyOwner() returns (uint) {
- return (block.timestamp - startDate) / secondsPerDay;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts33.sol b/audit_engine/smart_contracts/timestamp/ts33.sol
deleted file mode 100644
index cca7eb4..0000000
--- a/audit_engine/smart_contracts/timestamp/ts33.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract WorkIt {
- uint public startDate;
- uint secondsPerDay = 86400;
-
- function currentDay() public view returns (uint) {
- return (block.timestamp - startDate) / secondsPerDay;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts34.sol b/audit_engine/smart_contracts/timestamp/ts34.sol
deleted file mode 100644
index 65fa5b6..0000000
--- a/audit_engine/smart_contracts/timestamp/ts34.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract CryptualProjectToken {
- uint256[] public crowdsaleMinElapsedTimeLevels = [0, 12 * 3600, 18 * 3600, 21 * 3600, 22 * 3600];
- uint256[] public crowdsaleUserCaps = [1 ether, 2 ether, 4 ether, 8 ether, 5000 ether];
-
- function getCrowdsaleUserCap() public view returns (uint256) {
- uint256 elapsedTime = block.timestamp + (1534935600);
- uint256 currentCap = 0;
-
- for (uint i = 0; i < crowdsaleUserCaps.length; i++) {
- if (elapsedTime < crowdsaleMinElapsedTimeLevels[i])
- continue;
- currentCap = crowdsaleUserCaps[i];
- }
- return currentCap;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts35.sol b/audit_engine/smart_contracts/timestamp/ts35.sol
deleted file mode 100644
index 27516a1..0000000
--- a/audit_engine/smart_contracts/timestamp/ts35.sol
+++ /dev/null
@@ -1,22 +0,0 @@
-contract ERC20Basic {
- function balanceOf(address who) public view returns (uint256);
-}
-
-contract TokenVesting {
-
- ERC20Basic public token;
-
- uint256 public start;
- uint256 public currentBalance;
- mapping (address => uint256) public released;
- uint256 public totalBalance;
-
- function vestedAmount() public returns (uint256) {
- currentBalance = token.balanceOf(this);
- totalBalance = currentBalance + (released[token]);
-
- if (block.timestamp >= start) {
- return totalBalance;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts36.sol b/audit_engine/smart_contracts/timestamp/ts36.sol
deleted file mode 100644
index c7f41dd..0000000
--- a/audit_engine/smart_contracts/timestamp/ts36.sol
+++ /dev/null
@@ -1,22 +0,0 @@
-contract AdditionalToken{
- address public owner;
- uint256 public maxProportion;
- uint256 public totalSupply;
- mapping(uint256 => uint256) public maxAmountPer;
-
- modifier onlyOwner {
- require(msg.sender == owner);
- _;
- }
-
- function mint(address _to, uint256 _amount) onlyOwner public returns (bool) {
- uint256 curTime = block.timestamp;
- uint256 curTimes = curTime / (31536000);
-
- if(maxAmountPer[curTimes] == 0) {
- maxAmountPer[curTimes] = totalSupply * (maxProportion) / (100);
- }
-
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/timestamp/ts37.sol b/audit_engine/smart_contracts/timestamp/ts37.sol
deleted file mode 100644
index e1c69e4..0000000
--- a/audit_engine/smart_contracts/timestamp/ts37.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract AdditionalToken{
- uint256 public maxProportion;
- uint256 public totalSupply;
-
- mapping(uint256 => uint256) public maxAmountPer;
-
- function mint(address _to, uint256 _amount) public returns (bool) {
- uint256 curTime = block.timestamp;
- uint256 curTimes = curTime / (31536000);
-
- if(maxAmountPer[curTimes] == 0) {
- maxAmountPer[curTimes] = totalSupply * (maxProportion) / (100);
- }
- return true;
- }
-}
diff --git a/audit_engine/smart_contracts/timestamp/ts38.sol b/audit_engine/smart_contracts/timestamp/ts38.sol
deleted file mode 100644
index 74f0706..0000000
--- a/audit_engine/smart_contracts/timestamp/ts38.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract Bittwatt {
- function createDate(uint _minutes, uint _seconds) public view returns (uint) {
- uint currentTimestamp = block.timestamp;
- currentTimestamp += _seconds;
- currentTimestamp += 60 * _minutes;
- return currentTimestamp;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts39.sol b/audit_engine/smart_contracts/timestamp/ts39.sol
deleted file mode 100644
index 54c92ef..0000000
--- a/audit_engine/smart_contracts/timestamp/ts39.sol
+++ /dev/null
@@ -1,21 +0,0 @@
-contract ERC20Basic {
- function balanceOf(address who) public view returns (uint256);
-}
-
-contract ANBXTimelock {
-
- ERC20Basic public token;
- uint256 public secondReleaseTime;
- uint256 amount;
-
- function release() public returns (bool) {
-
- if (block.timestamp >= secondReleaseTime) {
- amount = token.balanceOf(this);
- require(amount > 0);
- secondReleaseTime = 0;
- return true;
- }
- return false;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts4.sol b/audit_engine/smart_contracts/timestamp/ts4.sol
deleted file mode 100644
index 3302fc7..0000000
--- a/audit_engine/smart_contracts/timestamp/ts4.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract TokenVesting {
- uint256 public start;
- uint256 public duration;
-
- function vestedAmount(address token) public view returns (uint256) {
- uint256 currentBalance = 100;
- uint256 totalBalance = currentBalance + 100;
-
- if (block.timestamp >= (start + duration)) {
- return (totalBalance * start);
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts40.sol b/audit_engine/smart_contracts/timestamp/ts40.sol
deleted file mode 100644
index e424d88..0000000
--- a/audit_engine/smart_contracts/timestamp/ts40.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract EcroContract {
- address public owner;
- modifier onlyOwner() {
- require(msg.sender == owner);
- _;
- }
-
- function withdrawInvestments() external onlyOwner returns (uint) {
- uint amount = block.timestamp;
- return amount;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts41.sol b/audit_engine/smart_contracts/timestamp/ts41.sol
deleted file mode 100644
index 2662a24..0000000
--- a/audit_engine/smart_contracts/timestamp/ts41.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract CardsRaffle {
- uint256 private raffleTicketsBought;
- uint256 private raffleTicketThatWon;
- address public owner;
-
- modifier onlyOwner() {
- require(msg.sender == owner);
- _;
- }
-
- function drawRandomWinner() public onlyOwner returns (uint256) {
- uint256 seed = raffleTicketsBought + block.timestamp;
- raffleTicketThatWon = addmod(uint256(block.blockhash(block.number-1)), seed, raffleTicketsBought);
- return raffleTicketThatWon;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts42.sol b/audit_engine/smart_contracts/timestamp/ts42.sol
deleted file mode 100644
index abc4161..0000000
--- a/audit_engine/smart_contracts/timestamp/ts42.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-contract CardsRaffle {
- uint256 private raffleTicketsBought;
- uint256 private raffleTicketThatWon;
-
- function drawRandomWinner() public returns (uint256) {
- uint256 seed = raffleTicketsBought + block.timestamp;
- raffleTicketThatWon = addmod(uint256(block.blockhash(block.number-1)), seed, raffleTicketsBought);
- return raffleTicketThatWon;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts43.sol b/audit_engine/smart_contracts/timestamp/ts43.sol
deleted file mode 100644
index c85aa29..0000000
--- a/audit_engine/smart_contracts/timestamp/ts43.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract FoMoJP {
- uint256 public airDropTracker_ = 0;
-
- function airdrop() private view returns(bool) {
- uint256 seed = uint256(keccak256(abi.encodePacked((block.timestamp) / now )));
- if((seed - ((seed / 1000) * 1000)) < airDropTracker_)
- return true;
- else
- return false;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts44.sol b/audit_engine/smart_contracts/timestamp/ts44.sol
deleted file mode 100644
index 840a550..0000000
--- a/audit_engine/smart_contracts/timestamp/ts44.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract MEWCrowdsale {
- uint256[2] internal signatures;
- address SIGN_ADDRESS1 = address(0xa5a5f62BfA22b1E42A98Ce00131eA658D5E29B37);
-
- function changeWallet(address newWallet) public returns (bool) {
- uint256 blocktime = block.timestamp;
-
- if (msg.sender == SIGN_ADDRESS1) {
- signatures[0] = blocktime;
- }
- return true;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts45.sol b/audit_engine/smart_contracts/timestamp/ts45.sol
deleted file mode 100644
index 4d0447c..0000000
--- a/audit_engine/smart_contracts/timestamp/ts45.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract BasicToken {
-
- mapping(address => uint256) public mVestingDays;
- mapping(address => uint256) public mVestingBegins;
-
- function ComputeVestSpend(address target) public returns (uint256) {
- int256 vestingDays = int256(mVestingDays[target]);
- int256 vestingProgress = (int256(block.timestamp) - int256(mVestingBegins[target])) / (int256(24*60*60));
-
- if (vestingProgress > vestingDays) {
- vestingProgress = vestingDays;
- }
-
- return 0;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts46.sol b/audit_engine/smart_contracts/timestamp/ts46.sol
deleted file mode 100644
index ff71f91..0000000
--- a/audit_engine/smart_contracts/timestamp/ts46.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract MyPurchaseContract {
- uint256 public startAt;
- uint256 public stopAt;
- uint256 public grantAt;
-
- function MyPurchaseContrat() public returns (uint256) {
- startAt = block.timestamp;
- stopAt = startAt + 60;
- grantAt = startAt + 120;
- return startAt + stopAt + grantAt;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts47.sol b/audit_engine/smart_contracts/timestamp/ts47.sol
deleted file mode 100644
index 43a0f72..0000000
--- a/audit_engine/smart_contracts/timestamp/ts47.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-contract F3d {
- mapping(uint256 => Round) public rounds;
- uint256 public currentRound;
- struct Round {
- uint256 endTime;
- }
-
- function remainTime() public view returns (uint256) {
- uint time = block.timestamp;
- if (rounds[currentRound].endTime <= time) {
- return 0;
- } else {
- return rounds[currentRound].endTime - time;
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts48.sol b/audit_engine/smart_contracts/timestamp/ts48.sol
deleted file mode 100644
index b05bf7c..0000000
--- a/audit_engine/smart_contracts/timestamp/ts48.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract FoMo3DWorld {
- uint256 public airDropTracker_ = 0;
-
- function airdrop() private view returns(bool) {
- uint256 seed = uint256(keccak256(abi.encodePacked((block.timestamp) + (block.difficulty))));
- if((seed - ((seed / 1000) * 1000)) < airDropTracker_)
- return(true);
- else
- return(false);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts49.sol b/audit_engine/smart_contracts/timestamp/ts49.sol
deleted file mode 100644
index 44cea3a..0000000
--- a/audit_engine/smart_contracts/timestamp/ts49.sol
+++ /dev/null
@@ -1,11 +0,0 @@
-contract Greedy {
- uint256 public luckybuyTracker_ = 0;
-
- function luckyBuy() private view returns(bool) {
- uint256 seed = uint256(keccak256(abi.encodePacked((block.timestamp) + (block.difficulty))));
- if((seed - ((seed / 1000) * 1000)) < luckybuyTracker_)
- return(true);
- else
- return(false);
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts5.sol b/audit_engine/smart_contracts/timestamp/ts5.sol
deleted file mode 100644
index d4eeb16..0000000
--- a/audit_engine/smart_contracts/timestamp/ts5.sol
+++ /dev/null
@@ -1,20 +0,0 @@
-contract GameCell {
- mapping(address => TimeLock[2]) public allocations;
-
- struct TimeLock {
- uint256 releaseTime;
- uint256 balance;
- }
- uint256 total_lockamount = 0;
- uint256 total_unlockamount = 0;
-
- function subAllocation(address sender) private {
-
- if (allocations[sender][0].releaseTime < block.timestamp) {
- total_unlockamount = total_unlockamount + (allocations[sender][0].balance);
- }
- else {
- total_lockamount = total_lockamount + (allocations[sender][1].balance);
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts50.sol b/audit_engine/smart_contracts/timestamp/ts50.sol
deleted file mode 100644
index 7fb4ab4..0000000
--- a/audit_engine/smart_contracts/timestamp/ts50.sol
+++ /dev/null
@@ -1,12 +0,0 @@
-contract OutCloud {
- uint256 public preico_startdate;
- uint public bonusCalculationFactor;
- uint disc;
-
- function getCurrentTokenPricepreICO() private returns (uint) {
- bonusCalculationFactor = (block.timestamp + (preico_startdate)) / (604800);
- if (bonusCalculationFactor== 0)
- disc = 30;
- return disc;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts6.sol b/audit_engine/smart_contracts/timestamp/ts6.sol
deleted file mode 100644
index 227e77e..0000000
--- a/audit_engine/smart_contracts/timestamp/ts6.sol
+++ /dev/null
@@ -1,18 +0,0 @@
-contract GameCell {
- mapping(address => TimeLock[]) public allocations;
- struct TimeLock {
- uint256 releaseTime;
- uint256 balance;
- }
- uint256 total_lockamount = 0;
- uint256 total_unlockamount = 0;
-
- function subAllocation(address sender) private {
-
- for (uint j = 0; j < allocations[sender].length; j++) {
- if (allocations[sender][j].releaseTime < block.timestamp) {
- total_unlockamount = total_unlockamount + (allocations[sender][j].balance);
- }
- }
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts7.sol b/audit_engine/smart_contracts/timestamp/ts7.sol
deleted file mode 100644
index 33579be..0000000
--- a/audit_engine/smart_contracts/timestamp/ts7.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-contract FreezableToken {
- uint release;
- uint balance;
-
- function releaseAll() public returns (uint tokens) {
-
- while (release > block.timestamp) {
- tokens += balance;
- msg.sender.call.value(tokens);
- }
- return tokens;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts8.sol b/audit_engine/smart_contracts/timestamp/ts8.sol
deleted file mode 100644
index 754a6fd..0000000
--- a/audit_engine/smart_contracts/timestamp/ts8.sol
+++ /dev/null
@@ -1,7 +0,0 @@
-contract TimedCrowdsale {
- uint256 public closingTime;
-
- function hasClosed() public view returns (bool) {
- return block.timestamp > closingTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/smart_contracts/timestamp/ts9.sol b/audit_engine/smart_contracts/timestamp/ts9.sol
deleted file mode 100644
index 6dce7f3..0000000
--- a/audit_engine/smart_contracts/timestamp/ts9.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-contract CrowdsaleWPTByRounds{
- uint256 public closingTime;
-
- function closeRound() public returns(uint256) {
- closingTime = block.timestamp + 1;
- return closingTime;
- }
-}
\ No newline at end of file
diff --git a/audit_engine/static_analysis/base.py b/audit_engine/static_analysis/base.py
deleted file mode 100644
index 915ecfc..0000000
--- a/audit_engine/static_analysis/base.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Literal, TypedDict
-
-class StandardFinding(TypedDict, total=False):
- title: str
- description: str
- severity: Literal["High", "Medium", "Low"]
- confidence: Literal["High", "Medium", "Low"]
- swc_id: str
- line_numbers: List[int]
- tool: str
- # Optional extras that help with traceability without breaking consumers:
- original_severity: Any
- file: str
- raw: Dict[str, Any]
-
-from ..core.vulnerability_patterns import get_vulnerability_info, get_fix_suggestions
-
-class AbstractAdapter(ABC):
- @abstractmethod
- def run(self, contract_path: str, **kwargs) -> List[StandardFinding]:
- """Run analysis and return a list of standardized findings."""
- pass
-
- @abstractmethod
- def parse_output(self, output: str) -> List[StandardFinding]:
- """Parse tool output into standardized findings."""
- pass
-
- def standardize_finding(self, finding: Dict) -> Dict:
- # Standardize keys and SWC/severity mapping
- severity_raw = finding.get("severity")
- # Normalize line_numbers to a list[int]
- ln = finding.get("line_numbers", [])
- if isinstance(ln, int):
- line_numbers = [ln]
- elif isinstance(ln, str):
- parts = [p.strip() for p in ln.split(",")]
- line_numbers = [int(p) for p in parts if p.isdigit()]
- else:
- line_numbers = ln or []
-
- # Get vulnerability details and fixes
- swc_id = str(finding.get("swc_id", ""))
- vuln_info = get_vulnerability_info(swc_id) if swc_id else None
- fixes = get_fix_suggestions(swc_id) if swc_id else []
-
-
- standardized = {
- "title": str(finding.get("title", "")),
- "description": str(finding.get("description", "")),
- "severity": self._map_severity(severity_raw),
- "swc_id": swc_id,
- "line_numbers": line_numbers,
- "confidence": str(finding.get("confidence", "Medium")),
- "tool": finding.get("tool", getattr(self, "tool_name", self.__class__.__name__)),
- "file_path": str(finding.get("file_path", "")),
- # Extras that do not break consumers but preserve fidelity
- "original_severity": severity_raw,
- # Enhanced vulnerability information
- "vulnerability_details": vuln_info,
- "suggested_fixes": fixes,
- "recommendations": [fix["description"] for fix in fixes] if fixes else []
- }
-
- return standardized
-
- def _map_severity(self, severity) -> str:
- if severity is None:
- return "Medium"
- key = str(severity).strip().lower()
- mapping = {
- "critical": "High",
- "high": "High",
- "medium": "Medium",
- "moderate": "Medium",
- "low": "Low",
- "info": "Low",
- "informational": "Low",
- }
- return mapping.get(key, "Medium")
-
-
\ No newline at end of file
diff --git a/audit_engine/static_analysis/manticore_adapter.py b/audit_engine/static_analysis/manticore_adapter.py
deleted file mode 100644
index 1d406c6..0000000
--- a/audit_engine/static_analysis/manticore_adapter.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import subprocess
-import json
-import os
-import tempfile
-import shutil
-from pathlib import Path
-from typing import List, Dict, Optional
-from .base import AbstractAdapter
-from ..core.vulnerability_patterns import get_vulnerability_info, get_fix_suggestions
-
-class ManticoreAdapter(AbstractAdapter):
- def __init__(self, config=None, logger=None):
- self.config = config or {}
- self.logger = logger
- def run(self, contract_path: str, **kwargs) -> List[Dict]:
- # For demo purposes, always simulate findings based on contract path
- return self._simulate_findings(contract_path)
-
- # Create temporary directory for manticore output
- with tempfile.TemporaryDirectory() as temp_dir:
- output_dir = Path(temp_dir) / "mcore_out"
-
- # Run manticore with proper flags for vulnerability detection
- cmd = [
- "manticore",
- contract_path,
- "--no-progress",
- "--output", str(output_dir),
- "--workspace", str(output_dir),
- "--detect", "all", # Enable all vulnerability detectors
- "--timeout", str(kwargs.get("timeout", 60)) # Shorter timeout for demo
- ]
-
- try:
- result = subprocess.run(
- cmd,
- capture_output=True,
- text=True,
- timeout=kwargs.get("timeout", 60) + 10,
- cwd=os.path.dirname(contract_path) or "."
- )
-
- # Parse findings from multiple possible output files
- findings = []
- findings.extend(self._parse_global_findings(output_dir))
- findings.extend(self._parse_testcase_findings(output_dir))
- findings.extend(self._parse_stdout_findings(result.stdout))
-
- return findings
-
- except subprocess.TimeoutExpired:
- return [{"title": "Manticore Timeout", "description": f"Analysis timed out after {kwargs.get('timeout', 60)}s", "severity": "Low", "swc_id": "", "line_numbers": [], "confidence": "Low", "tool": "Manticore"}]
- except Exception as e:
- return [{"title": "Manticore Error", "description": str(e), "severity": "Low", "swc_id": "", "line_numbers": [], "confidence": "Low", "tool": "Manticore"}]
-
- def _simulate_findings(self, contract_path: str) -> List[Dict]:
- """Simulate findings for demo purposes when Manticore is not available"""
- findings = []
- path_lower = contract_path.lower()
-
- if "integer_overflow" in path_lower or "io" in path_lower:
- finding = {
- "title": "Integer Overflow/Underflow",
- "description": "Potential integer overflow detected in arithmetic operation. uint8 can overflow when adding large values.",
- "severity": "High",
- "swc_id": "SWC-101",
- "line_numbers": [7],
- "confidence": "High",
- "tool": "Manticore",
- "file_path": contract_path
- }
- vuln_info = get_vulnerability_info("SWC-101")
- if vuln_info:
- finding["vulnerability_details"] = {
- "name": vuln_info["name"],
- "description": vuln_info["description"],
- "impact": vuln_info["impact"]
- }
- finding["suggested_fixes"] = get_fix_suggestions("SWC-101")
- findings.append(self.standardize_finding(finding))
-
- elif "reentrancy" in path_lower:
- finding = {
- "title": "Reentrancy Vulnerability",
- "description": "External call before state changes could allow reentrancy attacks.",
- "severity": "High",
- "swc_id": "SWC-107",
- "line_numbers": [15, 20],
- "confidence": "High",
- "tool": "Manticore",
- "file_path": contract_path
- }
- vuln_info = get_vulnerability_info("SWC-107")
- if vuln_info:
- finding["vulnerability_details"] = {
- "name": vuln_info["name"],
- "description": vuln_info["description"],
- "impact": vuln_info["impact"]
- }
- finding["suggested_fixes"] = get_fix_suggestions("SWC-107")
- findings.append(finding)
-
- elif "timestamp" in path_lower or "ts" in path_lower:
- finding = {
- "title": "Timestamp Dependence",
- "description": "Contract logic depends on block.timestamp which can be manipulated by miners.",
- "severity": "Medium",
- "swc_id": "SWC-116",
- "line_numbers": [12],
- "confidence": "Medium",
- "tool": "Manticore",
- "file_path": contract_path
- }
- vuln_info = get_vulnerability_info("SWC-116")
- if vuln_info:
- finding["vulnerability_details"] = {
- "name": vuln_info["name"],
- "description": vuln_info["description"],
- "impact": vuln_info["impact"]
- }
- finding["suggested_fixes"] = get_fix_suggestions("SWC-116")
- findings.append(finding)
-
- return [self.standardize_finding(f) for f in findings]
-
- def _parse_global_findings(self, output_dir: Path) -> List[Dict]:
- """Parse global.findings file"""
- findings = []
- global_file = output_dir / "global.findings"
- if global_file.exists():
- try:
- with open(global_file, 'r') as f:
- data = json.load(f)
- for issue in data.get("issues", []):
- finding = {
- "title": issue.get("title", "Unknown Issue"),
- "description": issue.get("description", ""),
- "severity": self._map_manticore_severity(issue.get("severity", "medium")),
- "swc_id": issue.get("swc_id", ""),
- "line_numbers": issue.get("lines", []),
- "confidence": "High" if issue.get("severity") == "high" else "Medium",
- "tool": "Manticore"
- }
- findings.append(self.standardize_finding(finding))
- except Exception:
- pass
- return findings
-
- def _parse_testcase_findings(self, output_dir: Path) -> List[Dict]:
- """Parse testcase files for additional findings"""
- findings = []
- for test_file in output_dir.glob("test_*.json"):
- try:
- with open(test_file, 'r') as f:
- data = json.load(f)
- if data.get("error"):
- finding = {
- "title": "Runtime Error Detected",
- "description": f"Runtime error: {data.get('error', 'Unknown error')}",
- "severity": "High",
- "swc_id": "SWC-110", # Assert violation
- "line_numbers": [],
- "confidence": "High",
- "tool": "Manticore"
- }
- findings.append(self.standardize_finding(finding))
- except Exception:
- pass
- return findings
\ No newline at end of file
diff --git a/audit_engine/static_analysis/mythril_adapter.py b/audit_engine/static_analysis/mythril_adapter.py
deleted file mode 100644
index 5542202..0000000
--- a/audit_engine/static_analysis/mythril_adapter.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import subprocess
-import json
-from typing import List, Dict
-from .base import AbstractAdapter
-
-class MythrilAdapter(AbstractAdapter):
- def run(self, contract_path: str, **kwargs) -> List[Dict]:
- cmd = [
- "myth", "analyze", contract_path,
- "-o", "json",
- "--execution-timeout", str(kwargs.get("timeout", 180))
- ]
- try:
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=kwargs.get("timeout", 180)+10)
- return self.parse_output(result.stdout)
- except Exception as e:
- return [{"title": "Mythril Error", "description": str(e), "severity": "Low", "swc_id": "", "line_numbers": [], "confidence": "Low", "tool": "Mythril"}]
-
- def parse_output(self, output: str) -> List[Dict]:
- try:
- data = json.loads(output)
- findings = []
- for issue in data.get("issues", []):
- finding = {
- "title": issue.get("title", ""),
- "description": issue.get("description", ""),
- "severity": issue.get("severity", "Medium"),
- "swc_id": issue.get("swc-id", ""),
- "line_numbers": [loc.get("sourceMap", "") for loc in issue.get("locations", [])],
- "confidence": issue.get("confidence", "Medium"),
- "tool": "Mythril"
- }
- findings.append(self.standardize_finding(finding))
- return findings
- except Exception:
- return []
diff --git a/audit_engine/utils/file_handler.py b/audit_engine/utils/file_handler.py
deleted file mode 100644
index e4050bd..0000000
--- a/audit_engine/utils/file_handler.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from pathlib import Path
-from typing import Iterable, List
-
-
-class ContractFileHandler:
- SUPPORTED_EXTENSIONS = {".sol"}
-
- def is_supported_contract(self, path: Path) -> bool:
- return path.suffix.lower() in self.SUPPORTED_EXTENSIONS
-
- def collect_contracts(self, base: Path) -> List[str]:
- if base.is_file() and self.is_supported_contract(base):
- return [str(base.resolve())]
- results: List[str] = []
- for p in base.rglob("*"):
- if p.is_file() and self.is_supported_contract(p):
- results.append(str(p.resolve()))
- return results
-
-
-
diff --git a/audit_engine/utils/logger.py b/audit_engine/utils/logger.py
deleted file mode 100644
index 9530997..0000000
--- a/audit_engine/utils/logger.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import logging
-
-
-def get_logger(name: str, level: str = "INFO") -> logging.Logger:
- logger = logging.getLogger(name)
- if not logger.handlers:
- handler = logging.StreamHandler()
- formatter = logging.Formatter('[%(asctime)s] %(levelname)s %(name)s: %(message)s')
- handler.setFormatter(formatter)
- logger.addHandler(handler)
- try:
- logger.setLevel(getattr(logging, level.upper()))
- except Exception:
- logger.setLevel(logging.INFO)
- return logger
-
-
-
diff --git a/data/contracts/.gitkeep b/data/contracts/.gitkeep
new file mode 100644
index 0000000..1565a06
--- /dev/null
+++ b/data/contracts/.gitkeep
@@ -0,0 +1,5 @@
+# Smart Contracts
+
+This directory contains smart contract samples for testing and analysis.
+
+Add your contract files here.
diff --git a/data/datasets/.gitkeep b/data/datasets/.gitkeep
new file mode 100644
index 0000000..4aff1bd
--- /dev/null
+++ b/data/datasets/.gitkeep
@@ -0,0 +1,5 @@
+# Datasets
+
+This directory contains datasets for training and evaluation.
+
+Add your dataset files here.
diff --git a/docs/agents.md b/docs/agents.md
new file mode 100644
index 0000000..aab4078
--- /dev/null
+++ b/docs/agents.md
@@ -0,0 +1,23 @@
+# Agents
+
+## Overview
+
+This document describes the different agents in the system and their responsibilities.
+
+## Agent Types
+
+### Coordinator Agent
+Routes analysis tasks to the appropriate specialized agents based on the contract type and analysis requirements.
+
+### Static Agent
+Performs static analysis using tools like Slither and other static analyzers.
+
+### Dynamic Agent
+Performs dynamic analysis through symbolic execution and fuzzing.
+
+### ML Agent
+Uses machine learning models to detect patterns and anomalies in smart contracts.
+
+## Agent Communication
+
+Agents communicate through a message queue system and report results to a centralized results sink.
diff --git a/docs/api.md b/docs/api.md
new file mode 100644
index 0000000..8d2b17d
--- /dev/null
+++ b/docs/api.md
@@ -0,0 +1,20 @@
+# API Documentation
+
+## Overview
+
+REST API endpoints for the OAL Agent system.
+
+## Endpoints
+
+### POST /analysis
+Submit a smart contract for analysis.
+
+### GET /analysis/{job_id}
+Get the status and results of an analysis job.
+
+### GET /analysis/{job_id}/results
+Get detailed results for a completed analysis.
+
+## Schemas
+
+See the `src/oal_agent/app/schemas/` directory for request/response schemas.
diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 0000000..2d30f74
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,34 @@
+# Architecture
+
+## Overview
+
+This document describes the high-level architecture of the OAL Agent system.
+
+## Components
+
+### Core
+- **Orchestrator**: Manages the overall execution flow
+- **Pipeline**: Defines analysis pipelines
+- **Config**: Configuration management
+- **Errors**: Custom error handling
+
+### Agents
+- **Coordinator**: Routes tasks to appropriate agents
+- **Static Agent**: Performs static analysis
+- **Dynamic Agent**: Performs dynamic analysis
+- **ML Agent**: Machine learning-based analysis
+
+### Tools
+Integration with external security analysis tools.
+
+### Services
+Background services for queue management, storage, and results handling.
+
+### LLM
+Large Language Model integration for intelligent analysis.
+
+### Security
+Security validation, policies, and sandboxing.
+
+### Telemetry
+Logging, metrics, and distributed tracing.
diff --git a/docs/pipelines.md b/docs/pipelines.md
new file mode 100644
index 0000000..3e54701
--- /dev/null
+++ b/docs/pipelines.md
@@ -0,0 +1,20 @@
+# Pipelines
+
+## Overview
+
+Analysis pipelines define the sequence of operations performed on smart contracts.
+
+## Pipeline Types
+
+### Standard Pipeline
+Basic static and dynamic analysis.
+
+### Deep Pipeline
+Comprehensive analysis including ML-based detection.
+
+### Quick Pipeline
+Fast preliminary analysis for triage.
+
+## Pipeline Configuration
+
+Pipelines can be configured through the configuration system to customize analysis depth and tools used.
diff --git a/Paper/IEEE-IoT-templates/IEEEtran.cls b/docs/research/IEEE-IoT-templates/IEEEtran.cls
similarity index 100%
rename from Paper/IEEE-IoT-templates/IEEEtran.cls
rename to docs/research/IEEE-IoT-templates/IEEEtran.cls
diff --git a/Paper/IEEE-IoT-templates/New_IEEEtran_how-to.pdf b/docs/research/IEEE-IoT-templates/New_IEEEtran_how-to.pdf
similarity index 100%
rename from Paper/IEEE-IoT-templates/New_IEEEtran_how-to.pdf
rename to docs/research/IEEE-IoT-templates/New_IEEEtran_how-to.pdf
diff --git a/Paper/IEEE-IoT-templates/New_IEEEtran_how-to.tex b/docs/research/IEEE-IoT-templates/New_IEEEtran_how-to.tex
similarity index 100%
rename from Paper/IEEE-IoT-templates/New_IEEEtran_how-to.tex
rename to docs/research/IEEE-IoT-templates/New_IEEEtran_how-to.tex
diff --git a/Paper/IEEE-IoT-templates/bare_jrnl_new_sample4.pdf b/docs/research/IEEE-IoT-templates/bare_jrnl_new_sample4.pdf
similarity index 100%
rename from Paper/IEEE-IoT-templates/bare_jrnl_new_sample4.pdf
rename to docs/research/IEEE-IoT-templates/bare_jrnl_new_sample4.pdf
diff --git a/Paper/IEEE-IoT-templates/bare_jrnl_new_sample4.tex b/docs/research/IEEE-IoT-templates/bare_jrnl_new_sample4.tex
similarity index 100%
rename from Paper/IEEE-IoT-templates/bare_jrnl_new_sample4.tex
rename to docs/research/IEEE-IoT-templates/bare_jrnl_new_sample4.tex
diff --git a/Paper/IEEE-IoT-templates/fig1.png b/docs/research/IEEE-IoT-templates/fig1.png
similarity index 100%
rename from Paper/IEEE-IoT-templates/fig1.png
rename to docs/research/IEEE-IoT-templates/fig1.png
diff --git a/Paper/research/Paper.md b/docs/research/research/Paper.md
similarity index 100%
rename from Paper/research/Paper.md
rename to docs/research/research/Paper.md
diff --git a/Paper/research/Workflow.md b/docs/research/research/Workflow.md
similarity index 100%
rename from Paper/research/Workflow.md
rename to docs/research/research/Workflow.md
diff --git a/docs/setup.md b/docs/setup.md
new file mode 100644
index 0000000..6a54e78
--- /dev/null
+++ b/docs/setup.md
@@ -0,0 +1,320 @@
+# Setup Guide
+
+This guide will help you set up the OAL Agent development environment.
+
+It walks through installing dependencies, configuring environment
+variables, installing external analysis tools, and verifying the
+installation, followed by development workflow and troubleshooting tips.
+
+Follow the numbered steps below in order for a fresh setup.
+
+## Table of Contents
+
+- [System Requirements](#system-requirements)
+- [Initial Setup](#initial-setup)
+- [Development Environment](#development-environment)
+- [Running Tests](#running-tests)
+- [IDE Configuration](#ide-configuration)
+- [Troubleshooting](#troubleshooting)
+
+## System Requirements
+
+### Required
+
+- **Python**: 3.9 or higher (3.11 recommended)
+- **pip**: Latest version
+- **Git**: For version control
+- **Redis**: 6.0+ for queue management
+
+### Recommended
+
+- **Docker**: For containerized development
+- **PostgreSQL**: 13+ for production database (SQLite works for development)
+- **Solidity Compiler (solc)**: For smart contract compilation
+
+### Operating Systems
+
+- Linux (Ubuntu 20.04+, Debian 11+)
+- macOS (11.0+)
+- Windows (via WSL2)
+
+## Initial Setup
+
+### 1. Clone the Repository
+
+```bash
+git clone https://github.com/OpenAuditLabs/agent.git
+cd agent
+```
+
+### 2. Create Virtual Environment
+
+```bash
+# Create virtual environment
+python -m venv .venv
+
+# Activate on Linux/macOS
+source .venv/bin/activate
+
+# Activate on Windows
+.venv\Scripts\activate
+```
+
+### 3. Install Python Dependencies
+
+```bash
+# Upgrade pip
+pip install --upgrade pip
+
+# Install production dependencies
+pip install -r requirements.txt
+
+# Install development dependencies
+pip install -r requirements-dev.txt
+```
+
+### 4. Install Pre-commit Hooks
+
+```bash
+pre-commit install
+```
+
+### 5. Configure Environment Variables
+
+```bash
+# Copy example environment file
+cp .env.example .env
+
+# Edit .env with your settings
+nano .env # or your preferred editor
+```
+
+**Required environment variables:**
+
+```bash
+# API Configuration
+API_HOST=0.0.0.0
+API_PORT=8000
+
+# Database
+DATABASE_URL=sqlite:///./oal_agent.db
+
+# Queue (Redis)
+QUEUE_URL=redis://localhost:6379
+
+# LLM Provider
+LLM_PROVIDER=openai
+LLM_API_KEY=your-api-key-here
+
+# Security
+SECRET_KEY=generate-a-random-secret-key
+
+# Logging
+LOG_LEVEL=INFO
+```
+
+### 6. Install External Tools
+
+#### Slither (Static Analysis)
+
+```bash
+pip install slither-analyzer
+```
+
+#### Mythril (Symbolic Execution)
+
+```bash
+pip install mythril
+```
+
+#### Solc (Solidity Compiler)
+
+```bash
+# On Linux
+sudo add-apt-repository ppa:ethereum/ethereum
+sudo apt-get update
+sudo apt-get install solc
+
+# On macOS
+brew tap ethereum/ethereum
+brew install solidity
+
+# Or use solc-select for version management
+pip install solc-select
+solc-select install 0.8.0
+solc-select use 0.8.0
+```
+
+### 7. Start Redis
+
+```bash
+# On Linux
+sudo systemctl start redis
+
+# On macOS
+brew services start redis
+
+# Or run in foreground
+redis-server
+```
+
+### 8. Verify Installation
+
+```bash
+# Run health check
+python src/oal_agent/cli.py serve &
+sleep 2
+curl http://localhost:8000/health
+```
+
+## Development Environment
+
+### Running the Development Server
+
+```bash
+# Start with auto-reload
+python src/oal_agent/cli.py serve --host 0.0.0.0 --port 8000
+
+# Access API docs at: http://localhost:8000/docs
+```
+
+### Running Tests
+
+```bash
+# Run all tests
+bash scripts/test.sh
+
+# Run specific test suites
+pytest tests/unit/ -v
+pytest tests/integration/ -v
+pytest tests/e2e/ -v
+
+# Run with coverage
+pytest --cov=src/oal_agent --cov-report=html
+```
+
+### Code Quality Checks
+
+```bash
+# Format code
+bash scripts/format.sh
+
+# Run linters
+bash scripts/lint.sh
+
+# Run pre-commit checks
+pre-commit run --all-files
+```
+
+## IDE Configuration
+
+### VS Code
+
+The project includes `.vscode/settings.json` with recommended settings.
+
+**Recommended Extensions:**
+
+- Python (ms-python.python)
+- Pylance (ms-python.vscode-pylance)
+- Black Formatter (ms-python.black-formatter)
+- isort (ms-python.isort)
+- GitLens (eamodio.gitlens)
+
+### PyCharm
+
+1. Open the project
+2. Configure Python interpreter: Settings → Project → Python Interpreter
+3. Select the `.venv` virtual environment
+4. Enable Black formatter: Settings → Tools → Black
+5. Enable pytest: Settings → Tools → Python Integrated Tools
+
+## Troubleshooting
+
+### Python Version Issues
+
+```bash
+# Check Python version
+python --version
+
+# If needed, use pyenv to manage Python versions
+curl https://pyenv.run | bash
+pyenv install 3.11
+pyenv local 3.11
+```
+
+### Import Errors
+
+```bash
+# Ensure you're in the virtual environment
+which python # Should point to .venv/bin/python
+
+# Add src to PYTHONPATH
+export PYTHONPATH="${PYTHONPATH}:${PWD}/src"
+```
+
+### Redis Connection Issues
+
+```bash
+# Check if Redis is running
+redis-cli ping
+
+# Should return: PONG
+
+# Check Redis logs
+tail -f /var/log/redis/redis-server.log
+```
+
+### Permission Errors
+
+```bash
+# Make scripts executable
+chmod +x scripts/*.sh
+
+# Fix ownership if needed
+sudo chown -R $USER:$USER .
+```
+
+### Database Issues
+
+```bash
+# For SQLite, simply delete and recreate
+rm oal_agent.db
+
+# For PostgreSQL
+dropdb oal_agent
+createdb oal_agent
+```
+
+### Dependency Conflicts
+
+```bash
+# Clear pip cache
+pip cache purge
+
+# Reinstall from scratch
+pip uninstall -r requirements.txt -y
+pip install -r requirements.txt
+```
+
+## Next Steps
+
+- Read the [Architecture documentation](architecture.md)
+- Explore the [API documentation](api.md)
+- Check out [Contributing guidelines](../CONTRIBUTING.md)
+- Join our [GitHub Discussions](https://github.com/OpenAuditLabs/agent/discussions)
+
+## Getting Help
+
+If you encounter issues not covered here:
+
+1. Check [GitHub Issues](https://github.com/OpenAuditLabs/agent/issues)
+2. Search [GitHub Discussions](https://github.com/OpenAuditLabs/agent/discussions)
+3. Create a new issue with:
+ - Your OS and Python version
+ - Steps to reproduce
+ - Error messages
+ - What you've tried
+
+---
+
+**Last Updated**: October 2025
diff --git a/models/gnn/.gitkeep b/models/gnn/.gitkeep
new file mode 100644
index 0000000..d00f2af
--- /dev/null
+++ b/models/gnn/.gitkeep
@@ -0,0 +1,5 @@
+# Graph Neural Network Models
+
+This directory contains GNN-based models for smart contract analysis.
+
+Add your GNN models here.
diff --git a/models/transformers/.gitkeep b/models/transformers/.gitkeep
new file mode 100644
index 0000000..d8b3a8f
--- /dev/null
+++ b/models/transformers/.gitkeep
@@ -0,0 +1,5 @@
+# Transformer Models
+
+This directory contains transformer-based models for smart contract analysis.
+
+Add your transformer models here.
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..8ec0905
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,56 @@
+[build-system]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "oal-agent"
+version = "0.1.0"
+description = "Smart Contract Security Analysis System"
+readme = "README.md"
+requires-python = ">=3.9"
+license = {text = "AGPL-3.0-or-later"}
+authors = [
+ {name = "OpenAuditLabs", email = "contact@openauditlabs.com"}
+]
+classifiers = [
+ "Development Status :: 3 - Alpha",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+]
+
+[project.optional-dependencies]
+dev = [
+ "pytest>=7.4.0",
+ "pytest-asyncio>=0.21.0",
+ "pytest-cov>=4.1.0",
+ "black>=23.12.0",
+ "isort>=5.13.0",
+ "flake8>=7.0.0",
+ "mypy>=1.8.0",
+ "pre-commit>=3.6.0",
+]
+
+[tool.black]
+line-length = 88
+target-version = ['py39', 'py310', 'py311']
+include = '\.pyi?$'
+
+[tool.isort]
+profile = "black"
+line_length = 88
+
+[tool.pytest.ini_options]
+minversion = "7.0"
+addopts = "-ra -q --strict-markers"
+testpaths = ["tests"]
+pythonpath = ["src"]
+
+[tool.mypy]
+python_version = "3.9"
+warn_return_any = true
+warn_unused_configs = true
+disallow_untyped_defs = false
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..b3f6e3a
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,22 @@
+# Testing
+pytest>=7.4.0
+pytest-asyncio>=0.21.0
+pytest-cov>=4.1.0
+pytest-mock>=3.12.0
+
+# Code Quality
+black>=23.12.0
+isort>=5.13.0
+flake8>=7.0.0
+mypy>=1.8.0
+pylint>=3.0.0
+
+# Pre-commit
+pre-commit>=3.6.0
+
+# YAML/JSON formatting
+pyyaml>=6.0.0
+
+# Documentation
+mkdocs>=1.5.0
+mkdocs-material>=9.5.0
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..e779e17
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,32 @@
+# Web Framework
+fastapi>=0.109.0
+uvicorn[standard]>=0.27.0
+pydantic>=2.5.0
+pydantic-settings>=2.1.0
+
+# HTTP Client
+httpx>=0.26.0
+
+# Database
+sqlalchemy>=2.0.0
+alembic>=1.13.0
+
+# Queue
+redis>=5.0.0
+celery>=5.3.0
+
+# LLM
+openai>=1.10.0
+langchain>=0.1.0
+
+# Security Tools
+slither-analyzer>=0.10.0
+mythril>=0.24.0
+
+# Utilities
+python-dotenv>=1.0.0
+click>=8.1.0
+pyyaml>=6.0.0
+
+# Monitoring
+prometheus-client>=0.19.0
diff --git a/run_audit_example.py b/run_audit_example.py
deleted file mode 100644
index cce1f08..0000000
--- a/run_audit_example.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/env python3
-"""
-Example script showing how to feed vulnerable smart contracts to the audit engine.
-This demonstrates the proper way to use the AuditEngine with the smart_contracts folder.
-"""
-
-import asyncio
-import os
-from pathlib import Path
-from audit_engine.core.engine import AuditEngine
-from audit_engine.core.schemas import AnalysisRequest
-from audit_engine.core.config import AuditConfig, StaticAnalysisConfig
-from audit_engine.dynamic_analysis.config import DynamicAnalysisConfig
-
-def get_contract_paths():
- """Get all .sol files from the smart_contracts folder"""
- base_path = Path("audit_engine/smart_contracts")
- contract_paths = []
-
- # Walk through all subdirectories
- for root, dirs, files in os.walk(base_path):
- for file in files:
- if file.endswith('.sol'):
- full_path = os.path.join(root, file)
- contract_paths.append(full_path)
-
- return contract_paths
-
-def get_contracts_by_vulnerability_type():
- """Get contracts organized by vulnerability type"""
- base_path = Path("audit_engine/smart_contracts")
- contracts_by_type = {}
-
- for vuln_type in ['reentrancy', 'integer_overflow', 'timestamp']:
- vuln_path = base_path / vuln_type
- if vuln_path.exists():
- contracts_by_type[vuln_type] = [
- str(vuln_path / file) for file in os.listdir(vuln_path)
- if file.endswith('.sol')
- #checks if there is any file available inside each type
- ]
-
- return contracts_by_type
-
-async def run_audit_example():
- """Main function to demonstrate audit engine usage"""
-
- print("π OpenAudit Agent - Smart Contract Vulnerability Detection")
- print("=" * 60)
-
- # Initialize the audit engine with configuration
- config = AuditConfig(
- log_level="INFO",
- enable_parallel_execution=True,
- # Enable static analysis tools
- static_analysis=StaticAnalysisConfig(
- enable_mythril=False,
- enable_manticore=True,
- enable_slither=False
- ),
- # Enable dynamic analysis tools
- dynamic_analysis=DynamicAnalysisConfig(
- enable_echidna=True,
- enable_adversarial_fuzz=False
- )
- )
-
- engine = AuditEngine(config=config)
-
- # Example 1: Analyze all contracts at once
- print("\nπ Example 1: Analyzing ALL vulnerable contracts")
- print("-" * 50)
-
- all_contracts = get_contract_paths()
- print(f"Found {len(all_contracts)} contract files")
-
- if all_contracts:
- # Create analysis request
- request = AnalysisRequest(
- contract_paths=all_contracts[:30], # Limit to first 5 for demo
- analysis_type="comprehensive",
- include_static=True,
- include_dynamic=True,
- include_scoring=True,
- enable_ai_agents=False, # Not implemented yet
- max_analysis_time=300 # 5 minutes
- )
-
- print(f"Analyzing {len(request.contract_paths)} contracts...")
- result = await engine.analyze(request)
-
- print(f"\nβ
Analysis completed in {result.duration_seconds:.2f} seconds")
- print(f"π Found {result.total_findings} vulnerabilities")
-
- # Show severity breakdown
- if result.severity_distribution:
- print("\nπ Severity Distribution:")
- for severity, count in result.severity_distribution.items():
- print(f" {severity}: {count}")
-
- # Show sample findings
- if result.findings:
- print(f"\nπ Sample Findings (showing first 30):")
- for i, finding in enumerate(result.findings[:30]):
- print(f"\n Finding {i+1}:")
- print(f" Tool: {finding.tool_name}")
- print(f" Severity: {finding.severity}")
- print(f" SWC ID: {finding.swc_id or 'N/A'}")
- print(f" File: {finding.file_path}")
- print(f" Description: {finding.description[:]}...")
- print(f" Confidence: {finding.confidence:.2f}")
-
- # Example 2: Analyze by vulnerability type
- print("\n\nπ Example 2: Analyzing by vulnerability type")
- print("-" * 50)
-
- contracts_by_type = get_contracts_by_vulnerability_type()
-
- for vuln_type, contracts in contracts_by_type.items():
- if contracts:
- print(f"\nπ Analyzing {vuln_type} contracts ({len(contracts)} files)")
-
- # Analyze just 2 contracts per type for demo
- sample_contracts = contracts[:2]
-
- request = AnalysisRequest(
- contract_paths=sample_contracts,
- analysis_type="comprehensive",
- include_static=True,
- include_dynamic=False, # Skip dynamic for faster demo
- include_scoring=True,
- max_analysis_time=120
- )
-
- result = await engine.analyze(request)
- print(f" Found {result.total_findings} vulnerabilities")
-
- # Show specific findings for this vulnerability type
- for finding in result.findings:
- if vuln_type.lower() in finding.description.lower() or vuln_type.lower() in finding.swc_id.lower():
- print(f" β οΈ {finding.severity}: {finding.description[:80]}...")
-
- # Example 3: Single contract analysis
- print("\n\nπ Example 3: Single contract deep analysis")
- print("-" * 50)
-
- # Pick a specific reentrancy contract
- reentrancy_contracts = contracts_by_type.get('reentrancy', [])
- if reentrancy_contracts:
- single_contract = reentrancy_contracts[0]
- print(f"π Deep analysis of: {single_contract}")
-
- request = AnalysisRequest(
- contract_paths=[single_contract],
- analysis_type="comprehensive",
- include_static=True,
- include_dynamic=True,
- include_scoring=True,
- max_analysis_time=180
- )
-
- result = await engine.analyze(request)
-
- print(f"\nπ Detailed Results:")
- print(f" Total findings: {result.total_findings}")
- print(f" Analysis time: {result.duration_seconds:.2f}s")
-
- if result.findings:
- print(f"\nπ All Findings:")
- for i, finding in enumerate(result.findings, 1):
- print(f"\n {i}. {finding.severity} - {finding.tool_name}")
- print(f" SWC: {finding.swc_id or 'N/A'}")
- print(f" File: {os.path.basename(finding.file_path)}")
- print(f" Description: {finding.description}")
- print(f" Confidence: {finding.confidence:.2f}")
- if finding.recommendations:
- print(f" Recommendations: {finding.recommendations[0]}")
-
- if result.tool_errors:
- print(f"\nβ οΈ Tool Errors ({len(result.tool_errors)}):")
- for error in result.tool_errors:
- print(f" - {error.tool_name}: {error.error_message}")
-
-if __name__ == "__main__":
- # Run the async function
- asyncio.run(run_audit_example())
\ No newline at end of file
diff --git a/scripts/format.sh b/scripts/format.sh
new file mode 100755
index 0000000..5888de7
--- /dev/null
+++ b/scripts/format.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+
+echo "Formatting Python code with black..."
+black src/ tests/
+
+echo "Sorting imports with isort..."
+isort src/ tests/
+
+echo "Fixing trailing whitespace and line endings..."
+# Remove trailing whitespace
+find src/ tests/ -name "*.py" -type f -exec sed -i 's/[[:space:]]*$//' {} +
+
+echo "Code formatted successfully!"
diff --git a/scripts/lint.sh b/scripts/lint.sh
new file mode 100755
index 0000000..f58efbf
--- /dev/null
+++ b/scripts/lint.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e
+
+echo "Running black..."
+black --check src/ tests/
+
+echo "Running isort..."
+isort --check-only src/ tests/
+
+echo "Running flake8..."
+flake8 src/ tests/
+
+echo "Running mypy..."
+mypy src/
+
+echo "All linting checks passed!"
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100755
index 0000000..d667299
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -e
+
+echo "Running unit tests..."
+pytest tests/unit/ -v --cov=src/oal_agent --cov-report=term-missing
+
+echo "Running integration tests..."
+pytest tests/integration/ -v
+
+echo "Running e2e tests..."
+pytest tests/e2e/ -v
+
+echo "All tests passed!"
diff --git a/src/oal_agent/__init__.py b/src/oal_agent/__init__.py
new file mode 100644
index 0000000..71a25ad
--- /dev/null
+++ b/src/oal_agent/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (C) 2025 OpenAuditLabs
+# SPDX-License-Identifier: AGPL-3.0-or-later
+
+"""OAL Agent - Smart Contract Security Analysis System."""
+
+__version__ = "0.1.0"
+__license__ = "AGPL-3.0-or-later"
diff --git a/src/oal_agent/agents/__init__.py b/src/oal_agent/agents/__init__.py
new file mode 100644
index 0000000..ad9131a
--- /dev/null
+++ b/src/oal_agent/agents/__init__.py
@@ -0,0 +1 @@
+"""Agents module."""
diff --git a/src/oal_agent/agents/coordinator.py b/src/oal_agent/agents/coordinator.py
new file mode 100644
index 0000000..56a2458
--- /dev/null
+++ b/src/oal_agent/agents/coordinator.py
@@ -0,0 +1,14 @@
+"""Coordinator agent for routing tasks."""
+
+
+class CoordinatorAgent:
+ """Routes tasks to appropriate specialized agents."""
+
+ def __init__(self):
+ """Initialize the coordinator."""
+ pass
+
+ async def route(self, task: dict):
+ """Route a task to the appropriate agent."""
+ # TODO: Implement routing logic
+ pass
diff --git a/src/oal_agent/agents/dynamic_agent.py b/src/oal_agent/agents/dynamic_agent.py
new file mode 100644
index 0000000..1289708
--- /dev/null
+++ b/src/oal_agent/agents/dynamic_agent.py
@@ -0,0 +1,14 @@
+"""Dynamic analysis agent."""
+
+
+class DynamicAgent:
+ """Performs dynamic analysis on smart contracts."""
+
+ def __init__(self):
+ """Initialize the dynamic agent."""
+ pass
+
+ async def analyze(self, contract_code: str):
+ """Perform dynamic analysis."""
+ # TODO: Implement dynamic analysis
+ pass
diff --git a/src/oal_agent/agents/ml_agent.py b/src/oal_agent/agents/ml_agent.py
new file mode 100644
index 0000000..8e6cb90
--- /dev/null
+++ b/src/oal_agent/agents/ml_agent.py
@@ -0,0 +1,14 @@
+"""Machine learning agent."""
+
+
+class MLAgent:
+ """Performs ML-based analysis on smart contracts."""
+
+ def __init__(self):
+ """Initialize the ML agent."""
+ pass
+
+ async def analyze(self, contract_code: str):
+ """Perform ML-based analysis."""
+ # TODO: Implement ML analysis
+ pass
diff --git a/src/oal_agent/agents/static_agent.py b/src/oal_agent/agents/static_agent.py
new file mode 100644
index 0000000..d2935e8
--- /dev/null
+++ b/src/oal_agent/agents/static_agent.py
@@ -0,0 +1,14 @@
+"""Static analysis agent."""
+
+
+class StaticAgent:
+ """Performs static analysis on smart contracts."""
+
+ def __init__(self):
+ """Initialize the static agent."""
+ pass
+
+ async def analyze(self, contract_code: str):
+ """Perform static analysis."""
+ # TODO: Implement static analysis
+ pass
diff --git a/src/oal_agent/app/__init__.py b/src/oal_agent/app/__init__.py
new file mode 100644
index 0000000..39cfd04
--- /dev/null
+++ b/src/oal_agent/app/__init__.py
@@ -0,0 +1 @@
+"""App module."""
diff --git a/src/oal_agent/app/dependencies.py b/src/oal_agent/app/dependencies.py
new file mode 100644
index 0000000..29ebb77
--- /dev/null
+++ b/src/oal_agent/app/dependencies.py
@@ -0,0 +1,15 @@
+"""FastAPI dependencies."""
+
+from typing import Generator
+
+
+def get_db() -> Generator:
+ """Database dependency."""
+ # TODO: Implement database session
+ pass
+
+
+def get_queue() -> Generator:
+ """Queue dependency."""
+ # TODO: Implement queue connection
+ pass
diff --git a/src/oal_agent/app/main.py b/src/oal_agent/app/main.py
new file mode 100644
index 0000000..0a9ae2f
--- /dev/null
+++ b/src/oal_agent/app/main.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2025 OpenAuditLabs
+# SPDX-License-Identifier: AGPL-3.0-or-later
+
+"""Main FastAPI application."""
+
+from fastapi import FastAPI
+
+from .routers import analysis
+
+app = FastAPI(
+ title="OAL Agent API",
+ description="Smart Contract Security Analysis System",
+ version="0.1.0"
+)
+
+app.include_router(analysis.router, prefix="/api/v1")
+
+
+@app.get("/")
+async def root():
+ """Root endpoint."""
+ return {"message": "OAL Agent API"}
+
+
+@app.get("/health")
+async def health():
+ """Health check endpoint."""
+ return {"status": "healthy"}
diff --git a/src/oal_agent/app/routers/__init__.py b/src/oal_agent/app/routers/__init__.py
new file mode 100644
index 0000000..99504c9
--- /dev/null
+++ b/src/oal_agent/app/routers/__init__.py
@@ -0,0 +1 @@
+"""Routers module."""
diff --git a/src/oal_agent/app/routers/analysis.py b/src/oal_agent/app/routers/analysis.py
new file mode 100644
index 0000000..0a825ed
--- /dev/null
+++ b/src/oal_agent/app/routers/analysis.py
@@ -0,0 +1,29 @@
+"""Analysis router."""
+
+from fastapi import APIRouter, HTTPException
+
+from ..schemas.jobs import JobRequest, JobResponse
+from ..schemas.results import AnalysisResult
+
+router = APIRouter(prefix="/analysis", tags=["analysis"])
+
+
+@router.post("/", response_model=JobResponse)
+async def submit_analysis(job: JobRequest):
+ """Submit a smart contract for analysis."""
+ # TODO: Implement job submission logic
+ return JobResponse(job_id="placeholder", status="queued")
+
+
+@router.get("/{job_id}", response_model=JobResponse)
+async def get_job_status(job_id: str):
+ """Get the status of an analysis job."""
+ # TODO: Implement job status retrieval
+ raise HTTPException(status_code=404, detail="Job not found")
+
+
+@router.get("/{job_id}/results", response_model=AnalysisResult)
+async def get_job_results(job_id: str):
+ """Get the results of an analysis job."""
+ # TODO: Implement results retrieval
+ raise HTTPException(status_code=404, detail="Results not found")
diff --git a/src/oal_agent/app/schemas/__init__.py b/src/oal_agent/app/schemas/__init__.py
new file mode 100644
index 0000000..21312d2
--- /dev/null
+++ b/src/oal_agent/app/schemas/__init__.py
@@ -0,0 +1 @@
+"""Schemas module."""
diff --git a/src/oal_agent/app/schemas/jobs.py b/src/oal_agent/app/schemas/jobs.py
new file mode 100644
index 0000000..23643c8
--- /dev/null
+++ b/src/oal_agent/app/schemas/jobs.py
@@ -0,0 +1,20 @@
+"""Job schemas."""
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class JobRequest(BaseModel):
+ """Analysis job request schema."""
+ contract_code: str
+ contract_address: Optional[str] = None
+ chain_id: Optional[int] = None
+ pipeline: str = "standard"
+
+
+class JobResponse(BaseModel):
+ """Analysis job response schema."""
+ job_id: str
+ status: str
+ message: Optional[str] = None
diff --git a/src/oal_agent/app/schemas/results.py b/src/oal_agent/app/schemas/results.py
new file mode 100644
index 0000000..86a6073
--- /dev/null
+++ b/src/oal_agent/app/schemas/results.py
@@ -0,0 +1,22 @@
+"""Results schemas."""
+
+from typing import Any, Dict, List
+
+from pydantic import BaseModel
+
+
+class Finding(BaseModel):
+ """Security finding schema."""
+ severity: str
+ title: str
+ description: str
+ location: Dict[str, Any]
+ recommendation: str
+
+
+class AnalysisResult(BaseModel):
+ """Analysis result schema."""
+ job_id: str
+ status: str
+ findings: List[Finding]
+ metadata: Dict[str, Any]
diff --git a/src/oal_agent/cli.py b/src/oal_agent/cli.py
new file mode 100644
index 0000000..c1a7972
--- /dev/null
+++ b/src/oal_agent/cli.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2025 OpenAuditLabs
+# SPDX-License-Identifier: AGPL-3.0-or-later
+
+"""Command-line interface."""
+
+import click
+
+from .core.config import settings
+
+
+@click.group()
+def cli():
+ """OAL Agent CLI."""
+ pass
+
+
+@cli.command()
+@click.option('--host', default=settings.api_host, help='API host')
+@click.option('--port', default=settings.api_port, help='API port')
+def serve(host: str, port: int):
+ """Start the API server."""
+ import uvicorn
+
+ from .app.main import app
+
+ uvicorn.run(app, host=host, port=port)
+
+
+@cli.command()
+@click.argument('contract_file')
+def analyze(contract_file: str):
+ """Analyze a smart contract file."""
+ click.echo(f"Analyzing {contract_file}...")
+ # TODO: Implement analysis logic
+
+
+if __name__ == '__main__':
+ cli()
diff --git a/src/oal_agent/core/__init__.py b/src/oal_agent/core/__init__.py
new file mode 100644
index 0000000..8051c87
--- /dev/null
+++ b/src/oal_agent/core/__init__.py
@@ -0,0 +1 @@
+"""Core module."""
diff --git a/src/oal_agent/core/config.py b/src/oal_agent/core/config.py
new file mode 100644
index 0000000..12f1255
--- /dev/null
+++ b/src/oal_agent/core/config.py
@@ -0,0 +1,28 @@
+"""Configuration management."""
+
+from pydantic_settings import BaseSettings
+
+
+class Settings(BaseSettings):
+ """Application settings."""
+
+ # API Settings
+ api_host: str = "0.0.0.0"
+ api_port: int = 8000
+
+ # Database
+ database_url: str = "sqlite:///./oal_agent.db"
+
+ # Queue
+ queue_url: str = "redis://localhost:6379"
+
+ # LLM
+ llm_provider: str = "openai"
+ llm_api_key: str = ""
+
+ class Config:
+ """Pydantic config."""
+ env_file = ".env"
+
+
+settings = Settings()
diff --git a/src/oal_agent/core/errors.py b/src/oal_agent/core/errors.py
new file mode 100644
index 0000000..19267ac
--- /dev/null
+++ b/src/oal_agent/core/errors.py
@@ -0,0 +1,21 @@
+"""Custom error classes."""
+
+
+class OALAgentError(Exception):
+ """Base exception for OAL Agent."""
+ pass
+
+
+class AnalysisError(OALAgentError):
+ """Error during analysis."""
+ pass
+
+
+class ValidationError(OALAgentError):
+ """Validation error."""
+ pass
+
+
+class ConfigurationError(OALAgentError):
+ """Configuration error."""
+ pass
diff --git a/src/oal_agent/core/orchestrator.py b/src/oal_agent/core/orchestrator.py
new file mode 100644
index 0000000..613166c
--- /dev/null
+++ b/src/oal_agent/core/orchestrator.py
@@ -0,0 +1,14 @@
+"""Orchestrator for coordinating analysis tasks."""
+
+
+class Orchestrator:
+ """Orchestrates the analysis workflow."""
+
+ def __init__(self):
+ """Initialize the orchestrator."""
+ pass
+
+ async def orchestrate(self, job_id: str):
+ """Orchestrate an analysis job."""
+ # TODO: Implement orchestration logic
+ pass
diff --git a/src/oal_agent/core/pipeline.py b/src/oal_agent/core/pipeline.py
new file mode 100644
index 0000000..e36c5ef
--- /dev/null
+++ b/src/oal_agent/core/pipeline.py
@@ -0,0 +1,17 @@
+"""Pipeline definitions."""
+
+from typing import Callable, List
+
+
+class Pipeline:
+ """Analysis pipeline."""
+
+ def __init__(self, name: str, steps: List[Callable]):
+ """Initialize a pipeline."""
+ self.name = name
+ self.steps = steps
+
+ async def execute(self, context: dict):
+ """Execute the pipeline."""
+ # TODO: Implement pipeline execution
+ pass
diff --git a/src/oal_agent/llm/__init__.py b/src/oal_agent/llm/__init__.py
new file mode 100644
index 0000000..357ac9a
--- /dev/null
+++ b/src/oal_agent/llm/__init__.py
@@ -0,0 +1 @@
+"""LLM module."""
diff --git a/src/oal_agent/llm/guards.py b/src/oal_agent/llm/guards.py
new file mode 100644
index 0000000..9d33d59
--- /dev/null
+++ b/src/oal_agent/llm/guards.py
@@ -0,0 +1,19 @@
+"""LLM guardrails."""
+
+
+class LLMGuards:
+ """Implements safety guardrails for LLM interactions."""
+
+ def __init__(self):
+ """Initialize guards."""
+ pass
+
+ async def validate_input(self, prompt: str) -> bool:
+ """Validate input prompt."""
+ # TODO: Implement input validation
+ return True
+
+ async def validate_output(self, response: str) -> bool:
+ """Validate LLM output."""
+ # TODO: Implement output validation
+ return True
diff --git a/src/oal_agent/llm/prompts/classifier.txt b/src/oal_agent/llm/prompts/classifier.txt
new file mode 100644
index 0000000..4e9b575
--- /dev/null
+++ b/src/oal_agent/llm/prompts/classifier.txt
@@ -0,0 +1,11 @@
+You are a smart contract vulnerability classifier.
+
+Your role is to classify the severity and type of vulnerabilities found in smart contracts.
+
+Consider:
+- Impact on funds
+- Exploitability
+- Contract context
+- Industry standards
+
+Classify findings as: CRITICAL, HIGH, MEDIUM, LOW, or INFORMATIONAL.
diff --git a/src/oal_agent/llm/prompts/coordinator.txt b/src/oal_agent/llm/prompts/coordinator.txt
new file mode 100644
index 0000000..b3b9a8b
--- /dev/null
+++ b/src/oal_agent/llm/prompts/coordinator.txt
@@ -0,0 +1,11 @@
+You are a smart contract security coordinator agent.
+
+Your role is to analyze the provided smart contract and determine which specialized agents should be used for analysis.
+
+Consider:
+- Contract complexity
+- Language and version
+- Specific vulnerabilities to check
+- Analysis depth required
+
+Respond with a JSON object specifying which agents to use and in what order.
diff --git a/src/oal_agent/llm/provider.py b/src/oal_agent/llm/provider.py
new file mode 100644
index 0000000..d59baae
--- /dev/null
+++ b/src/oal_agent/llm/provider.py
@@ -0,0 +1,25 @@
+"""LLM provider interface."""
+
+from abc import ABC, abstractmethod
+
+
+class LLMProvider(ABC):
+ """Base class for LLM providers."""
+
+ @abstractmethod
+ async def generate(self, prompt: str, **kwargs):
+ """Generate text from prompt."""
+ pass
+
+
+class OpenAIProvider(LLMProvider):
+ """OpenAI LLM provider."""
+
+ def __init__(self, api_key: str):
+ """Initialize OpenAI provider."""
+ self.api_key = api_key
+
+ async def generate(self, prompt: str, **kwargs):
+ """Generate text using OpenAI."""
+ # TODO: Implement OpenAI integration
+ pass
diff --git a/src/oal_agent/security/__init__.py b/src/oal_agent/security/__init__.py
new file mode 100644
index 0000000..1051c9f
--- /dev/null
+++ b/src/oal_agent/security/__init__.py
@@ -0,0 +1 @@
+"""Security module."""
diff --git a/src/oal_agent/security/policies.py b/src/oal_agent/security/policies.py
new file mode 100644
index 0000000..31f450f
--- /dev/null
+++ b/src/oal_agent/security/policies.py
@@ -0,0 +1,14 @@
+"""Security policies."""
+
+
+class SecurityPolicy:
+ """Defines security policies."""
+
+ def __init__(self):
+ """Initialize security policy."""
+ pass
+
+ def check_permission(self, action: str, resource: str) -> bool:
+ """Check if action is permitted on resource."""
+ # TODO: Implement permission checks
+ return True
diff --git a/src/oal_agent/security/sandboxing.py b/src/oal_agent/security/sandboxing.py
new file mode 100644
index 0000000..86d9f47
--- /dev/null
+++ b/src/oal_agent/security/sandboxing.py
@@ -0,0 +1,14 @@
+"""Sandboxing utilities."""
+
+
+class Sandbox:
+ """Provides sandboxed execution environment."""
+
+ def __init__(self):
+ """Initialize sandbox."""
+ pass
+
+ async def run(self, code: str, timeout: int = 30):
+ """Run code in sandbox with timeout."""
+ # TODO: Implement sandboxed execution
+ pass
diff --git a/src/oal_agent/security/validation.py b/src/oal_agent/security/validation.py
new file mode 100644
index 0000000..90efafd
--- /dev/null
+++ b/src/oal_agent/security/validation.py
@@ -0,0 +1,17 @@
+"""Input validation."""
+
+
+class Validator:
+ """Validates inputs for security."""
+
+ @staticmethod
+ def validate_contract_code(code: str) -> bool:
+ """Validate contract code."""
+ # TODO: Implement validation
+ return True
+
+ @staticmethod
+ def validate_address(address: str) -> bool:
+ """Validate contract address."""
+ # TODO: Implement validation
+ return True
diff --git a/src/oal_agent/services/__init__.py b/src/oal_agent/services/__init__.py
new file mode 100644
index 0000000..5949e0b
--- /dev/null
+++ b/src/oal_agent/services/__init__.py
@@ -0,0 +1 @@
+"""Services module."""
diff --git a/src/oal_agent/services/queue.py b/src/oal_agent/services/queue.py
new file mode 100644
index 0000000..435f2e6
--- /dev/null
+++ b/src/oal_agent/services/queue.py
@@ -0,0 +1,19 @@
+"""Queue service for job management."""
+
+
+class QueueService:
+ """Manages the job queue."""
+
+ def __init__(self, queue_url: str):
+ """Initialize queue service."""
+ self.queue_url = queue_url
+
+ async def enqueue(self, job_id: str, job_data: dict):
+ """Add a job to the queue."""
+ # TODO: Implement queue enqueue
+ pass
+
+ async def dequeue(self):
+ """Get next job from queue."""
+ # TODO: Implement queue dequeue
+ pass
diff --git a/src/oal_agent/services/results_sink.py b/src/oal_agent/services/results_sink.py
new file mode 100644
index 0000000..f5d2796
--- /dev/null
+++ b/src/oal_agent/services/results_sink.py
@@ -0,0 +1,19 @@
+"""Results sink service."""
+
+
+class ResultsSink:
+ """Collects and stores analysis results."""
+
+ def __init__(self):
+ """Initialize results sink."""
+ pass
+
+ async def store(self, job_id: str, results: dict):
+ """Store analysis results."""
+ # TODO: Implement results storage
+ pass
+
+ async def retrieve(self, job_id: str):
+ """Retrieve analysis results."""
+ # TODO: Implement results retrieval
+ pass
diff --git a/src/oal_agent/services/storage.py b/src/oal_agent/services/storage.py
new file mode 100644
index 0000000..d293c63
--- /dev/null
+++ b/src/oal_agent/services/storage.py
@@ -0,0 +1,19 @@
+"""Storage service."""
+
+
+class StorageService:
+ """Manages persistent storage."""
+
+ def __init__(self, storage_path: str):
+ """Initialize storage service."""
+ self.storage_path = storage_path
+
+ async def save(self, key: str, data: bytes):
+ """Save data to storage."""
+ # TODO: Implement storage save
+ pass
+
+ async def load(self, key: str):
+ """Load data from storage."""
+ # TODO: Implement storage load
+ pass
diff --git a/src/oal_agent/telemetry/__init__.py b/src/oal_agent/telemetry/__init__.py
new file mode 100644
index 0000000..19db6c6
--- /dev/null
+++ b/src/oal_agent/telemetry/__init__.py
@@ -0,0 +1 @@
+"""Telemetry module."""
diff --git a/src/oal_agent/telemetry/logging.py b/src/oal_agent/telemetry/logging.py
new file mode 100644
index 0000000..2e35736
--- /dev/null
+++ b/src/oal_agent/telemetry/logging.py
@@ -0,0 +1,18 @@
+"""Logging configuration."""
+
+import logging
+import sys
+
+
+def setup_logging(level: str = "INFO"):
+ """Setup logging configuration."""
+ logging.basicConfig(
+ level=getattr(logging, level.upper()),
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+ handlers=[
+ logging.StreamHandler(sys.stdout)
+ ]
+ )
+
+
+logger = logging.getLogger("oal_agent")
diff --git a/src/oal_agent/telemetry/metrics.py b/src/oal_agent/telemetry/metrics.py
new file mode 100644
index 0000000..37963a9
--- /dev/null
+++ b/src/oal_agent/telemetry/metrics.py
@@ -0,0 +1,26 @@
+"""Metrics collection."""
+
+
+class MetricsCollector:
+ """Collects application metrics."""
+
+ def __init__(self):
+ """Initialize metrics collector."""
+ self.metrics = {}
+
+ def increment(self, metric: str, value: int = 1):
+ """Increment a metric."""
+ if metric not in self.metrics:
+ self.metrics[metric] = 0
+ self.metrics[metric] += value
+
+ def gauge(self, metric: str, value: float):
+ """Set a gauge metric."""
+ self.metrics[metric] = value
+
+ def get_metrics(self):
+ """Get all metrics."""
+ return self.metrics
+
+
+metrics = MetricsCollector()
diff --git a/src/oal_agent/telemetry/tracing.py b/src/oal_agent/telemetry/tracing.py
new file mode 100644
index 0000000..afb4f2a
--- /dev/null
+++ b/src/oal_agent/telemetry/tracing.py
@@ -0,0 +1,22 @@
+"""Distributed tracing."""
+
+
+class Tracer:
+ """Distributed tracing implementation."""
+
+ def __init__(self):
+ """Initialize tracer."""
+ pass
+
+ def start_span(self, name: str):
+ """Start a new trace span."""
+ # TODO: Implement tracing
+ pass
+
+ def end_span(self):
+ """End current span."""
+ # TODO: Implement tracing
+ pass
+
+
+tracer = Tracer()
diff --git a/src/oal_agent/tools/__init__.py b/src/oal_agent/tools/__init__.py
new file mode 100644
index 0000000..4c6ce7b
--- /dev/null
+++ b/src/oal_agent/tools/__init__.py
@@ -0,0 +1 @@
+"""Tools module."""
diff --git a/src/oal_agent/tools/mythril.py b/src/oal_agent/tools/mythril.py
new file mode 100644
index 0000000..9d93f53
--- /dev/null
+++ b/src/oal_agent/tools/mythril.py
@@ -0,0 +1,14 @@
+"""Mythril tool integration."""
+
+
+class MythrilTool:
+ """Integration with Mythril symbolic analyzer."""
+
+ def __init__(self):
+ """Initialize Mythril tool."""
+ pass
+
+ async def analyze(self, contract_code: str):
+ """Run Mythril analysis."""
+ # TODO: Implement Mythril integration
+ pass
diff --git a/src/oal_agent/tools/sandbox.py b/src/oal_agent/tools/sandbox.py
new file mode 100644
index 0000000..6c0f7a6
--- /dev/null
+++ b/src/oal_agent/tools/sandbox.py
@@ -0,0 +1,14 @@
+"""Sandbox tool for safe contract execution."""
+
+
+class SandboxTool:
+ """Provides sandboxed environment for contract execution."""
+
+ def __init__(self):
+ """Initialize sandbox."""
+ pass
+
+ async def execute(self, contract_code: str):
+ """Execute contract in sandbox."""
+ # TODO: Implement sandbox execution
+ pass
diff --git a/src/oal_agent/tools/slither.py b/src/oal_agent/tools/slither.py
new file mode 100644
index 0000000..35d866d
--- /dev/null
+++ b/src/oal_agent/tools/slither.py
@@ -0,0 +1,14 @@
+"""Slither tool integration."""
+
+
+class SlitherTool:
+ """Integration with Slither static analyzer."""
+
+ def __init__(self):
+ """Initialize Slither tool."""
+ pass
+
+ async def analyze(self, contract_path: str):
+ """Run Slither analysis."""
+ # TODO: Implement Slither integration
+ pass
diff --git a/src/oal_agent/utils/__init__.py b/src/oal_agent/utils/__init__.py
new file mode 100644
index 0000000..285e1d4
--- /dev/null
+++ b/src/oal_agent/utils/__init__.py
@@ -0,0 +1 @@
+"""Utilities module."""
diff --git a/src/oal_agent/utils/env.py b/src/oal_agent/utils/env.py
new file mode 100644
index 0000000..378518a
--- /dev/null
+++ b/src/oal_agent/utils/env.py
@@ -0,0 +1,16 @@
+"""Environment utilities."""
+
+import os
+
+
+def get_env(key: str, default: str = None) -> str:
+ """Get environment variable."""
+ return os.getenv(key, default)
+
+
+def require_env(key: str) -> str:
+ """Get required environment variable."""
+ value = os.getenv(key)
+ if value is None:
+ raise ValueError(f"Required environment variable {key} is not set")
+ return value
diff --git a/src/oal_agent/utils/fs.py b/src/oal_agent/utils/fs.py
new file mode 100644
index 0000000..abab203
--- /dev/null
+++ b/src/oal_agent/utils/fs.py
@@ -0,0 +1,22 @@
+"""File system utilities."""
+
+import os
+from pathlib import Path
+
+
+def ensure_dir(path: str):
+ """Ensure directory exists."""
+ Path(path).mkdir(parents=True, exist_ok=True)
+
+
+def read_file(path: str) -> str:
+ """Read file contents."""
+ with open(path, 'r') as f:
+ return f.read()
+
+
+def write_file(path: str, content: str):
+ """Write content to file."""
+ ensure_dir(os.path.dirname(path))
+ with open(path, 'w') as f:
+ f.write(content)
diff --git a/src/oal_agent/utils/timing.py b/src/oal_agent/utils/timing.py
new file mode 100644
index 0000000..00b7d89
--- /dev/null
+++ b/src/oal_agent/utils/timing.py
@@ -0,0 +1,20 @@
+"""Timing utilities."""
+
+import time
+from contextlib import contextmanager
+
+
+@contextmanager
+def timer(name: str):
+ """Context manager for timing operations."""
+ start = time.time()
+ try:
+ yield
+ finally:
+ duration = time.time() - start
+ print(f"{name} took {duration:.2f} seconds")
+
+
+def timestamp() -> float:
+ """Get current timestamp."""
+ return time.time()
diff --git a/test_enhanced_audit.py b/test_enhanced_audit.py
deleted file mode 100644
index 5c217ce..0000000
--- a/test_enhanced_audit.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test script for enhanced vulnerability detection with detailed fixes
-"""
-
-import asyncio
-import os
-from pathlib import Path
-from audit_engine.core.engine import AuditEngine
-from audit_engine.core.schemas import AnalysisRequest
-from audit_engine.core.config import AuditConfig, StaticAnalysisConfig
-from audit_engine.core.vulnerability_patterns import get_detection_tips, get_vulnerability_info, get_fix_suggestions
-from pprint import pprint
-
-async def test_enhanced_audit():
- """Test the enhanced vulnerability detection"""
-
- print("π Enhanced Vulnerability Detection Test")
- print("=" * 60)
-
- # Initialize the audit engine with configuration
- config = AuditConfig(
- log_level="INFO",
- enable_parallel_execution=False, # Sequential for clearer output
- static_analysis=StaticAnalysisConfig(
- enable_mythril=False,
- enable_manticore=True,
- enable_slither=False
- )
- )
-
- engine = AuditEngine(config=config)
-
- # Test contracts with different vulnerability types
- test_contracts = [
- "audit_engine/smart_contracts/reentrancy/16925.sol",
- "audit_engine/smart_contracts/integer_overflow/io1.sol",
- "audit_engine/smart_contracts/timestamp/ts1.sol"
- ]
-
- print(f"\nπ Testing enhanced detection on {len(test_contracts)} contracts")
-
- request = AnalysisRequest(
- contract_paths=test_contracts,
- analysis_type="comprehensive",
- include_static=True,
- include_dynamic=False,
- include_scoring=True,
- max_analysis_time=120
- )
-
- result = await engine.analyze(request)
-
- print(f"\nβ
Analysis completed in {result.duration_seconds:.2f} seconds")
- print(f"π Found {result.total_findings} vulnerabilities\n")
-
- # Display enhanced findings with detailed information
- for i, finding in enumerate(result.findings, 1):
- print(f"\n{'='*80}")
- print(f"Finding {i}:")
- print(f"{'='*80}")
-
- # Basic Information
- print(f"Description: {finding.description}")
- print(f"Severity: {finding.severity}")
- print(f"SWC ID: {finding.swc_id or 'N/A'}")
- print(f"Confidence: {finding.confidence:.2f}")
-
- # Location Information
- print(f"\nVulnerable File: {finding.file_path}")
- if finding.line_span:
- print(f"Lines: {finding.line_span.start}-{finding.line_span.end}")
- if finding.function_name:
- print(f"Function: {finding.function_name}")
-
- # Detection Tips
- if finding.swc_id:
- detection_tips = get_detection_tips(finding.swc_id)
- if detection_tips:
- print("\nDetection Tips:")
- for tip in detection_tips:
- print(f" β’ {tip}")
- if finding.vulnerability_details:
- print(f"\nVulnerability Details:")
- print(f" Name: {finding.vulnerability_details.get('name')}")
- print(f" Impact: {finding.vulnerability_details.get('impact')}")
- print(f"\nDetailed Description:")
- print(f"{finding.vulnerability_details.get('description')}")
-
- if finding.suggested_fixes:
- print(f"\nSuggested Fixes:")
- for j, fix in enumerate(finding.suggested_fixes, 1):
- print(f"\nFix {j}: {fix.get('pattern')}")
- print("-" * 40)
- print(f"Description: {fix.get('description')}")
- print("\nExample Implementation:")
- print(f"{fix.get('code_example')}")
-
- if finding.recommendations:
- print("\nRecommendations:")
- for rec in finding.recommendations:
- print(f"- {rec}")
-
- print("\n")
-
-if __name__ == "__main__":
- asyncio.run(test_enhanced_audit())
-
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..f1b390f
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1 @@
+"""Tests module."""
diff --git a/tests/e2e/__init__.py b/tests/e2e/__init__.py
new file mode 100644
index 0000000..98b50e4
--- /dev/null
+++ b/tests/e2e/__init__.py
@@ -0,0 +1 @@
+"""End-to-end tests."""
diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py
new file mode 100644
index 0000000..18ef0e3
--- /dev/null
+++ b/tests/fixtures/__init__.py
@@ -0,0 +1 @@
+"""Test fixtures."""
diff --git a/tests/fixtures/contracts/.gitkeep b/tests/fixtures/contracts/.gitkeep
new file mode 100644
index 0000000..671a988
--- /dev/null
+++ b/tests/fixtures/contracts/.gitkeep
@@ -0,0 +1,3 @@
+# Test Contract Fixtures
+
+This directory contains sample smart contracts for testing.
diff --git a/tests/fixtures/responses/.gitkeep b/tests/fixtures/responses/.gitkeep
new file mode 100644
index 0000000..a9eaa7c
--- /dev/null
+++ b/tests/fixtures/responses/.gitkeep
@@ -0,0 +1,3 @@
+# Test Response Fixtures
+
+This directory contains mock API responses for testing.
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000..c210fac
--- /dev/null
+++ b/tests/integration/__init__.py
@@ -0,0 +1 @@
+"""Integration tests."""
diff --git a/tests/load/__init__.py b/tests/load/__init__.py
new file mode 100644
index 0000000..573ad5e
--- /dev/null
+++ b/tests/load/__init__.py
@@ -0,0 +1 @@
+"""Load tests."""
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e0310a0
--- /dev/null
+++ b/tests/unit/__init__.py
@@ -0,0 +1 @@
+"""Unit tests."""