28 changes: 28 additions & 0 deletions .github/workflows/python-ci.yml
@@ -0,0 +1,28 @@
name: Python CI

on:
push:
branches: [ master ]
pull_request:
branches: [ master ]

jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Lint with flake8
run: |
pip install flake8
flake8 memory_benchmark.py --max-line-length=120
- name: Run script (smoke test)
run: |
python memory_benchmark.py --sizes 128
24 changes: 24 additions & 0 deletions .github/workflows/python-test.yml
@@ -0,0 +1,24 @@
name: Python Test

on:
push:
branches: [ feature/benchmark-enhancements ]
pull_request:
branches: [ feature/benchmark-enhancements ]

jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run benchmark tests
run: |
python test_benchmark.py
169 changes: 92 additions & 77 deletions memory_benchmark.py
@@ -12,87 +12,102 @@
RESULTS_FILE = "memory_benchmark_results.txt"
CSV_FILE = "memory_benchmark_results.csv"


def memory_read_write_test(size_mb=1024):
    print(Fore.CYAN + f"🧠 Allocating {size_mb}MB array...")
    arr = np.random.rand(size_mb * 1024 * 1024 // 8)  # float64 = 8 bytes
    print(Fore.YELLOW + "🟡 Starting write test...")
    start = time.time()
    arr[:] = 1.2345
    write_time = time.time() - start
    print(Fore.GREEN + f"🟢 Write time: {write_time:.3f}s")

    print(Fore.YELLOW + "🟡 Starting read test...")
    start = time.time()
    _ = arr.sum()
    read_time = time.time() - start
    print(Fore.GREEN + f"🟢 Read time: {read_time:.3f}s")

    return write_time, read_time


def log_results(write_time, read_time, size_mb):
    cpu_info = platform.processor()
    with open(RESULTS_FILE, "a") as f:
        f.write(f"Test size: {size_mb}MB\n")
        f.write(f"Write time: {write_time:.3f}s\n")
        f.write(f"Read time: {read_time:.3f}s\n")
        f.write(f"RAM total: {psutil.virtual_memory().total / (1024**3):.2f} GB\n")
        f.write(f"RAM available: {psutil.virtual_memory().available / (1024**3):.2f} GB\n")
        f.write(f"Timestamp: {time.ctime()}\n")
        if cpu_info:
            f.write(f"CPU: {cpu_info}\n")
        f.write(f"Machine: {platform.machine()}\n")
        f.write(f"OS: {platform.system()} {platform.release()}\n")
        f.write("-"*40 + "\n")

    # Write to CSV
    write_header = not os.path.exists(CSV_FILE)
    with open(CSV_FILE, "a", newline='') as csvfile:
        writer = csv.writer(csvfile)
        if write_header:
            headers = [
                "Test Size (MB)", "Write Time (s)", "Read Time (s)",
                "RAM Total (GB)", "RAM Available (GB)", "Timestamp",
                "CPU", "Machine", "OS"
            ]
            writer.writerow(headers)
        writer.writerow([
            size_mb,
            f"{write_time:.3f}",
            f"{read_time:.3f}",
            f"{psutil.virtual_memory().total / (1024**3):.2f}",
            f"{psutil.virtual_memory().available / (1024**3):.2f}",
            time.ctime(),
            platform.processor(),
            platform.machine(),
            f"{platform.system()} {platform.release()}"
        ])


def main():
    parser = argparse.ArgumentParser(description="Memory Benchmark Script")
    parser.add_argument(
        "--sizes",
        nargs="+",
        type=int,
        help="List of test sizes in MB (e.g. --sizes 1024 2048 4096 8192)",
        default=[1024, 2048, 4096, 8192]
    )
    args = parser.parse_args()
    test_sizes = args.sizes

    print(Fore.MAGENTA + Style.BRIGHT + "\n📊 Memory Benchmark Results\n" + "="*30)
    cpu_info = platform.processor()
    machine_info = f"Machine: {platform.machine()}"
    os_info = f"OS: {platform.system()} {platform.release()}"
    sysinfo = f"{machine_info} | {os_info}"
    if cpu_info:
        print(Fore.YELLOW + f"System Info: CPU: {cpu_info} | {sysinfo}")
    else:
        print(Fore.YELLOW + f"System Info: {sysinfo}")

    headers = f"{'Size':<8}{'Write Time':<15}{'Read Time':<15}{'Total RAM':<12}{'Available':<12}"
    print(headers)
    print("-"*62)

    for size_mb in test_sizes:
        print(Fore.BLUE + f"\n🧪 Testing {size_mb // 1024}GB...")
        write_time, read_time = memory_read_write_test(size_mb)
        total_ram = psutil.virtual_memory().total / (1024**3)
        avail_ram = psutil.virtual_memory().available / (1024**3)
        size_str = str(size_mb // 1024) + 'GB'
        result_line = f"{size_str:<8}{write_time:<15.3f}{read_time:<15.3f}{total_ram:<12.2f}{avail_ram:<12.2f} 📝"
        print(result_line)
        log_results(write_time, read_time, size_mb)

    print(Fore.MAGENTA + Style.BRIGHT + f"\n✅ Results logged to {RESULTS_FILE} and {CSV_FILE}\n")


if __name__ == "__main__":
    main()
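
For a quick look at the numbers the script appends, something along these lines should work. This is a minimal sketch, not part of this diff; it assumes the CSV column names written by log_results above, and the summarize helper name is hypothetical.

import csv

# Minimal sketch (not part of this PR): summarize memory_benchmark_results.csv,
# assuming the header columns written by log_results above.
def summarize(csv_path="memory_benchmark_results.csv"):
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    for row in rows:
        size_mb = float(row["Test Size (MB)"])
        write_s = float(row["Write Time (s)"])
        read_s = float(row["Read Time (s)"])
        # Convert elapsed times to rough MB/s throughput; guard against zero elapsed time.
        write_rate = size_mb / write_s if write_s else float("inf")
        read_rate = size_mb / read_s if read_s else float("inf")
        print(f"{size_mb:.0f} MB: write {write_rate:.0f} MB/s, read {read_rate:.0f} MB/s")

if __name__ == "__main__":
    summarize()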
3 changes: 3 additions & 0 deletions requirements.txt
@@ -0,0 +1,3 @@
colorama==0.4.6
numpy==1.26.4
psutil==7.0.0
25 changes: 25 additions & 0 deletions test_benchmark.py
@@ -0,0 +1,25 @@
import os
import csv
import subprocess

def test_script_runs():
result = subprocess.run(['python', 'memory_benchmark.py', '--sizes', '128'], capture_output=True)
assert result.returncode == 0, f"Script failed: {result.stderr.decode()}"

def test_output_files():
assert os.path.exists('memory_benchmark_results.txt'), "Results txt file not found"
assert os.path.exists('memory_benchmark_results.csv'), "Results csv file not found"

def test_csv_columns():
with open('memory_benchmark_results.csv') as f:
reader = csv.reader(f)
header = next(reader)
expected = ["Test Size (MB)", "Write Time (s)", "Read Time (s)", "RAM Total (GB)", "RAM Available (GB)", "Timestamp", "CPU", "Machine", "OS"]
for col in expected:
assert col in header, f"Missing column: {col}"

if __name__ == "__main__":
test_script_runs()
test_output_files()
test_csv_columns()
print("All tests passed!")