diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml
new file mode 100644
index 0000000..2c6980e
--- /dev/null
+++ b/.github/workflows/python-ci.yml
@@ -0,0 +1,28 @@
+name: Python CI
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.10'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r requirements.txt
+    - name: Lint with flake8
+      run: |
+        pip install flake8
+        flake8 memory_benchmark.py --max-line-length=120
+    - name: Run script (smoke test)
+      run: |
+        python memory_benchmark.py --sizes 128
diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
new file mode 100644
index 0000000..c4d8e78
--- /dev/null
+++ b/.github/workflows/python-test.yml
@@ -0,0 +1,24 @@
+name: Python Test
+
+on:
+  push:
+    branches: [ feature/benchmark-enhancements ]
+  pull_request:
+    branches: [ feature/benchmark-enhancements ]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.10'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r requirements.txt
+    - name: Run benchmark tests
+      run: |
+        python test_benchmark.py
diff --git a/memory_benchmark.py b/memory_benchmark.py
index e38ca83..30d4db0 100644
--- a/memory_benchmark.py
+++ b/memory_benchmark.py
@@ -12,87 +12,102 @@
 RESULTS_FILE = "memory_benchmark_results.txt"
 CSV_FILE = "memory_benchmark_results.csv"
 
+
 def memory_read_write_test(size_mb=1024):
-  print(Fore.CYAN + f"🧠 Allocating {size_mb}MB array...")
-  arr = np.random.rand(size_mb * 1024 * 1024 // 8) # float64 = 8 bytes
-  print(Fore.YELLOW + "🟡 Starting write test...")
-  start = time.time()
-  arr[:] = 1.2345
-  write_time = time.time() - start
-  print(Fore.GREEN + f"🟢 Write time: {write_time:.3f}s")
-
-  print(Fore.YELLOW + "🟡 Starting read test...")
-  start = time.time()
-  _ = arr.sum()
-  read_time = time.time() - start
-  print(Fore.GREEN + f"🟢 Read time: {read_time:.3f}s")
-
-  return write_time, read_time
+    print(Fore.CYAN + f"🧠 Allocating {size_mb}MB array...")
+    arr = np.random.rand(size_mb * 1024 * 1024 // 8)  # float64 = 8 bytes
+    print(Fore.YELLOW + "🟡 Starting write test...")
+    start = time.time()
+    arr[:] = 1.2345
+    write_time = time.time() - start
+    print(Fore.GREEN + f"🟢 Write time: {write_time:.3f}s")
+
+    print(Fore.YELLOW + "🟡 Starting read test...")
+    start = time.time()
+    _ = arr.sum()
+    read_time = time.time() - start
+    print(Fore.GREEN + f"🟢 Read time: {read_time:.3f}s")
+
+    return write_time, read_time
 
+
 def log_results(write_time, read_time, size_mb):
-  cpu_info = platform.processor()
-  with open(RESULTS_FILE, "a") as f:
-    f.write(f"Test size: {size_mb}MB\n")
-    f.write(f"Write time: {write_time:.3f}s\n")
-    f.write(f"Read time: {read_time:.3f}s\n")
-    f.write(f"RAM total: {psutil.virtual_memory().total / (1024**3):.2f} GB\n")
-    f.write(f"RAM available: {psutil.virtual_memory().available / (1024**3):.2f} GB\n")
-    f.write(f"Timestamp: {time.ctime()}\n")
-    if cpu_info:
-      f.write(f"CPU: {cpu_info}\n")
-    f.write(f"Machine: {platform.machine()}\n")
-    f.write(f"OS: {platform.system()} {platform.release()}\n")
-    f.write("-"*40 + "\n")
-
-  # Write to CSV
-  write_header = not os.path.exists(CSV_FILE)
-  with open(CSV_FILE, "a", newline='') as csvfile:
-    writer = csv.writer(csvfile)
-    if write_header:
-      writer.writerow([
-        "Test Size (MB)", "Write Time (s)", "Read Time (s)", "RAM Total (GB)", "RAM Available (GB)", "Timestamp", "CPU", "Machine", "OS"
-      ])
-    writer.writerow([
-      size_mb,
-      f"{write_time:.3f}",
-      f"{read_time:.3f}",
-      f"{psutil.virtual_memory().total / (1024**3):.2f}",
-      f"{psutil.virtual_memory().available / (1024**3):.2f}",
-      time.ctime(),
-      platform.processor(),
-      platform.machine(),
-      f"{platform.system()} {platform.release()}"
-    ])
+    cpu_info = platform.processor()
+    with open(RESULTS_FILE, "a") as f:
+        f.write(f"Test size: {size_mb}MB\n")
+        f.write(f"Write time: {write_time:.3f}s\n")
+        f.write(f"Read time: {read_time:.3f}s\n")
+        f.write(f"RAM total: {psutil.virtual_memory().total / (1024**3):.2f} GB\n")
+        f.write(f"RAM available: {psutil.virtual_memory().available / (1024**3):.2f} GB\n")
+        f.write(f"Timestamp: {time.ctime()}\n")
+        if cpu_info:
+            f.write(f"CPU: {cpu_info}\n")
+        f.write(f"Machine: {platform.machine()}\n")
+        f.write(f"OS: {platform.system()} {platform.release()}\n")
+        f.write("-"*40 + "\n")
+
+    # Write to CSV
+    write_header = not os.path.exists(CSV_FILE)
+    with open(CSV_FILE, "a", newline='') as csvfile:
+        writer = csv.writer(csvfile)
+        if write_header:
+            headers = [
+                "Test Size (MB)", "Write Time (s)", "Read Time (s)",
+                "RAM Total (GB)", "RAM Available (GB)", "Timestamp",
+                "CPU", "Machine", "OS"
+            ]
+            writer.writerow(headers)
+        writer.writerow([
+            size_mb,
+            f"{write_time:.3f}",
+            f"{read_time:.3f}",
+            f"{psutil.virtual_memory().total / (1024**3):.2f}",
+            f"{psutil.virtual_memory().available / (1024**3):.2f}",
+            time.ctime(),
+            platform.processor(),
+            platform.machine(),
+            f"{platform.system()} {platform.release()}"
+        ])
 
+
 def main():
-  parser = argparse.ArgumentParser(description="Memory Benchmark Script")
-  parser.add_argument(
-    "--sizes",
-    nargs="+",
-    type=int,
-    help="List of test sizes in MB (e.g. --sizes 1024 2048 4096 8192)",
-    default=[1024, 2048, 4096, 8192]
-  )
-  args = parser.parse_args()
-  test_sizes = args.sizes
-
-  print(Fore.MAGENTA + Style.BRIGHT + "\n📊 Memory Benchmark Results\n" + "="*30)
-  cpu_info = platform.processor()
-  sysinfo = f"Machine: {platform.machine()} | OS: {platform.system()} {platform.release()}"
-  if cpu_info:
-    print(Fore.YELLOW + f"System Info: CPU: {cpu_info} | {sysinfo}")
-  else:
-    print(Fore.YELLOW + f"System Info: {sysinfo}")
-  print(f"{'Size':<8}{'Write Time':<15}{'Read Time':<15}{'Total RAM':<12}{'Available':<12}")
-  print("-"*62)
-  for size_mb in test_sizes:
-    print(Fore.BLUE + f"\n🧪 Testing {size_mb // 1024}GB...")
-    write_time, read_time = memory_read_write_test(size_mb)
-    total_ram = psutil.virtual_memory().total / (1024**3)
-    avail_ram = psutil.virtual_memory().available / (1024**3)
-    print(f"{str(size_mb // 1024)+'GB':<8}{write_time:<15.3f}{read_time:<15.3f}{total_ram:<12.2f}{avail_ram:<12.2f} 📏")
-    log_results(write_time, read_time, size_mb)
-  print(Fore.MAGENTA + Style.BRIGHT + f"\n✅ Results logged to {RESULTS_FILE} and {CSV_FILE}\n")
+    parser = argparse.ArgumentParser(description="Memory Benchmark Script")
+    parser.add_argument(
+        "--sizes",
+        nargs="+",
+        type=int,
+        help="List of test sizes in MB (e.g. --sizes 1024 2048 4096 8192)",
+        default=[1024, 2048, 4096, 8192]
+    )
+    args = parser.parse_args()
+    test_sizes = args.sizes
+
+    print(Fore.MAGENTA + Style.BRIGHT + "\n📊 Memory Benchmark Results\n" + "="*30)
+    cpu_info = platform.processor()
+    machine_info = f"Machine: {platform.machine()}"
+    os_info = f"OS: {platform.system()} {platform.release()}"
+    sysinfo = f"{machine_info} | {os_info}"
+    if cpu_info:
+        print(Fore.YELLOW + f"System Info: CPU: {cpu_info} | {sysinfo}")
+    else:
+        print(Fore.YELLOW + f"System Info: {sysinfo}")
+
+    headers = f"{'Size':<8}{'Write Time':<15}{'Read Time':<15}{'Total RAM':<12}{'Available':<12}"
+    print(headers)
+    print("-"*62)
+
+    for size_mb in test_sizes:
+        print(Fore.BLUE + f"\n🧪 Testing {size_mb // 1024}GB...")
+        write_time, read_time = memory_read_write_test(size_mb)
+        total_ram = psutil.virtual_memory().total / (1024**3)
+        avail_ram = psutil.virtual_memory().available / (1024**3)
+        size_str = str(size_mb // 1024) + 'GB'
+        result_line = f"{size_str:<8}{write_time:<15.3f}{read_time:<15.3f}{total_ram:<12.2f}{avail_ram:<12.2f} 📏"
+        print(result_line)
+        log_results(write_time, read_time, size_mb)
+
+    print(Fore.MAGENTA + Style.BRIGHT + f"\n✅ Results logged to {RESULTS_FILE} and {CSV_FILE}\n")
 
+
 if __name__ == "__main__":
-  main()
+    main()
diff --git a/requirements.txt b/requirements.txt
index e69de29..492feeb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+colorama==0.4.6
+numpy==1.26.4
+psutil==7.0.0
diff --git a/test_benchmark.py b/test_benchmark.py
new file mode 100644
index 0000000..3d427a9
--- /dev/null
+++ b/test_benchmark.py
@@ -0,0 +1,25 @@
+import os
+import csv
+import subprocess
+
+def test_script_runs():
+    result = subprocess.run(['python', 'memory_benchmark.py', '--sizes', '128'], capture_output=True)
+    assert result.returncode == 0, f"Script failed: {result.stderr.decode()}"
+
+def test_output_files():
+    assert os.path.exists('memory_benchmark_results.txt'), "Results txt file not found"
+    assert os.path.exists('memory_benchmark_results.csv'), "Results csv file not found"
+
+def test_csv_columns():
+    with open('memory_benchmark_results.csv') as f:
+        reader = csv.reader(f)
+        header = next(reader)
+        expected = ["Test Size (MB)", "Write Time (s)", "Read Time (s)", "RAM Total (GB)", "RAM Available (GB)", "Timestamp", "CPU", "Machine", "OS"]
+        for col in expected:
+            assert col in header, f"Missing column: {col}"
+
+if __name__ == "__main__":
+    test_script_runs()
+    test_output_files()
+    test_csv_columns()
+    print("All tests passed!")
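
A note on test_benchmark.py as added above: test_output_files() and test_csv_columns() only pass if test_script_runs() has already executed in the same directory (or stale result files are left over from an earlier run), because the script writes memory_benchmark_results.txt and memory_benchmark_results.csv into the current working directory. Below is a minimal order-independent sketch of the same check; run_benchmark() and test_csv_columns_isolated() are illustrative names, not part of this PR, and it assumes the script can be pointed at a sandbox via subprocess's cwd argument.

import csv
import os
import subprocess
import sys
import tempfile


def run_benchmark(workdir):
    # memory_benchmark.py writes its result files into the current
    # working directory, so run it with cwd set to a throwaway sandbox
    script = os.path.abspath("memory_benchmark.py")
    result = subprocess.run(
        [sys.executable, script, "--sizes", "128"],
        capture_output=True,
        cwd=workdir,
    )
    assert result.returncode == 0, result.stderr.decode()


def test_csv_columns_isolated():
    # each test produces and inspects its own fresh output files,
    # so no ordering between tests is required
    with tempfile.TemporaryDirectory() as tmp:
        run_benchmark(tmp)
        csv_path = os.path.join(tmp, "memory_benchmark_results.csv")
        with open(csv_path, newline="") as f:
            header = next(csv.reader(f))
        assert "Write Time (s)" in header, f"Missing column in {header}"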
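
The benchmark logs raw elapsed times only. Approximate bandwidth figures can be derived from the CSV afterwards; here is a small post-processing sketch, assuming the column names written by log_results() above. The MB/s arithmetic (test size divided by elapsed time) is illustrative and not something the script itself reports.

import csv

with open("memory_benchmark_results.csv", newline="") as f:
    for row in csv.DictReader(f):
        size_mb = float(row["Test Size (MB)"])
        write_s = float(row["Write Time (s)"])
        read_s = float(row["Read Time (s)"])
        # throughput ~= megabytes touched / elapsed seconds
        print(f"{size_mb:.0f}MB: write ~{size_mb / write_s:.0f} MB/s, "
              f"read ~{size_mb / read_s:.0f} MB/s")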