Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
119 changes: 119 additions & 0 deletions benchmarks/bench_frate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
import gamms
import random

from typing import Callable

def create_grid(graph, n):
    """Populate *graph* with an n-by-n 4-connected grid.

    Node ``(row, col)`` gets id ``row * n + col`` and coordinates
    ``(row * 100.0, col * 100.0)``.  Every adjacent pair is connected by
    two directed edges (one per direction), each of length 1.0, with edge
    ids assigned sequentially in creation order.
    """
    next_edge_id = 0
    for row in range(n):
        for col in range(n):
            node_id = row * n + col
            graph.add_node({'id': node_id, 'x': row * 100.0, 'y': col * 100.0})
            if row > 0:
                # Link to the node one row up, in both directions.
                above = (row - 1) * n + col
                graph.add_edge({'id': next_edge_id, 'source': above, 'target': node_id, 'length': 1.0})
                graph.add_edge({'id': next_edge_id + 1, 'source': node_id, 'target': above, 'length': 1.0})
                next_edge_id += 2
            if col > 0:
                # Link to the node one column left, in both directions.
                left = row * n + (col - 1)
                graph.add_edge({'id': next_edge_id, 'source': left, 'target': node_id, 'length': 1.0})
                graph.add_edge({'id': next_edge_id + 1, 'source': node_id, 'target': left, 'length': 1.0})
                next_edge_id += 2

def create_test(
    n: int = 10,
    n_agents: int = 10,
    map_sensors: bool = False,
) -> Callable[[], None]:
    """Build a gamms benchmark scenario and return its simulation loop.

    Sets up an ``n`` x ``n`` grid graph, per-agent sensors, and
    ``n_agents`` agents, then returns a zero-argument ``loop`` callable
    that runs 100 random-walk steps.  All expensive construction happens
    here, so a harness timing the returned callable measures stepping only.

    Args:
        n: Side length of the square grid (``n * n`` nodes).
        n_agents: Number of agents; agent ``i`` starts on node ``i``.
        map_sensors: When True, each agent also gets a MAP sensor with
            range 200 in addition to its NEIGHBOR sensor.

    Returns:
        A no-argument callable that runs the 100-step simulation.
    """
    print(f"Creating test with n={n}, n_agents={n_agents}, map_sensors={map_sensors}")
    # One NEIGHBOR sensor per agent, plus an optional (heavier) MAP sensor;
    # the benchmark pairs compare runs with and without the latter.
    sensor_config = {}
    for i in range(n_agents):
        sensor_config[f"neigh_{i}"] = {
            "type": gamms.sensor.SensorType.NEIGHBOR,
        }
        if map_sensors:
            sensor_config[f"map_{i}"] = {
                "type": gamms.sensor.SensorType.MAP,
                "sensor_range": 200,
            }

    # Agent i starts on node i and is wired to its own per-agent sensors.
    agent_config = {}
    for i in range(n_agents):
        agent_config[f"agent_{i}"] = {'start_node_id': i}
        if map_sensors:
            agent_config[f"agent_{i}"]['sensors'] = [f"map_{i}", f"neigh_{i}"]
        else:
            agent_config[f"agent_{i}"]['sensors'] = [f"neigh_{i}"]

    # ERROR-level logging keeps benchmark output (and logging overhead) low.
    ctx = gamms.create_context(logger_config={'level':'ERROR'})
    create_grid(ctx.graph.graph, n)

    # NOTE(review): 'type' is passed positionally AND remains a key inside
    # **sensor -- whether that collides depends on create_sensor's
    # signature; confirm against the gamms API.
    for name, sensor in sensor_config.items():
        ctx.sensor.create_sensor(name, sensor['type'], **sensor)


    for name, agent in agent_config.items():
        ctx.agent.create_agent(name, **agent)

    def loop():
        """Run 100 steps where every agent picks a random neighbouring node."""
        print(f"Starting simulation with n={n}, n_agents={n_agents}, map_sensors={map_sensors}")
        for _ in range(100):
            # Snapshot all states first, then decide all actions, so no
            # agent observes another agent's mid-step update.
            states = {}
            for agent in ctx.agent.create_iter():
                states[agent.name] = agent.get_state()
            for agent in ctx.agent.create_iter():
                state = states[agent.name]
                # Random target from this agent's NEIGHBOR sensor reading;
                # index [1] is presumably the sensor's data payload -- TODO
                # confirm against the gamms sensor API.
                state['action'] = random.choice(state['sensor'][f"neigh_{agent.name.split('_')[1]}"][1])

            # NOTE(review): set_state() takes no arguments here -- presumably
            # the agent commits the state object mutated above via
            # get_state(); confirm against the gamms agent API.
            for agent in ctx.agent.create_iter():
                agent.set_state()

            ctx.visual.simulate()

    return loop

# Benchmark pairs consumed by the harness: each tuple compares a baseline
# agent count against ten times as many agents on the same 100x100 grid,
# first without and then with the extra MAP sensors.  Tuple shape is
# (baseline callable, comparison callable, description).
__benchmarks__ = [
    (
        create_test(n=100, n_agents=base, map_sensors=with_map),
        create_test(n=100, n_agents=10 * base, map_sensors=with_map),
        f"100x100 grid with {base} agents vs {10 * base} agents "
        f"with{'' if with_map else 'out'} map sensors",
    )
    for base in (10, 20, 30, 50)
    for with_map in (False, True)
]

Binary file added benchmarks/benchmark_performance100.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added benchmarks/benchmark_performance30.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
103 changes: 103 additions & 0 deletions benchmarks/scale.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
from memory_profiler import profile

import gamms
import random

from typing import Callable

def create_grid(graph, n):
    """Build an n-by-n 4-connected grid inside *graph*.

    Node ``(i, j)`` has id ``i * n + j`` at coordinates
    ``(i * 100.0, j * 100.0)``.  Each grid adjacency becomes a pair of
    opposite directed edges of length 1.0; edge ids count up from 0 in
    creation order.
    """
    eid = 0
    for i in range(n):
        for j in range(n):
            nid = i * n + j
            graph.add_node({'id': nid, 'x': i * 100.0, 'y': j * 100.0})
            # Collect already-created neighbours: first the node above
            # (previous row), then the node to the left (previous column).
            neighbours = []
            if i > 0:
                neighbours.append((i - 1) * n + j)
            if j > 0:
                neighbours.append(i * n + (j - 1))
            for other in neighbours:
                # One edge per direction so traversal is symmetric.
                graph.add_edge({'id': eid, 'source': other, 'target': nid, 'length': 1.0})
                graph.add_edge({'id': eid + 1, 'source': nid, 'target': other, 'length': 1.0})
                eid += 2

@profile  # memory_profiler: report line-by-line memory usage of the setup.
def create_test(
    n: int = 10,
    n_agents: int = 10,
    map_sensors: bool = False,
) -> Callable[[], None]:
    """Build a gamms scaling scenario and return its simulation loop.

    Identical setup to ``benchmarks/bench_frate.py``, but decorated with
    ``@profile`` so memory_profiler records the memory cost of scenario
    construction (graph, sensors, agents).

    Args:
        n: Side length of the square grid (``n * n`` nodes).
        n_agents: Number of agents; agent ``i`` starts on node ``i``.
        map_sensors: When True, each agent also gets a MAP sensor with
            range 200 in addition to its NEIGHBOR sensor.

    Returns:
        A no-argument callable that runs the 100-step simulation.
    """
    print(f"Creating test with n={n}, n_agents={n_agents}, map_sensors={map_sensors}")
    # One NEIGHBOR sensor per agent, plus an optional (heavier) MAP sensor.
    sensor_config = {}
    for i in range(n_agents):
        sensor_config[f"neigh_{i}"] = {
            "type": gamms.sensor.SensorType.NEIGHBOR,
        }
        if map_sensors:
            sensor_config[f"map_{i}"] = {
                "type": gamms.sensor.SensorType.MAP,
                "sensor_range": 200,
            }

    # Agent i starts on node i and is wired to its own per-agent sensors.
    agent_config = {}
    for i in range(n_agents):
        agent_config[f"agent_{i}"] = {'start_node_id': i}
        if map_sensors:
            agent_config[f"agent_{i}"]['sensors'] = [f"map_{i}", f"neigh_{i}"]
        else:
            agent_config[f"agent_{i}"]['sensors'] = [f"neigh_{i}"]

    # ERROR-level logging keeps profiling output readable.
    ctx = gamms.create_context(logger_config={'level':'ERROR'})
    create_grid(ctx.graph.graph, n)

    # NOTE(review): 'type' is passed positionally AND remains a key inside
    # **sensor -- whether that collides depends on create_sensor's
    # signature; confirm against the gamms API.
    for name, sensor in sensor_config.items():
        ctx.sensor.create_sensor(name, sensor['type'], **sensor)


    for name, agent in agent_config.items():
        ctx.agent.create_agent(name, **agent)

    def loop():
        """Run 100 steps where every agent picks a random neighbouring node."""
        print(f"Starting simulation with n={n}, n_agents={n_agents}, map_sensors={map_sensors}")
        for _ in range(100):
            # Snapshot all states first, then decide all actions, so no
            # agent observes another agent's mid-step update.
            states = {}
            for agent in ctx.agent.create_iter():
                states[agent.name] = agent.get_state()
            for agent in ctx.agent.create_iter():
                state = states[agent.name]
                # Random target from this agent's NEIGHBOR sensor reading;
                # index [1] is presumably the sensor's data payload -- TODO
                # confirm against the gamms sensor API.
                state['action'] = random.choice(state['sensor'][f"neigh_{agent.name.split('_')[1]}"][1])

            # NOTE(review): set_state() takes no arguments here -- presumably
            # the agent commits the state object mutated above via
            # get_state(); confirm against the gamms agent API.
            for agent in ctx.agent.create_iter():
                agent.set_state()

            ctx.visual.simulate()

    return loop



# Benchmark pairs consumed by the harness; tuple shape is
# (baseline callable, comparison callable, description).
# NOTE(review): every create_test() call runs at import time, so importing
# this module builds all eight scenarios up front -- including the heavy
# 1000x1000 grid -- before any benchmark executes.
__benchmarks__ = [
    (
        # Cost of adding MAP sensors on a small grid, agents held constant.
        create_test(n=10, n_agents=10, map_sensors=False),
        create_test(n=10, n_agents=10, map_sensors=True),
        "10x10 grid 10 agents wo vs. w map sensors",
    ),
    (
        # Graph-size scaling: 10x the side length, agents held constant.
        create_test(n=100, n_agents=10, map_sensors=False),
        create_test(n=1000, n_agents=10, map_sensors=False),
        "100x100 grid vs 1000x1000 grid",
    ),
    (
        # Agent-count scaling: 10x the agents, grid held constant.
        create_test(n=100, n_agents=10, map_sensors=False),
        create_test(n=100, n_agents=100, map_sensors=False),
        "10 agents vs 100 agents",
    ),
    (
        # Graph-size scaling with MAP sensors enabled.
        create_test(n=10, n_agents=10, map_sensors=True),
        create_test(n=100, n_agents=10, map_sensors=True),
        "10x10 grid vs 100x100 grid with map sensors",
    ),
]
111 changes: 111 additions & 0 deletions benchmarks/vis.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
"""Plot Intel vs Apple M3 results for the 30x30-grid benchmark suite."""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Re-importing the necessary libraries due to environment reset
# NOTE(review): this second import block is a harmless runtime no-op left
# over from a notebook session; it duplicates the imports above.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Intel vs M3 benchmark data for 30x30 grid
# Hand-pasted harness output; min == max == mean in every row (apparently a
# single sample per benchmark), so only the mean series is plotted below.
# The "_plus" columns hold the larger (10x agent count) run of each pair.
intel_benchmark_data = [
    {"benchmark": "30x30 grid with 10 agents vs 100 agents without map sensors", "min": 4.646, "max": 4.646, "mean": 4.646, "min_plus": 45.388, "max_plus": 45.388, "mean_plus": 45.388},
    {"benchmark": "30x30 grid with 10 agents vs 100 agents with map sensors", "min": 7.725, "max": 7.725, "mean": 7.725, "min_plus": 76.981, "max_plus": 76.981, "mean_plus": 76.981},
    {"benchmark": "30x30 grid with 20 agents vs 200 agents without map sensors", "min": 9.165, "max": 9.165, "mean": 9.165, "min_plus": 90.795, "max_plus": 90.795, "mean_plus": 90.795},
    {"benchmark": "30x30 grid with 20 agents vs 200 agents with map sensors", "min": 15.280, "max": 15.280, "mean": 15.280, "min_plus": 155.098, "max_plus": 155.098, "mean_plus": 155.098},
    {"benchmark": "30x30 grid with 30 agents vs 300 agents without map sensors", "min": 13.893, "max": 13.893, "mean": 13.893, "min_plus": 142.738, "max_plus": 142.738, "mean_plus": 142.738},
    {"benchmark": "30x30 grid with 30 agents vs 300 agents with map sensors", "min": 23.446, "max": 23.446, "mean": 23.446, "min_plus": 232.568, "max_plus": 232.568, "mean_plus": 232.568},
    {"benchmark": "30x30 grid with 50 agents vs 500 agents without map sensors", "min": 22.926, "max": 22.926, "mean": 22.926, "min_plus": 226.748, "max_plus": 226.748, "mean_plus": 226.748},
    {"benchmark": "30x30 grid with 50 agents vs 500 agents with map sensors", "min": 38.626, "max": 38.626, "mean": 38.626, "min_plus": 384.081, "max_plus": 384.081, "mean_plus": 384.081},
]

# Convert to DataFrame for Intel
df_intel = pd.DataFrame(intel_benchmark_data)

# M3 data as provided in the previous message
m3_benchmark_data = [
    {"benchmark": "30x30 grid with 10 agents vs 100 agents without map sensors", "min": 1.780, "max": 1.780, "mean": 1.780, "min_plus": 17.928, "max_plus": 17.928, "mean_plus": 17.928},
    {"benchmark": "30x30 grid with 10 agents vs 100 agents with map sensors", "min": 3.560, "max": 3.560, "mean": 3.560, "min_plus": 35.044, "max_plus": 35.044, "mean_plus": 35.044},
    {"benchmark": "30x30 grid with 20 agents vs 200 agents without map sensors", "min": 3.546, "max": 3.546, "mean": 3.546, "min_plus": 35.395, "max_plus": 35.395, "mean_plus": 35.395},
    {"benchmark": "30x30 grid with 20 agents vs 200 agents with map sensors", "min": 8.044, "max": 8.044, "mean": 8.044, "min_plus": 70.350, "max_plus": 70.350, "mean_plus": 70.350},
    {"benchmark": "30x30 grid with 30 agents vs 300 agents without map sensors", "min": 5.497, "max": 5.497, "mean": 5.497, "min_plus": 53.133, "max_plus": 53.133, "mean_plus": 53.133},
    {"benchmark": "30x30 grid with 30 agents vs 300 agents with map sensors", "min": 10.399, "max": 10.399, "mean": 10.399, "min_plus": 107.700, "max_plus": 107.700, "mean_plus": 107.700},
    {"benchmark": "30x30 grid with 50 agents vs 500 agents without map sensors", "min": 8.834, "max": 8.834, "mean": 8.834, "min_plus": 88.391, "max_plus": 88.391, "mean_plus": 88.391},
    {"benchmark": "30x30 grid with 50 agents vs 500 agents with map sensors", "min": 17.501, "max": 17.501, "mean": 17.501, "min_plus": 174.566, "max_plus": 174.566, "mean_plus": 174.566},
]

# Convert to DataFrame for M3
df_m3 = pd.DataFrame(m3_benchmark_data)

# Bar graph for Intel vs M3
fig, ax = plt.subplots(figsize=(12, 6))

# Bar width
bar_width = 0.35

# Bar positions for Intel and M3 (one grouped pair per benchmark)
index = np.arange(len(df_intel))

# Plot Intel data -- the two platforms' bars sit side by side per benchmark.
ax.bar(index, df_intel['mean'], bar_width, label='Intel', color='blue')
ax.bar(index + bar_width, df_m3['mean'], bar_width, label='M3', color='orange')

ax.set_xlabel('Benchmark')
# NOTE(review): the commented-out 100x100 section in this file labels the
# same columns 'Time (seconds)' -- confirm which unit the harness reports.
ax.set_ylabel('Steps per Second')
ax.set_title('Intel vs M3: Benchmark Performance (Mean)')
# Centre each tick label between the paired bars.
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(df_intel['benchmark'], rotation=45)
ax.legend()

plt.tight_layout()
plt.savefig('benchmark_performance30.png', dpi=300)
plt.show()


"""
100x 100
"""

# # Additional benchmark data
# benchmark_data_additional = [
# {"benchmark": "100x100 grid with 10 agents vs 100 agents without map sensors", "min": 20.179, "max": 20.179, "mean": 20.179, "min_plus": 202.741, "max_plus": 202.741, "mean_plus": 202.741},
# {"benchmark": "100x100 grid with 10 agents vs 100 agents with map sensors", "min": 40.049, "max": 40.049, "mean": 40.049, "min_plus": 401.329, "max_plus": 401.329, "mean_plus": 401.329},
# {"benchmark": "100x100 grid with 20 agents vs 200 agents without map sensors", "min": 40.683, "max": 40.683, "mean": 40.683, "min_plus": 407.157, "max_plus": 407.157, "mean_plus": 407.157},
# {"benchmark": "100x100 grid with 20 agents vs 200 agents with map sensors", "min": 80.102, "max": 80.102, "mean": 80.102, "min_plus": 802.887, "max_plus": 802.887, "mean_plus": 802.887},
# {"benchmark": "100x100 grid with 30 agents vs 300 agents without map sensors", "min": 60.435, "max": 60.435, "mean": 60.435, "min_plus": 603.162, "max_plus": 603.162, "mean_plus": 603.162},
# {"benchmark": "100x100 grid with 30 agents vs 300 agents with map sensors", "min": 119.779, "max": 119.779, "mean": 119.779, "min_plus": 1209.115, "max_plus": 1209.115, "mean_plus": 1209.115},
# {"benchmark": "100x100 grid with 50 agents vs 500 agents without map sensors", "min": 100.163, "max": 100.163, "mean": 100.163, "min_plus": 1019.049, "max_plus": 1019.049, "mean_plus": 1019.049},
# {"benchmark": "100x100 grid with 50 agents vs 500 agents with map sensors", "min": 200.549, "max": 200.549, "mean": 200.549, "min_plus": 2012.245, "max_plus": 2012.245, "mean_plus": 2012.245}
# ]

# # Convert to DataFrame
# df_additional = pd.DataFrame(benchmark_data_additional)

# # Plot the new data
# fig, ax = plt.subplots(2, 1, figsize=(10, 10))

# # Plot 1: Min, Max, and Mean values for additional benchmarks
# ax[0].plot(df_additional['benchmark'], df_additional['min'], label="Min", marker='o')
# ax[0].plot(df_additional['benchmark'], df_additional['max'], label="Max", marker='o')
# ax[0].plot(df_additional['benchmark'], df_additional['mean'], label="Mean", marker='o')
# ax[0].set_title('Benchmark Performance: Min, Max, and Mean (100x100 Grid)')
# ax[0].set_xlabel('Benchmark')
# ax[0].set_ylabel('Time (seconds)')
# ax[0].legend()
# ax[0].tick_params(axis='x', rotation=45)

# # Plot 2: Min (+) values for additional benchmarks
# ax[1].plot(df_additional['benchmark'], df_additional['min_plus'], label="Min (+)", marker='o')
# ax[1].plot(df_additional['benchmark'], df_additional['max_plus'], label="Max (+)", marker='o')
# ax[1].plot(df_additional['benchmark'], df_additional['mean_plus'], label="Mean (+)", marker='o')
# ax[1].set_title('Benchmark Performance (+): Min, Max, and Mean (100x100 Grid)')
# ax[1].set_xlabel('Benchmark')
# ax[1].set_ylabel('Time (seconds)')
# ax[1].legend()
# ax[1].tick_params(axis='x', rotation=45)

# plt.tight_layout()
# plt.savefig('benchmark_performance100.png', dpi=300)
# plt.show()