From 7a22ab16888d963a1fc6fd69ce9548c2420eb8a1 Mon Sep 17 00:00:00 2001 From: Joshua Hoffman Date: Mon, 30 Oct 2023 22:10:04 -0500 Subject: [PATCH 1/5] Fixed the project and updated it for python 3.10. The tests now pass and it can run. The only thing untested is using it in a full RL loop and the rendering features. --- lbforaging.py | 9 ++--- lbforaging/foraging/environment.py | 43 ++++++++++++++++----- setup.py | 3 +- tests/test_env.py | 60 ++++++++++++++++-------------- 4 files changed, 71 insertions(+), 44 deletions(-) diff --git a/lbforaging.py b/lbforaging.py index 037e9d7..654e5c0 100644 --- a/lbforaging.py +++ b/lbforaging.py @@ -1,6 +1,5 @@ import argparse import logging -import random import time import gym import numpy as np @@ -13,7 +12,7 @@ def _game_loop(env, render): """ """ - obs = env.reset() + _, _ = env.reset() done = False if render: @@ -24,7 +23,7 @@ def _game_loop(env, render): actions = env.action_space.sample() - nobs, nreward, ndone, _ = env.step(actions) + _, nreward, ndone, _ = env.step(actions) if sum(nreward) > 0: print(nreward) @@ -38,9 +37,9 @@ def _game_loop(env, render): def main(game_count=1, render=False): env = gym.make("Foraging-8x8-2p-2f-v2") - obs = env.reset() + env.reset() - for episode in range(game_count): + for _ in range(game_count): _game_loop(env, render) diff --git a/lbforaging/foraging/environment.py b/lbforaging/foraging/environment.py index 35497f7..67d16a8 100644 --- a/lbforaging/foraging/environment.py +++ b/lbforaging/foraging/environment.py @@ -109,13 +109,14 @@ def __init__( self._grid_observation = grid_observation self.action_space = gym.spaces.Tuple(tuple([gym.spaces.Discrete(6)] * len(self.players))) - self.observation_space = gym.spaces.Tuple(tuple([self._get_observation_space()] * len(self.players))) + self.observation_space = gym.spaces.Tuple( + tuple([self._get_observation_space()] * len(self.players))) self.viewer = None self.n_agents = len(self.players) - def seed(self, seed=None): + def seed(self, seed=0): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -159,7 +160,15 @@ def _get_observation_space(self): min_obs = np.stack([agents_min, foods_min, access_min]) max_obs = np.stack([agents_max, foods_max, access_max]) - return gym.spaces.Box(np.array(min_obs), np.array(max_obs), dtype=np.float32) + low_obs = np.array(min_obs) + high_obs = np.array(max_obs) + assert len(low_obs) == len(high_obs) + composed_obs_space = gym.spaces.Box( + low=low_obs, + high=high_obs, + shape=[len(low_obs)], + dtype=np.float32) + return composed_obs_space @classmethod def from_obs(cls, obs): @@ -202,6 +211,14 @@ def _gen_valid_moves(self): for player in self.players } + def test_gen_valid_moves(self) -> bool: + ''' Wrapper around a private method to test if the generated moves are valid. ''' + try: + self._gen_valid_moves() + except Exception as _: + return False + return True + def neighborhood(self, row, col, distance=1, ignore_diag=False): if not ignore_diag: return self.field[ @@ -253,8 +270,8 @@ def spawn_food(self, max_food, max_level): while food_count < max_food and attempts < 1000: attempts += 1 - row = self.np_random.randint(1, self.rows - 1) - col = self.np_random.randint(1, self.cols - 1) + row = self.np_random.integers(1, self.rows - 1) + col = self.np_random.integers(1, self.cols - 1) # check if it has neighbors: if ( @@ -269,7 +286,7 @@ def spawn_food(self, max_food, max_level): if min_level == max_level # ! this is excluding food of level `max_level` but is kept for # ! 
consistency with prior LBF versions - else self.np_random.randint(min_level, max_level) + else self.np_random.integers(min_level, max_level) ) food_count += 1 self._food_spawned = self.field.sum() @@ -290,12 +307,12 @@ def spawn_players(self, max_player_level): player.reward = 0 while attempts < 1000: - row = self.np_random.randint(0, self.rows) - col = self.np_random.randint(0, self.cols) + row = self.np_random.integers(0, self.rows) + col = self.np_random.integers(0, self.cols) if self._is_empty_location(row, col): player.setup( (row, col), - self.np_random.randint(1, max_player_level + 1), + self.np_random.integers(1, max_player_level + 1), self.field_size, ) break @@ -467,6 +484,10 @@ def get_player_reward(observation): return nobs, nreward, ndone, ninfo + def test_make_gym_obs(self): + ''' Test wrapper to test the current observation in a public manner. ''' + return self._make_gym_obs() + def reset(self): self.field = np.zeros(self.field_size, np.int32) self.spawn_players(self.max_player_level) @@ -480,7 +501,9 @@ def reset(self): self._gen_valid_moves() nobs, _, _, _ = self._make_gym_obs() - return nobs + # The new gym spec and gym utils require that + # the new observation and a dictionary with info is returned + return nobs, {} def step(self, actions): self.current_step += 1 diff --git a/setup.py b/setup.py index 48bd84f..409b8ea 100644 --- a/setup.py +++ b/setup.py @@ -13,8 +13,9 @@ "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.10", ], - install_requires=["numpy", "gym==0.21", "pyglet"], + install_requires=["numpy", "gym==0.26", "pyglet"], extras_require={"test": ["pytest"]}, include_package_data=True, ) diff --git a/tests/test_env.py b/tests/test_env.py index e58c0f9..1fe6e30 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -11,8 +11,7 @@ def manhattan_distance(x,y): @pytest.fixture def simple2p1f(): env = gym.make("Foraging-8x8-2p-1f-v2") - _ = env.reset() - import time + env.reset() env.field[:] = 0 @@ -24,14 +23,13 @@ def simple2p1f(): env.players[0].level = 2 env.players[1].level = 2 - env._gen_valid_moves() + assert env.test_gen_valid_moves() return env @pytest.fixture def simple2p1f_sight1(): env = gym.make("Foraging-8x8-2p-1f-v2", sight=1) - _ = env.reset() - import time + env.reset() env.field[:] = 0 @@ -43,14 +41,13 @@ def simple2p1f_sight1(): env.players[0].level = 2 env.players[1].level = 2 - env._gen_valid_moves() + assert env.test_gen_valid_moves() return env @pytest.fixture def simple2p1f_sight2(): env = gym.make("Foraging-8x8-2p-1f-v2", sight=2) - _ = env.reset() - import time + env.reset() env.field[:] = 0 @@ -62,15 +59,20 @@ def simple2p1f_sight2(): env.players[0].level = 2 env.players[1].level = 2 - env._gen_valid_moves() + assert env.test_gen_valid_moves() return env def test_make(): - env = gym.make("Foraging-8x8-2p-1f-v2") - env = gym.make("Foraging-5x5-2p-1f-v2") - env = gym.make("Foraging-8x8-3p-1f-v2") - env = gym.make("Foraging-8x8-3p-1f-coop-v2") + ''' Tests that we can make and reset the environments. 
''' + enames = ["Foraging-8x8-2p-1f-v2", + "Foraging-5x5-2p-1f-v2", + "Foraging-8x8-3p-1f-v2", + "Foraging-8x8-3p-1f-coop-v2"] + for ename in enames: + env = gym.make(ename) + assert env is not None + env.reset() def test_spaces(): @@ -83,11 +85,13 @@ def test_seed(): obs1 = [] obs2 = [] env.seed(seed) - for r in range(10): - obs1.append(env.reset()) + for _ in range(10): + temp_obs, _ = env.reset() + obs1.append(temp_obs) env.seed(seed) - for r in range(10): - obs2.append(env.reset()) + for _ in range(10): + temp_obs, _ = env.reset() + obs2.append(temp_obs) for o1, o2 in zip(obs1, obs2): assert np.array_equal(o1, o2) @@ -96,8 +100,8 @@ def test_seed(): def test_food_spawning_0(): env = gym.make("Foraging-6x6-2p-2f-v2") - for i in range(1000): - _ = env.reset() + for _ in range(1000): + env.reset() foods = [np.array(f) for f in zip(*env.field.nonzero())] # we should have 2 foods @@ -115,8 +119,8 @@ def test_food_spawning_0(): def test_food_spawning_1(): env = gym.make("Foraging-8x8-2p-3f-v2") - for i in range(1000): - _ = env.reset() + for _ in range(1000): + env.reset() foods = [np.array(f) for f in zip(*env.field.nonzero())] # we should have 3 foods @@ -129,24 +133,24 @@ def test_food_spawning_1(): def test_reward_0(simple2p1f): _, rewards, _, _ = simple2p1f.step([5, 5]) - assert rewards[0] == 0.5 - assert rewards[1] == 0.5 + assert rewards[0] == 1.0 + assert rewards[1] == 1.0 def test_reward_1(simple2p1f): _, rewards, _, _ = simple2p1f.step([0, 5]) assert rewards[0] == 0 - assert rewards[1] == 1 + assert rewards[1] == 2.0 def test_partial_obs_1(simple2p1f_sight1): env = simple2p1f_sight1 - obs, _, _, _ = env._make_gym_obs() + obs, _, _, _ = env.test_make_gym_obs() assert obs[0][-2] == -1 assert obs[1][-2] == -1 def test_partial_obs_2(simple2p1f_sight2): env = simple2p1f_sight2 - obs, _, _, _ = env._make_gym_obs() + obs, _, _, _ = env.test_make_gym_obs() assert obs[0][-2] > -1 assert obs[1][-2] > -1 @@ -158,7 +162,7 @@ def test_partial_obs_2(simple2p1f_sight2): def test_partial_obs_3(simple2p1f): env = simple2p1f - obs, _, _, _ = env._make_gym_obs() + obs, _, _, _ = env.test_make_gym_obs() assert obs[0][-2] > -1 assert obs[1][-2] > -1 @@ -166,4 +170,4 @@ def test_partial_obs_3(simple2p1f): obs, _, _, _ = env.step([Action.WEST, Action.NONE]) assert obs[0][-2] > -1 - assert obs[1][-2] > -1 \ No newline at end of file + assert obs[1][-2] > -1 From cc5f1022bf992d87f2dc06598344db9e32b7a553 Mon Sep 17 00:00:00 2001 From: Joshua Hoffman Date: Tue, 31 Oct 2023 14:36:30 -0500 Subject: [PATCH 2/5] Changed the setup file to specify that pyglet needs to be less than version 2 for it to still work. Changed the init file for agents and dependencies so that the correct classes are imported and point to the correct class/function/interface. Fixed and made the imports more verbose, so it is clear what is being imported where. Removed white space. 
Changed the raise errors to be callables instead of classes --- lbforaging.py | 3 +++ lbforaging/agents/__init__.py | 6 +----- lbforaging/agents/hba.py | 6 +++--- lbforaging/agents/heuristic_agent.py | 6 +++--- lbforaging/agents/monte_carlo.py | 3 ++- lbforaging/agents/nn_agent.py | 2 +- lbforaging/agents/q_agent.py | 7 ++++--- lbforaging/agents/random_agent.py | 2 +- lbforaging/foraging/agent.py | 2 +- lbforaging/foraging/environment.py | 2 +- lbforaging/foraging/rendering.py | 12 ++++++++++++ setup.py | 2 +- 12 files changed, 33 insertions(+), 20 deletions(-) diff --git a/lbforaging.py b/lbforaging.py index 654e5c0..4ae4c69 100644 --- a/lbforaging.py +++ b/lbforaging.py @@ -1,3 +1,4 @@ +'''Basic flow to see if the base install worked over one environment.''' import argparse import logging import time @@ -53,3 +54,5 @@ def main(game_count=1, render=False): args = parser.parse_args() main(args.times, args.render) + + print("Done. NO RUNTIME ERRORS.") diff --git a/lbforaging/agents/__init__.py b/lbforaging/agents/__init__.py index 29f46bd..27fcf67 100644 --- a/lbforaging/agents/__init__.py +++ b/lbforaging/agents/__init__.py @@ -1,5 +1 @@ -# from lbforaging.agents.random_agent import RandomAgent -# from lbforaging.agents.heuristic_agent import H1, H2, H3, H4 -# from lbforaging.agents.q_agent import QAgent -# from lbforaging.agents.monte_carlo import MonteCarloAgent -# from lbforaging.agents.hba import HBAAgent +from lbforaging.agents import * diff --git a/lbforaging/agents/hba.py b/lbforaging/agents/hba.py index 6a74c38..9c965be 100644 --- a/lbforaging/agents/hba.py +++ b/lbforaging/agents/hba.py @@ -1,8 +1,8 @@ -from . import QAgent -from foraging import Env +from lbforaging.agents.q_agent import QAgent +from lbforaging.foraging.environment import ForagingEnv as Env import random import numpy as np -from agents import H1, H2, H3, H4 +from lbforaging.agents.heuristic_agent import H1, H2, H3, H4 from itertools import product from collections import defaultdict from functools import reduce diff --git a/lbforaging/agents/heuristic_agent.py b/lbforaging/agents/heuristic_agent.py index dd12689..5a70c0d 100644 --- a/lbforaging/agents/heuristic_agent.py +++ b/lbforaging/agents/heuristic_agent.py @@ -1,7 +1,7 @@ import random import numpy as np -from foraging import Agent -from foraging.environment import Action +from lbforaging.foraging.agent import Agent +from lbforaging.foraging.environment import Action class HeuristicAgent(Agent): @@ -28,7 +28,7 @@ def _move_towards(self, target, allowed): raise ValueError("No simple path found") def step(self, obs): - raise NotImplemented("Heuristic agent is implemented by H1-H4") + raise NotImplementedError("Heuristic agent is implemented by H1-H4") class H1(HeuristicAgent): diff --git a/lbforaging/agents/monte_carlo.py b/lbforaging/agents/monte_carlo.py index eb99505..d82b9a4 100644 --- a/lbforaging/agents/monte_carlo.py +++ b/lbforaging/agents/monte_carlo.py @@ -8,7 +8,8 @@ import plotly.graph_objs as go from networkx.drawing.nx_pydot import graphviz_layout -from foraging import Agent, Env +from lbforaging.foraging.agent import Agent +from lbforaging.foraging.environment import ForagingEnv as Env MCTS_DEPTH = 15 diff --git a/lbforaging/agents/nn_agent.py b/lbforaging/agents/nn_agent.py index 59b516c..e503f13 100644 --- a/lbforaging/agents/nn_agent.py +++ b/lbforaging/agents/nn_agent.py @@ -1,6 +1,6 @@ import random -from foraging import Agent +from lbforaging.foraging.agent import Agent class NNAgent(Agent): diff --git a/lbforaging/agents/q_agent.py 
b/lbforaging/agents/q_agent.py
index 5d6e631..1fb2793 100644
--- a/lbforaging/agents/q_agent.py
+++ b/lbforaging/agents/q_agent.py
@@ -4,9 +4,10 @@
 import numpy as np
 import pandas as pd
 
-from agents import H1
-from lbforaging import Agent, Env
-from lbforaging.environment import Action
+from lbforaging.agents.heuristic_agent import H1
+from lbforaging.foraging.agent import Agent
+from lbforaging.foraging.environment import Action
+from lbforaging.foraging.environment import ForagingEnv as Env
 
 
 _CACHE = None
diff --git a/lbforaging/agents/random_agent.py b/lbforaging/agents/random_agent.py
index fa136f3..c208ce5 100644
--- a/lbforaging/agents/random_agent.py
+++ b/lbforaging/agents/random_agent.py
@@ -1,6 +1,6 @@
 import random
 
-from lbforaging import Agent
+from lbforaging.foraging.agent import Agent
 
 
 class RandomAgent(Agent):
diff --git a/lbforaging/foraging/agent.py b/lbforaging/foraging/agent.py
index c6b181d..3dda240 100644
--- a/lbforaging/foraging/agent.py
+++ b/lbforaging/foraging/agent.py
@@ -30,7 +30,7 @@ def _step(self, obs):
         return action
 
     def step(self, obs):
-        raise NotImplemented("You must implement an agent")
+        raise NotImplementedError("You must implement an agent")
 
 
     def _closest_food(self, obs, max_food_level=None, start=None):
diff --git a/lbforaging/foraging/environment.py b/lbforaging/foraging/environment.py
index 67d16a8..e87a252 100644
--- a/lbforaging/foraging/environment.py
+++ b/lbforaging/foraging/environment.py
@@ -93,7 +93,7 @@ def __init__(
 
         self.field = np.zeros(field_size, np.int32)
         self.penalty = penalty
-        
+
         self.max_food = max_food
         self._food_spawned = 0.0
         self.max_player_level = max_player_level
diff --git a/lbforaging/foraging/rendering.py b/lbforaging/foraging/rendering.py
index 98f9509..44b6039 100644
--- a/lbforaging/foraging/rendering.py
+++ b/lbforaging/foraging/rendering.py
@@ -29,6 +29,18 @@
 """
 )
 
+try:
+    from pyglet import gl
+except ImportError as e:
+    raise ImportError(
+        """
+        Cannot 'from pyglet import gl'
+        HINT: you can install pyglet directly via 'pip install pyglet'.
+        But if you really just want to install all Gym dependencies and not have to think about it,
+        'pip install -e .[all]' or 'pip install gym[all]' will do it.
+        """
+    )
+
 try:
     from pyglet.gl import *
 except ImportError as e:
diff --git a/setup.py b/setup.py
index 409b8ea..f7ee792 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
         "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.10",
     ],
-    install_requires=["numpy", "gym==0.26", "pyglet"],
+    install_requires=["numpy", "gym==0.26", "pyglet<2"],
     extras_require={"test": ["pytest"]},
     include_package_data=True,
 )

From 270148a008d8f74cf4002013720284f5c9e38733 Mon Sep 17 00:00:00 2001
From: Joshua Hoffman
Date: Tue, 31 Oct 2023 14:47:35 -0500
Subject: [PATCH 3/5] All the changes here update the repo to a newer version
 of Gym, clarify the dependencies in the setup doc, and make it compatible
 with Python 3.10. I also fixed the dependency issue in the agents directory
 and some __init__ files. All of the tests now pass, and the rendering now
 works as well.

---
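Note: the randint -> integers renames earlier in the series follow from the gym==0.26 pin above, whose gym.utils.seeding.np_random() returns a numpy.random.Generator rather than the legacy RandomState. A minimal sketch of the difference (assumes gym==0.26 is installed as pinned in setup.py; the seed value 7 is arbitrary):

from gym.utils import seeding

# gym>=0.26 hands back a numpy.random.Generator, which exposes .integers()
# but has no .randint() method
rng, _ = seeding.np_random(7)
row = rng.integers(1, 8)   # same pattern the patched spawn_food()/spawn_players() use

The same Generator-based seeding carries over unchanged when PATCH 4/5 swaps gym for gymnasium.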
 lbforaging/foraging/agent.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lbforaging/foraging/agent.py b/lbforaging/foraging/agent.py
index 3dda240..a998184 100644
--- a/lbforaging/foraging/agent.py
+++ b/lbforaging/foraging/agent.py
@@ -1,5 +1,4 @@
 import logging
-
 import numpy as np
 
 _MAX_INT = 999999

From 3756ac424ca9c237f5e1da752386b480dd3ee080 Mon Sep 17 00:00:00 2001
From: Joshua Hoffman
Date: Thu, 2 Nov 2023 13:33:12 -0500
Subject: [PATCH 4/5] Found more bugs in updating the version. This is now
 compliant with the new gym standard API and can be run with RLLib for proof
 of concept work as well

---
 lbforaging.py                      |  8 +++++---
 lbforaging/__init__.py             |  3 ++-
 lbforaging/foraging/environment.py | 24 +++++++++++++++++-------
 tests/test_env.py                  | 16 ++++++++--------
 4 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/lbforaging.py b/lbforaging.py
index 4ae4c69..834215a 100644
--- a/lbforaging.py
+++ b/lbforaging.py
@@ -2,7 +2,7 @@
 import argparse
 import logging
 import time
-import gym
+import gymnasium as gym
 import numpy as np
 import lbforaging
 
@@ -24,7 +24,7 @@ def _game_loop(env, render):
 
         actions = env.action_space.sample()
 
-        _, nreward, ndone, _ = env.step(actions)
+        _, nreward, ndone, _, _ = env.step(actions)
 
         if sum(nreward) > 0:
             print(nreward)
@@ -38,7 +38,9 @@ def main(game_count=1, render=False):
 
 def main(game_count=1, render=False):
     env = gym.make("Foraging-8x8-2p-2f-v2")
-    env.reset()
+
+    _, info = env.reset()
+    assert info == {}
 
     for _ in range(game_count):
         _game_loop(env, render)
diff --git a/lbforaging/__init__.py b/lbforaging/__init__.py
index 0196ee4..538a37e 100644
--- a/lbforaging/__init__.py
+++ b/lbforaging/__init__.py
@@ -1,5 +1,6 @@
-from gym.envs.registration import registry, register, make, spec
+from gymnasium.envs.registration import registry, register, make, spec
 from itertools import product
+from lbforaging import foraging
 
 sizes = range(5, 20)
 players = range(2, 20)
diff --git a/lbforaging/foraging/environment.py b/lbforaging/foraging/environment.py
index e87a252..0602f76 100644
--- a/lbforaging/foraging/environment.py
+++ b/lbforaging/foraging/environment.py
@@ -2,9 +2,9 @@
 from collections import namedtuple, defaultdict
 from enum import Enum
 from itertools import product
-from gym import Env
-import gym
-from gym.utils import seeding
+from gymnasium import Env
+import gymnasium as gym
+from gymnasium.utils import seeding
 import numpy as np
 
 
@@ -179,7 +179,15 @@ def from_obs(cls, obs):
             player.score = p.score if p.score else 0
             players.append(player)
 
-        env = cls(players, None, None, None, None)
+        env = cls(
+            players=players,
+            max_player_level=None,
+            field_size=None,
+            max_food=None,
+            sight=None,
+            max_episode_steps=50,
+            force_coop=False
+        )
         env.field = np.copy(obs.field)
         env.current_step = obs.current_step
         env.sight = obs.sight
@@ -482,13 +490,15 @@ def get_player_reward(observation):
             assert self.observation_space[i].contains(obs), \
                 f"obs space error: obs: {obs}, obs_space: {self.observation_space[i]}"
 
-        return nobs, nreward, ndone, ninfo
+        truncated_term = False
+        # To turn this into a single agent task, you need to sum the nreward and the ndone
+        return nobs, nreward, ndone, truncated_term, ninfo
 
     def test_make_gym_obs(self):
         ''' Test wrapper to test the current observation in a public manner. '''
         return self._make_gym_obs()
 
-    def reset(self):
+    def reset(self, *, seed=None, options=None):
         self.field = np.zeros(self.field_size, np.int32)
         self.spawn_players(self.max_player_level)
         player_levels = sorted([player.level for player in self.players])
@@ -500,7 +510,7 @@ def reset(self):
         self._game_over = False
         self._gen_valid_moves()
 
-        nobs, _, _, _ = self._make_gym_obs()
+        nobs, _, _, _, _ = self._make_gym_obs()
         # The new gym spec and gym utils require that
         # the new observation and a dictionary with info is returned
         return nobs, {}
diff --git a/tests/test_env.py b/tests/test_env.py
index 1fe6e30..724d001 100644
--- a/tests/test_env.py
+++ b/tests/test_env.py
@@ -2,7 +2,7 @@
 import numpy as np
 import lbforaging
 from lbforaging.foraging.environment import Action
-import gym
+import gymnasium as gym
 
 
 def manhattan_distance(x,y):
@@ -132,42 +132,42 @@ def test_food_spawning_1():
     assert manhattan_distance(foods[1], foods[2]) > 2
 
 def test_reward_0(simple2p1f):
-    _, rewards, _, _ = simple2p1f.step([5, 5])
+    _, rewards, _, _, _ = simple2p1f.step([5, 5])
 
     assert rewards[0] == 1.0
     assert rewards[1] == 1.0
 
 
 def test_reward_1(simple2p1f):
-    _, rewards, _, _ = simple2p1f.step([0, 5])
+    _, rewards, _, _, _ = simple2p1f.step([0, 5])
 
     assert rewards[0] == 0
     assert rewards[1] == 2.0
 
 
 def test_partial_obs_1(simple2p1f_sight1):
     env = simple2p1f_sight1
-    obs, _, _, _ = env.test_make_gym_obs()
+    obs, _, _, _, _ = env.test_make_gym_obs()
 
     assert obs[0][-2] == -1
     assert obs[1][-2] == -1
 
 
 def test_partial_obs_2(simple2p1f_sight2):
     env = simple2p1f_sight2
-    obs, _, _, _ = env.test_make_gym_obs()
+    obs, _, _, _, _ = env.test_make_gym_obs()
 
     assert obs[0][-2] > -1
     assert obs[1][-2] > -1
 
-    obs, _, _, _ = env.step([Action.WEST, Action.NONE])
+    obs, _, _, _, _ = env.step([Action.WEST, Action.NONE])
 
     assert obs[0][-2] == -1
     assert obs[1][-2] == -1
 
 
 def test_partial_obs_3(simple2p1f):
     env = simple2p1f
-    obs, _, _, _ = env.test_make_gym_obs()
+    obs, _, _, _, _ = env.test_make_gym_obs()
 
     assert obs[0][-2] > -1
     assert obs[1][-2] > -1
 
-    obs, _, _, _ = env.step([Action.WEST, Action.NONE])
+    obs, _, _, _, _ = env.step([Action.WEST, Action.NONE])
 
     assert obs[0][-2] > -1
     assert obs[1][-2] > -1

From 210251d67d4496bb9b8dbf7c0344de49a918d08c Mon Sep 17 00:00:00 2001
From: Joshua Hoffman
Date: Wed, 8 Nov 2023 18:19:45 -0600
Subject: [PATCH 5/5] Rearranged import order

---
 lbforaging.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lbforaging.py b/lbforaging.py
index 834215a..ae8ad48 100644
--- a/lbforaging.py
+++ b/lbforaging.py
@@ -2,8 +2,8 @@
 import argparse
 import logging
 import time
-import gymnasium as gym
 import numpy as np
+import gymnasium as gym
 import lbforaging
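With all five patches applied, the environment speaks the Gymnasium API end to end: reset() returns an (observations, info) pair and step() returns the five-tuple introduced in PATCH 4/5. A minimal episode loop in that style, shown as a sketch (it assumes the patched package is installed so that importing lbforaging registers the Foraging-*-v2 ids; the 8x8, 2-player, 2-food id is the same one the bundled lbforaging.py script uses):

import gymnasium as gym
import lbforaging  # noqa: F401  (importing the package registers the env ids)

env = gym.make("Foraging-8x8-2p-2f-v2")
obs, info = env.reset()   # new-style reset: observations plus an info dict

done = False
while not done:
    actions = env.action_space.sample()   # one discrete action per agent
    obs, rewards, dones, truncated, info = env.step(actions)
    done = all(dones) or truncated        # per-agent done flags, scalar truncation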