diff --git a/neuralplayground/arenas/discritized_objects.py b/neuralplayground/arenas/discritized_objects.py
index ba1610ed..0720778f 100644
--- a/neuralplayground/arenas/discritized_objects.py
+++ b/neuralplayground/arenas/discritized_objects.py
@@ -406,7 +406,7 @@ def plot_trajectory(
         else:
             return ax
 
-    def render(self, history_length=30):
+    def render(self, history_length=30, display=True):
         """Render the environment live through iterations"""
         f, ax = plt.subplots(1, 1, figsize=(8, 6))
         canvas = FigureCanvas(f)
@@ -416,5 +416,6 @@ def render(self, history_length=30):
         image = np.frombuffer(canvas.tostring_rgb(), dtype="uint8")
         image = image.reshape(f.canvas.get_width_height()[::-1] + (3,))
         print(image.shape)
-        cv2.imshow("2D_env", image)
-        cv2.waitKey(10)
+        if display:
+            cv2.imshow("2D_env", image)
+            cv2.waitKey(10)
diff --git a/neuralplayground/arenas/simple2d.py b/neuralplayground/arenas/simple2d.py
index 498dbe27..d98e421d 100644
--- a/neuralplayground/arenas/simple2d.py
+++ b/neuralplayground/arenas/simple2d.py
@@ -347,7 +347,7 @@ def plot_trajectory(
         else:
             return ax
 
-    def render(self, history_length=30):
+    def render(self, history_length=30, display=True):
         """Render the environment live through iterations as in OpenAI gym"""
         f, ax = plt.subplots(1, 1, figsize=(8, 6))
         canvas = FigureCanvas(f)
@@ -357,5 +357,6 @@ def render(self, history_length=30):
         image = np.frombuffer(canvas.tostring_rgb(), dtype="uint8")
         image = image.reshape(f.canvas.get_width_height()[::-1] + (3,))
         print(image.shape)
-        cv2.imshow("2D_env", image)
-        cv2.waitKey(10)
+        if display:
+            cv2.imshow("2D_env", image)
+            cv2.waitKey(10)
diff --git a/tests/arena_exp_test.py b/tests/arena_exp_test.py
index 34f8c767..8eff4b6c 100644
--- a/tests/arena_exp_test.py
+++ b/tests/arena_exp_test.py
@@ -51,7 +51,7 @@ def test_agent_interaction(self, init_env):
         action = agent.act(obs)
         # Run environment for given action
         obs, state, reward = init_env[0].step(action)
-        init_env[0].render()
+        init_env[0].render(display=False)
         init_env[0].plot_trajectory()