
Commit

Merge branch 'neuroai_tutorial' of https://github.com/SainsburyWellcomeCentre/NeuralPlayground into neuroai_tutorial
rodrigcd committed Aug 20, 2024
2 parents c5dc9ea + 044c9fc commit 2dea94d
Showing 3 changed files with 144 additions and 0 deletions.
44 changes: 44 additions & 0 deletions neuralplayground/agents/agent_core.py
@@ -219,6 +219,7 @@ def _act(self, obs):
position variation to compute next position
"""
# Pick direction

direction = super().act(obs)
# Normalize direction to step size
direction = direction / np.sqrt(np.sum(direction**2)) * self.step_size
@@ -260,3 +261,46 @@ def act(self, obs):
return self.action_buffer.pop()
else:
return action

class TrajectoryGenerator(RandomAgent):
    def __init__(self, step_size: float = 0.02):
        super().__init__(step_size=step_size)
        self.action_buffer = []
        self.b = 0.13 * 2 * np.pi  # forward velocity Rayleigh distribution scale (m/sec)
        self.sigma = 1  # stdev of rotation velocity (rads/sec)
        self.mu = 0  # turn angle bias
        self.reset()

    def reset(self):
        self.head_dir = np.random.uniform(0, 2 * np.pi)
        self.velocity = 0
        self.turn_angle = 0

    def act(self, obs, crossed_border):
        """Generate a smooth random-walk step: the forward speed is drawn from a
        Rayleigh distribution and the heading is perturbed by a normally distributed
        turn. The speed is reduced when the agent has just crossed an arena border.

        Parameters
        ----------
        obs:
            Whatever observation from the environment class is needed to choose the action
        crossed_border:
            Flag (or mask) from the environment indicating a border crossing on the last step

        Returns
        -------
        d_pos: np.ndarray (2,)
            position variation to compute next position
        """
        # is_near_wall, turn_angle = self.avoid_wall(position[:, t], head_dir[:, t], room_width, room_depth)
        # Sample forward speed; slow down right after crossing a border
        v = np.random.rayleigh(self.b, 1)
        v[crossed_border] *= 0.25

        # Sample rotation velocity and update the accumulated turn angle
        random_turn = np.random.normal(self.mu, self.sigma, 1)
        self.turn_angle += self.step_size * random_turn

        self.velocity = self.step_size * v

        # Step along the current heading, then rotate and wrap the heading to [-pi, pi)
        d_pos = self.velocity * np.stack([np.cos(self.head_dir), np.sin(self.head_dir)]).reshape(-1)
        self.head_dir = self.head_dir + self.turn_angle
        self.head_dir = np.mod(self.head_dir + np.pi, 2 * np.pi) - np.pi

        return d_pos
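
For quick inspection, the new agent can be rolled out on its own by integrating the displacements it returns. A minimal sketch, assuming the class is exported from neuralplayground.agents as in debug.py below; obs is not used by this act() implementation and the border flag is simply set to False here:

from neuralplayground.agents import TrajectoryGenerator
import numpy as np
import matplotlib.pyplot as plt

agent = TrajectoryGenerator(step_size=0.02)
pos = np.zeros(2)
path = [pos.copy()]
for _ in range(2000):
    # obs is unused by act(); crossed_border=False skips the border slow-down
    d_pos = agent.act(obs=None, crossed_border=False)
    pos = pos + d_pos
    path.append(pos.copy())
path = np.array(path)
plt.plot(path[:, 0], path[:, 1])
plt.axis("equal")
plt.show()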
15 changes: 15 additions & 0 deletions neuralplayground/debug.py
@@ -0,0 +1,15 @@
from neuralplayground.arenas import Simple2D
from neuralplayground.agents import RandomAgent, LevyFlightAgent, TrajectoryGenerator

# Random agent generates Brownian motion. Levy flight is still experimental.
agent = TrajectoryGenerator()
time_step_size = 0.1  # sec
agent_step_size = 3

# Init environment
env = Simple2D(time_step_size=time_step_size,
               agent_step_size=agent_step_size,
               arena_x_limits=(-100, 100),
               arena_y_limits=(-100, 100))


print('testing stuff ')
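
debug.py stops after constructing the agent and environment; a minimal sketch of how the rollout might continue, mirroring the reset/step pattern used in debug_1.py below (the three- and four-element return tuples are taken from that script, not verified here):

obs, state, crossed = env.reset()
for i in range(1000):
    # Pick a displacement from the agent given the current observation and border flag
    action = agent.act(obs, crossed)
    # Advance the environment with that displacement
    obs, state, reward, crossed = env.step(action)
ax = env.plot_trajectory()
ax.grid()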
85 changes: 85 additions & 0 deletions neuralplayground/debug_1.py
@@ -0,0 +1,85 @@
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy
import torch
from mpl_toolkits.mplot3d import Axes3D
from tqdm import tqdm

from neuralplayground.arenas import Simple2D
from neuralplayground.agents import (RandomAgent, LevyFlightAgent, TrajectoryGenerator,
                                     Burak2009, Sorscher2022exercise, SorscherIdealRNN)
from neuralplayground.experiments import Sargolini2006Data
from neuralplayground.utils import PlaceCells, get_2d_sort
from neuralplayground.plotting import plot_trajectory_place_cells_activity, plot_ratemaps, compute_ratemaps
from neuralplayground.config import load_plot_config

load_plot_config()
np.random.seed(0)

# Run on CPU (switch to "cuda" to use a GPU if available)
device = torch.device("cpu")
agent_step_size = 0.02
agent = TrajectoryGenerator(step_size=agent_step_size)
time_step_size = 0.01

# Init environment
env = Simple2D(time_step_size=time_step_size,
               agent_step_size=agent_step_size,
               arena_x_limits=(-2, 2),
               arena_y_limits=(-2, 2))

n_steps = 5000  # 50000

# Initialize environment and roll out the agent
obs, state, crossed = env.reset()
for i in range(n_steps):
    # Observe to choose an action
    action = agent.act(obs, crossed)
    # Run environment for given action
    obs, state, reward, crossed = env.step(action)

# Plot the resulting trajectory
ax = env.plot_trajectory()
ax.grid()
plt.show()


print('testing stuff ')


# Arena dimensions, just 2D
room_width = 2.2
room_depth = 2.2

# We'll use a longer sequence just for plotting purposes
# Training will be done with short sequences
sequence_length = 300
batch_size = 4

# Place cells parameters
n_place_cells = 512
place_cell_rf = 0.12
surround_scale = 2.0
periodic = False
difference_of_gaussians = True
place_cells = PlaceCells(Np=n_place_cells,
place_cell_rf=place_cell_rf,
surround_scale=surround_scale,
room_width=room_width,
room_depth=room_depth,
periodic=periodic,
DoG=difference_of_gaussians,
device=device)
generator = TrajectoryGenerator(sequence_length, batch_size, room_width, room_depth, device, place_cells=place_cells)
traj = generator.generate_trajectory(room_width, room_depth, batch_size)
x, y = traj["target_x"], traj["target_y"]
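
A quick way to eyeball the generated batch, continuing the script above; a minimal sketch assuming target_x / target_y are CPU arrays or tensors laid out with one row per trajectory (batch first), which is an assumption about the generator's output rather than something checked here:

x_np = np.asarray(x)  # works for numpy arrays and CPU torch tensors without grad
y_np = np.asarray(y)
plt.figure(figsize=(5, 5))
plt.plot(x_np.T, y_np.T, linewidth=0.8)  # one line per trajectory, assuming batch-first layout
plt.xlim(-room_width / 2, room_width / 2)
plt.ylim(-room_depth / 2, room_depth / 2)
plt.title("Sampled trajectories")
plt.show()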
