Skip to content

Commit

Permalink
organizing files
Browse files Browse the repository at this point in the history
  • Loading branch information
rodrigcd committed Jun 15, 2024
1 parent e105cf0 commit 65ca830
Show file tree
Hide file tree
Showing 6 changed files with 2,795 additions and 320 deletions.
2,655 changes: 2,655 additions & 0 deletions examples/neuroai_tutorial/final_rnn_turorial.ipynb

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from neuralplayground.config import load_plot_config


def main(activation="tanh"):
def main(activation="relu"):

print("Pre-training the network")
print("Activation function: ", activation)
Expand Down Expand Up @@ -49,9 +49,9 @@ def main(activation="tanh"):
device=device)

# Real RNN parameters
n_grid_cells = 64 ** 2 # needs to be the square of a number for later analysis
learning_rate = 1e-4
training_steps = int(1e4)
n_grid_cells = 24 ** 2 # needs to be the square of a number for later analysis
learning_rate = 5e-4
training_steps = int(1e5)
weight_decay = 1e-4

generator = TrajectoryGenerator(sequence_length, batch_size, room_width, room_depth, device,
Expand Down

Large diffs are not rendered by default.

File renamed without changes.
File renamed without changes.
53 changes: 51 additions & 2 deletions neuralplayground/agents/burak2009.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ def path_neural_activity(self, position_x, position_y, headDirection, grid_cell_
rfield += convolution

# Neural Transfer Function
fr = np.where(rfield > 0, rfield, 0)
fr = np.maximum(rfield, 0)

# Neuron dynamics (Eq. 1)
r_old = r
Expand All @@ -283,10 +283,59 @@ def path_neural_activity(self, position_x, position_y, headDirection, grid_cell_

# Track single neuron response
if fr[single_neuron[0], single_neuron[1]] > 0:
single_neuron_response[increment] = 1
single_neuron_response[increment] = fr[single_neuron[0], single_neuron[1]]

return single_neuron_response, r

def update_rate_map(self, headDirection, velocity, grid_cell_rate):
    """Advance the grid-cell population rates along a trajectory.

    For each trajectory sample this builds the direction-tuned feedforward
    drive (Eq. 4), convolves the current population activity with the four
    shifted recurrent weight kernels (performed in Fourier space), rectifies
    the result, and integrates the rate dynamics (Eq. 1).

    Parameters
    ----------
    headDirection : sequence of float
        Head direction (radians) at each trajectory step.
    velocity : sequence of float
        Speed at each trajectory step; indexed in lockstep with
        ``headDirection``, so it is assumed to have the same length —
        TODO confirm against the caller.
    grid_cell_rate : np.ndarray
        Initial population rate map.

    Returns
    -------
    np.ndarray
        Population rate map after processing the whole trajectory.
    """
    r = grid_cell_rate

    # NOTE: the original implementation carried an `increment` counter that
    # was never read (copy-paste residue from path_neural_activity); it has
    # been removed. `iter` was also renamed to avoid shadowing the builtin.
    for step in range(len(velocity)):
        theta_v = headDirection[step]
        vel = velocity[step]

        # Directional gain for the four preferred directions.
        left = -np.cos(theta_v)
        right = np.cos(theta_v)
        up = np.sin(theta_v)
        down = -np.sin(theta_v)

        # Break feedforward input into its directional components
        # Equation (4)
        rfield = self.venvelope * (
            (1 + self.alpha * vel * right) * self.typeR
            + (1 + self.alpha * vel * left) * self.typeL
            + (1 + self.alpha * vel * up) * self.typeU
            + (1 + self.alpha * vel * down) * self.typeD
        )

        # Convolve population activity with shifted symmetric weights.
        # real() is implemented for octave compatibility
        convolution = np.real(
            ifft2(
                fft2(r * self.typeR) * self.ftr_small
                + fft2(r * self.typeL) * self.ftl_small
                + fft2(r * self.typeD) * self.ftd_small
                + fft2(r * self.typeU) * self.ftu_small
            )
        )

        # Add feedforward inputs to the shifted population activity to
        # yield the new population activity.
        rfield += convolution

        # Neural Transfer Function (rectification)
        fr = np.maximum(rfield, 0)

        # Neuron dynamics (Eq. 1); rates are capped at 10.
        r_old = r
        r_new = np.minimum(10, (self.time_step_dt / self.tau) * (5 * fr - r_old) + r_old)
        r = r_new

    return r


def npRelu(x):
    """Element-wise rectified linear unit: returns max(x, 0)."""
    return np.maximum(x, 0)
Expand Down

0 comments on commit 65ca830

Please sign in to comment.