
Commit

Refactored
williampeer committed Apr 2, 2016
1 parent ddcf43b commit 9b78b56
Showing 4 changed files with 30 additions and 31 deletions.
35 changes: 17 additions & 18 deletions ExperimentExecution.py
@@ -33,7 +33,7 @@
     training_patterns_heterogeneous.append([uppercase_letter, lowercase_letter])

     turnover_rate = 0.50
-    weighting_dg = current_dg_weighting % 25
+    weighting_dg = current_dg_weighting % 26
     # print "TRIAL #", trial, "turnover rate:", turnover_rate
     # dims,
     # connection_rate_input_ec, perforant_path, mossy_fibers,
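Note on the hunk above: the only functional change is the modulus. With `% 25`, a counter value of 25 wraps back to 0, so the highest DG weighting was never reached; `% 26` maps the counter onto the full range 0-25. A minimal plain-Python sketch (counter values illustrative):

    for current_dg_weighting in (0, 24, 25, 26):
        print("%d -> old %d, new %d" % (current_dg_weighting,
                                        current_dg_weighting % 25,
                                        current_dg_weighting % 26))
    # 0  -> old 0,  new 0
    # 24 -> old 24, new 24
    # 25 -> old 0,  new 25
    # 26 -> old 1,  new 0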
@@ -45,24 +45,23 @@
               0.7, 100.0, 0.1, turnover_rate, # gamma, epsilon, nu, turnover rate
               0.10, 0.95, 0.8, 2.0, weighting_dg) # k_m, k_r, a_i, alpha. alpha is 2 in 4.1

-    for three_trials in range(3):
-        hpc.reset_hpc_module()
-        for train_set_size_ctr in range(3, 4):
+    # hpc.reset_hpc_module()
+    for train_set_size_ctr in range(3, 4):

-            hipp_chaotic_pats, _ = experiment_4_x_1(hpc, train_set_size_ctr, training_patterns_associative)
-            Tools.save_experiment_4_1_results(hpc, hipp_chaotic_pats, "train_set_size_"+str(train_set_size_ctr)+"_exp_1"+
-                                              "turnover_rate_" + str(turnover_rate) +
-                                              "weighting_" + str(hpc._weighting_dg))
+        hipp_chaotic_pats, _ = experiment_4_x_1(hpc, train_set_size_ctr, training_patterns_associative)
+        Tools.save_experiment_4_1_results(hpc, hipp_chaotic_pats, "train_set_size_"+str(train_set_size_ctr)+"_exp_1"+
+                                          "turnover_rate_" + str(turnover_rate) +
+                                          "weighting_" + str(hpc._weighting_dg))

-            # ann = SimpleNeocorticalNetwork(io_dim, 30, io_dim, 0.01, 0.9)
+        # ann = SimpleNeocorticalNetwork(io_dim, 30, io_dim, 0.01, 0.9)

-            # print "Starting experiment 4_2..."
-            # This also saves the experiment_4_x_1 results!
-            # information_vector = experiment_4_x_2(hpc, ann, train_set_size_ctr,
-            #                                       training_patterns_associative[:5 * train_set_size_ctr])
-            # print "Saving the results."
-            # Tools.save_experiment_4_2_results(information_vector, "train_set_size_" + str(train_set_size_ctr) +
-            #                                   "_exp_2_")
+        # print "Starting experiment 4_2..."
+        # This also saves the experiment_4_x_1 results!
+        # information_vector = experiment_4_x_2(hpc, ann, train_set_size_ctr,
+        #                                       training_patterns_associative[:5 * train_set_size_ctr])
+        # print "Saving the results."
+        # Tools.save_experiment_4_2_results(information_vector, "train_set_size_" + str(train_set_size_ctr) +
+        #                                   "_exp_2_")

-            # For now, this is the ONLY place where the counter is incremented.
-            Tools.increment_experiment_counter()
+        # For now, this is the ONLY place where the counter is incremented.
+        Tools.increment_experiment_counter()
2 changes: 1 addition & 1 deletion Experiments_4_x.py
@@ -37,7 +37,7 @@ def training_and_recall_hpc_helper(hpc, training_set_size, train_set_num, origin
     # print "Neuronal turnover completed in", "{:7.3f}".format(t1-t0), "seconds."
     # hpc.re_wire_fixed_input_to_ec_weights()
     print "Learning patterns in training set..."
-    hpc_learn_patterns_wrapper(hpc, patterns=training_set, max_training_iterations=30)  # when training is fixed,
+    hpc_learn_patterns_wrapper(hpc, patterns=training_set, max_training_iterations=50)  # when training is fixed,
     # convergence should occur after one or two iterations?

    # extract by chaotic recall:
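This change only raises the training cap from 30 to 50 iterations. A minimal sketch of what such a cap typically guards against, using hypothetical train_epoch/has_converged names rather than the wrapper's actual internals:

    def capped_training_sketch(hpc, patterns, max_training_iterations=50):
        # Train until convergence, but never loop forever if training oscillates.
        for _ in range(max_training_iterations):
            for pattern in patterns:
                hpc.train_epoch(pattern)      # hypothetical single training pass
            if hpc.has_converged(patterns):   # hypothetical convergence check
                return True
        return False  # cap reached without convergence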
10 changes: 5 additions & 5 deletions HPC.py
@@ -71,28 +71,28 @@ def __init__(self, dims, connection_rate_input_ec, perforant_path, mossy_fibers,
                                           borrow=True)

         # randomly assign about 25 % of the weights to a random connection weight
-        ec_dg_weights = Tools.binomial_f(dims[1], dims[2], self.PP) * np.random.normal(0.5, np.sqrt(0.25), (dims[1], dims[2]))
+        ec_dg_weights = Tools.binomial_f(dims[1], dims[2], self.PP) * np.random.normal(0.5, np.sqrt(0.25), (dims[1], dims[2])).astype(np.float32)
         self.ec_dg_weights = theano.shared(name='ec_dg_weights', value=ec_dg_weights.astype(theano.config.floatX),
                                            borrow=True)

         # randomly assign all weights between the EC and CA3
-        ec_ca3_weights = np.random.normal(0.5, np.sqrt(0.25), (dims[1], dims[3]))# * Tools.binomial_f(dims[1], dims[3], self.PP)
+        ec_ca3_weights = np.random.normal(0.5, np.sqrt(0.25), (dims[1], dims[3])).astype(np.float32)# * Tools.binomial_f(dims[1], dims[3], self.PP)
         self.ec_ca3_weights = theano.shared(name='ec_ca3_weights', value=ec_ca3_weights.astype(theano.config.floatX),
                                             borrow=True)

         # randomly assign about 4 % of the weights to random connection weights
         dg_ca3_weights = Tools.binomial_f(dims[2], dims[3], self.MF) * \
-                np.random.normal(0.9, np.sqrt(0.01), (dims[2], dims[3]))  # elemwise
+                np.random.normal(0.9, np.sqrt(0.01), (dims[2], dims[3])).astype(np.float32)  # elemwise
         self.dg_ca3_weights = theano.shared(name='dg_ca3_weights', value=dg_ca3_weights.astype(theano.config.floatX),
                                             borrow=True)

         # randomly assign 100 % of the weights between CA3 and CA3
-        ca3_ca3_weights = np.random.normal(0.5, np.sqrt(0.25), (dims[3], dims[3]))
+        ca3_ca3_weights = np.random.normal(0.5, np.sqrt(0.25), (dims[3], dims[3])).astype(np.float32)
         self.ca3_ca3_weights = theano.shared(name='ca3_ca3_weights', value=ca3_ca3_weights.astype(theano.config.floatX),
                                              borrow=True)

         # random weight assignment, full connection rate CA3-out
-        ca3_output_weights = np.random.normal(0., np.sqrt(0.5), (dims[3], dims[4]))
+        ca3_output_weights = np.random.normal(0., np.sqrt(0.5), (dims[3], dims[4])).astype(np.float32)
         self.ca3_out_weights = theano.shared(name='ca3_out_weights',
                                              value=ca3_output_weights.astype(theano.config.floatX), borrow=True)
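All five changes in this hunk cast the freshly drawn NumPy weight matrices to float32 before they are wrapped in Theano shared variables. A minimal sketch of the effect, assuming theano.config.floatX is float32 (e.g. a GPU setup); shapes are illustrative:

    import numpy as np
    import theano

    # np.random.normal returns float64; casting at creation avoids carrying a
    # float64 intermediate into the elementwise products above and keeps the
    # array consistent with floatX before the final .astype(theano.config.floatX).
    w = np.random.normal(0.5, np.sqrt(0.25), (4, 3)).astype(np.float32)
    shared_w = theano.shared(name='w', value=w.astype(theano.config.floatX), borrow=True)
    print(shared_w.get_value().dtype)  # float32 when floatX is float32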
14 changes: 7 additions & 7 deletions Tools.py
@@ -114,15 +114,15 @@ def get_pattern_correlation_slow(pattern_1, pattern_2):
 def save_experiment_4_1_results(hpc, chaotically_recalled_patterns, custom_name):
     experiment_dir = get_experiment_dir()

-    hpc_f = file(experiment_dir+'/hpc_'+custom_name+'.save', 'wb')
-    cPickle.dump(hpc, hpc_f, protocol=cPickle.HIGHEST_PROTOCOL)
-    hpc_f.close()
+    # hpc_f = file(experiment_dir+'/hpc_'+custom_name+'.save', 'wb')
+    # cPickle.dump(hpc, hpc_f, protocol=cPickle.HIGHEST_PROTOCOL)
+    # hpc_f.close()

-    save_images_from(chaotically_recalled_patterns, experiment_dir+'/images')
+    save_images_from(chaotically_recalled_patterns, experiment_dir+'/images'+custom_name)

-    f2 = file(experiment_dir+'/_chaotically_recalled_patterns.save', 'wb')
-    cPickle.dump(chaotically_recalled_patterns, f2, protocol=cPickle.HIGHEST_PROTOCOL)
-    f2.close()
+    # f2 = file(experiment_dir+'/_chaotically_recalled_patterns.save', 'wb')
+    # cPickle.dump(chaotically_recalled_patterns, f2, protocol=cPickle.HIGHEST_PROTOCOL)
+    # f2.close()


 def save_experiment_4_2_results(information_vector, custom_name):
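Two effects in this hunk: pickling of the HPC object and of the recalled patterns is disabled, and images are now written to a per-run directory instead of one shared 'images' path. Note the concatenation adds no separator, so the directory becomes 'images<custom_name>'. A plain-Python sketch of the resulting paths, with illustrative values:

    experiment_dir = 'experiment_0'  # illustrative, not the actual output dir
    custom_name = 'train_set_size_3_exp_1turnover_rate_0.5weighting_25'
    print(experiment_dir + '/images')                # old: shared by every run
    print(experiment_dir + '/images' + custom_name)  # new: unique per run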
