-
Notifications
You must be signed in to change notification settings - Fork 0
/
PasteBin.py
126 lines (113 loc) · 6.05 KB
/
PasteBin.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import numpy as np
io_dim = 49
# Sample I/O patterns: a 1 x 49 row of alternating +1/-1 values (the 7-element
# base pattern tiled 7 times). O is the sign-flipped counterpart of I.
I = np.asarray([[1, -1, 1, -1, 1, -1, 1] * 7], dtype=np.float32)
O = np.asarray([[-1, 1, -1, 1, -1, 1, -1] * 7], dtype=np.float32)

# Random bipolar patterns: draw uniform values in [-0.5, 0.5) and threshold at
# zero so every element becomes exactly -1.0 or +1.0 (zero maps to +1, matching
# the original if/else). np.where replaces the original element-wise loop,
# which used Python-2-only `xrange` (NameError on Python 3) and iterated a
# numpy array one scalar at a time.
rand_I = np.random.random((1, io_dim)).astype(np.float32) - np.float32(0.5)
rand_O = np.random.random((1, io_dim)).astype(np.float32) - np.float32(0.5)
rand_I = np.where(rand_I < 0, -1, 1).astype(np.float32)
rand_O = np.where(rand_O < 0, -1, 1).astype(np.float32)
# # TODO: Theano-ize (parallelization). Could use profiler.
# def neuronal_turnover_dg(self):
# # get beta %
# # for each of those neurons, initialize weights according to the percentage above.
# num_of_dg_neurons = self.dims[2]
# num_of_ca3_neurons = self.dims[3]
# num_of_ec_neurons = self.dims[1]
#
# num_of_neurons_to_be_turned_over = np.round(num_of_dg_neurons * self._turnover_rate).astype(np.int16)
# # np.random.seed(np.sqrt(time.time()).astype(np.int64))
# for n in range(num_of_neurons_to_be_turned_over):
# # Note: These neurons may be drawn so that we get a more exact number of beta %. This implementation,
# # however, introduces random fluctuations. Which might be beneficial?
# # this neuron is selected to have re-initialised its weights:
# random_dg_neuron_index = np.round(np.random.random() * (num_of_dg_neurons-1)).astype(np.int16)
#
# # from ec to dg:
# for ec_n in range(num_of_ec_neurons):
# if np.random.random() < self.PP:
# self.update_ec_dg_weights_value(ec_n, random_dg_neuron_index, np.random.random())
# else:
# self.update_ec_dg_weights_value(ec_n, random_dg_neuron_index, 0.0)
# # from dg to ca3:
# for ca3_neuron_index in range(num_of_ca3_neurons):
# if np.random.random() < self.MF:
# self.update_dg_ca3_weights_value(random_dg_neuron_index, ca3_neuron_index, np.random.random())
# else:
# self.update_dg_ca3_weights_value(random_dg_neuron_index, ca3_neuron_index, 0.0)
# def hpc_learn_patterns_iterations_hardcoded_wrapper(hpc, patterns):
# print "Commencing learning of", len(patterns), "I/O patterns."
# time_start_overall = time.time()
# iter_ctr = 0
# while iter_ctr < 2:
# p_ctr = 0
# for [input_pattern, output_pattern] in patterns:
# # Neuronal turnover, setting input and output in the hpc network.
# hpc.setup_pattern(input_pattern, output_pattern)
#
# # one iteration of learning using Hebbian learning
# hpc.learn()
# p_ctr += 1
#
# iter_ctr += 1
# time_stop_overall = time.time()
#
# print "Learned", len(patterns), "pattern-associations in ", iter_ctr, "iterations, which took" "{:7.3f}". \
# format(time_stop_overall-time_start_overall), "seconds."
# NEURONAL TURNOVER SNIPPET:
# # Symbolically: DOESN'T WORK. WTF.
# dg_res = T.fvector()
# dg_num = T.iscalar()
# ctr = T.iscalar()
# _, updates_ec_dg = theano.scan(fn=self.neuronal_turnover_helper_ec_dg, outputs_info=ctr,
# sequences=[dg_res, T.arange(dg_num)])
# neuronal_turnover_ec_dg = theano.function([dg_res, dg_num, ctr], outputs=None, updates=updates_ec_dg)
#
# _, updates_dg_ca3 = theano.scan(self.neuronal_turnover_helper_dg_ca3, outputs_info=ctr,
# sequences=[dg_res, T.arange(dg_num)])
# neuronal_turnover_dg_ca3 = theano.function([dg_res, dg_num, ctr], outputs=None, updates=updates_dg_ca3)
# TURNOVER W/ SCAN FAILED ATTEMPT:
# get beta %
# for each of those neurons, initialize weights according to the percentage above.
# Execution:
# num_of_dg_neurons = self.dims[2]
# dg_neuron_selection = binomial_f(1, num_of_dg_neurons, self._turnover_rate)
# neuron_index = 0
# target_indices = []
# for dg_sel in dg_neuron_selection[0]:
# if dg_sel == 1:
# target_indices.append(neuron_index)
# neuron_index += 1
#
# indices = T.ivector('indices')
# random_weights_sequence = T.fvectors('random_weights_sequence')
# ec_dg_weights = T.fmatrix('ec_dg_weights')
# new_ec_dg_weights = T.fmatrix('new_ec_dg_weights')
# ec_dg_results, ec_dg_updates = theano.scan(fn=self.return_weight_column,
# outputs_info=new_ec_dg_weights,
# sequences=[indices, random_weights_sequence],
# non_sequences=[ec_dg_weights])
# perform_turnover_ec_dg = theano.function([indices, random_weights_sequence, ec_dg_weights],
# outputs=ec_dg_results)
#
# #
# column_length = self.ec_dg_weights.get_value().shape[0]
# index_sequence = np.asarray(target_indices, dtype=np.int32)
# # print "index seq:", index_sequence
# new_column_weights = random_f(index_sequence.shape[1], column_length) * binomial_f(index_sequence,
# column_length, self.PP)
#
# print perform_turnover_ec_dg(index_sequence, new_column_weights, self.ec_dg_weights.get_value())
# column_index = T.iscalar("column_index")
# weight_column = T.fvector("weight_column")
# weight_matrix = T.fmatrix("weight_matrix")
# self.return_weight_column = theano.function([column_index, weight_column, weight_matrix],
# outputs=T.set_subtensor(
# weight_matrix[:, column_index], weight_column))