
Commit: bit renaming
ShivamShrirao committed Aug 9, 2020
1 parent 007b99b commit 87f965f
Showing 4 changed files with 37 additions and 37 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -106,7 +106,7 @@ Non-trainable Params: 704
### Compile model with optimizer, loss and Learning rate

```python
-model.compile(optimizer=optimizers.adam,loss=functions.cross_entropy_with_logits,learning_rate=0.001)
+model.compile(optimizer=optimizers.adam,loss=functions.cross_entropy,learning_rate=0.001)
```

### Optimizers available (nnet.optimizers)
26 changes: 13 additions & 13 deletions nnet_gpu/functions/functions.py
```diff
@@ -76,30 +76,30 @@ def softmax(z, a=None, derivative=False):
 	return exps


-def cross_entropy_with_logits(logits, labels, epsilon=1e-12):
-	return -cp.sum(labels * cp.log(logits + epsilon), axis=0, keepdims=True)
+def cross_entropy_with_logits(outputs, labels, epsilon=1e-12):
+	return -cp.sum(labels * cp.log(outputs + epsilon), axis=0, keepdims=True)


-def cross_entropy(logits, labels, epsilon=1e-12):
+def cross_entropy(outputs, labels, epsilon=1e-12):
 	labels = labels.clip(epsilon, 1 - epsilon)
-	logits = logits.clip(epsilon, 1 - epsilon)
-	return -labels * cp.log(logits) - (1 - labels) * cp.log(1 - logits)
+	outputs = outputs.clip(epsilon, 1 - epsilon)
+	return -labels * cp.log(outputs) - (1 - labels) * cp.log(1 - outputs)


-def del_cross_sigmoid(logits, labels):
-	return (logits - labels)
+def del_cross_sigmoid(outputs, labels):
+	return (outputs - labels)


-def del_cross_soft(logits, labels):
-	return (logits - labels)
+def del_cross_soft(outputs, labels):
+	return (outputs - labels)


-def mean_squared_error(logits, labels):
-	return ((logits - labels) ** 2) / 2
+def mean_squared_error(outputs, labels):
+	return ((outputs - labels) ** 2) / 2


-def del_mean_squared_error(logits, labels):
-	return (logits - labels)
+def del_mean_squared_error(outputs, labels):
+	return (outputs - labels)


 def echo(z, a=None, derivative=False, **kwargs):
```
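The change above is a parameter rename only: both `cross_entropy_with_logits` and `cross_entropy` remain, but their first argument is now `outputs` instead of `logits`, matching how `network.py` now calls the loss as `loss(outputs=..., labels=...)`. A minimal sketch of the renamed keyword (not part of the commit), assuming cupy is installed and that the `functions` namespace is importable as the README's usage suggests:

```python
# Sketch only: exercising the renamed keyword argument.
import cupy as cp
from nnet_gpu import functions  # import path assumed from the README's usage

outputs = cp.asarray([[0.7, 0.2, 0.1]])  # e.g. softmax output of the last layer
labels = cp.asarray([[1.0, 0.0, 0.0]])   # one-hot targets

# The trainer now calls the loss as loss(outputs=..., labels=...), so any
# custom loss passed to compile() should accept the same keyword names.
print(functions.cross_entropy(outputs=outputs, labels=labels))
print(functions.cross_entropy_with_logits(outputs=outputs, labels=labels))
```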
2 changes: 1 addition & 1 deletion nnet_gpu/layers/dense.py
```diff
@@ -68,7 +68,7 @@ def forward(self, inp, training=True):
 		return self.a_out

 	def backprop(self, grads, do_d_inp=True):
-		if self.notEcho and self.not_softmax_cross_entrp:  # make it better in future
+		if self.notEcho and self.not_softmax_cross_entrp:  # TODO - make it better in future by adding Activation layer in graph
 			grads *= self.activation(self.z_out, self.a_out, derivative=True)
 		self.grad_event = stream_maps.default_stream.record(self.grad_event)
 		with self.backp_stream:
```
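For context on the flag referenced in the updated comment: when `compile()` is given `cross_entropy`, it clears `not_softmax_cross_entrp` on the final layer, so `backprop()` above skips the activation derivative and the loss gradient `del_cross_soft` (which returns `outputs - labels`) stands in for the whole softmax-plus-cross-entropy gradient. A small numpy sketch (an illustration, not repo code) checking that identity by finite differences:

```python
# Finite-difference check (numpy, so it runs without a GPU) of the identity
# d/dz [-sum(y * log(softmax(z)))] = softmax(z) - y, which is the same form
# as del_cross_soft(outputs, labels) when the last layer is softmax.
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def fused_loss(z, y):
    return -np.sum(y * np.log(softmax(z)))

z = np.array([1.2, -0.3, 0.5])
y = np.array([0.0, 1.0, 0.0])  # one-hot label
eps = 1e-6

numeric = np.array([
    (fused_loss(z + eps * np.eye(3)[i], y) - fused_loss(z - eps * np.eye(3)[i], y)) / (2 * eps)
    for i in range(3)
])
analytic = softmax(z) - y  # what del_cross_soft(outputs, labels) would return
print(np.allclose(numeric, analytic, atol=1e-6))  # True
```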
44 changes: 22 additions & 22 deletions nnet_gpu/network.py
```diff
@@ -24,6 +24,18 @@ def add(self, obj):
 			obj(self.sequence[-1])
 		self.sequence.append(obj)

+	def compile(self, optimizer=adam, beta=0.9, loss=cross_entropy, learning_rate=0.001):
+		self.optimizer = optimizer
+		self.beta = beta
+		self.learning_rate = learning_rate
+		self.loss = loss
+		if self.loss == cross_entropy:
+			self.sequence[-1].not_softmax_cross_entrp = False
+			self.del_loss = del_cross_soft
+		elif self.loss == mean_squared_error:
+			self.del_loss = del_mean_squared_error
+		self.lenseq_m1 = len(self.sequence) - 1
+
 	def forward(self, X_inp, training=True):
 		obj = self.sequence[0]
 		while True:
@@ -89,20 +101,20 @@ def fit(self, X_inp=None, labels=None, iterator=None, batch_size=1, epochs=1, va
 					inp = cp.asarray(X_inp[idx:idx + batch_size])
 					y_inp = cp.asarray(labels[idx:idx + batch_size])
 					idx += inp.shape[0]
-				logits = self.train_on_batch(inp, y_inp)
+				outputs = self.train_on_batch(inp, y_inp)
 				self.logit_event = cp.cuda.get_current_stream().record()
 				with eval_stream:
 					eval_stream.wait_event(self.logit_event)
 					if accuracy_metric:
-						if self.loss == cross_entropy_with_logits:
-							ans = logits.argmax(axis=1)
+						if self.loss == cross_entropy:
+							ans = outputs.argmax(axis=1)
 							cor = y_inp.argmax(axis=1)
 						else:
-							ans = logits
+							ans = outputs
 							cor = y_inp
 						nacc = (ans == cor).mean().get(eval_stream)
 						acc = info_beta * nacc + (1 - info_beta) * acc
-					sample_loss = self.loss(logits=logits, labels=y_inp).mean().get(eval_stream) / 10
+					sample_loss = self.loss(outputs=outputs, labels=y_inp).mean().get(eval_stream) / 10
 					loss = info_beta * sample_loss + (1 - info_beta) * loss
 					samtm = time.time() - smtst
 					sam_time = info_beta * samtm + (1 - info_beta) * sam_time
@@ -131,31 +143,19 @@ def validate(self, validation_data, batch_size, info_beta=0.2):
 			inp = cp.asarray(VX[vidx:vidx + batch_size])
 			y_inp = cp.asarray(VY[vidx:vidx + batch_size])
 			vidx += inp.shape[0]
-			logits = self.predict(inp)
-			if self.loss == cross_entropy_with_logits:
-				ans = logits.argmax(axis=1)
+			outputs = self.predict(inp)
+			if self.loss == cross_entropy:
+				ans = outputs.argmax(axis=1)
 				cor = y_inp.argmax(axis=1)
 			else:
-				ans = logits
+				ans = outputs
 				cor = y_inp
 			vacc += (ans == cor).sum()
-			sample_loss = self.loss(logits=logits, labels=y_inp).mean() / 10
+			sample_loss = self.loss(outputs=outputs, labels=y_inp).mean() / 10
 			vloss = info_beta * sample_loss + (1 - info_beta) * vloss
 		end = time.time()
 		print(f"\rValidation Accuracy: {(vacc / lnvx).get():.4f} - val_loss: {vloss.get():.4f} - Time: {end - start:.3f}s")

-	def compile(self, optimizer=adam, beta=0.9, loss=cross_entropy_with_logits, learning_rate=0.001):
-		self.optimizer = optimizer
-		self.beta = beta
-		self.learning_rate = learning_rate
-		self.loss = loss
-		if self.loss == cross_entropy_with_logits:
-			self.sequence[-1].not_softmax_cross_entrp = False
-			self.del_loss = del_cross_soft
-		elif self.loss == mean_squared_error:
-			self.del_loss = del_mean_squared_error
-		self.lenseq_m1 = len(self.sequence) - 1
-
 	def save_weights(self, path):  # TODO - make a proper saving mechanism.
 		sv_me = []
 		for obj in self.sequence:
```
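One behavioural note on the relocated `compile()`: the loss gradient is chosen by comparing the passed function object against `cross_entropy` / `mean_squared_error`, so only those exact functions get `del_loss` wired up; a user-defined wrapper with identical behaviour falls through both branches. A small sketch of that comparison (assumes the package is importable; `my_cross_entropy` is a hypothetical wrapper, not from this commit):

```python
# Sketch: compile() dispatches on the loss function object itself.
from nnet_gpu import functions  # import path assumed from the README's usage

def my_cross_entropy(outputs, labels, epsilon=1e-12):
    # hypothetical user-defined wrapper with the same behaviour
    return functions.cross_entropy(outputs, labels, epsilon)

print(my_cross_entropy == functions.cross_entropy)         # False: neither branch in compile() matches, del_loss stays unset
print(functions.cross_entropy == functions.cross_entropy)  # True: compile() wires del_loss = del_cross_soft
```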
