LossFunctions.py
# Loss functions: Wasserstein loss and gradient penalty
#######################################################################
from __future__ import print_function, division
import keras.backend as K
import numpy as np
#######################################################################
def gradient_penalty_loss(y_true, y_pred, averaged_samples):
    """
    Computes the gradient penalty based on the critic's predictions for
    randomly weighted averages of real and fake samples. `y_true` is unused
    but required by the Keras loss-function signature.
    """
    gradients = K.gradients(y_pred, averaged_samples)[0]
    # compute the Euclidean norm by squaring ...
    gradients_sqr = K.square(gradients)
    # ... summing over all axes except the batch axis ...
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    # ... and taking the square root
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # penalise deviation of the gradient norm from 1, per sample; the
    # lambda weight is applied outside, e.g. via loss_weights in compile()
    gradient_penalty = K.square(1 - gradient_l2_norm)
    # return the mean penalty over the batch as the loss
    return K.mean(gradient_penalty)
#######################################################################
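# Usage sketch (an assumption, not part of the original file): Keras loss
# functions only accept (y_true, y_pred), so the extra `averaged_samples`
# argument has to be bound before compiling the critic, e.g. with
# functools.partial. `interpolated_img` stands for the output of a layer
# that randomly interpolates real and fake samples; it is a hypothetical
# placeholder here.
from functools import partial

def make_gp_loss(interpolated_img):
    # bind averaged_samples so the result matches the Keras loss signature
    gp_loss = partial(gradient_penalty_loss, averaged_samples=interpolated_img)
    gp_loss.__name__ = 'gradient_penalty'  # Keras requires a named loss
    return gp_loss
#######################################################################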
def wasserstein_loss(y_true, y_pred):
    """
    Wasserstein (earth mover) loss: the mean of y_true * y_pred, where real
    and fake samples carry labels of opposite sign (e.g. -1 and 1).
    """
    return K.mean(y_true * y_pred)
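#######################################################################
# Usage sketch (an assumption, not part of the original file): in a WGAN
# the critic is trained with labels of opposite sign for real and fake
# batches, e.g. -1 for real and +1 for fake, so minimising this loss
# widens the critic's score gap rather than classifying samples.
# `critic_model` and `batch_size` are hypothetical placeholders.
#
#   valid = -np.ones((batch_size, 1))  # labels for real samples
#   fake = np.ones((batch_size, 1))    # labels for fake samples
#   critic_model.compile(
#       loss=[wasserstein_loss, wasserstein_loss, gp_loss],
#       optimizer='adam',
#       loss_weights=[1, 1, 10])       # 10 = gradient-penalty weight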