Revisions
chrischoy revised this gist on May 24, 2017. 1 changed file with 1 addition and 1 deletion.

@@ -16,7 +16,7 @@ def smoothed_metric_loss(input_tensor, name='smoothed_triplet_loss', margin=1):
         # Compute the pairwise distance
         Xe = tf.expand_dims(X, 1)
         Dsq = tf.reduce_sum(tf.square(Xe - tf.transpose(Xe, (1, 0, 2))), 2, keep_dims=False)
-        D = tf.sqrt(Dsq)
+        D = tf.sqrt(Dsq + 1e-8)
         expmD = tf.exp(m - D)

         # Compute the loss
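The only change is the 1e-8 offset inside tf.sqrt. A minimal numpy sketch of the likely motivation (my reading of the change, not something the gist states):

import numpy as np

# The diagonal of Dsq is exactly zero (each point's squared distance to itself),
# and the derivative of sqrt, 1 / (2 * sqrt(x)), diverges at x = 0, so backprop
# through tf.sqrt(Dsq) hits inf/NaN gradients without a small offset.
dsq = np.array([0.0, 1e-8])           # a zero diagonal entry vs. the stabilized one
print(1.0 / (2.0 * np.sqrt(dsq)))     # [inf, 5000.] -- the inf becomes NaN in the chain rule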
chrischoy revised this gist on May 23, 2017. 1 changed file with 6 additions and 8 deletions.

@@ -14,9 +14,9 @@ def smoothed_metric_loss(input_tensor, name='smoothed_triplet_loss', margin=1):
        m = margin

        # Compute the pairwise distance
        Xe = tf.expand_dims(X, 1)
        Dsq = tf.reduce_sum(tf.square(Xe - tf.transpose(Xe, (1, 0, 2))), 2, keep_dims=False)
        D = tf.sqrt(Dsq)
        expmD = tf.exp(m - D)

        # Compute the loss
@@ -32,11 +32,9 @@ def smoothed_metric_loss(input_tensor, name='smoothed_triplet_loss', margin=1):
            ind_rest = np.hstack([np.arange(0, pair_ind * 2),
                                  np.arange(pair_ind * 2 + 2, batch_size)])

            inds = [[i, k] for k in ind_rest]
            inds.extend([[j, l] for l in ind_rest])
            J_ij = tf.log(tf.reduce_sum(tf.gather_nd(expmD, inds))) + tf.gather_nd(D, [[i, j]])

            J_all.append(J_ij)

        J_all = tf.convert_to_tensor(J_all)
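This revision swaps the ||x||^2 - 2 x.y + ||y||^2 expansion for an explicit broadcasted difference and merges the two gather index lists into one. A quick numpy check (random batch; shapes and values are my assumptions) that the two squared-distance formulations agree:

import numpy as np

# Broadcasted form used from this revision on.
X = np.random.randn(8, 16)
Xe = X[:, None, :]                                     # (N, 1, C)
Dsq_broadcast = np.sum((Xe - np.transpose(Xe, (1, 0, 2))) ** 2, axis=2)

# Earlier expansion via the Gram matrix.
sq = np.sum(X * X, axis=1, keepdims=True)              # column vector of ||x_i||^2
Dsq_expansion = sq - 2 * X @ X.T + sq.T

print(np.allclose(Dsq_broadcast, Dsq_expansion))       # True, up to float error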
chrischoy revised this gist on Mar 1, 2017. 1 changed file with 4 additions and 2 deletions.

@@ -1,9 +1,11 @@
import tensorflow as tf
import numpy as np


def smoothed_metric_loss(input_tensor, name='smoothed_triplet_loss', margin=1):
    '''
    input_tensor: require a tensor with predefined dimensions (No None dimension)
    Every two consecutive vectors must be a positive pair. There should not be
    more than one pair from each class.
    '''
    with tf.variable_scope(name):
        # Song et al., Deep Metric Learning via Lifted Structured Feature Embedding
@@ -13,7 +15,7 @@ def smoothed_metric_loss(input_tensor, name='smoothed_metric_loss', margin=1):
        # Compute the pairwise distance
        sq = tf.reduce_sum(X*X, 1)
        sq = tf.reshape(sq, [-1, 1])  # turn r into a column vector
        D = sq - 2 * tf.matmul(X, tf.transpose(X)) + tf.transpose(sq)
        expmD = tf.exp(m - D)
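The docstring addition pins down the expected batch layout: rows 2k and 2k+1 must be a positive pair, with at most one pair per class. A hypothetical sketch of building such a batch (the helper name, data structure, and shapes are all mine, not part of the gist):

import numpy as np

def make_paired_batch(features_by_class, class_ids, rng):
    # One positive pair per sampled class; consecutive rows share a class.
    rows = []
    for c in class_ids:
        idx = rng.choice(len(features_by_class[c]), size=2, replace=False)
        rows.append(features_by_class[c][idx[0]])
        rows.append(features_by_class[c][idx[1]])
    return np.stack(rows)                     # shape (2 * len(class_ids), C)

# Example: 4 classes, 10 examples of 16-D features each -> an (8, 16) batch.
rng = np.random.default_rng(0)
feats = {c: rng.normal(size=(10, 16)) for c in range(4)}
batch = make_paired_batch(feats, class_ids=[0, 1, 2, 3], rng=rng)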
chrischoy created this gist on Mar 1, 2017.

@@ -0,0 +1,43 @@
import tensorflow as tf
import numpy as np


def smoothed_metric_loss(input_tensor, name='smoothed_metric_loss', margin=1):
    '''
    input_tensor: require a tensor with predefined dimensions (No None dimension)
    '''
    with tf.variable_scope(name):
        # Song et al., Deep Metric Learning via Lifted Structured Feature Embedding
        # Define feature X \in \mathbb{R}^{N \times C}
        X = input_tensor
        m = margin

        # Compute the pairwise distance
        sq = tf.reduce_sum(X*X, 1)
        sq = tf.reshape(sq, [-1, 1])  # turn r into column vector
        D = sq - 2 * tf.matmul(X, tf.transpose(X)) + tf.transpose(sq)
        expmD = tf.exp(m - D)

        # Compute the loss
        # Assume that the input data is aligned in a way that two consecutive data form a pair
        batch_size, _ = X.get_shape().as_list()

        # L_{ij} = \log (\sum_{i, k} exp\{m - D_{ik}\} + \sum_{j, l} exp\{m - D_{jl}\}) + D_{ij}
        # L = \frac{1}{2|P|}\sum_{(i,j)\in P} \max(0, J_{i,j})^2
        J_all = []
        for pair_ind in range(batch_size // 2):
            i = pair_ind * 2
            j = i + 1
            ind_rest = np.hstack([np.arange(0, pair_ind * 2),
                                  np.arange(pair_ind * 2 + 2, batch_size)])

            i_nd_rest = [[i, k] for k in ind_rest]
            j_nd_rest = [[j, l] for l in ind_rest]

            J_ij = tf.log(tf.reduce_sum(tf.gather_nd(expmD, i_nd_rest)) +
                          tf.reduce_sum(tf.gather_nd(expmD, j_nd_rest))) \
                + tf.gather_nd(D, [[i, j]])

            J_all.append(J_ij)

        J_all = tf.convert_to_tensor(J_all)
        loss = tf.divide(tf.reduce_mean(tf.square(tf.maximum(J_all, 0))), 2.0, name='metric_loss')
        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

    return loss
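A hypothetical usage sketch, assuming the TF1 graph-mode API the gist targets (tf.placeholder, tf.train, and tf.log are TF1-era symbols). The embedding tensor needs a fully defined static shape, and consecutive rows must form positive pairs as described above; the batch size and embedding dimension below are placeholders of my choosing:

import tensorflow as tf

embeddings = tf.placeholder(tf.float32, shape=[32, 128])   # 16 positive pairs of 128-D embeddings
loss = smoothed_metric_loss(embeddings, margin=1.0)
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)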