# Non-mathematical Introductions
- http://gcn.com/articles/2014/01/09/topographical-data-analysis.aspx
- https://www.simonsfoundation.org/quanta/20131004-the-mathematical-shape-of-things-to-come/
 
# Videos
| """ | |
| MIT License | |
| Copyright (c) 2017 Cyrille Rossant | |
| Permission is hereby granted, free of charge, to any person obtaining a copy | |
| of this software and associated documentation files (the "Software"), to deal | |
| in the Software without restriction, including without limitation the rights | |
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| copies of the Software, and to permit persons to whom the Software is | 
A small helper that wraps the dot product so the same layer code runs on either Keras backend:

```python
import keras.backend as K

def dot_product(x, kernel):
    """
    Wrapper for the dot product operation, in order to be compatible with both
    the Theano and TensorFlow backends.
    Args:
        x: input tensor
        kernel: weight tensor
    Returns:
        the dot product of `x` and `kernel` as a backend tensor
    """
    if K.backend() == 'tensorflow':
        # TensorFlow's K.dot needs the 1-D kernel expanded to 2-D; squeeze the
        # resulting singleton dimension away again.
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    else:
        return K.dot(x, kernel)
```
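For instance, scoring a batch of sequence features against a single weight vector works the same way on either backend; the tensors below are illustrative placeholders, not part of the original code:

```python
import numpy as np
import keras.backend as K

# Batch of 2 sequences, 5 timesteps, 8 features, scored against one weight vector.
x = K.variable(np.random.randn(2, 5, 8))
kernel = K.variable(np.random.randn(8))
scores = dot_product(x, kernel)
print(K.int_shape(scores))  # (2, 5): one score per timestep
```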
Registering a "guided" ReLU gradient for guided backpropagation (TensorFlow 1.x API; `tf.where` replaced the older `tf.select`):

```python
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops

@ops.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
    # Guided backprop: propagate the ReLU gradient only where the incoming
    # gradient is positive; zero it out everywhere else.
    return tf.where(0. < grad,
                    gen_nn_ops._relu_grad(grad, op.outputs[0]),
                    tf.zeros(grad.get_shape()))

if __name__ == '__main__':
    with tf.Session() as sess:
        pass  # the session body is truncated in the original
```
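Registering the gradient alone changes nothing; the graph must also map its `Relu` ops onto `GuidedRelu`. A minimal sketch of that mapping, using a placeholder graph that is not part of the original:

```python
import tensorflow as tf

graph = tf.get_default_graph()
with graph.gradient_override_map({'Relu': 'GuidedRelu'}):
    # Any tf.nn.relu created inside this block uses the GuidedRelu gradient.
    x = tf.placeholder(tf.float32, shape=[None, 4])
    y = tf.nn.relu(x)
guided_grads = tf.gradients(y, x)[0]
```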
A backend guard around a small `arange` helper, defined with TensorFlow ops when that backend is active:

```python
from keras.layers.core import Layer
import keras.backend as K

if K.backend() == 'tensorflow':
    import tensorflow as tf

    def K_arange(start, stop=None, step=1, dtype='int32'):
        # Build the integer range with tf.range, then cast if another dtype is requested.
        result = tf.range(start, limit=stop, delta=step, name='arange')
        if dtype != 'int32':
            result = K.cast(result, dtype)
        return result
```
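Evaluated with the TensorFlow backend, the helper behaves like NumPy's `arange`; a quick illustrative check:

```python
indices = K_arange(0, stop=10, step=2, dtype='float32')
print(K.eval(indices))  # [0. 2. 4. 6. 8.]
```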
| """ | |
| This is a batched LSTM forward and backward pass | |
| """ | |
| import numpy as np | |
| import code | |
| class LSTM: | |
| @staticmethod | |
| def init(input_size, hidden_size, fancy_forget_bias_init = 3): | 
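A quick shape check of the returned parameter matrix (values are random):

```python
WLSTM = LSTM.init(input_size=10, hidden_size=20)
print(WLSTM.shape)  # (31, 80): 10 + 20 + 1 rows, 4 * 20 gate columns
```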
Prior-box settings for a 300x300 SSD-style detector, one entry per feature map (spatial size, priors per cell, scale range, aspect ratios):

```python
import pickle
import numpy as np
import pdb

img_width, img_height = 300, 300
box_configs = [
    {'layer_width': 38, 'layer_height': 38, 'num_prior': 3, 'min_size': 30.0,
     'max_size': None, 'aspect_ratios': [1.0, 2.0, 1/2.0]},
    {'layer_width': 19, 'layer_height': 19, 'num_prior': 6, 'min_size': 60.0,
     'max_size': 114.0, 'aspect_ratios': [1.0, 1.0, 2.0, 1/2.0, 3.0, 1/3.0]},
    # ... the remaining feature-map entries are truncated in the original
]
```
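The number of prior boxes these entries contribute follows directly from the configuration; a small illustrative check, not part of the original script:

```python
num_priors = sum(c['layer_width'] * c['layer_height'] * c['num_prior']
                 for c in box_configs)
print(num_priors)  # 38*38*3 + 19*19*6 = 6498
```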
An attention-augmented Keras LSTM layer:

```python
from keras.layers import LSTM

class AttentionLSTM(LSTM):
    """LSTM with an attention mechanism.

    This is an LSTM incorporating an attention mechanism into its hidden states.
    Currently, the context vector calculated from the attended vector is fed
    into the model's internal states, closely following the model by Xu et al.
    (2016, Sec. 3.1.2), using a soft attention model following
    Bahdanau et al. (2014).

    The layer expects two inputs instead of the usual one:
        1. the "normal" layer input, and
        2. the vector (or sequence) to attend over.
    """
```
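To make the soft-attention step the docstring describes concrete, here is a minimal NumPy sketch of Bahdanau-style additive attention; the weight names (`W_h`, `W_s`, `v`) and shapes are illustrative assumptions, not the layer's actual parameters:

```python
import numpy as np

def soft_attention(h, states, W_h, W_s, v):
    """Score each attended state against the hidden state h, softmax the
    scores, and return the weighted sum (the context vector)."""
    scores = np.tanh(h @ W_h + states @ W_s) @ v   # additive scores, shape (timesteps,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                       # softmax over timesteps
    return weights @ states                        # context vector, shape (state_dim,)

# Illustrative shapes: hidden size 4, attended sequence of 5 vectors of size 3.
rng = np.random.default_rng(0)
h = rng.normal(size=4)
states = rng.normal(size=(5, 3))
context = soft_attention(h, states,
                         W_h=rng.normal(size=(4, 8)),
                         W_s=rng.normal(size=(3, 8)),
                         v=rng.normal(size=8))
print(context.shape)  # (3,)
```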