import tensorflow as tf
from pprint import pprint
import msa.tf.ops

msa.tf.ops.verbose = False


def example():
    # dict of dicts: { <opname>: {kwargs}, ... }
    default_op_args = {
        'conv2d': {'padding': 'same', 'kernel_size': (3, 3), 'strides': (1, 1)},
        'conv2d_transpose': {'kernel_size': (2, 2), 'strides': (2, 2)},
    }
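    # per-op kwargs take precedence; defaults only fill in keys an op omits, so
    # the strided conv2d entries below inherit just 'padding' (visible as
    # "+ defaults {'padding': 'same'}" in the output log)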
    # list of dicts: [ {'op': <opname>, **kwargs}, ... ]
    encoder_ops_info = [
        {'op': 'conv2d', 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (2, 2), 'strides': (2, 2), 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (2, 2), 'strides': (2, 2), 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (2, 2), 'strides': (2, 2), 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'identity', 'name': 'pre_z_conv'},
        {'op': 'flatten'},
        {'op': 'dense', 'units': 1024}, {'op': 'batch_norm'}, {'op': 'relu'},
    ]
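    # each (2,2)-strided conv halves the resolution: 256 -> 128 -> 64 -> 32;
    # 'pre_z_conv' is a named identity whose pre-flatten shape is looked up
    # below and threaded into the decoder's tf.reshape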
    decoder_ops_info = [
        {'op': 'dense', 'units': 128, 'name': 'z'}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'dense', 'units': 1024}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'tf.reshape', 'name': 'post_z_conv'}, {'op': 'batch_norm'}, {'op': 'relu'},  # 'shape' kwarg is injected below
        {'op': 'conv2d', 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d_transpose', 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d_transpose', 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d_transpose', 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (1, 1), 'filters': 3}, {'op': 'tanh', 'name': 'output'},
    ]
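    # the decoder mirrors the encoder: each conv2d_transpose doubles the
    # resolution back up, 32 -> 64 -> 128 -> 256, ending in a 1x1 conv + tanh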
    x = tf.placeholder(tf.float32, [None, 256, 256, 3])
    # build encoder
    with tf.variable_scope('encoder'):
        encoder_ops, errors = build_graph(x, encoder_ops_info, default_op_args)

    # search returned tensors by name (pre_z_conv) to get the pre-flattened shape
    # (tf.shape gives a dynamic shape tensor, which tf.reshape accepts directly)
    shape = tf.shape([t for t in encoder_ops if 'pre_z_conv' in t.name][0])

    # search decoder op dicts by name (post_z_conv) and write the desired shape into it
    [d for d in decoder_ops_info if 'post_z_conv' in d.get('name', '')][0]['shape'] = shape

    # build decoder on top of the encoder's last tensor
    with tf.variable_scope('decoder'):
        decoder_ops, errors = build_graph(encoder_ops[-1], decoder_ops_info, default_op_args)
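
    # e.g. the pprint imported above can be used to eyeball the results:
    # pprint([t.name for t in encoder_ops + decoder_ops])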
'''
Output:
--------------------------------------------------------------------------------
> msa.tf.ops.conv2d {'filters': 64} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("encoder/conv2d/BiasAdd:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization/FusedBatchNorm:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.conv2d {'strides': (2, 2), 'kernel_size': (2, 2), 'filters': 64} + defaults {'padding': 'same'} --> Tensor("encoder/conv2d_2/BiasAdd:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization_2/FusedBatchNorm:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu_1:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.conv2d {'filters': 128} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("encoder/conv2d_3/BiasAdd:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization_3/FusedBatchNorm:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu_2:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.conv2d {'strides': (2, 2), 'kernel_size': (2, 2), 'filters': 128} + defaults {'padding': 'same'} --> Tensor("encoder/conv2d_4/BiasAdd:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization_4/FusedBatchNorm:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu_3:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.conv2d {'filters': 256} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("encoder/conv2d_5/BiasAdd:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization_5/FusedBatchNorm:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu_4:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.conv2d {'strides': (2, 2), 'kernel_size': (2, 2), 'filters': 256} + defaults {'padding': 'same'} --> Tensor("encoder/conv2d_6/BiasAdd:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization_6/FusedBatchNorm:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu_5:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.identity {'name': 'pre_z_conv'} --> Tensor("encoder/pre_z_conv:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.flatten {} --> Tensor("encoder/Flatten/flatten/Reshape:0", shape=(?, 262144), dtype=float32)
> msa.tf.ops.dense {'units': 1024} --> Tensor("encoder/dense/BiasAdd:0", shape=(?, 1024), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("encoder/batch_normalization_7/batchnorm/add_1:0", shape=(?, 1024), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("encoder/Relu_6:0", shape=(?, 1024), dtype=float32)
--------------------------------------------------------------------------------
23 ops added
--------------------------------------------------------------------------------
> msa.tf.ops.dense {'units': 128, 'name': 'z'} --> Tensor("decoder/z/BiasAdd:0", shape=(?, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization/batchnorm/add_1:0", shape=(?, 128), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu:0", shape=(?, 128), dtype=float32)
> msa.tf.ops.dense {'units': 1024} --> Tensor("decoder/dense/BiasAdd:0", shape=(?, 1024), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_2/batchnorm/add_1:0", shape=(?, 1024), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_1:0", shape=(?, 1024), dtype=float32)
> tf.reshape {'shape': <tf.Tensor 'Shape:0' shape=(4,) dtype=int32>, 'name': 'post_z_conv'} --> Tensor("decoder/post_z_conv:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_3/FusedBatchNorm:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_2:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.conv2d {'filters': 256} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("decoder/conv2d/BiasAdd:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_4/FusedBatchNorm:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_3:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.conv2d_transpose {'filters': 256} + defaults {'strides': (2, 2), 'kernel_size': (2, 2)} --> Tensor("decoder/conv2d_transpose/BiasAdd:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_5/FusedBatchNorm:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_4:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.conv2d {'filters': 128} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("decoder/conv2d_2/BiasAdd:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_6/FusedBatchNorm:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_5:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.conv2d_transpose {'filters': 128} + defaults {'strides': (2, 2), 'kernel_size': (2, 2)} --> Tensor("decoder/conv2d_transpose_2/BiasAdd:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_7/FusedBatchNorm:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_6:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.conv2d {'filters': 64} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("decoder/conv2d_3/BiasAdd:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_8/FusedBatchNorm:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_7:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.conv2d_transpose {'filters': 64} + defaults {'strides': (2, 2), 'kernel_size': (2, 2)} --> Tensor("decoder/conv2d_transpose_3/BiasAdd:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("decoder/batch_normalization_9/FusedBatchNorm:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.relu {} --> Tensor("decoder/Relu_8:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.conv2d {'kernel_size': (1, 1), 'filters': 3} + defaults {'padding': 'same', 'strides': (1, 1)} --> Tensor("decoder/conv2d_4/BiasAdd:0", shape=(?, 256, 256, 3), dtype=float32)
> msa.tf.ops.tanh {'name': 'output'} --> Tensor("decoder/output:0", shape=(?, 256, 256, 3), dtype=float32)
--------------------------------------------------------------------------------
29 ops added
'''
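
#%%
# Standalone sketch of the shape-threading trick example() relies on: tf.shape()
# returns a dynamic int32 shape tensor at graph-construction time, and
# tf.reshape() accepts it directly, so the decoder can restore whatever spatial
# shape the encoder flattened. (Toy snippet, independent of build_graph.)
def reshape_demo():
    a = tf.placeholder(tf.float32, [None, 32, 32, 256])
    flat = tf.reshape(a, [-1, 32 * 32 * 256])  # (?, 262144), like the flatten step
    return tf.reshape(flat, tf.shape(a))       # back to (?, 32, 32, 256)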
#%%
namespaces = [
    '',
    'msa.tf.ops',
    'tf',     # needed so bare names like 'tanh' can fall back to tf.tanh
    'tf.nn',  # likewise for tf.nn.relu (both appear in the second log below)
    'tf.contrib.layers',
]
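# lookup order matters: with 'msa.tf.ops' early in the list, a bare 'relu'
# resolves to msa.tf.ops.relu (first log above), while dotted names such as
# 'tf.reshape' resolve through the '' namespace unchanged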

def build_graph(input_T, ops_info, default_op_args=None, verbose=True):
    print('-' * 80)
    errors = []

    def handle_error(msg, op_dict):
        # ... (handle_error body, and the code that parses 'op' into op_str /
        # fn_path / args and resolves it to a callable, are elided here) ...
        if verbose: print('>', fn_path, args, end=' ')

        extra_args = None
        if default_op_args and op_str in default_op_args:  # check for defaults
            op_defaults = default_op_args[op_str]  # defaults dict for this op type
            # keep only the defaults the op didn't specify itself; e.g. a strided
            # conv2d picks up just 'padding' (the "+ defaults {...}" in the logs)
            extra_args = {k: v for k, v in op_defaults.items() if k not in args}
        if extra_args:
            # ... (rest of build_graph elided: merge extra_args into args, call
            # the op, log the resulting tensor, and return (ops, errors)) ...
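
# The elided lookup above evidently tries each namespace in turn until the op
# name resolves to a callable. A minimal sketch of that idea (illustrative
# only -- find_op is not the library's actual internal API):
def find_op(op_str, namespaces):
    for ns in namespaces:
        fn_path = (ns + '.' + op_str) if ns else op_str  # e.g. 'msa.tf.ops.relu'
        try:
            fn = eval(fn_path)
        except (NameError, AttributeError):
            continue
        if callable(fn):
            return fn_path, fn
    return None, None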
#%%
if __name__ == "__main__":
    defaults = {
        'conv2d': {'padding': 'same', 'kernel_size': (3, 3), 'strides': (1, 1)},
        'conv2d_transpose': {'kernel_size': (2, 2), 'strides': (2, 2)},
    }
    # list of dicts: [ {'op': <opname>, **kwargs}, ... ]
    ops_info = [
        {'op': 'conv2d', 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (2, 2), 'strides': (2, 2), 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (2, 2), 'strides': (2, 2), 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (2, 2), 'strides': (2, 2), 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'dense', 'units': 1024}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'dense', 'units': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'dense', 'units': 1024}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d_transpose', 'filters': 256}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d_transpose', 'filters': 128}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d_transpose', 'filters': 64}, {'op': 'batch_norm'}, {'op': 'relu'},
        {'op': 'conv2d', 'kernel_size': (1, 1), 'filters': 3}, {'op': 'tanh', 'name': 'output'},
    ]
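    # unlike example(), there is no flatten/reshape around the dense layers,
    # so they operate on 4D tensors; note shapes like (?, 32, 32, 1024) below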
    x = tf.placeholder(tf.float32, [None, 256, 256, 3])
    ops, errors = build_graph(x, ops_info, defaults)
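    # errors presumably collects any op specs that failed to resolve or apply;
    # the log below shows a clean run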
#%%
'''
Output:
--------------------------------------------------------------------------------
> msa.tf.ops.conv2d {'filters': 64} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("conv2d/BiasAdd:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization/FusedBatchNorm:0", shape=(?, 256, 256, 64), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.conv2d {'strides': (2, 2), 'kernel_size': (2, 2), 'filters': 64} + defaults {'padding': 'same'} --> Tensor("conv2d_2/BiasAdd:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_2/FusedBatchNorm:0", shape=(?, 128, 128, 64), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_1:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.conv2d {'filters': 128} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("conv2d_3/BiasAdd:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_3/FusedBatchNorm:0", shape=(?, 128, 128, 128), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_2:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.conv2d {'strides': (2, 2), 'kernel_size': (2, 2), 'filters': 128} + defaults {'padding': 'same'} --> Tensor("conv2d_4/BiasAdd:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_4/FusedBatchNorm:0", shape=(?, 64, 64, 128), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_3:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.conv2d {'filters': 256} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("conv2d_5/BiasAdd:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_5/FusedBatchNorm:0", shape=(?, 64, 64, 256), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_4:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.conv2d {'strides': (2, 2), 'kernel_size': (2, 2), 'filters': 256} + defaults {'padding': 'same'} --> Tensor("conv2d_6/BiasAdd:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_6/FusedBatchNorm:0", shape=(?, 32, 32, 256), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_5:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.dense {'units': 1024} --> Tensor("dense/BiasAdd:0", shape=(?, 32, 32, 1024), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_7/FusedBatchNorm:0", shape=(?, 32, 32, 1024), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_6:0", shape=(?, 32, 32, 1024), dtype=float32)
> msa.tf.ops.dense {'units': 128} --> Tensor("dense_2/BiasAdd:0", shape=(?, 32, 32, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_8/FusedBatchNorm:0", shape=(?, 32, 32, 128), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_7:0", shape=(?, 32, 32, 128), dtype=float32)
> msa.tf.ops.dense {'units': 1024} --> Tensor("dense_3/BiasAdd:0", shape=(?, 32, 32, 1024), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_9/FusedBatchNorm:0", shape=(?, 32, 32, 1024), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_8:0", shape=(?, 32, 32, 1024), dtype=float32)
> msa.tf.ops.conv2d {'filters': 256} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("conv2d_7/BiasAdd:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_10/FusedBatchNorm:0", shape=(?, 32, 32, 256), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_9:0", shape=(?, 32, 32, 256), dtype=float32)
> msa.tf.ops.conv2d_transpose {'filters': 256} + defaults {'strides': (2, 2), 'kernel_size': (2, 2)} --> Tensor("conv2d_transpose/BiasAdd:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_11/FusedBatchNorm:0", shape=(?, 64, 64, 256), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_10:0", shape=(?, 64, 64, 256), dtype=float32)
> msa.tf.ops.conv2d {'filters': 128} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("conv2d_8/BiasAdd:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_12/FusedBatchNorm:0", shape=(?, 64, 64, 128), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_11:0", shape=(?, 64, 64, 128), dtype=float32)
> msa.tf.ops.conv2d_transpose {'filters': 128} + defaults {'strides': (2, 2), 'kernel_size': (2, 2)} --> Tensor("conv2d_transpose_2/BiasAdd:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_13/FusedBatchNorm:0", shape=(?, 128, 128, 128), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_12:0", shape=(?, 128, 128, 128), dtype=float32)
> msa.tf.ops.conv2d {'filters': 64} + defaults {'padding': 'same', 'strides': (1, 1), 'kernel_size': (3, 3)} --> Tensor("conv2d_9/BiasAdd:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_14/FusedBatchNorm:0", shape=(?, 128, 128, 64), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_13:0", shape=(?, 128, 128, 64), dtype=float32)
> msa.tf.ops.conv2d_transpose {'filters': 64} + defaults {'strides': (2, 2), 'kernel_size': (2, 2)} --> Tensor("conv2d_transpose_3/BiasAdd:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.batch_norm {} --> Tensor("batch_normalization_15/FusedBatchNorm:0", shape=(?, 256, 256, 64), dtype=float32)
> tf.nn.relu {} --> Tensor("Relu_14:0", shape=(?, 256, 256, 64), dtype=float32)
> msa.tf.ops.conv2d {'kernel_size': (1, 1), 'filters': 3} + defaults {'padding': 'same', 'strides': (1, 1)} --> Tensor("conv2d_10/BiasAdd:0", shape=(?, 256, 256, 3), dtype=float32)
> tf.tanh {'name': 'output'} --> Tensor("output:0", shape=(?, 256, 256, 3), dtype=float32)
--------------------------------------------------------------------------------
47 ops added
'''

#%%
if __name__ == "__main__":
    example()