@startuml

class Session {
  - String name
  - void tuneKernels()
  + void register_device(Device* device)
  + void run()
}

enum DeviceType {
  CPU
  GPU
}

abstract class Device {
  - int id
  - DeviceType device_type
  + void register_op(Layer* layer)
}

class DeviceContext {
  - CLCudaAPI::Platform platform
  - CLCudaAPI::Device device
  - CLCudaAPI::Context context
  - CLCudaAPI::Queue queue
}

DeviceContext --> KernelLauncher

class KernelLauncher {
}

class LibDNNKernelLauncher {
  - greentea::LibDNNConv kernel
}

class CLCudaAPIKernelLauncher {
  - CLCudaAPI::Kernel kernel
}

KernelLauncher <|-- LibDNNKernelLauncher
KernelLauncher <|-- CLCudaAPIKernelLauncher

class Node {
}

class Edge {
  - shape3d shape
}

class op_params {
}

abstract class Layer {
  - String layer_type
  - void tuneKernel()
  + virtual void forward_propagation()
  + virtual void backward_propagation()
}

Layer --> op_params
DeviceContext --> op_params

class ConvolutionalLayer {
}

class MaxPoolingLayer {
}

enum BackendType {
  TINYCNN
  AVX
  NNPACK
  LIBDNN
  OPENCL
}

abstract class Backend {
  - BackendType backend_type
  + virtual conv2d()
  + virtual conv2d_q()
  + virtual conv2d_eq()
  + virtual deconv2d()
  + virtual deconv2d_q()
  + virtual deconv2d_eq()
  + virtual maxpool()
  + virtual fully()
  + virtual fully_q()
}

class TinyCNNBackend {
}

class NNPackBackend {
}

class LibDNNBackend {
}

Layer -- DeviceContext
DeviceContext -- OpenCLDevice

Device *-- Layer
Session *-- Device
Device <|-- CPUDevice
Device <|-- GPUDevice
CPUDevice <|-- OpenCLDevice
GPUDevice <|-- OpenCLDevice

Node <|-- Layer
Node *-- Edge : prev
Node *-- Edge : next
Edge --> Node : prev
Edge *-- Node : next
Edge *-- Data : data
Edge *-- Data : grad

Layer <|-- ConvolutionalLayer
Layer <|-- MaxPoolingLayer

ConvolutionalLayer *-- Backend : backend
MaxPoolingLayer *-- Backend : backend

Backend <|-- TinyCNNBackend
Backend <|-- NNPackBackend
Backend <|-- LibDNNBackend

@enduml
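To make the relationships in the diagram concrete, below is a minimal C++ skeleton of the core abstractions (Session, Device, Layer, Backend). It only restates what the diagram shows: class and method names are taken from it, while argument and return types the diagram leaves unspecified (for example the tensor parameters of conv2d()) and the ownership details are placeholder assumptions, not the actual implementation.

```cpp
// Illustrative skeleton derived from the class diagram above.
// Parameter lists and ownership are assumptions where the diagram is silent.
#include <memory>
#include <string>
#include <vector>

enum class DeviceType { CPU, GPU };
enum class BackendType { TINYCNN, AVX, NNPACK, LIBDNN, OPENCL };

class Layer;  // forward declaration

// A Device owns the ops (layers) registered on it (Device *-- Layer).
class Device {
 public:
  Device(int id, DeviceType type) : id_(id), device_type_(type) {}
  virtual ~Device() = default;
  void register_op(Layer* layer) { ops_.push_back(layer); }

 private:
  int id_;
  DeviceType device_type_;
  std::vector<Layer*> ops_;
};

// Pluggable compute backend selected per layer (TinyCNN, NNPack, LibDNN, ...).
class Backend {
 public:
  virtual ~Backend() = default;
  virtual void conv2d() = 0;    // tensor/params arguments omitted in the diagram
  virtual void maxpool() = 0;
  virtual void fully() = 0;
  // ... conv2d_q(), deconv2d(), fully_q(), etc. as listed above

 protected:
  BackendType backend_type_;
};

// Layers expose forward/backward passes and delegate the math to a Backend.
class Layer {
 public:
  virtual ~Layer() = default;
  virtual void forward_propagation() = 0;
  virtual void backward_propagation() = 0;

 protected:
  std::string layer_type_;
  std::unique_ptr<Backend> backend_;  // ConvolutionalLayer *-- Backend
 private:
  void tuneKernel() {}
};

// A Session owns the devices and drives execution (Session *-- Device).
class Session {
 public:
  explicit Session(std::string name) : name_(std::move(name)) {}
  void register_device(Device* device) { devices_.push_back(device); }
  void run() { /* schedule the registered ops on their devices */ }

 private:
  std::string name_;
  std::vector<Device*> devices_;
  void tuneKernels() {}
};
```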