Bootstrap knowledge of LLMs as quickly as possible, with a focus on GPT-style models.
Avoid being a link dump; aim to provide only valuable, well-curated information.
Start with neural-network fundamentals before moving on to transformers.
import torch

def jacobian(y, x, create_graph=False):
    """Compute the full Jacobian dy/dx via repeated reverse-mode autograd.

    Args:
        y: output tensor (must be part of a graph rooted at ``x``).
        x: input tensor with ``requires_grad=True``.
        create_graph: if True, build a graph of the Jacobian itself so that
            higher-order derivatives (e.g. the Hessian) can be taken.

    Returns:
        Tensor of shape ``y.shape + x.shape`` where entry
        ``[i..., j...] = d y[i...] / d x[j...]``.
    """
    jac = []
    flat_y = y.reshape(-1)
    # One-hot seed vector: selects which scalar output to differentiate.
    grad_y = torch.zeros_like(flat_y)
    for i in range(len(flat_y)):
        grad_y[i] = 1.
        # retain_graph=True: the same graph is reused for every output entry.
        grad_x, = torch.autograd.grad(
            flat_y, x, grad_y, retain_graph=True, create_graph=create_graph
        )
        jac.append(grad_x.reshape(x.shape))
        # Reset the seed — without this, later rows accumulate stale seeds
        # and every row after the first is wrong.
        grad_y[i] = 0.
    # The original snippet was truncated here: it returned None.
    return torch.stack(jac).reshape(y.shape + x.shape)
#!/bin/bash
#
# EDIT: this script is outdated, please see https://forums.developer.nvidia.com/t/pytorch-for-jetson-nano-version-1-6-0-now-available
#
# NOTE(review): installs pip for the system Python 2 interpreter
# (`python-pip`), then upgrades pip for the current user. Kept as-is for
# historical reference; the link above supersedes it.
sudo apt-get install python-pip
# upgrade pip
pip install -U pip
pip --version
# pip 9.0.1 from /home/ubuntu/.local/lib/python2.7/site-packages (python 2.7)