The tiling feature is based on neural-dream's tiling system.
Basic usage:
python neural_style_tile.py -style_image <style_image.jpg> -content_image <content_image.jpg> -tile_size 256 -image_size 512
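At a high level, tiling splits the image into overlapping tiles, stylizes each tile separately, and blends the results back together so the seams are hidden. The sketch below only illustrates that idea and is not the actual code from neural_style_tile.py; the tile_size and overlap parameters are assumptions.

import torch

def split_into_tiles(image, tile_size=256, overlap=64):
    # image: 1 x C x H x W tensor; returns a list of (tile, y, x) entries.
    _, _, h, w = image.shape
    step = tile_size - overlap
    tiles = []
    for y in range(0, max(h - overlap, 1), step):
        for x in range(0, max(w - overlap, 1), step):
            tile = image[:, :, y:y + tile_size, x:x + tile_size]
            tiles.append((tile, y, x))
    return tiles

def merge_tiles(tiles, height, width):
    # Average overlapping regions so tile seams are blended together.
    c = tiles[0][0].shape[1]
    canvas = torch.zeros(1, c, height, width)
    counts = torch.zeros(1, 1, height, width)
    for tile, y, x in tiles:
        th, tw = tile.shape[2], tile.shape[3]
        canvas[:, :, y:y + th, x:x + tw] += tile
        counts[:, :, y:y + th, x:x + tw] += 1
    return canvas / counts

Working tile by tile keeps GPU memory usage bounded by the tile size rather than by the full image size, which is what makes large -image_size values practical.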
import os

from PIL import Image


def convert_webp_to_png(directory: str, delete_old_webp_images: bool = False):
    # Walk the directory tree and convert every .webp image to a .png copy.
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".webp"):
                filepath = os.path.join(root, file)
                img = Image.open(filepath)
                new_filepath = os.path.splitext(filepath)[0] + ".png"
                img.save(new_filepath, "PNG")
                # Optionally remove the original .webp file after conversion.
                if delete_old_webp_images:
                    os.remove(filepath)
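A typical call converts everything under a folder in place, for example (the path here is just a placeholder):

convert_webp_to_png("downloads/images", delete_old_webp_images=True)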
# Script by https://github.com/ProGamerGov
import copy

import torch

# Path to model and VAE files that you want to merge
vae_file_path = "vae-ft-mse-840000-ema-pruned.ckpt"
model_file_path = "v1-5-pruned-emaonly.ckpt"

# Name to use for new model file
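For context, swapping a standalone VAE into a Stable Diffusion checkpoint is typically done by loading both checkpoints and overwriting the model's first_stage_model.* weights with the VAE's weights. The sketch below shows that general approach; the merge_vae_into_model helper and the "merged-model.ckpt" output name are assumptions for illustration, not the original script's code.

import copy

import torch

def merge_vae_into_model(model_path, vae_path, output_path):
    # Load both checkpoints on the CPU.
    model = torch.load(model_path, map_location="cpu")
    vae = torch.load(vae_path, map_location="cpu")
    model_sd = model["state_dict"]
    vae_sd = vae.get("state_dict", vae)

    # Copy every VAE weight into the model under the 'first_stage_model.' prefix,
    # replacing the autoencoder weights that shipped with the checkpoint.
    merged = copy.deepcopy(model)
    for key, value in vae_sd.items():
        target_key = "first_stage_model." + key
        if target_key in model_sd:
            merged["state_dict"][target_key] = value

    torch.save(merged, output_path)

merge_vae_into_model(model_file_path, vae_file_path, "merged-model.ckpt")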
from collections import OrderedDict
from typing import Callable, Dict, Optional
from warnings import warn

import torch


def _remove_all_forward_hooks(
    module: torch.nn.Module, hook_fn_name: Optional[str] = None
) -> None:
    """
from typing import Tuple

import torch


def color_transfer(
    input: torch.Tensor,
    source: torch.Tensor,
    mode: str = "pca",
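PCA-based color transfer works by whitening the input image's colors with their own channel covariance and then re-coloring them using the source image's covariance and mean. The sketch below illustrates that approach for C x H x W tensors; it is an assumption-laden illustration, not the implementation behind the signature above.

import torch

def pca_color_transfer(input: torch.Tensor, source: torch.Tensor) -> torch.Tensor:
    # Flatten both images to C x N matrices of pixels.
    c, h, w = input.shape
    x = input.reshape(c, -1)
    y = source.reshape(source.shape[0], -1)

    mu_x = x.mean(dim=1, keepdim=True)
    mu_y = y.mean(dim=1, keepdim=True)
    cov_x = (x - mu_x) @ (x - mu_x).T / x.shape[1]
    cov_y = (y - mu_y) @ (y - mu_y).T / y.shape[1]

    def matrix_sqrt(m: torch.Tensor) -> torch.Tensor:
        # Symmetric matrix square root via eigendecomposition.
        vals, vecs = torch.linalg.eigh(m)
        return vecs @ torch.diag(vals.clamp(min=0).sqrt()) @ vecs.T

    # Whiten with the input statistics, then color with the source statistics.
    whiten = torch.linalg.inv(matrix_sqrt(cov_x))
    colored = matrix_sqrt(cov_y) @ whiten @ (x - mu_x) + mu_y
    return colored.reshape(c, h, w)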
# tensorflow/lucid CPPN (X,Y) --> (R,G,B) Differentiable Image Parameterization in PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms

from inception5h import Inception5h
from PIL import Image
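A CPPN parameterization represents an image as a small network that maps each pixel's (x, y) coordinates to an (r, g, b) value, so the network's weights, rather than the pixels, are what get optimized. A minimal sketch of such a network is shown below; the layer sizes and activations are illustrative choices, not the values used in this script.

import torch
import torch.nn as nn

class SimpleCPPN(nn.Module):
    def __init__(self, hidden_size: int = 24, num_layers: int = 8):
        super().__init__()
        layers = []
        in_features = 2  # (x, y) coordinates
        for _ in range(num_layers):
            layers += [nn.Linear(in_features, hidden_size), nn.Tanh()]
            in_features = hidden_size
        layers += [nn.Linear(in_features, 3), nn.Sigmoid()]  # (r, g, b) in [0, 1]
        self.net = nn.Sequential(*layers)

    def forward(self, size: int) -> torch.Tensor:
        # Build a normalized coordinate grid and map every coordinate to a color.
        coords = torch.linspace(-1.0, 1.0, size)
        y, x = torch.meshgrid(coords, coords, indexing="ij")
        grid = torch.stack([x, y], dim=-1).reshape(-1, 2)
        rgb = self.net(grid).reshape(size, size, 3)
        return rgb.permute(2, 0, 1).unsqueeze(0)  # 1 x 3 x H x W image

The 1 x 3 x H x W output can then be fed into a feature network such as Inception5h, with gradients flowing back into the CPPN's weights instead of into raw pixels.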
import torch
import torch.nn as nn
from collections import OrderedDict
import imp
import numpy as np

# Import the model classes that were edited. Replace 'model_class_name' with the name of the
# class script, and replace 'ModelName' with the name of the class in the script.
from model_class_name import ModelName
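Once the class is imported, a saved state dict can be loaded into it in the usual PyTorch way, assuming the class can be constructed without arguments; the checkpoint filename below is just a placeholder.

model = ModelName()
state_dict = torch.load("model_checkpoint.pth", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()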
import os
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms

from PIL import Image
from CaffeLoader import loadCaffemodel, ModelParallel
Users can specify an image from which the histogram will be transferred, and which images it will be transferred to: the content image, the style image(s), or both.
A new loss layer type has been added that uses image means; currently it only uses the first style image specified.
The histogram loss code here is based on genekogan's neural-style-pt histogram loss code, and the CUDA code comes from pierre-wilmot's NeuralTextureSynthesis: https://github.com/pierre-wilmot/NeuralTextureSynthesis
Each histogram loss layer stores the style image's histogram as a target, and then uses it to compute the difference from the image being stylized.
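A loss layer of this kind generally captures a target statistic from the style image during a capture pass and then, during stylization, penalizes the distance between the current features and that target. The sketch below illustrates the pattern with per-channel means as the statistic (matching the mean-based layer described above); it is a simplified illustration, not the repository's actual histogram matching code.

import torch
import torch.nn as nn

class ChannelMeanLoss(nn.Module):
    # Stores per-channel means of the style features as the target, then measures
    # how far the stylized image's feature means are from that target.
    def __init__(self, strength: float = 1.0):
        super().__init__()
        self.strength = strength
        self.mode = "none"   # 'capture' stores a target, 'loss' computes the loss
        self.target = None
        self.loss = 0.0

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if self.mode == "capture":
            # Per-channel mean over the spatial dimensions of the style features.
            self.target = input.mean(dim=(2, 3)).detach()
        elif self.mode == "loss" and self.target is not None:
            current = input.mean(dim=(2, 3))
            self.loss = self.strength * nn.functional.mse_loss(current, self.target)
        # Pass the features through unchanged, like neural-style-pt's loss modules.
        return input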