Anonymous created this gist Dec 21, 2017.
align_images.py
import argparse
import cv2
import dlib
import json
import numpy
import skimage.io
from pathlib import Path
from tqdm import tqdm
from umeyama import umeyama

from face_alignment import FaceAlignment, LandmarksType

# face_alignment normally loads dlib's CNN face detector; patch its
# constructor to return dlib's HOG-based frontal detector instead, wrapped
# so each detection carries the same `.rect` attribute as the CNN version.
def monkey_patch_face_detector( _ ):
    detector = dlib.get_frontal_face_detector()
    class Rect(object):
        def __init__( self, rect ):
            self.rect = rect
    def detect( *args ):
        return [ Rect(x) for x in detector(*args) ]
    return detect

dlib.cnn_face_detection_model_v1 = monkey_patch_face_detector
FACE_ALIGNMENT = FaceAlignment( LandmarksType._2D, enable_cuda=True, flip_input=False )

# x- and y-coordinates of the 51 inner landmarks of a mean face (the 68-point
# model minus the 17 jaw points), normalised to the unit square.
mean_face_x = numpy.array([
    0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
    0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
    0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
    0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
    0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
    0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
    0.553364, 0.490127, 0.42689 ])

mean_face_y = numpy.array([
    0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
    0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
    0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
    0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
    0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
    0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
    0.784792, 0.824182, 0.831803, 0.824182 ])

landmarks_2D = numpy.stack( [ mean_face_x, mean_face_y ], axis=1 )

def transform( image, mat, size, padding=0 ):
    # Scale the normalised 2x3 affine matrix up to the output size, shift
    # by the padding, and warp the image into a square aligned crop.
    mat = mat * size
    mat[:,2] += padding
    new_size = int( size + padding * 2 )
    return cv2.warpAffine( image, mat, ( new_size, new_size ) )

def main( args ):
    input_dir = Path( args.input_dir )
    assert input_dir.is_dir()

    output_dir = input_dir / args.output_dir
    output_dir.mkdir( parents=True, exist_ok=True )

    output_file = input_dir / args.output_file

    input_files = list( input_dir.glob( "*." + args.file_type ) )
    assert len( input_files ) > 0, "Can't find input files"

    def iter_face_alignments():
        for fn in tqdm( input_files ):
            image = cv2.imread( str(fn) )
            if image is None:
                tqdm.write( "Can't read image file: {}".format(fn) )
                continue

            # face_alignment expects RGB input, so read the file again with
            # skimage rather than reusing OpenCV's BGR image.
            faces = FACE_ALIGNMENT.get_landmarks( skimage.io.imread( str(fn) ) )

            if faces is None: continue
            if len(faces) == 0: continue
            if args.only_one_face and len(faces) != 1: continue

            for i, points in enumerate(faces):
                # Fit a similarity transform from the 51 inner landmarks
                # (jaw excluded) to the mean face, then crop the aligned face.
                alignment = umeyama( points[17:], landmarks_2D, True )[0:2]
                aligned_image = transform( image, alignment, 160, 48 )

                if len(faces) == 1:
                    out_fn = "{}.jpg".format( Path(fn).stem )
                else:
                    out_fn = "{}_{}.jpg".format( Path(fn).stem, i )

                out_fn = output_dir / out_fn
                cv2.imwrite( str(out_fn), aligned_image )

                yield str(fn.relative_to(input_dir)), str(out_fn.relative_to(input_dir)), list( alignment.ravel() )

    face_alignments = list( iter_face_alignments() )

    with output_file.open('w') as f:
        results = json.dumps( face_alignments, ensure_ascii=False )
        f.write( results )

    print( "Saved face alignments to output file:", output_file )

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument( "input_dir" , type=str )
    parser.add_argument( "output_dir" , type=str, nargs='?', default='aligned' )
    parser.add_argument( "output_file", type=str, nargs='?', default='alignments.json' )

    parser.set_defaults( only_one_face=False )
    parser.add_argument( '--one-face' , dest='only_one_face', action='store_true' )
    parser.add_argument( '--all-faces', dest='only_one_face', action='store_false' )

    parser.add_argument( "--file-type", type=str, default='jpg' )

    main( parser.parse_args() )
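
For reference, a minimal sketch of how the resulting alignments.json can be consumed downstream. The "photos" directory name is hypothetical; size and padding match the values align_images.py passes to transform().

# Minimal sketch: re-create one aligned crop from alignments.json.
# Assumes align_images.py was already run on a directory "photos/" (hypothetical).
import json
import cv2
import numpy
from pathlib import Path

input_dir = Path( "photos" )
with ( input_dir / "alignments.json" ).open() as f:
    alignments = json.load( f )

image_file, face_file, mat = alignments[0]
mat = numpy.array( mat ).reshape( 2, 3 )       # flattened 2x3 affine matrix
image = cv2.imread( str( input_dir / image_file ) )
size, padding = 160, 48                        # same values align_images.py uses
mat = mat * size
mat[:, 2] += padding
crop = cv2.warpAffine( image, mat, ( size + 2 * padding, size + 2 * padding ) )
cv2.imwrite( "check.jpg", crop )               # should match the saved face_file

Each record stores the flattened 2x3 matrix precisely so that downstream scripts such as merge_faces.py can rebuild the affine warp without re-running the detector.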
merge_faces.py
import argparse
import cv2
import json
import numpy
from pathlib import Path
from tqdm import tqdm

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B

# Both autoencoders share one encoder; each identity has its own decoder.
encoder  .load_weights( "models/encoder.h5" )
decoder_A.load_weights( "models/decoder_A.h5" )
decoder_B.load_weights( "models/decoder_B.h5" )

def convert_one_image( autoencoder, image, mat ):
    size = 64
    # Cut the aligned 64x64 face out of the frame and run it through the
    # autoencoder (inputs and outputs are floats in [0, 1]).
    face = cv2.warpAffine( image, mat * size, (size,size) )
    face = numpy.expand_dims( face, 0 )
    new_face = autoencoder.predict( face / 255.0 )[0]
    new_face = numpy.clip( new_face * 255, 0, 255 ).astype( image.dtype )
    # Paste the generated face back into a copy of the original frame by
    # warping with the inverse of the alignment matrix.
    new_image = numpy.copy( image )
    image_size = image.shape[1], image.shape[0]
    cv2.warpAffine( new_face, mat * size, image_size, new_image,
                    cv2.WARP_INVERSE_MAP, cv2.BORDER_TRANSPARENT )
    return new_image

def main( args ):
    input_dir = Path( args.input_dir )
    assert input_dir.is_dir()

    alignments = input_dir / args.alignments
    with alignments.open() as f:
        alignments = json.load(f)

    output_dir = input_dir / args.output_dir
    output_dir.mkdir( parents=True, exist_ok=True )

    # To convert A to B, encode A's face and decode it with B's decoder.
    if args.direction == 'AtoB': autoencoder = autoencoder_B
    if args.direction == 'BtoA': autoencoder = autoencoder_A

    for image_file, face_file, mat in tqdm( alignments ):
        image = cv2.imread( str( input_dir / image_file ) )
        face  = cv2.imread( str( input_dir / face_file ) )

        mat = numpy.array(mat).reshape(2,3)

        if image is None: continue
        if face  is None: continue

        new_image = convert_one_image( autoencoder, image, mat )

        output_file = output_dir / Path(image_file).name
        cv2.imwrite( str(output_file), new_image )

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument( "input_dir", type=str )
    parser.add_argument( "alignments", type=str, nargs='?', default='alignments.json' )
    parser.add_argument( "output_dir", type=str, nargs='?', default='merged' )
    parser.add_argument( "--direction", type=str, default="AtoB", choices=["AtoB", "BtoA"] )
    main( parser.parse_args() )
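
model.py is not part of this gist. For orientation only, here is a hypothetical Keras sketch of the interface the script above assumes: a shared encoder with one decoder per identity, composed into two autoencoders over 64x64x3 images in [0, 1]. Every layer size below is illustrative, not the original architecture.

# Hypothetical sketch of the interface merge_faces.py expects from model.py.
# The real architecture is not included in this gist; shapes are illustrative.
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Reshape, Conv2D, UpSampling2D

def make_decoder():
    inp = Input( shape=(8, 8, 256) )
    x = UpSampling2D()( inp )                                          # 16x16
    x = Conv2D( 128, 3, padding='same', activation='relu' )( x )
    x = UpSampling2D()( x )                                            # 32x32
    x = Conv2D( 64, 3, padding='same', activation='relu' )( x )
    x = UpSampling2D()( x )                                            # 64x64
    out = Conv2D( 3, 3, padding='same', activation='sigmoid' )( x )
    return Model( inp, out )

inp = Input( shape=(64, 64, 3) )
x = Conv2D( 128, 5, strides=2, padding='same', activation='relu' )( inp )
x = Flatten()( x )
x = Dense( 8 * 8 * 256 )( x )
x = Reshape( (8, 8, 256) )( x )
encoder = Model( inp, x )                   # shared between both identities

decoder_A = make_decoder()
decoder_B = make_decoder()
autoencoder_A = Model( inp, decoder_A( encoder(inp) ) )
autoencoder_B = Model( inp, decoder_B( encoder(inp) ) )

The design point the script relies on is that the encoder is shared: encoding a face of A and decoding with decoder_B is what produces the swapped face.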

umeyama.py
## License (Modified BSD)
## Copyright (C) 2011, the scikit-image team All rights reserved.
##
## Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
##
## Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
## Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
## Neither the name of skimage nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# umeyama function from scikit-image/skimage/transform/_geometric.py

import numpy as np

def umeyama( src, dst, estimate_scale ):
    """Estimate N-D similarity transformation with or without scaling.

    Parameters
    ----------
    src : (M, N) array
        Source coordinates.
    dst : (M, N) array
        Destination coordinates.
    estimate_scale : bool
        Whether to estimate scaling factor.

    Returns
    -------
    T : (N + 1, N + 1)
        The homogeneous similarity transformation matrix. The matrix contains
        NaN values only if the problem is not well-conditioned.

    References
    ----------
    .. [1] "Least-squares estimation of transformation parameters between two
           point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
    """

    num = src.shape[0]
    dim = src.shape[1]

    # Compute mean of src and dst.
    src_mean = src.mean(axis=0)
    dst_mean = dst.mean(axis=0)

    # Subtract mean from src and dst.
    src_demean = src - src_mean
    dst_demean = dst - dst_mean

    # Eq. (38).
    A = np.dot(dst_demean.T, src_demean) / num

    # Eq. (39).
    d = np.ones((dim,), dtype=np.double)
    if np.linalg.det(A) < 0:
        d[dim - 1] = -1

    T = np.eye(dim + 1, dtype=np.double)

    U, S, V = np.linalg.svd(A)

    # Eq. (40) and (43).
    rank = np.linalg.matrix_rank(A)
    if rank == 0:
        return np.nan * T
    elif rank == dim - 1:
        if np.linalg.det(U) * np.linalg.det(V) > 0:
            T[:dim, :dim] = np.dot(U, V)
        else:
            s = d[dim - 1]
            d[dim - 1] = -1
            T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
            d[dim - 1] = s
    else:
        # numpy's svd returns V already transposed (this V is V^T in the
        # paper's notation), so it is applied directly, matching the
        # rank-deficient branch above.
        T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))

    if estimate_scale:
        # Eq. (41) and (42).
        scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d)
    else:
        scale = 1.0

    T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)
    T[:dim, :dim] *= scale

    return T
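
As a quick sanity check (a minimal sketch; the angle, scale, and offset values are arbitrary), the estimator should recover a known similarity transform from point correspondences:

# Minimal sanity check: recover a known similarity transform from point pairs.
import numpy as np
from umeyama import umeyama

rng = np.random.RandomState(0)
src = rng.rand(51, 2)                       # e.g. 51 landmarks, as in align_images.py
angle, scale, offset = 0.3, 1.7, np.array([2.0, -1.0])
R = np.array([[np.cos(angle), -np.sin(angle)],
              [np.sin(angle),  np.cos(angle)]])
dst = scale * src.dot(R.T) + offset

T = umeyama(src, dst, True)                 # 3x3 homogeneous matrix
src_h = np.hstack([src, np.ones((51, 1))])  # homogeneous coordinates
print(np.allclose(src_h.dot(T.T)[:, :2], dst))   # expect: True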