Revisions

  1. Anonymous created this gist Dec 27, 2017.

align_images_masked.py (120 additions)
    import argparse
    import cv2
    import dlib
    import json
    import numpy
    import skimage.io
    from pathlib import Path
    from tqdm import tqdm
    from umeyama import umeyama

    from face_alignment import FaceAlignment, LandmarksType

    # Monkey-patch dlib's CNN face detector constructor to return the HOG
    # frontal-face detector instead, wrapping each detection in an object that
    # exposes the .rect attribute the CNN detector's results would have.
    def monkey_patch_face_detector( _ ):
        detector = dlib.get_frontal_face_detector()
        class Rect(object):
            def __init__( self, rect ):
                self.rect = rect
        def detect( *args ):
            return [ Rect(x) for x in detector(*args) ]
        return detect

    dlib.cnn_face_detection_model_v1 = monkey_patch_face_detector
    FACE_ALIGNMENT = FaceAlignment( LandmarksType._2D, enable_cuda=True, flip_input=False )

    mean_face_x = numpy.array([
    0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
    0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
    0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
    0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
    0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
    0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
    0.553364, 0.490127, 0.42689 ])

    mean_face_y = numpy.array([
    0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
    0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
    0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
    0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
    0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
    0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
    0.784792, 0.824182, 0.831803, 0.824182 ])

    landmarks_2D = numpy.stack( [ mean_face_x, mean_face_y ], axis=1 )
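    # The 51 (x, y) pairs above form a normalized mean-face template: the
    # standard 68-point landmark layout minus the 17 jawline points, which is
    # why main() below aligns points[17:] against landmarks_2D. Sanity check:
    assert mean_face_x.shape == mean_face_y.shape == (51,)
    assert landmarks_2D.shape == (51, 2)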

    def transform( image, mat, size, padding=0 ):
        mat = mat * size
        mat[:,2] += padding
        new_size = int( size + padding * 2 )
        return cv2.warpAffine( image, mat, ( new_size, new_size ) )
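    # Crop geometry note: main() below calls transform( image, alignment, 160, 48 ),
    # so the normalized-template matrix is scaled to 160 px, shifted by the 48 px
    # border, and every aligned face image comes out 160 + 2 * 48 = 256 px square.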

    def main( args ):
        input_dir = Path( args.input_dir )
        assert input_dir.is_dir()

        output_dir = input_dir / args.output_dir
        output_dir.mkdir( parents=True, exist_ok=True )

        output_file = input_dir / args.output_file

        input_files = list( input_dir.glob( "*." + args.file_type ) )

        # Optionally restrict processing to a window of frames.
        if args.maxFrames > 0:
            input_files = input_files[args.startFrame:args.startFrame + args.maxFrames]
        elif args.startFrame > 0:
            input_files = input_files[args.startFrame:]

        assert len( input_files ) > 0, "Can't find input files"

        def iter_face_alignments():
            for fn in tqdm( input_files ):
                image = cv2.imread( str(fn) )
                if image is None:
                    tqdm.write( "Can't read image file: {}".format(fn) )
                    continue

                faces = FACE_ALIGNMENT.get_landmarks( skimage.io.imread( str(fn) ) )

                if faces is None: continue
                if len(faces) == 0: continue
                if args.only_one_face and len(faces) != 1: continue

                for i, points in enumerate(faces):
                    # Estimate a similarity transform from the detected landmarks
                    # (minus the jawline) to the mean-face template.
                    alignment = umeyama( points[17:], landmarks_2D, True )[0:2]
                    aligned_image = transform( image, alignment, 160, 48 )

                    if len(faces) == 1:
                        out_fn = "{}.jpg".format( Path(fn).stem )
                    else:
                        out_fn = "{}_{}.jpg".format( Path(fn).stem, i )

                    out_fn = output_dir / out_fn
                    cv2.imwrite( str(out_fn), aligned_image )

                    yield str(fn.relative_to(input_dir)), str(out_fn.relative_to(input_dir)), list( alignment.ravel() ), list( points.flatten().astype(float) )

        face_alignments = list( iter_face_alignments() )

        with output_file.open('w') as f:
            results = json.dumps( face_alignments, ensure_ascii=False )
            f.write( results )

        print( "Saved face alignments to output file:", output_file )

    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        parser.add_argument( "input_dir" , type=str )
        parser.add_argument( "output_dir" , type=str, nargs='?', default='aligned' )
        parser.add_argument( "output_file", type=str, nargs='?', default='alignments.json' )

        parser.set_defaults( only_one_face=False )
        parser.add_argument( '--one-face' , dest='only_one_face', action='store_true' )
        parser.add_argument( '--all-faces', dest='only_one_face', action='store_false' )

        parser.add_argument( "--startFrame", type=int, default=0 )
        parser.add_argument( "--maxFrames" , type=int, default=0 )

        parser.add_argument( "--file-type", type=str, default='jpg' )

        main( parser.parse_args() )
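
A hypothetical way to drive align_images_masked.py without the command line (the paths are placeholders, and it assumes the gist's dependencies, dlib, face_alignment and the umeyama.py helper, are importable alongside the script, so treat it as a sketch rather than part of the gist):

    import argparse
    import align_images_masked as align

    align.main( argparse.Namespace(
        input_dir     = "./frames",          # directory of extracted video frames
        output_dir    = "aligned",           # created inside input_dir
        output_file   = "alignments.json",   # written inside input_dir
        file_type     = "jpg",
        only_one_face = True,
        startFrame    = 0,
        maxFrames     = 0,
    ) )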

merge_faces_masked.py (133 additions)

    import argparse
    import cv2
    import json
    import numpy
    from pathlib import Path
    from tqdm import tqdm
    from scipy import ndimage
    from model import autoencoder_A
    from model import autoencoder_B
    from model import encoder, decoder_A, decoder_B

    encoder.load_weights( "models/encoder.h5" )
    decoder_A.load_weights( "models/decoder_A.h5" )
    decoder_B.load_weights( "models/decoder_B.h5" )


    def convert_one_image( autoencoder, image, mat, facepoints, erosion_kernel, blurSize, seamlessClone, maskType ):
        size = 64
        image_size = image.shape[1], image.shape[0]

        # Crop the face to the 64x64 input the autoencoder expects, convert it,
        # and scale the prediction back to 8-bit pixel values.
        face = cv2.warpAffine( image, mat * size, (size,size) )
        face = numpy.expand_dims( face, 0 )
        new_face = autoencoder.predict( face / 255.0 )[0]

        new_face = numpy.clip( new_face * 255, 0, 255 ).astype( image.dtype )

        # Rectangular mask: the full face crop warped back into the original frame.
        face_mask = numpy.zeros( image.shape, dtype=float )
        if 'Rect' in maskType:
            face_src = numpy.ones( new_face.shape, dtype=float )
            cv2.warpAffine( face_src, mat * size, image_size, face_mask, cv2.WARP_INVERSE_MAP, cv2.BORDER_TRANSPARENT )

        # Hull mask: the convex hull of the detected landmark points.
        hull_mask = numpy.zeros( image.shape, dtype=float )
        if 'Hull' in maskType:
            hull = cv2.convexHull( numpy.array( facepoints ).reshape((-1,2)).astype(int) ).flatten().reshape( (-1,2) )
            cv2.fillConvexPoly( hull_mask, hull, (1,1,1) )

        if maskType == 'FaceHull':
            image_mask = hull_mask
        elif maskType == 'Rect':
            image_mask = face_mask
        else:
            image_mask = face_mask * hull_mask

        if erosion_kernel is not None:
            image_mask = cv2.erode( image_mask, erosion_kernel, iterations=1 )

        if blurSize != 0:
            image_mask = cv2.blur( image_mask, (blurSize, blurSize) )

        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        # Warp the converted face back over a copy of the original frame.
        cv2.warpAffine( new_face, mat * size, image_size, new_image, cv2.WARP_INVERSE_MAP, cv2.BORDER_TRANSPARENT )

        if seamlessClone:
            # Map the centre of the 64x64 face crop back into frame coordinates
            # and let OpenCV blend the two images around that point.
            maskx, masky = cv2.transform( numpy.array([ size/2, size/2 ]).reshape(1,1,2), cv2.invertAffineTransform( mat * size ) ).reshape(2).astype(int)
            outimage = cv2.seamlessClone( new_image.astype(numpy.uint8), base_image.astype(numpy.uint8), (image_mask*255).astype(numpy.uint8), (maskx, masky), cv2.NORMAL_CLONE )
        else:
            # Plain alpha blend: mask * converted + (1 - mask) * original.
            foreground = cv2.multiply( image_mask, new_image.astype(float) )
            background = cv2.multiply( 1.0 - image_mask, base_image.astype(float) )
            outimage = cv2.add( foreground, background )

        return outimage

    def main( args ):
        input_dir = Path( args.input_dir )
        assert input_dir.is_dir()

        alignments = input_dir / args.alignments
        with alignments.open() as f:
            alignments = json.load(f)

        output_dir = input_dir / args.output_dir
        output_dir.mkdir( parents=True, exist_ok=True )

        # "AtoB" means: take frames of A and redraw the faces with B's decoder.
        if args.direction == 'AtoB': autoencoder = autoencoder_B
        if args.direction == 'BtoA': autoencoder = autoencoder_A

        if args.erosionKernelSize > 0:
            erosion_kernel = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, (args.erosionKernelSize, args.erosionKernelSize) )
        else:
            erosion_kernel = None

        for e in alignments:
            if len(e) < 4:
                raise LookupError('This script expects new format json files with face points included.')

        for image_file, face_file, mat, facepoints in tqdm( alignments ):
            image = cv2.imread( str( input_dir / image_file ) )
            face  = cv2.imread( str( input_dir / face_file ) )

            mat = numpy.array(mat).reshape(2,3)

            if image is None: continue
            if face  is None: continue

            new_image = convert_one_image( autoencoder, image, mat, facepoints, erosion_kernel, args.blurSize, args.seamlessClone, args.maskType )

            output_file = output_dir / Path(image_file).name
            cv2.imwrite( str(output_file), new_image )

    def str2bool(v):
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        parser.add_argument( "input_dir", type=str )
        parser.add_argument( "alignments", type=str, nargs='?', default='alignments.json' )
        parser.add_argument( "output_dir", type=str, nargs='?', default='merged' )

        # const=True lets a bare --seamlessClone enable the option.
        parser.add_argument( "--seamlessClone", type=str2bool, nargs='?', const=True, default=False, help="Attempt to use opencv seamlessClone." )

        parser.add_argument( "--maskType", type=str, default='FaceHullAndRect', choices=['FaceHullAndRect','FaceHull','Rect'], help="The type of masking to use around the face." )

        parser.add_argument( "--blurSize", type=int, default=2 )
        parser.add_argument( "--erosionKernelSize", type=int, default=0 )
        parser.add_argument( "--direction", type=str, default="AtoB", choices=["AtoB", "BtoA"] )
        main( parser.parse_args() )
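
A hypothetical programmatic run of merge_faces_masked.py (paths are placeholders; it assumes model.py and the trained weights under models/ are present, since the imports at the top of the script load them on import):

    import argparse
    import merge_faces_masked as merge

    merge.main( argparse.Namespace(
        input_dir         = "./frames",          # same directory used for alignment
        alignments        = "alignments.json",   # produced by align_images_masked.py
        output_dir        = "merged",
        direction         = "AtoB",
        maskType          = "FaceHullAndRect",
        blurSize          = 2,
        erosionKernelSize = 0,
        seamlessClone     = False,
    ) )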