#based on https://towardsdatascience.com/a-beginners-guide-to-building-your-own-face-recognition-system-to-creep-out-your-friends-df3f4c471d55
##install:
# pip3 install cmake
# pip3 install face_recognition
# or pip3 install boost & pip --no-cache-dir install face_recognition
# pip3 install numpy
# pip3 install dlib
# pip3 install opencv-python
##
##for ChromeOS Linux
# due to a current security limitation, the web camera is not available on ChromeOS yet: https://support.google.com/chromebook/answer/9145439?hl=en
# but this can be worked around with an Android phone running "RTSP Camera Server" (https://play.google.com/store/apps/details?id=com.miv.rtspcamera&hl=en_CA),
# which can be accessed remotely over the RTSP protocol and "mounted" locally via ffmpeg,
# e.g. ffmpeg -i rtsp://@192.168.0.12:5554/camera -acodec copy -vcodec copy -f v4l2 ~/video1
# or the python video source can be switched to the rtsp stream itself (see the sketch at the end of this file)

#code forked and tweaked from https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py
#to extend, just add more people into the known_people folder

import face_recognition
import cv2
import numpy as np
import os
import glob

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Build arrays of sample pictures with their encodings
known_face_encodings = []
known_face_names = []
dirname = os.path.dirname(__file__)
path = os.path.join(dirname, 'known_people/')

# Make an array of all the saved jpg files' paths
list_of_files = [f for f in glob.glob(path + '*.jpg')]
# Find the number of known faces
number_files = len(list_of_files)
names = list_of_files.copy()

# Load each sample picture and compute its face encoding
for i in range(number_files):
    known_image = face_recognition.load_image_file(list_of_files[i])
    known_image_encoding = face_recognition.face_encodings(known_image)[0]
    known_face_encodings.append(known_image_encoding)

    # Use the file name (without directory or .jpg extension) as the person's name
    names[i] = os.path.splitext(os.path.basename(list_of_files[i]))[0]
    known_face_names.append(names[i])

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    if not ret:
        # Stop if the camera/stream did not return a frame
        break

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]

            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
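
# --- Optional: use the RTSP stream directly as the video source ---
# A minimal sketch of the "python video source can be switched to the rtsp stream itself"
# option mentioned in the header comments. The URL below just mirrors the ffmpeg example
# above and is an assumption; adjust it to whatever address "RTSP Camera Server" reports
# on your phone. To try it, replace the cv2.VideoCapture(0) call near the top with:
#
#   video_capture = cv2.VideoCapture('rtsp://192.168.0.12:5554/camera')
#   if not video_capture.isOpened():
#       raise RuntimeError('Could not open the RTSP stream')
#
# OpenCV needs to be built with FFMPEG support for RTSP URLs to work; the stock
# opencv-python wheels normally include it.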