@tejastank
Created December 28, 2023 07:30
tongue-detection-and-recognition.py
# pip install opencv-python dlib facenet-pytorch
# xamta infotech, guidelines to install dependencies. [email protected]

import cv2
import dlib
import numpy as np
from facenet_pytorch import MTCNN


def detect_tongue(image_path):
    # Load the image
    image = cv2.imread(image_path)
    if image is None:
        print(f"Could not read image: {image_path}")
        return

    # Initialize the MTCNN face detection model
    mtcnn = MTCNN(keep_all=True)

    # MTCNN expects RGB input; OpenCV loads images as BGR
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Detect faces in the image
    boxes, probs = mtcnn.detect(rgb_image)

    if boxes is None:
        print("No face detected in the image.")
        return

    # Assuming the first detected face is the correct one;
    # clip the box to the image bounds before slicing
    x1, y1, x2, y2 = boxes[0].astype(int)
    x1, y1 = max(x1, 0), max(y1, 0)

    # Extract the region of interest (ROI) around the detected face
    face_roi = image[y1:y2, x1:x2]

    # Display the detected face
    cv2.imshow("Detected Face", face_roi)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # Load the pre-trained 68-point facial landmark predictor from dlib
    predictor_path = "shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(predictor_path)

    # Convert the face ROI to grayscale for facial landmark detection
    gray_face = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)

    # Detect facial landmarks inside the face ROI
    landmarks = predictor(gray_face, dlib.rectangle(0, 0, face_roi.shape[1], face_roi.shape[0]))

    # The 68-point model has no dedicated tongue points; landmarks 48-67 outline
    # the mouth, and points 54-59 trace part of the outer lower lip, used here
    # as a rough proxy for the tongue region
    tongue_landmarks = np.array(
        [(landmarks.part(i).x, landmarks.part(i).y) for i in range(54, 60)],
        dtype=np.int32,  # cv2.fillPoly requires 32-bit integer points
    )

    # Create a mask for the approximate tongue region
    mask = np.zeros_like(gray_face)
    cv2.fillPoly(mask, [tongue_landmarks], 255)

    # Display the masked region
    tongue = cv2.bitwise_and(gray_face, gray_face, mask=mask)
    cv2.imshow("Detected Tongue", tongue)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == "__main__":
    image_path = "path/to/your/image.jpg"
    detect_tongue(image_path)
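
The predictor file shape_predictor_68_face_landmarks.dat is not installed by pip; it has to be fetched separately from dlib's download site. A minimal sketch of downloading and unpacking it, assuming network access and write permission in the working directory:

# Helper sketch (not part of the gist): download and unpack dlib's 68-point landmark model
import bz2
import urllib.request

URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
archive = "shape_predictor_68_face_landmarks.dat.bz2"
urllib.request.urlretrieve(URL, archive)
with bz2.open(archive, "rb") as src, open("shape_predictor_68_face_landmarks.dat", "wb") as dst:
    dst.write(src.read())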
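
Because the 68-point model has no tongue landmarks, another common proxy is the inner-lip polygon (points 60-67), which outlines the mouth opening where the tongue appears when visible. A hypothetical helper sketching that variant of the masking step, reusing the gray_face and landmarks values produced inside detect_tongue and the same cv2/numpy imports:

# Hypothetical alternative: mask the mouth opening (inner-lip points 60-67)
def mouth_opening_mask(gray_face, landmarks):
    pts = np.array(
        [(landmarks.part(i).x, landmarks.part(i).y) for i in range(60, 68)],
        dtype=np.int32,
    )
    mask = np.zeros_like(gray_face)
    cv2.fillPoly(mask, [pts], 255)
    return cv2.bitwise_and(gray_face, gray_face, mask=mask)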