Everything posted by Croatian.Apoxyomenos

  1. Thank you for the advice! What I am interested in is whether face recognition can be done in real time, through the webcam, the way age and gender estimation is done by the GitHub script you pointed me to. (A minimal sketch of real-time recognition through the webcam is included below.)
  2. Hello! A week ago I started learning Python in order to help my girlfriend with her bachelor's thesis project. With the little knowledge I have gathered so far, I managed to put together the program below by combining two scripts taken from GitHub: one for face recognition and the other for gender and age estimation. As written below, it works fine on any image you point it at. The gender and age estimation script also has a function for identifying these characteristics through the webcam, but that function no longer works after the two scripts were combined. I have run out of ideas: I could neither fix the problem nor even remove the video functionality so that it is no longer in the code. I hope I have described the situation in the right terms so that it is as clear as possible. Could you please help me resolve it? Thank you! (A likely cause and a possible fix are sketched below.)

     import face_recognition as fr
     import cv2
     import math
     import numpy as np
     import os
     import argparse

     # Now, create 2 lists that store the names of the images (persons) and their respective face encodings.
     path = "./train/"
     known_names = []
     known_name_encodings = []
     images = os.listdir(path)

     """
     We loop through each of the images in our train directory, extract the name of
     the person in the image, calculate its face encoding vector and store the
     information in the respective lists.
     """
     for _ in images:
         image = fr.load_image_file(path + _)
         image_path = path + _
         encoding = fr.face_encodings(image)[0]
         known_name_encodings.append(encoding)
         known_names.append(os.path.splitext(os.path.basename(image_path))[0].capitalize())

     # Read the test image using the cv2 imread() method.
     parser = argparse.ArgumentParser()
     parser.add_argument("-i", "--image", type=str, default="./test/test.jpg",
                         help="path to the input image")
     args = parser.parse_args()
     image = cv2.imread(args.image)

     """
     The face_recognition library provides a useful method called face_locations()
     which locates the coordinates (top, right, bottom, left) of every face detected
     in the image. Using those location values we can easily find the face encodings.
     """
     face_locations = fr.face_locations(image)
     face_encodings = fr.face_encodings(image, face_locations)

     def highlightFace(net, frame, conf_threshold=0.7):
         frameOpencvDnn = frame.copy()
         frameHeight = frameOpencvDnn.shape[0]
         frameWidth = frameOpencvDnn.shape[1]
         blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
         net.setInput(blob)
         detections = net.forward()
         faceBoxes = []
         for i in range(detections.shape[2]):
             confidence = detections[0, 0, i, 2]
             if confidence > conf_threshold:
                 x1 = int(detections[0, 0, i, 3] * frameWidth)
                 y1 = int(detections[0, 0, i, 4] * frameHeight)
                 x2 = int(detections[0, 0, i, 5] * frameWidth)
                 y2 = int(detections[0, 0, i, 6] * frameHeight)
                 faceBoxes.append([x1, y1, x2, y2])
                 cv2.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0),
                               int(round(frameHeight / 150)), 8)
         return frameOpencvDnn, faceBoxes

     # --- Code for age detection starts here ---
     faceProto = "opencv_face_detector.pbtxt"
     faceModel = "opencv_face_detector_uint8.pb"
     ageProto = "age_deploy.prototxt"
     ageModel = "age_net.caffemodel"
     genderProto = "gender_deploy.prototxt"
     genderModel = "gender_net.caffemodel"

     MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
     ageList = ['(0-3)', '(4-7)', '(8-14)', '(15-24)', '(25-37)', '(38-47)', '(48-59)', '(60-100)']
     genderList = ['Male', 'Female']

     faceNet = cv2.dnn.readNet(faceModel, faceProto)
     ageNet = cv2.dnn.readNet(ageModel, ageProto)
     genderNet = cv2.dnn.readNet(genderModel, genderProto)

     video = cv2.VideoCapture(args.image if args.image else 0)
     padding = 20
     while cv2.waitKey(1) < 0:
         hasFrame, frame = video.read()
         if not hasFrame:
             # cv2.waitKey()
             break
         resultImg, faceBoxes = highlightFace(faceNet, frame)
         if not faceBoxes:
             print("No face detected")
         for faceBox in faceBoxes:
             face = frame[max(0, faceBox[1] - padding):min(faceBox[3] + padding, frame.shape[0] - 1),
                          max(0, faceBox[0] - padding):min(faceBox[2] + padding, frame.shape[1] - 1)]
             blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
             genderNet.setInput(blob)
             genderPreds = genderNet.forward()
             gender = genderList[genderPreds[0].argmax()]
             print(f'Gender: {gender}')
             ageNet.setInput(blob)
             agePreds = ageNet.forward()
             age = ageList[agePreds[0].argmax()]
             print(f'Age: {age[1:-1]} years')
             cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1] - 10),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)

     """
     We loop through each of the face locations and its encoding found in the image.
     Then we compare this encoding with the encodings of the faces from the "train"
     dataset and calculate the face distance, i.e. the similarity between the
     encoding of the test image and those of the train images. We then pick the
     minimum-valued distance, indicating that this face in the test image belongs
     to one of the persons from the training dataset.
     """
     for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
         matches = fr.compare_faces(known_name_encodings, face_encoding)
         name = ""
         face_distances = fr.face_distance(known_name_encodings, face_encoding)
         best_match = np.argmin(face_distances)
         if matches[best_match]:
             name = known_names[best_match]
         # Now, put the name in the box.
         cv2.putText(resultImg, name, (left, bottom + 40),
                     cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)

     # Display the image using the imshow() method of the cv2 module.
     cv2.imshow("Detecting age and gender", resultImg)

     # Release the resources that weren't deallocated (if any).
     cv2.waitKey(0)
     cv2.destroyAllWindows()