1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
| import cv2,os import numpy as np from PIL import Image import pymongo,random
class VideoCamera(object): def __init__(self): self.video = cv2.VideoCapture(0) self.video.set(3, 640) self.video.set(4, 480) self.face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') mongodbs = pymongo.MongoClient('127.0.0.1', 27017) self.face = mongodbs.face self.path = "dataset" def __del__(self): self.video.release()
def get_frame(self): success, image = self.video.read() ret, jpeg = cv2.imencode('.jpg', image) return jpeg.tobytes()
def data_set(self, face_name): count = 0 face_id = random.randint(0, 100000) while(True): ret, img = self.video.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = self.face_detector.detectMultiScale(gray, 1.3, 5) for (x,y,w,h) in faces: cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2) count += 1 cv2.imwrite(self.path+"/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w]) k = cv2.waitKey(100) & 0xff if k == 27: break elif count >= 30: break self.face.face.save({"face_name":face_name,"face_id":face_id}) self.training() def training(self): recognizer = cv2.face.LBPHFaceRecognizer_create() imagePaths = [os.path.join(self.path,f) for f in os.listdir(self.path)] faceSamples=[] ids = [] for imagePath in imagePaths: PIL_img = Image.open(imagePath).convert('L') img_numpy = np.array(PIL_img,'uint8') id = int(os.path.split(imagePath)[-1].split(".")[1]) faces = self.face_detector.detectMultiScale(img_numpy) for (x,y,w,h) in faces: faceSamples.append(img_numpy[y:y+h,x:x+w]) ids.append(id) recognizer.train(faceSamples, np.array(ids)) recognizer.write('trainer/trainer.yml') def recognition(self): recognizer = cv2.face.LBPHFaceRecognizer_create() recognizer.read('trainer/trainer.yml') font = cv2.FONT_HERSHEY_SIMPLEX minW = 0.1*self.video.get(3) minH = 0.1*self.video.get(4) while True: id = 'unknown' ret, img = self.video.read() gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) faces = self.face_detector.detectMultiScale( gray, scaleFactor = 1.2, minNeighbors = 5, minSize = (int(minW), int(minH)), ) for(x,y,w,h) in faces: cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2) id, confidence = recognizer.predict(gray[y:y+h,x:x+w]) if (confidence < 100): face = self.face.face.find_one({"face_id":id}) id = id confidence = " {0}%".format(round(100 - confidence)) else: id = "unknown" confidence = " {0}%".format(round(100 - confidence)) if id != "unknown": break return face['face_name']
|