Face Recognition with dlib
dlib's high-accuracy face recognition is implemented with a deep network based on ResNet-34. The network was trained on roughly three million face images, producing the pretrained face recognition model used below.
Download:
https://github.com/davisking/dlib-models/blob/master/dlib_face_recognition_resnet_model_v1.dat.bz2
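The file downloads as a .bz2 archive; decompress it before loading. A minimal sketch of decompressing and loading the model with dlib (file names are the defaults from the link above, paths are assumptions):

# Decompress the downloaded archive and load the model (assumes the .bz2 file sits next to this script)
import bz2
import shutil
import dlib

with bz2.open("dlib_face_recognition_resnet_model_v1.dat.bz2", "rb") as src, \
        open("dlib_face_recognition_resnet_model_v1.dat", "wb") as dst:
    shutil.copyfileobj(src, dst)

encoder = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")
print(type(encoder))  # dlib.face_recognition_model_v1 if the model loaded successfully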
face_rg.py
# 1 Load libraries
import cv2
import numpy as np
import face_recognition

# 2 Load the known images
liu = cv2.imread("liu.jpeg")
guo = cv2.imread("guo.jpg")

# 3 Convert BGR to RGB (.copy() keeps the reversed view contiguous, which dlib requires)
liu_RGB = liu[:, :, ::-1].copy()
guo_RGB = guo[:, :, ::-1].copy()

# 4 Detect the faces
liu_face = face_recognition.face_locations(liu_RGB)
guo_face = face_recognition.face_locations(guo_RGB)

# 5 Encode the faces as 128-D feature vectors
liu_encoding = face_recognition.face_encodings(liu_RGB, liu_face)[0]
guo_encoding = face_recognition.face_encodings(guo_RGB, guo_face)[0]

# 6 Put all known faces together and use them as the "database"
encodings = [liu_encoding, guo_encoding]
names = ["liu de hua", "guo fu cheng"]

# 7 Open the camera and read the video stream
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Camera Error !")

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
    # 8 Convert BGR to RGB
    frame_RGB = frame[:, :, ::-1].copy()
    # 9 Detect faces in the frame
    faces_locations = face_recognition.face_locations(frame_RGB)
    # 10 Encode the detected faces
    faces_encodings = face_recognition.face_encodings(frame_RGB, faces_locations)
    # 11 Match every detected face against the database
    for (top, right, bottom, left), face_encoding in zip(faces_locations, faces_encodings):
        # 12 Boolean match against each known encoding
        matches = face_recognition.compare_faces(encodings, face_encoding)
        # 13 Compute the distances and take the closest known face
        distances = face_recognition.face_distance(encodings, face_encoding)
        min_distance_index = np.argmin(distances)
        # 14 If the closest known face is a match, use its name
        name = "Unknown"
        if matches[min_distance_index]:
            name = names[min_distance_index]
        # 15 Draw the face bounding box
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 3)
        # 16 Draw a label box below the face
        cv2.rectangle(frame, (left, bottom - 30), (right, bottom), (0, 0, 255), 3)
        # 17 Show the name
        cv2.putText(frame, name, (left + 10, bottom - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 1)
    # 18 Show the annotated frame
    cv2.imshow("face recognition", frame)
    # 19 Press Q to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# 20 Release all resources
cap.release()
cv2.destroyAllWindows()
Face recognition with the face_recognition library
Methods used (a short usage sketch follows the API documentation link below):
face_distance(face_encodings, face_to_compare)
face_locations(img, number_of_times_to_upsample=1, model="hog")
face_landmarks(face_image, face_locations=None, model="large")
face_encodings(face_image, known_face_locations=None, num_jitters=1, model="small")
compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6)
face_recognition API documentation:
https://face-recognition.readthedocs.io/en/latest/_modules/face_recognition/api.html#face_locations
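A minimal sketch that exercises these five calls together, assuming the same sample images used above ("liu.jpeg" and "guo.jpg"):

# Small end-to-end usage example of the face_recognition API listed above
import face_recognition

known = face_recognition.load_image_file("liu.jpeg")   # returns an RGB numpy array
test = face_recognition.load_image_file("guo.jpg")

# Face detection (HOG detector by default)
known_locs = face_recognition.face_locations(known, number_of_times_to_upsample=1, model="hog")
test_locs = face_recognition.face_locations(test)

# Optional: 68-point landmarks for the known image
landmarks = face_recognition.face_landmarks(known, face_locations=known_locs)

# 128-D encodings of the first detected face in each image
known_enc = face_recognition.face_encodings(known, known_face_locations=known_locs)[0]
test_enc = face_recognition.face_encodings(test, known_face_locations=test_locs)[0]

# Boolean match (default tolerance 0.6) and the underlying Euclidean distance
print(face_recognition.compare_faces([known_enc], test_enc, tolerance=0.6))
print(face_recognition.face_distance([known_enc], test_enc))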
face_rg_dlib.py
# 1 Import libraries
import cv2
import dlib
import numpy as np

# Encode each detected face's landmarks into a 128-D feature vector
def encoder_face(image, detector, predictor, encoder, upsample=1, jet=1):
    # Detect faces
    faces = detector(image, upsample)
    # Detect the landmarks of every face
    faces_keypoints = [predictor(image, face) for face in faces]
    # Compute the 128-D descriptor for every set of landmarks
    return [np.array(encoder.compute_face_descriptor(image, face_keypoint, jet))
            for face_keypoint in faces_keypoints]

# Compare faces by the Euclidean distance between encodings
def compare_faces(face_encoding, test_encoding):
    return list(np.linalg.norm(np.array(face_encoding) - np.array(test_encoding), axis=1))

# Compare faces and return the distances together with their names, sorted by distance
def compare_faces_order(face_encoding, test_encoding, names):
    distance = list(np.linalg.norm(np.array(face_encoding) - np.array(test_encoding), axis=1))
    return zip(*sorted(zip(distance, names)))

def main():
    # 2 Read four known images and one test image
    img1 = cv2.imread("guo.jpg")
    img2 = cv2.imread("liu1.jpg")
    img3 = cv2.imread("liu2.jpg")
    img4 = cv2.imread("liu3.jpg")
    test = cv2.imread("liu4.jpg")
    # BGR to RGB (.copy() keeps the arrays contiguous, which dlib requires)
    img1 = img1[:, :, ::-1].copy()
    img2 = img2[:, :, ::-1].copy()
    img3 = img3[:, :, ::-1].copy()
    img4 = img4[:, :, ::-1].copy()
    test = test[:, :, ::-1].copy()
    img_names = ["guo.jpg", "liu1.jpg", "liu2.jpg", "liu3.jpg"]
    # 3 Load the face detector
    detector = dlib.get_frontal_face_detector()
    # 4 Load the landmark predictor
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # 5 Load the face encoding model
    encoder = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")
    # 6 Compute the 128-D feature vector of the first face in each image
    img1_128D = encoder_face(img1, detector, predictor, encoder)[0]
    img2_128D = encoder_face(img2, detector, predictor, encoder)[0]
    img3_128D = encoder_face(img3, detector, predictor, encoder)[0]
    img4_128D = encoder_face(img4, detector, predictor, encoder)[0]
    test_128D = encoder_face(test, detector, predictor, encoder)[0]
    four_images_128D = [img1_128D, img2_128D, img3_128D, img4_128D]
    # 7 Compare the test face against the four known faces: the smaller the distance, the more likely it is the same person
    distance = compare_faces(four_images_128D, test_128D)
    print(distance)
    distance, name = compare_faces_order(four_images_128D, test_128D, img_names)
    print("\n")
    print("distance: {}, \n names: {} ".format(distance, name))

if __name__ == '__main__':
    main()
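The script above only prints the distances; to actually decide whether the test face belongs to the same person, dlib's model documentation treats a distance below 0.6 as a match. A small sketch of that decision step (the helper name and default threshold are assumptions, not part of the original script):

# Pick the closest known face and accept it only if the distance is below the threshold
def identify(sorted_distances, sorted_names, threshold=0.6):
    # sorted_distances / sorted_names come from compare_faces_order(), smallest distance first
    if sorted_distances[0] < threshold:
        return sorted_names[0]
    return "Unknown"

# Usage inside main(), after the compare_faces_order() call:
#   print(identify(list(distance), list(name)))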