
Real-time camera face recognition with the Baidu API

Posted: 2020-01-04 20:12:06


Hi everyone, I'm 爱码士_秃头兔子, a recent graduate just starting out in computer vision. If this article helps you, I'd appreciate a like. Thanks!


Contents
- Create a Baidu AI Cloud account and log in
- Calling the API
- Complete code

This post uses the Baidu API together with OpenCV to run real-time face detection and library search on a camera feed. It is not production-grade, but it is more than enough for a graduation project or a course assignment. A classmate of mine paid a lot of money for a program like this, which is a waste, so here is a write-up you can adapt directly.

Create a Baidu AI Cloud account and log in

The previous post on static face recognition already covers the setup in detail; if you are unsure, see that static M:N search tutorial. The first steps are identical: create a Baidu AI Cloud account, open the console, expand the left-hand menu and choose Face Recognition, claim the free quota, create an application, and build a face library by creating a user group, creating users, and adding face images for each user.
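For reference, the same enrollment can also be scripted instead of clicked through in the console. Below is a minimal sketch assuming the baidu-aip Python SDK (pip install baidu-aip); the group id, user id and image path are placeholders to replace with your own values.

```python
import base64
from aip import AipFace

# Credentials of the application created in the console (fill in your own).
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''
client = AipFace(APP_ID, API_KEY, SECRET_KEY)

GROUP_ID = 'group_1'   # placeholder: the user group created in the console
USER_ID = 'user_1'     # placeholder: the user to enrol

# The API only accepts Base64-encoded images.
with open('face.jpg', 'rb') as f:   # placeholder path to one clear face photo
    image = base64.b64encode(f.read()).decode('utf-8')

# Register the face under the given group/user (same effect as adding it in the console).
result = client.addUser(image, 'BASE64', GROUP_ID, USER_ID)
print(result)
```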

I won't belabor those console steps here; straight to the good stuff.

Calling the API

Because the API only accepts images in Base64, we first grab frames from the video stream and convert each one to a Base64 string.

```python
def frame2base64(frame):
    img = Image.fromarray(frame)               # convert the frame (numpy array) to a PIL Image
    output_buffer = BytesIO()                  # in-memory buffer
    img.save(output_buffer, format='JPEG')     # encode the frame as JPEG into the buffer
    byte_data = output_buffer.getvalue()       # read the raw JPEG bytes
    base64_data = base64.b64encode(byte_data)  # Base64-encode the bytes
    return base64_data                         # return the Base64-encoded frame
```
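As a quick sanity check, you can push a single captured frame through this helper before wiring up the full loop. A minimal sketch, assuming a webcam at index 0 and that frame2base64 (with its imports) is already defined as above:

```python
import cv2

cap = cv2.VideoCapture(0)   # index 0 is an assumption; use your own camera index or a video file
ok, frame = cap.read()      # grab one frame
cap.release()

if ok:
    encoded = frame2base64(frame)      # bytes containing the Base64 data
    payload = str(encoded, 'UTF-8')    # the Baidu SDK expects a str, not bytes
    print(payload[:60], '...')         # print the first characters as a sanity check
```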

If you want to label the face box with Chinese text, cv2.putText() won't work directly; the characters just come out as "???".

So we need a small helper that draws the text with PIL instead:

```python
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    if isinstance(img, np.ndarray):  # OpenCV image (numpy array)? convert to PIL first
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # create a drawing handle on the image
    draw = ImageDraw.Draw(img)
    # font that contains Chinese glyphs (SimSun ships with Windows)
    fontStyle = ImageFont.truetype("simsun.ttc", textSize, encoding="utf-8")
    # draw the label
    draw.text((left, top), text, textColor, font=fontStyle)
    # convert back to OpenCV (BGR) format
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
```

With this helper, Chinese labels render correctly.
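To try the helper on its own, something like the sketch below works; the image path and the label text are placeholders:

```python
import cv2

frame = cv2.imread('test.jpg')                   # placeholder: any BGR image will do
labeled = cv2ImgAddText(frame, '张三', 30, 30,   # placeholder Chinese label near the top-left corner
                        textColor=(255, 255, 255), textSize=24)
cv2.imshow('label demo', labeled)
cv2.waitKey(0)
cv2.destroyAllWindows()
```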

Complete code

```python
import base64
import threading
from io import BytesIO

import cv2
import numpy as np
from aip import AipFace
from PIL import Image, ImageDraw, ImageFont


def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    if isinstance(img, np.ndarray):  # OpenCV image (numpy array)? convert to PIL first
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)                                                # drawing handle
    fontStyle = ImageFont.truetype("simsun.ttc", textSize, encoding="utf-8")  # font with Chinese glyphs
    draw.text((left, top), text, textColor, font=fontStyle)                   # draw the label
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)                   # back to OpenCV BGR


def frame2base64(frame):
    img = Image.fromarray(frame)               # convert the frame (numpy array) to a PIL Image
    output_buffer = BytesIO()                  # in-memory buffer
    img.save(output_buffer, format='JPEG')     # encode the frame as JPEG into the buffer
    byte_data = output_buffer.getvalue()       # raw JPEG bytes
    base64_data = base64.b64encode(byte_data)  # Base64-encode the bytes
    return base64_data


def process(image, ls):
    """Detect faces in one frame, search them against the face library,
    draw the results on the frame and append it to ls."""
    # Replace with your own application credentials and user group id.
    APP_ID = ''
    API_KEY = ''
    SECRET_KEY = ''
    groupIdList = ''          # e.g. 'group_1'
    imageType = 'BASE64'

    img_base64 = str(frame2base64(image), 'UTF-8')  # the SDK expects a str, not bytes

    # Face detection with a few extra attribute fields.
    options = {}
    options["face_field"] = "age,gender,emotion"
    options["max_face_num"] = 10
    options["face_type"] = "LIVE"
    # options["liveness_control"] = "NORMAL"

    client = AipFace(APP_ID, API_KEY, SECRET_KEY)
    face_data = client.detect(img_base64, imageType, options)
    print(face_data)

    user_result = {}
    user_attribute = {}
    user_type = '访客'    # "visitor"
    is_user = False

    if face_data['error_msg'] == 'SUCCESS':
        first_face = face_data['result']['face_list'][0]
        if first_face['face_probability'] > 0.8:
            is_user = True
            age = str(first_face['age'])
            gender = '男' if first_face['gender']['type'] == 'male' else '女'   # male / female
            emotion = str(first_face['emotion']['type'])
            if emotion == 'disgust':
                emotion = 'angry'
            elif emotion == 'pouty':
                emotion = 'sad'
            elif emotion == 'grimace':
                emotion = 'happy'
            else:
                emotion = 'neutral'

            # M:N search against the face library.
            options = {}
            options["max_face_num"] = 10
            options["match_threshold"] = 0
            options["quality_control"] = "LOW"
            options["liveness_control"] = "LOW"
            options["max_user_num"] = 7
            json1 = client.multiSearch(img_base64, imageType, groupIdList, options)
            print(json1)

            face_num = face_data['result']['face_num']
            for i in range(face_num):
                location = face_data['result']['face_list'][i]['location']
                x = max(int(location['left']), 0)
                y = max(int(location['top']), 0)
                width = int(location['width'])
                height = int(location['height'])
                cv2.rectangle(image, (x, y), (x + width, y + height), (0, 0, 255), 2)
                if json1['error_msg'] == 'SUCCESS':
                    user_attribute = []
                    user_type = '员工'   # "staff"
                    user_result = {'is_user': is_user,
                                   'user_type': user_type,
                                   'user_attribute': user_attribute}
                    match = json1['result']['face_list'][0]['user_list'][i]
                    if match['score'] > 70:
                        print(match['user_id'])
                        # Matched: label the box with the registered user_id (may be Chinese).
                        image = cv2ImgAddText(image, match['user_id'],
                                              max(x - 20, 0), max(y - 20, 0),
                                              (255, 255, 255), 20)
                    else:
                        # No match: show the detected age and gender instead.
                        face = face_data['result']['face_list'][i]
                        cv2.putText(image, f"{face['age']} {face['gender']['type']}",
                                    (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 255, 255), 2)
            ls.append(image)
        else:
            user_result = {'is_user': is_user}
            print(user_result)
    else:
        user_result = {'is_user': is_user}
        print(user_result)


def main():
    # video_capture = cv2.VideoCapture(0)   # use 0 for a live webcam
    video_capture = cv2.VideoCapture('1.avi')
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # width
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # height
    video_capture.set(cv2.CAP_PROP_FPS, 30)            # frame rate
    video_capture.set(cv2.CAP_PROP_BRIGHTNESS, 1)      # brightness
    video_capture.set(cv2.CAP_PROP_CONTRAST, 40)       # contrast
    video_capture.set(cv2.CAP_PROP_SATURATION, 50)     # saturation
    video_capture.set(cv2.CAP_PROP_HUE, 50)            # hue
    video_capture.set(cv2.CAP_PROP_EXPOSURE, 50)       # exposure
    while True:
        ls = []
        ret, frame = video_capture.read()
        if not ret:                         # end of video / camera error
            break
        t = threading.Thread(target=process, args=(frame, ls))
        t.start()
        t.join()
        frame = ls[0] if ls else frame      # fall back to the raw frame if processing failed
        cv2.imshow('wx', frame)
        if cv2.waitKey(1) & 0xFF == ord('Q'):   # press capital Q to quit
            break
    cv2.destroyAllWindows()
    video_capture.release()


if __name__ == "__main__":
    main()
```
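Two small notes on the design: main() starts a thread for each frame and immediately joins it, so processing is effectively synchronous and the displayed frame rate is bounded by the API round-trip time, which is fine for a demo. To read from a live webcam instead of the sample video, switch cv2.VideoCapture('1.avi') to cv2.VideoCapture(0); press a capital Q (Shift+Q) in the preview window to quit.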

Just fill in your own APP_ID, API_KEY, SECRET_KEY and group id and the code is ready to use. It only calls Baidu's hosted models, so there is nothing technically deep here; it's aimed at beginners and is good enough for a simple graduation or course project. If anything is unclear, leave a comment or send me a private message and I'll reply when I'm online.
