【python】OpenCV—Age and Gender Classification


1. Task Description

Predict the gender and the age bracket of each face in an image.

2. Network Architecture

2.1 Face Detection

[figure: face detection network structure]

The detector outputs the top 200 RoIs. Each RoI is a 7-value row, (xx, xx, score, x0, y0, x1, y1), where the last four values are normalized box coordinates.
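As a minimal decoding sketch (assuming faceNet has already been loaded with cv.dnn.readNet and frame is a BGR image, as in the full script below), each detection row can be turned into pixel coordinates like this; the 0.7 threshold is simply the same value used later in getFaceBox:

import cv2 as cv
import numpy as np

blob = cv.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123], True, False)
faceNet.setInput(blob)
detections = faceNet.forward()            # shape (1, 1, 200, 7)
h, w = frame.shape[:2]
for det in detections[0, 0]:              # one row per RoI: (xx, xx, score, x0, y0, x1, y1)
    score = float(det[2])
    if score < 0.7:                       # drop low-confidence RoIs
        continue
    x0, y0, x1, y1 = (det[3:7] * np.array([w, h, w, h])).astype(int)
    print(score, (x0, y0, x1, y1))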

2.2 Gender Classification

A binary classification: Male or Female.

[figure: gender classification network structure]
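A minimal sketch of the gender branch (assuming genderNet has been loaded from gender_net.caffemodel / gender_deploy.prototxt and face is a cropped BGR face image): the crop is resized to 227x227, the model's channel means are subtracted, and the 2-way softmax output is mapped onto genderList:

import cv2 as cv

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
genderList = ['Male', 'Female']

blob = cv.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
genderNet.setInput(blob)
genderPreds = genderNet.forward()         # shape (1, 2): probabilities for Male / Female
gender = genderList[genderPreds[0].argmax()]
print(gender, float(genderPreds[0].max()))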

2.3 Age Classification

Classification over age brackets: ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']

[figure: age classification network structure]
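The age branch works the same way (a sketch assuming ageNet is loaded from age_net.caffemodel / age_deploy.prototxt and blob is the same 227x227 blob built for the gender net above): the 8-way softmax output indexes into ageList:

ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']

ageNet.setInput(blob)
agePreds = ageNet.forward()               # shape (1, 8): one probability per age bracket
age = ageList[agePreds[0].argmax()]       # e.g. '(8-12)'
print(age, float(agePreds[0].max()))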

3. Code Implementation

Pipeline: detect faces, expand each face box with padding, run gender classification, then age classification, and finally draw and save the annotated result.

# Import required modules
import cv2 as cv
import math
import time
import argparse


def getFaceBox(net, frame, conf_threshold=0.7):
    frameOpencvDnn = frame.copy()
    frameHeight = frameOpencvDnn.shape[0]  # e.g. 333 for the sample image
    frameWidth = frameOpencvDnn.shape[1]   # e.g. 500 for the sample image
    blob = cv.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)

    net.setInput(blob)
    detections = net.forward()  # (1, 1, 200, 7), (xxx, xxx, confidence, x0, y0, x1, y1)
    bboxes = []
    for i in range(detections.shape[2]):  # iterate over the top-200 RoIs
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            x1 = int(detections[0, 0, i, 3] * frameWidth)
            y1 = int(detections[0, 0, i, 4] * frameHeight)
            x2 = int(detections[0, 0, i, 5] * frameWidth)
            y2 = int(detections[0, 0, i, 6] * frameHeight)
            bboxes.append([x1, y1, x2, y2])
            cv.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0), int(round(frameHeight/150)), 8)
    return frameOpencvDnn, bboxes


parser = argparse.ArgumentParser(description='Use this script to run age and gender recognition using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. '
                                    'Skip this argument to capture frames from a camera.',
                    default="jolie.jpg")
parser.add_argument("--device", default="cpu", help="Device to inference on")

args = parser.parse_args()

faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"

ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"

genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']

# Load network
ageNet = cv.dnn.readNet(ageModel, ageProto)
genderNet = cv.dnn.readNet(genderModel, genderProto)
faceNet = cv.dnn.readNet(faceModel, faceProto)


if args.device == "cpu":
    # default OpenCV backend, inference on the CPU
    ageNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    ageNet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    genderNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    genderNet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    faceNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    faceNet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    print("Using CPU device")

elif args.device == "gpu":
    # CUDA backend, inference on the GPU
    ageNet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    ageNet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

    genderNet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    genderNet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

    faceNet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    faceNet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
    print("Using GPU device")


# Open a video file or an image file or a camera stream
cap = cv.VideoCapture(args.input if args.input else 0)
padding = 20
while cv.waitKey(1) < 0:
    # Read frame
    t = time.time()
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameFace, bboxes = getFaceBox(faceNet, frame)  # (333, 500, 3), 4 bbox
    if not bboxes:
        print("No face Detected, Checking next frame")
        continue

    for bbox in bboxes:  # iterate over the detected faces
        # print(bbox)
        face = frame[max(0, bbox[1] - padding):min(bbox[3] + padding, frame.shape[0] - 1),
                     max(0, bbox[0] - padding):min(bbox[2] + padding, frame.shape[1] - 1)]  # expand the face box by `padding` pixels on each side

        blob = cv.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        # array([[9.9999559e-01, 4.4012304e-06]], dtype=float32), 'Male'
        # print("Gender Output : {}".format(genderPreds))
        print("Gender : {}, conf = {:.3f}".format(gender, genderPreds[0].max()))

        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        """
        array([[5.3957672e-05, 5.3967893e-02, 9.4579268e-01, 1.0875276e-04, 5.0436443e-05, 
                1.2142612e-05, 1.0151542e-05, 3.9845672e-06]],dtype=float32)
        """
        age = ageList[agePreds[0].argmax()]  # '(8-12)'
        # print("Age Output : {}".format(agePreds))
        # print("Age : {}, conf = {:.3f}".format(age, agePreds[0].max()))

        label = "{},{}".format(gender, age)  # e.g. 'Male,(8-12)'
        cv.putText(frameFace, label, (bbox[0], bbox[1]-5), cv.FONT_HERSHEY_SIMPLEX,
                   0.6, (0, 0, 255), 2, cv.LINE_AA)
        # cv.imshow("Age Gender Demo", frameFace)
        cv.imwrite("age-gender-out-{}".format(args.input), frameFace)
    print("time : {:.3f}".format(time.time() - t))

4. Results

Input image: [figure]

Face detection result: [figure]

Expanded face crop: [figure]

Output result: [figure]

Gender prediction is fairly accurate.

Additional input image / output result pairs for four more test images: [figures]

5. References

OpenCV进阶(8)性别和年龄识别 (OpenCV Advanced (8): Gender and Age Recognition)

