- 论坛徽章:
- 0
|
我用shape_predictor_68_face_landmarks.dat计算出脸部的68个点,想得到脸部姿势。通过这个函数在两个人的图片上计算,发现[float(roll), float(pitch), float(yaw)]值接近的两张图,脸的姿势并不一样。谁用过这个函数,帮忙给点意见。
def face_orientation(frame, landmarks):
    """Estimate head pose (roll, pitch, yaw) from 68 dlib facial landmarks.

    Parameters
    ----------
    frame : ndarray
        Image array of shape (height, width, color_channel); only its
        dimensions are used to build approximate camera intrinsics.
    landmarks : ndarray
        dlib 68-point landmarks, shape (68, 2), in pixel coordinates.

    Returns
    -------
    ndarray, shape (3,), dtype float64
        [roll, pitch, yaw] in degrees, each folded into [-90, 90].

    Raises
    ------
    ValueError
        If cv2.solvePnP fails to find a pose.

    NOTE(review): each angle is normalized via asin(sin(x)), which folds
    the full circle into [-90, 90] and therefore maps genuinely different
    poses (e.g. pitch 30 deg vs 150 deg) to the same value. This is a
    likely cause of "similar angles but different actual poses".
    """
    size = frame.shape  # (height, width, color_channel)

    # 2-D image coordinates of six stable landmarks (dlib 68-point indexing).
    image_points = numpy.array([
        (landmarks[33, 0], landmarks[33, 1]),  # Nose tip
        (landmarks[8, 0], landmarks[8, 1]),    # Chin
        (landmarks[36, 0], landmarks[36, 1]),  # Left eye left corner
        (landmarks[45, 0], landmarks[45, 1]),  # Right eye right corner
        (landmarks[48, 0], landmarks[48, 1]),  # Left mouth corner
        (landmarks[54, 0], landmarks[54, 1])   # Right mouth corner
    ], dtype="double")

    # Matching 3-D points of a generic face model (arbitrary units,
    # nose tip at the origin).
    model_points = numpy.array([
        (0.0, 0.0, 0.0),           # Nose tip
        (0.0, -330.0, -65.0),      # Chin
        (-165.0, 170.0, -135.0),   # Left eye left corner
        (165.0, 170.0, -135.0),    # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left mouth corner
        (150.0, -150.0, -125.0)    # Right mouth corner
    ])

    # Approximate camera intrinsics: principal point at the image centre,
    # focal length derived from an assumed 60-degree field of view.
    center = (size[1] / 2, size[0] / 2)
    focal_length = center[0] / numpy.tan(60 / 2 * numpy.pi / 180)
    camera_matrix = numpy.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype="double"
    )
    dist_coeffs = numpy.zeros((4, 1))  # assume no lens distortion

    success, rotation_vector, translation_vector = cv2.solvePnP(
        model_points, image_points, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)
    # FIX: the success flag was previously ignored, silently returning a
    # garbage pose when the solver failed.
    if not success:
        raise ValueError("cv2.solvePnP failed to estimate the head pose")

    # Rotation vector -> rotation matrix -> 3x4 projection matrix, from
    # which OpenCV extracts Euler angles (element 6, returned in DEGREES).
    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
    proj_matrix = numpy.hstack((rvec_matrix, translation_vector))
    euler_angles = cv2.decomposeProjectionMatrix(proj_matrix)[6]

    # FIX: euler_angles has shape (3, 1); extract plain Python floats
    # explicitly -- implicit size-1-array conversion inside math.radians
    # is deprecated in modern NumPy.
    pitch, yaw, roll = (math.radians(float(a)) for a in euler_angles)

    # Fold each angle into [-90, 90]. asin(sin(x)) removes the +/-180 deg
    # wrap-around but also collapses distinct poses (see docstring note).
    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))

    return numpy.array([float(roll), float(pitch), float(yaw)], dtype="double")
|
|