Bootstrap

A full set of MediaPipe detection services: pose, face, and hand.

poses

The bootstrap service captures the frame and forwards it to the other three services.
One drawback is that the face may need to be cropped out of the frame before it is sent to the face service, otherwise the face mesh may fail to detect it, so a separate step (or service) may be needed for that.
The intended flow is to POST to the hand and pose services in parallel, then use the pose result to crop the face region and POST that crop to the face service, and finally return the combined result (a sketch of this crop-then-post flow follows the Bootstrap code below).

import base64

import uvicorn
from fastapi import FastAPI, HTTPException
import cv2
import asyncio
import httpx

app = FastAPI()

# Open the video source (a video file here; use a camera index for a live camera)
cap = cv2.VideoCapture("q888.mp4")


# Async helper: forward the frame to the pose service and return its JSON response
async def get_pose_data(image_data_base64: str):
    async with httpx.AsyncClient() as client:
        response = await client.post("http://127.0.0.1:8002/pose", json={"image_data_base64": image_data_base64})
        if response.status_code == 200:
            return response.json()
        else:
            raise HTTPException(status_code=500, detail="Failed to get pose data from the pose service")


# Async helper: forward the frame to the face service and return its JSON response
async def get_face_data(image_data_base64: str):
    async with httpx.AsyncClient() as client:
        response = await client.post("http://127.0.0.1:8003/face", json={"image_data_base64": image_data_base64})
        if response.status_code == 200:
            return response.json()
        else:
            raise HTTPException(status_code=500, detail="Failed to get face data from the face service")


# Async helper: forward the frame to the hand service and return its JSON response
async def get_hand_data(image_data_base64: str):
    async with httpx.AsyncClient() as client:
        response = await client.post("http://127.0.0.1:8005/hand", json={"image_data_base64": image_data_base64})
        if response.status_code == 200:
            return response.json()
        else:
            raise HTTPException(status_code=500, detail="Failed to get hand data from the hand service")


@app.get("/poses", response_model=list)
async def get_poses():
    # Capture one frame and create a request task for each downstream service
    image_data_base64 = get_image_base64()
    pose_data_tasks = [
        get_pose_data(image_data_base64),
        get_face_data(image_data_base64),
        get_hand_data(image_data_base64),
    ]

    # Run the three requests concurrently
    pose_data_list = await asyncio.gather(*pose_data_tasks)

    return pose_data_list


def get_image_base64():
    # Read one frame from the video source
    success, image = cap.read()
    if not success:
        raise HTTPException(status_code=500, detail="Failed to capture image")

    # Encode the frame as JPEG, then as a Base64 string
    ok, buffer = cv2.imencode('.jpg', image)
    if not ok:
        raise HTTPException(status_code=500, detail="Failed to encode image")
    image_data = buffer.tobytes()
    image_data_base64 = base64.b64encode(image_data).decode('utf-8')

    return image_data_base64


if __name__ == "__main__":
    # Run the service; example only, adjust host and port for a real deployment
    uvicorn.run(app, host="0.0.0.0", port=8001)
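
The bootstrap above posts the full frame to all three services at once; the description at the top also mentions cropping the face region from the pose result before posting to the face service. Below is a minimal sketch of that variant, meant to be added to the bootstrap file and reusing its cap, app, and the three helper coroutines. The names crop_face_region and /poses_cropped are hypothetical, and the pose landmark indices (0-10 for nose, eyes, ears, mouth) plus the padding factor are assumptions for illustration, not part of the existing code.

# Sketch (assumption): crop the face region from the frame using MediaPipe Pose
# landmarks 0-10 (nose, eyes, ears, mouth), with some padding around the head box.
def crop_face_region(image, pose_landmarks, pad=0.5):
    if len(pose_landmarks) < 11:
        return image  # pose not detected; fall back to the full frame
    h, w = image.shape[:2]
    head = pose_landmarks[:11]
    xs = [lm['x'] * w for lm in head]
    ys = [lm['y'] * h for lm in head]
    cx, cy = (min(xs) + max(xs)) / 2, (min(ys) + max(ys)) / 2
    half = max(max(xs) - min(xs), max(ys) - min(ys)) * (1 + pad) / 2
    x1, y1 = max(int(cx - half), 0), max(int(cy - half), 0)
    x2, y2 = min(int(cx + half), w), min(int(cy + half), h)
    return image[y1:y2, x1:x2]


# Sketch (assumption): hand and pose run in parallel on the full frame,
# then the pose result is used to crop the face region for the face service.
@app.get("/poses_cropped", response_model=list)
async def get_poses_cropped():
    success, image = cap.read()
    if not success:
        raise HTTPException(status_code=500, detail="Failed to capture image")
    ok, buffer = cv2.imencode('.jpg', image)
    image_data_base64 = base64.b64encode(buffer.tobytes()).decode('utf-8')

    hand_task = asyncio.create_task(get_hand_data(image_data_base64))
    pose_data = await get_pose_data(image_data_base64)

    face_crop = crop_face_region(image, pose_data['pose_landmarks'])
    ok, face_buffer = cv2.imencode('.jpg', face_crop)
    face_base64 = base64.b64encode(face_buffer.tobytes()).decode('utf-8')
    face_data = await get_face_data(face_base64)

    return [pose_data, face_data, await hand_task]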

face

import base64
import numpy as np
import uvicorn
from fastapi import FastAPI
import cv2
import mediapipe as mp
from pydantic import BaseModel

app = FastAPI()

# Initialize the MediaPipe face mesh model
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh()

class FaceData(BaseModel):
    face_landmarks: list

class FaceRequest(BaseModel):
    image_data_base64: str

@app.post("/face")
def get_face(request: FaceRequest):
    # Decode the Base64 payload back into raw bytes
    image_data = base64.b64decode(request.image_data_base64)
    # Decode the bytes into an image and convert BGR to RGB (MediaPipe expects RGB)
    image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run the face mesh on the image
    results = face_mesh.process(image)

    # If no face is detected, return a single all-zero placeholder landmark
    if not results.multi_face_landmarks:
        face_landmarks = [{
            'x': 0,
            'y': 0,
            'z': 0,
            'visibility': 0
        }]

    else:
        # Collect the position of every landmark on the first detected face
        face_landmarks = []
        for landmark in results.multi_face_landmarks[0].landmark:
            face_landmarks.append({
                'x': landmark.x,
                'y': landmark.y,
                'z': landmark.z,
                # FaceMesh does not populate visibility, so this is typically 0.0
                'visibility': landmark.visibility
            })

    # Return the face data
    return FaceData(face_landmarks=face_landmarks)

if __name__ == "__main__":
    # Run the service; example only, adjust host and port for a real deployment
    uvicorn.run(app, host="0.0.0.0", port=8003)

pose

import base64

import numpy as np
import uvicorn
from fastapi import FastAPI
import cv2
import mediapipe as mp
from pydantic import BaseModel

app = FastAPI()

# Initialize the MediaPipe pose estimation model
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()


class PoseData(BaseModel):
    pose_landmarks: list


class PoseRequest(BaseModel):
    image_data_base64: str


@app.post("/pose")
def get_pose(request: PoseRequest):
    # Decode the Base64 payload back into raw bytes
    image_data = base64.b64decode(request.image_data_base64)
    # Decode the bytes into an image and convert BGR to RGB (MediaPipe expects RGB)
    image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run the pose model on the image
    results = pose.process(image)

    # If no pose is detected, return a single all-zero placeholder landmark
    if not results.pose_landmarks:
        landmarks = [{
            'x': 0,
            'y': 0,
            'z': 0,
            'visibility': 0
        }]

    else:
        # Collect the position and visibility of every pose landmark
        landmarks = []
        for landmark in results.pose_landmarks.landmark:
            landmarks.append({
                'x': landmark.x,
                'y': landmark.y,
                'z': landmark.z,
                'visibility': landmark.visibility
            })

    # Return the pose data
    return PoseData(pose_landmarks=landmarks)


if __name__ == "__main__":
    # Run the service; example only, adjust host and port for a real deployment
    uvicorn.run(app, host="0.0.0.0", port=8002)

hand

import base64
import numpy as np
import uvicorn
from fastapi import FastAPI
import cv2
import mediapipe as mp
from pydantic import BaseModel

app = FastAPI()

# Initialize the MediaPipe hands model
mp_hands = mp.solutions.hands
hands = mp_hands.Hands()

class HandData(BaseModel):
    hand_landmarks: list
    hand_world_landmarks: list

class HandRequest(BaseModel):
    image_data_base64: str

@app.post("/hand")
def get_hand(request: HandRequest):
    # Decode the Base64 payload back into raw bytes
    image_data = base64.b64decode(request.image_data_base64)
    # Decode the bytes into an image and convert BGR to RGB (MediaPipe expects RGB)
    image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run the hands model on the image
    results = hands.process(image)

    # If no hands are detected, return a single all-zero placeholder landmark in each list
    if not results.multi_hand_landmarks:
        return HandData(
            hand_landmarks=[{'x': 0, 'y': 0, 'z': 0}],
            hand_world_landmarks=[{'x': 0, 'y': 0, 'z': 0}]
        )

    else:
        # Collect the image-space and world-space landmarks of every detected hand
        hand_landmarks = []
        hand_world_landmarks = []

        for hand_index, hand_landmarks_proto in enumerate(results.multi_hand_landmarks):
            for landmark in hand_landmarks_proto.landmark:
                hand_landmarks.append({
                    'x': landmark.x,
                    'y': landmark.y,
                    'z': landmark.z
                })

            # Append the matching world-coordinate landmarks for this hand, if available
            if results.multi_hand_world_landmarks:
                for world_landmark in results.multi_hand_world_landmarks[hand_index].landmark:
                    hand_world_landmarks.append({
                        'x': world_landmark.x,
                        'y': world_landmark.y,
                        'z': world_landmark.z
                    })

    # Return the hand data
    return HandData(hand_landmarks=hand_landmarks, hand_world_landmarks=hand_world_landmarks)

if __name__ == "__main__":
    # Run the service; example only, adjust host and port for a real deployment
    uvicorn.run(app, host="0.0.0.0", port=8005)
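
For completeness, here is a minimal client sketch for the bootstrap endpoint. It assumes all four services are running locally on the ports above and simply unpacks the three result dictionaries returned by GET /poses, in the same order the tasks were gathered (pose, face, hand).

# Minimal client sketch (assumption): query the bootstrap service and unpack the results.
# Assumes the bootstrap (8001), pose (8002), face (8003), and hand (8005) services are running.
import httpx

response = httpx.get("http://127.0.0.1:8001/poses", timeout=10.0)
response.raise_for_status()

pose_data, face_data, hand_data = response.json()  # same order as the gathered tasks
print("pose landmarks:", len(pose_data["pose_landmarks"]))
print("face landmarks:", len(face_data["face_landmarks"]))
print("hand landmarks:", len(hand_data["hand_landmarks"]))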
