
Pulling an RTMP stream from IoT devices with Python + OpenCV + FFmpeg

Preface:

An IoT gateway device publishes an RTMP video stream. Building on that, the program pulls the stream, samples its frames with a counter and only runs detection (a classifier) once the target frame count is reached, which keeps the added latency low. The processed video can then be re-published as RTSP, which is handled much like the RTMP side by choosing sensible FFmpeg parameters. A sketch of the frame-counting gate appears right after this preface, and a sketch of the RTSP re-push follows the main listing.
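The frame-counting gate mentioned above can be shown in isolation before the full program. The sketch below is illustrative only: DETECT_EVERY, run_detection and the dummy frame loop are hypothetical names and stand-ins, not part of the original code, and the interval of 5 frames is an assumption.

import numpy as np

DETECT_EVERY = 5  # hypothetical target interval: run detection only on every 5th frame (assumption)

def run_detection(frame):
    # hypothetical stand-in for the cascade-based detection step in the full listing below
    print("running detection on frame with shape", frame.shape)

frame_count = 0
for _ in range(20):  # stand-in for the real read loop; the full listing reads frames from FFmpeg's stdout
    frame = np.zeros((480, 720, 3), np.uint8)  # dummy frame with the same shape as the real stream
    frame_count += 1
    if frame_count % DETECT_EVERY == 0:  # the gate: only target frames are handed to detection
        run_detection(frame)

The full pull-and-detect program follows.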

import cv2
import subprocess
import numpy as np

# RTMP stream address (placeholder: the stream server host was omitted from the original)
rtmp_url = "rtmp://<host>:1935/live/"

frame_width = 720
frame_height = 480
fps = 25
# FFmpeg command: pull the RTMP stream and write raw BGR frames to stdout
ffmpeg_command = [
    "ffmpeg",
    "-fflags", "nobuffer",  # reduce input buffering (placed before -i so it applies to the input)
    "-i", rtmp_url,
    "-f", "rawvideo",       # raw frames on stdout
    "-pix_fmt", "bgr24",    # BGR, 3 bytes per pixel, matches OpenCV's in-memory layout
    "-s", "{}x{}".format(frame_width, frame_height),
    "-r", str(fps),
    "-vcodec", "rawvideo",
    "-an",                  # no audio
    "-"                     # write to stdout
]

# Start the FFmpeg process; frames are read from its stdout pipe
ffmpeg_process = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=10**8)

# Load the Haar cascade face detector bundled with OpenCV
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Size in bytes of one BGR frame (3 bytes per pixel)
frame_size = frame_width * frame_height * 3

while True:
    # Read exactly one frame's worth of bytes from FFmpeg's stdout
    raw_frame = ffmpeg_process.stdout.read(frame_size)
    if not raw_frame or len(raw_frame) < frame_size:
        break  # stream ended or only a partial frame was left

    # Interpret the raw bytes as a (height, width, 3) BGR image
    frame = np.frombuffer(raw_frame, np.uint8).reshape((frame_height, frame_width, 3))

    # np.frombuffer returns a read-only view, so copy it before drawing on it
    frame = frame.copy()

    # Process the frame (here: Haar-cascade face detection on a grayscale copy)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # minNeighbors is the number of neighboring detections a candidate rectangle must collect to be kept;
    # larger values reduce false positives but may increase missed faces.
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(10, 10), maxSize=(50, 50))
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the processed frame
    cv2.imshow('Video Frame', frame)

    # Press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Shut down the FFmpeg process and close the window
ffmpeg_process.stdout.close()
ffmpeg_process.terminate()
ffmpeg_process.wait()
cv2.destroyAllWindows()
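
The preface also calls for re-publishing the processed frames as RTSP, which the listing above does not do. Below is a minimal sketch, under stated assumptions, of how a second FFmpeg process could take the processed BGR frames on stdin and push them to an RTSP server: the rtsp_url, the assumption that an RTSP server such as MediaMTX is listening there, and the libx264 settings are all additions for illustration, not part of the original program.

import subprocess
import numpy as np

frame_width, frame_height, fps = 720, 480, 25
rtsp_url = "rtsp://localhost:8554/live"  # hypothetical URL; assumes an RTSP server (e.g. MediaMTX) is listening

# Second FFmpeg process: read raw BGR frames from stdin, encode with libx264, publish over RTSP
push_command = [
    "ffmpeg",
    "-f", "rawvideo",
    "-pix_fmt", "bgr24",
    "-s", "{}x{}".format(frame_width, frame_height),
    "-r", str(fps),
    "-i", "-",                  # raw frames arrive on stdin
    "-c:v", "libx264",
    "-preset", "ultrafast",     # fast encoding, mirroring the low-latency intent on the pull side
    "-tune", "zerolatency",
    "-pix_fmt", "yuv420p",
    "-f", "rtsp",
    "-rtsp_transport", "tcp",
    rtsp_url,
]
push_process = subprocess.Popen(push_command, stdin=subprocess.PIPE)

# Inside the read/detect loop above, each processed frame would be written out like this:
frame = np.zeros((frame_height, frame_width, 3), np.uint8)  # stand-in for a processed frame
push_process.stdin.write(frame.tobytes())

# On shutdown, close stdin so FFmpeg can flush the encoder and end the RTSP session cleanly
push_process.stdin.close()
push_process.wait()

Closing stdin before wait() lets FFmpeg finish the stream gracefully instead of being terminated mid-push.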
