Python OpenCV subprocess write returns broken pipe

Posted 2024-09-29 00:20:24


I want to read an RTSP video feed, add overlay text, and push it to an RTMP endpoint. I use OpenCV's VideoCapture to read the feed and a Python subprocess to write the frames back out to the RTMP endpoint. I followed this post: FFmpeg stream video to rtmp from frames OpenCV python

import sys
import subprocess

import cv2
import ffmpeg
rtmp_url = "rtmp://127.0.0.1:1935/live/test"

path = 0
cap = cv2.VideoCapture("rtsp://10.0.1.7/media.sdp")

# gather video info to ffmpeg
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))



command = ['ffmpeg', '-i', '-', "-c", "copy", '-f', 'flv', rtmp_url]
p = subprocess.Popen(command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)

font = cv2.FONT_HERSHEY_SIMPLEX
while cap.isOpened():

    ret, frame = cap.read()
    cv2.putText(frame, 'TEXT ON VIDEO', (50, 50), font, 1, (0, 255, 255), 2, cv2.LINE_4)
    cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    if not ret:
        print("frame read failed")
        break

    try:
        p.stdin.write(frame.tobytes())
    except Exception as e:
        print (e)


cap.release()
p.stdin.close()
p.stderr.close()
p.wait()

The Python script returns "[Errno 32] Broken pipe". Running the ffmpeg command directly in a terminal works fine:

ffmpeg -i rtsp://10.0.1.7/media.sdp -c copy -f flv rtmp://127.0.0.1:1935/live/test

The command above works well and pushes the input stream to the RTMP endpoint, but I can't write the processed frames to the FFmpeg subprocess.

Please let me know if I'm missing something.


1 Answer

You can't use "-c", "copy" when writing raw frames to the stdin pipe.

The frame returned by ret, frame = cap.read() is a uint8 NumPy array in BGR color format (cap.read() decodes the video and converts the color format).
In FFmpeg terminology, the format of frame is "rawvideo".
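
As a quick sanity check (a minimal sketch, not part of the original answer, using the RTSP URL from the question), you can print what cap.read() actually returns and how many bytes FFmpeg will receive per frame on the pipe:

import cv2

cap = cv2.VideoCapture("rtsp://10.0.1.7/media.sdp")  # the question's RTSP source
ret, frame = cap.read()

if ret:
    print(frame.dtype, frame.shape)          # expected: uint8, (height, width, 3), BGR channel order
    print('bytes per frame:', frame.nbytes)  # width * height * 3 bytes for bgr24 raw video
cap.release()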

  • The command should tell FFmpeg to expect raw video as input, with an explicit frame size and pixel format:
    command = ['ffmpeg', '-f', 'rawvideo', '-s', f'{width}x{height}', '-pixel_format', 'bgr24', ...

  • Because the input is raw video, it has to be re-encoded.
    We can specify the output pixel format and the video codec (a combined sketch of the corrected command follows this list):
    '-pix_fmt', 'yuv420p', '-c:v', 'libx264' ...
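
Putting the two points together, a minimal correction of the question's command (a sketch only, reusing the width, height and fps values already gathered in the question's code) looks like this:

command = ['ffmpeg',
           '-f', 'rawvideo',             # input is raw frames, not an encoded container
           '-s', f'{width}x{height}',    # frame size must match what OpenCV delivers
           '-pixel_format', 'bgr24',     # OpenCV frames are BGR, 8 bits per channel
           '-r', f'{fps}',
           '-i', '-',                    # read the raw frames from stdin
           '-pix_fmt', 'yuv420p',        # re-encode instead of '-c', 'copy'
           '-c:v', 'libx264',
           '-f', 'flv',
           rtmp_url]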

Notes:

  • Decoding and re-encoding the video loses some quality (but there is no way around it).
  • The suggested solution drops the audio (some solutions keep the audio, but OpenCV has no audio support); a hedged sketch of one such approach follows these notes.
  • The posted solution reuses some code from the following post.
    A few FFmpeg arguments are used without explanation (such as '-bufsize', '64M').
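
For completeness, one way some solutions keep the audio is to let FFmpeg pull the audio directly from the RTSP source while the processed video comes from the pipe. This is only a hedged sketch (it assumes the RTSP source actually carries an audio stream, and audio/video sync is not guaranteed); it is not part of the original answer:

command = ['ffmpeg',
           '-f', 'rawvideo', '-s', f'{width}x{height}', '-pixel_format', 'bgr24', '-r', f'{fps}',
           '-i', '-',                            # input 0: processed raw video frames from stdin
           '-i', 'rtsp://10.0.1.7/media.sdp',    # input 1: the original RTSP source (used for audio only)
           '-map', '0:v', '-map', '1:a',         # take video from the pipe, audio from the RTSP source
           '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
           '-c:a', 'aac',                        # re-encode audio ('-c:a', 'copy' may work if it is already AAC)
           '-f', 'flv',
           rtmp_url]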

Running a listener application:

  • RTMP streaming does not work without a "listener" that receives the video.
    The listener has to be started before the RTMP stream (because TCP is used); a small precaution is sketched after this list.

  • We can use an FFplay sub-process as the "listener" application:

     ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])
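
If FFmpeg tries to connect before FFplay has finished binding its listening socket, the connection may be refused and the pipe breaks again. A small, hedged precaution (not part of the original answer; the two-second delay is an assumption) is to give the listener a moment before starting the sender:

import time
import subprocess as sp

ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])  # start the listener first
time.sleep(2)  # crude delay; assumes two seconds is enough for FFplay to start listening
process = sp.Popen(command, stdin=sp.PIPE)  # only then start the FFmpeg sender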
    

Streaming synthetic video frames:

Start with a simple code sample that streams synthetic frames (without capturing RTSP video).

The following "self-contained" code sample writes yellow text on a gray background and passes the frames to FFmpeg for RTMP streaming:

import cv2
import numpy as np
import subprocess as sp 

width = 320
height = 240

fps = 5

rtmp_url = "rtmp://127.0.0.1:1935/live/test"

# Start the TCP server first, before the sending client.
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])  # Use FFplay sub-process for receiving the RTMP video.


command = ['ffmpeg',
           '-re',
           '-f', 'rawvideo',  # Apply raw video as input
           '-s', f'{width}x{height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-f', 'flv',
           rtmp_url]

process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTMP streaming

frame_counter = 0

while True:
    # Build synthetic frame in BGR color format (3D NumPy array).
    frame = np.full((height, width, 3), 60, np.uint8)
    cv2.putText(frame, 'TEXT ON VIDEO ' + str(frame_counter), (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)  # Put a frame counter for showing progress.

    process.stdin.write(frame.tobytes())  # Write raw frame to stdin pipe.

    cv2.imshow('frame', frame)  # Show frame for testing

    key = cv2.waitKey(int(round(1000/fps)))  # We need to call cv2.waitKey after cv2.imshow

    if key == ord('q'):  # Press 'q' for exit
        break

    frame_counter += 1

process.stdin.close()  # Close stdin pipe
process.wait()  # Wait for FFmpeg sub-process to finish
ffplay_process.kill()  # Forcefully close FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window

Sample output: (screenshot of the FFplay window, not reproduced here)


Capturing video frames from an RTSP stream:
The following code sample captures video frames from a public RTSP stream, writes text on them, and passes the frames to FFmpeg for RTMP streaming:

import cv2
import numpy as np
import subprocess as sp 

# Use public RTSP Streaming for testing.
rtsp_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"

rtmp_url = "rtmp://127.0.0.1:1935/live/test"

cap = cv2.VideoCapture(rtsp_stream)

# gather video info to ffmpeg
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))


# Start the TCP server first, before the sending client.
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])  # Use FFplay sub-process for receiving the RTMP video.


command = ['ffmpeg',
           '-re',
           '-f', 'rawvideo',  # Apply raw video as input
           '-s', f'{width}x{height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-f', 'flv',
           rtmp_url]

process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTMP streaming

frame_counter = 0

while cap.isOpened():
    # Read frame from RTSP stream.
    ret, frame = cap.read()

    if not ret:
        print("frame read failed")
        break

    cv2.putText(frame, 'TEXT ON VIDEO', (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)
    cv2.putText(frame, str(frame_counter), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)

    process.stdin.write(frame.tobytes())  # Write raw frame to stdin pipe.

    cv2.imshow('frame', frame)  # Show frame for testing

    key = cv2.waitKey(1)  # We need to call cv2.waitKey after cv2.imshow

    if key == ord('q'):  # Press 'q' for exit
        break

    frame_counter += 1

cap.release()
process.stdin.close()  # Close stdin pipe
process.wait()  # Wait for FFmpeg sub-process to finish
ffplay_process.kill()  # Forcefully close FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window

Sample output: (screenshot of the FFplay window, not reproduced here)
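
If the "[Errno 32] Broken pipe" error comes back, it usually means the FFmpeg sub-process has already exited (for example, because of an unsupported argument such as '-c', 'copy' with raw input), and the real reason is printed on FFmpeg's console output. Note that the question's code pipes FFmpeg's stderr without ever reading it, which hides that message and can eventually block FFmpeg. A small defensive pattern (a sketch, reusing the process variable from the examples above) makes the failure easier to diagnose inside the streaming loop:

    if process.poll() is not None:  # FFmpeg already exited; its return code hints at the reason
        print('FFmpeg terminated with return code', process.returncode)
        break

    try:
        process.stdin.write(frame.tobytes())  # Write raw frame to stdin pipe.
    except BrokenPipeError:
        print('FFmpeg closed the pipe - check its console output for the error message')
        break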
