fix stream of videos

This commit is contained in:
henryruhs
2026-03-09 17:43:11 +01:00
parent 7144c2b5b7
commit 85f097d85c
3 changed files with 17 additions and 12 deletions
+2 -2
View File
@@ -12,7 +12,7 @@ from facefusion import session_context, session_manager, state_manager
from facefusion.apis.api_helper import get_sec_websocket_protocol
from facefusion.apis.session_helper import extract_access_token
from facefusion.apis.stream_helper import on_video_track
-from facefusion.streamer import process_stream_frame
+from facefusion.streamer import process_vision_frame
async def websocket_stream(websocket : WebSocket) -> None:
@@ -31,7 +31,7 @@ async def websocket_stream(websocket : WebSocket) -> None:
target_vision_frame = cv2.imdecode(numpy.frombuffer(image_buffer, numpy.uint8), cv2.IMREAD_COLOR)
if numpy.any(target_vision_frame):
-	temp_vision_frame = process_stream_frame(target_vision_frame)
+	temp_vision_frame = process_vision_frame(target_vision_frame)
is_success, output_vision_frame = cv2.imencode('.jpg', temp_vision_frame)
if is_success:
+8 -3
View File
@@ -4,7 +4,13 @@ from typing import cast
from aiortc import MediaStreamTrack, RTCPeerConnection, VideoStreamTrack
from av import VideoFrame
-from facefusion.streamer import process_stream_frame
+from facefusion.streamer import process_vision_frame
+def process_stream_frame(target_stream_frame : VideoFrame) -> VideoFrame:
+	target_vision_frame = target_stream_frame.to_ndarray(format = 'bgr24')
+	output_vision_frame = process_vision_frame(target_vision_frame)
+	return VideoFrame.from_ndarray(output_vision_frame, format = 'bgr24')
def create_output_track(target_track : MediaStreamTrack) -> VideoStreamTrack:
@@ -12,8 +18,7 @@ def create_output_track(target_track : MediaStreamTrack) -> VideoStreamTrack:
async def read_stream_frame() -> VideoFrame:
target_stream_frame = cast(VideoFrame, await target_track.recv())
-		output_vision_frame = await asyncio.get_running_loop().run_in_executor(None, process_stream_frame, target_stream_frame.to_ndarray(format = 'bgr24'))
-		output_stream_frame = VideoFrame.from_ndarray(output_vision_frame, format = 'bgr24')
+		output_stream_frame = await asyncio.get_running_loop().run_in_executor(None, process_stream_frame, target_stream_frame)
output_stream_frame.pts = target_stream_frame.pts
output_stream_frame.time_base = target_stream_frame.time_base
return output_stream_frame
+7 -7
View File
@@ -26,17 +26,17 @@ def multi_process_capture(camera_capture : cv2.VideoCapture, camera_fps : Fps) -
futures = []
while camera_capture and camera_capture.isOpened():
-			_, capture_vision_frame = camera_capture.read()
-			if analyse_stream(capture_vision_frame, camera_fps):
+			_, capture_frame = camera_capture.read()
+			if analyse_stream(capture_frame, camera_fps):
camera_capture.release()
-			if numpy.any(capture_vision_frame):
-				future = executor.submit(process_stream_frame, capture_vision_frame)
+			if numpy.any(capture_frame):
+				future = executor.submit(process_vision_frame, capture_frame)
futures.append(future)
for future_done in [ future for future in futures if future.done() ]:
-				capture_vision_frame = future_done.result()
-				capture_deque.append(capture_vision_frame)
+				capture_frame = future_done.result()
+				capture_deque.append(capture_frame)
futures.remove(future_done)
while capture_deque:
@@ -44,7 +44,7 @@ def multi_process_capture(camera_capture : cv2.VideoCapture, camera_fps : Fps) -
yield capture_deque.popleft()
-def process_stream_frame(target_vision_frame : VisionFrame) -> VisionFrame:
+def process_vision_frame(target_vision_frame : VisionFrame) -> VisionFrame:
source_vision_frames = read_static_images(state_manager.get_item('source_paths'))
source_audio_frame = create_empty_audio_frame()
source_voice_frame = create_empty_audio_frame()