Patch 3.5.4 (#1055)

* remove insecure flag from curl

* eliminate repeating definitions

* limit processors and ui layouts by choices

* follow couple of v4 standards

* use more secure mkstemp

* dynamic cache path for execution providers

* fix benchmarker, prevent path traversal via job-id

* fix order in execution provider choices

* resort by priority

* introduce support for QNN

* close file descriptor for Windows to stop crying

* prevent ConnectionResetError under windows

* needed for nested .caches directory as onnxruntime does not create it

* different approach to silent asyncio

* update dependencies

* simplify the name to just inference providers

* switch to trt_builder_optimization_level 4
This commit is contained in:
Henry Ruhs
2026-03-08 11:00:45 +01:00
committed by GitHub
parent c7976ec9d4
commit a498f3d618
31 changed files with 243 additions and 207 deletions
+78 -92
View File
@@ -7,6 +7,84 @@ from facefusion.types import ApplyStateItem, Args
from facefusion.vision import detect_video_fps
def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
apply_state_item('command', args.get('command'))
apply_state_item('temp_path', args.get('temp_path'))
apply_state_item('jobs_path', args.get('jobs_path'))
apply_state_item('source_paths', args.get('source_paths'))
apply_state_item('target_path', args.get('target_path'))
apply_state_item('output_path', args.get('output_path'))
apply_state_item('source_pattern', args.get('source_pattern'))
apply_state_item('target_pattern', args.get('target_pattern'))
apply_state_item('output_pattern', args.get('output_pattern'))
apply_state_item('face_detector_model', args.get('face_detector_model'))
apply_state_item('face_detector_size', args.get('face_detector_size'))
apply_state_item('face_detector_margin', normalize_space(args.get('face_detector_margin')))
apply_state_item('face_detector_angles', args.get('face_detector_angles'))
apply_state_item('face_detector_score', args.get('face_detector_score'))
apply_state_item('face_landmarker_model', args.get('face_landmarker_model'))
apply_state_item('face_landmarker_score', args.get('face_landmarker_score'))
apply_state_item('face_selector_mode', args.get('face_selector_mode'))
apply_state_item('face_selector_order', args.get('face_selector_order'))
apply_state_item('face_selector_age_start', args.get('face_selector_age_start'))
apply_state_item('face_selector_age_end', args.get('face_selector_age_end'))
apply_state_item('face_selector_gender', args.get('face_selector_gender'))
apply_state_item('face_selector_race', args.get('face_selector_race'))
apply_state_item('reference_face_position', args.get('reference_face_position'))
apply_state_item('reference_face_distance', args.get('reference_face_distance'))
apply_state_item('reference_frame_number', args.get('reference_frame_number'))
apply_state_item('face_occluder_model', args.get('face_occluder_model'))
apply_state_item('face_parser_model', args.get('face_parser_model'))
apply_state_item('face_mask_types', args.get('face_mask_types'))
apply_state_item('face_mask_areas', args.get('face_mask_areas'))
apply_state_item('face_mask_regions', args.get('face_mask_regions'))
apply_state_item('face_mask_blur', args.get('face_mask_blur'))
apply_state_item('face_mask_padding', normalize_space(args.get('face_mask_padding')))
apply_state_item('voice_extractor_model', args.get('voice_extractor_model'))
apply_state_item('trim_frame_start', args.get('trim_frame_start'))
apply_state_item('trim_frame_end', args.get('trim_frame_end'))
apply_state_item('temp_frame_format', args.get('temp_frame_format'))
apply_state_item('keep_temp', args.get('keep_temp'))
apply_state_item('output_image_quality', args.get('output_image_quality'))
apply_state_item('output_image_scale', args.get('output_image_scale'))
apply_state_item('output_audio_encoder', args.get('output_audio_encoder'))
apply_state_item('output_audio_quality', args.get('output_audio_quality'))
apply_state_item('output_audio_volume', args.get('output_audio_volume'))
apply_state_item('output_video_encoder', args.get('output_video_encoder'))
apply_state_item('output_video_preset', args.get('output_video_preset'))
apply_state_item('output_video_quality', args.get('output_video_quality'))
apply_state_item('output_video_scale', args.get('output_video_scale'))
if args.get('output_video_fps') or is_video(args.get('target_path')):
output_video_fps = normalize_fps(args.get('output_video_fps')) or detect_video_fps(args.get('target_path'))
apply_state_item('output_video_fps', output_video_fps)
available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
apply_state_item('processors', args.get('processors'))
for processor_module in get_processors_modules(available_processors):
processor_module.apply_args(args, apply_state_item)
apply_state_item('open_browser', args.get('open_browser'))
apply_state_item('ui_layouts', args.get('ui_layouts'))
apply_state_item('ui_workflow', args.get('ui_workflow'))
apply_state_item('execution_device_ids', args.get('execution_device_ids'))
apply_state_item('execution_providers', args.get('execution_providers'))
apply_state_item('execution_thread_count', args.get('execution_thread_count'))
apply_state_item('download_providers', args.get('download_providers'))
apply_state_item('download_scope', args.get('download_scope'))
apply_state_item('benchmark_mode', args.get('benchmark_mode'))
apply_state_item('benchmark_resolutions', args.get('benchmark_resolutions'))
apply_state_item('benchmark_cycle_count', args.get('benchmark_cycle_count'))
apply_state_item('video_memory_strategy', args.get('video_memory_strategy'))
apply_state_item('system_memory_limit', args.get('system_memory_limit'))
apply_state_item('log_level', args.get('log_level'))
apply_state_item('halt_on_error', args.get('halt_on_error'))
apply_state_item('job_id', args.get('job_id'))
apply_state_item('job_status', args.get('job_status'))
apply_state_item('step_index', args.get('step_index'))
def reduce_step_args(args : Args) -> Args:
step_args =\
{
@@ -37,95 +115,3 @@ def collect_job_args() -> Args:
key: state_manager.get_item(key) for key in job_store.get_job_keys() #type:ignore[arg-type]
}
return job_args
def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
# general
apply_state_item('command', args.get('command'))
# paths
apply_state_item('temp_path', args.get('temp_path'))
apply_state_item('jobs_path', args.get('jobs_path'))
apply_state_item('source_paths', args.get('source_paths'))
apply_state_item('target_path', args.get('target_path'))
apply_state_item('output_path', args.get('output_path'))
# patterns
apply_state_item('source_pattern', args.get('source_pattern'))
apply_state_item('target_pattern', args.get('target_pattern'))
apply_state_item('output_pattern', args.get('output_pattern'))
# face detector
apply_state_item('face_detector_model', args.get('face_detector_model'))
apply_state_item('face_detector_size', args.get('face_detector_size'))
apply_state_item('face_detector_margin', normalize_space(args.get('face_detector_margin')))
apply_state_item('face_detector_angles', args.get('face_detector_angles'))
apply_state_item('face_detector_score', args.get('face_detector_score'))
# face landmarker
apply_state_item('face_landmarker_model', args.get('face_landmarker_model'))
apply_state_item('face_landmarker_score', args.get('face_landmarker_score'))
# face selector
apply_state_item('face_selector_mode', args.get('face_selector_mode'))
apply_state_item('face_selector_order', args.get('face_selector_order'))
apply_state_item('face_selector_age_start', args.get('face_selector_age_start'))
apply_state_item('face_selector_age_end', args.get('face_selector_age_end'))
apply_state_item('face_selector_gender', args.get('face_selector_gender'))
apply_state_item('face_selector_race', args.get('face_selector_race'))
apply_state_item('reference_face_position', args.get('reference_face_position'))
apply_state_item('reference_face_distance', args.get('reference_face_distance'))
apply_state_item('reference_frame_number', args.get('reference_frame_number'))
# face masker
apply_state_item('face_occluder_model', args.get('face_occluder_model'))
apply_state_item('face_parser_model', args.get('face_parser_model'))
apply_state_item('face_mask_types', args.get('face_mask_types'))
apply_state_item('face_mask_areas', args.get('face_mask_areas'))
apply_state_item('face_mask_regions', args.get('face_mask_regions'))
apply_state_item('face_mask_blur', args.get('face_mask_blur'))
apply_state_item('face_mask_padding', normalize_space(args.get('face_mask_padding')))
# voice extractor
apply_state_item('voice_extractor_model', args.get('voice_extractor_model'))
# frame extraction
apply_state_item('trim_frame_start', args.get('trim_frame_start'))
apply_state_item('trim_frame_end', args.get('trim_frame_end'))
apply_state_item('temp_frame_format', args.get('temp_frame_format'))
apply_state_item('keep_temp', args.get('keep_temp'))
# output creation
apply_state_item('output_image_quality', args.get('output_image_quality'))
apply_state_item('output_image_scale', args.get('output_image_scale'))
apply_state_item('output_audio_encoder', args.get('output_audio_encoder'))
apply_state_item('output_audio_quality', args.get('output_audio_quality'))
apply_state_item('output_audio_volume', args.get('output_audio_volume'))
apply_state_item('output_video_encoder', args.get('output_video_encoder'))
apply_state_item('output_video_preset', args.get('output_video_preset'))
apply_state_item('output_video_quality', args.get('output_video_quality'))
apply_state_item('output_video_scale', args.get('output_video_scale'))
if args.get('output_video_fps') or is_video(args.get('target_path')):
output_video_fps = normalize_fps(args.get('output_video_fps')) or detect_video_fps(args.get('target_path'))
apply_state_item('output_video_fps', output_video_fps)
# processors
available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
apply_state_item('processors', args.get('processors'))
for processor_module in get_processors_modules(available_processors):
processor_module.apply_args(args, apply_state_item)
# uis
apply_state_item('open_browser', args.get('open_browser'))
apply_state_item('ui_layouts', args.get('ui_layouts'))
apply_state_item('ui_workflow', args.get('ui_workflow'))
# execution
apply_state_item('execution_device_ids', args.get('execution_device_ids'))
apply_state_item('execution_providers', args.get('execution_providers'))
apply_state_item('execution_thread_count', args.get('execution_thread_count'))
# download
apply_state_item('download_providers', args.get('download_providers'))
apply_state_item('download_scope', args.get('download_scope'))
# benchmark
apply_state_item('benchmark_mode', args.get('benchmark_mode'))
apply_state_item('benchmark_resolutions', args.get('benchmark_resolutions'))
apply_state_item('benchmark_cycle_count', args.get('benchmark_cycle_count'))
# memory
apply_state_item('video_memory_strategy', args.get('video_memory_strategy'))
apply_state_item('system_memory_limit', args.get('system_memory_limit'))
# misc
apply_state_item('log_level', args.get('log_level'))
apply_state_item('halt_on_error', args.get('halt_on_error'))
# jobs
apply_state_item('job_id', args.get('job_id'))
apply_state_item('job_status', args.get('job_status'))
apply_state_item('step_index', args.get('step_index'))
+1 -1
View File
@@ -89,7 +89,7 @@ def cycle(cycle_count : int) -> BenchmarkCycleSet:
def suggest_output_path(target_path : str) -> str:
target_file_extension = get_file_extension(target_path)
return os.path.join(tempfile.gettempdir(), hashlib.sha1().hexdigest()[:8] + target_file_extension)
return os.path.join(tempfile.gettempdir(), hashlib.sha1(target_path.encode()).hexdigest() + target_file_extension)
def render() -> None:
+34 -33
View File
@@ -1,5 +1,5 @@
import logging
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_float_range, create_int_range
from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, BenchmarkMode, BenchmarkResolution, BenchmarkSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskArea, FaceMaskAreaSet, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, LogLevel, LogLevelSet, Race, Score, TempFrameFormat, UiWorkflow, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, VoiceExtractorModel
@@ -12,15 +12,15 @@ face_detector_set : FaceDetectorSet =\
'yolo_face': [ '640x640' ],
'yunet': [ '640x640' ]
}
face_detector_models : List[FaceDetectorModel] = list(face_detector_set.keys())
face_landmarker_models : List[FaceLandmarkerModel] = [ 'many', '2dfan4', 'peppa_wutz' ]
face_selector_modes : List[FaceSelectorMode] = [ 'many', 'one', 'reference' ]
face_selector_orders : List[FaceSelectorOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
face_selector_genders : List[Gender] = [ 'female', 'male' ]
face_selector_races : List[Race] = [ 'white', 'black', 'latino', 'asian', 'indian', 'arabic' ]
face_occluder_models : List[FaceOccluderModel] = [ 'many', 'xseg_1', 'xseg_2', 'xseg_3' ]
face_parser_models : List[FaceParserModel] = [ 'bisenet_resnet_18', 'bisenet_resnet_34' ]
face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'area', 'region' ]
face_detector_models : List[FaceDetectorModel] = list(get_args(FaceDetectorModel))
face_landmarker_models : List[FaceLandmarkerModel] = list(get_args(FaceLandmarkerModel))
face_selector_modes : List[FaceSelectorMode] = list(get_args(FaceSelectorMode))
face_selector_orders : List[FaceSelectorOrder] = list(get_args(FaceSelectorOrder))
face_selector_genders : List[Gender] = list(get_args(Gender))
face_selector_races : List[Race] = list(get_args(Race))
face_occluder_models : List[FaceOccluderModel] = list(get_args(FaceOccluderModel))
face_parser_models : List[FaceParserModel] = list(get_args(FaceParserModel))
face_mask_types : List[FaceMaskType] = list(get_args(FaceMaskType))
face_mask_area_set : FaceMaskAreaSet =\
{
'upper-face': [ 0, 1, 2, 31, 32, 33, 34, 35, 14, 15, 16, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17 ],
@@ -40,10 +40,10 @@ face_mask_region_set : FaceMaskRegionSet =\
'upper-lip': 12,
'lower-lip': 13
}
face_mask_areas : List[FaceMaskArea] = list(face_mask_area_set.keys())
face_mask_regions : List[FaceMaskRegion] = list(face_mask_region_set.keys())
face_mask_areas : List[FaceMaskArea] = list(get_args(FaceMaskArea))
face_mask_regions : List[FaceMaskRegion] = list(get_args(FaceMaskRegion))
voice_extractor_models : List[VoiceExtractorModel] = [ 'kim_vocal_1', 'kim_vocal_2', 'uvr_mdxnet' ]
voice_extractor_models : List[VoiceExtractorModel] = list(get_args(VoiceExtractorModel))
audio_type_set : AudioTypeSet =\
{
@@ -74,21 +74,21 @@ video_type_set : VideoTypeSet =\
'webm': 'video/webm',
'wmv': 'video/x-ms-wmv'
}
audio_formats : List[AudioFormat] = list(audio_type_set.keys())
image_formats : List[ImageFormat] = list(image_type_set.keys())
video_formats : List[VideoFormat] = list(video_type_set.keys())
temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpeg', 'png', 'tiff' ]
audio_formats : List[AudioFormat] = list(get_args(AudioFormat))
image_formats : List[ImageFormat] = list(get_args(ImageFormat))
video_formats : List[VideoFormat] = list(get_args(VideoFormat))
temp_frame_formats : List[TempFrameFormat] = list(get_args(TempFrameFormat))
output_audio_encoders : List[AudioEncoder] = list(get_args(AudioEncoder))
output_video_encoders : List[VideoEncoder] = list(get_args(VideoEncoder))
output_encoder_set : EncoderSet =\
{
'audio': [ 'flac', 'aac', 'libmp3lame', 'libopus', 'libvorbis', 'pcm_s16le', 'pcm_s32le' ],
'video': [ 'libx264', 'libx264rgb', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox', 'rawvideo' ]
'audio': output_audio_encoders,
'video': output_video_encoders
}
output_audio_encoders : List[AudioEncoder] = output_encoder_set.get('audio')
output_video_encoders : List[VideoEncoder] = output_encoder_set.get('video')
output_video_presets : List[VideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ]
output_video_presets : List[VideoPreset] = list(get_args(VideoPreset))
benchmark_modes : List[BenchmarkMode] = [ 'warm', 'cold' ]
benchmark_modes : List[BenchmarkMode] = list(get_args(BenchmarkMode))
benchmark_set : BenchmarkSet =\
{
'240p': '.assets/examples/target-240p.mp4',
@@ -99,20 +99,21 @@ benchmark_set : BenchmarkSet =\
'1440p': '.assets/examples/target-1440p.mp4',
'2160p': '.assets/examples/target-2160p.mp4'
}
benchmark_resolutions : List[BenchmarkResolution] = list(benchmark_set.keys())
benchmark_resolutions : List[BenchmarkResolution] = list(get_args(BenchmarkResolution))
execution_provider_set : ExecutionProviderSet =\
{
'cuda': 'CUDAExecutionProvider',
'tensorrt': 'TensorrtExecutionProvider',
'directml': 'DmlExecutionProvider',
'rocm': 'ROCMExecutionProvider',
'migraphx': 'MIGraphXExecutionProvider',
'openvino': 'OpenVINOExecutionProvider',
'coreml': 'CoreMLExecutionProvider',
'openvino': 'OpenVINOExecutionProvider',
'qnn': 'QNNExecutionProvider',
'directml': 'DmlExecutionProvider',
'cpu': 'CPUExecutionProvider'
}
execution_providers : List[ExecutionProvider] = list(execution_provider_set.keys())
execution_providers : List[ExecutionProvider] = list(get_args(ExecutionProvider))
download_provider_set : DownloadProviderSet =\
{
'github':
@@ -133,10 +134,10 @@ download_provider_set : DownloadProviderSet =\
'path': '/facefusion/{base_name}/resolve/main/{file_name}'
}
}
download_providers : List[DownloadProvider] = list(download_provider_set.keys())
download_scopes : List[DownloadScope] = [ 'lite', 'full' ]
download_providers : List[DownloadProvider] = list(get_args(DownloadProvider))
download_scopes : List[DownloadScope] = list(get_args(DownloadScope))
video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ]
video_memory_strategies : List[VideoMemoryStrategy] = list(get_args(VideoMemoryStrategy))
log_level_set : LogLevelSet =\
{
@@ -145,10 +146,10 @@ log_level_set : LogLevelSet =\
'info': logging.INFO,
'debug': logging.DEBUG
}
log_levels : List[LogLevel] = list(log_level_set.keys())
log_levels : List[LogLevel] = list(get_args(LogLevel))
ui_workflows : List[UiWorkflow] = [ 'instant_runner', 'job_runner', 'job_manager' ]
job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ]
ui_workflows : List[UiWorkflow] = list(get_args(UiWorkflow))
job_statuses : List[JobStatus] = list(get_args(JobStatus))
benchmark_cycle_count_range : Sequence[int] = create_int_range(1, 10, 1)
execution_thread_count_range : Sequence[int] = create_int_range(1, 32, 1)
+1 -1
View File
@@ -9,7 +9,7 @@ from facefusion.types import Command
def run(commands : List[Command]) -> List[Command]:
user_agent = metadata.get('name') + '/' + metadata.get('version')
return [ shutil.which('curl'), '--user-agent', user_agent, '--insecure', '--location', '--silent' ] + commands
return [ shutil.which('curl'), '--user-agent', user_agent, '--location', '--silent' ] + commands
def chain(*commands : List[Command]) -> List[Command]:
+61 -27
View File
@@ -1,15 +1,17 @@
import os
import shutil
import subprocess
import xml.etree.ElementTree as ElementTree
from functools import lru_cache
from typing import List, Optional
from onnxruntime import get_available_providers, set_default_logger_severity
import onnxruntime
import facefusion.choices
from facefusion.types import ExecutionDevice, ExecutionProvider, InferenceSessionProvider, ValueAndUnit
from facefusion.filesystem import create_directory, is_directory
from facefusion.types import ExecutionDevice, ExecutionProvider, InferenceOptionSet, InferenceProvider, ValueAndUnit
set_default_logger_severity(3)
onnxruntime.set_default_logger_severity(3)
def has_execution_provider(execution_provider : ExecutionProvider) -> bool:
@@ -17,7 +19,7 @@ def has_execution_provider(execution_provider : ExecutionProvider) -> bool:
def get_available_execution_providers() -> List[ExecutionProvider]:
inference_session_providers = get_available_providers()
inference_session_providers = onnxruntime.get_available_providers()
available_execution_providers : List[ExecutionProvider] = []
for execution_provider, execution_provider_value in facefusion.choices.execution_provider_set.items():
@@ -28,54 +30,86 @@ def get_available_execution_providers() -> List[ExecutionProvider]:
return available_execution_providers
def create_inference_session_providers(execution_device_id : int, execution_providers : List[ExecutionProvider]) -> List[InferenceSessionProvider]:
inference_session_providers : List[InferenceSessionProvider] = []
def create_inference_providers(execution_device_id : int, execution_providers : List[ExecutionProvider]) -> List[InferenceProvider]:
inference_providers : List[InferenceProvider] = []
cache_path = resolve_cache_path()
for execution_provider in execution_providers:
if execution_provider == 'cuda':
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'device_id': execution_device_id,
'cudnn_conv_algo_search': resolve_cudnn_conv_algo_search()
}))
if execution_provider == 'tensorrt':
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
inference_option_set : InferenceOptionSet =\
{
'device_id': execution_device_id,
'trt_engine_cache_enable': True,
'trt_engine_cache_path': '.caches',
'trt_timing_cache_enable': True,
'trt_timing_cache_path': '.caches',
'trt_builder_optimization_level': 5
}))
'device_id': execution_device_id
}
if is_directory(cache_path) or create_directory(cache_path):
inference_option_set.update(
{
'trt_engine_cache_enable': True,
'trt_engine_cache_path': cache_path,
'trt_timing_cache_enable': True,
'trt_timing_cache_path': cache_path,
'trt_builder_optimization_level': 4
})
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_option_set))
if execution_provider in [ 'directml', 'rocm' ]:
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'device_id': execution_device_id
}))
if execution_provider == 'migraphx':
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
inference_option_set =\
{
'device_id': execution_device_id,
'migraphx_model_cache_dir': '.caches'
}))
'device_id': execution_device_id
}
if is_directory(cache_path) or create_directory(cache_path):
inference_option_set.update(
{
'migraphx_model_cache_dir': cache_path
})
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_option_set))
if execution_provider == 'coreml':
inference_option_set =\
{
'SpecializationStrategy': 'FastPrediction'
}
if is_directory(cache_path) or create_directory(cache_path):
inference_option_set.update(
{
'ModelCacheDirectory': cache_path
})
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_option_set))
if execution_provider == 'openvino':
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'device_type': resolve_openvino_device_type(execution_device_id),
'precision': 'FP32'
}))
if execution_provider == 'coreml':
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
if execution_provider == 'qnn':
inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'SpecializationStrategy': 'FastPrediction',
'ModelCacheDirectory': '.caches'
'device_id': execution_device_id,
'backend_type': 'htp'
}))
if 'cpu' in execution_providers:
inference_session_providers.append(facefusion.choices.execution_provider_set.get('cpu'))
inference_providers.append(facefusion.choices.execution_provider_set.get('cpu'))
return inference_session_providers
return inference_providers
def resolve_cache_path() -> str:
return os.path.join('.caches', onnxruntime.get_version_string())
def resolve_cudnn_conv_algo_search() -> str:
+1 -1
View File
@@ -233,7 +233,7 @@ def create_area_mask(crop_vision_frame : VisionFrame, face_landmark_68 : FaceLan
convex_hull = cv2.convexHull(face_landmark_68[landmark_points].astype(numpy.int32))
area_mask = numpy.zeros(crop_size).astype(numpy.float32)
cv2.fillConvexPoly(area_mask, convex_hull, 1.0) # type: ignore[call-overload]
cv2.fillConvexPoly(area_mask, convex_hull, 1.0) #type:ignore[call-overload]
area_mask = (cv2.GaussianBlur(area_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2
return area_mask
+2 -1
View File
@@ -244,7 +244,8 @@ def merge_video(target_path : str, temp_video_fps : Fps, output_video_resolution
def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
concat_video_path = tempfile.mktemp()
file_descriptor, concat_video_path = tempfile.mkstemp()
os.close(file_descriptor)
with open(concat_video_path, 'w') as concat_video_file:
for temp_output_path in temp_output_paths:
+3 -3
View File
@@ -8,7 +8,7 @@ from onnxruntime import InferenceSession
from facefusion import logger, process_manager, state_manager, translator
from facefusion.app_context import detect_app_context
from facefusion.common_helper import is_windows
from facefusion.execution import create_inference_session_providers, has_execution_provider
from facefusion.execution import create_inference_providers, has_execution_provider
from facefusion.exit_helper import fatal_exit
from facefusion.filesystem import get_file_name, is_file
from facefusion.time_helper import calculate_end_time
@@ -72,8 +72,8 @@ def create_inference_session(model_path : str, execution_device_id : int, execut
start_time = time()
try:
inference_session_providers = create_inference_session_providers(execution_device_id, execution_providers)
inference_session = InferenceSession(model_path, providers = inference_session_providers)
inference_providers = create_inference_providers(execution_device_id, execution_providers)
inference_session = InferenceSession(model_path, providers = inference_providers)
logger.debug(translator.get('loading_model_succeeded').format(model_name = model_file_name, seconds = calculate_end_time(start_time)), __name__)
return inference_session
+5 -4
View File
@@ -22,12 +22,13 @@ ONNXRUNTIME_SET =\
'default': ('onnxruntime', '1.24.1')
}
if is_windows() or is_linux():
ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.24.1')
ONNXRUNTIME_SET['openvino'] = ('onnxruntime-openvino', '1.23.0')
ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.24.3')
ONNXRUNTIME_SET['openvino'] = ('onnxruntime-openvino', '1.24.1')
if is_windows():
ONNXRUNTIME_SET['directml'] = ('onnxruntime-directml', '1.24.1')
ONNXRUNTIME_SET['directml'] = ('onnxruntime-directml', '1.24.3')
ONNXRUNTIME_SET['qnn'] = ('onnxruntime-qnn', '1.24.3')
if is_linux():
ONNXRUNTIME_SET['migraphx'] = ('onnxruntime-migraphx', '1.23.2')
ONNXRUNTIME_SET['migraphx'] = ('onnxruntime-migraphx', '1.24.2')
ONNXRUNTIME_SET['rocm'] = ('onnxruntime-rocm', '1.22.2.post1')
+1 -1
View File
@@ -4,7 +4,7 @@ METADATA =\
{
'name': 'FaceFusion',
'description': 'Industry leading face manipulation platform',
'version': '3.5.3',
'version': '3.5.4',
'license': 'OpenRAIL-AS',
'author': 'Henry Ruhs',
'url': 'https://facefusion.io'
@@ -1,8 +1,8 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.age_modifier.types import AgeModifierModel
age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ]
age_modifier_models : List[AgeModifierModel] = list(get_args(AgeModifierModel))
age_modifier_direction_range : Sequence[int] = create_int_range(-100, 100, 1)
@@ -1,8 +1,8 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.background_remover.types import BackgroundRemoverModel
background_remover_models : List[BackgroundRemoverModel] = [ 'ben_2', 'birefnet_general', 'birefnet_portrait', 'isnet_general', 'modnet', 'ormbg', 'rmbg_1.4', 'rmbg_2.0', 'silueta', 'u2net_cloth', 'u2net_general', 'u2net_human', 'u2netp' ]
background_remover_models : List[BackgroundRemoverModel] = list(get_args(BackgroundRemoverModel))
background_remover_color_range : Sequence[int] = create_int_range(0, 255, 1)
@@ -1,10 +1,10 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.expression_restorer.types import ExpressionRestorerArea, ExpressionRestorerModel
expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ]
expression_restorer_models : List[ExpressionRestorerModel] = list(get_args(ExpressionRestorerModel))
expression_restorer_areas : List[ExpressionRestorerArea] = [ 'upper-face', 'lower-face' ]
expression_restorer_areas : List[ExpressionRestorerArea] = list(get_args(ExpressionRestorerArea))
expression_restorer_factor_range : Sequence[int] = create_int_range(0, 100, 1)
@@ -1,5 +1,5 @@
from typing import List
from typing import List, get_args
from facefusion.processors.modules.face_debugger.types import FaceDebuggerItem
face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask' ]
face_debugger_items : List[FaceDebuggerItem] = list(get_args(FaceDebuggerItem))
@@ -1,9 +1,9 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_float_range
from facefusion.processors.modules.face_editor.types import FaceEditorModel
face_editor_models : List[FaceEditorModel] = [ 'live_portrait' ]
face_editor_models : List[FaceEditorModel] = list(get_args(FaceEditorModel))
face_editor_eyebrow_direction_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_gaze_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
@@ -1,9 +1,9 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_float_range, create_int_range
from facefusion.processors.modules.face_enhancer.types import FaceEnhancerModel
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ]
face_enhancer_models : List[FaceEnhancerModel] = list(get_args(FaceEnhancerModel))
face_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
@@ -1,8 +1,9 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_float_range
from facefusion.processors.modules.face_swapper.types import FaceSwapperModel, FaceSwapperSet, FaceSwapperWeight
face_swapper_set : FaceSwapperSet =\
{
'blendswap_256': [ '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
@@ -20,6 +21,6 @@ face_swapper_set : FaceSwapperSet =\
'uniface_256': [ '256x256', '512x512', '768x768', '1024x1024' ]
}
face_swapper_models : List[FaceSwapperModel] = list(face_swapper_set.keys())
face_swapper_models : List[FaceSwapperModel] = list(get_args(FaceSwapperModel))
face_swapper_weight_range : Sequence[FaceSwapperWeight] = create_float_range(0.0, 1.0, 0.05)
@@ -1,9 +1,9 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.frame_colorizer.types import FrameColorizerModel
frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable' ]
frame_colorizer_models : List[FrameColorizerModel] = list(get_args(FrameColorizerModel))
frame_colorizer_sizes : List[str] = [ '192x192', '256x256', '384x384', '512x512' ]
@@ -1,8 +1,8 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.frame_enhancer.types import FrameEnhancerModel
frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'face_dat_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'tghq_face_x8', 'ultra_sharp_x4', 'ultra_sharp_2_x4' ]
frame_enhancer_models : List[FrameEnhancerModel] = list(get_args(FrameEnhancerModel))
frame_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
@@ -1,8 +1,8 @@
from typing import List, Sequence
from typing import List, Sequence, get_args
from facefusion.common_helper import create_float_range
from facefusion.processors.modules.lip_syncer.types import LipSyncerModel
lip_syncer_models : List[LipSyncerModel] = [ 'edtalk_256', 'wav2lip_96', 'wav2lip_gan_96' ]
lip_syncer_models : List[LipSyncerModel] = list(get_args(LipSyncerModel))
lip_syncer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
+4 -7
View File
@@ -10,7 +10,7 @@ from facefusion.ffmpeg import get_available_encoder_set
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.jobs import job_store
from facefusion.processors.core import get_processors_modules
from facefusion.sanitizer import sanitize_int_range
from facefusion.sanitizer import sanitize_int_range, sanitize_job_id
def create_help_formatter_small(prog : str) -> HelpFormatter:
@@ -188,7 +188,7 @@ def create_processors_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
group_processors = program.add_argument_group('processors')
group_processors.add_argument('--processors', help = translator.get('help.processors').format(choices = ', '.join(available_processors)), default = config.get_str_list('processors', 'processors', 'face_swapper'), nargs = '+')
group_processors.add_argument('--processors', help = translator.get('help.processors').format(choices = ', '.join(available_processors)), default = config.get_str_list('processors', 'processors', 'face_swapper'), choices = available_processors, nargs = '+', metavar = 'PROCESSORS')
job_store.register_step_keys([ 'processors' ])
for processor_module in get_processors_modules(available_processors):
processor_module.register_args(program)
@@ -200,7 +200,7 @@ def create_uis_program() -> ArgumentParser:
available_ui_layouts = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/uis/layouts') ]
group_uis = program.add_argument_group('uis')
group_uis.add_argument('--open-browser', help = translator.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis', 'open_browser'))
group_uis.add_argument('--ui-layouts', help = translator.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis', 'ui_layouts', 'default'), nargs = '+')
group_uis.add_argument('--ui-layouts', help = translator.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis', 'ui_layouts', 'default'), choices = available_ui_layouts, nargs = '+', metavar = 'UI_LAYOUTS')
group_uis.add_argument('--ui-workflow', help = translator.get('help.ui_workflow'), default = config.get_str_value('uis', 'ui_workflow', 'instant_runner'), choices = facefusion.choices.ui_workflows)
return program
@@ -268,7 +268,7 @@ def create_halt_on_error_program() -> ArgumentParser:
def create_job_id_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
program.add_argument('job_id', help = translator.get('help.job_id'))
program.add_argument('job_id', help = translator.get('help.job_id'), type = sanitize_job_id)
return program
@@ -297,13 +297,11 @@ def create_program() -> ArgumentParser:
program._positionals.title = 'commands'
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
sub_program = program.add_subparsers(dest = 'command')
# general
sub_program.add_parser('run', help = translator.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('headless-run', help = translator.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('batch-run', help = translator.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('force-download', help = translator.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('benchmark', help = translator.get('help.benchmark'), parents = [ create_temp_path_program(), collect_step_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
# job manager
sub_program.add_parser('job-list', help = translator.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-create', help = translator.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit', help = translator.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
@@ -314,7 +312,6 @@ def create_program() -> ArgumentParser:
sub_program.add_parser('job-remix-step', help = translator.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-insert-step', help = translator.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remove-step', help = translator.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
# job runner
sub_program.add_parser('job-run', help = translator.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-run-all', help = translator.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry', help = translator.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+9
View File
@@ -1,6 +1,15 @@
import hashlib
from typing import Sequence
def sanitize_job_id(job_id : str) -> str:
__job_id__ = job_id.replace('-', '')
if __job_id__.isalnum():
return job_id
return hashlib.sha1(job_id.encode()).hexdigest()
def sanitize_int_range(value : int, int_range : Sequence[int]) -> int:
if value in int_range:
return value
+4 -3
View File
@@ -167,10 +167,11 @@ ModelOptions : TypeAlias = Dict[str, Any]
ModelSet : TypeAlias = Dict[str, ModelOptions]
ModelInitializer : TypeAlias = NDArray[Any]
ExecutionProvider = Literal['cpu', 'coreml', 'cuda', 'directml', 'openvino', 'migraphx', 'rocm', 'tensorrt']
ExecutionProviderValue = Literal['CPUExecutionProvider', 'CoreMLExecutionProvider', 'CUDAExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'MIGraphXExecutionProvider', 'ROCMExecutionProvider', 'TensorrtExecutionProvider']
ExecutionProvider = Literal['cuda', 'tensorrt', 'rocm', 'migraphx', 'coreml', 'openvino', 'qnn', 'directml', 'cpu']
ExecutionProviderValue = Literal['CPUExecutionProvider', 'CoreMLExecutionProvider', 'CUDAExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'MIGraphXExecutionProvider', 'QNNExecutionProvider', 'ROCMExecutionProvider', 'TensorrtExecutionProvider']
ExecutionProviderSet : TypeAlias = Dict[ExecutionProvider, ExecutionProviderValue]
InferenceSessionProvider : TypeAlias = Any
InferenceProvider : TypeAlias = Any
InferenceOptionSet : TypeAlias = Dict[str, Any]
ValueAndUnit = TypedDict('ValueAndUnit',
{
'value' : int,
+1 -1
View File
@@ -90,7 +90,7 @@ def start(webcam_device_id : int, webcam_mode : WebcamMode, webcam_resolution :
stream = None
if webcam_mode in [ 'udp', 'v4l2' ]:
stream = open_stream(webcam_mode, webcam_resolution, webcam_fps) # type:ignore[arg-type]
stream = open_stream(webcam_mode, webcam_resolution, webcam_fps) #type:ignore[arg-type]
webcam_width, webcam_height = unpack_resolution(webcam_resolution)
if camera_capture and camera_capture.isOpened():
+2
View File
@@ -1,4 +1,5 @@
import importlib
import logging
import os
import warnings
from types import ModuleType
@@ -72,6 +73,7 @@ def init() -> None:
os.environ['GRADIO_ANALYTICS_ENABLED'] = '0'
os.environ['GRADIO_TEMP_DIR'] = os.path.join(state_manager.get_item('temp_path'), 'gradio')
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
gradio.processing_utils._check_allowed = uis_overrides.mock
gradio.processing_utils.convert_video_to_playable_mp4 = uis_overrides.convert_video_to_playable_mp4
+1 -1
View File
@@ -24,7 +24,7 @@ def process(start_time : float) -> ErrorCode:
process_manager.start()
for task in tasks:
error_code = task() # type:ignore[operator]
error_code = task() #type:ignore[operator]
if error_code > 0:
process_manager.end()
+1 -1
View File
@@ -31,7 +31,7 @@ def process(start_time : float) -> ErrorCode:
process_manager.start()
for task in tasks:
error_code = task() # type:ignore[operator]
error_code = task() #type:ignore[operator]
if error_code > 0:
process_manager.end()
+2 -2
View File
@@ -2,7 +2,7 @@ gradio-rangeslider==0.0.8
gradio==5.44.1
numpy==2.2.1
onnx==1.20.1
onnxruntime==1.24.1
onnxruntime==1.24.3
opencv-python==4.13.0.92
tqdm==4.67.3
scipy==1.17.0
scipy==1.17.1
+1 -1
View File
@@ -7,7 +7,7 @@ from facefusion.curl_builder import chain, ping, run, set_timeout
def test_run() -> None:
user_agent = metadata.get('name') + '/' + metadata.get('version')
assert run([]) == [ which('curl'), '--user-agent', user_agent, '--insecure', '--location', '--silent' ]
assert run([]) == [ which('curl'), '--user-agent', user_agent, '--location', '--silent' ]
def test_chain() -> None:
+4 -4
View File
@@ -1,4 +1,4 @@
from facefusion.execution import create_inference_session_providers, get_available_execution_providers, has_execution_provider
from facefusion.execution import create_inference_providers, get_available_execution_providers, has_execution_provider
def test_has_execution_provider() -> None:
@@ -10,8 +10,8 @@ def test_get_available_execution_providers() -> None:
assert 'cpu' in get_available_execution_providers()
def test_create_inference_session_providers() -> None:
inference_session_providers =\
def test_create_inference_providers() -> None:
inference_providers =\
[
('CUDAExecutionProvider',
{
@@ -21,4 +21,4 @@ def test_create_inference_session_providers() -> None:
'CPUExecutionProvider'
]
assert create_inference_session_providers(1, [ 'cpu', 'cuda' ]) == inference_session_providers
assert create_inference_providers(1, [ 'cpu', 'cuda' ]) == inference_providers
+5 -2
View File
@@ -1,10 +1,12 @@
import os
import tempfile
from facefusion.json import read_json, write_json
def test_read_json() -> None:
_, json_path = tempfile.mkstemp(suffix = '.json')
file_descriptor, json_path = tempfile.mkstemp(suffix = '.json')
os.close(file_descriptor)
assert not read_json(json_path)
@@ -14,6 +16,7 @@ def test_read_json() -> None:
def test_write_json() -> None:
_, json_path = tempfile.mkstemp(suffix = '.json')
file_descriptor, json_path = tempfile.mkstemp(suffix = '.json')
os.close(file_descriptor)
assert write_json(json_path, {})