Update help message for arguments, Notation based wording approach (#347)

* Update help message for arguments, Notation based wording approach

* Fix installer
This commit is contained in:
Henry Ruhs
2024-01-28 10:49:44 +01:00
committed by GitHub
parent 0f65313d05
commit 32cbf0ca5b
37 changed files with 335 additions and 271 deletions
+6 -3
View File
@@ -82,8 +82,11 @@ def get_float_list(key : str, fallback : Optional[str] = None) -> Optional[List[
def get_value_by_notation(key : str) -> Optional[Any]:
config = get_config()
section, name = key.split('.')
if section in config and name in config[section]:
return config[section][name]
if '.' in key:
section, name = key.split('.')
if section in config and name in config[section]:
return config[section][name]
if key in config:
return config[key]
return None
+39 -39
View File
@@ -36,73 +36,73 @@ def cli() -> None:
signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
# general
program.add_argument('-s', '--source', help = wording.get('source_help'), action = 'append', dest = 'source_paths', default = config.get_str_list('general.source_paths'))
program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path', default = config.get_str_value('general.target_path'))
program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path', default = config.get_str_value('general.output_path'))
program.add_argument('-s', '--source', help = wording.get('help.source'), action = 'append', dest = 'source_paths', default = config.get_str_list('general.source_paths'))
program.add_argument('-t', '--target', help = wording.get('help.target'), dest = 'target_path', default = config.get_str_value('general.target_path'))
program.add_argument('-o', '--output', help = wording.get('help.output'), dest = 'output_path', default = config.get_str_value('general.output_path'))
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
# misc
group_misc = program.add_argument_group('misc')
group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true', default = config.get_bool_value('misc.skip_download'))
group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true', default = config.get_bool_value('misc.headless'))
group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = config.get_str_value('misc.log_level', 'info'), choices = logger.get_log_levels())
group_misc.add_argument('--skip-download', help = wording.get('help.skip_download'), action = 'store_true', default = config.get_bool_value('misc.skip_download'))
group_misc.add_argument('--headless', help = wording.get('help.headless'), action = 'store_true', default = config.get_bool_value('misc.headless'))
group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = logger.get_log_levels())
# execution
execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
group_execution = program.add_argument_group('execution')
group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
# memory
group_memory = program.add_argument_group('memory')
group_memory.add_argument('--video-memory-strategy', help = wording.get('video_memory_strategy_help'), default = config.get_str_value('memory.video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies)
group_memory.add_argument('--system-memory-limit', help = wording.get('system_memory_limit_help'), type = int, default = config.get_int_value('memory.system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_metavar(facefusion.choices.system_memory_limit_range))
group_memory.add_argument('--video-memory-strategy', help = wording.get('help.video_memory_strategy'), default = config.get_str_value('memory.video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies)
group_memory.add_argument('--system-memory-limit', help = wording.get('help.system_memory_limit'), type = int, default = config.get_int_value('memory.system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_metavar(facefusion.choices.system_memory_limit_range))
# face analyser
group_face_analyser = program.add_argument_group('face analyser')
group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = config.get_str_value('face_analyser.face_analyser_order', 'left-right'), choices = facefusion.choices.face_analyser_orders)
group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), default = config.get_str_value('face_analyser.face_analyser_age'), choices = facefusion.choices.face_analyser_ages)
group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), default = config.get_str_value('face_analyser.face_analyser_gender'), choices = facefusion.choices.face_analyser_genders)
group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = config.get_str_value('face_analyser.face_detector_model', 'retinaface'), choices = facefusion.choices.face_detector_models)
group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = config.get_str_value('face_analyser.face_detector_size', '640x640'), choices = facefusion.choices.face_detector_sizes)
group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = config.get_float_value('face_analyser.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
group_face_analyser.add_argument('--face-analyser-order', help = wording.get('help.face_analyser_order'), default = config.get_str_value('face_analyser.face_analyser_order', 'left-right'), choices = facefusion.choices.face_analyser_orders)
group_face_analyser.add_argument('--face-analyser-age', help = wording.get('help.face_analyser_age'), default = config.get_str_value('face_analyser.face_analyser_age'), choices = facefusion.choices.face_analyser_ages)
group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('help.face_analyser_gender'), default = config.get_str_value('face_analyser.face_analyser_gender'), choices = facefusion.choices.face_analyser_genders)
group_face_analyser.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_analyser.face_detector_model', 'retinaface'), choices = facefusion.choices.face_detector_models)
group_face_analyser.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_analyser.face_detector_size', '640x640'), choices = facefusion.choices.face_detector_sizes)
group_face_analyser.add_argument('--face-detector-score', help = wording.get('help.face_detector_score'), type = float, default = config.get_float_value('face_analyser.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
# face selector
group_face_selector = program.add_argument_group('face selector')
group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = config.get_str_value('face_selector.face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes)
group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = config.get_int_value('face_selector.reference_face_position', '0'))
group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = config.get_float_value('face_selector.reference_face_distance', '0.6'), choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = config.get_int_value('face_selector.reference_frame_number', '0'))
group_face_selector.add_argument('--face-selector-mode', help = wording.get('help.face_selector_mode'), default = config.get_str_value('face_selector.face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes)
group_face_selector.add_argument('--reference-face-position', help = wording.get('help.reference_face_position'), type = int, default = config.get_int_value('face_selector.reference_face_position', '0'))
group_face_selector.add_argument('--reference-face-distance', help = wording.get('help.reference_face_distance'), type = float, default = config.get_float_value('face_selector.reference_face_distance', '0.6'), choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
group_face_selector.add_argument('--reference-frame-number', help = wording.get('help.reference_frame_number'), type = int, default = config.get_int_value('face_selector.reference_frame_number', '0'))
# face mask
group_face_mask = program.add_argument_group('face mask')
group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_mask.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = config.get_float_value('face_mask.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = config.get_int_list('face_mask.face_mask_padding', '0 0 0 0'), nargs = '+')
group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_mask.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
group_face_mask.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_mask.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
group_face_mask.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_mask.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
group_face_mask.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_mask.face_mask_padding', '0 0 0 0'), nargs = '+')
group_face_mask.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_mask.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
# frame extraction
group_frame_extraction = program.add_argument_group('frame extraction')
group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_start'))
group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_end'))
group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = config.get_str_value('frame_extraction.temp_frame_format', 'jpg'), choices = facefusion.choices.temp_frame_formats)
group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = config.get_int_value('frame_extraction.temp_frame_quality', '100'), choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), action = 'store_true', default = config.get_bool_value('frame_extraction.keep_temp'))
group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('help.trim_frame_start'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_start'))
group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('help.trim_frame_end'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_end'))
group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('help.temp_frame_format'), default = config.get_str_value('frame_extraction.temp_frame_format', 'jpg'), choices = facefusion.choices.temp_frame_formats)
group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('help.temp_frame_quality'), type = int, default = config.get_int_value('frame_extraction.temp_frame_quality', '100'), choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
group_frame_extraction.add_argument('--keep-temp', help = wording.get('help.keep_temp'), action = 'store_true', default = config.get_bool_value('frame_extraction.keep_temp'))
# output creation
group_output_creation = program.add_argument_group('output creation')
group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = config.get_int_value('output_creation.output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = config.get_str_value('output_creation.output_video_encoder', 'libx264'), choices = facefusion.choices.output_video_encoders)
group_output_creation.add_argument('--output-video-preset', help = wording.get('output_video_preset_help'), default = config.get_str_value('output_creation.output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets)
group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = config.get_int_value('output_creation.output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
group_output_creation.add_argument('--output-video-resolution', help = wording.get('output_video_resolution_help'), default = config.get_str_value('output_creation.output_video_resolution'))
group_output_creation.add_argument('--output-video-fps', help = wording.get('output_video_fps_help'), type = float)
group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true', default = config.get_bool_value('output_creation.skip_audio'))
group_output_creation.add_argument('--output-image-quality', help = wording.get('help.output_image_quality'), type = int, default = config.get_int_value('output_creation.output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
group_output_creation.add_argument('--output-video-encoder', help = wording.get('help.output_video_encoder'), default = config.get_str_value('output_creation.output_video_encoder', 'libx264'), choices = facefusion.choices.output_video_encoders)
group_output_creation.add_argument('--output-video-preset', help = wording.get('help.output_video_preset'), default = config.get_str_value('output_creation.output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets)
group_output_creation.add_argument('--output-video-quality', help = wording.get('help.output_video_quality'), type = int, default = config.get_int_value('output_creation.output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
group_output_creation.add_argument('--output-video-resolution', help = wording.get('help.output_video_resolution'), default = config.get_str_value('output_creation.output_video_resolution'))
group_output_creation.add_argument('--output-video-fps', help = wording.get('help.output_video_fps'), type = float)
group_output_creation.add_argument('--skip-audio', help = wording.get('help.skip_audio'), action = 'store_true', default = config.get_bool_value('output_creation.skip_audio'))
# frame processors
available_frame_processors = list_directory('facefusion/processors/frame/modules')
program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
group_frame_processors = program.add_argument_group('frame processors')
group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = config.get_str_list('frame_processors.frame_processors', 'face_swapper'), nargs = '+')
group_frame_processors.add_argument('--frame-processors', help = wording.get('help.frame_processors').format(choices = ', '.join(available_frame_processors)), default = config.get_str_list('frame_processors.frame_processors', 'face_swapper'), nargs = '+')
for frame_processor in available_frame_processors:
frame_processor_module = load_frame_processor_module(frame_processor)
frame_processor_module.register_args(group_frame_processors)
# uis
available_ui_layouts = list_directory('facefusion/uis/layouts')
group_uis = program.add_argument_group('uis')
group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layout', 'default'), nargs = '+')
group_uis.add_argument('--ui-layouts', help = wording.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layouts', 'default'), nargs = '+')
run(program)
+5 -5
View File
@@ -39,9 +39,9 @@ if platform.system().lower() == 'windows':
def cli() -> None:
program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), choices = TORCH.keys())
program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
program.add_argument('--skip-venv', help = wording.get('skip_venv_help'), action = 'store_true')
program.add_argument('--torch', help = wording.get('help.install_dependency').format(dependency = 'torch'), choices = TORCH.keys())
program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
program.add_argument('--skip-venv', help = wording.get('help.skip_venv'), action = 'store_true')
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
run(program)
@@ -61,8 +61,8 @@ def run(program : ArgumentParser) -> None:
else:
answers = inquirer.prompt(
[
inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
inquirer.List('torch', message = wording.get('help.install_dependency').format(dependency = 'torch'), choices = list(TORCH.keys())),
inquirer.List('onnxruntime', message = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
])
if answers:
torch = answers['torch']
+1 -1
View File
@@ -2,7 +2,7 @@ METADATA =\
{
'name': 'FaceFusion',
'description': 'Next generation face swapper and enhancer',
'version': '2.2.1',
'version': 'NEXT',
'license': 'MIT',
'author': 'Henry Ruhs',
'url': 'https://facefusion.io'
+4 -4
View File
@@ -1,12 +1,12 @@
from typing import List
from facefusion.common_helper import create_int_range
from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel
face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer_plus_plus' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer_plus_plus' ]
face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
face_enhancer_blend_range : List[int] = create_int_range(0, 100, 1)
frame_enhancer_blend_range : List[int] = create_int_range(0, 100, 1)
@@ -35,7 +35,7 @@ def set_options(key : Literal['model'], value : Any) -> None:
def register_args(program : ArgumentParser) -> None:
program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = config.get_str_list('frame_processors.face_debugger_items', 'kps face-mask'), choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
program.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = config.get_str_list('frame_processors.face_debugger_items', 'kps face-mask'), choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
def apply_args(program : ArgumentParser) -> None:
@@ -115,8 +115,8 @@ def set_options(key : Literal['model'], value : Any) -> None:
def register_args(program : ArgumentParser) -> None:
program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), default = config.get_str_value('frame_processors.face_enhancer_model', 'gfpgan_1.4'), choices = frame_processors_choices.face_enhancer_models)
program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = config.get_int_value('frame_processors.face_enhancer_blend', '80'), choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
program.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('frame_processors.face_enhancer_model', 'gfpgan_1.4'), choices = frame_processors_choices.face_enhancer_models)
program.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.face_enhancer_blend', '80'), choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
def apply_args(program : ArgumentParser) -> None:
@@ -138,7 +138,7 @@ def register_args(program : ArgumentParser) -> None:
face_swapper_model_fallback = 'inswapper_128'
else:
face_swapper_model_fallback = 'inswapper_128_fp16'
program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), default = config.get_str_value('frame_processors.face_swapper_model', face_swapper_model_fallback), choices = frame_processors_choices.face_swapper_models)
program.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('frame_processors.face_swapper_model', face_swapper_model_fallback), choices = frame_processors_choices.face_swapper_models)
def apply_args(program : ArgumentParser) -> None:
@@ -91,8 +91,8 @@ def set_options(key : Literal['model'], value : Any) -> None:
def register_args(program : ArgumentParser) -> None:
program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), default = config.get_str_value('frame_processors.frame_enhancer_model', 'real_esrgan_x2plus'), choices = frame_processors_choices.frame_enhancer_models)
program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = config.get_int_value('frame_processors.frame_enhancer_blend', '80'), choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
program.add_argument('--frame-enhancer-model', help = wording.get('help.frame_enhancer_model'), default = config.get_str_value('frame_processors.frame_enhancer_model', 'real_esrgan_x2plus'), choices = frame_processors_choices.frame_enhancer_models)
program.add_argument('--frame-enhancer-blend', help = wording.get('help.frame_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.frame_enhancer_blend', '80'), choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
def apply_args(program : ArgumentParser) -> None:
+3 -3
View File
@@ -1,6 +1,6 @@
from typing import Literal
FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer_plus_plus']
FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score', 'distance']
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer_plus_plus']
FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
+1 -1
View File
@@ -17,7 +17,7 @@ def render() -> None:
link = metadata.get('url')
)
DONATE_BUTTON = gradio.Button(
value = wording.get('donate_button_label'),
value = wording.get('uis.donate_button'),
link = 'https://donate.facefusion.io',
size = 'sm'
)
+3 -3
View File
@@ -36,7 +36,7 @@ def render() -> None:
global BENCHMARK_CLEAR_BUTTON
BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe(
label = wording.get('benchmark_results_dataframe_label'),
label = wording.get('uis.benchmark_results_dataframe'),
headers =
[
'target_path',
@@ -57,12 +57,12 @@ def render() -> None:
]
)
BENCHMARK_START_BUTTON = gradio.Button(
value = wording.get('start_button_label'),
value = wording.get('uis.start_button'),
variant = 'primary',
size = 'sm'
)
BENCHMARK_CLEAR_BUTTON = gradio.Button(
value = wording.get('clear_button_label'),
value = wording.get('uis.clear_button'),
size = 'sm'
)
@@ -14,12 +14,12 @@ def render() -> None:
global BENCHMARK_CYCLES_SLIDER
BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('benchmark_runs_checkbox_group_label'),
label = wording.get('uis.benchmark_runs_checkbox_group'),
value = list(BENCHMARKS.keys()),
choices = list(BENCHMARKS.keys())
)
BENCHMARK_CYCLES_SLIDER = gradio.Slider(
label = wording.get('benchmark_cycles_slider_label'),
label = wording.get('uis.benchmark_cycles_slider'),
value = 5,
step = 1,
minimum = 1,
+1 -1
View File
@@ -19,7 +19,7 @@ def render() -> None:
if facefusion.globals.skip_download:
value.append('skip-download')
COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup(
label = wording.get('common_options_checkbox_group_label'),
label = wording.get('uis.common_options_checkbox_group'),
choices = uis_choices.common_options,
value = value
)
+1 -1
View File
@@ -15,7 +15,7 @@ def render() -> None:
global EXECUTION_PROVIDERS_CHECKBOX_GROUP
EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('execution_providers_checkbox_group_label'),
label = wording.get('uis.execution_providers_checkbox_group'),
choices = encode_execution_providers(onnxruntime.get_available_providers()),
value = encode_execution_providers(facefusion.globals.execution_providers)
)
@@ -12,7 +12,7 @@ def render() -> None:
global EXECUTION_QUEUE_COUNT_SLIDER
EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
label = wording.get('execution_queue_count_slider_label'),
label = wording.get('uis.execution_queue_count_slider'),
value = facefusion.globals.execution_queue_count,
step = facefusion.choices.execution_queue_count_range[1] - facefusion.choices.execution_queue_count_range[0],
minimum = facefusion.choices.execution_queue_count_range[0],
@@ -12,7 +12,7 @@ def render() -> None:
global EXECUTION_THREAD_COUNT_SLIDER
EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
label = wording.get('execution_thread_count_slider_label'),
label = wording.get('uis.execution_thread_count_slider'),
value = facefusion.globals.execution_thread_count,
step = facefusion.choices.execution_thread_count_range[1] - facefusion.choices.execution_thread_count_range[0],
minimum = facefusion.choices.execution_thread_count_range[0],
+6 -6
View File
@@ -26,32 +26,32 @@ def render() -> None:
with gradio.Row():
FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_order_dropdown_label'),
label = wording.get('uis.face_analyser_order_dropdown'),
choices = facefusion.choices.face_analyser_orders,
value = facefusion.globals.face_analyser_order
)
FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_age_dropdown_label'),
label = wording.get('uis.face_analyser_age_dropdown'),
choices = [ 'none' ] + facefusion.choices.face_analyser_ages,
value = facefusion.globals.face_analyser_age or 'none'
)
FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_gender_dropdown_label'),
label = wording.get('uis.face_analyser_gender_dropdown'),
choices = [ 'none' ] + facefusion.choices.face_analyser_genders,
value = facefusion.globals.face_analyser_gender or 'none'
)
FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('face_detector_model_dropdown_label'),
label = wording.get('uis.face_detector_model_dropdown'),
choices = facefusion.choices.face_detector_models,
value = facefusion.globals.face_detector_model
)
FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_detector_size_dropdown_label'),
label = wording.get('uis.face_detector_size_dropdown'),
choices = facefusion.choices.face_detector_sizes,
value = facefusion.globals.face_detector_size
)
FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
label = wording.get('face_detector_score_slider_label'),
label = wording.get('uis.face_detector_score_slider'),
value = facefusion.globals.face_detector_score,
step = facefusion.choices.face_detector_score_range[1] - facefusion.choices.face_detector_score_range[0],
minimum = facefusion.choices.face_detector_score_range[0],
+7 -7
View File
@@ -32,13 +32,13 @@ def render() -> None:
has_box_mask = 'box' in facefusion.globals.face_mask_types
has_region_mask = 'region' in facefusion.globals.face_mask_types
FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('face_mask_types_checkbox_group_label'),
label = wording.get('uis.face_mask_types_checkbox_group'),
choices = facefusion.choices.face_mask_types,
value = facefusion.globals.face_mask_types
)
with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP:
FACE_MASK_BLUR_SLIDER = gradio.Slider(
label = wording.get('face_mask_blur_slider_label'),
label = wording.get('uis.face_mask_blur_slider'),
step = facefusion.choices.face_mask_blur_range[1] - facefusion.choices.face_mask_blur_range[0],
minimum = facefusion.choices.face_mask_blur_range[0],
maximum = facefusion.choices.face_mask_blur_range[-1],
@@ -46,14 +46,14 @@ def render() -> None:
)
with gradio.Row():
FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider(
label = wording.get('face_mask_padding_top_slider_label'),
label = wording.get('uis.face_mask_padding_top_slider'),
step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
minimum = facefusion.choices.face_mask_padding_range[0],
maximum = facefusion.choices.face_mask_padding_range[-1],
value = facefusion.globals.face_mask_padding[0]
)
FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider(
label = wording.get('face_mask_padding_right_slider_label'),
label = wording.get('uis.face_mask_padding_right_slider'),
step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
minimum = facefusion.choices.face_mask_padding_range[0],
maximum = facefusion.choices.face_mask_padding_range[-1],
@@ -61,14 +61,14 @@ def render() -> None:
)
with gradio.Row():
FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider(
label = wording.get('face_mask_padding_bottom_slider_label'),
label = wording.get('uis.face_mask_padding_bottom_slider'),
step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
minimum = facefusion.choices.face_mask_padding_range[0],
maximum = facefusion.choices.face_mask_padding_range[-1],
value = facefusion.globals.face_mask_padding[2]
)
FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider(
label = wording.get('face_mask_padding_left_slider_label'),
label = wording.get('uis.face_mask_padding_left_slider'),
step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
minimum = facefusion.choices.face_mask_padding_range[0],
maximum = facefusion.choices.face_mask_padding_range[-1],
@@ -76,7 +76,7 @@ def render() -> None:
)
with gradio.Row():
FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('face_mask_region_checkbox_group_label'),
label = wording.get('uis.face_mask_region_checkbox_group'),
choices = facefusion.choices.face_mask_regions,
value = facefusion.globals.face_mask_regions,
visible = has_region_mask
+3 -3
View File
@@ -25,7 +25,7 @@ def render() -> None:
reference_face_gallery_args: Dict[str, Any] =\
{
'label': wording.get('reference_face_gallery_label'),
'label': wording.get('uis.reference_face_gallery'),
'object_fit': 'cover',
'columns': 8,
'allow_preview': False,
@@ -38,13 +38,13 @@ def render() -> None:
reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_selector_mode_dropdown_label'),
label = wording.get('uis.face_selector_mode_dropdown'),
choices = facefusion.choices.face_selector_modes,
value = facefusion.globals.face_selector_mode
)
REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
label = wording.get('reference_face_distance_slider_label'),
label = wording.get('uis.reference_face_distance_slider'),
value = facefusion.globals.reference_face_distance,
step = facefusion.choices.reference_face_distance_range[1] - facefusion.choices.reference_face_distance_range[0],
minimum = facefusion.choices.reference_face_distance_range[0],
@@ -14,7 +14,7 @@ def render() -> None:
global FRAME_PROCESSORS_CHECKBOX_GROUP
FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('frame_processors_checkbox_group_label'),
label = wording.get('uis.frame_processors_checkbox_group'),
choices = sort_frame_processors(facefusion.globals.frame_processors),
value = facefusion.globals.frame_processors
)
@@ -5,84 +5,109 @@ import facefusion.globals
from facefusion import wording
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel
from facefusion.uis.core import get_ui_component, register_ui_component
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
def render() -> None:
global FACE_SWAPPER_MODEL_DROPDOWN
global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP
global FACE_ENHANCER_MODEL_DROPDOWN
global FACE_ENHANCER_BLEND_SLIDER
global FACE_SWAPPER_MODEL_DROPDOWN
global FRAME_ENHANCER_MODEL_DROPDOWN
global FRAME_ENHANCER_BLEND_SLIDER
global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP
FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('face_swapper_model_dropdown_label'),
choices = frame_processors_choices.face_swapper_models,
value = frame_processors_globals.face_swapper_model,
visible = 'face_swapper' in facefusion.globals.frame_processors
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('uis.face_debugger_items_checkbox_group'),
choices = frame_processors_choices.face_debugger_items,
value = frame_processors_globals.face_debugger_items,
visible = 'face_debugger' in facefusion.globals.frame_processors
)
FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('face_enhancer_model_dropdown_label'),
label = wording.get('uis.face_enhancer_model_dropdown'),
choices = frame_processors_choices.face_enhancer_models,
value = frame_processors_globals.face_enhancer_model,
visible = 'face_enhancer' in facefusion.globals.frame_processors
)
FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
label = wording.get('face_enhancer_blend_slider_label'),
label = wording.get('uis.face_enhancer_blend_slider'),
value = frame_processors_globals.face_enhancer_blend,
step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0],
minimum = frame_processors_choices.face_enhancer_blend_range[0],
maximum = frame_processors_choices.face_enhancer_blend_range[-1],
visible = 'face_enhancer' in facefusion.globals.frame_processors
)
FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.face_swapper_model_dropdown'),
choices = frame_processors_choices.face_swapper_models,
value = frame_processors_globals.face_swapper_model,
visible = 'face_swapper' in facefusion.globals.frame_processors
)
FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('frame_enhancer_model_dropdown_label'),
label = wording.get('uis.frame_enhancer_model_dropdown'),
choices = frame_processors_choices.frame_enhancer_models,
value = frame_processors_globals.frame_enhancer_model,
visible = 'frame_enhancer' in facefusion.globals.frame_processors
)
FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
label = wording.get('frame_enhancer_blend_slider_label'),
label = wording.get('uis.frame_enhancer_blend_slider'),
value = frame_processors_globals.frame_enhancer_blend,
step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0],
minimum = frame_processors_choices.frame_enhancer_blend_range[0],
maximum = frame_processors_choices.frame_enhancer_blend_range[-1],
visible = 'frame_enhancer' in facefusion.globals.frame_processors
)
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('face_debugger_items_checkbox_group_label'),
choices = frame_processors_choices.face_debugger_items,
value = frame_processors_globals.face_debugger_items,
visible = 'face_debugger' in facefusion.globals.frame_processors
)
register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
def listen() -> None:
FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
if frame_processors_checkbox_group:
frame_processors_checkbox_group.change(toggle_face_swapper_model, inputs = frame_processors_checkbox_group, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP ])
frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = [ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_SWAPPER_MODEL_DROPDOWN, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER ])
def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider]:
has_face_debugger = 'face_debugger' in frame_processors
has_face_enhancer = 'face_enhancer' in frame_processors
has_face_swapper = 'face_swapper' in frame_processors
has_frame_enhancer = 'frame_enhancer' in frame_processors
return gradio.CheckboxGroup(visible = has_face_debugger), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer)
def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
frame_processors_globals.face_debugger_items = face_debugger_items
def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown:
frame_processors_globals.face_enhancer_model = face_enhancer_model
face_enhancer_module = load_frame_processor_module('face_enhancer')
face_enhancer_module.clear_frame_processor()
face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model])
if not face_enhancer_module.pre_check():
return gradio.Dropdown()
return gradio.Dropdown(value = face_enhancer_model)
def update_face_enhancer_blend(face_enhancer_blend : int) -> None:
frame_processors_globals.face_enhancer_blend = face_enhancer_blend
def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
@@ -101,20 +126,6 @@ def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.D
return gradio.Dropdown(value = face_swapper_model)
def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown:
frame_processors_globals.face_enhancer_model = face_enhancer_model
face_enhancer_module = load_frame_processor_module('face_enhancer')
face_enhancer_module.clear_frame_processor()
face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model])
if not face_enhancer_module.pre_check():
return gradio.Dropdown()
return gradio.Dropdown(value = face_enhancer_model)
def update_face_enhancer_blend(face_enhancer_blend : int) -> None:
frame_processors_globals.face_enhancer_blend = face_enhancer_blend
def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
frame_processors_globals.frame_enhancer_model = frame_enhancer_model
frame_enhancer_module = load_frame_processor_module('frame_enhancer')
@@ -127,15 +138,3 @@ def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gr
def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None:
frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend
def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
frame_processors_globals.face_debugger_items = face_debugger_items
def toggle_face_swapper_model(frame_processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.CheckboxGroup]:
has_face_swapper = 'face_swapper' in frame_processors
has_face_enhancer = 'face_enhancer' in frame_processors
has_frame_enhancer = 'frame_enhancer' in frame_processors
has_face_debugger = 'face_debugger' in frame_processors
return gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.CheckboxGroup(visible = has_face_debugger)
+2 -2
View File
@@ -15,12 +15,12 @@ def render() -> None:
global SYSTEM_MEMORY_LIMIT_SLIDER
VIDEO_MEMORY_STRATEGY = gradio.Dropdown(
label = wording.get('video_memory_strategy_dropdown_label'),
label = wording.get('uis.video_memory_strategy_dropdown'),
choices = facefusion.choices.video_memory_strategies,
value = facefusion.globals.video_memory_strategy
)
SYSTEM_MEMORY_LIMIT_SLIDER = gradio.Slider(
label = wording.get('system_memory_limit_slider_label'),
label = wording.get('uis.system_memory_limit_slider'),
step =facefusion.choices.system_memory_limit_range[1] - facefusion.choices.system_memory_limit_range[0],
minimum = facefusion.choices.system_memory_limit_range[0],
maximum = facefusion.choices.system_memory_limit_range[-1],
+4 -4
View File
@@ -22,19 +22,19 @@ def render() -> None:
global OUTPUT_CLEAR_BUTTON
OUTPUT_IMAGE = gradio.Image(
label = wording.get('output_image_or_video_label'),
label = wording.get('uis.output_image_or_video'),
visible = False
)
OUTPUT_VIDEO = gradio.Video(
label = wording.get('output_image_or_video_label')
label = wording.get('uis.output_image_or_video')
)
OUTPUT_START_BUTTON = gradio.Button(
value = wording.get('start_button_label'),
value = wording.get('uis.start_button'),
variant = 'primary',
size = 'sm'
)
OUTPUT_CLEAR_BUTTON = gradio.Button(
value = wording.get('clear_button_label'),
value = wording.get('uis.clear_button'),
size = 'sm'
)
+7 -7
View File
@@ -30,12 +30,12 @@ def render() -> None:
global OUTPUT_VIDEO_FPS_SLIDER
OUTPUT_PATH_TEXTBOX = gradio.Textbox(
label = wording.get('output_path_textbox_label'),
label = wording.get('uis.output_path_textbox'),
value = facefusion.globals.output_path or tempfile.gettempdir(),
max_lines = 1
)
OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider(
label = wording.get('output_image_quality_slider_label'),
label = wording.get('uis.output_image_quality_slider'),
value = facefusion.globals.output_image_quality,
step = facefusion.choices.output_image_quality_range[1] - facefusion.choices.output_image_quality_range[0],
minimum = facefusion.choices.output_image_quality_range[0],
@@ -43,19 +43,19 @@ def render() -> None:
visible = is_image(facefusion.globals.target_path)
)
OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
label = wording.get('output_video_encoder_dropdown_label'),
label = wording.get('uis.output_video_encoder_dropdown'),
choices = facefusion.choices.output_video_encoders,
value = facefusion.globals.output_video_encoder,
visible = is_video(facefusion.globals.target_path)
)
OUTPUT_VIDEO_PRESET_DROPDOWN = gradio.Dropdown(
label = wording.get('output_video_preset_dropdown_label'),
label = wording.get('uis.output_video_preset_dropdown'),
choices = facefusion.choices.output_video_presets,
value = facefusion.globals.output_video_preset,
visible = is_video(facefusion.globals.target_path)
)
OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
label = wording.get('output_video_quality_slider_label'),
label = wording.get('uis.output_video_quality_slider'),
value = facefusion.globals.output_video_quality,
step = facefusion.choices.output_video_quality_range[1] - facefusion.choices.output_video_quality_range[0],
minimum = facefusion.choices.output_video_quality_range[0],
@@ -63,13 +63,13 @@ def render() -> None:
visible = is_video(facefusion.globals.target_path)
)
OUTPUT_VIDEO_RESOLUTION_DROPDOWN = gradio.Dropdown(
label = wording.get('output_video_resolution_dropdown_label'),
label = wording.get('uis.output_video_resolution_dropdown'),
choices = create_video_resolutions(facefusion.globals.target_path),
value = facefusion.globals.output_video_resolution,
visible = is_video(facefusion.globals.target_path)
)
OUTPUT_VIDEO_FPS_SLIDER = gradio.Slider(
label = wording.get('output_video_fps_slider_label'),
label = wording.get('uis.output_video_fps_slider'),
value = facefusion.globals.output_video_fps,
step = 0.01,
minimum = 1,
+2 -2
View File
@@ -26,12 +26,12 @@ def render() -> None:
preview_image_args: Dict[str, Any] =\
{
'label': wording.get('preview_image_label'),
'label': wording.get('uis.preview_image'),
'interactive': False
}
preview_frame_slider_args: Dict[str, Any] =\
{
'label': wording.get('preview_frame_slider_label'),
'label': wording.get('uis.preview_frame_slider'),
'step': 1,
'minimum': 0,
'maximum': 100,
+1 -1
View File
@@ -24,7 +24,7 @@ def render() -> None:
'.jpg',
'.webp'
],
label = wording.get('source_file_label'),
label = wording.get('uis.source_file'),
value = facefusion.globals.source_paths if are_source_images else None
)
source_file_names = [ source_file_value['name'] for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None
+1 -1
View File
@@ -21,7 +21,7 @@ def render() -> None:
is_target_image = is_image(facefusion.globals.target_path)
is_target_video = is_video(facefusion.globals.target_path)
TARGET_FILE = gradio.File(
label = wording.get('target_file_label'),
label = wording.get('uis.target_file'),
file_count = 'single',
file_types =
[
+2 -2
View File
@@ -17,13 +17,13 @@ def render() -> None:
global TEMP_FRAME_QUALITY_SLIDER
TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
label = wording.get('temp_frame_format_dropdown_label'),
label = wording.get('uis.temp_frame_format_dropdown'),
choices = facefusion.choices.temp_frame_formats,
value = facefusion.globals.temp_frame_format,
visible = is_video(facefusion.globals.target_path)
)
TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(
label = wording.get('temp_frame_quality_slider_label'),
label = wording.get('uis.temp_frame_quality_slider'),
value = facefusion.globals.temp_frame_quality,
step = facefusion.choices.temp_frame_quality_range[1] - facefusion.choices.temp_frame_quality_range[0],
minimum = facefusion.choices.temp_frame_quality_range[0],
+2 -2
View File
@@ -17,7 +17,7 @@ def render() -> None:
trim_frame_start_slider_args : Dict[str, Any] =\
{
'label': wording.get('trim_frame_start_slider_label'),
'label': wording.get('uis.trim_frame_start_slider'),
'step': 1,
'minimum': 0,
'maximum': 100,
@@ -25,7 +25,7 @@ def render() -> None:
}
trim_frame_end_slider_args : Dict[str, Any] =\
{
'label': wording.get('trim_frame_end_slider_label'),
'label': wording.get('uis.trim_frame_end_slider'),
'step': 1,
'minimum': 0,
'maximum': 100,
+3 -3
View File
@@ -53,15 +53,15 @@ def render() -> None:
global WEBCAM_STOP_BUTTON
WEBCAM_IMAGE = gradio.Image(
label = wording.get('webcam_image_label')
label = wording.get('uis.webcam_image')
)
WEBCAM_START_BUTTON = gradio.Button(
value = wording.get('start_button_label'),
value = wording.get('uis.start_button'),
variant = 'primary',
size = 'sm'
)
WEBCAM_STOP_BUTTON = gradio.Button(
value = wording.get('stop_button_label'),
value = wording.get('uis.stop_button'),
size = 'sm'
)
+3 -3
View File
@@ -16,17 +16,17 @@ def render() -> None:
global WEBCAM_FPS_SLIDER
WEBCAM_MODE_RADIO = gradio.Radio(
label = wording.get('webcam_mode_radio_label'),
label = wording.get('uis.webcam_mode_radio'),
choices = uis_choices.webcam_modes,
value = 'inline'
)
WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown(
label = wording.get('webcam_resolution_dropdown'),
label = wording.get('uis.webcam_resolution_dropdown'),
choices = uis_choices.webcam_resolutions,
value = uis_choices.webcam_resolutions[0]
)
WEBCAM_FPS_SLIDER = gradio.Slider(
label = wording.get('webcam_fps_slider'),
label = wording.get('uis.webcam_fps_slider'),
value = 25,
step = 1,
minimum = 1,
+1
View File
@@ -34,6 +34,7 @@ def render() -> gradio.Blocks:
about.render()
with gradio.Blocks():
frame_processors.render()
with gradio.Blocks():
frame_processors_options.render()
with gradio.Blocks():
execution.render()
+1
View File
@@ -19,6 +19,7 @@ def render() -> gradio.Blocks:
about.render()
with gradio.Blocks():
frame_processors.render()
with gradio.Blocks():
frame_processors_options.render()
with gradio.Blocks():
execution.render()
+1
View File
@@ -19,6 +19,7 @@ def render() -> gradio.Blocks:
about.render()
with gradio.Blocks():
frame_processors.render()
with gradio.Blocks():
frame_processors_options.render()
with gradio.Blocks():
execution.render()
+156 -104
View File
@@ -1,51 +1,9 @@
WORDING =\
from typing import Any, Dict, Optional
WORDING : Dict[str, Any] =\
{
'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',
'ffmpeg_not_installed': 'FFMpeg is not installed',
'install_dependency_help': 'select the variant of {dependency} to install',
'skip_venv_help': 'skip the virtual environment check',
'source_help': 'select a source image',
'target_help': 'select a target image or video',
'output_help': 'specify the output file or directory',
'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)',
'frame_processor_model_help': 'choose the model for the frame processor',
'frame_processor_blend_help': 'specify the blend amount for the frame processor',
'face_debugger_items_help': 'specify the face debugger items (choices: {choices})',
'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)',
'keep_temp_help': 'retain temporary frames after processing',
'skip_audio_help': 'omit audio from the target',
'face_analyser_order_help': 'specify the order used for the face analyser',
'face_analyser_age_help': 'specify the age used for the face analyser',
'face_analyser_gender_help': 'specify the gender used for the face analyser',
'face_detector_model_help': 'specify the model used for the face detector',
'face_detector_size_help': 'specify the size threshold used for the face detector',
'face_detector_score_help': 'specify the score threshold used for the face detector',
'face_selector_mode_help': 'specify the mode for the face selector',
'reference_face_position_help': 'specify the position of the reference face',
'reference_face_distance_help': 'specify the distance between the reference face and the target face',
'reference_frame_number_help': 'specify the number of the reference frame',
'face_mask_types_help': 'choose from the available face mask types (choices: {choices})',
'face_mask_blur_help': 'specify the blur amount for face mask',
'face_mask_padding_help': 'specify the face mask padding (top, right, bottom, left) in percent',
'face_mask_regions_help': 'choose from the available face mask regions (choices: {choices})',
'trim_frame_start_help': 'specify the start frame for extraction',
'trim_frame_end_help': 'specify the end frame for extraction',
'temp_frame_format_help': 'specify the image format used for frame extraction',
'temp_frame_quality_help': 'specify the image quality used for frame extraction',
'output_image_quality_help': 'specify the quality used for the output image',
'output_video_encoder_help': 'specify the encoder used for the output video',
'output_video_preset_help': 'specify the preset used for the output video',
'output_video_quality_help': 'specify the quality used for the output video',
'output_video_resolution_help': 'specify the resolution used for the output video',
'output_video_fps_help': 'specify the frames per second (fps) used for the output video',
'video_memory_strategy_help': 'specify strategy to handle the video memory',
'system_memory_limit_help': 'specify the amount (gb) of system memory to be used',
'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)',
'execution_thread_count_help': 'specify the number of execution threads',
'execution_queue_count_help': 'specify the number of execution queries',
'skip_download_help': 'omit automate downloads and lookups',
'headless_help': 'run the program in headless mode',
'log_level_help': 'choose from the available log levels',
'creating_temp': 'Creating temporary resources',
'extracting_frames_fps': 'Extracting frames with {video_fps} FPS',
'analysing': 'Analysing',
@@ -75,69 +33,163 @@ WORDING =\
'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded',
'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly',
'stream_not_loaded': 'Stream {stream_mode} could not be loaded',
'donate_button_label': 'DONATE',
'start_button_label': 'START',
'stop_button_label': 'STOP',
'clear_button_label': 'CLEAR',
'benchmark_runs_checkbox_group_label': 'BENCHMARK RUNS',
'benchmark_results_dataframe_label': 'BENCHMARK RESULTS',
'benchmark_cycles_slider_label': 'BENCHMARK CYCLES',
'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS',
'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT',
'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT',
'face_analyser_order_dropdown_label': 'FACE ANALYSER ORDER',
'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE',
'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER',
'face_detector_model_dropdown_label': 'FACE DETECTOR MODEL',
'face_detector_size_dropdown_label': 'FACE DETECTOR SIZE',
'face_detector_score_slider_label': 'FACE DETECTOR SCORE',
'face_selector_mode_dropdown_label': 'FACE SELECTOR MODE',
'reference_face_gallery_label': 'REFERENCE FACE',
'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE',
'face_mask_types_checkbox_group_label': 'FACE MASK TYPES',
'face_mask_blur_slider_label': 'FACE MASK BLUR',
'face_mask_padding_top_slider_label': 'FACE MASK PADDING TOP',
'face_mask_padding_bottom_slider_label': 'FACE MASK PADDING BOTTOM',
'face_mask_padding_left_slider_label': 'FACE MASK PADDING LEFT',
'face_mask_padding_right_slider_label': 'FACE MASK PADDING RIGHT',
'face_mask_region_checkbox_group_label': 'FACE MASK REGIONS',
'video_memory_strategy_dropdown_label': 'VIDEO MEMORY STRATEGY',
'system_memory_limit_slider_label': 'SYSTEM MEMORY LIMIT',
'output_image_or_video_label': 'OUTPUT',
'output_path_textbox_label': 'OUTPUT PATH',
'output_image_quality_slider_label': 'OUTPUT IMAGE QUALITY',
'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER',
'output_video_preset_dropdown_label': 'OUTPUT VIDEO PRESET',
'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY',
'output_video_resolution_dropdown_label': 'OUTPUT VIDEO RESOLUTION',
'output_video_fps_slider_label': 'OUTPUT VIDEO FPS',
'preview_image_label': 'PREVIEW',
'preview_frame_slider_label': 'PREVIEW FRAME',
'frame_processors_checkbox_group_label': 'FRAME PROCESSORS',
'face_swapper_model_dropdown_label': 'FACE SWAPPER MODEL',
'face_enhancer_model_dropdown_label': 'FACE ENHANCER MODEL',
'face_enhancer_blend_slider_label': 'FACE ENHANCER BLEND',
'frame_enhancer_model_dropdown_label': 'FRAME ENHANCER MODEL',
'frame_enhancer_blend_slider_label': 'FRAME ENHANCER BLEND',
'face_debugger_items_checkbox_group_label': 'FACE DEBUGGER ITEMS',
'common_options_checkbox_group_label': 'OPTIONS',
'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT',
'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY',
'trim_frame_start_slider_label': 'TRIM FRAME START',
'trim_frame_end_slider_label': 'TRIM FRAME END',
'source_file_label': 'SOURCE',
'target_file_label': 'TARGET',
'webcam_image_label': 'WEBCAM',
'webcam_mode_radio_label': 'WEBCAM MODE',
'webcam_resolution_dropdown': 'WEBCAM RESOLUTION',
'webcam_fps_slider': 'WEBCAM FPS',
'point': '.',
'comma': ',',
'colon': ':',
'question_mark': '?',
'exclamation_mark': '!'
'exclamation_mark': '!',
'help':
{
# installer
'install_dependency': 'select the variant of {dependency} to install',
'skip_venv': 'skip the virtual environment check',
# general
'source': 'choose single or multiple source images',
'target': 'choose single target image or video',
'output': 'specify the output file or directory',
# misc
'skip_download': 'omit automate downloads and remote lookups',
'headless': 'run the program without a user interface',
'log_level': 'adjust the message severity displayed in the terminal',
# execution
'execution_providers': 'accelerate the model inference using different providers (choices: {choices}, ...)',
'execution_thread_count': 'specify the amount of parallel threads while processing',
'execution_queue_count': 'specify the amount of frames each thread is processing',
# memory
'video_memory_strategy': 'balance fast frame processing and low vram usage',
'system_memory_limit': 'limit the available ram that can be used while processing',
# face analyser
'face_analyser_order': 'specify the order in which the face analyser detects faces.',
'face_analyser_age': 'filter the detected faces based on their age',
'face_analyser_gender': 'filter the detected faces based on their gender',
'face_detector_model': 'choose the model responsible for detecting the face',
'face_detector_size': 'specify the size of the frame provided to the face detector',
'face_detector_score': 'filter the detected faces base on the confidence score',
# face selector
'face_selector_mode': 'use reference based tracking with simple matching',
'reference_face_position': 'specify the position used to create the reference face',
'reference_face_distance': 'specify the desired similarity between the reference face and target face',
'reference_frame_number': 'specify the frame used to create the reference face',
# face mask
'face_mask_types': 'mix and match different face mask types (choices: {choices})',
'face_mask_blur': 'specify the degree of blur applied the box mask',
'face_mask_padding': 'apply top, right, bottom and left padding to the box mask',
'face_mask_regions': 'choose the facial features used for the region mask (choices: {choices})',
# frame extraction
'trim_frame_start': 'specify the the start frame of the target video',
'trim_frame_end': 'specify the the end frame of the target video',
'temp_frame_format': 'specify the temporary resources format',
'temp_frame_quality': 'specify the temporary resources quality',
'keep_temp': 'keep the temporary resources after processing',
# output creation
'output_image_quality': 'specify the image quality which translates to the compression factor',
'output_video_encoder': 'specify the encoder use for the video compression',
'output_video_preset': 'balance fast video processing and video file size',
'output_video_quality': 'specify the video quality which translates to the compression factor',
'output_video_resolution': 'specify the video output resolution based on the target video',
'output_video_fps': 'specify the video output fps based on the target video',
'skip_audio': 'omit the audio from the target video',
# frame processors
'frame_processors': 'load a single or multiple frame processors. (choices: {choices}, ...)',
'face_debugger_items': 'load a single or multiple frame processors (choices: {choices})',
'face_enhancer_model': 'choose the model responsible for enhancing the face',
'face_enhancer_blend': 'blend the enhanced into the previous face',
'face_swapper_model': 'choose the model responsible for swapping the face',
'frame_enhancer_model': 'choose the model responsible for enhancing the frame',
'frame_enhancer_blend': 'blend the enhanced into the previous frame',
# uis
'ui_layouts': 'Launch a single or multiple UI layouts (choices: {choices}, ...)'
},
'uis':
{
# general
'start_button': 'START',
'stop_button': 'STOP',
'clear_button': 'CLEAR',
# about
'donate_button': 'DONATE',
# benchmark
'benchmark_results_dataframe': 'BENCHMARK RESULTS',
# benchmark options
'benchmark_runs_checkbox_group': 'BENCHMARK RUNS',
'benchmark_cycles_slider': 'BENCHMARK CYCLES',
# common options
'common_options_checkbox_group': 'OPTIONS',
# execution
'execution_providers_checkbox_group': 'EXECUTION PROVIDERS',
# execution queue count
'execution_queue_count_slider': 'EXECUTION QUEUE COUNT',
# execution thread count
'execution_thread_count_slider': 'EXECUTION THREAD COUNT',
# face analyser
'face_analyser_order_dropdown': 'FACE ANALYSER ORDER',
'face_analyser_age_dropdown': 'FACE ANALYSER AGE',
'face_analyser_gender_dropdown': 'FACE ANALYSER GENDER',
'face_detector_model_dropdown': 'FACE DETECTOR MODEL',
'face_detector_size_dropdown': 'FACE DETECTOR SIZE',
'face_detector_score_slider': 'FACE DETECTOR SCORE',
# face masker
'face_mask_types_checkbox_group': 'FACE MASK TYPES',
'face_mask_blur_slider': 'FACE MASK BLUR',
'face_mask_padding_top_slider': 'FACE MASK PADDING TOP',
'face_mask_padding_right_slider': 'FACE MASK PADDING RIGHT',
'face_mask_padding_bottom_slider': 'FACE MASK PADDING BOTTOM',
'face_mask_padding_left_slider': 'FACE MASK PADDING LEFT',
'face_mask_region_checkbox_group': 'FACE MASK REGIONS',
# face selector
'face_selector_mode_dropdown': 'FACE SELECTOR MODE',
'reference_face_gallery': 'REFERENCE FACE',
'reference_face_distance_slider': 'REFERENCE FACE DISTANCE',
# frame processors
'frame_processors_checkbox_group': 'FRAME PROCESSORS',
# frame processors options
'face_debugger_items_checkbox_group': 'FACE DEBUGGER ITEMS',
'face_enhancer_model_dropdown': 'FACE ENHANCER MODEL',
'face_enhancer_blend_slider': 'FACE ENHANCER BLEND',
'face_swapper_model_dropdown': 'FACE SWAPPER MODEL',
'frame_enhancer_model_dropdown': 'FRAME ENHANCER MODEL',
'frame_enhancer_blend_slider': 'FRAME ENHANCER BLEND',
# memory
'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY',
'system_memory_limit_slider': 'SYSTEM MEMORY LIMIT',
# output
'output_image_or_video': 'OUTPUT',
# output options
'output_path_textbox': 'OUTPUT PATH',
'output_image_quality_slider': 'OUTPUT IMAGE QUALITY',
'output_video_encoder_dropdown': 'OUTPUT VIDEO ENCODER',
'output_video_preset_dropdown': 'OUTPUT VIDEO PRESET',
'output_video_quality_slider': 'OUTPUT VIDEO QUALITY',
'output_video_resolution_dropdown': 'OUTPUT VIDEO RESOLUTION',
'output_video_fps_slider': 'OUTPUT VIDEO FPS',
# preview
'preview_image': 'PREVIEW',
'preview_frame_slider': 'PREVIEW FRAME',
# source
'source_file': 'SOURCE',
# target
'target_file': 'TARGET',
# temp frame
'temp_frame_format_dropdown': 'TEMP FRAME FORMAT',
'temp_frame_quality_slider': 'TEMP FRAME QUALITY',
# trim frame
'trim_frame_start_slider': 'TRIM FRAME START',
'trim_frame_end_slider': 'TRIM FRAME END',
# webcam
'webcam_image': 'WEBCAM',
# webcam options
'webcam_mode_radio': 'WEBCAM MODE',
'webcam_resolution_dropdown': 'WEBCAM RESOLUTION',
'webcam_fps_slider': 'WEBCAM FPS'
}
}
def get(key : str) -> str:
    # Look up the wording string registered under the given flat key.
    # Raises KeyError when the key is unknown (no notation support here).
    value = WORDING[key]
    return value
def get(key : str) -> Optional[str]:
    """
    Resolve a wording entry by plain key or dotted 'section.name' notation.

    :param key: flat key (e.g. 'point') or notation key (e.g. 'help.source')
    :return: the wording string, or None when the key cannot be resolved
    """
    if '.' in key:
        # split only on the first dot so keys with extra dots return None
        # instead of raising ValueError from tuple unpacking
        section, name = key.split('.', 1)
        entries = WORDING.get(section)
        # guard against top-level string values: 'name in <str>' would be a
        # substring test and '<str>[name]' would raise TypeError
        if isinstance(entries, dict) and name in entries:
            return entries[name]
    if key in WORDING:
        return WORDING[key]
    return None
+7
View File
@@ -0,0 +1,7 @@
from facefusion import wording
def test_get() -> None:
    # unknown keys resolve to None rather than raising
    assert wording.get('invalid') is None
    # dotted notation drills into a section of the wording table
    assert wording.get('help.source')
    # a plain top-level key resolves directly
    assert wording.get('python_not_supported')