Feels so good to get rid of Gradio (#978)

This commit is contained in:
Henry Ruhs
2025-11-05 12:00:20 +01:00
committed by henryruhs
parent 20293d1fa3
commit cfa2216d0c
80 changed files with 41 additions and 4577 deletions
Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

-7
View File
@@ -8,12 +8,6 @@ FaceFusion
![License](https://img.shields.io/badge/license-OpenRAIL--AS-green)
Preview
-------
![Preview](https://raw.githubusercontent.com/facefusion/facefusion/master/.github/preview.png?sanitize=true)
Installation
------------
@@ -34,7 +28,6 @@ options:
commands:
run run the program
headless-run run the program in headless mode
batch-run run the program in batch mode
force-download force automate downloads and exit
benchmark benchmark the program
-5
View File
@@ -103,11 +103,6 @@ frame_enhancer_blend =
lip_syncer_model =
lip_syncer_weight =
[uis]
open_browser =
ui_layouts =
ui_workflow =
[download]
download_providers =
download_scope =
+2 -2
View File
@@ -10,7 +10,7 @@ def detect_app_context() -> AppContext:
while frame:
if os.path.join('facefusion', 'jobs') in frame.f_code.co_filename:
return 'cli'
if os.path.join('facefusion', 'uis') in frame.f_code.co_filename:
return 'ui'
if os.path.join('facefusion', 'apis') in frame.f_code.co_filename:
return 'api'
frame = frame.f_back
return 'cli'
-4
View File
@@ -104,10 +104,6 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
apply_state_item('processors', args.get('processors'))
for processor_module in get_processors_modules(available_processors):
processor_module.apply_args(args, apply_state_item)
# uis
apply_state_item('open_browser', args.get('open_browser'))
apply_state_item('ui_layouts', args.get('ui_layouts'))
apply_state_item('ui_workflow', args.get('ui_workflow'))
# execution
apply_state_item('execution_device_ids', args.get('execution_device_ids'))
apply_state_item('execution_providers', args.get('execution_providers'))
+1 -2
View File
@@ -2,7 +2,7 @@ import logging
from typing import List, Sequence
from facefusion.common_helper import create_float_range, create_int_range
from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, BenchmarkMode, BenchmarkResolution, BenchmarkSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskArea, FaceMaskAreaSet, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, LogLevel, LogLevelSet, Race, Score, TempFrameFormat, UiWorkflow, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, VoiceExtractorModel
from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, BenchmarkMode, BenchmarkResolution, BenchmarkSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskArea, FaceMaskAreaSet, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, LogLevel, LogLevelSet, Race, Score, TempFrameFormat, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, VoiceExtractorModel
face_detector_set : FaceDetectorSet =\
{
@@ -147,7 +147,6 @@ log_level_set : LogLevelSet =\
}
log_levels : List[LogLevel] = list(log_level_set.keys())
ui_workflows : List[UiWorkflow] = [ 'instant_runner', 'job_runner', 'job_manager' ]
job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ]
benchmark_cycle_count_range : Sequence[int] = create_int_range(1, 10, 1)
-11
View File
@@ -62,17 +62,6 @@ def route(args : Args) -> None:
hard_exit(error_code)
if state_manager.get_item('command') == 'run':
import facefusion.uis.core as ui
if not common_pre_check() or not processors_pre_check():
hard_exit(2)
for ui_layout in ui.get_ui_layouts_modules(state_manager.get_item('ui_layouts')):
if not ui_layout.pre_check():
hard_exit(2)
ui.init()
ui.launch()
if state_manager.get_item('command') == 'headless-run':
if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1)
error_code = process_headless(args)
+5 -5
View File
@@ -17,7 +17,7 @@ from facefusion.types import DownloadSet, ExecutionProvider, InferencePool, Infe
INFERENCE_POOL_SET : InferencePoolSet =\
{
'cli': {},
'ui': {}
'api': {}
}
@@ -31,10 +31,10 @@ def get_inference_pool(module_name : str, model_names : List[str], model_source_
for execution_device_id in execution_device_ids:
inference_context = get_inference_context(module_name, model_names, execution_device_id, execution_providers)
if app_context == 'cli' and INFERENCE_POOL_SET.get('ui').get(inference_context):
INFERENCE_POOL_SET['cli'][inference_context] = INFERENCE_POOL_SET.get('ui').get(inference_context)
if app_context == 'ui' and INFERENCE_POOL_SET.get('cli').get(inference_context):
INFERENCE_POOL_SET['ui'][inference_context] = INFERENCE_POOL_SET.get('cli').get(inference_context)
if app_context == 'cli' and INFERENCE_POOL_SET.get('api').get(inference_context):
INFERENCE_POOL_SET['cli'][inference_context] = INFERENCE_POOL_SET.get('api').get(inference_context)
if app_context == 'api' and INFERENCE_POOL_SET.get('cli').get(inference_context):
INFERENCE_POOL_SET['api'][inference_context] = INFERENCE_POOL_SET.get('cli').get(inference_context)
if not INFERENCE_POOL_SET.get(app_context).get(inference_context):
INFERENCE_POOL_SET[app_context][inference_context] = create_inference_pool(model_source_set, execution_device_id, execution_providers)
-5
View File
@@ -149,9 +149,6 @@ LOCALES : Locales =\
'processors': 'load a single or multiple processors (choices: {choices}, ...)',
'background-remover-model': 'choose the model responsible for removing the background',
'background-remover-color': 'apply red, green blue and alpha values of the background',
'open_browser': 'open the browser once the program is ready',
'ui_layouts': 'launch a single or multiple UI layouts (choices: {choices}, ...)',
'ui_workflow': 'choose the ui workflow',
'download_providers': 'download using different providers (choices: {choices}, ...)',
'download_scope': 'specify the download scope',
'benchmark_mode': 'choose the benchmark mode',
@@ -165,7 +162,6 @@ LOCALES : Locales =\
'log_level': 'adjust the message severity displayed in the terminal',
'halt_on_error': 'halt the program once an error occurred',
'run': 'run the program',
'headless_run': 'run the program in headless mode',
'batch_run': 'run the program in batch mode',
'force_download': 'force automate downloads and exit',
'benchmark': 'benchmark the program',
@@ -262,7 +258,6 @@ LOCALES : Locales =\
'temp_frame_format_dropdown': 'TEMP FRAME FORMAT',
'terminal_textbox': 'TERMINAL',
'trim_frame_slider': 'TRIM FRAME',
'ui_workflow': 'UI WORKFLOW',
'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY',
'webcam_fps_slider': 'WEBCAM FPS',
'webcam_image': 'WEBCAM',
+1 -12
View File
@@ -195,16 +195,6 @@ def create_processors_program() -> ArgumentParser:
return program
def create_uis_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
available_ui_layouts = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/uis/layouts') ]
group_uis = program.add_argument_group('uis')
group_uis.add_argument('--open-browser', help = translator.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis', 'open_browser'))
group_uis.add_argument('--ui-layouts', help = translator.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis', 'ui_layouts', 'default'), nargs = '+')
group_uis.add_argument('--ui-workflow', help = translator.get('help.ui_workflow'), default = config.get_str_value('uis', 'ui_workflow', 'instant_runner'), choices = facefusion.choices.ui_workflows)
return program
def create_download_providers_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_download = program.add_argument_group('download')
@@ -298,8 +288,7 @@ def create_program() -> ArgumentParser:
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
sub_program = program.add_subparsers(dest = 'command')
# general
sub_program.add_parser('run', help = translator.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('headless-run', help = translator.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('run', help = translator.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('batch-run', help = translator.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('force-download', help = translator.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('benchmark', help = translator.get('help.benchmark'), parents = [ create_temp_path_program(), collect_step_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+4 -4
View File
@@ -7,7 +7,7 @@ from facefusion.types import State, StateKey, StateSet
STATE_SET : Union[StateSet, ProcessorStateSet] =\
{
'cli': {}, #type:ignore[assignment]
'ui': {} #type:ignore[assignment]
'api': {} #type:ignore[assignment]
}
@@ -17,12 +17,12 @@ def get_state() -> Union[State, ProcessorState]:
def sync_state() -> None:
STATE_SET['cli'] = STATE_SET.get('ui') #type:ignore[assignment]
STATE_SET['cli'] = STATE_SET.get('api') #type:ignore[assignment]
def init_item(key : Union[StateKey, ProcessorStateKey], value : Any) -> None:
STATE_SET['cli'][key] = value #type:ignore[literal-required]
STATE_SET['ui'][key] = value #type:ignore[literal-required]
STATE_SET['api'][key] = value #type:ignore[literal-required]
def get_item(key : Union[StateKey, ProcessorStateKey]) -> Any:
@@ -35,7 +35,7 @@ def set_item(key : Union[StateKey, ProcessorStateKey], value : Any) -> None:
def sync_item(key : Union[StateKey, ProcessorStateKey]) -> None:
STATE_SET['cli'][key] = STATE_SET.get('ui').get(key) #type:ignore[literal-required]
STATE_SET['cli'][key] = STATE_SET.get('api').get(key) #type:ignore[literal-required]
def clear_item(key : Union[StateKey, ProcessorStateKey]) -> None:
+1 -9
View File
@@ -227,13 +227,11 @@ Download = TypedDict('Download',
DownloadSet : TypeAlias = Dict[str, Download]
VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant']
AppContext = Literal['cli', 'ui']
AppContext = Literal['cli', 'api']
InferencePool : TypeAlias = Dict[str, InferenceSession]
InferencePoolSet : TypeAlias = Dict[AppContext, Dict[str, InferencePool]]
UiWorkflow = Literal['instant_runner', 'job_runner', 'job_manager']
JobStore = TypedDict('JobStore',
{
'job_keys' : List[str],
@@ -312,9 +310,6 @@ StateKey = Literal\
'output_video_scale',
'output_video_fps',
'processors',
'open_browser',
'ui_layouts',
'ui_workflow',
'execution_device_ids',
'execution_providers',
'execution_thread_count',
@@ -382,9 +377,6 @@ State = TypedDict('State',
'output_video_scale' : Scale,
'output_video_fps' : float,
'processors' : List[str],
'open_browser' : bool,
'ui_layouts' : List[str],
'ui_workflow' : UiWorkflow,
'execution_device_ids' : List[int],
'execution_providers' : List[ExecutionProvider],
'execution_thread_count' : int,
View File
-163
View File
@@ -1,163 +0,0 @@
:root:root:root:root .gradio-container
{
overflow: unset;
}
:root:root:root:root main
{
max-width: 110em;
}
:root:root:root:root .tab-like-container input[type="number"]
{
border-radius: unset;
text-align: center;
order: 1;
padding: unset
}
:root:root:root:root input[type="number"]
{
appearance: textfield;
}
:root:root:root:root input[type="number"]::-webkit-inner-spin-button
{
appearance: none;
}
:root:root:root:root input[type="number"]:focus
{
outline: unset;
}
:root:root:root:root .reset-button
{
background: var(--background-fill-secondary);
border: unset;
font-size: unset;
padding: unset;
}
:root:root:root:root [type="checkbox"],
:root:root:root:root [type="radio"]
{
border-radius: 50%;
height: 1.125rem;
width: 1.125rem;
}
:root:root:root:root input[type="range"]
{
background: transparent;
}
:root:root:root:root input[type="range"]::-moz-range-thumb,
:root:root:root:root input[type="range"]::-webkit-slider-thumb
{
background: var(--neutral-300);
box-shadow: unset;
border-radius: 50%;
height: 1.125rem;
width: 1.125rem;
}
:root:root:root:root .thumbnail-item
{
border: unset;
box-shadow: unset;
}
:root:root:root:root .grid-wrap.fixed-height
{
min-height: unset;
}
:root:root:root:root .box-face-selector .empty,
:root:root:root:root .box-face-selector .gallery-container
{
min-height: 7.375rem;
}
:root:root:root:root .tab-wrapper
{
padding: 0 0.625rem;
}
:root:root:root:root .tab-container
{
gap: 0.5em;
}
:root:root:root:root .tab-container button
{
background: unset;
border-bottom: 0.125rem solid;
}
:root:root:root:root .tab-container button.selected
{
color: var(--primary-500)
}
:root:root:root:root .toast-body
{
background: white;
color: var(--primary-500);
border: unset;
border-radius: unset;
}
:root:root:root:root .dark .toast-body
{
background: var(--neutral-900);
color: var(--primary-600);
}
:root:root:root:root .toast-icon,
:root:root:root:root .toast-title,
:root:root:root:root .toast-text,
:root:root:root:root .toast-close
{
color: unset;
}
:root:root:root:root .toast-body .timer
{
background: currentColor;
}
:root:root:root:root .slider_input_container > span,
:root:root:root:root .feather-upload,
:root:root:root:root footer
{
display: none;
}
:root:root:root:root .image-frame
{
background-image: conic-gradient(#fff 90deg, #999 90deg 180deg, #fff 180deg 270deg, #999 270deg);
background-size: 1.25rem 1.25rem;
background-repeat: repeat;
width: 100%;
}
:root:root:root:root .image-frame > img
{
object-fit: cover;
}
:root:root:root:root .image-preview.is-landscape
{
position: sticky;
top: 0;
z-index: 100;
}
:root:root:root:root .block .error
{
border: 0.125rem solid;
padding: 0.375rem 0.75rem;
font-size: 0.75rem;
text-transform: uppercase;
}
-25
View File
@@ -1,25 +0,0 @@
from typing import Dict, List
from facefusion.types import Color, WebcamMode
from facefusion.uis.types import JobManagerAction, JobRunnerAction, PreviewMode
job_manager_actions : List[JobManagerAction] = [ 'job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]
job_runner_actions : List[JobRunnerAction] = [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]
common_options : List[str] = [ 'keep-temp' ]
preview_modes : List[PreviewMode] = [ 'default', 'frame-by-frame', 'face-by-face' ]
preview_resolutions : List[str] = [ '512x512', '768x768', '1024x1024' ]
webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080' ]
background_remover_colors : Dict[str, Color] =\
{
'red' : (255, 0, 0, 255),
'green' : (0, 255, 0, 255),
'blue' : (0, 0, 255, 255),
'black' : (0, 0, 0, 255),
'white' : (255, 255, 255, 255),
'alpha' : (0, 0, 0, 0)
}
-41
View File
@@ -1,41 +0,0 @@
import random
from typing import Optional
import gradio
from facefusion import metadata, translator
METADATA_BUTTON : Optional[gradio.Button] = None
ACTION_BUTTON : Optional[gradio.Button] = None
def render() -> None:
global METADATA_BUTTON
global ACTION_BUTTON
action = random.choice(
[
{
'translator': translator.get('about.fund'),
'url': 'https://fund.facefusion.io'
},
{
'translator': translator.get('about.subscribe'),
'url': 'https://subscribe.facefusion.io'
},
{
'translator': translator.get('about.join'),
'url': 'https://join.facefusion.io'
}
])
METADATA_BUTTON = gradio.Button(
value = metadata.get('name') + ' ' + metadata.get('version'),
variant = 'primary',
link = metadata.get('url')
)
ACTION_BUTTON = gradio.Button(
value = action.get('translator'),
link = action.get('url'),
size = 'sm'
)
@@ -1,64 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.age_modifier import choices as age_modifier_choices
from facefusion.processors.modules.age_modifier.types import AgeModifierModel
from facefusion.uis.core import get_ui_component, register_ui_component
AGE_MODIFIER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
AGE_MODIFIER_DIRECTION_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
global AGE_MODIFIER_MODEL_DROPDOWN
global AGE_MODIFIER_DIRECTION_SLIDER
has_age_modifier = 'age_modifier' in state_manager.get_item('processors')
AGE_MODIFIER_MODEL_DROPDOWN = gradio.Dropdown(
label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.age_modifier'),
choices = age_modifier_choices.age_modifier_models,
value = state_manager.get_item('age_modifier_model'),
visible = has_age_modifier
)
AGE_MODIFIER_DIRECTION_SLIDER = gradio.Slider(
label = translator.get('uis.direction_slider', 'facefusion.processors.modules.age_modifier'),
value = state_manager.get_item('age_modifier_direction'),
step = calculate_float_step(age_modifier_choices.age_modifier_direction_range),
minimum = age_modifier_choices.age_modifier_direction_range[0],
maximum = age_modifier_choices.age_modifier_direction_range[-1],
visible = has_age_modifier
)
register_ui_component('age_modifier_model_dropdown', AGE_MODIFIER_MODEL_DROPDOWN)
register_ui_component('age_modifier_direction_slider', AGE_MODIFIER_DIRECTION_SLIDER)
def listen() -> None:
AGE_MODIFIER_MODEL_DROPDOWN.change(update_age_modifier_model, inputs = AGE_MODIFIER_MODEL_DROPDOWN, outputs = AGE_MODIFIER_MODEL_DROPDOWN)
AGE_MODIFIER_DIRECTION_SLIDER.release(update_age_modifier_direction, inputs = AGE_MODIFIER_DIRECTION_SLIDER)
processors_checkbox_group = get_ui_component('processors_checkbox_group')
if processors_checkbox_group:
processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ AGE_MODIFIER_MODEL_DROPDOWN, AGE_MODIFIER_DIRECTION_SLIDER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
has_age_modifier = 'age_modifier' in processors
return gradio.Dropdown(visible = has_age_modifier), gradio.Slider(visible = has_age_modifier)
def update_age_modifier_model(age_modifier_model : AgeModifierModel) -> gradio.Dropdown:
age_modifier_module = load_processor_module('age_modifier')
age_modifier_module.clear_inference_pool()
state_manager.set_item('age_modifier_model', age_modifier_model)
if age_modifier_module.pre_check():
return gradio.Dropdown(value = state_manager.get_item('age_modifier_model'))
return gradio.Dropdown()
def update_age_modifier_direction(age_modifier_direction : float) -> None:
state_manager.set_item('age_modifier_direction', int(age_modifier_direction))
@@ -1,107 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.background_remover import choices as background_remover_choices
from facefusion.processors.modules.background_remover.types import BackgroundRemoverModel
from facefusion.sanitizer import sanitize_int_range
from facefusion.uis.core import get_ui_component, register_ui_component
BACKGROUND_REMOVER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
BACKGROUND_REMOVER_COLOR_WRAPPER : Optional[gradio.Group] = None
BACKGROUND_REMOVER_COLOR_RED_NUMBER : Optional[gradio.Number] = None
BACKGROUND_REMOVER_COLOR_GREEN_NUMBER : Optional[gradio.Number] = None
BACKGROUND_REMOVER_COLOR_BLUE_NUMBER : Optional[gradio.Number] = None
BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER : Optional[gradio.Number] = None
def render() -> None:
global BACKGROUND_REMOVER_MODEL_DROPDOWN
global BACKGROUND_REMOVER_COLOR_WRAPPER
global BACKGROUND_REMOVER_COLOR_RED_NUMBER
global BACKGROUND_REMOVER_COLOR_GREEN_NUMBER
global BACKGROUND_REMOVER_COLOR_BLUE_NUMBER
global BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER
has_background_remover = 'background_remover' in state_manager.get_item('processors')
background_remover_color = state_manager.get_item('background_remover_color')
BACKGROUND_REMOVER_MODEL_DROPDOWN = gradio.Dropdown(
label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.background_remover'),
choices = background_remover_choices.background_remover_models,
value = state_manager.get_item('background_remover_model'),
visible = has_background_remover
)
with gradio.Group(visible = has_background_remover) as BACKGROUND_REMOVER_COLOR_WRAPPER:
with gradio.Row():
BACKGROUND_REMOVER_COLOR_RED_NUMBER = gradio.Number(
label = translator.get('uis.color_red_number', 'facefusion.processors.modules.background_remover'),
value = background_remover_color[0],
minimum = background_remover_choices.background_remover_color_range[0],
maximum = background_remover_choices.background_remover_color_range[-1],
step = calculate_int_step(background_remover_choices.background_remover_color_range)
)
BACKGROUND_REMOVER_COLOR_GREEN_NUMBER = gradio.Number(
label = translator.get('uis.color_green_number', 'facefusion.processors.modules.background_remover'),
value = background_remover_color[1],
minimum = background_remover_choices.background_remover_color_range[0],
maximum = background_remover_choices.background_remover_color_range[-1],
step = calculate_int_step(background_remover_choices.background_remover_color_range)
)
with gradio.Row():
BACKGROUND_REMOVER_COLOR_BLUE_NUMBER = gradio.Number(
label = translator.get('uis.color_blue_number', 'facefusion.processors.modules.background_remover'),
value = background_remover_color[2],
minimum = background_remover_choices.background_remover_color_range[0],
maximum = background_remover_choices.background_remover_color_range[-1],
step = calculate_int_step(background_remover_choices.background_remover_color_range)
)
BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER = gradio.Number(
label = translator.get('uis.color_alpha_number', 'facefusion.processors.modules.background_remover'),
value = background_remover_color[3],
minimum = background_remover_choices.background_remover_color_range[0],
maximum = background_remover_choices.background_remover_color_range[-1],
step = calculate_int_step(background_remover_choices.background_remover_color_range)
)
register_ui_component('background_remover_model_dropdown', BACKGROUND_REMOVER_MODEL_DROPDOWN)
register_ui_component('background_remover_color_red_number', BACKGROUND_REMOVER_COLOR_RED_NUMBER)
register_ui_component('background_remover_color_green_number', BACKGROUND_REMOVER_COLOR_GREEN_NUMBER)
register_ui_component('background_remover_color_blue_number', BACKGROUND_REMOVER_COLOR_BLUE_NUMBER)
register_ui_component('background_remover_color_alpha_number', BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER)
def listen() -> None:
BACKGROUND_REMOVER_MODEL_DROPDOWN.change(update_background_remover_model, inputs = BACKGROUND_REMOVER_MODEL_DROPDOWN, outputs = BACKGROUND_REMOVER_MODEL_DROPDOWN)
background_remover_color_inputs = [ BACKGROUND_REMOVER_COLOR_RED_NUMBER, BACKGROUND_REMOVER_COLOR_GREEN_NUMBER, BACKGROUND_REMOVER_COLOR_BLUE_NUMBER, BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER ]
for background_remover_color_input in background_remover_color_inputs:
background_remover_color_input.change(update_background_remover_color, inputs = background_remover_color_inputs)
processors_checkbox_group = get_ui_component('processors_checkbox_group')
if processors_checkbox_group:
processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [BACKGROUND_REMOVER_MODEL_DROPDOWN, BACKGROUND_REMOVER_COLOR_WRAPPER])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Group]:
has_background_remover = 'background_remover' in processors
return gradio.Dropdown(visible = has_background_remover), gradio.Group(visible = has_background_remover)
def update_background_remover_model(background_remover_model : BackgroundRemoverModel) -> gradio.Dropdown:
background_remover_module = load_processor_module('background_remover')
background_remover_module.clear_inference_pool()
state_manager.set_item('background_remover_model', background_remover_model)
if background_remover_module.pre_check():
return gradio.Dropdown(value = state_manager.get_item('background_remover_model'))
return gradio.Dropdown()
def update_background_remover_color(red : int, green : int, blue : int, alpha : int) -> None:
red = sanitize_int_range(red, background_remover_choices.background_remover_color_range)
green = sanitize_int_range(green, background_remover_choices.background_remover_color_range)
blue = sanitize_int_range(blue, background_remover_choices.background_remover_color_range)
alpha = sanitize_int_range(alpha, background_remover_choices.background_remover_color_range)
state_manager.set_item('background_remover_color', (red, green, blue, alpha))
-51
View File
@@ -1,51 +0,0 @@
from typing import Any, Iterator, List, Optional
import gradio
from facefusion import benchmarker, state_manager, translator
BENCHMARK_BENCHMARKS_DATAFRAME : Optional[gradio.Dataframe] = None
BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
def render() -> None:
global BENCHMARK_BENCHMARKS_DATAFRAME
global BENCHMARK_START_BUTTON
BENCHMARK_BENCHMARKS_DATAFRAME = gradio.Dataframe(
headers =
[
'target_path',
'cycle_count',
'average_run',
'fastest_run',
'slowest_run',
'relative_fps'
],
datatype =
[
'str',
'number',
'number',
'number',
'number',
'number'
],
show_label = False
)
BENCHMARK_START_BUTTON = gradio.Button(
value = translator.get('uis.start_button'),
variant = 'primary',
size = 'sm'
)
def listen() -> None:
BENCHMARK_START_BUTTON.click(start, outputs = BENCHMARK_BENCHMARKS_DATAFRAME)
def start() -> Iterator[List[Any]]:
state_manager.sync_state()
for benchmark in benchmarker.run():
yield [ list(benchmark_set.values()) for benchmark_set in benchmark ]
@@ -1,54 +0,0 @@
from typing import List, Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.types import BenchmarkMode, BenchmarkResolution
BENCHMARK_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
BENCHMARK_CYCLE_COUNT_SLIDER : Optional[gradio.Button] = None
def render() -> None:
global BENCHMARK_MODE_DROPDOWN
global BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP
global BENCHMARK_CYCLE_COUNT_SLIDER
BENCHMARK_MODE_DROPDOWN = gradio.Dropdown(
label = translator.get('uis.benchmark_mode_dropdown'),
choices = facefusion.choices.benchmark_modes,
value = state_manager.get_item('benchmark_mode')
)
BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = translator.get('uis.benchmark_resolutions_checkbox_group'),
choices = facefusion.choices.benchmark_resolutions,
value = state_manager.get_item('benchmark_resolutions')
)
BENCHMARK_CYCLE_COUNT_SLIDER = gradio.Slider(
label = translator.get('uis.benchmark_cycle_count_slider'),
value = state_manager.get_item('benchmark_cycle_count'),
step = calculate_int_step(facefusion.choices.benchmark_cycle_count_range),
minimum = facefusion.choices.benchmark_cycle_count_range[0],
maximum = facefusion.choices.benchmark_cycle_count_range[-1]
)
def listen() -> None:
BENCHMARK_MODE_DROPDOWN.change(update_benchmark_mode, inputs = BENCHMARK_MODE_DROPDOWN)
BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP.change(update_benchmark_resolutions, inputs = BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP)
BENCHMARK_CYCLE_COUNT_SLIDER.release(update_benchmark_cycle_count, inputs = BENCHMARK_CYCLE_COUNT_SLIDER)
def update_benchmark_mode(benchmark_mode : BenchmarkMode) -> None:
state_manager.set_item('benchmark_mode', benchmark_mode)
def update_benchmark_resolutions(benchmark_resolutions : List[BenchmarkResolution]) -> None:
state_manager.set_item('benchmark_resolutions', benchmark_resolutions)
def update_benchmark_cycle_count(benchmark_cycle_count : int) -> None:
state_manager.set_item('benchmark_cycle_count', benchmark_cycle_count)
@@ -1,32 +0,0 @@
from typing import List, Optional
import gradio
from facefusion import state_manager, translator
from facefusion.uis import choices as uis_choices
COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.Checkboxgroup] = None
def render() -> None:
global COMMON_OPTIONS_CHECKBOX_GROUP
common_options = []
if state_manager.get_item('keep_temp'):
common_options.append('keep-temp')
COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup(
label = translator.get('uis.common_options_checkbox_group'),
choices = uis_choices.common_options,
value = common_options
)
def listen() -> None:
COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP)
def update(common_options : List[str]) -> None:
keep_temp = 'keep-temp' in common_options
state_manager.set_item('keep_temp', keep_temp)
@@ -1,64 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.deep_swapper import choices as deep_swapper_choices
from facefusion.processors.modules.deep_swapper.types import DeepSwapperModel
from facefusion.uis.core import get_ui_component, register_ui_component
DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
DEEP_SWAPPER_MORPH_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
global DEEP_SWAPPER_MODEL_DROPDOWN
global DEEP_SWAPPER_MORPH_SLIDER
has_deep_swapper = 'deep_swapper' in state_manager.get_item('processors')
DEEP_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.deep_swapper'),
choices = deep_swapper_choices.deep_swapper_models,
value = state_manager.get_item('deep_swapper_model'),
visible = has_deep_swapper
)
DEEP_SWAPPER_MORPH_SLIDER = gradio.Slider(
label = translator.get('uis.morph_slider', 'facefusion.processors.modules.deep_swapper'),
value = state_manager.get_item('deep_swapper_morph'),
step = calculate_int_step(deep_swapper_choices.deep_swapper_morph_range),
minimum = deep_swapper_choices.deep_swapper_morph_range[0],
maximum = deep_swapper_choices.deep_swapper_morph_range[-1],
visible = has_deep_swapper and load_processor_module('deep_swapper').get_inference_pool() and load_processor_module('deep_swapper').has_morph_input()
)
register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN)
register_ui_component('deep_swapper_morph_slider', DEEP_SWAPPER_MORPH_SLIDER)
def listen() -> None:
    """Attach the deep swapper event handlers and react to processor selection changes."""
    DEEP_SWAPPER_MODEL_DROPDOWN.change(update_deep_swapper_model, inputs = DEEP_SWAPPER_MODEL_DROPDOWN, outputs = [ DEEP_SWAPPER_MODEL_DROPDOWN, DEEP_SWAPPER_MORPH_SLIDER ])
    DEEP_SWAPPER_MORPH_SLIDER.release(update_deep_swapper_morph, inputs = DEEP_SWAPPER_MORPH_SLIDER)

    # the processors checkbox group lives in another component module and may not be rendered
    processors_checkbox_group = get_ui_component('processors_checkbox_group')
    if processors_checkbox_group:
        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ DEEP_SWAPPER_MODEL_DROPDOWN, DEEP_SWAPPER_MORPH_SLIDER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
    """Toggle visibility of the deep swapper controls when the processor selection changes."""
    is_enabled = 'deep_swapper' in processors
    show_morph = is_enabled
    if is_enabled:
        processor_module = load_processor_module('deep_swapper')
        show_morph = processor_module.get_inference_pool() and processor_module.has_morph_input()
    return gradio.Dropdown(visible = is_enabled), gradio.Slider(visible = show_morph)
def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> Tuple[gradio.Dropdown, gradio.Slider]:
    """Switch the deep swapper model, dropping its inference pool, and refresh the controls."""
    processor_module = load_processor_module('deep_swapper')
    processor_module.clear_inference_pool()
    state_manager.set_item('deep_swapper_model', deep_swapper_model)

    # when the model cannot be prepared, return untouched components
    if not processor_module.pre_check():
        return gradio.Dropdown(), gradio.Slider()
    model_dropdown = gradio.Dropdown(value = state_manager.get_item('deep_swapper_model'))
    morph_slider = gradio.Slider(visible = processor_module.has_morph_input())
    return model_dropdown, morph_slider
def update_deep_swapper_morph(deep_swapper_morph : int) -> None:
    """Persist the deep swapper morph value."""
    state_manager.set_item('deep_swapper_morph', deep_swapper_morph)
-48
View File
@@ -1,48 +0,0 @@
from typing import List, Optional
import gradio
import facefusion.choices
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, translator, voice_extractor
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules
from facefusion.types import DownloadProvider
DOWNLOAD_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
def render() -> None:
    """Build the download providers checkbox group."""
    global DOWNLOAD_PROVIDERS_CHECKBOX_GROUP

    DOWNLOAD_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.download_providers_checkbox_group'),
        choices = facefusion.choices.download_providers,
        value = state_manager.get_item('download_providers')
    )
def listen() -> None:
    """Wire the download providers checkbox group to its update handler."""
    DOWNLOAD_PROVIDERS_CHECKBOX_GROUP.change(update_download_providers, inputs = DOWNLOAD_PROVIDERS_CHECKBOX_GROUP, outputs = DOWNLOAD_PROVIDERS_CHECKBOX_GROUP)
def update_download_providers(download_providers : List[DownloadProvider]) -> gradio.CheckboxGroup:
    """Apply the selected download providers and flush cached model sets so they re-resolve."""
    modules =\
    [
        content_analyser,
        face_classifier,
        face_detector,
        face_landmarker,
        face_recognizer,
        face_masker,
        voice_extractor
    ]
    available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
    modules.extend(get_processors_modules(available_processors))

    # drop every cached static model set so downloads are re-resolved through the new providers
    for module in modules:
        cached_model_set = getattr(module, 'create_static_model_set', None)
        if cached_model_set:
            cached_model_set.cache_clear()

    # an empty selection falls back to every available provider
    if not download_providers:
        download_providers = facefusion.choices.download_providers
    state_manager.set_item('download_providers', download_providers)
    return gradio.CheckboxGroup(value = state_manager.get_item('download_providers'))
-48
View File
@@ -1,48 +0,0 @@
from typing import List, Optional
import gradio
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, translator, voice_extractor
from facefusion.execution import get_available_execution_providers
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules
from facefusion.types import ExecutionProvider
EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
def render() -> None:
    """Build the execution providers checkbox group from the providers available on this machine."""
    global EXECUTION_PROVIDERS_CHECKBOX_GROUP

    EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.execution_providers_checkbox_group'),
        choices = get_available_execution_providers(),
        value = state_manager.get_item('execution_providers')
    )
def listen() -> None:
    """Wire the execution providers checkbox group to its update handler."""
    EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
def update_execution_providers(execution_providers : List[ExecutionProvider]) -> gradio.CheckboxGroup:
    """Apply the selected execution providers and drop every inference pool so sessions rebuild."""
    modules =\
    [
        content_analyser,
        face_classifier,
        face_detector,
        face_landmarker,
        face_masker,
        face_recognizer,
        voice_extractor
    ]
    available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
    modules.extend(get_processors_modules(available_processors))

    # tear down every inference pool so the next run picks up the new providers
    for module in modules:
        if hasattr(module, 'clear_inference_pool'):
            module.clear_inference_pool()

    # an empty selection falls back to every available provider
    if not execution_providers:
        execution_providers = get_available_execution_providers()
    state_manager.set_item('execution_providers', execution_providers)
    return gradio.CheckboxGroup(value = state_manager.get_item('execution_providers'))
@@ -1,29 +0,0 @@
from typing import Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Build the execution thread count slider."""
    global EXECUTION_THREAD_COUNT_SLIDER

    EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
        label = translator.get('uis.execution_thread_count_slider'),
        value = state_manager.get_item('execution_thread_count'),
        step = calculate_int_step(facefusion.choices.execution_thread_count_range),
        minimum = facefusion.choices.execution_thread_count_range[0],
        maximum = facefusion.choices.execution_thread_count_range[-1]
    )
def listen() -> None:
    """Wire the slider's release event to the update handler."""
    EXECUTION_THREAD_COUNT_SLIDER.release(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
def update_execution_thread_count(execution_thread_count : float) -> None:
    """Persist the slider value as an integer thread count."""
    thread_count = int(execution_thread_count)
    state_manager.set_item('execution_thread_count', thread_count)
@@ -1,80 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.expression_restorer import choices as expression_restorer_choices
from facefusion.processors.modules.expression_restorer.types import ExpressionRestorerArea, ExpressionRestorerModel
from facefusion.uis.core import get_ui_component, register_ui_component
EXPRESSION_RESTORER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
EXPRESSION_RESTORER_FACTOR_SLIDER : Optional[gradio.Slider] = None
EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
def render() -> None:
    """Build the expression restorer model dropdown, factor slider and areas checkbox group."""
    global EXPRESSION_RESTORER_MODEL_DROPDOWN
    global EXPRESSION_RESTORER_FACTOR_SLIDER
    global EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP

    # components stay hidden unless the expression_restorer processor is selected
    has_expression_restorer = 'expression_restorer' in state_manager.get_item('processors')
    EXPRESSION_RESTORER_MODEL_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.expression_restorer'),
        choices = expression_restorer_choices.expression_restorer_models,
        value = state_manager.get_item('expression_restorer_model'),
        visible = has_expression_restorer
    )
    EXPRESSION_RESTORER_FACTOR_SLIDER = gradio.Slider(
        label = translator.get('uis.factor_slider', 'facefusion.processors.modules.expression_restorer'),
        value = state_manager.get_item('expression_restorer_factor'),
        step = calculate_float_step(expression_restorer_choices.expression_restorer_factor_range),
        minimum = expression_restorer_choices.expression_restorer_factor_range[0],
        maximum = expression_restorer_choices.expression_restorer_factor_range[-1],
        visible = has_expression_restorer
    )
    EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.areas_checkbox_group', 'facefusion.processors.modules.expression_restorer'),
        choices = expression_restorer_choices.expression_restorer_areas,
        value = state_manager.get_item('expression_restorer_areas'),
        visible = has_expression_restorer
    )
    register_ui_component('expression_restorer_model_dropdown', EXPRESSION_RESTORER_MODEL_DROPDOWN)
    register_ui_component('expression_restorer_factor_slider', EXPRESSION_RESTORER_FACTOR_SLIDER)
    register_ui_component('expression_restorer_areas_checkbox_group', EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP)
def listen() -> None:
    """Attach the expression restorer event handlers and react to processor selection changes."""
    EXPRESSION_RESTORER_MODEL_DROPDOWN.change(update_expression_restorer_model, inputs = EXPRESSION_RESTORER_MODEL_DROPDOWN, outputs = EXPRESSION_RESTORER_MODEL_DROPDOWN)
    EXPRESSION_RESTORER_FACTOR_SLIDER.release(update_expression_restorer_factor, inputs = EXPRESSION_RESTORER_FACTOR_SLIDER)
    EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP.change(update_expression_restorer_areas, inputs = EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP, outputs = EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP)

    # the processors checkbox group lives in another component module and may not be rendered
    processors_checkbox_group = get_ui_component('processors_checkbox_group')
    if processors_checkbox_group:
        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ EXPRESSION_RESTORER_MODEL_DROPDOWN, EXPRESSION_RESTORER_FACTOR_SLIDER, EXPRESSION_RESTORER_AREAS_CHECKBOX_GROUP ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.CheckboxGroup]:
    """Toggle visibility of the expression restorer controls when the processor selection changes."""
    is_enabled = 'expression_restorer' in processors
    return gradio.Dropdown(visible = is_enabled), gradio.Slider(visible = is_enabled), gradio.CheckboxGroup(visible = is_enabled)
def update_expression_restorer_model(expression_restorer_model : ExpressionRestorerModel) -> gradio.Dropdown:
    """Switch the expression restorer model, dropping its inference pool."""
    processor_module = load_processor_module('expression_restorer')
    processor_module.clear_inference_pool()
    state_manager.set_item('expression_restorer_model', expression_restorer_model)

    # when the model cannot be prepared, return an untouched component
    if not processor_module.pre_check():
        return gradio.Dropdown()
    return gradio.Dropdown(value = state_manager.get_item('expression_restorer_model'))
def update_expression_restorer_factor(expression_restorer_factor : float) -> None:
    """Persist the restore factor, coerced to int as the state expects."""
    state_manager.set_item('expression_restorer_factor', int(expression_restorer_factor))
def update_expression_restorer_areas(expression_restorer_areas : List[ExpressionRestorerArea]) -> gradio.CheckboxGroup:
    """Persist the selected restorer areas, falling back to every area when none are picked."""
    if not expression_restorer_areas:
        expression_restorer_areas = expression_restorer_choices.expression_restorer_areas
    state_manager.set_item('expression_restorer_areas', expression_restorer_areas)
    return gradio.CheckboxGroup(value = state_manager.get_item('expression_restorer_areas'))
@@ -1,40 +0,0 @@
from typing import List, Optional
import gradio
from facefusion import state_manager, translator
from facefusion.processors.modules.face_debugger import choices as face_debugger_choices
from facefusion.processors.modules.face_debugger.types import FaceDebuggerItem
from facefusion.uis.core import get_ui_component, register_ui_component
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
def render() -> None:
    """Build the face debugger items checkbox group and register it as a UI component."""
    global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP

    # the component stays hidden unless the face_debugger processor is selected
    has_face_debugger = 'face_debugger' in state_manager.get_item('processors')
    FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.items_checkbox_group', 'facefusion.processors.modules.face_debugger'),
        choices = face_debugger_choices.face_debugger_items,
        value = state_manager.get_item('face_debugger_items'),
        visible = has_face_debugger
    )
    register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
def listen() -> None:
    """Attach the face debugger event handlers and react to processor selection changes."""
    FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)

    # the processors checkbox group lives in another component module and may not be rendered
    processors_checkbox_group = get_ui_component('processors_checkbox_group')
    if processors_checkbox_group:
        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
def remote_update(processors : List[str]) -> gradio.CheckboxGroup:
    """Toggle visibility of the face debugger items when the processor selection changes."""
    is_enabled = 'face_debugger' in processors
    return gradio.CheckboxGroup(visible = is_enabled)
def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
    """Persist the selected face debugger overlay items."""
    state_manager.set_item('face_debugger_items', face_debugger_items)
-103
View File
@@ -1,103 +0,0 @@
from typing import Optional, Sequence, Tuple
import gradio
import facefusion.choices
from facefusion import face_detector, state_manager, translator
from facefusion.common_helper import calculate_float_step, get_last
from facefusion.sanitizer import sanitize_int_range
from facefusion.types import Angle, FaceDetectorModel, Score
from facefusion.uis.core import register_ui_component
from facefusion.uis.types import ComponentOptions
FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTOR_MARGIN_SLIDER : Optional[gradio.Slider] = None
FACE_DETECTOR_ANGLES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Build the face detector controls and register them as UI components."""
    global FACE_DETECTOR_MODEL_DROPDOWN
    global FACE_DETECTOR_SIZE_DROPDOWN
    global FACE_DETECTOR_MARGIN_SLIDER
    global FACE_DETECTOR_ANGLES_CHECKBOX_GROUP
    global FACE_DETECTOR_SCORE_SLIDER

    face_detector_size_dropdown_options : ComponentOptions =\
    {
        'label': translator.get('uis.face_detector_size_dropdown'),
        'value': state_manager.get_item('face_detector_size')
    }
    # only offer size choices when the stored size is valid for the current detector model
    if state_manager.get_item('face_detector_size') in facefusion.choices.face_detector_set[state_manager.get_item('face_detector_model')]:
        face_detector_size_dropdown_options['choices'] = facefusion.choices.face_detector_set[state_manager.get_item('face_detector_model')]
    # NOTE(review): indentation was lost in this listing; the row is assumed to wrap only the two dropdowns — confirm
    with gradio.Row():
        FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
            label = translator.get('uis.face_detector_model_dropdown'),
            choices = facefusion.choices.face_detector_models,
            value = state_manager.get_item('face_detector_model')
        )
        FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(**face_detector_size_dropdown_options)
    FACE_DETECTOR_MARGIN_SLIDER = gradio.Slider(
        label = translator.get('uis.face_detector_margin_slider'),
        # only the first component of the margin tuple seeds the slider; updates fan it out to all sides
        value = state_manager.get_item('face_detector_margin')[0],
        step = calculate_float_step(facefusion.choices.face_detector_margin_range),
        minimum = facefusion.choices.face_detector_margin_range[0],
        maximum = facefusion.choices.face_detector_margin_range[-1]
    )
    FACE_DETECTOR_ANGLES_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.face_detector_angles_checkbox_group'),
        choices = facefusion.choices.face_detector_angles,
        value = state_manager.get_item('face_detector_angles')
    )
    FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
        label = translator.get('uis.face_detector_score_slider'),
        value = state_manager.get_item('face_detector_score'),
        step = calculate_float_step(facefusion.choices.face_detector_score_range),
        minimum = facefusion.choices.face_detector_score_range[0],
        maximum = facefusion.choices.face_detector_score_range[-1]
    )
    register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN)
    register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN)
    register_ui_component('face_detector_margin_slider', FACE_DETECTOR_MARGIN_SLIDER)
    register_ui_component('face_detector_angles_checkbox_group', FACE_DETECTOR_ANGLES_CHECKBOX_GROUP)
    register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER)
def listen() -> None:
    """Attach the face detector event handlers."""
    FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN, outputs = [ FACE_DETECTOR_MODEL_DROPDOWN, FACE_DETECTOR_SIZE_DROPDOWN ])
    FACE_DETECTOR_SIZE_DROPDOWN.change(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN)
    # consistency fix: use the file-wide `inputs = X` keyword spacing (was `inputs=FACE_DETECTOR_MARGIN_SLIDER`)
    FACE_DETECTOR_MARGIN_SLIDER.release(update_face_detector_margin, inputs = FACE_DETECTOR_MARGIN_SLIDER)
    FACE_DETECTOR_ANGLES_CHECKBOX_GROUP.change(update_face_detector_angles, inputs = FACE_DETECTOR_ANGLES_CHECKBOX_GROUP, outputs = FACE_DETECTOR_ANGLES_CHECKBOX_GROUP)
    FACE_DETECTOR_SCORE_SLIDER.release(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER)
def update_face_detector_model(face_detector_model : FaceDetectorModel) -> Tuple[gradio.Dropdown, gradio.Dropdown]:
    """Switch the face detector model, dropping its inference pool, and refresh the size choices."""
    face_detector.clear_inference_pool()
    state_manager.set_item('face_detector_model', face_detector_model)

    # when the model cannot be prepared, return untouched components
    if not face_detector.pre_check():
        return gradio.Dropdown(), gradio.Dropdown()
    face_detector_size_choices = facefusion.choices.face_detector_set.get(state_manager.get_item('face_detector_model'))
    state_manager.set_item('face_detector_size', get_last(face_detector_size_choices))
    model_dropdown = gradio.Dropdown(value = state_manager.get_item('face_detector_model'))
    size_dropdown = gradio.Dropdown(value = state_manager.get_item('face_detector_size'), choices = face_detector_size_choices)
    return model_dropdown, size_dropdown
def update_face_detector_size(face_detector_size : str) -> None:
    """Persist the selected face detector input size."""
    state_manager.set_item('face_detector_size', face_detector_size)
def update_face_detector_margin(face_detector_margin : int) -> None:
    """Sanitize the margin and apply it uniformly to all four sides."""
    # NOTE(review): gradio sliders deliver floats despite the int annotation; sanitize_int_range presumably coerces — confirm
    face_detector_margin = sanitize_int_range(face_detector_margin, facefusion.choices.face_detector_margin_range)
    state_manager.set_item('face_detector_margin', (face_detector_margin, face_detector_margin, face_detector_margin, face_detector_margin))
def update_face_detector_angles(face_detector_angles : Sequence[Angle]) -> gradio.CheckboxGroup:
    """Persist the detector angles, falling back to every angle when none are picked."""
    if not face_detector_angles:
        face_detector_angles = facefusion.choices.face_detector_angles
    state_manager.set_item('face_detector_angles', face_detector_angles)
    return gradio.CheckboxGroup(value = state_manager.get_item('face_detector_angles'))
def update_face_detector_score(face_detector_score : Score) -> None:
    """Persist the face detector confidence threshold."""
    state_manager.set_item('face_detector_score', face_detector_score)
@@ -1,272 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.face_editor import choices as face_editor_choices
from facefusion.processors.modules.face_editor.types import FaceEditorModel
from facefusion.uis.core import get_ui_component, register_ui_component
FACE_EDITOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_EDITOR_EYEBROW_DIRECTION_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_EYE_OPEN_RATIO_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_LIP_OPEN_RATIO_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_MOUTH_GRIM_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_MOUTH_POUT_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_MOUTH_PURSE_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_MOUTH_SMILE_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_HEAD_PITCH_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_HEAD_YAW_SLIDER : Optional[gradio.Slider] = None
FACE_EDITOR_HEAD_ROLL_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Build every face editor control (model dropdown plus fourteen sliders) and register them.

    All components stay hidden unless the face_editor processor is selected; each slider
    follows the same template, driven by its range from face_editor_choices.
    """
    global FACE_EDITOR_MODEL_DROPDOWN
    global FACE_EDITOR_EYEBROW_DIRECTION_SLIDER
    global FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER
    global FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER
    global FACE_EDITOR_EYE_OPEN_RATIO_SLIDER
    global FACE_EDITOR_LIP_OPEN_RATIO_SLIDER
    global FACE_EDITOR_MOUTH_GRIM_SLIDER
    global FACE_EDITOR_MOUTH_POUT_SLIDER
    global FACE_EDITOR_MOUTH_PURSE_SLIDER
    global FACE_EDITOR_MOUTH_SMILE_SLIDER
    global FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER
    global FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER
    global FACE_EDITOR_HEAD_PITCH_SLIDER
    global FACE_EDITOR_HEAD_YAW_SLIDER
    global FACE_EDITOR_HEAD_ROLL_SLIDER

    has_face_editor = 'face_editor' in state_manager.get_item('processors')
    FACE_EDITOR_MODEL_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.face_editor'),
        choices = face_editor_choices.face_editor_models,
        value = state_manager.get_item('face_editor_model'),
        visible = has_face_editor
    )
    FACE_EDITOR_EYEBROW_DIRECTION_SLIDER = gradio.Slider(
        label = translator.get('uis.eyebrow_direction_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_eyebrow_direction'),
        step = calculate_float_step(face_editor_choices.face_editor_eyebrow_direction_range),
        minimum = face_editor_choices.face_editor_eyebrow_direction_range[0],
        maximum = face_editor_choices.face_editor_eyebrow_direction_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER = gradio.Slider(
        label = translator.get('uis.eye_gaze_horizontal_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_eye_gaze_horizontal'),
        step = calculate_float_step(face_editor_choices.face_editor_eye_gaze_horizontal_range),
        minimum = face_editor_choices.face_editor_eye_gaze_horizontal_range[0],
        maximum = face_editor_choices.face_editor_eye_gaze_horizontal_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER = gradio.Slider(
        label = translator.get('uis.eye_gaze_vertical_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_eye_gaze_vertical'),
        step = calculate_float_step(face_editor_choices.face_editor_eye_gaze_vertical_range),
        minimum = face_editor_choices.face_editor_eye_gaze_vertical_range[0],
        maximum = face_editor_choices.face_editor_eye_gaze_vertical_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_EYE_OPEN_RATIO_SLIDER = gradio.Slider(
        label = translator.get('uis.eye_open_ratio_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_eye_open_ratio'),
        step = calculate_float_step(face_editor_choices.face_editor_eye_open_ratio_range),
        minimum = face_editor_choices.face_editor_eye_open_ratio_range[0],
        maximum = face_editor_choices.face_editor_eye_open_ratio_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_LIP_OPEN_RATIO_SLIDER = gradio.Slider(
        label = translator.get('uis.lip_open_ratio_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_lip_open_ratio'),
        step = calculate_float_step(face_editor_choices.face_editor_lip_open_ratio_range),
        minimum = face_editor_choices.face_editor_lip_open_ratio_range[0],
        maximum = face_editor_choices.face_editor_lip_open_ratio_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_MOUTH_GRIM_SLIDER = gradio.Slider(
        label = translator.get('uis.mouth_grim_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_mouth_grim'),
        step = calculate_float_step(face_editor_choices.face_editor_mouth_grim_range),
        minimum = face_editor_choices.face_editor_mouth_grim_range[0],
        maximum = face_editor_choices.face_editor_mouth_grim_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_MOUTH_POUT_SLIDER = gradio.Slider(
        label = translator.get('uis.mouth_pout_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_mouth_pout'),
        step = calculate_float_step(face_editor_choices.face_editor_mouth_pout_range),
        minimum = face_editor_choices.face_editor_mouth_pout_range[0],
        maximum = face_editor_choices.face_editor_mouth_pout_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_MOUTH_PURSE_SLIDER = gradio.Slider(
        label = translator.get('uis.mouth_purse_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_mouth_purse'),
        step = calculate_float_step(face_editor_choices.face_editor_mouth_purse_range),
        minimum = face_editor_choices.face_editor_mouth_purse_range[0],
        maximum = face_editor_choices.face_editor_mouth_purse_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_MOUTH_SMILE_SLIDER = gradio.Slider(
        label = translator.get('uis.mouth_smile_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_mouth_smile'),
        step = calculate_float_step(face_editor_choices.face_editor_mouth_smile_range),
        minimum = face_editor_choices.face_editor_mouth_smile_range[0],
        maximum = face_editor_choices.face_editor_mouth_smile_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER = gradio.Slider(
        label = translator.get('uis.mouth_position_horizontal_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_mouth_position_horizontal'),
        step = calculate_float_step(face_editor_choices.face_editor_mouth_position_horizontal_range),
        minimum = face_editor_choices.face_editor_mouth_position_horizontal_range[0],
        maximum = face_editor_choices.face_editor_mouth_position_horizontal_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER = gradio.Slider(
        label = translator.get('uis.mouth_position_vertical_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_mouth_position_vertical'),
        step = calculate_float_step(face_editor_choices.face_editor_mouth_position_vertical_range),
        minimum = face_editor_choices.face_editor_mouth_position_vertical_range[0],
        maximum = face_editor_choices.face_editor_mouth_position_vertical_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_HEAD_PITCH_SLIDER = gradio.Slider(
        label = translator.get('uis.head_pitch_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_head_pitch'),
        step = calculate_float_step(face_editor_choices.face_editor_head_pitch_range),
        minimum = face_editor_choices.face_editor_head_pitch_range[0],
        maximum = face_editor_choices.face_editor_head_pitch_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_HEAD_YAW_SLIDER = gradio.Slider(
        label = translator.get('uis.head_yaw_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_head_yaw'),
        step = calculate_float_step(face_editor_choices.face_editor_head_yaw_range),
        minimum = face_editor_choices.face_editor_head_yaw_range[0],
        maximum = face_editor_choices.face_editor_head_yaw_range[-1],
        visible = has_face_editor
    )
    FACE_EDITOR_HEAD_ROLL_SLIDER = gradio.Slider(
        label = translator.get('uis.head_roll_slider', 'facefusion.processors.modules.face_editor'),
        value = state_manager.get_item('face_editor_head_roll'),
        step = calculate_float_step(face_editor_choices.face_editor_head_roll_range),
        minimum = face_editor_choices.face_editor_head_roll_range[0],
        maximum = face_editor_choices.face_editor_head_roll_range[-1],
        visible = has_face_editor
    )
    register_ui_component('face_editor_model_dropdown', FACE_EDITOR_MODEL_DROPDOWN)
    register_ui_component('face_editor_eyebrow_direction_slider', FACE_EDITOR_EYEBROW_DIRECTION_SLIDER)
    register_ui_component('face_editor_eye_gaze_horizontal_slider', FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER)
    register_ui_component('face_editor_eye_gaze_vertical_slider', FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER)
    register_ui_component('face_editor_eye_open_ratio_slider', FACE_EDITOR_EYE_OPEN_RATIO_SLIDER)
    register_ui_component('face_editor_lip_open_ratio_slider', FACE_EDITOR_LIP_OPEN_RATIO_SLIDER)
    register_ui_component('face_editor_mouth_grim_slider', FACE_EDITOR_MOUTH_GRIM_SLIDER)
    register_ui_component('face_editor_mouth_pout_slider', FACE_EDITOR_MOUTH_POUT_SLIDER)
    register_ui_component('face_editor_mouth_purse_slider', FACE_EDITOR_MOUTH_PURSE_SLIDER)
    register_ui_component('face_editor_mouth_smile_slider', FACE_EDITOR_MOUTH_SMILE_SLIDER)
    register_ui_component('face_editor_mouth_position_horizontal_slider', FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER)
    register_ui_component('face_editor_mouth_position_vertical_slider', FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER)
    register_ui_component('face_editor_head_pitch_slider', FACE_EDITOR_HEAD_PITCH_SLIDER)
    register_ui_component('face_editor_head_yaw_slider', FACE_EDITOR_HEAD_YAW_SLIDER)
    register_ui_component('face_editor_head_roll_slider', FACE_EDITOR_HEAD_ROLL_SLIDER)
def listen() -> None:
    """Attach the face editor event handlers and react to processor selection changes."""
    FACE_EDITOR_MODEL_DROPDOWN.change(update_face_editor_model, inputs = FACE_EDITOR_MODEL_DROPDOWN, outputs = FACE_EDITOR_MODEL_DROPDOWN)
    FACE_EDITOR_EYEBROW_DIRECTION_SLIDER.release(update_face_editor_eyebrow_direction, inputs = FACE_EDITOR_EYEBROW_DIRECTION_SLIDER)
    FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER.release(update_face_editor_eye_gaze_horizontal, inputs = FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER)
    FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER.release(update_face_editor_eye_gaze_vertical, inputs = FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER)
    FACE_EDITOR_EYE_OPEN_RATIO_SLIDER.release(update_face_editor_eye_open_ratio, inputs = FACE_EDITOR_EYE_OPEN_RATIO_SLIDER)
    FACE_EDITOR_LIP_OPEN_RATIO_SLIDER.release(update_face_editor_lip_open_ratio, inputs = FACE_EDITOR_LIP_OPEN_RATIO_SLIDER)
    FACE_EDITOR_MOUTH_GRIM_SLIDER.release(update_face_editor_mouth_grim, inputs = FACE_EDITOR_MOUTH_GRIM_SLIDER)
    FACE_EDITOR_MOUTH_POUT_SLIDER.release(update_face_editor_mouth_pout, inputs = FACE_EDITOR_MOUTH_POUT_SLIDER)
    FACE_EDITOR_MOUTH_PURSE_SLIDER.release(update_face_editor_mouth_purse, inputs = FACE_EDITOR_MOUTH_PURSE_SLIDER)
    FACE_EDITOR_MOUTH_SMILE_SLIDER.release(update_face_editor_mouth_smile, inputs = FACE_EDITOR_MOUTH_SMILE_SLIDER)
    FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER.release(update_face_editor_mouth_position_horizontal, inputs = FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER)
    FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER.release(update_face_editor_mouth_position_vertical, inputs = FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER)
    FACE_EDITOR_HEAD_PITCH_SLIDER.release(update_face_editor_head_pitch, inputs = FACE_EDITOR_HEAD_PITCH_SLIDER)
    FACE_EDITOR_HEAD_YAW_SLIDER.release(update_face_editor_head_yaw, inputs = FACE_EDITOR_HEAD_YAW_SLIDER)
    FACE_EDITOR_HEAD_ROLL_SLIDER.release(update_face_editor_head_roll, inputs = FACE_EDITOR_HEAD_ROLL_SLIDER)

    # the processors checkbox group lives in another component module and may not be rendered
    processors_checkbox_group = get_ui_component('processors_checkbox_group')
    if processors_checkbox_group:
        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FACE_EDITOR_MODEL_DROPDOWN, FACE_EDITOR_EYEBROW_DIRECTION_SLIDER, FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER, FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER, FACE_EDITOR_EYE_OPEN_RATIO_SLIDER, FACE_EDITOR_LIP_OPEN_RATIO_SLIDER, FACE_EDITOR_MOUTH_GRIM_SLIDER, FACE_EDITOR_MOUTH_POUT_SLIDER, FACE_EDITOR_MOUTH_PURSE_SLIDER, FACE_EDITOR_MOUTH_SMILE_SLIDER, FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER, FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER, FACE_EDITOR_HEAD_PITCH_SLIDER, FACE_EDITOR_HEAD_YAW_SLIDER, FACE_EDITOR_HEAD_ROLL_SLIDER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider]:
    """Toggle visibility of the face editor dropdown and its fourteen sliders."""
    is_enabled = 'face_editor' in processors
    components = [ gradio.Dropdown(visible = is_enabled) ]
    components.extend(gradio.Slider(visible = is_enabled) for _ in range(14))
    return tuple(components)
def update_face_editor_model(face_editor_model : FaceEditorModel) -> gradio.Dropdown:
    """Switch the face editor model, dropping its inference pool."""
    processor_module = load_processor_module('face_editor')
    processor_module.clear_inference_pool()
    state_manager.set_item('face_editor_model', face_editor_model)

    # when the model cannot be prepared, return an untouched component
    if not processor_module.pre_check():
        return gradio.Dropdown()
    return gradio.Dropdown(value = state_manager.get_item('face_editor_model'))
def update_face_editor_eyebrow_direction(face_editor_eyebrow_direction : float) -> None:
    """Persist the eyebrow direction value."""
    state_manager.set_item('face_editor_eyebrow_direction', face_editor_eyebrow_direction)


def update_face_editor_eye_gaze_horizontal(face_editor_eye_gaze_horizontal : float) -> None:
    """Persist the horizontal eye gaze value."""
    state_manager.set_item('face_editor_eye_gaze_horizontal', face_editor_eye_gaze_horizontal)


def update_face_editor_eye_gaze_vertical(face_editor_eye_gaze_vertical : float) -> None:
    """Persist the vertical eye gaze value."""
    state_manager.set_item('face_editor_eye_gaze_vertical', face_editor_eye_gaze_vertical)


def update_face_editor_eye_open_ratio(face_editor_eye_open_ratio : float) -> None:
    """Persist the eye open ratio value."""
    state_manager.set_item('face_editor_eye_open_ratio', face_editor_eye_open_ratio)


def update_face_editor_lip_open_ratio(face_editor_lip_open_ratio : float) -> None:
    """Persist the lip open ratio value."""
    state_manager.set_item('face_editor_lip_open_ratio', face_editor_lip_open_ratio)


def update_face_editor_mouth_grim(face_editor_mouth_grim : float) -> None:
    """Persist the mouth grim value."""
    state_manager.set_item('face_editor_mouth_grim', face_editor_mouth_grim)


def update_face_editor_mouth_pout(face_editor_mouth_pout : float) -> None:
    """Persist the mouth pout value."""
    state_manager.set_item('face_editor_mouth_pout', face_editor_mouth_pout)


def update_face_editor_mouth_purse(face_editor_mouth_purse : float) -> None:
    """Persist the mouth purse value."""
    state_manager.set_item('face_editor_mouth_purse', face_editor_mouth_purse)


def update_face_editor_mouth_smile(face_editor_mouth_smile : float) -> None:
    """Persist the mouth smile value."""
    state_manager.set_item('face_editor_mouth_smile', face_editor_mouth_smile)


def update_face_editor_mouth_position_horizontal(face_editor_mouth_position_horizontal : float) -> None:
    """Persist the horizontal mouth position value."""
    state_manager.set_item('face_editor_mouth_position_horizontal', face_editor_mouth_position_horizontal)


def update_face_editor_mouth_position_vertical(face_editor_mouth_position_vertical : float) -> None:
    """Persist the vertical mouth position value."""
    state_manager.set_item('face_editor_mouth_position_vertical', face_editor_mouth_position_vertical)


def update_face_editor_head_pitch(face_editor_head_pitch : float) -> None:
    """Persist the head pitch value."""
    state_manager.set_item('face_editor_head_pitch', face_editor_head_pitch)


def update_face_editor_head_yaw(face_editor_head_yaw : float) -> None:
    """Persist the head yaw value."""
    state_manager.set_item('face_editor_head_yaw', face_editor_head_yaw)


def update_face_editor_head_roll(face_editor_head_roll : float) -> None:
    """Persist the head roll value."""
    state_manager.set_item('face_editor_head_roll', face_editor_head_roll)
@@ -1,81 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step, calculate_int_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.face_enhancer import choices as face_enhancer_choices
from facefusion.processors.modules.face_enhancer.types import FaceEnhancerModel, FaceEnhancerWeight
from facefusion.uis.core import get_ui_component, register_ui_component
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FACE_ENHANCER_WEIGHT_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Create the face enhancer components and register them for cross-module wiring."""
    global FACE_ENHANCER_MODEL_DROPDOWN
    global FACE_ENHANCER_BLEND_SLIDER
    global FACE_ENHANCER_WEIGHT_SLIDER

    has_face_enhancer = 'face_enhancer' in state_manager.get_item('processors')
    FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.face_enhancer'),
        choices = face_enhancer_choices.face_enhancer_models,
        value = state_manager.get_item('face_enhancer_model'),
        visible = has_face_enhancer
    )
    FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
        label = translator.get('uis.blend_slider', 'facefusion.processors.modules.face_enhancer'),
        value = state_manager.get_item('face_enhancer_blend'),
        step = calculate_int_step(face_enhancer_choices.face_enhancer_blend_range),
        minimum = face_enhancer_choices.face_enhancer_blend_range[0],
        maximum = face_enhancer_choices.face_enhancer_blend_range[-1],
        visible = has_face_enhancer
    )
    # the weight slider only shows when the loaded model actually exposes a weight input
    FACE_ENHANCER_WEIGHT_SLIDER = gradio.Slider(
        label = translator.get('uis.weight_slider', 'facefusion.processors.modules.face_enhancer'),
        value = state_manager.get_item('face_enhancer_weight'),
        step = calculate_float_step(face_enhancer_choices.face_enhancer_weight_range),
        minimum = face_enhancer_choices.face_enhancer_weight_range[0],
        maximum = face_enhancer_choices.face_enhancer_weight_range[-1],
        visible = has_face_enhancer and load_processor_module('face_enhancer').get_inference_pool() and load_processor_module('face_enhancer').has_weight_input()
    )
    register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
    register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
    register_ui_component('face_enhancer_weight_slider', FACE_ENHANCER_WEIGHT_SLIDER)
def listen() -> None:
    """Wire the face enhancer components to their update handlers."""
    FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = [ FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_WEIGHT_SLIDER ])
    FACE_ENHANCER_BLEND_SLIDER.release(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
    FACE_ENHANCER_WEIGHT_SLIDER.release(update_face_enhancer_weight, inputs = FACE_ENHANCER_WEIGHT_SLIDER)
    # follow processor selection changes made elsewhere in the UI
    processors_checkbox_group = get_ui_component('processors_checkbox_group')
    if processors_checkbox_group:
        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_ENHANCER_WEIGHT_SLIDER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.Slider]:
    """Toggle visibility of the face enhancer controls when the processor selection changes."""
    is_visible = 'face_enhancer' in processors
    face_enhancer_module = load_processor_module('face_enhancer')
    # the weight slider additionally requires a live pool and a weight-capable model
    show_weight = is_visible and face_enhancer_module.get_inference_pool() and face_enhancer_module.has_weight_input()
    return gradio.Dropdown(visible = is_visible), gradio.Slider(visible = is_visible), gradio.Slider(visible = show_weight)
def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> Tuple[gradio.Dropdown, gradio.Slider]:
    """Switch the face enhancer model and re-evaluate the weight slider visibility.

    Flushes the inference pool before storing the new model; on a failed
    pre-check both components are returned as empty updates.
    """
    face_enhancer_module = load_processor_module('face_enhancer')
    face_enhancer_module.clear_inference_pool()
    state_manager.set_item('face_enhancer_model', face_enhancer_model)
    if not face_enhancer_module.pre_check():
        return gradio.Dropdown(), gradio.Slider()
    return gradio.Dropdown(value = state_manager.get_item('face_enhancer_model')), gradio.Slider(visible = face_enhancer_module.has_weight_input())
def update_face_enhancer_blend(face_enhancer_blend : float) -> None:
    """Persist the blend amount, coerced to an integer."""
    state_manager.set_item('face_enhancer_blend', int(face_enhancer_blend))


def update_face_enhancer_weight(face_enhancer_weight : FaceEnhancerWeight) -> None:
    """Persist the model weight value."""
    state_manager.set_item('face_enhancer_weight', face_enhancer_weight)
@@ -1,50 +0,0 @@
from typing import Optional
import gradio
import facefusion.choices
from facefusion import face_landmarker, state_manager, translator
from facefusion.common_helper import calculate_float_step
from facefusion.types import FaceLandmarkerModel, Score
from facefusion.uis.core import register_ui_component
FACE_LANDMARKER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_LANDMARKER_SCORE_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Create the face landmarker components and register them for cross-module wiring."""
    global FACE_LANDMARKER_MODEL_DROPDOWN
    global FACE_LANDMARKER_SCORE_SLIDER

    FACE_LANDMARKER_MODEL_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.face_landmarker_model_dropdown'),
        choices = facefusion.choices.face_landmarker_models,
        value = state_manager.get_item('face_landmarker_model')
    )
    FACE_LANDMARKER_SCORE_SLIDER = gradio.Slider(
        label = translator.get('uis.face_landmarker_score_slider'),
        value = state_manager.get_item('face_landmarker_score'),
        step = calculate_float_step(facefusion.choices.face_landmarker_score_range),
        minimum = facefusion.choices.face_landmarker_score_range[0],
        maximum = facefusion.choices.face_landmarker_score_range[-1]
    )
    register_ui_component('face_landmarker_model_dropdown', FACE_LANDMARKER_MODEL_DROPDOWN)
    register_ui_component('face_landmarker_score_slider', FACE_LANDMARKER_SCORE_SLIDER)
def listen() -> None:
    """Wire the face landmarker components to their update handlers."""
    FACE_LANDMARKER_MODEL_DROPDOWN.change(update_face_landmarker_model, inputs = FACE_LANDMARKER_MODEL_DROPDOWN, outputs = FACE_LANDMARKER_MODEL_DROPDOWN)
    FACE_LANDMARKER_SCORE_SLIDER.release(update_face_landmarker_score, inputs = FACE_LANDMARKER_SCORE_SLIDER)
def update_face_landmarker_model(face_landmarker_model : FaceLandmarkerModel) -> gradio.Dropdown:
    """Switch the face landmarker model, flushing the inference pool first.

    Returns a dropdown update reflecting the stored model when pre_check()
    succeeds, otherwise an empty update.
    """
    face_landmarker.clear_inference_pool()
    state_manager.set_item('face_landmarker_model', face_landmarker_model)
    if face_landmarker.pre_check():
        # bug fix: the success-path dropdown was constructed but never
        # returned, so the update silently fell through to the empty one
        return gradio.Dropdown(value = state_manager.get_item('face_landmarker_model'))
    return gradio.Dropdown()
def update_face_landmarker_score(face_landmarker_score : Score) -> None:
    """Persist the minimum landmarker confidence score."""
    state_manager.set_item('face_landmarker_score', face_landmarker_score)
-181
View File
@@ -1,181 +0,0 @@
from typing import List, Optional, Tuple
import gradio
import facefusion.choices
from facefusion import face_masker, state_manager, translator
from facefusion.common_helper import calculate_float_step, calculate_int_step
from facefusion.sanitizer import sanitize_int_range
from facefusion.types import FaceMaskArea, FaceMaskRegion, FaceMaskType, FaceOccluderModel, FaceParserModel
from facefusion.uis.core import register_ui_component
FACE_OCCLUDER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_PARSER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_MASK_AREAS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_MASK_REGIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_MASK_BOX_WRAPPER : Optional[gradio.Group] = None
FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None
FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None
FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None
FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None
FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Create the face masker components and register them for cross-module wiring."""
    global FACE_OCCLUDER_MODEL_DROPDOWN
    global FACE_PARSER_MODEL_DROPDOWN
    global FACE_MASK_TYPES_CHECKBOX_GROUP
    global FACE_MASK_AREAS_CHECKBOX_GROUP
    global FACE_MASK_REGIONS_CHECKBOX_GROUP
    global FACE_MASK_BOX_WRAPPER
    global FACE_MASK_BLUR_SLIDER
    global FACE_MASK_PADDING_TOP_SLIDER
    global FACE_MASK_PADDING_RIGHT_SLIDER
    global FACE_MASK_PADDING_BOTTOM_SLIDER
    global FACE_MASK_PADDING_LEFT_SLIDER

    # visibility of the sub-controls depends on which mask types are enabled
    has_box_mask = 'box' in state_manager.get_item('face_mask_types')
    has_region_mask = 'region' in state_manager.get_item('face_mask_types')
    has_area_mask = 'area' in state_manager.get_item('face_mask_types')
    with gradio.Row():
        FACE_OCCLUDER_MODEL_DROPDOWN = gradio.Dropdown(
            label = translator.get('uis.face_occluder_model_dropdown'),
            choices = facefusion.choices.face_occluder_models,
            value = state_manager.get_item('face_occluder_model')
        )
        FACE_PARSER_MODEL_DROPDOWN = gradio.Dropdown(
            label = translator.get('uis.face_parser_model_dropdown'),
            choices = facefusion.choices.face_parser_models,
            value = state_manager.get_item('face_parser_model')
        )
    FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.face_mask_types_checkbox_group'),
        choices = facefusion.choices.face_mask_types,
        value = state_manager.get_item('face_mask_types')
    )
    FACE_MASK_AREAS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.face_mask_areas_checkbox_group'),
        choices = facefusion.choices.face_mask_areas,
        value = state_manager.get_item('face_mask_areas'),
        visible = has_area_mask
    )
    FACE_MASK_REGIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = translator.get('uis.face_mask_regions_checkbox_group'),
        choices = facefusion.choices.face_mask_regions,
        value = state_manager.get_item('face_mask_regions'),
        visible = has_region_mask
    )
    FACE_MASK_BLUR_SLIDER = gradio.Slider(
        label = translator.get('uis.face_mask_blur_slider'),
        step = calculate_float_step(facefusion.choices.face_mask_blur_range),
        minimum = facefusion.choices.face_mask_blur_range[0],
        maximum = facefusion.choices.face_mask_blur_range[-1],
        value = state_manager.get_item('face_mask_blur'),
        visible = has_box_mask
    )
    # the four padding sliders live in a wrapper group so they toggle as one unit
    with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_WRAPPER:
        with gradio.Row():
            FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider(
                label = translator.get('uis.face_mask_padding_top_slider'),
                step = calculate_int_step(facefusion.choices.face_mask_padding_range),
                minimum = facefusion.choices.face_mask_padding_range[0],
                maximum = facefusion.choices.face_mask_padding_range[-1],
                value = state_manager.get_item('face_mask_padding')[0]
            )
            FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider(
                label = translator.get('uis.face_mask_padding_right_slider'),
                step = calculate_int_step(facefusion.choices.face_mask_padding_range),
                minimum = facefusion.choices.face_mask_padding_range[0],
                maximum = facefusion.choices.face_mask_padding_range[-1],
                value = state_manager.get_item('face_mask_padding')[1]
            )
        with gradio.Row():
            FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider(
                label = translator.get('uis.face_mask_padding_bottom_slider'),
                step = calculate_int_step(facefusion.choices.face_mask_padding_range),
                minimum = facefusion.choices.face_mask_padding_range[0],
                maximum = facefusion.choices.face_mask_padding_range[-1],
                value = state_manager.get_item('face_mask_padding')[2]
            )
            FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider(
                label = translator.get('uis.face_mask_padding_left_slider'),
                step = calculate_int_step(facefusion.choices.face_mask_padding_range),
                minimum = facefusion.choices.face_mask_padding_range[0],
                maximum = facefusion.choices.face_mask_padding_range[-1],
                value = state_manager.get_item('face_mask_padding')[3]
            )
    register_ui_component('face_occluder_model_dropdown', FACE_OCCLUDER_MODEL_DROPDOWN)
    register_ui_component('face_parser_model_dropdown', FACE_PARSER_MODEL_DROPDOWN)
    register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP)
    register_ui_component('face_mask_areas_checkbox_group', FACE_MASK_AREAS_CHECKBOX_GROUP)
    register_ui_component('face_mask_regions_checkbox_group', FACE_MASK_REGIONS_CHECKBOX_GROUP)
    register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER)
    register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER)
    register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER)
    register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER)
    register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER)
def listen() -> None:
    """Wire the face masker components to their update handlers."""
    FACE_OCCLUDER_MODEL_DROPDOWN.change(update_face_occluder_model, inputs = FACE_OCCLUDER_MODEL_DROPDOWN)
    FACE_PARSER_MODEL_DROPDOWN.change(update_face_parser_model, inputs = FACE_PARSER_MODEL_DROPDOWN)
    FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_types, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_AREAS_CHECKBOX_GROUP, FACE_MASK_REGIONS_CHECKBOX_GROUP, FACE_MASK_BLUR_SLIDER, FACE_MASK_BOX_WRAPPER ])
    FACE_MASK_AREAS_CHECKBOX_GROUP.change(update_face_mask_areas, inputs = FACE_MASK_AREAS_CHECKBOX_GROUP, outputs = FACE_MASK_AREAS_CHECKBOX_GROUP)
    FACE_MASK_REGIONS_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGIONS_CHECKBOX_GROUP, outputs = FACE_MASK_REGIONS_CHECKBOX_GROUP)
    FACE_MASK_BLUR_SLIDER.release(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
    # every padding slider feeds all four values to the shared padding handler
    face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]
    for face_mask_padding_slider in face_mask_padding_sliders:
        face_mask_padding_slider.release(update_face_mask_padding, inputs = face_mask_padding_sliders)
def update_face_occluder_model(face_occluder_model : FaceOccluderModel) -> gradio.Dropdown:
    """Switch the face occluder model after flushing the masker inference pool."""
    face_masker.clear_inference_pool()
    state_manager.set_item('face_occluder_model', face_occluder_model)
    if not face_masker.pre_check():
        return gradio.Dropdown()
    return gradio.Dropdown(value = state_manager.get_item('face_occluder_model'))


def update_face_parser_model(face_parser_model : FaceParserModel) -> gradio.Dropdown:
    """Switch the face parser model after flushing the masker inference pool."""
    face_masker.clear_inference_pool()
    state_manager.set_item('face_parser_model', face_parser_model)
    if not face_masker.pre_check():
        return gradio.Dropdown()
    return gradio.Dropdown(value = state_manager.get_item('face_parser_model'))
def update_face_mask_types(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.CheckboxGroup, gradio.CheckboxGroup, gradio.Slider, gradio.Group]:
    """Persist the selected mask types and toggle the dependent controls.

    An empty selection falls back to every available mask type so the UI
    never ends up with no mask at all.
    """
    face_mask_types = face_mask_types or facefusion.choices.face_mask_types
    state_manager.set_item('face_mask_types', face_mask_types)
    has_box_mask = 'box' in face_mask_types
    has_area_mask = 'area' in face_mask_types
    has_region_mask = 'region' in face_mask_types
    return gradio.CheckboxGroup(value = state_manager.get_item('face_mask_types')), gradio.CheckboxGroup(visible = has_area_mask), gradio.CheckboxGroup(visible = has_region_mask), gradio.Slider(visible = has_box_mask), gradio.Group(visible = has_box_mask)
def update_face_mask_areas(face_mask_areas : List[FaceMaskArea]) -> gradio.CheckboxGroup:
    """Persist the mask areas, falling back to all areas when none are selected."""
    face_mask_areas = face_mask_areas or facefusion.choices.face_mask_areas
    state_manager.set_item('face_mask_areas', face_mask_areas)
    return gradio.CheckboxGroup(value = state_manager.get_item('face_mask_areas'))


def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup:
    """Persist the mask regions, falling back to all regions when none are selected."""
    face_mask_regions = face_mask_regions or facefusion.choices.face_mask_regions
    state_manager.set_item('face_mask_regions', face_mask_regions)
    return gradio.CheckboxGroup(value = state_manager.get_item('face_mask_regions'))
def update_face_mask_blur(face_mask_blur : float) -> None:
    """Persist the box mask blur amount."""
    state_manager.set_item('face_mask_blur', face_mask_blur)


def update_face_mask_padding(face_mask_padding_top : float, face_mask_padding_right : float, face_mask_padding_bottom : float, face_mask_padding_left : float) -> None:
    """Persist the box mask padding, clamping each side into the valid range."""
    face_mask_padding = tuple(sanitize_int_range(int(padding), facefusion.choices.face_mask_padding_range) for padding in (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left))
    state_manager.set_item('face_mask_padding', face_mask_padding)
-231
View File
@@ -1,231 +0,0 @@
from typing import List, Optional, Tuple
import cv2
import gradio
from gradio_rangeslider import RangeSlider
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step, calculate_int_step
from facefusion.face_analyser import get_many_faces
from facefusion.face_selector import sort_and_filter_faces
from facefusion.face_store import clear_static_faces
from facefusion.filesystem import is_image, is_video
from facefusion.types import FaceSelectorMode, FaceSelectorOrder, Gender, Race, VisionFrame
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
from facefusion.uis.types import ComponentOptions
from facefusion.uis.ui_helper import convert_str_none
from facefusion.vision import fit_cover_frame, read_static_image, read_video_frame
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_RACE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_AGE_RANGE_SLIDER : Optional[RangeSlider] = None
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Create the face selector components and register them for cross-module wiring."""
    global FACE_SELECTOR_MODE_DROPDOWN
    global FACE_SELECTOR_ORDER_DROPDOWN
    global FACE_SELECTOR_GENDER_DROPDOWN
    global FACE_SELECTOR_RACE_DROPDOWN
    global FACE_SELECTOR_AGE_RANGE_SLIDER
    global REFERENCE_FACE_POSITION_GALLERY
    global REFERENCE_FACE_DISTANCE_SLIDER

    reference_face_gallery_options : ComponentOptions =\
    {
        'label': translator.get('uis.reference_face_gallery'),
        'object_fit': 'cover',
        'columns': 7,
        'allow_preview': False,
        'elem_classes': 'box-face-selector',
        'visible': 'reference' in state_manager.get_item('face_selector_mode')
    }
    # seed the gallery with faces cropped from the current target, when one is set
    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        reference_face_gallery_options['value'] = extract_gallery_frames(target_vision_frame)
    if is_video(state_manager.get_item('target_path')):
        target_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
        reference_face_gallery_options['value'] = extract_gallery_frames(target_vision_frame)
    FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.face_selector_mode_dropdown'),
        choices = facefusion.choices.face_selector_modes,
        value = state_manager.get_item('face_selector_mode')
    )
    REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_options)
    with gradio.Group():
        with gradio.Row():
            FACE_SELECTOR_ORDER_DROPDOWN = gradio.Dropdown(
                label = translator.get('uis.face_selector_order_dropdown'),
                choices = facefusion.choices.face_selector_orders,
                value = state_manager.get_item('face_selector_order')
            )
            # gender and race offer an extra 'none' choice meaning "no filter"
            FACE_SELECTOR_GENDER_DROPDOWN = gradio.Dropdown(
                label = translator.get('uis.face_selector_gender_dropdown'),
                choices = [ 'none' ] + facefusion.choices.face_selector_genders,
                value = state_manager.get_item('face_selector_gender') or 'none'
            )
            FACE_SELECTOR_RACE_DROPDOWN = gradio.Dropdown(
                label = translator.get('uis.face_selector_race_dropdown'),
                choices = [ 'none' ] + facefusion.choices.face_selector_races,
                value = state_manager.get_item('face_selector_race') or 'none'
            )
        with gradio.Row():
            face_selector_age_start = state_manager.get_item('face_selector_age_start') or facefusion.choices.face_selector_age_range[0]
            face_selector_age_end = state_manager.get_item('face_selector_age_end') or facefusion.choices.face_selector_age_range[-1]
            FACE_SELECTOR_AGE_RANGE_SLIDER = RangeSlider(
                label = translator.get('uis.face_selector_age_range_slider'),
                minimum = facefusion.choices.face_selector_age_range[0],
                maximum = facefusion.choices.face_selector_age_range[-1],
                value = (face_selector_age_start, face_selector_age_end),
                step = calculate_int_step(facefusion.choices.face_selector_age_range)
            )
    REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
        label = translator.get('uis.reference_face_distance_slider'),
        value = state_manager.get_item('reference_face_distance'),
        step = calculate_float_step(facefusion.choices.reference_face_distance_range),
        minimum = facefusion.choices.reference_face_distance_range[0],
        maximum = facefusion.choices.reference_face_distance_range[-1],
        visible = 'reference' in state_manager.get_item('face_selector_mode')
    )
    register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
    register_ui_component('face_selector_order_dropdown', FACE_SELECTOR_ORDER_DROPDOWN)
    register_ui_component('face_selector_gender_dropdown', FACE_SELECTOR_GENDER_DROPDOWN)
    register_ui_component('face_selector_race_dropdown', FACE_SELECTOR_RACE_DROPDOWN)
    register_ui_component('face_selector_age_range_slider', FACE_SELECTOR_AGE_RANGE_SLIDER)
    register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
    register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
def listen() -> None:
    """Wire the face selector components, plus related components from other modules."""
    FACE_SELECTOR_MODE_DROPDOWN.change(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
    FACE_SELECTOR_ORDER_DROPDOWN.change(update_face_selector_order, inputs = FACE_SELECTOR_ORDER_DROPDOWN, outputs = REFERENCE_FACE_POSITION_GALLERY)
    FACE_SELECTOR_GENDER_DROPDOWN.change(update_face_selector_gender, inputs = FACE_SELECTOR_GENDER_DROPDOWN, outputs = REFERENCE_FACE_POSITION_GALLERY)
    FACE_SELECTOR_RACE_DROPDOWN.change(update_face_selector_race, inputs = FACE_SELECTOR_RACE_DROPDOWN, outputs = REFERENCE_FACE_POSITION_GALLERY)
    FACE_SELECTOR_AGE_RANGE_SLIDER.release(update_face_selector_age_range, inputs = FACE_SELECTOR_AGE_RANGE_SLIDER, outputs = REFERENCE_FACE_POSITION_GALLERY)
    REFERENCE_FACE_DISTANCE_SLIDER.release(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
    # NOTE(review): nesting reconstructed from flattened source — the face position
    # select handler is assumed to be wired unconditionally; confirm against upstream
    preview_frame_slider = get_ui_component('preview_frame_slider')
    if preview_frame_slider:
        REFERENCE_FACE_POSITION_GALLERY.select(update_reference_frame_number, inputs = preview_frame_slider)
    REFERENCE_FACE_POSITION_GALLERY.select(update_reference_face_position)
    # changing or clearing the target invalidates the reference face selection
    for ui_component in get_ui_components([ 'target_image', 'target_video' ]):
        for method in [ 'change', 'clear' ]:
            getattr(ui_component, method)(clear_reference_frame_number)
            getattr(ui_component, method)(clear_reference_face_position)
            getattr(ui_component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
    # detector settings affect which faces are found, so rebuild the gallery
    for ui_component in get_ui_components([ 'face_detector_model_dropdown', 'face_detector_size_dropdown', 'face_detector_angles_checkbox_group' ]):
        ui_component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
    face_detector_score_slider = get_ui_component('face_detector_score_slider')
    if face_detector_score_slider:
        face_detector_score_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
    preview_frame_slider = get_ui_component('preview_frame_slider')
    if preview_frame_slider:
        for method in [ 'change', 'release' ]:
            getattr(preview_frame_slider, method)(update_reference_position_gallery, inputs = preview_frame_slider, outputs = REFERENCE_FACE_POSITION_GALLERY, show_progress = 'hidden')
def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
    """Persist the selector mode and toggle the reference-only widgets.

    The reference gallery and distance slider are only meaningful in
    'reference' mode; 'many' and 'one' hide them. The original fell through
    with an implicit None for any unexpected mode, breaking the two-output
    gradio binding — now every mode yields a valid pair of updates.
    """
    state_manager.set_item('face_selector_mode', face_selector_mode)
    has_reference = face_selector_mode == 'reference'
    return gradio.Gallery(visible = has_reference), gradio.Slider(visible = has_reference)
def update_face_selector_order(face_analyser_order : FaceSelectorOrder) -> gradio.Gallery:
    """Persist the face ordering ('none' becomes None) and refresh the gallery."""
    state_manager.set_item('face_selector_order', convert_str_none(face_analyser_order))
    return update_reference_position_gallery()


def update_face_selector_gender(face_selector_gender : Gender) -> gradio.Gallery:
    """Persist the gender filter ('none' becomes None) and refresh the gallery."""
    state_manager.set_item('face_selector_gender', convert_str_none(face_selector_gender))
    return update_reference_position_gallery()


def update_face_selector_race(face_selector_race : Race) -> gradio.Gallery:
    """Persist the race filter ('none' becomes None) and refresh the gallery."""
    state_manager.set_item('face_selector_race', convert_str_none(face_selector_race))
    return update_reference_position_gallery()


def update_face_selector_age_range(face_selector_age_range : Tuple[float, float]) -> gradio.Gallery:
    """Persist the inclusive age bounds and refresh the gallery."""
    face_selector_age_start, face_selector_age_end = face_selector_age_range
    state_manager.set_item('face_selector_age_start', int(face_selector_age_start))
    state_manager.set_item('face_selector_age_end', int(face_selector_age_end))
    return update_reference_position_gallery()
def update_reference_face_position(event : gradio.SelectData) -> None:
    """Store the gallery index of the face the user selected."""
    state_manager.set_item('reference_face_position', event.index)


def clear_reference_face_position() -> None:
    """Reset the selected face back to the first position."""
    state_manager.set_item('reference_face_position', 0)


def update_reference_face_distance(reference_face_distance : float) -> None:
    """Persist the maximum reference face distance."""
    state_manager.set_item('reference_face_distance', reference_face_distance)


def update_reference_frame_number(reference_frame_number : int = 0) -> None:
    """Persist the frame number the reference face was picked from."""
    state_manager.set_item('reference_frame_number', reference_frame_number)


def clear_reference_frame_number() -> None:
    """Reset the reference frame number back to the first frame."""
    state_manager.set_item('reference_frame_number', 0)


def clear_and_update_reference_position_gallery() -> gradio.Gallery:
    """Drop the cached face detections, then rebuild the reference gallery."""
    clear_static_faces()
    return update_reference_position_gallery()
def update_reference_position_gallery(frame_number : int = 0) -> gradio.Gallery:
    """Rebuild the reference face gallery from the current target at frame_number."""
    target_path = state_manager.get_item('target_path')
    gallery_vision_frames = []
    if is_image(target_path):
        gallery_vision_frames = extract_gallery_frames(read_static_image(target_path))
    if is_video(target_path):
        gallery_vision_frames = extract_gallery_frames(read_video_frame(target_path, frame_number))
    if not gallery_vision_frames:
        return gradio.Gallery(value = None)
    return gradio.Gallery(value = gallery_vision_frames)
def extract_gallery_frames(target_vision_frame : VisionFrame) -> List[VisionFrame]:
    """Crop every detected face from the frame into 128x128 RGB thumbnails.

    Each bounding box is grown by 25% on every side (clamped at the frame
    origin) before cropping, so the thumbnail shows some context around the face.
    """
    gallery_vision_frames = []
    faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))
    for face in faces:
        start_x, start_y, end_x, end_y = map(int, face.bounding_box)
        padding_x = int((end_x - start_x) * 0.25)
        padding_y = int((end_y - start_y) * 0.25)
        start_x = max(0, start_x - padding_x)
        start_y = max(0, start_y - padding_y)
        end_x = max(0, end_x + padding_x)
        end_y = max(0, end_y + padding_y)
        crop_vision_frame = target_vision_frame[start_y:end_y, start_x:end_x]
        crop_vision_frame = fit_cover_frame(crop_vision_frame, (128, 128))
        # gradio galleries expect RGB while frames are handled as BGR
        gallery_vision_frames.append(cv2.cvtColor(crop_vision_frame, cv2.COLOR_BGR2RGB))
    return gallery_vision_frames
@@ -1,84 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step, get_first
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.face_swapper import choices as face_swapper_choices
from facefusion.processors.modules.face_swapper.types import FaceSwapperModel, FaceSwapperWeight
from facefusion.uis.core import get_ui_component, register_ui_component
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SWAPPER_PIXEL_BOOST_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SWAPPER_WEIGHT_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
    """Create the face swapper components and register them for cross-module wiring."""
    global FACE_SWAPPER_MODEL_DROPDOWN
    global FACE_SWAPPER_PIXEL_BOOST_DROPDOWN
    global FACE_SWAPPER_WEIGHT_SLIDER

    has_face_swapper = 'face_swapper' in state_manager.get_item('processors')
    FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.face_swapper'),
        choices = face_swapper_choices.face_swapper_models,
        value = state_manager.get_item('face_swapper_model'),
        visible = has_face_swapper
    )
    # pixel boost choices depend on the currently selected model
    FACE_SWAPPER_PIXEL_BOOST_DROPDOWN = gradio.Dropdown(
        label = translator.get('uis.pixel_boost_dropdown', 'facefusion.processors.modules.face_swapper'),
        choices = face_swapper_choices.face_swapper_set.get(state_manager.get_item('face_swapper_model')),
        value = state_manager.get_item('face_swapper_pixel_boost'),
        visible = has_face_swapper
    )
    FACE_SWAPPER_WEIGHT_SLIDER = gradio.Slider(
        label = translator.get('uis.weight_slider', 'facefusion.processors.modules.face_swapper'),
        value = state_manager.get_item('face_swapper_weight'),
        minimum = face_swapper_choices.face_swapper_weight_range[0],
        maximum = face_swapper_choices.face_swapper_weight_range[-1],
        step = calculate_float_step(face_swapper_choices.face_swapper_weight_range),
        visible = has_face_swapper and has_face_swapper_weight()
    )
    register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
    register_ui_component('face_swapper_pixel_boost_dropdown', FACE_SWAPPER_PIXEL_BOOST_DROPDOWN)
    register_ui_component('face_swapper_weight_slider', FACE_SWAPPER_WEIGHT_SLIDER)
def listen() -> None:
    """Wire the face swapper components to their update handlers."""
    FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_SWAPPER_PIXEL_BOOST_DROPDOWN, FACE_SWAPPER_WEIGHT_SLIDER ])
    FACE_SWAPPER_PIXEL_BOOST_DROPDOWN.change(update_face_swapper_pixel_boost, inputs = FACE_SWAPPER_PIXEL_BOOST_DROPDOWN)
    FACE_SWAPPER_WEIGHT_SLIDER.change(update_face_swapper_weight, inputs = FACE_SWAPPER_WEIGHT_SLIDER)
    # follow processor selection changes made elsewhere in the UI
    processors_checkbox_group = get_ui_component('processors_checkbox_group')
    if processors_checkbox_group:
        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_SWAPPER_PIXEL_BOOST_DROPDOWN, FACE_SWAPPER_WEIGHT_SLIDER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider]:
	"""Show or hide the face swapper controls based on the selected processors."""
	is_visible = 'face_swapper' in processors
	model_dropdown = gradio.Dropdown(visible = is_visible)
	pixel_boost_dropdown = gradio.Dropdown(visible = is_visible)
	weight_slider = gradio.Slider(visible = is_visible)
	return model_dropdown, pixel_boost_dropdown, weight_slider
def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider]:
	"""Switch the face swapper model and refresh the dependent controls."""
	face_swapper_module = load_processor_module('face_swapper')
	# Drop cached inference sessions so the next run loads the newly selected model.
	face_swapper_module.clear_inference_pool()
	state_manager.set_item('face_swapper_model', face_swapper_model)
	if face_swapper_module.pre_check():
		# Pixel boost choices depend on the model; default to the first available choice.
		face_swapper_pixel_boost_dropdown_choices = face_swapper_choices.face_swapper_set.get(state_manager.get_item('face_swapper_model'))
		state_manager.set_item('face_swapper_pixel_boost', get_first(face_swapper_pixel_boost_dropdown_choices))
		return gradio.Dropdown(value = state_manager.get_item('face_swapper_model')), gradio.Dropdown(value = state_manager.get_item('face_swapper_pixel_boost'), choices = face_swapper_pixel_boost_dropdown_choices), gradio.Slider(visible = has_face_swapper_weight())
	# pre_check() failed (e.g. model assets unavailable) - leave the controls untouched.
	return gradio.Dropdown(), gradio.Dropdown(), gradio.Slider()
def update_face_swapper_pixel_boost(face_swapper_pixel_boost : str) -> None:
	"""Persist the selected pixel boost resolution."""
	state_manager.set_item('face_swapper_pixel_boost', face_swapper_pixel_boost)


def update_face_swapper_weight(face_swapper_weight : FaceSwapperWeight) -> None:
	"""Persist the selected face swapper weight."""
	state_manager.set_item('face_swapper_weight', face_swapper_weight)
def has_face_swapper_weight() -> bool:
	"""Return True when the current face swapper model supports a weight slider."""
	weighted_models = { 'ghost_1_256', 'ghost_2_256', 'ghost_3_256', 'hififace_unofficial_256', 'hyperswap_1a_256', 'hyperswap_1b_256', 'hyperswap_1c_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_unofficial_512' }
	return state_manager.get_item('face_swapper_model') in weighted_models
@@ -1,81 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.frame_colorizer import choices as frame_colorizer_choices
from facefusion.processors.modules.frame_colorizer.types import FrameColorizerModel
from facefusion.uis.core import get_ui_component, register_ui_component
# Module level component handles, populated by render() and wired up in listen().
FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_COLORIZER_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_COLORIZER_BLEND_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	"""Create the frame colorizer controls and register them for cross-module access."""
	global FRAME_COLORIZER_MODEL_DROPDOWN
	global FRAME_COLORIZER_SIZE_DROPDOWN
	global FRAME_COLORIZER_BLEND_SLIDER
	# Controls start hidden unless the frame colorizer processor is active.
	has_frame_colorizer = 'frame_colorizer' in state_manager.get_item('processors')
	FRAME_COLORIZER_MODEL_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.frame_colorizer'),
		choices = frame_colorizer_choices.frame_colorizer_models,
		value = state_manager.get_item('frame_colorizer_model'),
		visible = has_frame_colorizer
	)
	FRAME_COLORIZER_SIZE_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.size_dropdown', 'facefusion.processors.modules.frame_colorizer'),
		choices = frame_colorizer_choices.frame_colorizer_sizes,
		value = state_manager.get_item('frame_colorizer_size'),
		visible = has_frame_colorizer
	)
	FRAME_COLORIZER_BLEND_SLIDER = gradio.Slider(
		label = translator.get('uis.blend_slider', 'facefusion.processors.modules.frame_colorizer'),
		value = state_manager.get_item('frame_colorizer_blend'),
		step = calculate_int_step(frame_colorizer_choices.frame_colorizer_blend_range),
		minimum = frame_colorizer_choices.frame_colorizer_blend_range[0],
		maximum = frame_colorizer_choices.frame_colorizer_blend_range[-1],
		visible = has_frame_colorizer
	)
	# Registration makes these components reachable from other UI modules.
	register_ui_component('frame_colorizer_model_dropdown', FRAME_COLORIZER_MODEL_DROPDOWN)
	register_ui_component('frame_colorizer_size_dropdown', FRAME_COLORIZER_SIZE_DROPDOWN)
	register_ui_component('frame_colorizer_blend_slider', FRAME_COLORIZER_BLEND_SLIDER)
def listen() -> None:
	"""Wire the frame colorizer controls to their update handlers."""
	FRAME_COLORIZER_MODEL_DROPDOWN.change(update_frame_colorizer_model, inputs = FRAME_COLORIZER_MODEL_DROPDOWN, outputs = FRAME_COLORIZER_MODEL_DROPDOWN)
	FRAME_COLORIZER_SIZE_DROPDOWN.change(update_frame_colorizer_size, inputs = FRAME_COLORIZER_SIZE_DROPDOWN)
	# release (not change) avoids firing on every intermediate slider position.
	FRAME_COLORIZER_BLEND_SLIDER.release(update_frame_colorizer_blend, inputs = FRAME_COLORIZER_BLEND_SLIDER)
	processors_checkbox_group = get_ui_component('processors_checkbox_group')
	if processors_checkbox_group:
		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FRAME_COLORIZER_MODEL_DROPDOWN, FRAME_COLORIZER_BLEND_SLIDER, FRAME_COLORIZER_SIZE_DROPDOWN ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.Dropdown]:
	"""Show or hide the frame colorizer controls based on the selected processors."""
	is_visible = 'frame_colorizer' in processors
	model_dropdown = gradio.Dropdown(visible = is_visible)
	blend_slider = gradio.Slider(visible = is_visible)
	size_dropdown = gradio.Dropdown(visible = is_visible)
	return model_dropdown, blend_slider, size_dropdown
def update_frame_colorizer_model(frame_colorizer_model : FrameColorizerModel) -> gradio.Dropdown:
	"""Switch the frame colorizer model and confirm the selection in the dropdown."""
	processor_module = load_processor_module('frame_colorizer')
	# Invalidate cached inference sessions before the model switch takes effect.
	processor_module.clear_inference_pool()
	state_manager.set_item('frame_colorizer_model', frame_colorizer_model)
	if not processor_module.pre_check():
		# Model assets unavailable - leave the dropdown untouched.
		return gradio.Dropdown()
	return gradio.Dropdown(value = state_manager.get_item('frame_colorizer_model'))
def update_frame_colorizer_size(frame_colorizer_size : str) -> None:
	"""Persist the selected frame colorizer size."""
	state_manager.set_item('frame_colorizer_size', frame_colorizer_size)


def update_frame_colorizer_blend(frame_colorizer_blend : float) -> None:
	"""Persist the blend value, coercing the slider float to an int."""
	state_manager.set_item('frame_colorizer_blend', int(frame_colorizer_blend))
@@ -1,64 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.frame_enhancer import choices as frame_enhancer_choices
from facefusion.processors.modules.frame_enhancer.types import FrameEnhancerModel
from facefusion.uis.core import get_ui_component, register_ui_component
# Module level component handles, populated by render() and wired up in listen().
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	"""Create the frame enhancer controls and register them for cross-module access."""
	global FRAME_ENHANCER_MODEL_DROPDOWN
	global FRAME_ENHANCER_BLEND_SLIDER
	# Controls start hidden unless the frame enhancer processor is active.
	has_frame_enhancer = 'frame_enhancer' in state_manager.get_item('processors')
	FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.frame_enhancer'),
		choices = frame_enhancer_choices.frame_enhancer_models,
		value = state_manager.get_item('frame_enhancer_model'),
		visible = has_frame_enhancer
	)
	FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
		label = translator.get('uis.blend_slider', 'facefusion.processors.modules.frame_enhancer'),
		value = state_manager.get_item('frame_enhancer_blend'),
		step = calculate_int_step(frame_enhancer_choices.frame_enhancer_blend_range),
		minimum = frame_enhancer_choices.frame_enhancer_blend_range[0],
		maximum = frame_enhancer_choices.frame_enhancer_blend_range[-1],
		visible = has_frame_enhancer
	)
	register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
def listen() -> None:
	"""Wire the frame enhancer controls to their update handlers."""
	FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
	# release (not change) avoids firing on every intermediate slider position.
	FRAME_ENHANCER_BLEND_SLIDER.release(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
	processors_checkbox_group = get_ui_component('processors_checkbox_group')
	if processors_checkbox_group:
		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER ])


def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
	"""Show or hide the frame enhancer controls based on the selected processors."""
	has_frame_enhancer = 'frame_enhancer' in processors
	return gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer)
def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
	"""Switch the frame enhancer model and confirm the selection in the dropdown."""
	frame_enhancer_module = load_processor_module('frame_enhancer')
	# Invalidate cached inference sessions before the model switch takes effect.
	frame_enhancer_module.clear_inference_pool()
	state_manager.set_item('frame_enhancer_model', frame_enhancer_model)
	if frame_enhancer_module.pre_check():
		return gradio.Dropdown(value = state_manager.get_item('frame_enhancer_model'))
	# Model assets unavailable - leave the dropdown untouched.
	return gradio.Dropdown()


def update_frame_enhancer_blend(frame_enhancer_blend : float) -> None:
	"""Persist the blend value, coercing the slider float to an int."""
	state_manager.set_item('frame_enhancer_blend', int(frame_enhancer_blend))
-110
View File
@@ -1,110 +0,0 @@
from time import sleep
from typing import Optional, Tuple
import gradio
from facefusion import process_manager, state_manager, translator
from facefusion.args import collect_step_args
from facefusion.core import process_step
from facefusion.filesystem import is_directory, is_image, is_video
from facefusion.jobs import job_helper, job_manager, job_runner, job_store
from facefusion.temp_helper import clear_temp_directory
from facefusion.types import Args, UiWorkflow
from facefusion.uis.core import get_ui_component
from facefusion.uis.ui_helper import suggest_output_path
# Module level component handles, populated by render() and wired up in listen().
INSTANT_RUNNER_WRAPPER : Optional[gradio.Row] = None
INSTANT_RUNNER_START_BUTTON : Optional[gradio.Button] = None
INSTANT_RUNNER_STOP_BUTTON : Optional[gradio.Button] = None
INSTANT_RUNNER_CLEAR_BUTTON : Optional[gradio.Button] = None


def render() -> None:
	"""Create the instant runner buttons; only rendered when the jobs store initializes."""
	global INSTANT_RUNNER_WRAPPER
	global INSTANT_RUNNER_START_BUTTON
	global INSTANT_RUNNER_STOP_BUTTON
	global INSTANT_RUNNER_CLEAR_BUTTON
	if job_manager.init_jobs(state_manager.get_item('jobs_path')):
		# The wrapper is only visible while the instant_runner workflow is selected.
		is_instant_runner = state_manager.get_item('ui_workflow') == 'instant_runner'
		with gradio.Row(visible = is_instant_runner) as INSTANT_RUNNER_WRAPPER:
			INSTANT_RUNNER_START_BUTTON = gradio.Button(
				value = translator.get('uis.start_button'),
				variant = 'primary',
				size = 'sm'
			)
			# Stop starts hidden; start()/run() swap start and stop visibility.
			INSTANT_RUNNER_STOP_BUTTON = gradio.Button(
				value = translator.get('uis.stop_button'),
				variant = 'primary',
				size = 'sm',
				visible = False
			)
			INSTANT_RUNNER_CLEAR_BUTTON = gradio.Button(
				value = translator.get('uis.clear_button'),
				size = 'sm'
			)
def listen() -> None:
	"""Wire the instant runner buttons to the processing pipeline."""
	output_image = get_ui_component('output_image')
	output_video = get_ui_component('output_video')
	ui_workflow_dropdown = get_ui_component('ui_workflow_dropdown')
	if output_image and output_video:
		# Two handlers on start: the first swaps the buttons as soon as processing
		# begins, the second performs the actual (long-running) job.
		INSTANT_RUNNER_START_BUTTON.click(start, outputs = [ INSTANT_RUNNER_START_BUTTON, INSTANT_RUNNER_STOP_BUTTON ])
		INSTANT_RUNNER_START_BUTTON.click(run, outputs = [ INSTANT_RUNNER_START_BUTTON, INSTANT_RUNNER_STOP_BUTTON, output_image, output_video ])
		INSTANT_RUNNER_STOP_BUTTON.click(stop, outputs = [ INSTANT_RUNNER_START_BUTTON, INSTANT_RUNNER_STOP_BUTTON, output_image, output_video ])
		INSTANT_RUNNER_CLEAR_BUTTON.click(clear, outputs = [ output_image, output_video ])
	if ui_workflow_dropdown:
		ui_workflow_dropdown.change(remote_update, inputs = ui_workflow_dropdown, outputs = INSTANT_RUNNER_WRAPPER)


def remote_update(ui_workflow : UiWorkflow) -> gradio.Row:
	"""Show the instant runner row only while its workflow is selected."""
	is_instant_runner = ui_workflow == 'instant_runner'
	return gradio.Row(visible = is_instant_runner)
def start() -> Tuple[gradio.Button, gradio.Button]:
	"""Block until processing has actually begun, then swap start for stop."""
	# Busy-wait for the sibling run() handler to flip the process manager state.
	while not process_manager.is_processing():
		sleep(0.5)
	return gradio.Button(visible = False), gradio.Button(visible = True)
def run() -> Tuple[gradio.Button, gradio.Button, gradio.Image, gradio.Video]:
	"""Run a one-off job from the current UI state and show the resulting media."""
	step_args = collect_step_args()
	output_path = step_args.get('output_path')
	# When the output path is a directory, derive a concrete file name from the target.
	if is_directory(step_args.get('output_path')):
		step_args['output_path'] = suggest_output_path(step_args.get('output_path'), state_manager.get_item('target_path'))
	if job_manager.init_jobs(state_manager.get_item('jobs_path')):
		create_and_run_job(step_args)
		# Restore the user-facing output path (possibly a directory) after the run.
		state_manager.set_item('output_path', output_path)
	if is_image(step_args.get('output_path')):
		return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Image(value = step_args.get('output_path'), visible = True), gradio.Video(value = None, visible = False)
	if is_video(step_args.get('output_path')):
		return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Image(value = None, visible = False), gradio.Video(value = step_args.get('output_path'), visible = True)
	return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Image(value = None), gradio.Video(value = None)


def create_and_run_job(step_args : Args) -> bool:
	"""Create a single-step job from step_args, submit it and run it to completion."""
	job_id = job_helper.suggest_job_id('ui')
	# Sync all job-relevant state items so the step captures the current UI state.
	for key in job_store.get_job_keys():
		state_manager.sync_item(key) #type:ignore[arg-type]
	return job_manager.create_job(job_id) and job_manager.add_step(job_id, step_args) and job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step)
def stop() -> Tuple[gradio.Button, gradio.Button, gradio.Image, gradio.Video]:
	"""Abort processing, restore the start button and clear the previews."""
	process_manager.stop()
	return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Image(value = None), gradio.Video(value = None)


def clear() -> Tuple[gradio.Image, gradio.Video]:
	"""Wait for processing to finish, remove temp files and clear the previews."""
	while process_manager.is_processing():
		sleep(0.5)
	if state_manager.get_item('target_path'):
		clear_temp_directory(state_manager.get_item('target_path'))
	return gradio.Image(value = None), gradio.Video(value = None)
-50
View File
@@ -1,50 +0,0 @@
from typing import List, Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import get_first
from facefusion.jobs import job_list, job_manager
from facefusion.types import JobStatus
from facefusion.uis.core import get_ui_component
# Module level component handles, populated by render() and wired up in listen().
JOB_LIST_JOBS_DATAFRAME : Optional[gradio.Dataframe] = None
JOB_LIST_REFRESH_BUTTON : Optional[gradio.Button] = None


def render() -> None:
	"""Create the job list table; only rendered when the jobs store initializes."""
	global JOB_LIST_JOBS_DATAFRAME
	global JOB_LIST_REFRESH_BUTTON
	if job_manager.init_jobs(state_manager.get_item('jobs_path')):
		# Seed the table with the first job status from the global choices.
		job_status = get_first(facefusion.choices.job_statuses)
		job_headers, job_contents = job_list.compose_job_list(job_status)
		JOB_LIST_JOBS_DATAFRAME = gradio.Dataframe(
			headers = job_headers,
			value = job_contents,
			datatype = [ 'str', 'number', 'date', 'date', 'str' ],
			show_label = False
		)
		JOB_LIST_REFRESH_BUTTON = gradio.Button(
			value = translator.get('uis.refresh_button'),
			variant = 'primary',
			size = 'sm'
		)
def listen() -> None:
	"""Wire the job list table to the status filter and the refresh button."""
	job_list_job_status_checkbox_group = get_ui_component('job_list_job_status_checkbox_group')
	if job_list_job_status_checkbox_group:
		job_list_job_status_checkbox_group.change(update_job_dataframe, inputs = job_list_job_status_checkbox_group, outputs = JOB_LIST_JOBS_DATAFRAME)
		# NOTE(review): the refresh button is only wired when the checkbox group exists,
		# because it needs the group as its input - confirm this gating is intended.
		JOB_LIST_REFRESH_BUTTON.click(update_job_dataframe, inputs = job_list_job_status_checkbox_group, outputs = JOB_LIST_JOBS_DATAFRAME)
def update_job_dataframe(job_statuses : List[JobStatus]) -> gradio.Dataframe:
	"""Rebuild the job table rows for every selected job status."""
	# compose_job_list() returns (headers, contents); only the contents are needed here.
	all_job_contents = [ job_content for job_status in job_statuses for job_content in job_list.compose_job_list(job_status)[1] ]
	return gradio.Dataframe(value = all_job_contents)
@@ -1,35 +0,0 @@
from typing import List, Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import get_first
from facefusion.jobs import job_manager
from facefusion.types import JobStatus
from facefusion.uis.core import register_ui_component
# Module level component handle, populated by render() and wired up in listen().
JOB_LIST_JOB_STATUS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None


def render() -> None:
	"""Create the job status filter; only rendered when the jobs store initializes."""
	global JOB_LIST_JOB_STATUS_CHECKBOX_GROUP
	if job_manager.init_jobs(state_manager.get_item('jobs_path')):
		# Pre-select the first job status from the global choices.
		job_status = get_first(facefusion.choices.job_statuses)
		JOB_LIST_JOB_STATUS_CHECKBOX_GROUP = gradio.CheckboxGroup(
			label = translator.get('uis.job_list_status_checkbox_group'),
			choices = facefusion.choices.job_statuses,
			value = job_status
		)
		register_ui_component('job_list_job_status_checkbox_group', JOB_LIST_JOB_STATUS_CHECKBOX_GROUP)
def listen() -> None:
	"""Wire the job status filter to its normalizing update handler."""
	JOB_LIST_JOB_STATUS_CHECKBOX_GROUP.change(update_job_status_checkbox_group, inputs = JOB_LIST_JOB_STATUS_CHECKBOX_GROUP, outputs = JOB_LIST_JOB_STATUS_CHECKBOX_GROUP)


def update_job_status_checkbox_group(job_statuses : List[JobStatus]) -> gradio.CheckboxGroup:
	"""Prevent an empty selection by falling back to all job statuses."""
	job_statuses = job_statuses or facefusion.choices.job_statuses
	return gradio.CheckboxGroup(value = job_statuses)
-194
View File
@@ -1,194 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import logger, state_manager, translator
from facefusion.args import collect_step_args
from facefusion.common_helper import get_first, get_last
from facefusion.filesystem import is_directory
from facefusion.jobs import job_manager
from facefusion.types import UiWorkflow
from facefusion.uis import choices as uis_choices
from facefusion.uis.core import get_ui_component
from facefusion.uis.types import JobManagerAction
from facefusion.uis.ui_helper import convert_int_none, convert_str_none, suggest_output_path
# Module level component handles, populated by render() and wired up in listen().
JOB_MANAGER_WRAPPER : Optional[gradio.Column] = None
JOB_MANAGER_JOB_ACTION_DROPDOWN : Optional[gradio.Dropdown] = None
JOB_MANAGER_JOB_ID_TEXTBOX : Optional[gradio.Textbox] = None
JOB_MANAGER_JOB_ID_DROPDOWN : Optional[gradio.Dropdown] = None
JOB_MANAGER_STEP_INDEX_DROPDOWN : Optional[gradio.Dropdown] = None
JOB_MANAGER_APPLY_BUTTON : Optional[gradio.Button] = None


def render() -> None:
	"""Create the job manager panel; only rendered when the jobs store initializes."""
	global JOB_MANAGER_WRAPPER
	global JOB_MANAGER_JOB_ACTION_DROPDOWN
	global JOB_MANAGER_JOB_ID_TEXTBOX
	global JOB_MANAGER_JOB_ID_DROPDOWN
	global JOB_MANAGER_STEP_INDEX_DROPDOWN
	global JOB_MANAGER_APPLY_BUTTON
	if job_manager.init_jobs(state_manager.get_item('jobs_path')):
		# The panel is only visible while the job_manager workflow is selected.
		is_job_manager = state_manager.get_item('ui_workflow') == 'job_manager'
		# 'none' is a sentinel shown when no drafted jobs exist.
		drafted_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
		with gradio.Column(visible = is_job_manager) as JOB_MANAGER_WRAPPER:
			JOB_MANAGER_JOB_ACTION_DROPDOWN = gradio.Dropdown(
				label = translator.get('uis.job_manager_job_action_dropdown'),
				choices = uis_choices.job_manager_actions,
				value = get_first(uis_choices.job_manager_actions)
			)
			# Textbox for entering a new job id (job-create); swapped with the
			# dropdown below depending on the selected action.
			# NOTE(review): the label reuses the dropdown translation key - confirm intended.
			JOB_MANAGER_JOB_ID_TEXTBOX = gradio.Textbox(
				label = translator.get('uis.job_manager_job_id_dropdown'),
				max_lines = 1,
				interactive = True
			)
			JOB_MANAGER_JOB_ID_DROPDOWN = gradio.Dropdown(
				label = translator.get('uis.job_manager_job_id_dropdown'),
				choices = drafted_job_ids,
				value = get_last(drafted_job_ids),
				interactive = True,
				visible = False
			)
			# Step index selector for the *-step actions; hidden until relevant.
			JOB_MANAGER_STEP_INDEX_DROPDOWN = gradio.Dropdown(
				label = translator.get('uis.job_manager_step_index_dropdown'),
				choices = [ 'none' ],
				value = 'none',
				interactive = True,
				visible = False
			)
			JOB_MANAGER_APPLY_BUTTON = gradio.Button(
				value = translator.get('uis.apply_button'),
				variant = 'primary',
				size = 'sm'
			)
def listen() -> None:
	"""Wire the job manager controls to their update and apply handlers."""
	# Switching the action reshapes which id/step selectors are shown.
	JOB_MANAGER_JOB_ACTION_DROPDOWN.change(update, inputs = [ JOB_MANAGER_JOB_ACTION_DROPDOWN, JOB_MANAGER_JOB_ID_DROPDOWN ], outputs = [ JOB_MANAGER_JOB_ID_TEXTBOX, JOB_MANAGER_JOB_ID_DROPDOWN, JOB_MANAGER_STEP_INDEX_DROPDOWN ])
	JOB_MANAGER_JOB_ID_DROPDOWN.change(update_step_index, inputs = JOB_MANAGER_JOB_ID_DROPDOWN, outputs = JOB_MANAGER_STEP_INDEX_DROPDOWN)
	JOB_MANAGER_APPLY_BUTTON.click(apply, inputs = [ JOB_MANAGER_JOB_ACTION_DROPDOWN, JOB_MANAGER_JOB_ID_TEXTBOX, JOB_MANAGER_JOB_ID_DROPDOWN, JOB_MANAGER_STEP_INDEX_DROPDOWN ], outputs = [ JOB_MANAGER_JOB_ACTION_DROPDOWN, JOB_MANAGER_JOB_ID_TEXTBOX, JOB_MANAGER_JOB_ID_DROPDOWN, JOB_MANAGER_STEP_INDEX_DROPDOWN ])
	ui_workflow_dropdown = get_ui_component('ui_workflow_dropdown')
	if ui_workflow_dropdown:
		ui_workflow_dropdown.change(remote_update, inputs = ui_workflow_dropdown, outputs = [ JOB_MANAGER_WRAPPER, JOB_MANAGER_JOB_ACTION_DROPDOWN, JOB_MANAGER_JOB_ID_TEXTBOX, JOB_MANAGER_JOB_ID_DROPDOWN, JOB_MANAGER_STEP_INDEX_DROPDOWN ])
def remote_update(ui_workflow : UiWorkflow) -> Tuple[gradio.Column, gradio.Dropdown, gradio.Textbox, gradio.Dropdown, gradio.Dropdown]:
	"""Reset the job manager panel when the UI workflow changes.

	The wrapper update is emitted as a gradio.Column to match JOB_MANAGER_WRAPPER,
	which render() creates via gradio.Column (the previous code returned a Row
	update for a Column component).
	"""
	is_job_manager = ui_workflow == 'job_manager'
	return gradio.Column(visible = is_job_manager), gradio.Dropdown(value = get_first(uis_choices.job_manager_actions)), gradio.Textbox(value = None, visible = True), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False)
def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id : str, selected_step_index : int) -> Tuple[gradio.Dropdown, gradio.Textbox, gradio.Dropdown, gradio.Dropdown]:
	"""Execute the selected job manager action and refresh the panel controls.

	Each action branch returns early on success with updated choices; on failure
	it logs an error and falls through to the no-op return at the end.
	"""
	# Normalize the 'none' sentinel values coming from the UI back to None.
	created_job_id = convert_str_none(created_job_id)
	selected_job_id = convert_str_none(selected_job_id)
	selected_step_index = convert_int_none(selected_step_index)
	step_args = collect_step_args()
	output_path = step_args.get('output_path')
	# When the output path is a directory, derive a concrete file name from the target.
	if is_directory(step_args.get('output_path')):
		step_args['output_path'] = suggest_output_path(step_args.get('output_path'), state_manager.get_item('target_path'))
	if job_action == 'job-create':
		if created_job_id and job_manager.create_job(created_job_id):
			updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
			logger.info(translator.get('job_created').format(job_id = created_job_id), __name__)
			# Advance the workflow: switch to job-add-step with the new job selected.
			return gradio.Dropdown(value = 'job-add-step'), gradio.Textbox(visible = False), gradio.Dropdown(value = created_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown()
		else:
			logger.error(translator.get('job_not_created').format(job_id = created_job_id), __name__)
	if job_action == 'job-submit':
		if selected_job_id and job_manager.submit_job(selected_job_id):
			updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
			logger.info(translator.get('job_submitted').format(job_id = selected_job_id), __name__)
			return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True), gradio.Dropdown()
		else:
			logger.error(translator.get('job_not_submitted').format(job_id = selected_job_id), __name__)
	if job_action == 'job-delete':
		if selected_job_id and job_manager.delete_job(selected_job_id):
			# Deletion can target a job in any state, so aggregate all statuses.
			updated_job_ids = job_manager.find_job_ids('drafted') + job_manager.find_job_ids('queued') + job_manager.find_job_ids('failed') + job_manager.find_job_ids('completed') or [ 'none' ]
			logger.info(translator.get('job_deleted').format(job_id = selected_job_id), __name__)
			return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True), gradio.Dropdown()
		else:
			logger.error(translator.get('job_not_deleted').format(job_id = selected_job_id), __name__)
	if job_action == 'job-add-step':
		if selected_job_id and job_manager.add_step(selected_job_id, step_args):
			# Restore the user-facing output path (possibly a directory) either way.
			state_manager.set_item('output_path', output_path)
			logger.info(translator.get('job_step_added').format(job_id = selected_job_id), __name__)
			return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(visible = True), gradio.Dropdown(visible = False)
		else:
			state_manager.set_item('output_path', output_path)
			logger.error(translator.get('job_step_not_added').format(job_id = selected_job_id), __name__)
	if job_action == 'job-remix-step':
		if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.remix_step(selected_job_id, selected_step_index, step_args):
			updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item]
			state_manager.set_item('output_path', output_path)
			logger.info(translator.get('job_remix_step_added').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
			return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(visible = True), gradio.Dropdown(value = get_last(updated_step_choices), choices = updated_step_choices, visible = True)
		else:
			state_manager.set_item('output_path', output_path)
			logger.error(translator.get('job_remix_step_not_added').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
	if job_action == 'job-insert-step':
		if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.insert_step(selected_job_id, selected_step_index, step_args):
			updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item]
			state_manager.set_item('output_path', output_path)
			logger.info(translator.get('job_step_inserted').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
			return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(visible = True), gradio.Dropdown(value = get_last(updated_step_choices), choices = updated_step_choices, visible = True)
		else:
			state_manager.set_item('output_path', output_path)
			logger.error(translator.get('job_step_not_inserted').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
	if job_action == 'job-remove-step':
		if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.remove_step(selected_job_id, selected_step_index):
			updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item]
			logger.info(translator.get('job_step_removed').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
			return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(visible = True), gradio.Dropdown(value = get_last(updated_step_choices), choices = updated_step_choices, visible = True)
		else:
			logger.error(translator.get('job_step_not_removed').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
	# Fallthrough: action failed or was unrecognized - leave every control untouched.
	return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(), gradio.Dropdown()
def get_step_choices(job_id : str) -> List[int]:
	"""Return the zero-based step indices for the given job."""
	return [ step_index for step_index, _ in enumerate(job_manager.get_steps(job_id)) ]
def update(job_action : JobManagerAction, selected_job_id : str) -> Tuple[gradio.Textbox, gradio.Dropdown, gradio.Dropdown]:
	"""Reshape the id/step selectors to match the selected job manager action."""
	if job_action == 'job-create':
		# Creating needs a free-text id; hide both selectors.
		return gradio.Textbox(value = None, visible = True), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False)
	if job_action == 'job-delete':
		# Deletion can target a job in any state, so aggregate all statuses.
		updated_job_ids = job_manager.find_job_ids('drafted') + job_manager.find_job_ids('queued') + job_manager.find_job_ids('failed') + job_manager.find_job_ids('completed') or [ 'none' ]
		# Keep the current selection when it is still valid, otherwise pick the last id.
		updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids)
		return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(visible = False)
	if job_action in [ 'job-submit', 'job-add-step' ]:
		updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
		updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids)
		return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(visible = False)
	if job_action in [ 'job-remix-step', 'job-insert-step', 'job-remove-step' ]:
		# Step-level actions additionally expose the step index selector.
		updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
		updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids)
		updated_step_choices = get_step_choices(updated_job_id) or [ 'none' ] #type:ignore[list-item]
		return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(value = get_last(updated_step_choices), choices = updated_step_choices, visible = True)
	return gradio.Textbox(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False)
def update_step_index(job_id : str) -> gradio.Dropdown:
	"""Refresh the step index selector for the newly selected job."""
	step_choices = get_step_choices(job_id) or [ 'none' ] #type:ignore[list-item]
	return gradio.Dropdown(value = get_last(step_choices), choices = step_choices)
-142
View File
@@ -1,142 +0,0 @@
from time import sleep
from typing import Optional, Tuple
import gradio
from facefusion import logger, process_manager, state_manager, translator
from facefusion.common_helper import get_first, get_last
from facefusion.core import process_step
from facefusion.jobs import job_manager, job_runner, job_store
from facefusion.types import UiWorkflow
from facefusion.uis import choices as uis_choices
from facefusion.uis.core import get_ui_component
from facefusion.uis.types import JobRunnerAction
from facefusion.uis.ui_helper import convert_str_none
# Module level component handles, populated by render() and wired up in listen().
JOB_RUNNER_WRAPPER : Optional[gradio.Column] = None
JOB_RUNNER_JOB_ACTION_DROPDOWN : Optional[gradio.Dropdown] = None
JOB_RUNNER_JOB_ID_DROPDOWN : Optional[gradio.Dropdown] = None
JOB_RUNNER_START_BUTTON : Optional[gradio.Button] = None
JOB_RUNNER_STOP_BUTTON : Optional[gradio.Button] = None


def render() -> None:
	"""Create the job runner panel; only rendered when the jobs store initializes."""
	global JOB_RUNNER_WRAPPER
	global JOB_RUNNER_JOB_ACTION_DROPDOWN
	global JOB_RUNNER_JOB_ID_DROPDOWN
	global JOB_RUNNER_START_BUTTON
	global JOB_RUNNER_STOP_BUTTON
	if job_manager.init_jobs(state_manager.get_item('jobs_path')):
		# The panel is only visible while the job_runner workflow is selected.
		is_job_runner = state_manager.get_item('ui_workflow') == 'job_runner'
		# 'none' is a sentinel shown when no queued jobs exist.
		queued_job_ids = job_manager.find_job_ids('queued') or [ 'none' ]
		with gradio.Column(visible = is_job_runner) as JOB_RUNNER_WRAPPER:
			JOB_RUNNER_JOB_ACTION_DROPDOWN = gradio.Dropdown(
				label = translator.get('uis.job_runner_job_action_dropdown'),
				choices = uis_choices.job_runner_actions,
				value = get_first(uis_choices.job_runner_actions)
			)
			JOB_RUNNER_JOB_ID_DROPDOWN = gradio.Dropdown(
				label = translator.get('uis.job_runner_job_id_dropdown'),
				choices = queued_job_ids,
				value = get_last(queued_job_ids)
			)
			with gradio.Row():
				JOB_RUNNER_START_BUTTON = gradio.Button(
					value = translator.get('uis.start_button'),
					variant = 'primary',
					size = 'sm'
				)
				# Stop starts hidden; start()/run() swap start and stop visibility.
				JOB_RUNNER_STOP_BUTTON = gradio.Button(
					value = translator.get('uis.stop_button'),
					variant = 'primary',
					size = 'sm',
					visible = False
				)
def listen() -> None:
	"""Wire the job runner controls to their handlers."""
	JOB_RUNNER_JOB_ACTION_DROPDOWN.change(update_job_action, inputs = JOB_RUNNER_JOB_ACTION_DROPDOWN, outputs = JOB_RUNNER_JOB_ID_DROPDOWN)
	# Two handlers on start: the first swaps the buttons as soon as processing
	# begins, the second performs the actual (long-running) job action.
	JOB_RUNNER_START_BUTTON.click(start, outputs = [ JOB_RUNNER_START_BUTTON, JOB_RUNNER_STOP_BUTTON ])
	JOB_RUNNER_START_BUTTON.click(run, inputs = [ JOB_RUNNER_JOB_ACTION_DROPDOWN, JOB_RUNNER_JOB_ID_DROPDOWN ], outputs = [ JOB_RUNNER_START_BUTTON, JOB_RUNNER_STOP_BUTTON, JOB_RUNNER_JOB_ID_DROPDOWN ])
	JOB_RUNNER_STOP_BUTTON.click(stop, outputs = [ JOB_RUNNER_START_BUTTON, JOB_RUNNER_STOP_BUTTON ])
	ui_workflow_dropdown = get_ui_component('ui_workflow_dropdown')
	if ui_workflow_dropdown:
		ui_workflow_dropdown.change(remote_update, inputs = ui_workflow_dropdown, outputs = [ JOB_RUNNER_WRAPPER, JOB_RUNNER_JOB_ACTION_DROPDOWN, JOB_RUNNER_JOB_ID_DROPDOWN ])
def remote_update(ui_workflow : UiWorkflow) -> Tuple[gradio.Column, gradio.Dropdown, gradio.Dropdown]:
	"""Reset the job runner panel when the UI workflow changes.

	The wrapper update is emitted as a gradio.Column to match JOB_RUNNER_WRAPPER,
	which render() creates via gradio.Column (the previous code returned a Row
	update for a Column component).
	"""
	is_job_runner = ui_workflow == 'job_runner'
	queued_job_ids = job_manager.find_job_ids('queued') or [ 'none' ]
	return gradio.Column(visible = is_job_runner), gradio.Dropdown(value = get_first(uis_choices.job_runner_actions), choices = uis_choices.job_runner_actions), gradio.Dropdown(value = get_last(queued_job_ids), choices = queued_job_ids)
def start() -> Tuple[gradio.Button, gradio.Button]:
	"""Block until processing has actually begun, then swap start for stop."""
	# Busy-wait for the sibling run() handler to flip the process manager state.
	while not process_manager.is_processing():
		sleep(0.5)
	return gradio.Button(visible = False), gradio.Button(visible = True)
def run(job_action : JobRunnerAction, job_id : str) -> Tuple[gradio.Button, gradio.Button, gradio.Dropdown]:
	"""Execute the selected runner action and refresh the job id dropdown."""
	# Normalize the 'none' sentinel coming from the UI back to None.
	job_id = convert_str_none(job_id)
	# Sync all job-relevant state items before running.
	for key in job_store.get_job_keys():
		state_manager.sync_item(key) #type:ignore[arg-type]
	if job_action == 'job-run':
		logger.info(translator.get('running_job').format(job_id = job_id), __name__)
		if job_id and job_runner.run_job(job_id, process_step):
			logger.info(translator.get('processing_job_succeeded').format(job_id = job_id), __name__)
		else:
			logger.info(translator.get('processing_job_failed').format(job_id = job_id), __name__)
		# Refresh the queued ids - the job just ran and left the queue.
		updated_job_ids = job_manager.find_job_ids('queued') or [ 'none' ]
		return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids)
	if job_action == 'job-run-all':
		logger.info(translator.get('running_jobs'), __name__)
		# Keep going through remaining jobs even when one of them fails.
		halt_on_error = False
		if job_runner.run_jobs(process_step, halt_on_error):
			logger.info(translator.get('processing_jobs_succeeded'), __name__)
		else:
			logger.info(translator.get('processing_jobs_failed'), __name__)
	if job_action == 'job-retry':
		logger.info(translator.get('retrying_job').format(job_id = job_id), __name__)
		if job_id and job_runner.retry_job(job_id, process_step):
			logger.info(translator.get('processing_job_succeeded').format(job_id = job_id), __name__)
		else:
			logger.info(translator.get('processing_job_failed').format(job_id = job_id), __name__)
		updated_job_ids = job_manager.find_job_ids('failed') or [ 'none' ]
		return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids)
	if job_action == 'job-retry-all':
		logger.info(translator.get('retrying_jobs'), __name__)
		halt_on_error = False
		if job_runner.retry_jobs(process_step, halt_on_error):
			logger.info(translator.get('processing_jobs_succeeded'), __name__)
		else:
			logger.info(translator.get('processing_jobs_failed'), __name__)
	# The *-all actions (and unknown actions) leave the dropdown untouched.
	return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown()
def stop() -> Tuple[gradio.Button, gradio.Button]:
	"""Halt the running process and restore the start/stop button visibility."""
	process_manager.stop()
	start_button = gradio.Button(visible = True)
	stop_button = gradio.Button(visible = False)
	return start_button, stop_button
def update_job_action(job_action : JobRunnerAction) -> gradio.Dropdown:
	"""Refresh the job id dropdown with the job ids that match the chosen action."""
	status_by_action =\
	{
		'job-run': 'queued',
		'job-retry': 'failed'
	}
	job_status = status_by_action.get(job_action)

	if job_status:
		updated_job_ids = job_manager.find_job_ids(job_status) or [ 'none' ]
		return gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True)
	return gradio.Dropdown(visible = False)
@@ -1,64 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.lip_syncer import choices as lip_syncer_choices
from facefusion.processors.modules.lip_syncer.types import LipSyncerModel, LipSyncerWeight
from facefusion.uis.core import get_ui_component, register_ui_component
# module-level handles to the rendered components (populated by render())
LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
LIP_SYNCER_WEIGHT_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	"""Create the lip syncer model dropdown and weight slider, hidden unless the processor is active."""
	global LIP_SYNCER_MODEL_DROPDOWN
	global LIP_SYNCER_WEIGHT_SLIDER

	# components are only visible while 'lip_syncer' is among the active processors
	has_lip_syncer = 'lip_syncer' in state_manager.get_item('processors')
	LIP_SYNCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.lip_syncer'),
		choices = lip_syncer_choices.lip_syncer_models,
		value = state_manager.get_item('lip_syncer_model'),
		visible = has_lip_syncer
	)
	LIP_SYNCER_WEIGHT_SLIDER = gradio.Slider(
		label = translator.get('uis.weight_slider', 'facefusion.processors.modules.lip_syncer'),
		value = state_manager.get_item('lip_syncer_weight'),
		step = calculate_float_step(lip_syncer_choices.lip_syncer_weight_range),
		minimum = lip_syncer_choices.lip_syncer_weight_range[0],
		maximum = lip_syncer_choices.lip_syncer_weight_range[-1],
		visible = has_lip_syncer
	)
	# expose the components so other UI modules can attach listeners to them
	register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN)
	register_ui_component('lip_syncer_weight_slider', LIP_SYNCER_WEIGHT_SLIDER)
def listen() -> None:
	"""Wire the lip syncer component events to their update handlers."""
	LIP_SYNCER_MODEL_DROPDOWN.change(update_lip_syncer_model, inputs = LIP_SYNCER_MODEL_DROPDOWN, outputs = LIP_SYNCER_MODEL_DROPDOWN)
	LIP_SYNCER_WEIGHT_SLIDER.release(update_lip_syncer_weight, inputs = LIP_SYNCER_WEIGHT_SLIDER)
	# toggle visibility whenever the processor selection changes
	processors_checkbox_group = get_ui_component('processors_checkbox_group')
	if processors_checkbox_group:
		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ LIP_SYNCER_MODEL_DROPDOWN, LIP_SYNCER_WEIGHT_SLIDER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
	"""Show or hide the lip syncer components based on the active processors."""
	is_visible = 'lip_syncer' in processors
	model_dropdown = gradio.Dropdown(visible = is_visible)
	weight_slider = gradio.Slider(visible = is_visible)
	return model_dropdown, weight_slider
def update_lip_syncer_model(lip_syncer_model : LipSyncerModel) -> gradio.Dropdown:
	"""Switch the lip syncer model, resetting the inference pool and validating the model files."""
	lip_syncer_module = load_processor_module('lip_syncer')
	lip_syncer_module.clear_inference_pool()
	state_manager.set_item('lip_syncer_model', lip_syncer_model)

	if not lip_syncer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = state_manager.get_item('lip_syncer_model'))
def update_lip_syncer_weight(lip_syncer_weight : LipSyncerWeight) -> None:
	"""Persist the lip syncer weight into the application state."""
	state_manager.set_item('lip_syncer_weight', lip_syncer_weight)
-42
View File
@@ -1,42 +0,0 @@
from typing import Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.types import VideoMemoryStrategy
# module-level handles to the rendered components (populated by render())
VIDEO_MEMORY_STRATEGY_DROPDOWN : Optional[gradio.Dropdown] = None
SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	"""Create the video memory strategy dropdown and the system memory limit slider."""
	global VIDEO_MEMORY_STRATEGY_DROPDOWN
	global SYSTEM_MEMORY_LIMIT_SLIDER

	VIDEO_MEMORY_STRATEGY_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.video_memory_strategy_dropdown'),
		choices = facefusion.choices.video_memory_strategies,
		value = state_manager.get_item('video_memory_strategy')
	)
	SYSTEM_MEMORY_LIMIT_SLIDER = gradio.Slider(
		label = translator.get('uis.system_memory_limit_slider'),
		step = calculate_int_step(facefusion.choices.system_memory_limit_range),
		minimum = facefusion.choices.system_memory_limit_range[0],
		maximum = facefusion.choices.system_memory_limit_range[-1],
		value = state_manager.get_item('system_memory_limit')
	)
def listen() -> None:
	"""Wire the memory component events to their update handlers."""
	VIDEO_MEMORY_STRATEGY_DROPDOWN.change(update_video_memory_strategy, inputs = VIDEO_MEMORY_STRATEGY_DROPDOWN)
	SYSTEM_MEMORY_LIMIT_SLIDER.release(update_system_memory_limit, inputs = SYSTEM_MEMORY_LIMIT_SLIDER)
def update_video_memory_strategy(video_memory_strategy : VideoMemoryStrategy) -> None:
	"""Persist the video memory strategy into the application state."""
	state_manager.set_item('video_memory_strategy', video_memory_strategy)


def update_system_memory_limit(system_memory_limit : float) -> None:
	"""Persist the system memory limit, coercing the slider float to int."""
	state_manager.set_item('system_memory_limit', int(system_memory_limit))
-48
View File
@@ -1,48 +0,0 @@
import tempfile
from pathlib import Path
from typing import Optional
import gradio
from facefusion import state_manager, translator
from facefusion.uis.core import register_ui_component
# module-level handles to the rendered components (populated by render())
OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None
OUTPUT_IMAGE : Optional[gradio.Image] = None
OUTPUT_VIDEO : Optional[gradio.Video] = None


def render() -> None:
	"""
	Create the output path textbox and the output image/video components.

	When no output path is configured yet, fall back to the user's Documents
	directory or, if that does not exist, to the system temporary directory.
	"""
	global OUTPUT_PATH_TEXTBOX
	global OUTPUT_IMAGE
	global OUTPUT_VIDEO

	if not state_manager.get_item('output_path'):
		documents_directory = Path.home().joinpath('Documents')
		if documents_directory.exists():
			# store as str so the state type matches the tempdir fallback below
			state_manager.set_item('output_path', str(documents_directory))
		else:
			state_manager.set_item('output_path', tempfile.gettempdir())
	OUTPUT_PATH_TEXTBOX = gradio.Textbox(
		label = translator.get('uis.output_path_textbox'),
		value = state_manager.get_item('output_path'),
		max_lines = 1
	)
	# image stays hidden until an image result is produced; video is the default
	OUTPUT_IMAGE = gradio.Image(
		label = translator.get('uis.output_image_or_video'),
		visible = False
	)
	OUTPUT_VIDEO = gradio.Video(
		label = translator.get('uis.output_image_or_video')
	)
def listen() -> None:
	"""Wire the output path textbox and register the output components."""
	OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX)
	register_ui_component('output_image', OUTPUT_IMAGE)
	register_ui_component('output_video', OUTPUT_VIDEO)
def update_output_path(output_path : str) -> None:
	"""Persist the output path into the application state."""
	state_manager.set_item('output_path', output_path)
-184
View File
@@ -1,184 +0,0 @@
from typing import Optional, Tuple
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step, calculate_int_step
from facefusion.ffmpeg import get_available_encoder_set
from facefusion.filesystem import is_image, is_video
from facefusion.types import AudioEncoder, Fps, Scale, VideoEncoder, VideoPreset
from facefusion.uis.core import get_ui_components, register_ui_component
from facefusion.vision import detect_video_fps
# module-level handles to the rendered components (populated by render())
OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None
OUTPUT_IMAGE_SCALE_SLIDER : Optional[gradio.Slider] = None
OUTPUT_AUDIO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_AUDIO_QUALITY_SLIDER : Optional[gradio.Slider] = None
OUTPUT_AUDIO_VOLUME_SLIDER : Optional[gradio.Slider] = None
OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_VIDEO_PRESET_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_VIDEO_SCALE_SLIDER : Optional[gradio.Slider] = None
OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None
OUTPUT_VIDEO_FPS_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	"""
	Create the output encoding components.

	Image components are visible only for image targets; audio/video components
	only for video targets. Initial values come from the application state.
	"""
	global OUTPUT_IMAGE_QUALITY_SLIDER
	global OUTPUT_IMAGE_SCALE_SLIDER
	global OUTPUT_AUDIO_ENCODER_DROPDOWN
	global OUTPUT_AUDIO_QUALITY_SLIDER
	global OUTPUT_AUDIO_VOLUME_SLIDER
	global OUTPUT_VIDEO_ENCODER_DROPDOWN
	global OUTPUT_VIDEO_PRESET_DROPDOWN
	global OUTPUT_VIDEO_SCALE_SLIDER
	global OUTPUT_VIDEO_QUALITY_SLIDER
	global OUTPUT_VIDEO_FPS_SLIDER

	# encoders available in the local ffmpeg build
	available_encoder_set = get_available_encoder_set()
	OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider(
		label = translator.get('uis.output_image_quality_slider'),
		value = state_manager.get_item('output_image_quality'),
		step = calculate_int_step(facefusion.choices.output_image_quality_range),
		minimum = facefusion.choices.output_image_quality_range[0],
		maximum = facefusion.choices.output_image_quality_range[-1],
		visible = is_image(state_manager.get_item('target_path'))
	)
	OUTPUT_IMAGE_SCALE_SLIDER = gradio.Slider(
		label = translator.get('uis.output_image_scale_slider'),
		step = calculate_float_step(facefusion.choices.output_image_scale_range),
		value = state_manager.get_item('output_image_scale'),
		minimum = facefusion.choices.output_image_scale_range[0],
		maximum = facefusion.choices.output_image_scale_range[-1],
		visible = is_image(state_manager.get_item('target_path'))
	)
	OUTPUT_AUDIO_ENCODER_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.output_audio_encoder_dropdown'),
		choices = available_encoder_set.get('audio'),
		value = state_manager.get_item('output_audio_encoder'),
		visible = is_video(state_manager.get_item('target_path'))
	)
	OUTPUT_AUDIO_QUALITY_SLIDER = gradio.Slider(
		label = translator.get('uis.output_audio_quality_slider'),
		value = state_manager.get_item('output_audio_quality'),
		step = calculate_int_step(facefusion.choices.output_audio_quality_range),
		minimum = facefusion.choices.output_audio_quality_range[0],
		maximum = facefusion.choices.output_audio_quality_range[-1],
		visible = is_video(state_manager.get_item('target_path'))
	)
	OUTPUT_AUDIO_VOLUME_SLIDER = gradio.Slider(
		label = translator.get('uis.output_audio_volume_slider'),
		value = state_manager.get_item('output_audio_volume'),
		step = calculate_int_step(facefusion.choices.output_audio_volume_range),
		minimum = facefusion.choices.output_audio_volume_range[0],
		maximum = facefusion.choices.output_audio_volume_range[-1],
		visible = is_video(state_manager.get_item('target_path'))
	)
	OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.output_video_encoder_dropdown'),
		choices = available_encoder_set.get('video'),
		value = state_manager.get_item('output_video_encoder'),
		visible = is_video(state_manager.get_item('target_path'))
	)
	OUTPUT_VIDEO_PRESET_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.output_video_preset_dropdown'),
		choices = facefusion.choices.output_video_presets,
		value = state_manager.get_item('output_video_preset'),
		visible = is_video(state_manager.get_item('target_path'))
	)
	OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
		label = translator.get('uis.output_video_quality_slider'),
		value = state_manager.get_item('output_video_quality'),
		step = calculate_int_step(facefusion.choices.output_video_quality_range),
		minimum = facefusion.choices.output_video_quality_range[0],
		maximum = facefusion.choices.output_video_quality_range[-1],
		visible = is_video(state_manager.get_item('target_path'))
	)
	OUTPUT_VIDEO_SCALE_SLIDER = gradio.Slider(
		label = translator.get('uis.output_video_scale_slider'),
		step = calculate_float_step(facefusion.choices.output_video_scale_range),
		value = state_manager.get_item('output_video_scale'),
		minimum = facefusion.choices.output_video_scale_range[0],
		maximum = facefusion.choices.output_video_scale_range[-1],
		visible = is_video(state_manager.get_item('target_path'))
	)
	# fps has hard-coded bounds rather than a choices range
	OUTPUT_VIDEO_FPS_SLIDER = gradio.Slider(
		label = translator.get('uis.output_video_fps_slider'),
		value = state_manager.get_item('output_video_fps'),
		step = 0.01,
		minimum = 1,
		maximum = 60,
		visible = is_video(state_manager.get_item('target_path'))
	)
	# only the fps slider is needed by other UI modules (e.g. the preview)
	register_ui_component('output_video_fps_slider', OUTPUT_VIDEO_FPS_SLIDER)
def listen() -> None:
	"""Wire output component events to their update handlers and to target changes."""
	OUTPUT_IMAGE_QUALITY_SLIDER.release(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
	OUTPUT_IMAGE_SCALE_SLIDER.release(update_output_image_scale, inputs = OUTPUT_IMAGE_SCALE_SLIDER)
	OUTPUT_AUDIO_ENCODER_DROPDOWN.change(update_output_audio_encoder, inputs = OUTPUT_AUDIO_ENCODER_DROPDOWN)
	OUTPUT_AUDIO_QUALITY_SLIDER.release(update_output_audio_quality, inputs = OUTPUT_AUDIO_QUALITY_SLIDER)
	OUTPUT_AUDIO_VOLUME_SLIDER.release(update_output_audio_volume, inputs = OUTPUT_AUDIO_VOLUME_SLIDER)
	OUTPUT_VIDEO_ENCODER_DROPDOWN.change(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
	OUTPUT_VIDEO_PRESET_DROPDOWN.change(update_output_video_preset, inputs = OUTPUT_VIDEO_PRESET_DROPDOWN)
	OUTPUT_VIDEO_QUALITY_SLIDER.release(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
	OUTPUT_VIDEO_SCALE_SLIDER.release(update_output_video_scale, inputs = OUTPUT_VIDEO_SCALE_SLIDER)
	OUTPUT_VIDEO_FPS_SLIDER.release(update_output_video_fps, inputs = OUTPUT_VIDEO_FPS_SLIDER)
	# retoggle component visibility whenever the target image/video is set or cleared
	for ui_component in get_ui_components(
	[
		'target_image',
		'target_video'
	]):
		for method in [ 'change', 'clear' ]:
			getattr(ui_component, method)(remote_update, outputs = [OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_SCALE_SLIDER, OUTPUT_AUDIO_ENCODER_DROPDOWN, OUTPUT_AUDIO_QUALITY_SLIDER, OUTPUT_AUDIO_VOLUME_SLIDER, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_SCALE_SLIDER, OUTPUT_VIDEO_FPS_SLIDER])
def remote_update() -> Tuple[gradio.Slider, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Slider, gradio.Slider]:
	"""
	Toggle the visibility of all output components for the current target:
	image components for images, audio/video components for videos, all hidden otherwise.
	"""
	if is_image(state_manager.get_item('target_path')):
		return gradio.Slider(visible = True), gradio.Slider(visible = True), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False)
	if is_video(state_manager.get_item('target_path')):
		# adopt the fps of the freshly selected target video as the output fps
		state_manager.set_item('output_video_fps', detect_video_fps(state_manager.get_item('target_path')))
		return gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Slider(visible = True), gradio.Dropdown(visible = True), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Slider(visible = True), gradio.Slider(value = state_manager.get_item('output_video_fps'), visible = True)
	return gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False)
def update_output_image_quality(output_image_quality : float) -> None:
	"""Persist the output image quality, coercing the slider float to int."""
	state_manager.set_item('output_image_quality', int(output_image_quality))


def update_output_image_scale(output_image_scale : Scale) -> None:
	"""Persist the output image scale."""
	state_manager.set_item('output_image_scale', output_image_scale)


def update_output_audio_encoder(output_audio_encoder : AudioEncoder) -> None:
	"""Persist the output audio encoder."""
	state_manager.set_item('output_audio_encoder', output_audio_encoder)


def update_output_audio_quality(output_audio_quality : float) -> None:
	"""Persist the output audio quality, coercing the slider float to int."""
	state_manager.set_item('output_audio_quality', int(output_audio_quality))


def update_output_audio_volume(output_audio_volume: float) -> None:
	"""Persist the output audio volume, coercing the slider float to int."""
	state_manager.set_item('output_audio_volume', int(output_audio_volume))


def update_output_video_encoder(output_video_encoder : VideoEncoder) -> None:
	"""Persist the output video encoder."""
	state_manager.set_item('output_video_encoder', output_video_encoder)


def update_output_video_preset(output_video_preset : VideoPreset) -> None:
	"""Persist the output video preset."""
	state_manager.set_item('output_video_preset', output_video_preset)


def update_output_video_quality(output_video_quality : float) -> None:
	"""Persist the output video quality, coercing the slider float to int."""
	state_manager.set_item('output_video_quality', int(output_video_quality))


def update_output_video_scale(output_video_scale : Scale) -> None:
	"""Persist the output video scale."""
	state_manager.set_item('output_video_scale', output_video_scale)


def update_output_video_fps(output_video_fps : Fps) -> None:
	"""Persist the output video fps."""
	state_manager.set_item('output_video_fps', output_video_fps)
-302
View File
@@ -1,302 +0,0 @@
from time import sleep
from typing import List, Optional, Tuple
import cv2
import gradio
import numpy
from facefusion import logger, process_manager, state_manager, translator
from facefusion.audio import create_empty_audio_frame, get_voice_frame
from facefusion.common_helper import get_first
from facefusion.content_analyser import analyse_frame
from facefusion.face_analyser import get_one_face
from facefusion.face_selector import select_faces
from facefusion.face_store import clear_static_faces
from facefusion.filesystem import filter_audio_paths, is_image, is_video
from facefusion.processors.core import get_processors_modules
from facefusion.types import AudioFrame, Face, Mask, VisionFrame
from facefusion.uis import choices as uis_choices
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
from facefusion.uis.types import ComponentOptions, PreviewMode
from facefusion.vision import detect_frame_orientation, extract_vision_mask, fit_cover_frame, merge_vision_mask, obscure_frame, read_static_image, read_static_images, read_video_frame, restrict_frame, unpack_resolution
# module-level handle to the rendered preview image (populated by render())
PREVIEW_IMAGE : Optional[gradio.Image] = None


def render() -> None:
	"""Create the preview image, pre-rendering a first preview frame for the current target."""
	global PREVIEW_IMAGE

	preview_image_options : ComponentOptions =\
	{
		'label': translator.get('uis.preview_image')
	}
	source_vision_frames = read_static_images(state_manager.get_item('source_paths'))
	source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
	source_audio_frame = create_empty_audio_frame()
	source_voice_frame = create_empty_audio_frame()
	# NOTE(review): this truthiness check skips voice extraction when
	# reference_frame_number is 0 — confirm whether frame 0 should be supported
	if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'):
		temp_voice_frame = get_voice_frame(source_audio_path, state_manager.get_item('output_video_fps'), state_manager.get_item('reference_frame_number'))
		if numpy.any(temp_voice_frame):
			source_voice_frame = temp_voice_frame
	if is_image(state_manager.get_item('target_path')):
		target_vision_frame = read_static_image(state_manager.get_item('target_path'))
		reference_vision_frame = read_static_image(state_manager.get_item('target_path'))
		# first render uses the default preview mode and the highest preview resolution
		preview_vision_frame = process_preview_frame(reference_vision_frame, source_vision_frames, source_audio_frame, source_voice_frame, target_vision_frame, uis_choices.preview_modes[0], uis_choices.preview_resolutions[-1])
		preview_image_options['value'] = cv2.cvtColor(preview_vision_frame, cv2.COLOR_BGR2RGB)
		preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]
	if is_video(state_manager.get_item('target_path')):
		temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
		reference_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
		preview_vision_frame = process_preview_frame(reference_vision_frame, source_vision_frames, source_audio_frame, source_voice_frame, temp_vision_frame, uis_choices.preview_modes[0], uis_choices.preview_resolutions[-1])
		preview_image_options['value'] = cv2.cvtColor(preview_vision_frame, cv2.COLOR_BGR2RGB)
		preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]
		preview_image_options['visible'] = True
	PREVIEW_IMAGE = gradio.Image(**preview_image_options)
	register_ui_component('preview_image', PREVIEW_IMAGE)
def listen() -> None:
	"""
	Wire every component that can affect the preview to a preview refresh.

	Components are grouped by the refresh they need: a plain update, or a
	clear-and-update that also drops the static face cache (for settings that
	change face detection/selection results).
	"""
	preview_frame_slider = get_ui_component('preview_frame_slider')
	preview_mode_dropdown = get_ui_component('preview_mode_dropdown')
	preview_resolution_dropdown = get_ui_component('preview_resolution_dropdown')
	if preview_mode_dropdown:
		preview_mode_dropdown.change(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	if preview_resolution_dropdown:
		preview_resolution_dropdown.change(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	if preview_frame_slider:
		# release triggers the final render; change renders while dragging ('once' avoids queue pile-up)
		preview_frame_slider.release(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE, show_progress = 'hidden')
		preview_frame_slider.change(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE, show_progress = 'hidden', trigger_mode = 'once')
	reference_face_position_gallery = get_ui_component('reference_face_position_gallery')
	if reference_face_position_gallery:
		reference_face_position_gallery.select(clear_and_update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	# source/target media changes refresh the preview on both change and clear
	for ui_component in get_ui_components(
	[
		'source_audio',
		'source_image',
		'target_image',
		'target_video'
	]):
		for method in [ 'change', 'clear' ]:
			getattr(ui_component, method)(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	# option groups that only affect rendering -> plain update on change
	for ui_component in get_ui_components(
	[
		'background_remover_color_red_number',
		'background_remover_color_green_number',
		'background_remover_color_blue_number',
		'background_remover_color_alpha_number',
		'face_debugger_items_checkbox_group',
		'frame_colorizer_size_dropdown',
		'face_mask_types_checkbox_group',
		'face_mask_areas_checkbox_group',
		'face_mask_regions_checkbox_group',
		'expression_restorer_areas_checkbox_group'
	]):
		ui_component.change(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	# sliders that only affect rendering -> plain update on release
	for ui_component in get_ui_components(
	[
		'age_modifier_direction_slider',
		'deep_swapper_morph_slider',
		'expression_restorer_factor_slider',
		'face_editor_eyebrow_direction_slider',
		'face_editor_eye_gaze_horizontal_slider',
		'face_editor_eye_gaze_vertical_slider',
		'face_editor_eye_open_ratio_slider',
		'face_editor_lip_open_ratio_slider',
		'face_editor_mouth_grim_slider',
		'face_editor_mouth_pout_slider',
		'face_editor_mouth_purse_slider',
		'face_editor_mouth_smile_slider',
		'face_editor_mouth_position_horizontal_slider',
		'face_editor_mouth_position_vertical_slider',
		'face_editor_head_pitch_slider',
		'face_editor_head_yaw_slider',
		'face_editor_head_roll_slider',
		'face_enhancer_blend_slider',
		'face_enhancer_weight_slider',
		'face_swapper_weight_slider',
		'frame_colorizer_blend_slider',
		'frame_enhancer_blend_slider',
		'lip_syncer_weight_slider',
		'reference_face_distance_slider',
		'face_selector_age_range_slider',
		'face_mask_blur_slider',
		'face_mask_padding_top_slider',
		'face_mask_padding_bottom_slider',
		'face_mask_padding_left_slider',
		'face_mask_padding_right_slider',
		'output_video_fps_slider'
	]):
		ui_component.release(update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	# settings that invalidate cached faces -> clear-and-update on change
	for ui_component in get_ui_components(
	[
		'age_modifier_model_dropdown',
		'background_remover_model_dropdown',
		'deep_swapper_model_dropdown',
		'expression_restorer_model_dropdown',
		'processors_checkbox_group',
		'face_editor_model_dropdown',
		'face_enhancer_model_dropdown',
		'face_swapper_model_dropdown',
		'face_swapper_pixel_boost_dropdown',
		'frame_colorizer_model_dropdown',
		'frame_enhancer_model_dropdown',
		'lip_syncer_model_dropdown',
		'face_selector_mode_dropdown',
		'face_selector_order_dropdown',
		'face_selector_gender_dropdown',
		'face_selector_race_dropdown',
		'face_detector_model_dropdown',
		'face_detector_size_dropdown',
		'face_detector_angles_checkbox_group',
		'face_landmarker_model_dropdown',
		'face_occluder_model_dropdown',
		'face_parser_model_dropdown',
		'voice_extractor_model_dropdown'
	]):
		ui_component.change(clear_and_update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
	# detection-threshold sliders also invalidate cached faces -> clear-and-update on release
	for ui_component in get_ui_components(
	[
		'face_detector_margin_slider',
		'face_detector_score_slider',
		'face_landmarker_score_slider'
	]):
		ui_component.release(clear_and_update_preview_image, inputs = [ preview_mode_dropdown, preview_resolution_dropdown, preview_frame_slider ], outputs = PREVIEW_IMAGE)
def update_preview_image(preview_mode : PreviewMode, preview_resolution : str, frame_number : int = 0) -> gradio.Image:
	"""
	Render a fresh preview frame for the current state and return the updated image.

	Waits for any running pre-check to finish first. For videos, the voice frame
	is aligned against the trim start so audio matches the shown frame.
	"""
	# block while model pre-checks are still running to avoid racing downloads
	while process_manager.is_checking():
		sleep(0.5)
	source_vision_frames = read_static_images(state_manager.get_item('source_paths'))
	source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
	source_audio_frame = create_empty_audio_frame()
	source_voice_frame = create_empty_audio_frame()
	# NOTE(review): truthiness skips voice extraction at reference frame 0 — confirm intent
	if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'):
		reference_audio_frame_number = state_manager.get_item('reference_frame_number')
		if state_manager.get_item('trim_frame_start'):
			reference_audio_frame_number -= state_manager.get_item('trim_frame_start')
		temp_voice_frame = get_voice_frame(source_audio_path, state_manager.get_item('output_video_fps'), reference_audio_frame_number)
		if numpy.any(temp_voice_frame):
			source_voice_frame = temp_voice_frame
	if is_image(state_manager.get_item('target_path')):
		reference_vision_frame = read_static_image(state_manager.get_item('target_path'))
		# read with alpha, then split and re-merge so downstream gets frame + mask
		target_vision_frame = read_static_image(state_manager.get_item('target_path'), 'rgba')
		target_vision_mask = extract_vision_mask(target_vision_frame)
		target_vision_frame = merge_vision_mask(target_vision_frame, target_vision_mask)
		preview_vision_frame = process_preview_frame(reference_vision_frame, source_vision_frames, source_audio_frame, source_voice_frame, target_vision_frame, preview_mode, preview_resolution)
		preview_vision_frame = cv2.cvtColor(preview_vision_frame, cv2.COLOR_BGRA2RGBA)
		return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ])
	if is_video(state_manager.get_item('target_path')):
		reference_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
		temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number)
		temp_vision_mask = extract_vision_mask(temp_vision_frame)
		temp_vision_frame = merge_vision_mask(temp_vision_frame, temp_vision_mask)
		preview_vision_frame = process_preview_frame(reference_vision_frame, source_vision_frames, source_audio_frame, source_voice_frame, temp_vision_frame, preview_mode, preview_resolution)
		preview_vision_frame = cv2.cvtColor(preview_vision_frame, cv2.COLOR_BGRA2RGBA)
		return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ])
	# no valid target -> clear the preview
	return gradio.Image(value = None, elem_classes = None)
def clear_and_update_preview_image(preview_mode : PreviewMode, preview_resolution : str, frame_number : int = 0) -> gradio.Image:
	"""Drop the static face cache, then render the preview (for settings that change detection results)."""
	clear_static_faces()
	return update_preview_image(preview_mode, preview_resolution, frame_number)
def process_preview_frame(reference_vision_frame : VisionFrame, source_vision_frames : List[VisionFrame], source_audio_frame : AudioFrame, source_voice_frame : AudioFrame, target_vision_frame : VisionFrame, preview_mode : PreviewMode, preview_resolution : str) -> VisionFrame:
	"""
	Run the active processors over one frame and compose it for the preview mode.

	Frames that the content analyser flags are obscured instead of processed.
	'frame-by-frame' stacks original and result side by side, 'face-by-face'
	stacks padded crops of the primary face; otherwise the result frame alone.
	"""
	target_vision_frame = restrict_frame(target_vision_frame, unpack_resolution(preview_resolution))
	temp_vision_frame = target_vision_frame.copy()
	temp_vision_mask = extract_vision_mask(temp_vision_frame)
	# flagged content: obscure instead of processing (only the BGR channels are analysed)
	if analyse_frame(target_vision_frame[:, :, :3]):
		if preview_mode == 'frame-by-frame':
			temp_vision_frame = obscure_frame(temp_vision_frame[:, :, :3])
			return numpy.hstack((temp_vision_frame, temp_vision_frame))
		if preview_mode == 'face-by-face':
			target_crop_vision_frame, output_crop_vision_frame = create_face_by_face(reference_vision_frame, target_vision_frame[:, :, :3], temp_vision_frame[:, :, :3])
			target_crop_vision_frame = obscure_frame(target_crop_vision_frame)
			output_crop_vision_frame = obscure_frame(output_crop_vision_frame)
			return numpy.hstack((target_crop_vision_frame, output_crop_vision_frame))
		temp_vision_frame = obscure_frame(temp_vision_frame)
		return temp_vision_frame
	for processor_module in get_processors_modules(state_manager.get_item('processors')):
		# silence per-processor logging during the pre-process probe
		logger.disable()
		if processor_module.pre_process('preview'):
			logger.enable()
			temp_vision_frame, temp_vision_mask = processor_module.process_frame(
			{
				'reference_vision_frame': reference_vision_frame,
				'source_audio_frame': source_audio_frame,
				'source_voice_frame': source_voice_frame,
				'source_vision_frames': source_vision_frames,
				'target_vision_frame': target_vision_frame[:, :, :3],
				'temp_vision_frame': temp_vision_frame[:, :, :3],
				'temp_vision_mask': temp_vision_mask
			})
		logger.enable()
	temp_vision_frame = prepare_output_frame(target_vision_frame, temp_vision_frame, temp_vision_mask)
	if preview_mode == 'frame-by-frame':
		return numpy.hstack((target_vision_frame, temp_vision_frame))
	if preview_mode == 'face-by-face':
		target_crop_vision_frame, output_crop_vision_frame = create_face_by_face(reference_vision_frame, target_vision_frame, temp_vision_frame)
		return numpy.hstack((target_crop_vision_frame, output_crop_vision_frame))
	return temp_vision_frame
def create_face_by_face(reference_vision_frame : VisionFrame, target_vision_frame : VisionFrame, temp_vision_frame : VisionFrame) -> Tuple[VisionFrame, VisionFrame]:
	"""
	Return matching square crops of the primary detected face from the target
	frame and from the processed frame; falls back to empty 512x512 RGBA frames
	when no face (or no usable crop) is found.
	"""
	target_faces = select_faces(reference_vision_frame[:, :, :3], target_vision_frame[:, :, :3])
	target_face = get_one_face(target_faces)
	if target_face:
		target_crop_vision_frame = extract_crop_frame(target_vision_frame, target_face)
		output_crop_vision_frame = extract_crop_frame(temp_vision_frame, target_face)
		if numpy.any(target_crop_vision_frame) and numpy.any(output_crop_vision_frame):
			# fit both crops to the same square so hstack produces equal halves
			target_crop_dimension = min(target_crop_vision_frame.shape[:2])
			target_crop_vision_frame = fit_cover_frame(target_crop_vision_frame, (target_crop_dimension, target_crop_dimension))
			output_crop_vision_frame = fit_cover_frame(output_crop_vision_frame, (target_crop_dimension, target_crop_dimension))
			return target_crop_vision_frame, output_crop_vision_frame
	empty_vision_frame = numpy.zeros((512, 512, 4), dtype = numpy.uint8)
	return empty_vision_frame, empty_vision_frame
def extract_crop_frame(vision_frame : VisionFrame, face : Face) -> Optional[VisionFrame]:
	"""
	Cut a padded crop around the face bounding box out of the vision frame.

	The box is enlarged by 25% of its size on each side, clamped at the frame
	origin; numpy slicing clamps the far edges automatically.
	"""
	start_x, start_y, end_x, end_y = map(int, face.bounding_box)
	padding_x = int((end_x - start_x) * 0.25)
	padding_y = int((end_y - start_y) * 0.25)
	start_x, end_x = max(0, start_x - padding_x), max(0, end_x + padding_x)
	start_y, end_y = max(0, start_y - padding_y), max(0, end_y + padding_y)
	return vision_frame[start_y:end_y, start_x:end_x]
def prepare_output_frame(target_vision_frame : VisionFrame, temp_vision_frame : VisionFrame, temp_vision_mask : Mask) -> VisionFrame:
	"""Merge the processed frame with its mask and resize it back to the target frame size."""
	# floor the mask at the configured background alpha (last component of background_remover_color)
	temp_vision_mask = temp_vision_mask.clip(state_manager.get_item('background_remover_color')[-1], 255)
	temp_vision_frame = merge_vision_mask(temp_vision_frame, temp_vision_mask)
	# shape[1::-1] yields (width, height) as cv2.resize expects
	temp_vision_frame = cv2.resize(temp_vision_frame, target_vision_frame.shape[1::-1])
	return temp_vision_frame
@@ -1,61 +0,0 @@
from typing import Optional
import gradio
from facefusion import state_manager, translator
from facefusion.filesystem import is_video
from facefusion.uis import choices as uis_choices
from facefusion.uis.core import get_ui_components, register_ui_component
from facefusion.uis.types import ComponentOptions
from facefusion.vision import count_video_frame_total
PREVIEW_FRAME_SLIDER: Optional[gradio.Slider] = None
PREVIEW_MODE_DROPDOWN: Optional[gradio.Dropdown] = None
PREVIEW_RESOLUTION_DROPDOWN: Optional[gradio.Dropdown] = None
def render() -> None:
	"""Render the preview frame slider plus the preview mode / resolution dropdowns."""
	global PREVIEW_FRAME_SLIDER, PREVIEW_MODE_DROPDOWN, PREVIEW_RESOLUTION_DROPDOWN
	preview_frame_slider_options : ComponentOptions =\
	{
		'label': translator.get('uis.preview_frame_slider'),
		'step': 1,
		'minimum': 0,
		'maximum': 100,
		'visible': False
	}
	# the slider is only meaningful (and visible) for video targets
	if is_video(state_manager.get_item('target_path')):
		preview_frame_slider_options['value'] = state_manager.get_item('reference_frame_number')
		preview_frame_slider_options['maximum'] = count_video_frame_total(state_manager.get_item('target_path'))
		preview_frame_slider_options['visible'] = True
	PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_options)
	with gradio.Row():
		PREVIEW_MODE_DROPDOWN = gradio.Dropdown(
			label = translator.get('uis.preview_mode_dropdown'),
			value = uis_choices.preview_modes[0],
			choices = uis_choices.preview_modes,
			visible = True
		)
		PREVIEW_RESOLUTION_DROPDOWN = gradio.Dropdown(
			label = translator.get('uis.preview_resolution_dropdown'),
			value = uis_choices.preview_resolutions[-1],
			choices = uis_choices.preview_resolutions,
			visible = True
		)
	# expose the components so other modules can attach listeners
	register_ui_component('preview_mode_dropdown', PREVIEW_MODE_DROPDOWN)
	register_ui_component('preview_resolution_dropdown', PREVIEW_RESOLUTION_DROPDOWN)
	register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
def listen() -> None:
	"""Refresh the preview frame slider whenever the target image or video changes."""
	for ui_component in get_ui_components([ 'target_image', 'target_video' ]):
		ui_component.change(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
		ui_component.clear(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
def update_preview_frame_slider() -> gradio.Slider:
	"""Size and show the slider for video targets; reset and hide it otherwise."""
	target_path = state_manager.get_item('target_path')
	if not is_video(target_path):
		return gradio.Slider(value = 0, visible = False)
	return gradio.Slider(maximum = count_video_frame_total(target_path), visible = True)
-49
View File
@@ -1,49 +0,0 @@
from typing import List, Optional
import gradio
from facefusion import state_manager, translator
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules
from facefusion.uis.core import register_ui_component
PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
def render() -> None:
	"""Render the processor selection checkbox group and register it for other modules."""
	global PROCESSORS_CHECKBOX_GROUP
	active_processors = state_manager.get_item('processors')
	PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = translator.get('uis.processors_checkbox_group'),
		choices = sort_processors(active_processors),
		value = active_processors
	)
	register_ui_component('processors_checkbox_group', PROCESSORS_CHECKBOX_GROUP)
def listen() -> None:
	"""Wire the checkbox group so selection changes flow through update_processors."""
	PROCESSORS_CHECKBOX_GROUP.change(update_processors, inputs = PROCESSORS_CHECKBOX_GROUP, outputs = PROCESSORS_CHECKBOX_GROUP)
def update_processors(processors : List[str]) -> gradio.CheckboxGroup:
	"""Swap the active processors, clearing old inference pools and validating the new set."""
	# release resources held by the previously selected processors
	for previous_module in get_processors_modules(state_manager.get_item('processors')):
		if hasattr(previous_module, 'clear_inference_pool'):
			previous_module.clear_inference_pool()
	# abort the switch when any requested processor fails its pre check
	for requested_module in get_processors_modules(processors):
		if not requested_module.pre_check():
			return gradio.CheckboxGroup()
	state_manager.set_item('processors', processors)
	current_processors = state_manager.get_item('processors')
	return gradio.CheckboxGroup(value = current_processors, choices = sort_processors(current_processors))
def sort_processors(processors : List[str]) -> List[str]:
	"""Return all available processors with the given ones first, without duplicates."""
	available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
	# order-preserving dedup of requested + available, restricted to what actually exists
	ordered_processors = list(dict.fromkeys(processors + available_processors))
	return [ processor for processor in ordered_processors if processor in available_processors ]
-61
View File
@@ -1,61 +0,0 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import get_first
from facefusion.filesystem import filter_audio_paths, filter_image_paths, has_audio, has_image
from facefusion.uis.core import register_ui_component
from facefusion.uis.types import File
SOURCE_FILE : Optional[gradio.File] = None
SOURCE_AUDIO : Optional[gradio.Audio] = None
SOURCE_IMAGE : Optional[gradio.Image] = None
def render() -> None:
	"""Render the source file picker plus the matching audio / image previews."""
	global SOURCE_FILE
	global SOURCE_AUDIO
	global SOURCE_IMAGE
	has_source_audio = has_audio(state_manager.get_item('source_paths'))
	has_source_image = has_image(state_manager.get_item('source_paths'))
	SOURCE_FILE = gradio.File(
		label = translator.get('uis.source_file'),
		file_count = 'multiple',
		value = state_manager.get_item('source_paths') if has_source_audio or has_source_image else None
	)
	# derive the preview paths from the file picker value rather than raw state
	source_file_names = [ source_file_value.get('path') for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None
	source_audio_path = get_first(filter_audio_paths(source_file_names))
	source_image_path = get_first(filter_image_paths(source_file_names))
	SOURCE_AUDIO = gradio.Audio(
		value = source_audio_path if has_source_audio else None,
		visible = has_source_audio,
		show_label = False
	)
	SOURCE_IMAGE = gradio.Image(
		value = source_image_path if has_source_image else None,
		visible = has_source_image,
		show_label = False
	)
	register_ui_component('source_audio', SOURCE_AUDIO)
	register_ui_component('source_image', SOURCE_IMAGE)
def listen() -> None:
	"""Route file picker changes into update() to refresh the audio / image previews."""
	SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = [ SOURCE_AUDIO, SOURCE_IMAGE ])
def update(files : List[File]) -> Tuple[gradio.Audio, gradio.Image]:
	"""Sync the source paths state with the uploaded files and toggle the previews."""
	file_names = [ file.name for file in files ] if files else None
	has_source_audio = has_audio(file_names)
	has_source_image = has_image(file_names)
	# nothing usable uploaded: clear the state and hide both previews
	if not has_source_audio and not has_source_image:
		state_manager.clear_item('source_paths')
		return gradio.Audio(value = None, visible = False), gradio.Image(value = None, visible = False)
	state_manager.set_item('source_paths', file_names)
	audio_preview = gradio.Audio(value = get_first(filter_audio_paths(file_names)), visible = has_source_audio)
	image_preview = gradio.Image(value = get_first(filter_image_paths(file_names)), visible = has_source_image)
	return audio_preview, image_preview
-65
View File
@@ -1,65 +0,0 @@
from typing import Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.face_store import clear_static_faces
from facefusion.filesystem import is_image, is_video
from facefusion.uis.core import register_ui_component
from facefusion.uis.types import ComponentOptions, File
TARGET_FILE : Optional[gradio.File] = None
TARGET_IMAGE : Optional[gradio.Image] = None
TARGET_VIDEO : Optional[gradio.Video] = None
def render() -> None:
	"""Render the target file picker plus the matching image / video previews."""
	global TARGET_FILE
	global TARGET_IMAGE
	global TARGET_VIDEO
	is_target_image = is_image(state_manager.get_item('target_path'))
	is_target_video = is_video(state_manager.get_item('target_path'))
	TARGET_FILE = gradio.File(
		label = translator.get('uis.target_file'),
		value = state_manager.get_item('target_path') if is_target_image or is_target_video else None
	)
	target_image_options : ComponentOptions =\
	{
		'show_label': False,
		'visible': False
	}
	target_video_options : ComponentOptions =\
	{
		'show_label': False,
		'visible': False
	}
	# only the preview that matches the target media type becomes visible
	if is_target_image:
		target_image_options['value'] = TARGET_FILE.value.get('path')
		target_image_options['visible'] = True
	if is_target_video:
		target_video_options['value'] = TARGET_FILE.value.get('path')
		target_video_options['visible'] = True
	TARGET_IMAGE = gradio.Image(**target_image_options)
	TARGET_VIDEO = gradio.Video(**target_video_options)
	register_ui_component('target_image', TARGET_IMAGE)
	register_ui_component('target_video', TARGET_VIDEO)
def listen() -> None:
	"""Route file picker changes into update() to refresh the image / video previews."""
	TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
def update(file : File) -> Tuple[gradio.Image, gradio.Video]:
	"""Update the target path state from the uploaded file and toggle the previews."""
	# a new target invalidates any cached face detections
	clear_static_faces()
	if file:
		if is_image(file.name):
			state_manager.set_item('target_path', file.name)
			return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False)
		if is_video(file.name):
			state_manager.set_item('target_path', file.name)
			return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True)
	state_manager.clear_item('target_path')
	return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False)
-42
View File
@@ -1,42 +0,0 @@
from typing import Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator
from facefusion.filesystem import is_video
from facefusion.types import TempFrameFormat
from facefusion.uis.core import get_ui_component
TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
def render() -> None:
	"""Render the temp frame format dropdown, visible only when a video target is set."""
	global TEMP_FRAME_FORMAT_DROPDOWN
	TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
		value = state_manager.get_item('temp_frame_format'),
		label = translator.get('uis.temp_frame_format_dropdown'),
		choices = facefusion.choices.temp_frame_formats,
		visible = is_video(state_manager.get_item('target_path'))
	)
def listen() -> None:
	"""Persist dropdown changes and retoggle visibility when the target video changes."""
	TEMP_FRAME_FORMAT_DROPDOWN.change(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN)
	target_video = get_ui_component('target_video')
	if target_video:
		target_video.change(remote_update, outputs = TEMP_FRAME_FORMAT_DROPDOWN)
		target_video.clear(remote_update, outputs = TEMP_FRAME_FORMAT_DROPDOWN)
def remote_update() -> gradio.Dropdown:
	"""Show the dropdown only while the target is a video."""
	return gradio.Dropdown(visible = is_video(state_manager.get_item('target_path')))
def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> None:
	"""Persist the chosen temp frame format into the application state."""
	state_manager.set_item('temp_frame_format', temp_frame_format)
-80
View File
@@ -1,80 +0,0 @@
import io
import logging
import math
import os
from typing import Optional
import gradio
from tqdm import tqdm
import facefusion.choices
from facefusion import logger, state_manager, translator
from facefusion.types import LogLevel
LOG_LEVEL_DROPDOWN : Optional[gradio.Dropdown] = None
TERMINAL_TEXTBOX : Optional[gradio.Textbox] = None
LOG_BUFFER = io.StringIO()
LOG_HANDLER = logging.StreamHandler(LOG_BUFFER)
TQDM_UPDATE = tqdm.update
def render() -> None:
	"""Render the log level dropdown and the self-refreshing terminal textbox."""
	global LOG_LEVEL_DROPDOWN
	global TERMINAL_TEXTBOX
	LOG_LEVEL_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.log_level_dropdown'),
		choices = facefusion.choices.log_levels,
		value = state_manager.get_item('log_level')
	)
	TERMINAL_TEXTBOX = gradio.Textbox(
		label = translator.get('uis.terminal_textbox'),
		value = read_logs, # callable value: gradio polls it on each refresh
		lines = 8,
		max_lines = 8,
		every = 0.5, # refresh twice per second
		show_copy_button = True
	)
def listen() -> None:
	"""Wire the dropdown and hook the package logger and tqdm into the log buffer."""
	LOG_LEVEL_DROPDOWN.change(update_log_level, inputs = LOG_LEVEL_DROPDOWN)
	logger.get_package_logger().addHandler(LOG_HANDLER)
	# monkeypatch tqdm globally so progress updates are mirrored into the textbox
	tqdm.update = tqdm_update
def update_log_level(log_level : LogLevel) -> None:
	"""Persist the chosen log level and re-initialize the logger with it."""
	state_manager.set_item('log_level', log_level)
	logger.init(state_manager.get_item('log_level'))
def tqdm_update(self : tqdm, n : int = 1) -> None:
	"""Wrap tqdm.update so each progress tick is mirrored into the terminal log buffer."""
	# delegate to the original (unpatched) tqdm.update first
	TQDM_UPDATE(self, n)
	output = create_tqdm_output(self)
	if output:
		LOG_BUFFER.seek(0)
		log_buffer = LOG_BUFFER.read()
		lines = log_buffer.splitlines()
		# overwrite the previous progress line of the same bar in place
		# instead of appending a new line per tick
		if lines and lines[-1].startswith(self.desc):
			position = log_buffer.rfind(lines[-1])
			LOG_BUFFER.seek(position)
		else:
			LOG_BUFFER.seek(0, os.SEEK_END)
		LOG_BUFFER.write(output + os.linesep)
		LOG_BUFFER.flush()
def create_tqdm_output(self : tqdm) -> Optional[str]:
	"""Format a single progress line for the terminal textbox, or None when not applicable."""
	if self.disable or not self.desc:
		return None
	prefix = self.desc + translator.get('colon') + ' '
	if self.total:
		percentage = math.floor(self.n / self.total * 100)
		return prefix + str(percentage) + '% (' + str(self.n) + '/' + str(self.total) + ')'
	if self.unit:
		return prefix + str(self.n) + ' ' + self.unit
	return None
def read_logs() -> str:
	"""Return the accumulated log buffer contents without surrounding whitespace."""
	LOG_BUFFER.seek(0)
	return LOG_BUFFER.read().strip()
-62
View File
@@ -1,62 +0,0 @@
from typing import Optional, Tuple
from gradio_rangeslider import RangeSlider
from facefusion import state_manager, translator
from facefusion.face_store import clear_static_faces
from facefusion.filesystem import is_video
from facefusion.uis.core import get_ui_components
from facefusion.uis.types import ComponentOptions
from facefusion.vision import count_video_frame_total
TRIM_FRAME_RANGE_SLIDER : Optional[RangeSlider] = None
def render() -> None:
	"""Render the trim frame range slider, pre-filled from state for video targets."""
	global TRIM_FRAME_RANGE_SLIDER
	trim_frame_range_slider_options : ComponentOptions =\
	{
		'label': translator.get('uis.trim_frame_slider'),
		'minimum': 0,
		'step': 1,
		'visible': False
	}
	if is_video(state_manager.get_item('target_path')):
		video_frame_total = count_video_frame_total(state_manager.get_item('target_path'))
		# missing trim boundaries default to the full video range
		trim_frame_start = state_manager.get_item('trim_frame_start') or 0
		trim_frame_end = state_manager.get_item('trim_frame_end') or video_frame_total
		trim_frame_range_slider_options['maximum'] = video_frame_total
		trim_frame_range_slider_options['value'] = (trim_frame_start, trim_frame_end)
		trim_frame_range_slider_options['visible'] = True
	TRIM_FRAME_RANGE_SLIDER = RangeSlider(**trim_frame_range_slider_options)
def listen() -> None:
	"""Persist slider releases and refresh the slider whenever the target changes."""
	TRIM_FRAME_RANGE_SLIDER.release(update_trim_frame, inputs = TRIM_FRAME_RANGE_SLIDER)
	for ui_component in get_ui_components([ 'target_image', 'target_video' ]):
		ui_component.change(remote_update, outputs = [ TRIM_FRAME_RANGE_SLIDER ])
		ui_component.clear(remote_update, outputs = [ TRIM_FRAME_RANGE_SLIDER ])
def remote_update() -> RangeSlider:
	"""Reset the trim range for a new video target, or hide the slider."""
	target_path = state_manager.get_item('target_path')
	if not is_video(target_path):
		return RangeSlider(visible = False)
	video_frame_total = count_video_frame_total(target_path)
	# a fresh target starts untrimmed
	state_manager.clear_item('trim_frame_start')
	state_manager.clear_item('trim_frame_end')
	return RangeSlider(value = (0, video_frame_total), maximum = video_frame_total, visible = True)
def update_trim_frame(trim_frame : Tuple[float, float]) -> None:
	"""Store the trimmed frame range, using None for boundaries at the video edges."""
	clear_static_faces()
	trim_frame_start, trim_frame_end = trim_frame
	video_frame_total = count_video_frame_total(state_manager.get_item('target_path'))
	state_manager.set_item('trim_frame_start', int(trim_frame_start) if trim_frame_start > 0 else None)
	state_manager.set_item('trim_frame_end', int(trim_frame_end) if trim_frame_end < video_frame_total else None)
-21
View File
@@ -1,21 +0,0 @@
from typing import Optional
import gradio
import facefusion
from facefusion import state_manager, translator
from facefusion.uis.core import register_ui_component
UI_WORKFLOW_DROPDOWN : Optional[gradio.Dropdown] = None
def render() -> None:
	"""Render the UI workflow dropdown and register it for other components."""
	global UI_WORKFLOW_DROPDOWN
	# NOTE(review): this module only does `import facefusion`, yet accesses
	# facefusion.choices — presumably the submodule is imported elsewhere
	# before render() runs; confirm against the application bootstrap
	UI_WORKFLOW_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.ui_workflow'),
		choices = facefusion.choices.ui_workflows,
		value = state_manager.get_item('ui_workflow'),
		interactive = True
	)
	register_ui_component('ui_workflow_dropdown', UI_WORKFLOW_DROPDOWN)
@@ -1,50 +0,0 @@
from typing import Optional
import gradio
import facefusion.choices
from facefusion import state_manager, translator, voice_extractor
from facefusion.filesystem import is_video
from facefusion.types import VoiceExtractorModel
from facefusion.uis.core import get_ui_components, register_ui_component
VOICE_EXTRACTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
def render() -> None:
	"""Render the voice extractor model dropdown, visible only for video targets."""
	global VOICE_EXTRACTOR_MODEL_DROPDOWN
	VOICE_EXTRACTOR_MODEL_DROPDOWN = gradio.Dropdown(
		value = state_manager.get_item('voice_extractor_model'),
		label = translator.get('uis.voice_extractor_model_dropdown'),
		choices = facefusion.choices.voice_extractor_models,
		visible = is_video(state_manager.get_item('target_path'))
	)
	register_ui_component('voice_extractor_model_dropdown', VOICE_EXTRACTOR_MODEL_DROPDOWN)
def listen() -> None:
	"""Wire the model dropdown and retoggle its visibility on target changes."""
	VOICE_EXTRACTOR_MODEL_DROPDOWN.change(update_voice_extractor_model, inputs = VOICE_EXTRACTOR_MODEL_DROPDOWN, outputs = VOICE_EXTRACTOR_MODEL_DROPDOWN)
	for ui_component in get_ui_components([ 'target_image', 'target_video' ]):
		ui_component.change(remote_update, outputs = VOICE_EXTRACTOR_MODEL_DROPDOWN)
		ui_component.clear(remote_update, outputs = VOICE_EXTRACTOR_MODEL_DROPDOWN)
def remote_update() -> gradio.Dropdown:
	"""Show the dropdown only while the target is a video."""
	return gradio.Dropdown(visible = is_video(state_manager.get_item('target_path')))
def update_voice_extractor_model(voice_extractor_model : VoiceExtractorModel) -> gradio.Dropdown:
	"""Switch the voice extractor model and return the updated dropdown.

	Clears the current inference pool, stores the new model and runs the
	pre check; an empty dropdown update is returned when the pre check fails.
	"""
	voice_extractor.clear_inference_pool()
	state_manager.set_item('voice_extractor_model', voice_extractor_model)
	if voice_extractor.pre_check():
		# BUG FIX: the updated dropdown was constructed but never returned, so
		# the UI received an empty update even on success (compare the
		# update_processors handler, which returns the updated component)
		return gradio.Dropdown(value = state_manager.get_item('voice_extractor_model'))
	return gradio.Dropdown()
-115
View File
@@ -1,115 +0,0 @@
from typing import Iterator, List, Optional, Tuple
import cv2
import gradio
from facefusion import state_manager, translator
from facefusion.camera_manager import clear_camera_pool, get_local_camera_capture
from facefusion.filesystem import has_image
from facefusion.streamer import multi_process_capture, open_stream
from facefusion.types import Fps, VisionFrame, WebcamMode
from facefusion.uis.core import get_ui_component
from facefusion.uis.types import File
from facefusion.vision import unpack_resolution
SOURCE_FILE : Optional[gradio.File] = None
WEBCAM_IMAGE : Optional[gradio.Image] = None
WEBCAM_START_BUTTON : Optional[gradio.Button] = None
WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None
def render() -> None:
	"""Render the webcam source picker, preview image and start / stop buttons."""
	global SOURCE_FILE
	global WEBCAM_IMAGE
	global WEBCAM_START_BUTTON
	global WEBCAM_STOP_BUTTON
	has_source_image = has_image(state_manager.get_item('source_paths'))
	SOURCE_FILE = gradio.File(
		label = translator.get('uis.source_file'),
		file_count = 'multiple',
		value = state_manager.get_item('source_paths') if has_source_image else None
	)
	# hidden until a stream starts; pre_start() / pre_stop() toggle visibility
	WEBCAM_IMAGE = gradio.Image(
		label = translator.get('uis.webcam_image'),
		format = 'jpeg',
		visible = False
	)
	WEBCAM_START_BUTTON = gradio.Button(
		value = translator.get('uis.start_button'),
		variant = 'primary',
		size = 'sm'
	)
	WEBCAM_STOP_BUTTON = gradio.Button(
		value = translator.get('uis.stop_button'),
		size = 'sm',
		visible = False
	)
def listen() -> None:
	"""Wire the webcam controls; the stream runs as a cancellable click event chain."""
	SOURCE_FILE.change(update_source, inputs = SOURCE_FILE, outputs = SOURCE_FILE)
	webcam_device_id_dropdown = get_ui_component('webcam_device_id_dropdown')
	webcam_mode_radio = get_ui_component('webcam_mode_radio')
	webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown')
	webcam_fps_slider = get_ui_component('webcam_fps_slider')
	if webcam_device_id_dropdown and webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider:
		WEBCAM_START_BUTTON.click(pre_start, outputs = [ SOURCE_FILE, WEBCAM_IMAGE, WEBCAM_START_BUTTON, WEBCAM_STOP_BUTTON ])
		# keep a handle on the generator event so the stop button can cancel it
		start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_device_id_dropdown, webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE)
		# restore the idle UI when the stream generator finishes on its own
		start_event.then(pre_stop)
		WEBCAM_STOP_BUTTON.click(stop, cancels = start_event, outputs = WEBCAM_IMAGE)
		WEBCAM_STOP_BUTTON.click(pre_stop, outputs = [ SOURCE_FILE, WEBCAM_IMAGE, WEBCAM_START_BUTTON, WEBCAM_STOP_BUTTON ])
def update_source(files : List[File]) -> gradio.File:
	"""Sync the source paths state with the uploaded image files."""
	file_names = [ file.name for file in files ] if files else None
	if not has_image(file_names):
		state_manager.clear_item('source_paths')
		return gradio.File(value = None)
	state_manager.set_item('source_paths', file_names)
	return gradio.File(value = file_names)
def pre_start() -> Tuple[gradio.File, gradio.Image, gradio.Button, gradio.Button]:
	"""Switch the UI into streaming mode: hide the picker and start button, show image and stop."""
	return gradio.File(visible = False), gradio.Image(visible = True), gradio.Button(visible = False), gradio.Button(visible = True)
def pre_stop() -> Tuple[gradio.File, gradio.Image, gradio.Button, gradio.Button]:
	"""Restore the idle UI after the webcam stream stops."""
	return gradio.File(visible = True), gradio.Image(visible = False), gradio.Button(visible = True), gradio.Button(visible = False)
def start(webcam_device_id : int, webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) -> Iterator[VisionFrame]:
	"""Stream processed webcam frames inline, or pipe them to a UDP / v4l2 stream.

	Yields RGB frames only in 'inline' mode; other modes write raw bytes to
	the external stream process instead.
	"""
	state_manager.init_item('face_selector_mode', 'one')
	state_manager.sync_state()
	camera_capture = get_local_camera_capture(webcam_device_id)
	stream = None
	if webcam_mode in [ 'udp', 'v4l2' ]:
		stream = open_stream(webcam_mode, webcam_resolution, webcam_fps) # type:ignore[arg-type]
	webcam_width, webcam_height = unpack_resolution(webcam_resolution)
	if camera_capture and camera_capture.isOpened():
		camera_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
		camera_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
		camera_capture.set(cv2.CAP_PROP_FPS, webcam_fps)
	# NOTE(review): camera_capture may be None here and is passed on regardless —
	# presumably multi_process_capture tolerates that; confirm
	for capture_frame in multi_process_capture(camera_capture, webcam_fps):
		capture_frame = cv2.cvtColor(capture_frame, cv2.COLOR_BGR2RGB)
		if webcam_mode == 'inline':
			yield capture_frame
		else:
			# best-effort write; the stream process may have gone away
			try:
				stream.stdin.write(capture_frame.tobytes())
			except Exception:
				pass
def stop() -> gradio.Image:
	"""Release all camera handles and clear the webcam preview image."""
	clear_camera_pool()
	return gradio.Image(value = None)
@@ -1,49 +0,0 @@
from typing import Optional
import gradio
from facefusion import translator
from facefusion.camera_manager import detect_local_camera_ids
from facefusion.common_helper import get_first
from facefusion.uis import choices as uis_choices
from facefusion.uis.core import register_ui_component
WEBCAM_DEVICE_ID_DROPDOWN : Optional[gradio.Dropdown] = None
WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None
WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
	"""Render the webcam device, mode, resolution and FPS controls and register them."""
	global WEBCAM_DEVICE_ID_DROPDOWN
	global WEBCAM_MODE_RADIO
	global WEBCAM_RESOLUTION_DROPDOWN
	global WEBCAM_FPS_SLIDER
	# probe the first ten device slots; fall back to a 'none' placeholder
	local_camera_ids = detect_local_camera_ids(0, 10) or [ 'none' ] #type:ignore[list-item]
	WEBCAM_DEVICE_ID_DROPDOWN = gradio.Dropdown(
		value = get_first(local_camera_ids),
		label = translator.get('uis.webcam_device_id_dropdown'),
		choices = local_camera_ids
	)
	WEBCAM_MODE_RADIO = gradio.Radio(
		label = translator.get('uis.webcam_mode_radio'),
		choices = uis_choices.webcam_modes,
		value = uis_choices.webcam_modes[0]
	)
	WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.webcam_resolution_dropdown'),
		choices = uis_choices.webcam_resolutions,
		value = uis_choices.webcam_resolutions[0]
	)
	WEBCAM_FPS_SLIDER = gradio.Slider(
		label = translator.get('uis.webcam_fps_slider'),
		value = 30,
		step = 1,
		minimum = 1,
		maximum = 30
	)
	register_ui_component('webcam_device_id_dropdown', WEBCAM_DEVICE_ID_DROPDOWN)
	register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO)
	register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN)
	register_ui_component('webcam_fps_slider', WEBCAM_FPS_SLIDER)
-198
View File
@@ -1,198 +0,0 @@
import importlib
import os
import warnings
from types import ModuleType
from typing import Any, Dict, List, Optional
import gradio
from gradio.themes import Size
import facefusion.uis.overrides as uis_overrides
from facefusion import logger, metadata, state_manager, translator
from facefusion.exit_helper import hard_exit
from facefusion.filesystem import resolve_relative_path
from facefusion.uis.types import Component, ComponentName
UI_COMPONENTS: Dict[ComponentName, Component] = {}
UI_LAYOUT_MODULES : List[ModuleType] = []
UI_LAYOUT_METHODS =\
[
'pre_check',
'render',
'listen',
'run'
]
def load_ui_layout_module(ui_layout : str) -> Any:
	"""Import a UI layout module by name, hard-exiting on missing or incomplete layouts.

	A layout module must implement all of UI_LAYOUT_METHODS.
	"""
	try:
		ui_layout_module = importlib.import_module('facefusion.uis.layouts.' + ui_layout)
		for method_name in UI_LAYOUT_METHODS:
			if not hasattr(ui_layout_module, method_name):
				raise NotImplementedError
	except ModuleNotFoundError as exception:
		logger.error(translator.get('ui_layout_not_loaded').format(ui_layout = ui_layout), __name__)
		logger.debug(exception.msg, __name__)
		hard_exit(1)
	except NotImplementedError:
		logger.error(translator.get('ui_layout_not_implemented').format(ui_layout = ui_layout), __name__)
		hard_exit(1)
	# hard_exit terminates the process, so this return is only reached on success
	return ui_layout_module
def get_ui_layouts_modules(ui_layouts : List[str]) -> List[ModuleType]:
	"""Load the given layout modules once and serve them from the module cache afterwards."""
	# NOTE(review): the cache ignores the argument on subsequent calls — the
	# first caller's layout list wins; presumably intentional, confirm
	if not UI_LAYOUT_MODULES:
		UI_LAYOUT_MODULES.extend(load_ui_layout_module(ui_layout) for ui_layout in ui_layouts)
	return UI_LAYOUT_MODULES
def get_ui_component(component_name : ComponentName) -> Optional[Component]:
	"""Return the registered component for *component_name*, or None when absent."""
	return UI_COMPONENTS.get(component_name)
def get_ui_components(component_names : List[ComponentName]) -> Optional[List[Component]]:
	"""Return the registered components for the given names, skipping unregistered ones."""
	return [ UI_COMPONENTS[component_name] for component_name in component_names if component_name in UI_COMPONENTS ]
def register_ui_component(component_name : ComponentName, component: Component) -> None:
	"""Store a rendered component under its name for cross-module lookups."""
	UI_COMPONENTS[component_name] = component
def init() -> None:
	"""Prepare the gradio environment and patch away unwanted gradio behaviors."""
	os.environ['GRADIO_ANALYTICS_ENABLED'] = '0'
	os.environ['GRADIO_TEMP_DIR'] = os.path.join(state_manager.get_item('temp_path'), 'gradio')
	warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
	# monkeypatch gradio: disable file access checks, video re-encoding and number bounds
	gradio.processing_utils._check_allowed = uis_overrides.mock
	gradio.processing_utils.convert_video_to_playable_mp4 = uis_overrides.convert_video_to_playable_mp4
	gradio.components.Number.raise_if_out_of_bounds = uis_overrides.mock
def launch() -> None:
	"""Render, wire and run all configured UI layouts inside one gradio Blocks app.

	Layouts are rendered into tabs when more than one is configured. Each
	layout module is loaded exactly once and reused for both the render and
	the run phase, instead of the original duplicated load/render/listen code.
	"""
	ui_layouts = state_manager.get_item('ui_layouts')
	ui_layout_modules = [ load_ui_layout_module(ui_layout) for ui_layout in ui_layouts ]
	with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version'), fill_width = True) as ui:
		for ui_layout, ui_layout_module in zip(ui_layouts, ui_layout_modules):
			if len(ui_layout_modules) > 1:
				# wrap each layout in its own tab when several are active
				with gradio.Tab(ui_layout):
					ui_layout_module.render()
					ui_layout_module.listen()
			else:
				ui_layout_module.render()
				ui_layout_module.listen()
	for ui_layout_module in ui_layout_modules:
		ui_layout_module.run(ui)
def get_theme() -> gradio.Theme:
	"""Build the FaceFusion gradio theme: red primary hue, neutral greys, flat accents."""
	return gradio.themes.Base(
		primary_hue = gradio.themes.colors.red,
		secondary_hue = gradio.themes.Color(
			name = 'neutral',
			c50 = '#fafafa',
			c100 = '#f5f5f5',
			c200 = '#e5e5e5',
			c300 = '#d4d4d4',
			c400 = '#a3a3a3',
			c500 = '#737373',
			c600 = '#525252',
			c700 = '#404040',
			c800 = '#262626',
			c900 = '#212121',
			c950 = '#171717',
		),
		# uniform corner radius across all size steps
		radius_size = Size(
			xxs = '0.375rem',
			xs = '0.375rem',
			sm = '0.375rem',
			md = '0.375rem',
			lg = '0.375rem',
			xl = '0.375rem',
			xxl = '0.375rem',
		),
		font = gradio.themes.GoogleFont('Open Sans')
	).set(
		color_accent = 'transparent',
		color_accent_soft = 'transparent',
		color_accent_soft_dark = 'transparent',
		background_fill_primary = '*neutral_100',
		background_fill_primary_dark = '*neutral_950',
		background_fill_secondary = '*neutral_50',
		background_fill_secondary_dark = '*neutral_800',
		block_background_fill = 'white',
		block_background_fill_dark = '*neutral_900',
		block_border_width = '0',
		block_label_background_fill = '*neutral_100',
		block_label_background_fill_dark = '*neutral_800',
		block_label_border_width = 'none',
		block_label_margin = '0.5rem',
		block_label_radius = '*radius_md',
		block_label_text_color = '*neutral_700',
		block_label_text_size = '*text_sm',
		block_label_text_color_dark = 'white',
		block_label_text_weight = '600',
		block_title_background_fill = '*neutral_100',
		block_title_background_fill_dark = '*neutral_800',
		block_title_padding = '*block_label_padding',
		block_title_radius = '*block_label_radius',
		block_title_text_color = '*neutral_700',
		block_title_text_size = '*text_sm',
		block_title_text_weight = '600',
		block_padding = '0.5rem',
		border_color_accent = 'transparent',
		border_color_accent_dark = 'transparent',
		border_color_accent_subdued = 'transparent',
		border_color_accent_subdued_dark = 'transparent',
		border_color_primary = 'transparent',
		border_color_primary_dark = 'transparent',
		button_large_padding = '2rem 0.5rem',
		button_large_text_weight = 'normal',
		button_primary_background_fill = '*primary_500',
		button_primary_background_fill_dark = '*primary_600',
		button_primary_text_color = 'white',
		button_secondary_background_fill = 'white',
		button_secondary_background_fill_dark = '*neutral_800',
		button_secondary_background_fill_hover = 'white',
		button_secondary_background_fill_hover_dark = '*neutral_800',
		button_secondary_text_color = '*neutral_800',
		button_small_padding = '0.75rem',
		button_small_text_size = '0.875rem',
		checkbox_background_color = '*neutral_200',
		checkbox_background_color_dark = '*neutral_900',
		checkbox_background_color_selected = '*primary_600',
		checkbox_background_color_selected_dark = '*primary_700',
		checkbox_label_background_fill = '*neutral_50',
		checkbox_label_background_fill_dark = '*neutral_800',
		checkbox_label_background_fill_hover = '*neutral_50',
		checkbox_label_background_fill_hover_dark = '*neutral_800',
		checkbox_label_background_fill_selected = '*primary_500',
		checkbox_label_background_fill_selected_dark = '*primary_600',
		checkbox_label_text_color_selected = 'white',
		error_background_fill = 'white',
		error_background_fill_dark = '*neutral_900',
		error_text_color = '*primary_500',
		error_text_color_dark = '*primary_600',
		input_background_fill = '*neutral_50',
		input_background_fill_dark = '*neutral_800',
		shadow_drop = 'none',
		slider_color = '*primary_500',
		slider_color_dark = '*primary_600'
	)
def get_css() -> str:
	"""Read and return the CSS overrides for the gradio UI.

	Uses a context manager so the file handle is closed deterministically
	(the original leaked the handle by calling open().read()).
	"""
	overrides_css_path = resolve_relative_path('uis/assets/overrides.css')
	with open(overrides_css_path) as overrides_css_file:
		return overrides_css_file.read()
-79
View File
@@ -1,79 +0,0 @@
import gradio
from facefusion import benchmarker, state_manager
from facefusion.uis.components import about, age_modifier_options, background_remover_options, benchmark, benchmark_options, deep_swapper_options, download, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors
def pre_check() -> bool:
	"""Ensure the benchmarker prerequisites are available before the layout is used."""
	return benchmarker.pre_check()
def render() -> gradio.Blocks:
	"""Render the benchmark layout: options column on the left, benchmark panel on the right."""
	with gradio.Blocks() as layout:
		with gradio.Row():
			with gradio.Column(scale = 4):
				with gradio.Blocks():
					about.render()
				with gradio.Blocks():
					benchmark_options.render()
				with gradio.Blocks():
					processors.render()
				with gradio.Blocks():
					age_modifier_options.render()
				with gradio.Blocks():
					background_remover_options.render()
				with gradio.Blocks():
					deep_swapper_options.render()
				with gradio.Blocks():
					expression_restorer_options.render()
				with gradio.Blocks():
					face_debugger_options.render()
				with gradio.Blocks():
					face_editor_options.render()
				with gradio.Blocks():
					face_enhancer_options.render()
				with gradio.Blocks():
					face_swapper_options.render()
				with gradio.Blocks():
					frame_colorizer_options.render()
				with gradio.Blocks():
					frame_enhancer_options.render()
				with gradio.Blocks():
					lip_syncer_options.render()
				with gradio.Blocks():
					execution.render()
					execution_thread_count.render()
				with gradio.Blocks():
					download.render()
				with gradio.Blocks():
					# benchmarking always runs with the tolerant memory strategy
					state_manager.set_item('video_memory_strategy', 'tolerant')
					memory.render()
			with gradio.Column(scale = 11):
				with gradio.Blocks():
					benchmark.render()
	return layout
def listen() -> None:
	"""Attach the event listeners of every component used by the benchmark layout."""
	processors.listen()
	age_modifier_options.listen()
	background_remover_options.listen()
	deep_swapper_options.listen()
	expression_restorer_options.listen()
	download.listen()
	face_debugger_options.listen()
	face_editor_options.listen()
	face_enhancer_options.listen()
	face_swapper_options.listen()
	frame_colorizer_options.listen()
	frame_enhancer_options.listen()
	lip_syncer_options.listen()
	execution.listen()
	execution_thread_count.listen()
	memory.listen()
	benchmark.listen()
	benchmark_options.listen()
def run(ui : gradio.Blocks) -> None:
	"""Launch the gradio app, optionally opening the browser per configuration."""
	ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser'))
-125
View File
@@ -1,125 +0,0 @@
import gradio
from facefusion import state_manager
from facefusion.uis.components import about, age_modifier_options, background_remover_options, common_options, deep_swapper_options, download, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, preview_options, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow, voice_extractor
def pre_check() -> bool:
	"""The default layout has no extra prerequisites."""
	return True
def render() -> gradio.Blocks:
	# Compose the default three column layout: options (left), job controls (middle), preview (right).
	# NOTE(review): nesting depth reconstructed from a whitespace-stripped source — confirm against history.
	with gradio.Blocks() as layout:
		with gradio.Row():
			with gradio.Column(scale = 4):
				with gradio.Blocks():
					about.render()
				with gradio.Blocks():
					processors.render()
				with gradio.Blocks():
					age_modifier_options.render()
				with gradio.Blocks():
					background_remover_options.render()
				with gradio.Blocks():
					deep_swapper_options.render()
				with gradio.Blocks():
					expression_restorer_options.render()
				with gradio.Blocks():
					face_debugger_options.render()
				with gradio.Blocks():
					face_editor_options.render()
				with gradio.Blocks():
					face_enhancer_options.render()
				with gradio.Blocks():
					face_swapper_options.render()
				with gradio.Blocks():
					frame_colorizer_options.render()
				with gradio.Blocks():
					frame_enhancer_options.render()
				with gradio.Blocks():
					lip_syncer_options.render()
				with gradio.Blocks():
					voice_extractor.render()
				with gradio.Blocks():
					execution.render()
					execution_thread_count.render()
				with gradio.Blocks():
					download.render()
				with gradio.Blocks():
					memory.render()
				with gradio.Blocks():
					temp_frame.render()
				with gradio.Blocks():
					output_options.render()
			with gradio.Column(scale = 4):
				with gradio.Blocks():
					source.render()
				with gradio.Blocks():
					target.render()
				with gradio.Blocks():
					output.render()
				with gradio.Blocks():
					terminal.render()
				with gradio.Blocks():
					ui_workflow.render()
					instant_runner.render()
					job_runner.render()
					job_manager.render()
			with gradio.Column(scale = 7):
				with gradio.Blocks():
					preview.render()
					preview_options.render()
				with gradio.Blocks():
					trim_frame.render()
				with gradio.Blocks():
					face_selector.render()
				with gradio.Blocks():
					face_masker.render()
				with gradio.Blocks():
					face_detector.render()
				with gradio.Blocks():
					face_landmarker.render()
				with gradio.Blocks():
					common_options.render()
	return layout
def listen() -> None:
	# Register the event handlers of every component rendered by the default layout.
	processors.listen()
	age_modifier_options.listen()
	background_remover_options.listen()
	deep_swapper_options.listen()
	expression_restorer_options.listen()
	face_debugger_options.listen()
	face_editor_options.listen()
	face_enhancer_options.listen()
	face_swapper_options.listen()
	frame_colorizer_options.listen()
	frame_enhancer_options.listen()
	lip_syncer_options.listen()
	execution.listen()
	execution_thread_count.listen()
	download.listen()
	memory.listen()
	temp_frame.listen()
	output_options.listen()
	source.listen()
	target.listen()
	output.listen()
	instant_runner.listen()
	job_runner.listen()
	job_manager.listen()
	terminal.listen()
	preview.listen()
	preview_options.listen()
	trim_frame.listen()
	face_selector.listen()
	face_masker.listen()
	face_detector.listen()
	face_landmarker.listen()
	voice_extractor.listen()
	common_options.listen()
def run(ui : gradio.Blocks) -> None:
	# Launch the Gradio app; the 'open_browser' state item controls whether a browser tab opens.
	ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser'))
-31
View File
@@ -1,31 +0,0 @@
import gradio
from facefusion import state_manager
from facefusion.uis.components import about, job_list, job_list_options
def pre_check() -> bool:
	# This layout has no prerequisites to verify before rendering.
	return True
def render() -> gradio.Blocks:
	# Compose the jobs layout: filter options (left) and the job list (right).
	# NOTE(review): nesting depth reconstructed from a whitespace-stripped source — confirm against history.
	with gradio.Blocks() as layout:
		with gradio.Row():
			with gradio.Column(scale = 4):
				with gradio.Blocks():
					about.render()
				with gradio.Blocks():
					job_list_options.render()
			with gradio.Column(scale = 11):
				with gradio.Blocks():
					job_list.render()
	return layout
def listen() -> None:
	# Register the event handlers of the job list components.
	job_list_options.listen()
	job_list.listen()
def run(ui : gradio.Blocks) -> None:
	# Launch the Gradio app; the 'open_browser' state item controls whether a browser tab opens.
	ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser'))
-74
View File
@@ -1,74 +0,0 @@
import gradio
from facefusion import state_manager
from facefusion.uis.components import about, age_modifier_options, background_remover_options, deep_swapper_options, download, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, webcam, webcam_options
def pre_check() -> bool:
	# This layout has no prerequisites to verify before rendering.
	return True
def render() -> gradio.Blocks:
	# Compose the webcam layout: processor options (left) and the live webcam feed (right).
	# NOTE(review): nesting depth reconstructed from a whitespace-stripped source — confirm against history.
	with gradio.Blocks() as layout:
		with gradio.Row():
			with gradio.Column(scale = 4):
				with gradio.Blocks():
					about.render()
				with gradio.Blocks():
					webcam_options.render()
				with gradio.Blocks():
					processors.render()
				with gradio.Blocks():
					age_modifier_options.render()
				with gradio.Blocks():
					background_remover_options.render()
				with gradio.Blocks():
					deep_swapper_options.render()
				with gradio.Blocks():
					expression_restorer_options.render()
				with gradio.Blocks():
					face_debugger_options.render()
				with gradio.Blocks():
					face_editor_options.render()
				with gradio.Blocks():
					face_enhancer_options.render()
				with gradio.Blocks():
					face_swapper_options.render()
				with gradio.Blocks():
					frame_colorizer_options.render()
				with gradio.Blocks():
					frame_enhancer_options.render()
				with gradio.Blocks():
					lip_syncer_options.render()
				with gradio.Blocks():
					execution.render()
					execution_thread_count.render()
				with gradio.Blocks():
					download.render()
			with gradio.Column(scale = 11):
				with gradio.Blocks():
					webcam.render()
	return layout
def listen() -> None:
	# Register the event handlers of every component rendered by the webcam layout.
	processors.listen()
	age_modifier_options.listen()
	background_remover_options.listen()
	deep_swapper_options.listen()
	expression_restorer_options.listen()
	download.listen()
	face_debugger_options.listen()
	face_editor_options.listen()
	face_enhancer_options.listen()
	face_swapper_options.listen()
	frame_colorizer_options.listen()
	frame_enhancer_options.listen()
	lip_syncer_options.listen()
	execution.listen()
	execution_thread_count.listen()
	webcam.listen()
def run(ui : gradio.Blocks) -> None:
	# Launch the Gradio app; the 'open_browser' state item controls whether a browser tab opens.
	ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser'))
-31
View File
@@ -1,31 +0,0 @@
from facefusion import ffmpeg_builder
from facefusion.ffmpeg import run_ffmpeg
from facefusion.filesystem import get_file_size
from facefusion.temp_helper import create_temp_directory, get_temp_file_path
from facefusion.uis.types import MockArgs
def convert_video_to_playable_mp4(video_path : str) -> str:
	# Re-encode the given video into the temp directory via FFmpeg and return the new path.
	# Falls back to the original path when the conversion fails.
	video_file_size = get_file_size(video_path)
	max_file_size = 512 * 1024 * 1024 # 512 MB threshold for trimming oversized inputs
	create_temp_directory(video_path)
	temp_video_path = get_temp_file_path(video_path)
	commands = ffmpeg_builder.set_input(video_path)
	if video_file_size > max_file_size:
		# oversized inputs get a capped output duration (presumably 10 seconds — confirm against ffmpeg_builder)
		commands.extend(ffmpeg_builder.set_video_duration(10))
	commands.extend(ffmpeg_builder.force_output(temp_video_path))
	process = run_ffmpeg(commands)
	process.communicate()
	if process.returncode == 0:
		return temp_video_path
	return video_path
def mock(*args : MockArgs, **kwargs : MockArgs) -> None:
	# No-op stand-in that accepts any call signature and does nothing.
	pass
-99
View File
@@ -1,99 +0,0 @@
from typing import Any, Dict, IO, Literal, TypeAlias

# File-like object as exchanged with Gradio upload components.
File : TypeAlias = IO[Any]

# Registry keys for every UI component that can be looked up across layouts.
ComponentName = Literal\
[
	'age_modifier_direction_slider',
	'age_modifier_model_dropdown',
	'background_remover_model_dropdown',
	'background_remover_color_red_number',
	'background_remover_color_green_number',
	'background_remover_color_blue_number',
	'background_remover_color_alpha_number',
	'deep_swapper_model_dropdown',
	'deep_swapper_morph_slider',
	'expression_restorer_factor_slider',
	'expression_restorer_model_dropdown',
	'expression_restorer_areas_checkbox_group',
	'face_debugger_items_checkbox_group',
	'face_detector_angles_checkbox_group',
	'face_detector_model_dropdown',
	'face_detector_margin_slider',
	'face_detector_score_slider',
	'face_detector_size_dropdown',
	'face_editor_eyebrow_direction_slider',
	'face_editor_eye_gaze_horizontal_slider',
	'face_editor_eye_gaze_vertical_slider',
	'face_editor_eye_open_ratio_slider',
	'face_editor_head_pitch_slider',
	'face_editor_head_roll_slider',
	'face_editor_head_yaw_slider',
	'face_editor_lip_open_ratio_slider',
	'face_editor_model_dropdown',
	'face_editor_mouth_grim_slider',
	'face_editor_mouth_position_horizontal_slider',
	'face_editor_mouth_position_vertical_slider',
	'face_editor_mouth_pout_slider',
	'face_editor_mouth_purse_slider',
	'face_editor_mouth_smile_slider',
	'face_enhancer_blend_slider',
	'face_enhancer_model_dropdown',
	'face_enhancer_weight_slider',
	'face_landmarker_model_dropdown',
	'face_landmarker_score_slider',
	'face_mask_types_checkbox_group',
	'face_mask_areas_checkbox_group',
	'face_mask_regions_checkbox_group',
	'face_mask_blur_slider',
	'face_mask_padding_bottom_slider',
	'face_mask_padding_left_slider',
	'face_mask_padding_right_slider',
	'face_mask_padding_top_slider',
	'face_selector_age_range_slider',
	'face_selector_gender_dropdown',
	'face_selector_mode_dropdown',
	'face_selector_order_dropdown',
	'face_selector_race_dropdown',
	'face_swapper_model_dropdown',
	'face_swapper_pixel_boost_dropdown',
	'face_swapper_weight_slider',
	'face_occluder_model_dropdown',
	'face_parser_model_dropdown',
	'voice_extractor_model_dropdown',
	'frame_colorizer_blend_slider',
	'frame_colorizer_model_dropdown',
	'frame_colorizer_size_dropdown',
	'frame_enhancer_blend_slider',
	'frame_enhancer_model_dropdown',
	'job_list_job_status_checkbox_group',
	'lip_syncer_model_dropdown',
	'lip_syncer_weight_slider',
	'output_image',
	'output_video',
	'output_video_fps_slider',
	'preview_image',
	'preview_frame_slider',
	'preview_mode_dropdown',
	'preview_resolution_dropdown',
	'processors_checkbox_group',
	'reference_face_distance_slider',
	'reference_face_position_gallery',
	'source_audio',
	'source_image',
	'target_image',
	'target_video',
	'ui_workflow_dropdown',
	'webcam_device_id_dropdown',
	'webcam_fps_slider',
	'webcam_mode_radio',
	'webcam_resolution_dropdown'
]

# Opaque Gradio component instance (kept as Any to avoid importing gradio here).
Component : TypeAlias = Any
# Keyword options passed through to a Gradio component constructor or update.
ComponentOptions : TypeAlias = Dict[str, Any]

# Actions accepted by the job manager and job runner UI components.
JobManagerAction = Literal['job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step']
JobRunnerAction = Literal['job-run', 'job-run-all', 'job-retry', 'job-retry-all']
# Rendering modes for the preview component.
PreviewMode = Literal[ 'default', 'frame-by-frame', 'face-by-face' ]
# Catch-all type for arguments of mock callbacks.
MockArgs : TypeAlias = Any
-26
View File
@@ -1,26 +0,0 @@
import hashlib
import os
from typing import Optional, Union

from facefusion import state_manager
from facefusion.filesystem import get_file_extension, is_image, is_video
def convert_int_none(value : Union[int, str]) -> Optional[int]:
	# Map the UI sentinel string 'none' to None and pass real values through.
	# The parameter was previously annotated as int, but an int can never equal
	# 'none' — the dropdowns hand over either an int or the literal 'none'.
	if value == 'none':
		return None
	return value
def convert_str_none(value : str) -> Optional[str]:
	# Translate the dropdown sentinel 'none' into a real None value.
	return None if value == 'none' else value
def suggest_output_path(output_directory_path : str, target_path : str) -> Optional[str]:
	# Suggest an output path inside the given directory, deriving a short
	# deterministic file name from the current program state; None for
	# targets that are neither image nor video.
	if not is_image(target_path) and not is_video(target_path):
		return None
	state_digest = hashlib.sha1(str(state_manager.get_state()).encode()).hexdigest()
	suggested_file_name = state_digest[:8] + get_file_extension(target_path)
	return os.path.join(output_directory_path, suggested_file_name)
-2
View File
@@ -1,5 +1,3 @@
gradio-rangeslider==0.0.8
gradio==5.44.1
numpy==2.2.6
onnx==1.19.1
onnxruntime==1.23.2
+2 -2
View File
@@ -25,14 +25,14 @@ def before_each() -> None:
def test_modify_age_to_image() -> None:
	# Run the age modifier against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'age_modifier', '--age-modifier-direction', '100', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-age-face-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-age-face-to-image.jpg') is True
def test_modify_age_to_video() -> None:
	# Run the age modifier against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'age_modifier', '--age-modifier-direction', '100', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-age-face-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-age-face-to-video.mp4') is True
+2 -2
View File
@@ -26,14 +26,14 @@ def before_each() -> None:
def test_remove_background_to_image() -> None:
	# Run the background remover against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'background_remover', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-remove-background-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-remove-background-to-image.jpg') is True
def test_remove_background_to_video() -> None:
	# Run the background remover against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'background_remover', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-remove-background-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-remove-background-to-video.mp4') is True
+2 -2
View File
@@ -25,14 +25,14 @@ def before_each() -> None:
def test_restore_expression_to_image() -> None:
	# Run the expression restorer against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'expression_restorer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-restore-expression-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-restore-expression-to-image.jpg') is True
def test_restore_expression_to_video() -> None:
	# Run the expression restorer against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'expression_restorer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-restore-expression-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-restore-expression-to-video.mp4') is True
+2 -2
View File
@@ -26,14 +26,14 @@ def before_each() -> None:
def test_debug_face_to_image() -> None:
	# Run the face debugger against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-debug-face-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-debug-face-to-image.jpg') is True
def test_debug_face_to_video() -> None:
	# Run the face debugger against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-debug-face-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-debug-face-to-video.mp4') is True
+2 -2
View File
@@ -26,14 +26,14 @@ def before_each() -> None:
def test_edit_face_to_image() -> None:
	# Run the face editor against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_editor', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-edit-face-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-edit-face-to-image.jpg') is True
def test_edit_face_to_video() -> None:
	# Run the face editor against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_editor', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-edit-face-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-edit-face-to-video.mp4') is True
+2 -2
View File
@@ -26,14 +26,14 @@ def before_each() -> None:
def test_enhance_face_to_image() -> None:
	# Run the face enhancer against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-enhance-face-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-enhance-face-to-image.jpg') is True
def test_enhance_face_to_video() -> None:
	# Run the face enhancer against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-enhance-face-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-enhance-face-to-video.mp4') is True
+2 -2
View File
@@ -26,14 +26,14 @@ def before_each() -> None:
def test_swap_face_to_image() -> None:
	# Run the face swapper with a source face against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_swapper', '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-swap-face-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-swap-face-to-image.jpg') is True
def test_swap_face_to_video() -> None:
	# Run the face swapper with a source face against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_swapper', '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-swap-face-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-swap-face-to-video.mp4') is True
+2 -2
View File
@@ -27,14 +27,14 @@ def before_each() -> None:
def test_colorize_frame_to_image() -> None:
	# Run the frame colorizer against a desaturated still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_colorizer', '-t', get_test_example_file('target-240p-0sat.jpg'), '-o', get_test_output_file('test_colorize-frame-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test_colorize-frame-to-image.jpg') is True
def test_colorize_frame_to_video() -> None:
	# Run the frame colorizer against one trimmed desaturated video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_colorizer', '-t', get_test_example_file('target-240p-0sat.mp4'), '-o', get_test_output_file('test-colorize-frame-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-colorize-frame-to-video.mp4') is True
+2 -2
View File
@@ -26,14 +26,14 @@ def before_each() -> None:
def test_enhance_frame_to_image() -> None:
	# Run the frame enhancer against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-enhance-frame-to-image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-enhance-frame-to-image.jpg') is True
def test_enhance_frame_to_video() -> None:
	# Run the frame enhancer against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-enhance-frame-to-video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test-enhance-frame-to-video.mp4') is True
+2 -2
View File
@@ -27,14 +27,14 @@ def before_each() -> None:
def test_sync_lip_to_image() -> None:
	# Run the lip syncer with a source audio against a still image and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'lip_syncer', '-s', get_test_example_file('source.mp3'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test_sync_lip_to_image.jpg') ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test_sync_lip_to_image.jpg') is True
def test_sync_lip_to_video() -> None:
	# Run the lip syncer with a source audio against one trimmed video frame and verify the output exists.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'lip_syncer', '-s', get_test_example_file('source.mp3'), '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test_sync_lip_to_video.mp4'), '--trim-frame-end', '1' ]
	assert subprocess.run(commands).returncode == 0
	assert is_test_output_file('test_sync_lip_to_video.mp4') is True
+2 -2
View File
@@ -36,7 +36,7 @@ def before_each() -> None:
])
def test_output_image_scale(output_image_scale : Scale, output_image_resolution : Resolution) -> None:
	# Enhance an image at the parametrized scale and verify the resulting resolution.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	output_file_path = get_test_output_file('test-output-image-scale-' + str(output_image_scale) + '.jpg')
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', output_file_path, '--output-image-scale', str(output_image_scale) ]
	assert subprocess.run(commands).returncode == 0
	assert detect_image_resolution(output_file_path) == output_image_resolution
@@ -51,7 +51,7 @@ def test_output_image_scale(output_image_scale : Scale, output_image_resolution
])
def test_output_video_scale(output_video_scale : Scale, output_video_resolution : Resolution) -> None:
	# Enhance one trimmed video frame at the parametrized scale and verify the resulting resolution.
	# The stale duplicate assignment using the removed 'headless-run' command was dropped.
	output_file_path = get_test_output_file('test-output-video-scale-' + str(output_video_scale) + '.mp4')
	commands = [ sys.executable, 'facefusion.py', 'run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', output_file_path, '--trim-frame-end', '1', '--output-video-scale', str(output_video_scale) ]
	assert subprocess.run(commands).returncode == 0
	assert detect_video_resolution(output_file_path) == output_video_resolution
+2 -2
View File
@@ -24,9 +24,9 @@ def test_get_inference_pool() -> None:
assert isinstance(INFERENCE_POOL_SET.get('cli').get('facefusion.content_analyser.nsfw_1.nsfw_2.nsfw_3.0.cpu').get('nsfw_1'), InferenceSession)
with patch('facefusion.inference_manager.detect_app_context', return_value = 'ui'):
with patch('facefusion.inference_manager.detect_app_context', return_value = 'api'):
get_inference_pool('facefusion.content_analyser', model_names, model_source_set)
assert isinstance(INFERENCE_POOL_SET.get('cli').get('facefusion.content_analyser.nsfw_1.nsfw_2.nsfw_3.0.cpu').get('nsfw_1'), InferenceSession)
assert INFERENCE_POOL_SET.get('cli').get('facefusion.content_analyser.nsfw_1.nsfw_2.nsfw_3.0.cpu').get('nsfw_1') == INFERENCE_POOL_SET.get('ui').get('facefusion.content_analyser.nsfw_1.nsfw_2.nsfw_3.0.cpu').get('nsfw_1')
assert INFERENCE_POOL_SET.get('cli').get('facefusion.content_analyser.nsfw_1.nsfw_2.nsfw_3.0.cpu').get('nsfw_1') == INFERENCE_POOL_SET.get('api').get('facefusion.content_analyser.nsfw_1.nsfw_2.nsfw_3.0.cpu').get('nsfw_1')
+3 -3
View File
@@ -18,18 +18,18 @@ def clear_state(app_context : AppContext) -> None:
@pytest.fixture(scope = 'function', autouse = True)
def before_each() -> None:
	# Reset the state of every remaining app context before each test.
	# The stale clear_state('ui') was dropped — the 'ui' app context was replaced by 'api'.
	clear_state('cli')
	clear_state('api')
def test_init_item() -> None:
	# init_item must propagate the value into every app context state.
	# The stale 'ui' assertion was dropped — the 'ui' app context was replaced by 'api'.
	init_item('video_memory_strategy', 'tolerant')
	assert get_state('cli').get('video_memory_strategy') == 'tolerant'
	assert get_state('api').get('video_memory_strategy') == 'tolerant'
def test_get_item_and_set_item() -> None:
set_item('video_memory_strategy', 'tolerant')
assert get_item('video_memory_strategy') == 'tolerant'
assert get_state('ui').get('video_memory_strategy') is None
assert get_state('api').get('video_memory_strategy') is None