deepfuze
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,23 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
from deepfuze import metadata, wording
|
||||
|
||||
ABOUT_BUTTON : Optional[gradio.HTML] = None
|
||||
DONATE_BUTTON : Optional[gradio.HTML] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the about and donate buttons and keep them in module globals."""
	global ABOUT_BUTTON
	global DONATE_BUTTON

	about_label = metadata.get('name') + ' ' + metadata.get('version')
	ABOUT_BUTTON = gradio.Button(value = about_label, variant = 'primary', link = metadata.get('url'))
	DONATE_BUTTON = gradio.Button(value = wording.get('uis.donate_button'), link = 'https://donate.deepfuze.io', size = 'sm')
|
||||
@@ -0,0 +1,140 @@
|
||||
from typing import Any, Optional, List, Dict, Generator
|
||||
from time import sleep, perf_counter
|
||||
import tempfile
|
||||
import statistics
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import process_manager, wording
|
||||
from deepfuze.face_store import clear_static_faces
|
||||
from deepfuze.processors.frame.core import get_frame_processors_modules
|
||||
from deepfuze.vision import count_video_frame_total, detect_video_resolution, detect_video_fps, pack_resolution
|
||||
from deepfuze.core import conditional_process
|
||||
from deepfuze.memory import limit_system_memory
|
||||
from deepfuze.filesystem import clear_temp
|
||||
from deepfuze.uis.core import get_ui_component
|
||||
|
||||
BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
|
||||
BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
|
||||
BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
|
||||
BENCHMARKS : Dict[str, str] =\
|
||||
{
|
||||
'240p': '../../models/facefusion/examples/target-240p.mp4',
|
||||
'360p': '../../models/facefusion/examples/target-360p.mp4',
|
||||
'540p': '../../models/facefusion/examples/target-540p.mp4',
|
||||
'720p': '../../models/facefusion/examples/target-720p.mp4',
|
||||
'1080p': '../../models/facefusion/examples/target-1080p.mp4',
|
||||
'1440p': '../../models/facefusion/examples/target-1440p.mp4',
|
||||
'2160p': '../../models/facefusion/examples/target-2160p.mp4'
|
||||
}
|
||||
|
||||
|
||||
def render() -> None:
	"""Build the benchmark results table and its start / clear buttons."""
	global BENCHMARK_RESULTS_DATAFRAME
	global BENCHMARK_START_BUTTON
	global BENCHMARK_CLEAR_BUTTON

	# one string column for the target path, the remaining five are numeric metrics
	result_headers = [ 'target_path', 'benchmark_cycles', 'average_run', 'fastest_run', 'slowest_run', 'relative_fps' ]
	result_datatypes = [ 'str' ] + [ 'number' ] * 5
	BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe(
		label = wording.get('uis.benchmark_results_dataframe'),
		headers = result_headers,
		datatype = result_datatypes
	)
	BENCHMARK_START_BUTTON = gradio.Button(value = wording.get('uis.start_button'), variant = 'primary', size = 'sm')
	BENCHMARK_CLEAR_BUTTON = gradio.Button(value = wording.get('uis.clear_button'), size = 'sm')
|
||||
|
||||
|
||||
def listen() -> None:
	"""Wire the start / clear buttons once both benchmark option components exist."""
	runs_group = get_ui_component('benchmark_runs_checkbox_group')
	cycles_slider = get_ui_component('benchmark_cycles_slider')

	if not runs_group or not cycles_slider:
		return
	BENCHMARK_START_BUTTON.click(start, inputs = [ runs_group, cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME)
	BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME)
|
||||
|
||||
|
||||
def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
	"""Run the selected benchmarks and yield the accumulated result rows after each one.

	Globals are forced to benchmark-friendly settings (temp output dir, fastest
	codec preset, landmarker scoring off) before the first run.
	"""
	deepfuze.globals.source_paths = [ '../../models/facefusion/examples/source.jpg', '../../models/facefusion/examples/source.mp3' ]
	deepfuze.globals.output_path = tempfile.gettempdir()
	deepfuze.globals.face_landmarker_score = 0
	deepfuze.globals.temp_frame_format = 'bmp'
	deepfuze.globals.output_video_preset = 'ultrafast'
	selected_targets = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
	rows = []

	if not selected_targets:
		return
	pre_process()
	for target in selected_targets:
		deepfuze.globals.target_path = target
		rows.append(benchmark(benchmark_cycles))
		# yield after every target so the UI table updates progressively
		yield rows
	post_process()
||||
|
||||
|
||||
def pre_process() -> None:
	"""Prepare the benchmark: apply the memory limit and warm up the frame processors."""
	system_memory_limit = deepfuze.globals.system_memory_limit
	# Fixed: guard against an unset (None) limit, which would raise a TypeError
	# in the original `> 0` comparison; behavior is unchanged for integer values.
	if system_memory_limit and system_memory_limit > 0:
		limit_system_memory(system_memory_limit)
	# pre-load every frame processor so model load time is excluded from the timings
	for frame_processor_module in get_frame_processors_modules(deepfuze.globals.frame_processors):
		frame_processor_module.get_frame_processor()
|
||||
|
||||
|
||||
def post_process() -> None:
	"""Tear down after a benchmark run by dropping the static face cache."""
	clear_static_faces()
|
||||
|
||||
|
||||
def benchmark(benchmark_cycles : int) -> List[Any]:
	"""Process the current target `benchmark_cycles` times and return one result row.

	The row matches the dataframe headers: target path, cycle count, mean /
	fastest / slowest wall-clock run (seconds) and overall frames per second.
	"""
	video_frame_total = count_video_frame_total(deepfuze.globals.target_path)
	output_video_resolution = detect_video_resolution(deepfuze.globals.target_path)
	deepfuze.globals.output_video_resolution = pack_resolution(output_video_resolution)
	deepfuze.globals.output_video_fps = detect_video_fps(deepfuze.globals.target_path)
	process_times = []

	for _ in range(benchmark_cycles):
		cycle_start = perf_counter()
		conditional_process()
		process_times.append(perf_counter() - cycle_start)

	return\
	[
		deepfuze.globals.target_path,
		benchmark_cycles,
		round(statistics.mean(process_times), 2),
		round(min(process_times), 2),
		round(max(process_times), 2),
		round(video_frame_total * benchmark_cycles / sum(process_times), 2)
	]
|
||||
|
||||
|
||||
def clear() -> gradio.Dataframe:
	"""Wait for any active processing, drop temp files and reset the results table."""
	while process_manager.is_processing():
		sleep(0.5)
	target_path = deepfuze.globals.target_path
	if target_path:
		clear_temp(target_path)
	return gradio.Dataframe(value = None)
|
||||
@@ -0,0 +1,29 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
from deepfuze import wording
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
from deepfuze.uis.components.benchmark import BENCHMARKS
|
||||
|
||||
BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
# Fixed: was annotated Optional[gradio.Button] although render() assigns a gradio.Slider.
BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the benchmark run selector and cycle count slider, then register both."""
	global BENCHMARK_RUNS_CHECKBOX_GROUP
	global BENCHMARK_CYCLES_SLIDER

	BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('uis.benchmark_runs_checkbox_group'),
		value = list(BENCHMARKS),
		choices = list(BENCHMARKS)
	)
	BENCHMARK_CYCLES_SLIDER = gradio.Slider(
		label = wording.get('uis.benchmark_cycles_slider'),
		value = 5,
		step = 1,
		minimum = 1,
		maximum = 10
	)
	register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP)
	register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER)
|
||||
@@ -0,0 +1,35 @@
|
||||
from typing import Optional, List
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import wording
|
||||
from deepfuze.uis import choices as uis_choices
|
||||
|
||||
# Fixed: canonical gradio.CheckboxGroup class name, consistent with the other UI modules
# (the lowercase `Checkboxgroup` alias is deprecated and removed in newer gradio releases).
COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the common options checkbox group, preselecting entries from globals."""
	global COMMON_OPTIONS_CHECKBOX_GROUP

	# map each boolean global onto its checkbox value
	flag_map =\
	[
		('keep-temp', deepfuze.globals.keep_temp),
		('skip-audio', deepfuze.globals.skip_audio),
		('skip-download', deepfuze.globals.skip_download)
	]
	value = [ option for option, enabled in flag_map if enabled ]
	# Fixed: gradio.CheckboxGroup (canonical casing), consistent with the other UI modules.
	COMMON_OPTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('uis.common_options_checkbox_group'),
		choices = uis_choices.common_options,
		value = value
	)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Sync the checkbox selection back into globals whenever it changes."""
	COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP)
|
||||
|
||||
|
||||
def update(common_options : List[str]) -> None:
	"""Translate the checked options into the corresponding boolean globals."""
	selected = set(common_options)
	deepfuze.globals.keep_temp = 'keep-temp' in selected
	deepfuze.globals.skip_audio = 'skip-audio' in selected
	deepfuze.globals.skip_download = 'skip-download' in selected
|
||||
@@ -0,0 +1,33 @@
|
||||
from typing import List, Optional
|
||||
import gradio
|
||||
import onnxruntime
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import wording
|
||||
from deepfuze.face_analyser import clear_face_analyser
|
||||
from deepfuze.processors.frame.core import clear_frame_processors_modules
|
||||
from deepfuze.execution import encode_execution_providers, decode_execution_providers
|
||||
|
||||
EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the execution providers checkbox group from what onnxruntime offers."""
	global EXECUTION_PROVIDERS_CHECKBOX_GROUP

	available_providers = encode_execution_providers(onnxruntime.get_available_providers())
	EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('uis.execution_providers_checkbox_group'),
		choices = available_providers,
		value = encode_execution_providers(deepfuze.globals.execution_providers)
	)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Re-apply providers (and re-render the group) whenever the selection changes."""
	EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
|
||||
|
||||
|
||||
def update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup:
	"""Apply the chosen providers, resetting cached models first.

	An empty selection falls back to every provider onnxruntime reports.
	"""
	clear_face_analyser()
	clear_frame_processors_modules()
	if not execution_providers:
		execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
	deepfuze.globals.execution_providers = decode_execution_providers(execution_providers)
	return gradio.CheckboxGroup(value = execution_providers)
|
||||
@@ -0,0 +1,28 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import wording
|
||||
|
||||
EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the execution queue count slider from the configured range."""
	global EXECUTION_QUEUE_COUNT_SLIDER

	queue_count_range = deepfuze.choices.execution_queue_count_range
	EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
		label = wording.get('uis.execution_queue_count_slider'),
		value = deepfuze.globals.execution_queue_count,
		step = queue_count_range[1] - queue_count_range[0],
		minimum = queue_count_range[0],
		maximum = queue_count_range[-1]
	)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Persist the queue count once the slider handle is released."""
	EXECUTION_QUEUE_COUNT_SLIDER.release(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)
|
||||
|
||||
|
||||
def update_execution_queue_count(execution_queue_count : int = 1) -> None:
	"""Store the selected execution queue count in globals."""
	deepfuze.globals.execution_queue_count = execution_queue_count
|
||||
@@ -0,0 +1,29 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import wording
|
||||
|
||||
EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the execution thread count slider from the configured range."""
	global EXECUTION_THREAD_COUNT_SLIDER

	thread_count_range = deepfuze.choices.execution_thread_count_range
	EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
		label = wording.get('uis.execution_thread_count_slider'),
		value = deepfuze.globals.execution_thread_count,
		step = thread_count_range[1] - thread_count_range[0],
		minimum = thread_count_range[0],
		maximum = thread_count_range[-1]
	)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Persist the thread count once the slider handle is released."""
	EXECUTION_THREAD_COUNT_SLIDER.release(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
|
||||
|
||||
|
||||
def update_execution_thread_count(execution_thread_count : int = 1) -> None:
	"""Store the selected execution thread count in globals."""
	deepfuze.globals.execution_thread_count = execution_thread_count
|
||||
|
||||
@@ -0,0 +1,123 @@
|
||||
from typing import Optional, Dict, Any, Tuple
|
||||
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import face_analyser, wording
|
||||
from deepfuze.typing import FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
|
||||
FACE_ANALYSER_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_LANDMARKER_SCORE_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the face analyser dropdowns plus detector / landmarker sliders and register them."""
	global FACE_ANALYSER_ORDER_DROPDOWN
	global FACE_ANALYSER_AGE_DROPDOWN
	global FACE_ANALYSER_GENDER_DROPDOWN
	global FACE_DETECTOR_MODEL_DROPDOWN
	global FACE_DETECTOR_SIZE_DROPDOWN
	global FACE_DETECTOR_SCORE_SLIDER
	global FACE_LANDMARKER_SCORE_SLIDER

	face_detector_size_dropdown_args : Dict[str, Any] =\
	{
		'label': wording.get('uis.face_detector_size_dropdown'),
		'value': deepfuze.globals.face_detector_size
	}
	# only offer size choices when the current size is valid for the selected model
	if deepfuze.globals.face_detector_size in deepfuze.choices.face_detector_set[deepfuze.globals.face_detector_model]:
		face_detector_size_dropdown_args['choices'] = deepfuze.choices.face_detector_set[deepfuze.globals.face_detector_model]
	with gradio.Row():
		FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown(
			label = wording.get('uis.face_analyser_order_dropdown'),
			choices = deepfuze.choices.face_analyser_orders,
			value = deepfuze.globals.face_analyser_order
		)
		# age / gender filters are optional, 'none' means no filtering
		FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
			label = wording.get('uis.face_analyser_age_dropdown'),
			choices = [ 'none' ] + deepfuze.choices.face_analyser_ages,
			value = deepfuze.globals.face_analyser_age or 'none'
		)
		FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
			label = wording.get('uis.face_analyser_gender_dropdown'),
			choices = [ 'none' ] + deepfuze.choices.face_analyser_genders,
			value = deepfuze.globals.face_analyser_gender or 'none'
		)
		FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
			label = wording.get('uis.face_detector_model_dropdown'),
			# Fixed: pass a list instead of a dict keys view, consistent with the other dropdowns.
			choices = list(deepfuze.choices.face_detector_set.keys()),
			value = deepfuze.globals.face_detector_model
		)
		FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(**face_detector_size_dropdown_args)
	with gradio.Row():
		FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
			label = wording.get('uis.face_detector_score_slider'),
			value = deepfuze.globals.face_detector_score,
			step = deepfuze.choices.face_detector_score_range[1] - deepfuze.choices.face_detector_score_range[0],
			minimum = deepfuze.choices.face_detector_score_range[0],
			maximum = deepfuze.choices.face_detector_score_range[-1]
		)
		FACE_LANDMARKER_SCORE_SLIDER = gradio.Slider(
			label = wording.get('uis.face_landmarker_score_slider'),
			value = deepfuze.globals.face_landmarker_score,
			step = deepfuze.choices.face_landmarker_score_range[1] - deepfuze.choices.face_landmarker_score_range[0],
			minimum = deepfuze.choices.face_landmarker_score_range[0],
			maximum = deepfuze.choices.face_landmarker_score_range[-1]
		)
	register_ui_component('face_analyser_order_dropdown', FACE_ANALYSER_ORDER_DROPDOWN)
	register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
	register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
	register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN)
	register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN)
	register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER)
	register_ui_component('face_landmarker_score_slider', FACE_LANDMARKER_SCORE_SLIDER)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Connect every analyser control to its update handler."""
	# the three analyser dropdowns share the simple change -> setter pattern
	for dropdown, handler in (
		(FACE_ANALYSER_ORDER_DROPDOWN, update_face_analyser_order),
		(FACE_ANALYSER_AGE_DROPDOWN, update_face_analyser_age),
		(FACE_ANALYSER_GENDER_DROPDOWN, update_face_analyser_gender)
	):
		dropdown.change(handler, inputs = dropdown)
	# changing the model also rebuilds the size dropdown
	FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN, outputs = [ FACE_DETECTOR_MODEL_DROPDOWN, FACE_DETECTOR_SIZE_DROPDOWN ])
	FACE_DETECTOR_SIZE_DROPDOWN.change(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN)
	FACE_DETECTOR_SCORE_SLIDER.release(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER)
	FACE_LANDMARKER_SCORE_SLIDER.release(update_face_landmarker_score, inputs = FACE_LANDMARKER_SCORE_SLIDER)
|
||||
|
||||
|
||||
def update_face_analyser_order(face_analyser_order : FaceAnalyserOrder) -> None:
	"""Store the analyser order; the sentinel 'none' clears it."""
	if face_analyser_order == 'none':
		deepfuze.globals.face_analyser_order = None
	else:
		deepfuze.globals.face_analyser_order = face_analyser_order
|
||||
|
||||
|
||||
def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None:
	"""Store the analyser age filter; the sentinel 'none' clears it."""
	if face_analyser_age == 'none':
		deepfuze.globals.face_analyser_age = None
	else:
		deepfuze.globals.face_analyser_age = face_analyser_age
|
||||
|
||||
|
||||
def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None:
	"""Store the analyser gender filter; the sentinel 'none' clears it."""
	if face_analyser_gender == 'none':
		deepfuze.globals.face_analyser_gender = None
	else:
		deepfuze.globals.face_analyser_gender = face_analyser_gender
|
||||
|
||||
|
||||
def update_face_detector_model(face_detector_model : FaceDetectorModel) -> Tuple[gradio.Dropdown, gradio.Dropdown]:
	"""Switch the detector model, reset the size to 640x640 and rebuild both dropdowns."""
	deepfuze.globals.face_detector_model = face_detector_model
	update_face_detector_size('640x640')
	if not face_analyser.pre_check():
		# model assets unavailable: leave both dropdowns untouched
		return gradio.Dropdown(), gradio.Dropdown()
	model_sizes = deepfuze.choices.face_detector_set[face_detector_model]
	if deepfuze.globals.face_detector_size in model_sizes:
		size_choices = model_sizes
	else:
		# keep the current size selectable even if the model does not list it
		size_choices = [ deepfuze.globals.face_detector_size ]
	return gradio.Dropdown(value = deepfuze.globals.face_detector_model), gradio.Dropdown(value = deepfuze.globals.face_detector_size, choices = size_choices)
|
||||
|
||||
|
||||
def update_face_detector_size(face_detector_size : str) -> None:
	"""Store the selected face detector size in globals."""
	deepfuze.globals.face_detector_size = face_detector_size
|
||||
|
||||
|
||||
def update_face_detector_score(face_detector_score : float) -> None:
	"""Store the selected face detector score threshold in globals."""
	deepfuze.globals.face_detector_score = face_detector_score
|
||||
|
||||
|
||||
def update_face_landmarker_score(face_landmarker_score : float) -> None:
	"""Store the selected face landmarker score threshold in globals."""
	deepfuze.globals.face_landmarker_score = face_landmarker_score
|
||||
Executable
+119
@@ -0,0 +1,119 @@
|
||||
from typing import Optional, Tuple, List
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import wording
|
||||
from deepfuze.typing import FaceMaskType, FaceMaskRegion
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
|
||||
FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None
|
||||
FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None
|
||||
FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the face mask controls: type selection, blur, box paddings and regions.

	NOTE(review): the scrape lost indentation; nesting reconstructed so that the
	blur and padding sliders live in the box group while the region checkbox
	group sits outside it (its visibility is toggled directly) — confirm.
	"""
	global FACE_MASK_TYPES_CHECKBOX_GROUP
	global FACE_MASK_BLUR_SLIDER
	global FACE_MASK_BOX_GROUP
	global FACE_MASK_REGION_GROUP
	global FACE_MASK_PADDING_TOP_SLIDER
	global FACE_MASK_PADDING_RIGHT_SLIDER
	global FACE_MASK_PADDING_BOTTOM_SLIDER
	global FACE_MASK_PADDING_LEFT_SLIDER
	global FACE_MASK_REGION_CHECKBOX_GROUP

	blur_range = deepfuze.choices.face_mask_blur_range
	padding_range = deepfuze.choices.face_mask_padding_range

	def create_padding_slider(wording_key : str, padding_index : int) -> gradio.Slider:
		# all four padding sliders share range and step, differing only in label and value
		return gradio.Slider(
			label = wording.get(wording_key),
			step = padding_range[1] - padding_range[0],
			minimum = padding_range[0],
			maximum = padding_range[-1],
			value = deepfuze.globals.face_mask_padding[padding_index]
		)

	has_box_mask = 'box' in deepfuze.globals.face_mask_types
	has_region_mask = 'region' in deepfuze.globals.face_mask_types
	FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('uis.face_mask_types_checkbox_group'),
		choices = deepfuze.choices.face_mask_types,
		value = deepfuze.globals.face_mask_types
	)
	with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP:
		FACE_MASK_BLUR_SLIDER = gradio.Slider(
			label = wording.get('uis.face_mask_blur_slider'),
			step = blur_range[1] - blur_range[0],
			minimum = blur_range[0],
			maximum = blur_range[-1],
			value = deepfuze.globals.face_mask_blur
		)
		with gradio.Row():
			FACE_MASK_PADDING_TOP_SLIDER = create_padding_slider('uis.face_mask_padding_top_slider', 0)
			FACE_MASK_PADDING_RIGHT_SLIDER = create_padding_slider('uis.face_mask_padding_right_slider', 1)
		with gradio.Row():
			FACE_MASK_PADDING_BOTTOM_SLIDER = create_padding_slider('uis.face_mask_padding_bottom_slider', 2)
			FACE_MASK_PADDING_LEFT_SLIDER = create_padding_slider('uis.face_mask_padding_left_slider', 3)
	with gradio.Row():
		FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup(
			label = wording.get('uis.face_mask_region_checkbox_group'),
			choices = deepfuze.choices.face_mask_regions,
			value = deepfuze.globals.face_mask_regions,
			visible = has_region_mask
		)
	register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP)
	register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER)
	register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER)
	register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER)
	register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER)
	register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER)
	register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Bind the mask controls to their handlers."""
	FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ])
	FACE_MASK_BLUR_SLIDER.release(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
	FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP)
	# every padding slider reports the full set of four values to a single handler
	padding_sliders =\
	[
		FACE_MASK_PADDING_TOP_SLIDER,
		FACE_MASK_PADDING_RIGHT_SLIDER,
		FACE_MASK_PADDING_BOTTOM_SLIDER,
		FACE_MASK_PADDING_LEFT_SLIDER
	]
	for padding_slider in padding_sliders:
		padding_slider.release(update_face_mask_padding, inputs = padding_sliders)
|
||||
|
||||
|
||||
def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]:
	"""Persist the selected mask types (falling back to all) and toggle dependent controls."""
	deepfuze.globals.face_mask_types = face_mask_types or deepfuze.choices.face_mask_types
	# visibility follows the raw selection, not the fallback
	show_box = 'box' in face_mask_types
	show_region = 'region' in face_mask_types
	return gradio.CheckboxGroup(value = deepfuze.globals.face_mask_types), gradio.Group(visible = show_box), gradio.CheckboxGroup(visible = show_region)
|
||||
|
||||
|
||||
def update_face_mask_blur(face_mask_blur : float) -> None:
	"""Store the selected mask blur amount in globals."""
	deepfuze.globals.face_mask_blur = face_mask_blur
|
||||
|
||||
|
||||
def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None:
	"""Store the four box paddings as one (top, right, bottom, left) tuple in globals."""
	padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left)
	deepfuze.globals.face_mask_padding = padding
|
||||
|
||||
|
||||
def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup:
	"""Persist the selected mask regions, falling back to all when the list is empty."""
	selected_regions = face_mask_regions or deepfuze.choices.face_mask_regions
	deepfuze.globals.face_mask_regions = selected_regions
	return gradio.CheckboxGroup(value = selected_regions)
|
||||
@@ -0,0 +1,165 @@
|
||||
from typing import List, Optional, Tuple, Any, Dict
|
||||
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import wording
|
||||
from deepfuze.face_store import clear_static_faces, clear_reference_faces
|
||||
from deepfuze.vision import get_video_frame, read_static_image, normalize_frame_color
|
||||
from deepfuze.filesystem import is_image, is_video
|
||||
from deepfuze.face_analyser import get_many_faces
|
||||
from deepfuze.typing import VisionFrame, FaceSelectorMode
|
||||
from deepfuze.uis.core import get_ui_component, get_ui_components, register_ui_component
|
||||
|
||||
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
|
||||
REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the face selector mode dropdown, reference gallery and distance slider."""
	global FACE_SELECTOR_MODE_DROPDOWN
	global REFERENCE_FACE_POSITION_GALLERY
	global REFERENCE_FACE_DISTANCE_SLIDER

	is_reference_mode = 'reference' in deepfuze.globals.face_selector_mode
	reference_face_gallery_args : Dict[str, Any] =\
	{
		'label': wording.get('uis.reference_face_gallery'),
		'object_fit': 'cover',
		'columns': 8,
		'allow_preview': False,
		'visible': is_reference_mode
	}
	# seed the gallery from the current target, whether it is an image or a video
	if is_image(deepfuze.globals.target_path):
		reference_frame = read_static_image(deepfuze.globals.target_path)
		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
	if is_video(deepfuze.globals.target_path):
		reference_frame = get_video_frame(deepfuze.globals.target_path, deepfuze.globals.reference_frame_number)
		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
	FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.face_selector_mode_dropdown'),
		choices = deepfuze.choices.face_selector_modes,
		value = deepfuze.globals.face_selector_mode
	)
	REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
	distance_range = deepfuze.choices.reference_face_distance_range
	REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
		label = wording.get('uis.reference_face_distance_slider'),
		value = deepfuze.globals.reference_face_distance,
		step = distance_range[1] - distance_range[0],
		minimum = distance_range[0],
		maximum = distance_range[-1],
		visible = is_reference_mode
	)
	register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
	register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
	register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
|
||||
|
||||
|
||||
def listen() -> None:
	"""Wire the selector widgets and refresh the reference gallery when upstream inputs change."""
	FACE_SELECTOR_MODE_DROPDOWN.change(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
	REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
	REFERENCE_FACE_DISTANCE_SLIDER.release(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)

	# a new target resets the reference position and refreshes the gallery
	for target_component in get_ui_components([ 'target_image', 'target_video' ]):
		for event_name in [ 'upload', 'change', 'clear' ]:
			getattr(target_component, event_name)(update_reference_face_position)
			getattr(target_component, event_name)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)

	# analyser ordering / filtering only re-renders the gallery
	for analyser_component in get_ui_components([ 'face_analyser_order_dropdown', 'face_analyser_age_dropdown', 'face_analyser_gender_dropdown' ]):
		analyser_component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)

	# detector changes invalidate cached faces before the refresh
	for detector_component in get_ui_components([ 'face_detector_model_dropdown', 'face_detector_size_dropdown' ]):
		detector_component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	for score_component in get_ui_components([ 'face_detector_score_slider', 'face_landmarker_score_slider' ]):
		score_component.release(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)

	preview_frame_slider = get_ui_component('preview_frame_slider')
	if preview_frame_slider:
		preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)
		preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
|
||||
|
||||
|
||||
def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
	"""
	Apply the chosen face selector mode and toggle the reference widgets.

	The gallery and distance slider are only relevant in 'reference' mode.
	Fix: the original duplicated the 'many'/'one' branches and fell through
	with an implicit None for an unexpected mode, violating the annotation;
	now a valid mode is stored once and a Tuple is always returned.
	"""
	if face_selector_mode in ('many', 'one', 'reference'):
		deepfuze.globals.face_selector_mode = face_selector_mode
	is_reference = face_selector_mode == 'reference'
	return gradio.Gallery(visible = is_reference), gradio.Slider(visible = is_reference)
|
||||
|
||||
def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
	"""Drop cached faces, adopt the selected gallery index and rebuild the gallery."""
	clear_reference_faces()
	clear_static_faces()
	# the gallery selection index identifies the chosen reference face
	update_reference_face_position(event.index)
	return update_reference_position_gallery()
||||
|
||||
def update_reference_face_position(reference_face_position : int = 0) -> None:
	"""Store the selected reference face position in the global state."""
	deepfuze.globals.reference_face_position = reference_face_position
||||
|
||||
|
||||
def update_reference_face_distance(reference_face_distance : float) -> None:
	"""Store the reference face distance threshold in the global state."""
	deepfuze.globals.reference_face_distance = reference_face_distance
||||
|
||||
|
||||
def update_reference_frame_number(reference_frame_number : int) -> None:
	"""Store the frame number the reference face is taken from."""
	deepfuze.globals.reference_frame_number = reference_frame_number
||||
|
||||
|
||||
def clear_and_update_reference_position_gallery() -> gradio.Gallery:
	"""Invalidate cached reference and static faces, then rebuild the gallery."""
	clear_reference_faces()
	clear_static_faces()
	return update_reference_position_gallery()
||||
|
||||
|
||||
def update_reference_position_gallery() -> gradio.Gallery:
	"""Build the gallery of candidate reference faces from the current target."""
	target_path = deepfuze.globals.target_path
	gallery_vision_frames = []
	if is_image(target_path):
		gallery_vision_frames = extract_gallery_frames(read_static_image(target_path))
	if is_video(target_path):
		# take the faces from the currently selected reference frame
		gallery_vision_frames = extract_gallery_frames(get_video_frame(target_path, deepfuze.globals.reference_frame_number))
	if gallery_vision_frames:
		return gradio.Gallery(value = gallery_vision_frames)
	return gradio.Gallery(value = None)
||||
|
||||
|
||||
def extract_gallery_frames(temp_vision_frame : VisionFrame) -> List[VisionFrame]:
	"""Crop every detected face out of the frame, padded by 25% of the box size."""
	crop_vision_frames = []

	for face in get_many_faces(temp_vision_frame):
		start_x, start_y, end_x, end_y = map(int, face.bounding_box)
		padding_x = int((end_x - start_x) * 0.25)
		padding_y = int((end_y - start_y) * 0.25)
		# clamp at zero so the padded box never leaves the frame on the top/left
		start_x = max(0, start_x - padding_x)
		start_y = max(0, start_y - padding_y)
		end_x = max(0, end_x + padding_x)
		end_y = max(0, end_y + padding_y)
		crop_vision_frame = normalize_frame_color(temp_vision_frame[start_y:end_y, start_x:end_x])
		crop_vision_frames.append(crop_vision_frame)
	return crop_vision_frames
||||
@@ -0,0 +1,40 @@
|
||||
from typing import List, Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import wording
|
||||
from deepfuze.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
|
||||
from deepfuze.filesystem import list_directory
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
|
||||
FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the frame processor selection checkbox group and register it."""
	global FRAME_PROCESSORS_CHECKBOX_GROUP

	FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('uis.frame_processors_checkbox_group'),
		choices = sort_frame_processors(deepfuze.globals.frame_processors),
		value = deepfuze.globals.frame_processors
	)
	register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
||||
|
||||
|
||||
def listen() -> None:
	"""Re-validate and re-sort the checkbox group whenever the selection changes."""
	FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP)
||||
|
||||
|
||||
def update_frame_processors(frame_processors : List[str]) -> gradio.CheckboxGroup:
	"""Apply the chosen frame processors, aborting on the first failing pre-check."""
	deepfuze.globals.frame_processors = frame_processors
	clear_frame_processors_modules()

	for frame_processor in frame_processors:
		if not load_frame_processor_module(frame_processor).pre_check():
			# leave the checkbox group untouched when a module cannot be prepared
			return gradio.CheckboxGroup()
	return gradio.CheckboxGroup(value = deepfuze.globals.frame_processors, choices = sort_frame_processors(deepfuze.globals.frame_processors))
||||
|
||||
|
||||
def sort_frame_processors(frame_processors : List[str]) -> List[str]:
	"""
	Order the available frame processors by their position in *frame_processors*;
	processors that are not selected sort to the end.

	Fix: annotation normalized to the file's `List[...]` style and the repeated
	O(n) `list.index` lookups in the sort key replaced by a precomputed dict.
	"""
	# NOTE(review): path still says 'facefusion' while the package is deepfuze — confirm against repository layout
	available_frame_processors = list_directory('facefusion/processors/frame/modules')
	frame_processor_orders = { frame_processor : index for index, frame_processor in enumerate(frame_processors) }
	return sorted(available_frame_processors, key = lambda frame_processor : frame_processor_orders.get(frame_processor, len(frame_processors)))
||||
+216
@@ -0,0 +1,216 @@
|
||||
from typing import List, Optional, Tuple
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import face_analyser, wording
|
||||
from deepfuze.processors.frame.core import load_frame_processor_module
|
||||
from deepfuze.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
|
||||
from deepfuze.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
|
||||
from deepfuze.uis.core import get_ui_component, register_ui_component
|
||||
|
||||
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
|
||||
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FRAME_COLORIZER_BLEND_SLIDER : Optional[gradio.Slider] = None
|
||||
FRAME_COLORIZER_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
|
||||
LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the per-processor option widgets, visible only for active processors."""
	global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP
	global FACE_ENHANCER_MODEL_DROPDOWN
	global FACE_ENHANCER_BLEND_SLIDER
	global FACE_SWAPPER_MODEL_DROPDOWN
	global FRAME_COLORIZER_MODEL_DROPDOWN
	global FRAME_COLORIZER_BLEND_SLIDER
	global FRAME_COLORIZER_SIZE_DROPDOWN
	global FRAME_ENHANCER_MODEL_DROPDOWN
	global FRAME_ENHANCER_BLEND_SLIDER
	global LIP_SYNCER_MODEL_DROPDOWN

	# evaluate each membership test once instead of per widget
	has_face_debugger = 'face_debugger' in deepfuze.globals.frame_processors
	has_face_enhancer = 'face_enhancer' in deepfuze.globals.frame_processors
	has_face_swapper = 'face_swapper' in deepfuze.globals.frame_processors
	has_frame_colorizer = 'frame_colorizer' in deepfuze.globals.frame_processors
	has_frame_enhancer = 'frame_enhancer' in deepfuze.globals.frame_processors
	has_lip_syncer = 'lip_syncer' in deepfuze.globals.frame_processors

	FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('uis.face_debugger_items_checkbox_group'),
		choices = frame_processors_choices.face_debugger_items,
		value = frame_processors_globals.face_debugger_items,
		visible = has_face_debugger
	)
	FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.face_enhancer_model_dropdown'),
		choices = frame_processors_choices.face_enhancer_models,
		value = frame_processors_globals.face_enhancer_model,
		visible = has_face_enhancer
	)
	FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
		label = wording.get('uis.face_enhancer_blend_slider'),
		value = frame_processors_globals.face_enhancer_blend,
		step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0],
		minimum = frame_processors_choices.face_enhancer_blend_range[0],
		maximum = frame_processors_choices.face_enhancer_blend_range[-1],
		visible = has_face_enhancer
	)
	FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.face_swapper_model_dropdown'),
		choices = frame_processors_choices.face_swapper_models,
		value = frame_processors_globals.face_swapper_model,
		visible = has_face_swapper
	)
	FRAME_COLORIZER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.frame_colorizer_model_dropdown'),
		choices = frame_processors_choices.frame_colorizer_models,
		value = frame_processors_globals.frame_colorizer_model,
		visible = has_frame_colorizer
	)
	FRAME_COLORIZER_BLEND_SLIDER = gradio.Slider(
		label = wording.get('uis.frame_colorizer_blend_slider'),
		value = frame_processors_globals.frame_colorizer_blend,
		step = frame_processors_choices.frame_colorizer_blend_range[1] - frame_processors_choices.frame_colorizer_blend_range[0],
		minimum = frame_processors_choices.frame_colorizer_blend_range[0],
		maximum = frame_processors_choices.frame_colorizer_blend_range[-1],
		visible = has_frame_colorizer
	)
	FRAME_COLORIZER_SIZE_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.frame_colorizer_size_dropdown'),
		choices = frame_processors_choices.frame_colorizer_sizes,
		value = frame_processors_globals.frame_colorizer_size,
		visible = has_frame_colorizer
	)
	FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.frame_enhancer_model_dropdown'),
		choices = frame_processors_choices.frame_enhancer_models,
		value = frame_processors_globals.frame_enhancer_model,
		visible = has_frame_enhancer
	)
	FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
		label = wording.get('uis.frame_enhancer_blend_slider'),
		value = frame_processors_globals.frame_enhancer_blend,
		step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0],
		minimum = frame_processors_choices.frame_enhancer_blend_range[0],
		maximum = frame_processors_choices.frame_enhancer_blend_range[-1],
		visible = has_frame_enhancer
	)
	LIP_SYNCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.lip_syncer_model_dropdown'),
		choices = frame_processors_choices.lip_syncer_models,
		value = frame_processors_globals.lip_syncer_model,
		visible = has_lip_syncer
	)
	register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
	register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
	register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
	register_ui_component('frame_colorizer_model_dropdown', FRAME_COLORIZER_MODEL_DROPDOWN)
	register_ui_component('frame_colorizer_blend_slider', FRAME_COLORIZER_BLEND_SLIDER)
	register_ui_component('frame_colorizer_size_dropdown', FRAME_COLORIZER_SIZE_DROPDOWN)
	register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
	register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN)
||||
|
||||
|
||||
def listen() -> None:
	"""Attach handlers to the option widgets and react to processor selection changes."""
	FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
	FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
	FACE_ENHANCER_BLEND_SLIDER.release(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
	FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
	FRAME_COLORIZER_MODEL_DROPDOWN.change(update_frame_colorizer_model, inputs = FRAME_COLORIZER_MODEL_DROPDOWN, outputs = FRAME_COLORIZER_MODEL_DROPDOWN)
	FRAME_COLORIZER_BLEND_SLIDER.release(update_frame_colorizer_blend, inputs = FRAME_COLORIZER_BLEND_SLIDER)
	FRAME_COLORIZER_SIZE_DROPDOWN.change(update_frame_colorizer_size, inputs = FRAME_COLORIZER_SIZE_DROPDOWN, outputs = FRAME_COLORIZER_SIZE_DROPDOWN)
	FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
	FRAME_ENHANCER_BLEND_SLIDER.release(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
	LIP_SYNCER_MODEL_DROPDOWN.change(update_lip_syncer_model, inputs = LIP_SYNCER_MODEL_DROPDOWN, outputs = LIP_SYNCER_MODEL_DROPDOWN)

	# toggle widget visibility whenever the set of active processors changes
	frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
	if frame_processors_checkbox_group:
		option_components =\
		[
			FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP,
			FACE_ENHANCER_MODEL_DROPDOWN,
			FACE_ENHANCER_BLEND_SLIDER,
			FACE_SWAPPER_MODEL_DROPDOWN,
			FRAME_COLORIZER_MODEL_DROPDOWN,
			FRAME_COLORIZER_BLEND_SLIDER,
			FRAME_COLORIZER_SIZE_DROPDOWN,
			FRAME_ENHANCER_MODEL_DROPDOWN,
			FRAME_ENHANCER_BLEND_SLIDER,
			LIP_SYNCER_MODEL_DROPDOWN
		]
		frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = option_components)
||||
|
||||
|
||||
def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown]:
	"""Show or hide each processor's option widgets based on the current selection."""
	has_face_debugger = 'face_debugger' in frame_processors
	has_face_enhancer = 'face_enhancer' in frame_processors
	has_face_swapper = 'face_swapper' in frame_processors
	has_frame_colorizer = 'frame_colorizer' in frame_processors
	has_frame_enhancer = 'frame_enhancer' in frame_processors
	has_lip_syncer = 'lip_syncer' in frame_processors
	return (
		gradio.CheckboxGroup(visible = has_face_debugger),
		gradio.Dropdown(visible = has_face_enhancer),
		gradio.Slider(visible = has_face_enhancer),
		gradio.Dropdown(visible = has_face_swapper),
		gradio.Dropdown(visible = has_frame_colorizer),
		gradio.Slider(visible = has_frame_colorizer),
		gradio.Dropdown(visible = has_frame_colorizer),
		gradio.Dropdown(visible = has_frame_enhancer),
		gradio.Slider(visible = has_frame_enhancer),
		gradio.Dropdown(visible = has_lip_syncer)
	)
||||
|
||||
|
||||
def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
	"""Store the selected face debugger overlays."""
	frame_processors_globals.face_debugger_items = face_debugger_items
||||
|
||||
|
||||
def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown:
	"""Switch the face enhancer model and prepare its assets."""
	frame_processors_globals.face_enhancer_model = face_enhancer_model
	face_enhancer_module = load_frame_processor_module('face_enhancer')
	face_enhancer_module.clear_frame_processor()
	face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model])
	# keep the dropdown unchanged when the model assets cannot be prepared
	if not face_enhancer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = frame_processors_globals.face_enhancer_model)
||||
|
||||
|
||||
def update_face_enhancer_blend(face_enhancer_blend : int) -> None:
	"""Store the face enhancer blend percentage."""
	frame_processors_globals.face_enhancer_blend = face_enhancer_blend
||||
|
||||
|
||||
def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
	"""Switch the face swapper model, pick its matching recognizer and prepare assets."""
	frame_processors_globals.face_swapper_model = face_swapper_model
	# each swapper model requires the arcface variant it was trained against
	face_recognizer_models =\
	{
		'blendswap_256' : 'arcface_blendswap',
		'inswapper_128' : 'arcface_inswapper',
		'inswapper_128_fp16' : 'arcface_inswapper',
		'simswap_256' : 'arcface_simswap',
		'simswap_512_unofficial' : 'arcface_simswap',
		'uniface_256' : 'arcface_uniface'
	}
	if face_swapper_model in face_recognizer_models:
		deepfuze.globals.face_recognizer_model = face_recognizer_models[face_swapper_model]
	face_swapper_module = load_frame_processor_module('face_swapper')
	face_swapper_module.clear_model_initializer()
	face_swapper_module.clear_frame_processor()
	face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
	# both the analyser and the swapper must be ready before confirming the choice
	if not (face_analyser.pre_check() and face_swapper_module.pre_check()):
		return gradio.Dropdown()
	return gradio.Dropdown(value = frame_processors_globals.face_swapper_model)
||||
|
||||
|
||||
def update_frame_colorizer_model(frame_colorizer_model : FrameColorizerModel) -> gradio.Dropdown:
	"""Switch the frame colorizer model and prepare its assets."""
	frame_processors_globals.frame_colorizer_model = frame_colorizer_model
	frame_colorizer_module = load_frame_processor_module('frame_colorizer')
	frame_colorizer_module.clear_frame_processor()
	frame_colorizer_module.set_options('model', frame_colorizer_module.MODELS[frame_colorizer_model])
	if not frame_colorizer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = frame_processors_globals.frame_colorizer_model)
||||
|
||||
|
||||
def update_frame_colorizer_blend(frame_colorizer_blend : int) -> None:
	"""Store the frame colorizer blend percentage."""
	frame_processors_globals.frame_colorizer_blend = frame_colorizer_blend
||||
|
||||
|
||||
def update_frame_colorizer_size(frame_colorizer_size : str) -> gradio.Dropdown:
	"""Store the frame colorizer input size and echo it back to the dropdown."""
	frame_processors_globals.frame_colorizer_size = frame_colorizer_size
	return gradio.Dropdown(value = frame_processors_globals.frame_colorizer_size)
||||
|
||||
|
||||
def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
	"""Switch the frame enhancer model and prepare its assets."""
	frame_processors_globals.frame_enhancer_model = frame_enhancer_model
	frame_enhancer_module = load_frame_processor_module('frame_enhancer')
	frame_enhancer_module.clear_frame_processor()
	frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model])
	if not frame_enhancer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = frame_processors_globals.frame_enhancer_model)
||||
|
||||
|
||||
def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None:
	"""Store the frame enhancer blend percentage."""
	frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend
||||
|
||||
|
||||
def update_lip_syncer_model(lip_syncer_model : LipSyncerModel) -> gradio.Dropdown:
	"""Switch the lip syncer model and prepare its assets."""
	frame_processors_globals.lip_syncer_model = lip_syncer_model
	lip_syncer_module = load_frame_processor_module('lip_syncer')
	lip_syncer_module.clear_frame_processor()
	lip_syncer_module.set_options('model', lip_syncer_module.MODELS[lip_syncer_model])
	if not lip_syncer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = frame_processors_globals.lip_syncer_model)
||||
@@ -0,0 +1,41 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze.typing import VideoMemoryStrategy
|
||||
from deepfuze import wording
|
||||
|
||||
VIDEO_MEMORY_STRATEGY_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the memory strategy dropdown and the system memory limit slider."""
	global VIDEO_MEMORY_STRATEGY_DROPDOWN
	global SYSTEM_MEMORY_LIMIT_SLIDER

	VIDEO_MEMORY_STRATEGY_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.video_memory_strategy_dropdown'),
		choices = deepfuze.choices.video_memory_strategies,
		value = deepfuze.globals.video_memory_strategy
	)
	# step/min/max are derived from the configured range
	SYSTEM_MEMORY_LIMIT_SLIDER = gradio.Slider(
		label = wording.get('uis.system_memory_limit_slider'),
		step = deepfuze.choices.system_memory_limit_range[1] - deepfuze.choices.system_memory_limit_range[0],
		minimum = deepfuze.choices.system_memory_limit_range[0],
		maximum = deepfuze.choices.system_memory_limit_range[-1],
		value = deepfuze.globals.system_memory_limit
	)
||||
|
||||
|
||||
def listen() -> None:
	"""Persist memory settings when the widgets change."""
	VIDEO_MEMORY_STRATEGY_DROPDOWN.change(update_video_memory_strategy, inputs = VIDEO_MEMORY_STRATEGY_DROPDOWN)
	SYSTEM_MEMORY_LIMIT_SLIDER.release(update_system_memory_limit, inputs = SYSTEM_MEMORY_LIMIT_SLIDER)
||||
|
||||
|
||||
def update_video_memory_strategy(video_memory_strategy : VideoMemoryStrategy) -> None:
	"""Store the selected video memory strategy."""
	deepfuze.globals.video_memory_strategy = video_memory_strategy
||||
|
||||
|
||||
def update_system_memory_limit(system_memory_limit : int) -> None:
	"""Store the system memory limit."""
	deepfuze.globals.system_memory_limit = system_memory_limit
||||
@@ -0,0 +1,88 @@
|
||||
from typing import Tuple, Optional
|
||||
from time import sleep
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import process_manager, wording
|
||||
from deepfuze.core import conditional_process
|
||||
from deepfuze.memory import limit_system_memory
|
||||
from deepfuze.normalizer import normalize_output_path
|
||||
from deepfuze.uis.core import get_ui_component
|
||||
from deepfuze.filesystem import clear_temp, is_image, is_video
|
||||
|
||||
OUTPUT_IMAGE : Optional[gradio.Image] = None
|
||||
OUTPUT_VIDEO : Optional[gradio.Video] = None
|
||||
OUTPUT_START_BUTTON : Optional[gradio.Button] = None
|
||||
OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None
|
||||
OUTPUT_STOP_BUTTON : Optional[gradio.Button] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the output preview widgets and the start/stop/clear buttons."""
	global OUTPUT_IMAGE
	global OUTPUT_VIDEO
	global OUTPUT_START_BUTTON
	global OUTPUT_STOP_BUTTON
	global OUTPUT_CLEAR_BUTTON

	# image and video share one label; only one of them is shown at a time
	OUTPUT_IMAGE = gradio.Image(
		label = wording.get('uis.output_image_or_video'),
		visible = False
	)
	OUTPUT_VIDEO = gradio.Video(
		label = wording.get('uis.output_image_or_video')
	)
	OUTPUT_START_BUTTON = gradio.Button(
		value = wording.get('uis.start_button'),
		variant = 'primary',
		size = 'sm'
	)
	# hidden until processing actually starts
	OUTPUT_STOP_BUTTON = gradio.Button(
		value = wording.get('uis.stop_button'),
		variant = 'primary',
		size = 'sm',
		visible = False
	)
	OUTPUT_CLEAR_BUTTON = gradio.Button(
		value = wording.get('uis.clear_button'),
		size = 'sm'
	)
||||
|
||||
|
||||
def listen() -> None:
	"""Wire the start/stop/clear buttons once the output path textbox exists."""
	output_path_textbox = get_ui_component('output_path_textbox')
	if output_path_textbox:
		# start swaps the buttons immediately while process runs the pipeline
		OUTPUT_START_BUTTON.click(start, outputs = [ OUTPUT_START_BUTTON, OUTPUT_STOP_BUTTON ])
		OUTPUT_START_BUTTON.click(process, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO, OUTPUT_START_BUTTON, OUTPUT_STOP_BUTTON ])
		OUTPUT_STOP_BUTTON.click(stop, outputs = [ OUTPUT_START_BUTTON, OUTPUT_STOP_BUTTON ])
		OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
||||
|
||||
|
||||
def start() -> Tuple[gradio.Button, gradio.Button]:
	"""Wait until processing has begun, then swap the start button for the stop button."""
	# the pipeline is launched by the sibling process handler; poll for it
	while not process_manager.is_processing():
		sleep(0.5)
	return gradio.Button(visible = False), gradio.Button(visible = True)
||||
|
||||
|
||||
def process() -> Tuple[gradio.Image, gradio.Video, gradio.Button, gradio.Button]:
	"""
	Run the processing pipeline and show the resulting image or video.

	Fix: guard the memory-limit comparison so an unset (None) value of
	deepfuze.globals.system_memory_limit no longer raises a TypeError.
	"""
	normed_output_path = normalize_output_path(deepfuze.globals.target_path, deepfuze.globals.output_path)
	if deepfuze.globals.system_memory_limit and deepfuze.globals.system_memory_limit > 0:
		limit_system_memory(deepfuze.globals.system_memory_limit)
	conditional_process()
	if is_image(normed_output_path):
		return gradio.Image(value = normed_output_path, visible = True), gradio.Video(value = None, visible = False), gradio.Button(visible = True), gradio.Button(visible = False)
	if is_video(normed_output_path):
		return gradio.Image(value = None, visible = False), gradio.Video(value = normed_output_path, visible = True), gradio.Button(visible = True), gradio.Button(visible = False)
	# no output was produced: clear both previews and restore the start button
	return gradio.Image(value = None), gradio.Video(value = None), gradio.Button(visible = True), gradio.Button(visible = False)
||||
|
||||
|
||||
def stop() -> Tuple[gradio.Button, gradio.Button]:
	"""Abort processing and restore the start button."""
	process_manager.stop()
	return gradio.Button(visible = True), gradio.Button(visible = False)
||||
|
||||
|
||||
def clear() -> Tuple[gradio.Image, gradio.Video]:
	"""Wait for processing to finish, drop temp files and empty both previews."""
	while process_manager.is_processing():
		sleep(0.5)
	if deepfuze.globals.target_path:
		clear_temp(deepfuze.globals.target_path)
	return gradio.Image(value = None), gradio.Video(value = None)
||||
@@ -0,0 +1,161 @@
|
||||
from typing import Optional, Tuple
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import wording
|
||||
from deepfuze.typing import OutputVideoEncoder, OutputVideoPreset, Fps
|
||||
from deepfuze.filesystem import is_image, is_video
|
||||
from deepfuze.uis.core import get_ui_components, register_ui_component
|
||||
from deepfuze.vision import detect_image_resolution, create_image_resolutions, detect_video_fps, detect_video_resolution, create_video_resolutions, pack_resolution
|
||||
|
||||
OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None
|
||||
OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None
|
||||
OUTPUT_IMAGE_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
OUTPUT_VIDEO_PRESET_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
OUTPUT_VIDEO_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None
|
||||
OUTPUT_VIDEO_FPS_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
	"""Create the output option widgets, sized to the current target media."""
	global OUTPUT_PATH_TEXTBOX
	global OUTPUT_IMAGE_QUALITY_SLIDER
	global OUTPUT_IMAGE_RESOLUTION_DROPDOWN
	global OUTPUT_VIDEO_ENCODER_DROPDOWN
	global OUTPUT_VIDEO_PRESET_DROPDOWN
	global OUTPUT_VIDEO_RESOLUTION_DROPDOWN
	global OUTPUT_VIDEO_QUALITY_SLIDER
	global OUTPUT_VIDEO_FPS_SLIDER

	# probe the target once; the flags drive visibility of every widget below
	target_is_image = is_image(deepfuze.globals.target_path)
	target_is_video = is_video(deepfuze.globals.target_path)
	output_image_resolutions = []
	output_video_resolutions = []
	if target_is_image:
		output_image_resolution = detect_image_resolution(deepfuze.globals.target_path)
		output_image_resolutions = create_image_resolutions(output_image_resolution)
	if target_is_video:
		output_video_resolution = detect_video_resolution(deepfuze.globals.target_path)
		output_video_resolutions = create_video_resolutions(output_video_resolution)
	deepfuze.globals.output_path = deepfuze.globals.output_path or '.'
	OUTPUT_PATH_TEXTBOX = gradio.Textbox(
		label = wording.get('uis.output_path_textbox'),
		value = deepfuze.globals.output_path,
		max_lines = 1
	)
	OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider(
		label = wording.get('uis.output_image_quality_slider'),
		value = deepfuze.globals.output_image_quality,
		step = deepfuze.choices.output_image_quality_range[1] - deepfuze.choices.output_image_quality_range[0],
		minimum = deepfuze.choices.output_image_quality_range[0],
		maximum = deepfuze.choices.output_image_quality_range[-1],
		visible = target_is_image
	)
	OUTPUT_IMAGE_RESOLUTION_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.output_image_resolution_dropdown'),
		choices = output_image_resolutions,
		value = deepfuze.globals.output_image_resolution,
		visible = target_is_image
	)
	OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.output_video_encoder_dropdown'),
		choices = deepfuze.choices.output_video_encoders,
		value = deepfuze.globals.output_video_encoder,
		visible = target_is_video
	)
	OUTPUT_VIDEO_PRESET_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.output_video_preset_dropdown'),
		choices = deepfuze.choices.output_video_presets,
		value = deepfuze.globals.output_video_preset,
		visible = target_is_video
	)
	OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
		label = wording.get('uis.output_video_quality_slider'),
		value = deepfuze.globals.output_video_quality,
		step = deepfuze.choices.output_video_quality_range[1] - deepfuze.choices.output_video_quality_range[0],
		minimum = deepfuze.choices.output_video_quality_range[0],
		maximum = deepfuze.choices.output_video_quality_range[-1],
		visible = target_is_video
	)
	OUTPUT_VIDEO_RESOLUTION_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.output_video_resolution_dropdown'),
		choices = output_video_resolutions,
		value = deepfuze.globals.output_video_resolution,
		visible = target_is_video
	)
	OUTPUT_VIDEO_FPS_SLIDER = gradio.Slider(
		label = wording.get('uis.output_video_fps_slider'),
		value = deepfuze.globals.output_video_fps,
		step = 0.01,
		minimum = 1,
		maximum = 60,
		visible = target_is_video
	)
	register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX)
	register_ui_component('output_video_fps_slider', OUTPUT_VIDEO_FPS_SLIDER)
||||
|
||||
|
||||
def listen() -> None:
	"""Persist output options on change and refresh the widgets when the target changes."""
	OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX)
	OUTPUT_IMAGE_QUALITY_SLIDER.release(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
	OUTPUT_IMAGE_RESOLUTION_DROPDOWN.change(update_output_image_resolution, inputs = OUTPUT_IMAGE_RESOLUTION_DROPDOWN)
	OUTPUT_VIDEO_ENCODER_DROPDOWN.change(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
	OUTPUT_VIDEO_PRESET_DROPDOWN.change(update_output_video_preset, inputs = OUTPUT_VIDEO_PRESET_DROPDOWN)
	OUTPUT_VIDEO_QUALITY_SLIDER.release(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
	OUTPUT_VIDEO_RESOLUTION_DROPDOWN.change(update_output_video_resolution, inputs = OUTPUT_VIDEO_RESOLUTION_DROPDOWN)
	OUTPUT_VIDEO_FPS_SLIDER.release(update_output_video_fps, inputs = OUTPUT_VIDEO_FPS_SLIDER)

	# any target change re-detects resolutions/fps and toggles visibility
	for target_component in get_ui_components(
	[
		'target_image',
		'target_video'
	]):
		for event_name in [ 'upload', 'change', 'clear' ]:
			getattr(target_component, event_name)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_FPS_SLIDER ])
||||
|
||||
|
||||
def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider]:
    """Toggle image- vs video-specific output controls to match the current target.

    The return order must match the ``outputs`` list wired in listen():
    image quality, image resolution, video encoder, video preset,
    video quality, video resolution, video fps.
    """
    if is_image(deepfuze.globals.target_path):
        output_image_resolution = detect_image_resolution(deepfuze.globals.target_path)
        output_image_resolutions = create_image_resolutions(output_image_resolution)
        # Default the selection to the target's native resolution.
        deepfuze.globals.output_image_resolution = pack_resolution(output_image_resolution)
        return gradio.Slider(visible = True), gradio.Dropdown(visible = True, value = deepfuze.globals.output_image_resolution, choices = output_image_resolutions), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False, value = None, choices = None), gradio.Slider(visible = False, value = None)
    if is_video(deepfuze.globals.target_path):
        output_video_resolution = detect_video_resolution(deepfuze.globals.target_path)
        output_video_resolutions = create_video_resolutions(output_video_resolution)
        # Persist the detected native resolution and fps as defaults.
        deepfuze.globals.output_video_resolution = pack_resolution(output_video_resolution)
        deepfuze.globals.output_video_fps = detect_video_fps(deepfuze.globals.target_path)
        return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = True), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Dropdown(visible = True, value = deepfuze.globals.output_video_resolution, choices = output_video_resolutions), gradio.Slider(visible = True, value = deepfuze.globals.output_video_fps)
    # No target selected: hide every output control.
    return gradio.Slider(visible = False), gradio.Dropdown(visible = False, value = None, choices = None), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False, value = None, choices = None), gradio.Slider(visible = False, value = None)
|
||||
|
||||
|
||||
def update_output_path(output_path : str) -> None:
    # Persist the chosen output path into the shared globals module.
    deepfuze.globals.output_path = output_path
|
||||
|
||||
|
||||
def update_output_image_quality(output_image_quality : int) -> None:
    # Persist the image quality setting into the shared globals module.
    deepfuze.globals.output_image_quality = output_image_quality
|
||||
|
||||
|
||||
def update_output_image_resolution(output_image_resolution : str) -> None:
    # Persist the packed 'WIDTHxHEIGHT' image resolution into the globals module.
    deepfuze.globals.output_image_resolution = output_image_resolution
|
||||
|
||||
|
||||
def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> None:
    # Persist the selected video encoder into the shared globals module.
    deepfuze.globals.output_video_encoder = output_video_encoder
|
||||
|
||||
|
||||
def update_output_video_preset(output_video_preset : OutputVideoPreset) -> None:
    # Persist the selected encoder preset into the shared globals module.
    deepfuze.globals.output_video_preset = output_video_preset
|
||||
|
||||
|
||||
def update_output_video_quality(output_video_quality : int) -> None:
    # Persist the video quality setting into the shared globals module.
    deepfuze.globals.output_video_quality = output_video_quality
|
||||
|
||||
|
||||
def update_output_video_resolution(output_video_resolution : str) -> None:
    # Persist the packed 'WIDTHxHEIGHT' video resolution into the globals module.
    deepfuze.globals.output_video_resolution = output_video_resolution
|
||||
|
||||
|
||||
def update_output_video_fps(output_video_fps : Fps) -> None:
    # Persist the output frame rate into the shared globals module.
    deepfuze.globals.output_video_fps = output_video_fps
|
||||
Executable
+207
@@ -0,0 +1,207 @@
|
||||
from typing import Any, Dict, Optional
|
||||
from time import sleep
|
||||
import cv2
|
||||
import gradio
|
||||
import numpy
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import logger, wording
|
||||
from deepfuze.audio import get_audio_frame, create_empty_audio_frame
|
||||
from deepfuze.common_helper import get_first
|
||||
from deepfuze.core import conditional_append_reference_faces
|
||||
from deepfuze.face_analyser import get_average_face, clear_face_analyser
|
||||
from deepfuze.face_store import clear_static_faces, get_reference_faces, clear_reference_faces
|
||||
from deepfuze.typing import Face, FaceSet, AudioFrame, VisionFrame
|
||||
from deepfuze.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_resolution, read_static_image, read_static_images
|
||||
from deepfuze.filesystem import is_image, is_video, filter_audio_paths
|
||||
from deepfuze.content_analyser import analyse_frame
|
||||
from deepfuze.processors.frame.core import load_frame_processor_module
|
||||
from deepfuze.uis.core import get_ui_component, get_ui_components, register_ui_component
|
||||
|
||||
PREVIEW_IMAGE : Optional[gradio.Image] = None
|
||||
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Build the preview image and frame slider from the current globals."""
    global PREVIEW_IMAGE
    global PREVIEW_FRAME_SLIDER

    preview_image_args : Dict[str, Any] =\
    {
        'label': wording.get('uis.preview_image'),
        'interactive': False
    }
    preview_frame_slider_args : Dict[str, Any] =\
    {
        'label': wording.get('uis.preview_frame_slider'),
        'step': 1,
        'minimum': 0,
        'maximum': 100,
        'visible': False
    }
    conditional_append_reference_faces()
    # Reference faces are only needed in 'reference' selector mode.
    reference_faces = get_reference_faces() if 'reference' in deepfuze.globals.face_selector_mode else None
    source_frames = read_static_images(deepfuze.globals.source_paths)
    source_face = get_average_face(source_frames)
    source_audio_path = get_first(filter_audio_paths(deepfuze.globals.source_paths))
    # Fall back to a silent frame unless real audio exists at the reference frame.
    source_audio_frame = create_empty_audio_frame()
    if source_audio_path and deepfuze.globals.output_video_fps and deepfuze.globals.reference_frame_number:
        temp_audio_frame = get_audio_frame(source_audio_path, deepfuze.globals.output_video_fps, deepfuze.globals.reference_frame_number)
        if numpy.any(temp_audio_frame):
            source_audio_frame = temp_audio_frame
    if is_image(deepfuze.globals.target_path):
        target_vision_frame = read_static_image(deepfuze.globals.target_path)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
    if is_video(deepfuze.globals.target_path):
        temp_vision_frame = get_video_frame(deepfuze.globals.target_path, deepfuze.globals.reference_frame_number)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
        preview_image_args['visible'] = True
        # The slider only makes sense for videos; size it to the frame count.
        preview_frame_slider_args['value'] = deepfuze.globals.reference_frame_number
        preview_frame_slider_args['maximum'] = count_video_frame_total(deepfuze.globals.target_path)
        preview_frame_slider_args['visible'] = True
    PREVIEW_IMAGE = gradio.Image(**preview_image_args)
    PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
    register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
|
||||
|
||||
|
||||
def listen() -> None:
    """Connect every setting that affects the preview to a refresh handler."""
    PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
    reference_face_position_gallery = get_ui_component('reference_face_position_gallery')
    if reference_face_position_gallery:
        reference_face_position_gallery.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)

    # Any source/target swap redraws the preview.
    for ui_component in get_ui_components(
    [
        'source_audio',
        'source_image',
        'target_image',
        'target_video'
    ]):
        for method in [ 'upload', 'change', 'clear' ]:
            getattr(ui_component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)

    # A new target also resizes/hides the frame slider.
    for ui_component in get_ui_components(
    [
        'target_image',
        'target_video'
    ]):
        for method in [ 'upload', 'change', 'clear' ]:
            getattr(ui_component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)

    # Option widgets that refresh the preview on change().
    for ui_component in get_ui_components(
    [
        'face_debugger_items_checkbox_group',
        'frame_colorizer_size_dropdown',
        'face_selector_mode_dropdown',
        'face_mask_types_checkbox_group',
        'face_mask_region_checkbox_group',
        'face_analyser_order_dropdown',
        'face_analyser_age_dropdown',
        'face_analyser_gender_dropdown'
    ]):
        ui_component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)

    # Sliders refresh on release() so dragging does not spam renders.
    for ui_component in get_ui_components(
    [
        'face_enhancer_blend_slider',
        'frame_colorizer_blend_slider',
        'frame_enhancer_blend_slider',
        'trim_frame_start_slider',
        'trim_frame_end_slider',
        'reference_face_distance_slider',
        'face_mask_blur_slider',
        'face_mask_padding_top_slider',
        'face_mask_padding_bottom_slider',
        'face_mask_padding_left_slider',
        'face_mask_padding_right_slider',
        'output_video_fps_slider'
    ]):
        ui_component.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)

    # Model/processor choices invalidate cached faces before re-rendering.
    for ui_component in get_ui_components(
    [
        'frame_processors_checkbox_group',
        'face_enhancer_model_dropdown',
        'face_swapper_model_dropdown',
        'frame_colorizer_model_dropdown',
        'frame_enhancer_model_dropdown',
        'lip_syncer_model_dropdown',
        'face_detector_model_dropdown',
        'face_detector_size_dropdown'
    ]):
        ui_component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)

    # Detector thresholds do the same, but on release().
    for ui_component in get_ui_components(
    [
        'face_detector_score_slider',
        'face_landmarker_score_slider'
    ]):
        ui_component.release(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
|
||||
|
||||
|
||||
def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
    """Drop every cached face artefact, then rebuild the preview frame."""
    # A model or detector setting changed, so analyser results, reference
    # faces and static faces are all stale.
    for invalidate in (clear_face_analyser, clear_reference_faces, clear_static_faces):
        invalidate()
    return update_preview_image(frame_number)
|
||||
|
||||
|
||||
def update_preview_image(frame_number : int = 0) -> gradio.Image:
    """Render a fresh preview for *frame_number* and return it as a gradio.Image."""
    # Block until every selected frame processor has its model available.
    for frame_processor in deepfuze.globals.frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        while not frame_processor_module.post_check():
            logger.disable()
            sleep(0.5)
            logger.enable()
    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in deepfuze.globals.face_selector_mode else None
    source_frames = read_static_images(deepfuze.globals.source_paths)
    source_face = get_average_face(source_frames)
    source_audio_path = get_first(filter_audio_paths(deepfuze.globals.source_paths))
    source_audio_frame = create_empty_audio_frame()
    if source_audio_path and deepfuze.globals.output_video_fps and deepfuze.globals.reference_frame_number:
        reference_audio_frame_number = deepfuze.globals.reference_frame_number
        # The audio track is not trimmed, so shift the lookup by the trim offset.
        if deepfuze.globals.trim_frame_start:
            reference_audio_frame_number -= deepfuze.globals.trim_frame_start
        temp_audio_frame = get_audio_frame(source_audio_path, deepfuze.globals.output_video_fps, reference_audio_frame_number)
        if numpy.any(temp_audio_frame):
            source_audio_frame = temp_audio_frame
    if is_image(deepfuze.globals.target_path):
        target_vision_frame = read_static_image(deepfuze.globals.target_path)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value = preview_vision_frame)
    if is_video(deepfuze.globals.target_path):
        temp_vision_frame = get_video_frame(deepfuze.globals.target_path, frame_number)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value = preview_vision_frame)
    # No target selected: blank the preview.
    return gradio.Image(value = None)
|
||||
|
||||
|
||||
def update_preview_frame_slider() -> gradio.Slider:
    """Size the preview slider to the target video, or hide it for non-videos."""
    if not is_video(deepfuze.globals.target_path):
        # No video target: blank the slider and take it off screen.
        return gradio.Slider(value = None, maximum = None, visible = False)
    total_frames = count_video_frame_total(deepfuze.globals.target_path)
    return gradio.Slider(maximum = total_frames, visible = True)
|
||||
|
||||
|
||||
def process_preview_frame(reference_faces : FaceSet, source_face : Face, source_audio_frame : AudioFrame, target_vision_frame : VisionFrame) -> VisionFrame:
    """Run the enabled frame processors over one frame, capped at 640x640."""
    # Downscale first so preview rendering stays fast.
    target_vision_frame = resize_frame_resolution(target_vision_frame, (640, 640))
    if analyse_frame(target_vision_frame):
        # Flagged content: return a heavily blurred frame instead of processing.
        return cv2.GaussianBlur(target_vision_frame, (99, 99), 0)
    for frame_processor in deepfuze.globals.frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        logger.disable()
        # NOTE(review): when pre_process fails the logger stays disabled —
        # mirrors the order used elsewhere in this file; confirm intended.
        if frame_processor_module.pre_process('preview'):
            logger.enable()
            target_vision_frame = frame_processor_module.process_frame(
            {
                'reference_faces': reference_faces,
                'source_face': source_face,
                'source_audio_frame': source_audio_frame,
                'target_vision_frame': target_vision_frame
            })
    return target_vision_frame
|
||||
@@ -0,0 +1,67 @@
|
||||
from typing import Optional, List, Tuple
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import wording
|
||||
from deepfuze.uis.typing import File
|
||||
from deepfuze.common_helper import get_first
|
||||
from deepfuze.filesystem import has_audio, has_image, filter_audio_paths, filter_image_paths
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
|
||||
SOURCE_FILE : Optional[gradio.File] = None
|
||||
SOURCE_AUDIO : Optional[gradio.Audio] = None
|
||||
SOURCE_IMAGE : Optional[gradio.Image] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Build the source file picker plus audio/image previews of the selection."""
    global SOURCE_FILE
    global SOURCE_AUDIO
    global SOURCE_IMAGE

    has_source_audio = has_audio(deepfuze.globals.source_paths)
    has_source_image = has_image(deepfuze.globals.source_paths)
    SOURCE_FILE = gradio.File(
        file_count = 'multiple',
        file_types =
        [
            '.mp3',
            '.wav',
            '.png',
            '.jpg',
            '.webp'
        ],
        label = wording.get('uis.source_file'),
        value = deepfuze.globals.source_paths if has_source_audio or has_source_image else None
    )
    # gradio wraps the value in file dicts; read the resolved names back out.
    source_file_names = [ source_file_value['name'] for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None
    source_audio_path = get_first(filter_audio_paths(source_file_names))
    source_image_path = get_first(filter_image_paths(source_file_names))
    SOURCE_AUDIO = gradio.Audio(
        value = source_audio_path if has_source_audio else None,
        visible = has_source_audio,
        show_label = False
    )
    SOURCE_IMAGE = gradio.Image(
        value = source_image_path if has_source_image else None,
        visible = has_source_image,
        show_label = False
    )
    register_ui_component('source_audio', SOURCE_AUDIO)
    register_ui_component('source_image', SOURCE_IMAGE)
|
||||
|
||||
|
||||
def listen() -> None:
    # Re-render the audio/image previews whenever the file selection changes.
    SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = [ SOURCE_AUDIO, SOURCE_IMAGE ])
|
||||
|
||||
|
||||
def update(files : List[File]) -> Tuple[gradio.Audio, gradio.Image]:
    """Reflect freshly uploaded source files in the audio/image previews."""
    file_names = [ file.name for file in files ] if files else None
    has_source_audio = has_audio(file_names)
    has_source_image = has_image(file_names)
    if not has_source_audio and not has_source_image:
        # Nothing usable was uploaded: forget the sources and hide both previews.
        deepfuze.globals.source_paths = None
        return gradio.Audio(value = None, visible = False), gradio.Image(value = None, visible = False)
    # Show the first audio and the first image found among the uploads.
    source_audio_path = get_first(filter_audio_paths(file_names))
    source_image_path = get_first(filter_image_paths(file_names))
    deepfuze.globals.source_paths = file_names
    return gradio.Audio(value = source_audio_path, visible = has_source_audio), gradio.Image(value = source_image_path, visible = has_source_image)
|
||||
@@ -0,0 +1,83 @@
|
||||
from typing import Tuple, Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import wording
|
||||
from deepfuze.face_store import clear_static_faces, clear_reference_faces
|
||||
from deepfuze.uis.typing import File
|
||||
from deepfuze.filesystem import get_file_size, is_image, is_video
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
from deepfuze.vision import get_video_frame, normalize_frame_color
|
||||
|
||||
FILE_SIZE_LIMIT = 512 * 1024 * 1024
|
||||
|
||||
TARGET_FILE : Optional[gradio.File] = None
|
||||
TARGET_IMAGE : Optional[gradio.Image] = None
|
||||
TARGET_VIDEO : Optional[gradio.Video] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Build the target file picker and matching image/video previews."""
    global TARGET_FILE
    global TARGET_IMAGE
    global TARGET_VIDEO

    is_target_image = is_image(deepfuze.globals.target_path)
    is_target_video = is_video(deepfuze.globals.target_path)
    TARGET_FILE = gradio.File(
        label = wording.get('uis.target_file'),
        file_count = 'single',
        file_types =
        [
            '.png',
            '.jpg',
            '.webp',
            '.webm',
            '.mp4'
        ],
        value = deepfuze.globals.target_path if is_target_image or is_target_video else None
    )
    target_image_args =\
    {
        'show_label': False,
        'visible': False
    }
    target_video_args =\
    {
        'show_label': False,
        'visible': False
    }
    if is_target_image:
        target_image_args['value'] = TARGET_FILE.value['name']
        target_image_args['visible'] = True
    if is_target_video:
        if get_file_size(deepfuze.globals.target_path) > FILE_SIZE_LIMIT:
            # Oversized videos get a single-frame image preview to keep the UI responsive.
            preview_vision_frame = normalize_frame_color(get_video_frame(deepfuze.globals.target_path))
            target_image_args['value'] = preview_vision_frame
            target_image_args['visible'] = True
        else:
            target_video_args['value'] = TARGET_FILE.value['name']
            target_video_args['visible'] = True
    TARGET_IMAGE = gradio.Image(**target_image_args)
    TARGET_VIDEO = gradio.Video(**target_video_args)
    register_ui_component('target_image', TARGET_IMAGE)
    register_ui_component('target_video', TARGET_VIDEO)
|
||||
|
||||
|
||||
def listen() -> None:
    # Swap previews and globals whenever a new target file is picked.
    TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
|
||||
|
||||
|
||||
def update(file : File) -> Tuple[gradio.Image, gradio.Video]:
    """Persist the newly selected target and toggle the matching preview."""
    # The target changed, so cached faces for the old target are stale.
    clear_reference_faces()
    clear_static_faces()
    if file and is_image(file.name):
        deepfuze.globals.target_path = file.name
        return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False)
    if file and is_video(file.name):
        deepfuze.globals.target_path = file.name
        if get_file_size(file.name) > FILE_SIZE_LIMIT:
            # Over the size limit: show a still frame instead of embedding the video.
            preview_vision_frame = normalize_frame_color(get_video_frame(file.name))
            return gradio.Image(value = preview_vision_frame, visible = True), gradio.Video(value = None, visible = False)
        return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True)
    # Neither image nor video: clear the target and hide both previews.
    deepfuze.globals.target_path = None
    return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False)
|
||||
@@ -0,0 +1,41 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
import deepfuze.choices
|
||||
from deepfuze import wording
|
||||
from deepfuze.typing import TempFrameFormat
|
||||
from deepfuze.filesystem import is_video
|
||||
from deepfuze.uis.core import get_ui_component
|
||||
|
||||
TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Build the temp frame format dropdown (shown for video targets only)."""
    global TEMP_FRAME_FORMAT_DROPDOWN

    TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
        label = wording.get('uis.temp_frame_format_dropdown'),
        choices = deepfuze.choices.temp_frame_formats,
        value = deepfuze.globals.temp_frame_format,
        visible = is_video(deepfuze.globals.target_path)
    )
|
||||
|
||||
|
||||
def listen() -> None:
    TEMP_FRAME_FORMAT_DROPDOWN.change(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN)
    # Show/hide the dropdown as the target video comes and goes.
    target_video = get_ui_component('target_video')
    if target_video:
        for method in [ 'upload', 'change', 'clear' ]:
            getattr(target_video, method)(remote_update, outputs = TEMP_FRAME_FORMAT_DROPDOWN)
|
||||
|
||||
|
||||
def remote_update() -> gradio.Dropdown:
    """Show the temp frame format dropdown only while a video target is set."""
    return gradio.Dropdown(visible = is_video(deepfuze.globals.target_path))
|
||||
|
||||
|
||||
def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> None:
    # Persist the chosen temp frame format into the shared globals module.
    deepfuze.globals.temp_frame_format = temp_frame_format
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
from typing import Any, Dict, Tuple, Optional
|
||||
import gradio
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import wording
|
||||
from deepfuze.face_store import clear_static_faces
|
||||
from deepfuze.vision import count_video_frame_total
|
||||
from deepfuze.filesystem import is_video
|
||||
from deepfuze.uis.core import get_ui_components, register_ui_component
|
||||
|
||||
TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
|
||||
TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Build the trim start/end sliders, sized to the target video if present."""
    global TRIM_FRAME_START_SLIDER
    global TRIM_FRAME_END_SLIDER

    trim_frame_start_slider_args : Dict[str, Any] =\
    {
        'label': wording.get('uis.trim_frame_start_slider'),
        'step': 1,
        'minimum': 0,
        'maximum': 100,
        'visible': False
    }
    trim_frame_end_slider_args : Dict[str, Any] =\
    {
        'label': wording.get('uis.trim_frame_end_slider'),
        'step': 1,
        'minimum': 0,
        'maximum': 100,
        'visible': False
    }
    if is_video(deepfuze.globals.target_path):
        video_frame_total = count_video_frame_total(deepfuze.globals.target_path)
        # None in globals means "untrimmed": default the sliders to the full range.
        trim_frame_start_slider_args['value'] = deepfuze.globals.trim_frame_start or 0
        trim_frame_start_slider_args['maximum'] = video_frame_total
        trim_frame_start_slider_args['visible'] = True
        trim_frame_end_slider_args['value'] = deepfuze.globals.trim_frame_end or video_frame_total
        trim_frame_end_slider_args['maximum'] = video_frame_total
        trim_frame_end_slider_args['visible'] = True
    with gradio.Row():
        TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
        TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
    register_ui_component('trim_frame_start_slider', TRIM_FRAME_START_SLIDER)
    register_ui_component('trim_frame_end_slider', TRIM_FRAME_END_SLIDER)
|
||||
|
||||
|
||||
def listen() -> None:
    TRIM_FRAME_START_SLIDER.release(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER)
    TRIM_FRAME_END_SLIDER.release(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER)
    # Reset both sliders whenever the target changes.
    for ui_component in get_ui_components(
    [
        'target_image',
        'target_video'
    ]):
        for method in [ 'upload', 'change', 'clear' ]:
            getattr(ui_component, method)(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ])
|
||||
|
||||
|
||||
def remote_update() -> Tuple[gradio.Slider, gradio.Slider]:
    """Reset both trim sliders after the target changed."""
    if is_video(deepfuze.globals.target_path):
        video_frame_total = count_video_frame_total(deepfuze.globals.target_path)
        # New video: drop any previous trim and span the whole clip.
        deepfuze.globals.trim_frame_start = None
        deepfuze.globals.trim_frame_end = None
        return gradio.Slider(value = 0, maximum = video_frame_total, visible = True), gradio.Slider(value = video_frame_total, maximum = video_frame_total, visible = True)
    return gradio.Slider(value = None, maximum = None, visible = False), gradio.Slider(value = None, maximum = None, visible = False)
|
||||
|
||||
|
||||
def update_trim_frame_start(trim_frame_start : int) -> None:
    """Persist the trim start; None in globals means "from the first frame"."""
    # The trim range affects which faces get cached, so invalidate them.
    clear_static_faces()
    deepfuze.globals.trim_frame_start = None if trim_frame_start <= 0 else trim_frame_start
|
||||
|
||||
|
||||
def update_trim_frame_end(trim_frame_end : int) -> None:
    """Persist the trim end; None in globals means "through the last frame"."""
    # The trim range affects which faces get cached, so invalidate them.
    clear_static_faces()
    video_frame_total = count_video_frame_total(deepfuze.globals.target_path)
    deepfuze.globals.trim_frame_end = None if trim_frame_end >= video_frame_total else trim_frame_end
|
||||
@@ -0,0 +1,180 @@
|
||||
from typing import Optional, Generator, Deque
|
||||
import os
|
||||
import subprocess
|
||||
import cv2
|
||||
import gradio
|
||||
from time import sleep
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from collections import deque
|
||||
from tqdm import tqdm
|
||||
|
||||
import deepfuze.globals
|
||||
from deepfuze import logger, wording
|
||||
from deepfuze.audio import create_empty_audio_frame
|
||||
from deepfuze.common_helper import is_windows
|
||||
from deepfuze.content_analyser import analyse_stream
|
||||
from deepfuze.filesystem import filter_image_paths
|
||||
from deepfuze.typing import VisionFrame, Face, Fps
|
||||
from deepfuze.face_analyser import get_average_face
|
||||
from deepfuze.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
|
||||
from deepfuze.ffmpeg import open_ffmpeg
|
||||
from deepfuze.vision import normalize_frame_color, read_static_images, unpack_resolution
|
||||
from deepfuze.uis.typing import StreamMode, WebcamMode
|
||||
from deepfuze.uis.core import get_ui_component, get_ui_components
|
||||
|
||||
WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None
|
||||
WEBCAM_IMAGE : Optional[gradio.Image] = None
|
||||
WEBCAM_START_BUTTON : Optional[gradio.Button] = None
|
||||
WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None
|
||||
|
||||
|
||||
def get_webcam_capture() -> Optional[cv2.VideoCapture]:
    """Return the shared webcam capture, opening device 0 on first use."""
    global WEBCAM_CAPTURE

    if WEBCAM_CAPTURE is None:
        # The DirectShow backend is used on Windows; default backend elsewhere.
        candidate = cv2.VideoCapture(0, cv2.CAP_DSHOW) if is_windows() else cv2.VideoCapture(0)
        if candidate and candidate.isOpened():
            WEBCAM_CAPTURE = candidate
    return WEBCAM_CAPTURE
|
||||
|
||||
|
||||
def clear_webcam_capture() -> None:
    """Release the shared webcam capture, if any, and forget it."""
    global WEBCAM_CAPTURE

    if WEBCAM_CAPTURE:
        WEBCAM_CAPTURE.release()
    WEBCAM_CAPTURE = None
|
||||
|
||||
|
||||
def render() -> None:
    """Build the webcam preview image and the start/stop buttons."""
    global WEBCAM_IMAGE
    global WEBCAM_START_BUTTON
    global WEBCAM_STOP_BUTTON

    WEBCAM_IMAGE = gradio.Image(
        label = wording.get('uis.webcam_image')
    )
    WEBCAM_START_BUTTON = gradio.Button(
        value = wording.get('uis.start_button'),
        variant = 'primary',
        size = 'sm'
    )
    WEBCAM_STOP_BUTTON = gradio.Button(
        value = wording.get('uis.stop_button'),
        size = 'sm'
    )
|
||||
|
||||
|
||||
def listen() -> None:
    """Wire start/stop and cancel the running stream when settings change."""
    start_event = None
    webcam_mode_radio = get_ui_component('webcam_mode_radio')
    webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown')
    webcam_fps_slider = get_ui_component('webcam_fps_slider')
    if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider:
        start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE)
        # Stop also cancels the generator driving the live preview.
        WEBCAM_STOP_BUTTON.click(stop, cancels = start_event)

    # Changing processors, models or the source mid-stream aborts the stream.
    for ui_component in get_ui_components(
    [
        'frame_processors_checkbox_group',
        'face_swapper_model_dropdown',
        'face_enhancer_model_dropdown',
        'frame_enhancer_model_dropdown',
        'lip_syncer_model_dropdown',
        'source_image'
    ]):
        ui_component.change(update, cancels = start_event)
|
||||
|
||||
|
||||
def start(webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) -> Generator[VisionFrame, None, None]:
    """Stream processed webcam frames inline, over UDP or into a v4l2 device.

    :param webcam_mode: 'inline' yields frames to the UI; 'udp'/'v4l2' pipe
                        them into an ffmpeg stream instead
    :param webcam_resolution: packed 'WIDTHxHEIGHT' string
    :param webcam_fps: desired capture frame rate
    """
    # Live streaming only tracks a single face, largest first.
    deepfuze.globals.face_selector_mode = 'one'
    deepfuze.globals.face_analyser_order = 'large-small'
    source_image_paths = filter_image_paths(deepfuze.globals.source_paths)
    source_frames = read_static_images(source_image_paths)
    source_face = get_average_face(source_frames)
    stream = None

    if webcam_mode in [ 'udp', 'v4l2' ]:
        stream = open_stream(webcam_mode, webcam_resolution, webcam_fps) #type:ignore[arg-type]
    webcam_width, webcam_height = unpack_resolution(webcam_resolution)
    webcam_capture = get_webcam_capture()
    if webcam_capture and webcam_capture.isOpened():
        # MJPG keeps the capture device's bandwidth manageable at higher resolutions.
        webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) #type:ignore[attr-defined]
        webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
        webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
        webcam_capture.set(cv2.CAP_PROP_FPS, webcam_fps)
        for capture_frame in multi_process_capture(source_face, webcam_capture, webcam_fps):
            if webcam_mode == 'inline':
                yield normalize_frame_color(capture_frame)
            else:
                try:
                    stream.stdin.write(capture_frame.tobytes())
                except Exception:
                    # ffmpeg went away: release the camera so a restart can reopen it.
                    clear_webcam_capture()
                yield None
|
||||
|
||||
|
||||
def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, webcam_fps : Fps) -> Generator[VisionFrame, None, None]:
    """Read webcam frames, process them on a thread pool and yield the results.

    :param source_face: averaged source face applied by the frame processors
    :param webcam_capture: an opened cv2.VideoCapture to read frames from
    :param webcam_fps: capture rate, forwarded to the stream analyser
    """
    with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = deepfuze.globals.log_level in [ 'warn', 'error' ]) as progress:
        with ThreadPoolExecutor(max_workers = deepfuze.globals.execution_thread_count) as executor:
            futures = []
            deque_capture_frames : Deque[VisionFrame] = deque()
            while webcam_capture and webcam_capture.isOpened():
                has_frame, capture_frame = webcam_capture.read()
                if not has_frame:
                    # Fix: the original discarded the read() flag, so a failed
                    # read submitted a None frame to the processors. Stop instead.
                    break
                if analyse_stream(capture_frame, webcam_fps):
                    return
                future = executor.submit(process_stream_frame, source_face, capture_frame)
                futures.append(future)
                # Collect whatever has finished so far and stream it out.
                for future_done in [ future for future in futures if future.done() ]:
                    capture_frame = future_done.result()
                    deque_capture_frames.append(capture_frame)
                    futures.remove(future_done)
                while deque_capture_frames:
                    progress.update()
                    yield deque_capture_frames.popleft()
|
||||
|
||||
|
||||
def update() -> None:
    """Block until every selected frame processor passes its post check."""
    for frame_processor in deepfuze.globals.frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        # Poll quietly until the module's download/check completes.
        while not frame_processor_module.post_check():
            logger.disable()
            sleep(0.5)
            logger.enable()
|
||||
|
||||
|
||||
def stop() -> gradio.Image:
    """Release the webcam and blank the preview image."""
    clear_webcam_capture()
    return gradio.Image(value = None)
|
||||
|
||||
|
||||
def process_stream_frame(source_face : Face, target_vision_frame : VisionFrame) -> VisionFrame:
    """Apply every enabled frame processor to one captured webcam frame."""
    # Live streams carry no source audio; feed the processors a silent frame.
    source_audio_frame = create_empty_audio_frame()
    for frame_processor_module in get_frame_processors_modules(deepfuze.globals.frame_processors):
        logger.disable()
        if frame_processor_module.pre_process('stream'):
            logger.enable()
            target_vision_frame = frame_processor_module.process_frame(
            {
                'source_face': source_face,
                'source_audio_frame': source_audio_frame,
                'target_vision_frame': target_vision_frame
            })
    return target_vision_frame
|
||||
|
||||
|
||||
def open_stream(stream_mode : StreamMode, stream_resolution : str, stream_fps : Fps) -> subprocess.Popen[bytes]:
|
||||
commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', stream_resolution, '-r', str(stream_fps), '-i', '-']
|
||||
if stream_mode == 'udp':
|
||||
commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ])
|
||||
if stream_mode == 'v4l2':
|
||||
try:
|
||||
device_name = os.listdir('/sys/devices/virtual/video4linux')[0]
|
||||
if device_name:
|
||||
commands.extend([ '-f', 'v4l2', '/dev/' + device_name ])
|
||||
except FileNotFoundError:
|
||||
logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__.upper())
|
||||
return open_ffmpeg(commands)
|
||||
@@ -0,0 +1,37 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
from deepfuze import wording
|
||||
from deepfuze.uis import choices as uis_choices
|
||||
from deepfuze.uis.core import register_ui_component
|
||||
|
||||
WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None
|
||||
WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
|
||||
global WEBCAM_MODE_RADIO
|
||||
global WEBCAM_RESOLUTION_DROPDOWN
|
||||
global WEBCAM_FPS_SLIDER
|
||||
|
||||
WEBCAM_MODE_RADIO = gradio.Radio(
|
||||
label = wording.get('uis.webcam_mode_radio'),
|
||||
choices = uis_choices.webcam_modes,
|
||||
value = 'inline'
|
||||
)
|
||||
WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown(
|
||||
label = wording.get('uis.webcam_resolution_dropdown'),
|
||||
choices = uis_choices.webcam_resolutions,
|
||||
value = uis_choices.webcam_resolutions[0]
|
||||
)
|
||||
WEBCAM_FPS_SLIDER = gradio.Slider(
|
||||
label = wording.get('uis.webcam_fps_slider'),
|
||||
value = 25,
|
||||
step = 1,
|
||||
minimum = 1,
|
||||
maximum = 60
|
||||
)
|
||||
register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO)
|
||||
register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN)
|
||||
register_ui_component('webcam_fps_slider', WEBCAM_FPS_SLIDER)
|
||||
Reference in New Issue
Block a user