mirror of
https://github.com/facefusion/facefusion.git
synced 2026-04-28 20:46:26 +02:00
8bf9170577
* Mark as NEXT * Reduce caching to avoid RAM explosion * Reduce caching to avoid RAM explosion * Update dependencies * add face-detector-pad-factor * update facefusion.ini * fix test * change pad to margin * fix order * add prepare margin * use 50% max margin * Minor fixes part2 * Minor fixes part3 * Minor fixes part4 * Minor fixes part1 * Downgrade onnxruntime as of BiRefNet broken on CPU add test update update facefusion.ini add birefnet * rename models add more models * Fix versions * Add .claude to gitignore * add normalize color add 4 channel add colors * worflows * cleanup * cleanup * cleanup * cleanup * add more models (#961) * Fix naming * changes * Fix style and mock Gradio * Fix style and mock Gradio * Fix style and mock Gradio * apply clamp * remove clamp * Add normalizer test * Introduce sanitizer for the rescue (#963) * Introduce sanitizer for the rescue * Introduce sanitizer for the rescue * Introduce sanitizer for the rescue * prepare ffmpeg for alpha support * Some cleanup * Some cleanup * Fix CI * List as TypeAlias is not allowed (#967) * List as TypeAlias is not allowed * List as TypeAlias is not allowed * List as TypeAlias is not allowed * List as TypeAlias is not allowed * Add mpeg and mxf support (#968) * Add mpeg support * Add mxf support * Adjust fix_xxx_encoder for the new formats * Extend output pattern for batch-run (#969) * Extend output pattern for batch-run * Add {target_extension} to allowed mixed files * Catch invalid output pattern keys * alpha support * cleanup * cleanup * add ProcessorOutputs type * fix preview and streamer, support alpha for background_remover * Refactor/open close processors (#972) * Introduce open/close processors * Add locales for translator * Introduce __autoload__ for translator * More cleanup * Fix import issues * Resolve the scope situation for locals * Fix installer by not using translator * Fixes after merge * Fixes after merge * Fix translator keys in ui * Use LOCALS in installer * Update and partial fix 
DirectML * Use latest onnxruntime * Fix performance * Fix lint issues * fix mask * fix lint * fix lint * Remove default from translator.get() * remove 'framerate=' * fix test * Rename and reorder models * Align naming * add alpha preview * fix frame-by-frame * Add alpha effect via css * preview support alpha channel * fix preview modes * Use official assets repositories * Add support for u2net_cloth * fix naming * Add more models * Add vendor, license and year direct to the models * Add vendor, license and year direct to the models * Update dependencies, Minor CSS adjustment * Ready for 3.5.0 * Fix naming * Update about messages * Fix return * Use groups to show/hide * Update preview * Conditional merge mask * Conditional merge mask * Fix import order --------- Co-authored-by: harisreedhar <h4harisreedhar.s.s@gmail.com> Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
275 lines
15 KiB
Python
275 lines
15 KiB
Python
from facefusion.types import Locals

# English translation catalog for facefusion.
# Top-level scope 'en' holds flat runtime/log messages; the nested scopes
# group CLI help texts ('help'), about-panel links ('about') and UI widget
# labels ('uis'). Curly-brace placeholders (e.g. {job_id}, {resolution})
# are presumably filled via str.format-style substitution at lookup time —
# NOTE(review): confirm against the translator module.
LOCALS : Locals =\
{
	'en':
	{
		'conda_not_activated': 'conda is not activated',
		'python_not_supported': 'python version is not supported, upgrade to {version} or higher',
		'curl_not_installed': 'curl is not installed',
		'ffmpeg_not_installed': 'ffmpeg is not installed',
		'creating_temp': 'creating temporary resources',
		'extracting_frames': 'extracting frames with a resolution of {resolution} and {fps} frames per second',
		'extracting_frames_succeeded': 'extracting frames succeeded',
		'extracting_frames_failed': 'extracting frames failed',
		'analysing': 'analysing',
		'extracting': 'extracting',
		'streaming': 'streaming',
		'processing': 'processing',
		'merging': 'merging',
		'downloading': 'downloading',
		'temp_frames_not_found': 'temporary frames not found',
		'copying_image': 'copying image with a resolution of {resolution}',
		'copying_image_succeeded': 'copying image succeeded',
		'copying_image_failed': 'copying image failed',
		'finalizing_image': 'finalizing image with a resolution of {resolution}',
		'finalizing_image_succeeded': 'finalizing image succeeded',
		'finalizing_image_skipped': 'finalizing image skipped',
		'merging_video': 'merging video with a resolution of {resolution} and {fps} frames per second',
		'merging_video_succeeded': 'merging video succeeded',
		'merging_video_failed': 'merging video failed',
		'skipping_audio': 'skipping audio',
		'replacing_audio_succeeded': 'replacing audio succeeded',
		'replacing_audio_skipped': 'replacing audio skipped',
		'restoring_audio_succeeded': 'restoring audio succeeded',
		'restoring_audio_skipped': 'restoring audio skipped',
		'clearing_temp': 'clearing temporary resources',
		'processing_stopped': 'processing stopped',
		'processing_image_succeeded': 'processing to image succeeded in {seconds} seconds',
		'processing_image_failed': 'processing to image failed',
		'processing_video_succeeded': 'processing to video succeeded in {seconds} seconds',
		'processing_video_failed': 'processing to video failed',
		'choose_image_source': 'choose an image for the source',
		'choose_audio_source': 'choose an audio for the source',
		'choose_video_target': 'choose a video for the target',
		'choose_image_or_video_target': 'choose an image or video for the target',
		'specify_image_or_video_output': 'specify the output image or video within a directory',
		'match_target_and_output_extension': 'match the target and output extension',
		'no_source_face_detected': 'no source face detected',
		'processor_not_loaded': 'processor {processor} could not be loaded',
		'processor_not_implemented': 'processor {processor} not implemented correctly',
		'ui_layout_not_loaded': 'ui layout {ui_layout} could not be loaded',
		'ui_layout_not_implemented': 'ui layout {ui_layout} not implemented correctly',
		'stream_not_loaded': 'stream {stream_mode} could not be loaded',
		'stream_not_supported': 'stream not supported',
		'job_created': 'job {job_id} created',
		'job_not_created': 'job {job_id} not created',
		'job_submitted': 'job {job_id} submitted',
		'job_not_submitted': 'job {job_id} not submitted',
		'job_all_submitted': 'jobs submitted',
		'job_all_not_submitted': 'jobs not submitted',
		'job_deleted': 'job {job_id} deleted',
		'job_not_deleted': 'job {job_id} not deleted',
		'job_all_deleted': 'jobs deleted',
		'job_all_not_deleted': 'jobs not deleted',
		'job_step_added': 'step added to job {job_id}',
		'job_step_not_added': 'step not added to job {job_id}',
		'job_remix_step_added': 'step {step_index} remixed from job {job_id}',
		'job_remix_step_not_added': 'step {step_index} not remixed from job {job_id}',
		'job_step_inserted': 'step {step_index} inserted to job {job_id}',
		'job_step_not_inserted': 'step {step_index} not inserted to job {job_id}',
		'job_step_removed': 'step {step_index} removed from job {job_id}',
		'job_step_not_removed': 'step {step_index} not removed from job {job_id}',
		'running_job': 'running queued job {job_id}',
		'running_jobs': 'running all queued jobs',
		'retrying_job': 'retrying failed job {job_id}',
		'retrying_jobs': 'retrying all failed jobs',
		'processing_job_succeeded': 'processing of job {job_id} succeeded',
		'processing_jobs_succeeded': 'processing of all jobs succeeded',
		'processing_job_failed': 'processing of job {job_id} failed',
		'processing_jobs_failed': 'processing of all jobs failed',
		'processing_step': 'processing step {step_current} of {step_total}',
		'validating_hash_succeeded': 'validating hash for {hash_file_name} succeeded',
		'validating_hash_failed': 'validating hash for {hash_file_name} failed',
		'validating_source_succeeded': 'validating source for {source_file_name} succeeded',
		'validating_source_failed': 'validating source for {source_file_name} failed',
		'deleting_corrupt_source': 'deleting corrupt source for {source_file_name}',
		'loading_model_succeeded': 'loading model {model_name} succeeded in {seconds} seconds',
		'loading_model_failed': 'loading model {model_name} failed',
		'time_ago_now': 'just now',
		'time_ago_minutes': '{minutes} minutes ago',
		'time_ago_hours': '{hours} hours and {minutes} minutes ago',
		'time_ago_days': '{days} days, {hours} hours and {minutes} minutes ago',
		# punctuation tokens so composed sentences can be localized as well
		'point': '.',
		'comma': ',',
		'colon': ':',
		'question_mark': '?',
		'exclamation_mark': '!',
		'help':
		{
			'install_dependency': 'choose the variant of {dependency} to install',
			'skip_conda': 'skip the conda environment check',
			'config_path': 'choose the config file to override defaults',
			'temp_path': 'specify the directory for the temporary resources',
			'jobs_path': 'specify the directory to store jobs',
			'source_paths': 'choose the image or audio paths',
			'target_path': 'choose the image or video path',
			'output_path': 'specify the image or video within a directory',
			'source_pattern': 'choose the image or audio pattern',
			'target_pattern': 'choose the image or video pattern',
			'output_pattern': 'specify the image or video pattern',
			'face_detector_model': 'choose the model responsible for detecting the faces',
			'face_detector_size': 'specify the frame size provided to the face detector',
			'face_detector_margin': 'apply top, right, bottom and left margin to the frame',
			'face_detector_angles': 'specify the angles to rotate the frame before detecting faces',
			'face_detector_score': 'filter the detected faces based on the confidence score',
			'face_landmarker_model': 'choose the model responsible for detecting the face landmarks',
			'face_landmarker_score': 'filter the detected face landmarks based on the confidence score',
			'face_selector_mode': 'use reference based tracking or simple matching',
			'face_selector_order': 'specify the order of the detected faces',
			'face_selector_age_start': 'filter the detected faces based on the starting age',
			'face_selector_age_end': 'filter the detected faces based on the ending age',
			'face_selector_gender': 'filter the detected faces based on their gender',
			'face_selector_race': 'filter the detected faces based on their race',
			'reference_face_position': 'specify the position used to create the reference face',
			'reference_face_distance': 'specify the similarity between the reference face and target face',
			'reference_frame_number': 'specify the frame used to create the reference face',
			'face_occluder_model': 'choose the model responsible for the occlusion mask',
			'face_parser_model': 'choose the model responsible for the region mask',
			'face_mask_types': 'mix and match different face mask types (choices: {choices})',
			'face_mask_areas': 'choose the items used for the area mask (choices: {choices})',
			'face_mask_regions': 'choose the items used for the region mask (choices: {choices})',
			'face_mask_blur': 'specify the degree of blur applied to the box mask',
			'face_mask_padding': 'apply top, right, bottom and left padding to the box mask',
			'voice_extractor_model': 'choose the model responsible for extracting the voices',
			'trim_frame_start': 'specify the starting frame of the target video',
			'trim_frame_end': 'specify the ending frame of the target video',
			'temp_frame_format': 'specify the temporary resources format',
			'keep_temp': 'keep the temporary resources after processing',
			'output_image_quality': 'specify the image quality which translates to the image compression',
			'output_image_scale': 'specify the image scale based on the target image',
			'output_audio_encoder': 'specify the encoder used for the audio',
			'output_audio_quality': 'specify the audio quality which translates to the audio compression',
			'output_audio_volume': 'specify the audio volume based on the target video',
			'output_video_encoder': 'specify the encoder used for the video',
			'output_video_preset': 'balance fast video processing and video file size',
			'output_video_quality': 'specify the video quality which translates to the video compression',
			'output_video_scale': 'specify the video scale based on the target video',
			'output_video_fps': 'specify the video fps based on the target video',
			'processors': 'load a single or multiple processors (choices: {choices}, ...)',
			# renamed from 'background-remover-model' / 'background-remover-color':
			# every other translator key in this catalog is snake_case and lookup
			# keys are presumably derived from the underscored option names —
			# NOTE(review): verify no caller resolves the hyphenated form
			'background_remover_model': 'choose the model responsible for removing the background',
			'background_remover_color': 'apply red, green, blue and alpha values of the background',
			'open_browser': 'open the browser once the program is ready',
			'ui_layouts': 'launch a single or multiple UI layouts (choices: {choices}, ...)',
			'ui_workflow': 'choose the ui workflow',
			'download_providers': 'download using different providers (choices: {choices}, ...)',
			'download_scope': 'specify the download scope',
			'benchmark_mode': 'choose the benchmark mode',
			'benchmark_resolutions': 'choose the resolutions for the benchmarks (choices: {choices}, ...)',
			'benchmark_cycle_count': 'specify the amount of cycles per benchmark',
			'execution_device_ids': 'specify the devices used for processing',
			'execution_providers': 'inference using different providers (choices: {choices}, ...)',
			'execution_thread_count': 'specify the amount of parallel threads while processing',
			'video_memory_strategy': 'balance fast processing and low VRAM usage',
			'system_memory_limit': 'limit the available RAM that can be used while processing',
			'log_level': 'adjust the message severity displayed in the terminal',
			'halt_on_error': 'halt the program once an error occurred',
			'run': 'run the program',
			'headless_run': 'run the program in headless mode',
			'batch_run': 'run the program in batch mode',
			'force_download': 'force automate downloads and exit',
			'benchmark': 'benchmark the program',
			'job_id': 'specify the job id',
			'job_status': 'specify the job status',
			'step_index': 'specify the step index',
			'job_list': 'list jobs by status',
			'job_create': 'create a drafted job',
			'job_submit': 'submit a drafted job to become a queued job',
			'job_submit_all': 'submit all drafted jobs to become queued jobs',
			'job_delete': 'delete a drafted, queued, failed or completed job',
			'job_delete_all': 'delete all drafted, queued, failed and completed jobs',
			'job_add_step': 'add a step to a drafted job',
			'job_remix_step': 'remix a previous step from a drafted job',
			'job_insert_step': 'insert a step to a drafted job',
			'job_remove_step': 'remove a step from a drafted job',
			'job_run': 'run a queued job',
			'job_run_all': 'run all queued jobs',
			'job_retry': 'retry a failed job',
			'job_retry_all': 'retry all failed jobs'
		},
		'about':
		{
			'fund': 'fund training server',
			'subscribe': 'become a member',
			'join': 'join our community'
		},
		'uis':
		{
			'apply_button': 'APPLY',
			'benchmark_mode_dropdown': 'BENCHMARK MODE',
			'benchmark_cycle_count_slider': 'BENCHMARK CYCLE COUNT',
			'benchmark_resolutions_checkbox_group': 'BENCHMARK RESOLUTIONS',
			'clear_button': 'CLEAR',
			'common_options_checkbox_group': 'OPTIONS',
			'download_providers_checkbox_group': 'DOWNLOAD PROVIDERS',
			'execution_providers_checkbox_group': 'EXECUTION PROVIDERS',
			'execution_thread_count_slider': 'EXECUTION THREAD COUNT',
			'face_detector_angles_checkbox_group': 'FACE DETECTOR ANGLES',
			'face_detector_model_dropdown': 'FACE DETECTOR MODEL',
			'face_detector_margin_slider': 'FACE DETECTOR MARGIN',
			'face_detector_score_slider': 'FACE DETECTOR SCORE',
			'face_detector_size_dropdown': 'FACE DETECTOR SIZE',
			'face_landmarker_model_dropdown': 'FACE LANDMARKER MODEL',
			'face_landmarker_score_slider': 'FACE LANDMARKER SCORE',
			'face_mask_blur_slider': 'FACE MASK BLUR',
			'face_mask_padding_bottom_slider': 'FACE MASK PADDING BOTTOM',
			'face_mask_padding_left_slider': 'FACE MASK PADDING LEFT',
			'face_mask_padding_right_slider': 'FACE MASK PADDING RIGHT',
			'face_mask_padding_top_slider': 'FACE MASK PADDING TOP',
			'face_mask_areas_checkbox_group': 'FACE MASK AREAS',
			'face_mask_regions_checkbox_group': 'FACE MASK REGIONS',
			'face_mask_types_checkbox_group': 'FACE MASK TYPES',
			'face_selector_age_range_slider': 'FACE SELECTOR AGE',
			'face_selector_gender_dropdown': 'FACE SELECTOR GENDER',
			'face_selector_mode_dropdown': 'FACE SELECTOR MODE',
			'face_selector_order_dropdown': 'FACE SELECTOR ORDER',
			'face_selector_race_dropdown': 'FACE SELECTOR RACE',
			'face_occluder_model_dropdown': 'FACE OCCLUDER MODEL',
			'face_parser_model_dropdown': 'FACE PARSER MODEL',
			'voice_extractor_model_dropdown': 'VOICE EXTRACTOR MODEL',
			'job_list_status_checkbox_group': 'JOB STATUS',
			# fixed 'JOB_ACTION' -> 'JOB ACTION' to match the job_runner label
			'job_manager_job_action_dropdown': 'JOB ACTION',
			'job_manager_job_id_dropdown': 'JOB ID',
			'job_manager_step_index_dropdown': 'STEP INDEX',
			'job_runner_job_action_dropdown': 'JOB ACTION',
			'job_runner_job_id_dropdown': 'JOB ID',
			'log_level_dropdown': 'LOG LEVEL',
			'output_audio_encoder_dropdown': 'OUTPUT AUDIO ENCODER',
			'output_audio_quality_slider': 'OUTPUT AUDIO QUALITY',
			'output_audio_volume_slider': 'OUTPUT AUDIO VOLUME',
			'output_image_or_video': 'OUTPUT',
			'output_image_quality_slider': 'OUTPUT IMAGE QUALITY',
			'output_image_scale_slider': 'OUTPUT IMAGE SCALE',
			'output_path_textbox': 'OUTPUT PATH',
			'output_video_encoder_dropdown': 'OUTPUT VIDEO ENCODER',
			'output_video_fps_slider': 'OUTPUT VIDEO FPS',
			'output_video_preset_dropdown': 'OUTPUT VIDEO PRESET',
			'output_video_quality_slider': 'OUTPUT VIDEO QUALITY',
			'output_video_scale_slider': 'OUTPUT VIDEO SCALE',
			'preview_frame_slider': 'PREVIEW FRAME',
			'preview_image': 'PREVIEW',
			'preview_mode_dropdown': 'PREVIEW MODE',
			'preview_resolution_dropdown': 'PREVIEW RESOLUTION',
			'processors_checkbox_group': 'PROCESSORS',
			'reference_face_distance_slider': 'REFERENCE FACE DISTANCE',
			'reference_face_gallery': 'REFERENCE FACE',
			'refresh_button': 'REFRESH',
			'source_file': 'SOURCE',
			'start_button': 'START',
			'stop_button': 'STOP',
			'system_memory_limit_slider': 'SYSTEM MEMORY LIMIT',
			'target_file': 'TARGET',
			'temp_frame_format_dropdown': 'TEMP FRAME FORMAT',
			'terminal_textbox': 'TERMINAL',
			'trim_frame_slider': 'TRIM FRAME',
			'ui_workflow': 'UI WORKFLOW',
			'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY',
			'webcam_fps_slider': 'WEBCAM FPS',
			'webcam_image': 'WEBCAM',
			'webcam_device_id_dropdown': 'WEBCAM DEVICE ID',
			'webcam_mode_radio': 'WEBCAM MODE',
			'webcam_resolution_dropdown': 'WEBCAM RESOLUTION'
		}
	}
}
|