mirror of https://github.com/facefusion/facefusion.git (synced 2026-05-07 00:26:39 +02:00)
da0da3a4b4
* Rename calcXXX to calculateXXX
* Add migraphx support
* Add migraphx support
* Add migraphx support
* Add migraphx support
* Add migraphx support
* Add migraphx support
* Use True for the flags
* Add migraphx support
* add face-swapper-weight
* add face-swapper-weight to facefusion.ini
* changes
* change choice
* Fix typing for xxxWeight
* Feat/log inference session (#906)
* Log inference session, Introduce time helper
* Log inference session, Introduce time helper
* Log inference session, Introduce time helper
* Log inference session, Introduce time helper
* Mark as NEXT
* Follow industry standard x1, x2, y1 and y2
* Follow industry standard x1, x2, y1 and y2
* Follow industry standard in terms of naming (#908)
* Follow industry standard in terms of naming
* Improve xxx_embedding naming
* Fix norm vs. norms
* Reduce timeout to 5
* Sort out voice_extractor once again
* changes
* Introduce many to the occlusion mask (#910)
* Introduce many to the occlusion mask
* Then we use minimum
* Add support for wmv
* Run platform tests before has_execution_provider (#911)
* Add support for wmv
* Introduce benchmark mode (#912)
* Honestly makes no difference to me
* Honestly makes no difference to me
* Fix wording
* Bring back YuNet (#922)
* Reintroduce YuNet without cv2 dependency
* Fix variable naming
* Avoid RGB to YUV colorshift using libx264rgb
* Avoid RGB to YUV colorshift using libx264rgb
* Make libx264 the default again
* Make libx264 the default again
* Fix types in ffmpeg builder
* Fix quality stuff in ffmpeg builder
* Fix quality stuff in ffmpeg builder
* Add libx264rgb to test
* Revamp Processors (#923)
* Introduce new concept of pure target frames
* Radical refactoring of process flow
* Introduce new concept of pure target frames
* Fix webcam
* Minor improvements
* Minor improvements
* Use deque for video processing
* Use deque for video processing
* Extend the video manager
* Polish deque
* Polish deque
* Deque is not even used
* Improve speed with multiple futures
* Fix temp frame mutation and
* Fix RAM usage
* Remove old types and manage method
* Remove execution_queue_count
* Use init_state for benchmarker to avoid issues
* add voice extractor option
* Change the order of voice extractor in code
* Use official download urls
* Use official download urls
* add gui
* fix preview
* Add remote updates for voice extractor
* fix crash on headless-run
* update test_job_helper.py
* Fix it for good
* Remove pointless method
* Fix types and unused imports
* Revamp reference (#925)
* Initial revamp of face references
* Initial revamp of face references
* Initial revamp of face references
* Terminate find_similar_faces
* Improve find mutant faces
* Improve find mutant faces
* Move sort where it belongs
* Forward reference vision frame
* Forward reference vision frame also in preview
* Fix reference selection
* Use static video frame
* Fix CI
* Remove reference type from frame processors
* Improve some naming
* Fix types and unused imports
* Fix find mutant faces
* Fix find mutant faces
* Fix imports
* Correct naming
* Correct naming
* simplify pad
* Improve webcam performance on highres
* Camera manager (#932)
* Introduce webcam manager
* Fix order
* Rename to camera manager, improve video manager
* Fix CI
* Remove optional
* Fix naming in webcam options
* Avoid using temp faces (#933)
* output video scale
* Fix imports
* output image scale
* upscale fix (not limiter)
* add unit test scale_resolution & remove unused methods
* fix and add test
* fix
* change pack_resolution
* fix tests
* Simplify output scale testing
* Fix benchmark UI
* Fix benchmark UI
* Update dependencies
* Introduce REAL multi gpu support using multi dimensional inference pool (#935)
* Introduce REAL multi gpu support using multi dimensional inference pool
* Remove the MULTI:GPU flag
* Restore "processing stop"
* Restore "processing stop"
* Remove old templates
* Go fill in with caching
* add expression restorer areas
* re-arrange
* rename method
* Fix stop for extract frames and merge video
* Replace arcface_converter models with latest crossface models
* Replace arcface_converter models with latest crossface models
* Move module logs to debug mode
* Refactor/streamer (#938)
* Introduce webcam manager
* Fix order
* Rename to camera manager, improve video manager
* Fix CI
* Fix naming in webcam options
* Move logic over to streamer
* Fix streamer, improve webcam experience
* Improve webcam experience
* Revert method
* Revert method
* Improve webcam again
* Use release on capture instead
* Only forward valid frames
* Fix resolution logging
* Add AVIF support
* Add AVIF support
* Limit avif to unix systems
* Drop avif
* Drop avif
* Drop avif
* Default to Documents in the UI if output path is not set
* Update wording.py (#939) — "succeed" is grammatically incorrect in the given context. To succeed is the infinitive form of the verb. Correct would be either "succeeded" or alternatively a form involving the noun "success".
* Fix more grammar issue
* Fix more grammar issue
* Sort out caching
* Move webcam choices back to UI
* Move preview options to own file (#940)
* Fix Migraphx execution provider
* Fix benchmark
* Reuse blend frame method
* Fix CI
* Fix CI
* Fix CI
* Hotfix missing check in face debugger, Enable logger for preview
* Fix reference selection (#942)
* Fix reference selection
* Fix reference selection
* Fix reference selection
* Fix reference selection
* Side by side preview (#941)
* Initial side by side preview
* More work on preview, remove UI only stuff from vision.py
* Improve more
* Use fit frame
* Add different fit methods for vision
* Improve preview part2
* Improve preview part3
* Improve preview part4
* Remove none as choice
* Remove useless methods
* Fix CI
* Fix naming
* use 1024 as preview resolution default
* Fix fit_cover_frame
* Uniform fit_xxx_frame methods
* Add back disabled logger
* Use ui choices alias
* Extract select face logic from processors (#943)
* Extract select face logic from processors to use it for face by face in preview
* Fix order
* Remove old code
* Merge methods
* Refactor face debugger (#944)
* Refactor huge method of face debugger
* Remove text metrics from face debugger
* Remove useless copy of temp frame
* Resort methods
* Fix spacing
* Remove old method
* Fix hard exit to work without signals
* Prevent upscaling for face-by-face
* Switch to version
* Improve exiting
---------
Co-authored-by: harisreedhar <h4harisreedhar.s.s@gmail.com>
Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
Co-authored-by: Rafael Tappe Maestro <rafael@tappemaestro.com>
490 lines
29 KiB
Python
Executable File
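# FaceFusion face editor processor: wires the LivePortrait model family into
# the processor pipeline, covering argument registration, model download and
# per-frame editing of facial expression and head pose.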
from argparse import ArgumentParser
from functools import lru_cache
from typing import Tuple

import cv2
import numpy

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_angle, limit_expression
from facefusion.processors.types import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
from facefusion.vision import read_static_image, read_static_video_frame


@lru_cache()
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	return\
	{
		'live_portrait':
		{
			'hashes':
			{
				'feature_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash')
				},
				'motion_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash')
				},
				'eye_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash')
				},
				'lip_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash')
				},
				'stitcher':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash')
				},
				'generator':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash')
				}
			},
			'sources':
			{
				'feature_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx')
				},
				'motion_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx')
				},
				'eye_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx')
				},
				'lip_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx')
				},
				'stitcher':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx')
				},
				'generator':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx')
				}
			},
			'template': 'ffhq_512',
			'size': (512, 512)
		}
	}


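# Inference sessions are created lazily by inference_manager and cached per
# module and model name; clearing the pool releases the sessions and their
# device memory again.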
def get_inference_pool() -> InferencePool:
	model_names = [ state_manager.get_item('face_editor_model') ]
	model_source_set = get_model_options().get('sources')

	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
	model_names = [ state_manager.get_item('face_editor_model') ]
	inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
	model_name = state_manager.get_item('face_editor_model')
	return create_static_model_set('full').get(model_name)


def register_args(program : ArgumentParser) -> None:
	group_processors = find_argument_group(program, 'processors')
	if group_processors:
		group_processors.add_argument('--face-editor-model', help = wording.get('help.face_editor_model'), default = config.get_str_value('processors', 'face_editor_model', 'live_portrait'), choices = processors_choices.face_editor_models)
		group_processors.add_argument('--face-editor-eyebrow-direction', help = wording.get('help.face_editor_eyebrow_direction'), type = float, default = config.get_float_value('processors', 'face_editor_eyebrow_direction', '0'), choices = processors_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(processors_choices.face_editor_eyebrow_direction_range))
		group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = wording.get('help.face_editor_eye_gaze_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_horizontal', '0'), choices = processors_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_horizontal_range))
		group_processors.add_argument('--face-editor-eye-gaze-vertical', help = wording.get('help.face_editor_eye_gaze_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_vertical', '0'), choices = processors_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_vertical_range))
		group_processors.add_argument('--face-editor-eye-open-ratio', help = wording.get('help.face_editor_eye_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_eye_open_ratio', '0'), choices = processors_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_eye_open_ratio_range))
		group_processors.add_argument('--face-editor-lip-open-ratio', help = wording.get('help.face_editor_lip_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_lip_open_ratio', '0'), choices = processors_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_lip_open_ratio_range))
		group_processors.add_argument('--face-editor-mouth-grim', help = wording.get('help.face_editor_mouth_grim'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_grim', '0'), choices = processors_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_grim_range))
		group_processors.add_argument('--face-editor-mouth-pout', help = wording.get('help.face_editor_mouth_pout'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_pout', '0'), choices = processors_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_pout_range))
		group_processors.add_argument('--face-editor-mouth-purse', help = wording.get('help.face_editor_mouth_purse'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_purse', '0'), choices = processors_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_purse_range))
		group_processors.add_argument('--face-editor-mouth-smile', help = wording.get('help.face_editor_mouth_smile'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_smile', '0'), choices = processors_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_smile_range))
		group_processors.add_argument('--face-editor-mouth-position-horizontal', help = wording.get('help.face_editor_mouth_position_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_horizontal', '0'), choices = processors_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_horizontal_range))
		group_processors.add_argument('--face-editor-mouth-position-vertical', help = wording.get('help.face_editor_mouth_position_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_vertical', '0'), choices = processors_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_vertical_range))
		group_processors.add_argument('--face-editor-head-pitch', help = wording.get('help.face_editor_head_pitch'), type = float, default = config.get_float_value('processors', 'face_editor_head_pitch', '0'), choices = processors_choices.face_editor_head_pitch_range, metavar = create_float_metavar(processors_choices.face_editor_head_pitch_range))
		group_processors.add_argument('--face-editor-head-yaw', help = wording.get('help.face_editor_head_yaw'), type = float, default = config.get_float_value('processors', 'face_editor_head_yaw', '0'), choices = processors_choices.face_editor_head_yaw_range, metavar = create_float_metavar(processors_choices.face_editor_head_yaw_range))
		group_processors.add_argument('--face-editor-head-roll', help = wording.get('help.face_editor_head_roll'), type = float, default = config.get_float_value('processors', 'face_editor_head_roll', '0'), choices = processors_choices.face_editor_head_roll_range, metavar = create_float_metavar(processors_choices.face_editor_head_roll_range))
		facefusion.jobs.job_store.register_step_keys([ 'face_editor_model', 'face_editor_eyebrow_direction', 'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical', 'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio', 'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse', 'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal', 'face_editor_mouth_position_vertical', 'face_editor_head_pitch', 'face_editor_head_yaw', 'face_editor_head_roll' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
	apply_state_item('face_editor_model', args.get('face_editor_model'))
	apply_state_item('face_editor_eyebrow_direction', args.get('face_editor_eyebrow_direction'))
	apply_state_item('face_editor_eye_gaze_horizontal', args.get('face_editor_eye_gaze_horizontal'))
	apply_state_item('face_editor_eye_gaze_vertical', args.get('face_editor_eye_gaze_vertical'))
	apply_state_item('face_editor_eye_open_ratio', args.get('face_editor_eye_open_ratio'))
	apply_state_item('face_editor_lip_open_ratio', args.get('face_editor_lip_open_ratio'))
	apply_state_item('face_editor_mouth_grim', args.get('face_editor_mouth_grim'))
	apply_state_item('face_editor_mouth_pout', args.get('face_editor_mouth_pout'))
	apply_state_item('face_editor_mouth_purse', args.get('face_editor_mouth_purse'))
	apply_state_item('face_editor_mouth_smile', args.get('face_editor_mouth_smile'))
	apply_state_item('face_editor_mouth_position_horizontal', args.get('face_editor_mouth_position_horizontal'))
	apply_state_item('face_editor_mouth_position_vertical', args.get('face_editor_mouth_position_vertical'))
	apply_state_item('face_editor_head_pitch', args.get('face_editor_head_pitch'))
	apply_state_item('face_editor_head_yaw', args.get('face_editor_head_yaw'))
	apply_state_item('face_editor_head_roll', args.get('face_editor_head_roll'))


def pre_check() -> bool:
	model_hash_set = get_model_options().get('hashes')
	model_source_set = get_model_options().get('sources')

	return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)


def pre_process(mode : ProcessMode) -> bool:
	if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
		logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
		logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
		logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
		return False
	return True


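# Clean-up between runs: frame caches are always dropped, the module's own
# inference pool is released for the 'strict' and 'moderate' video memory
# strategies, and the shared analyser pools only under 'strict'.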
def post_process() -> None:
	read_static_image.cache_clear()
	read_static_video_frame.cache_clear()
	video_manager.clear_video_pool()
	if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
		clear_inference_pool()
	if state_manager.get_item('video_memory_strategy') == 'strict':
		content_analyser.clear_inference_pool()
		face_classifier.clear_inference_pool()
		face_detector.clear_inference_pool()
		face_landmarker.clear_inference_pool()
		face_masker.clear_inference_pool()
		face_recognizer.clear_inference_pool()


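# Per-face editing follows the usual warp/edit/paste pattern: crop the face
# via its 5-point landmarks (scaled by 1.5, presumably to include more
# context), edit in the 512x512 crop space, then paste the result back under
# a blurred box mask.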
def edit_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	model_template = get_model_options().get('template')
	model_size = get_model_options().get('size')
	face_landmark_5 = scale_face_landmark_5(target_face.landmark_set.get('5/68'), 1.5)
	crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size)
	box_mask = create_box_mask(crop_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
	crop_vision_frame = prepare_crop_frame(crop_vision_frame)
	crop_vision_frame = apply_edit(crop_vision_frame, target_face.landmark_set.get('68'))
	crop_vision_frame = normalize_crop_frame(crop_vision_frame)
	paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, box_mask, affine_matrix)
	return paste_vision_frame


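# LivePortrait control flow: extract the appearance feature volume and motion
# keypoints, keep an unedited target set of motion points, apply the user
# edits to expression and head rotation to form the source set, then stitch
# both sets and re-generate the crop frame.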
def apply_edit(crop_vision_frame : VisionFrame, face_landmark_68 : FaceLandmark68) -> VisionFrame:
	feature_volume = forward_extract_feature(crop_vision_frame)
	pitch, yaw, roll, scale, translation, expression, motion_points = forward_extract_motion(crop_vision_frame)
	rotation = create_rotation(pitch, yaw, roll)
	motion_points_target = scale * (motion_points @ rotation.T + expression) + translation
	expression = edit_eye_gaze(expression)
	expression = edit_mouth_grim(expression)
	expression = edit_mouth_position(expression)
	expression = edit_mouth_pout(expression)
	expression = edit_mouth_purse(expression)
	expression = edit_mouth_smile(expression)
	expression = edit_eyebrow_direction(expression)
	expression = limit_expression(expression)
	rotation = edit_head_rotation(pitch, yaw, roll)
	motion_points_source = motion_points @ rotation.T
	motion_points_source += expression
	motion_points_source *= scale
	motion_points_source += translation
	motion_points_source += edit_eye_open(motion_points_target, face_landmark_68)
	motion_points_source += edit_lip_open(motion_points_target, face_landmark_68)
	motion_points_source = forward_stitch_motion_points(motion_points_source, motion_points_target)
	crop_vision_frame = forward_generate_frame(feature_volume, motion_points_source, motion_points_target)
	return crop_vision_frame


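# The forward_* helpers below each run one ONNX session from the inference
# pool. The extractors and retargeters run under conditional_thread_semaphore,
# while the stitcher and generator always take thread_semaphore; presumably
# the latter two do not tolerate concurrent runs on some execution providers.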
def forward_extract_feature(crop_vision_frame : VisionFrame) -> LivePortraitFeatureVolume:
	feature_extractor = get_inference_pool().get('feature_extractor')

	with conditional_thread_semaphore():
		feature_volume = feature_extractor.run(None,
		{
			'input': crop_vision_frame
		})[0]

	return feature_volume


def forward_extract_motion(crop_vision_frame : VisionFrame) -> Tuple[LivePortraitPitch, LivePortraitYaw, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitExpression, LivePortraitMotionPoints]:
	motion_extractor = get_inference_pool().get('motion_extractor')

	with conditional_thread_semaphore():
		pitch, yaw, roll, scale, translation, expression, motion_points = motion_extractor.run(None,
		{
			'input': crop_vision_frame
		})

	return pitch, yaw, roll, scale, translation, expression, motion_points


def forward_retarget_eye(eye_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
	eye_retargeter = get_inference_pool().get('eye_retargeter')

	with conditional_thread_semaphore():
		eye_motion_points = eye_retargeter.run(None,
		{
			'input': eye_motion_points
		})[0]

	return eye_motion_points


def forward_retarget_lip(lip_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
	lip_retargeter = get_inference_pool().get('lip_retargeter')

	with conditional_thread_semaphore():
		lip_motion_points = lip_retargeter.run(None,
		{
			'input': lip_motion_points
		})[0]

	return lip_motion_points


def forward_stitch_motion_points(source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
	stitcher = get_inference_pool().get('stitcher')

	with thread_semaphore():
		motion_points = stitcher.run(None,
		{
			'source': source_motion_points,
			'target': target_motion_points
		})[0]

	return motion_points


def forward_generate_frame(feature_volume : LivePortraitFeatureVolume, source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> VisionFrame:
	generator = get_inference_pool().get('generator')

	with thread_semaphore():
		crop_vision_frame = generator.run(None,
		{
			'feature_volume': feature_volume,
			'source': source_motion_points,
			'target': target_motion_points
		})[0][0]

	return crop_vision_frame


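# The edit_* helpers below nudge individual entries of the LivePortrait
# expression tensor, which has shape (1, 21, 3): one row per motion keypoint,
# one column per axis. numpy.interp maps the user setting from [ -1, 1 ] onto
# small, apparently hand-tuned per-keypoint offsets.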
def edit_eyebrow_direction(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_eyebrow = state_manager.get_item('face_editor_eyebrow_direction')

	if face_editor_eyebrow > 0:
		expression[0, 1, 1] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 2, 1] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.020, 0.020 ])
	else:
		expression[0, 1, 0] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 2, 0] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.020, 0.020 ])
		expression[0, 1, 1] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.005, 0.005 ])
		expression[0, 2, 1] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.005, 0.005 ])
	return expression


def edit_eye_gaze(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_eye_gaze_horizontal = state_manager.get_item('face_editor_eye_gaze_horizontal')
	face_editor_eye_gaze_vertical = state_manager.get_item('face_editor_eye_gaze_vertical')

	if face_editor_eye_gaze_horizontal > 0:
		expression[0, 11, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 15, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.020, 0.020 ])
	else:
		expression[0, 11, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.020, 0.020 ])
		expression[0, 15, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.015, 0.015 ])
	expression[0, 1, 1] += numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.0025, 0.0025 ])
	expression[0, 2, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.0025, 0.0025 ])
	expression[0, 11, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.010, 0.010 ])
	expression[0, 13, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.005, 0.005 ])
	expression[0, 15, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.010, 0.010 ])
	expression[0, 16, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.005, 0.005 ])
	return expression


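# Eye and lip openness go through dedicated retargeter networks instead of
# direct offsets: the flattened motion points are concatenated with landmark
# distance ratios plus a driving value, and the network output is scaled by
# the absolute value of the user ratio.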
def edit_eye_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : FaceLandmark68) -> LivePortraitMotionPoints:
	face_editor_eye_open_ratio = state_manager.get_item('face_editor_eye_open_ratio')
	left_eye_ratio = calculate_distance_ratio(face_landmark_68, 37, 40, 39, 36)
	right_eye_ratio = calculate_distance_ratio(face_landmark_68, 43, 46, 45, 42)

	if face_editor_eye_open_ratio < 0:
		eye_motion_points = numpy.concatenate([ motion_points.ravel(), [ left_eye_ratio, right_eye_ratio, 0.0 ] ])
	else:
		eye_motion_points = numpy.concatenate([ motion_points.ravel(), [ left_eye_ratio, right_eye_ratio, 0.6 ] ])
	eye_motion_points = eye_motion_points.reshape(1, -1).astype(numpy.float32)
	eye_motion_points = forward_retarget_eye(eye_motion_points) * numpy.abs(face_editor_eye_open_ratio)
	eye_motion_points = eye_motion_points.reshape(-1, 21, 3)
	return eye_motion_points


def edit_lip_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : FaceLandmark68) -> LivePortraitMotionPoints:
	face_editor_lip_open_ratio = state_manager.get_item('face_editor_lip_open_ratio')
	lip_ratio = calculate_distance_ratio(face_landmark_68, 62, 66, 54, 48)

	if face_editor_lip_open_ratio < 0:
		lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 0.0 ] ])
	else:
		lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 1.0 ] ])
	lip_motion_points = lip_motion_points.reshape(1, -1).astype(numpy.float32)
	lip_motion_points = forward_retarget_lip(lip_motion_points) * numpy.abs(face_editor_lip_open_ratio)
	lip_motion_points = lip_motion_points.reshape(-1, 21, 3)
	return lip_motion_points


def edit_mouth_grim(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_grim = state_manager.get_item('face_editor_mouth_grim')
	if face_editor_mouth_grim > 0:
		expression[0, 17, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.005, 0.005 ])
		expression[0, 19, 2] += numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.01, 0.01 ])
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.06, 0.06 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.03, 0.03 ])
	else:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.05, 0.05 ])
		expression[0, 19, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.02, 0.02 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.03, 0.03 ])
	return expression


def edit_mouth_position(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_position_horizontal = state_manager.get_item('face_editor_mouth_position_horizontal')
	face_editor_mouth_position_vertical = state_manager.get_item('face_editor_mouth_position_vertical')
	expression[0, 19, 0] += numpy.interp(face_editor_mouth_position_horizontal, [ -1, 1 ], [ -0.05, 0.05 ])
	expression[0, 20, 0] += numpy.interp(face_editor_mouth_position_horizontal, [ -1, 1 ], [ -0.04, 0.04 ])
	if face_editor_mouth_position_vertical > 0:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.04, 0.04 ])
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.02, 0.02 ])
	else:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.05, 0.05 ])
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.04, 0.04 ])
	return expression


def edit_mouth_pout(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_pout = state_manager.get_item('face_editor_mouth_pout')
	if face_editor_mouth_pout > 0:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.022, 0.022 ])
		expression[0, 19, 2] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.025, 0.025 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
	else:
		expression[0, 19, 1] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.022, 0.022 ])
		expression[0, 19, 2] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.025, 0.025 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
	return expression


def edit_mouth_purse(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_purse = state_manager.get_item('face_editor_mouth_purse')
	if face_editor_mouth_purse > 0:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.04, 0.04 ])
		expression[0, 19, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.02, 0.02 ])
	else:
		expression[0, 14, 1] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.02, 0.02 ])
		expression[0, 17, 2] += numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.01, 0.01 ])
		expression[0, 19, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.002, 0.002 ])
	return expression


def edit_mouth_smile(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_smile = state_manager.get_item('face_editor_mouth_smile')
	if face_editor_mouth_smile > 0:
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 14, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.025, 0.025 ])
		expression[0, 17, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.01, 0.01 ])
		expression[0, 17, 2] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.004, 0.004 ])
		expression[0, 3, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
		expression[0, 7, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
	else:
		expression[0, 14, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.02, 0.02 ])
		expression[0, 17, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.003, 0.003 ])
		expression[0, 19, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.02, 0.02 ])
		expression[0, 19, 2] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.005, 0.005 ])
		expression[0, 20, 2] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.01, 0.01 ])
		expression[0, 3, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
		expression[0, 7, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
	return expression


def edit_head_rotation(pitch : LivePortraitPitch, yaw : LivePortraitYaw, roll : LivePortraitRoll) -> LivePortraitRotation:
	face_editor_head_pitch = state_manager.get_item('face_editor_head_pitch')
	face_editor_head_yaw = state_manager.get_item('face_editor_head_yaw')
	face_editor_head_roll = state_manager.get_item('face_editor_head_roll')
	edit_pitch = pitch + float(numpy.interp(face_editor_head_pitch, [ -1, 1 ], [ 20, -20 ]))
	edit_yaw = yaw + float(numpy.interp(face_editor_head_yaw, [ -1, 1 ], [ 60, -60 ]))
	edit_roll = roll + float(numpy.interp(face_editor_head_roll, [ -1, 1 ], [ -15, 15 ]))
	edit_pitch, edit_yaw, edit_roll = limit_angle(pitch, yaw, roll, edit_pitch, edit_yaw, edit_roll)
	rotation = create_rotation(edit_pitch, edit_yaw, edit_roll)
	return rotation


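# Ratio of a vertical to a horizontal landmark distance on the 68-point
# layout, similar to the eye aspect ratio used for blink detection; the
# epsilon guards against division by zero on degenerate landmarks.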
def calculate_distance_ratio(face_landmark_68 : FaceLandmark68, top_index : int, bottom_index : int, left_index : int, right_index : int) -> float:
	vertical_direction = face_landmark_68[top_index] - face_landmark_68[bottom_index]
	horizontal_direction = face_landmark_68[left_index] - face_landmark_68[right_index]
	distance_ratio = float(numpy.linalg.norm(vertical_direction) / (numpy.linalg.norm(horizontal_direction) + 1e-6))
	return distance_ratio


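# Model I/O conversion: prepare_crop_frame downsizes the crop to half the
# model size, converts BGR to RGB, scales to [ 0, 1 ] and reorders to NCHW;
# normalize_crop_frame reverses the layout for the generated output.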
def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
	model_size = get_model_options().get('size')
	prepare_size = (model_size[0] // 2, model_size[1] // 2)
	crop_vision_frame = cv2.resize(crop_vision_frame, prepare_size, interpolation = cv2.INTER_AREA)
	crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0
	crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
	return crop_vision_frame


def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
	crop_vision_frame = crop_vision_frame.transpose(1, 2, 0).clip(0, 1)
	crop_vision_frame = (crop_vision_frame * 255.0)
	crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1]
	return crop_vision_frame


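# Pipeline entry point, called once per frame: faces are selected against the
# reference frame, then every selected face is edited in place on the temp
# frame.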
def process_frame(inputs : FaceEditorInputs) -> VisionFrame:
	reference_vision_frame = inputs.get('reference_vision_frame')
	target_vision_frame = inputs.get('target_vision_frame')
	temp_vision_frame = inputs.get('temp_vision_frame')
	target_faces = select_faces(reference_vision_frame, target_vision_frame)

	if target_faces:
		for target_face in target_faces:
			temp_vision_frame = edit_face(target_face, temp_vision_frame)

	return temp_vision_frame