mirror of
https://github.com/facefusion/facefusion.git
synced 2026-05-12 18:32:18 +02:00
da0da3a4b4
* Rename calcXXX to calculateXXX * Add migraphx support * Add migraphx support * Add migraphx support * Add migraphx support * Add migraphx support * Add migraphx support * Use True for the flags * Add migraphx support * add face-swapper-weight * add face-swapper-weight to facefusion.ini * changes * change choice * Fix typing for xxxWeight * Feat/log inference session (#906) * Log inference session, Introduce time helper * Log inference session, Introduce time helper * Log inference session, Introduce time helper * Log inference session, Introduce time helper * Mark as NEXT * Follow industry standard x1, x2, y1 and y2 * Follow industry standard x1, x2, y1 and y2 * Follow industry standard in terms of naming (#908) * Follow industry standard in terms of naming * Improve xxx_embedding naming * Fix norm vs. norms * Reduce timeout to 5 * Sort out voice_extractor once again * changes * Introduce many to the occlusion mask (#910) * Introduce many to the occlusion mask * Then we use minimum * Add support for wmv * Run platform tests before has_execution_provider (#911) * Add support for wmv * Introduce benchmark mode (#912) * Honestly makes no difference to me * Honestly makes no difference to me * Fix wording * Bring back YuNet (#922) * Reintroduce YuNet without cv2 dependency * Fix variable naming * Avoid RGB to YUV colorshift using libx264rgb * Avoid RGB to YUV colorshift using libx264rgb * Make libx264 the default again * Make libx264 the default again * Fix types in ffmpeg builder * Fix quality stuff in ffmpeg builder * Fix quality stuff in ffmpeg builder * Add libx264rgb to test * Revamp Processors (#923) * Introduce new concept of pure target frames * Radical refactoring of process flow * Introduce new concept of pure target frames * Fix webcam * Minor improvements * Minor improvements * Use deque for video processing * Use deque for video processing * Extend the video manager * Polish deque * Polish deque * Deque is not even used * Improve speed with multiple 
futures * Fix temp frame mutation and * Fix RAM usage * Remove old types and manage method * Remove execution_queue_count * Use init_state for benchmarker to avoid issues * add voice extractor option * Change the order of voice extractor in code * Use official download urls * Use official download urls * add gui * fix preview * Add remote updates for voice extractor * fix crash on headless-run * update test_job_helper.py * Fix it for good * Remove pointless method * Fix types and unused imports * Revamp reference (#925) * Initial revamp of face references * Initial revamp of face references * Initial revamp of face references * Terminate find_similar_faces * Improve find mutant faces * Improve find mutant faces * Move sort where it belongs * Forward reference vision frame * Forward reference vision frame also in preview * Fix reference selection * Use static video frame * Fix CI * Remove reference type from frame processors * Improve some naming * Fix types and unused imports * Fix find mutant faces * Fix find mutant faces * Fix imports * Correct naming * Correct naming * simplify pad * Improve webcam performance on highres * Camera manager (#932) * Introduce webcam manager * Fix order * Rename to camera manager, improve video manager * Fix CI * Remove optional * Fix naming in webcam options * Avoid using temp faces (#933) * output video scale * Fix imports * output image scale * upscale fix (not limiter) * add unit test scale_resolution & remove unused methods * fix and add test * fix * change pack_resolution * fix tests * Simplify output scale testing * Fix benchmark UI * Fix benchmark UI * Update dependencies * Introduce REAL multi gpu support using multi dimensional inference pool (#935) * Introduce REAL multi gpu support using multi dimensional inference pool * Remove the MULTI:GPU flag * Restore "processing stop" * Restore "processing stop" * Remove old templates * Go fill in with caching * add expression restorer areas * re-arrange * rename method * Fix stop 
for extract frames and merge video * Replace arcface_converter models with latest crossface models * Replace arcface_converter models with latest crossface models * Move module logs to debug mode * Refactor/streamer (#938) * Introduce webcam manager * Fix order * Rename to camera manager, improve video manager * Fix CI * Fix naming in webcam options * Move logic over to streamer * Fix streamer, improve webcam experience * Improve webcam experience * Revert method * Revert method * Improve webcam again * Use release on capture instead * Only forward valid frames * Fix resolution logging * Add AVIF support * Add AVIF support * Limit avif to unix systems * Drop avif * Drop avif * Drop avif * Default to Documents in the UI if output path is not set * Update wording.py (#939) "succeed" is grammatically incorrect in the given context. To succeed is the infinitive form of the verb. Correct would be either "succeeded" or alternatively a form involving the noun "success". * Fix more grammar issue * Fix more grammar issue * Sort out caching * Move webcam choices back to UI * Move preview options to own file (#940) * Fix Migraphx execution provider * Fix benchmark * Reuse blend frame method * Fix CI * Fix CI * Fix CI * Hotfix missing check in face debugger, Enable logger for preview * Fix reference selection (#942) * Fix reference selection * Fix reference selection * Fix reference selection * Fix reference selection * Side by side preview (#941) * Initial side by side preview * More work on preview, remove UI only stuff from vision.py * Improve more * Use fit frame * Add different fit methods for vision * Improve preview part2 * Improve preview part3 * Improve preview part4 * Remove none as choice * Remove useless methods * Fix CI * Fix naming * use 1024 as preview resolution default * Fix fit_cover_frame * Uniform fit_xxx_frame methods * Add back disabled logger * Use ui choices alias * Extract select face logic from processors (#943) * Extract select face logic from 
processors to use it for face by face in preview * Fix order * Remove old code * Merge methods * Refactor face debugger (#944) * Refactor huge method of face debugger * Remove text metrics from face debugger * Remove useless copy of temp frame * Resort methods * Fix spacing * Remove old method * Fix hard exit to work without signals * Prevent upscaling for face-by-face * Switch to version * Improve exiting --------- Co-authored-by: harisreedhar <h4harisreedhar.s.s@gmail.com> Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com> Co-authored-by: Rafael Tappe Maestro <rafael@tappemaestro.com>
131 lines
4.6 KiB
Python
131 lines
4.6 KiB
Python
from typing import List
|
|
|
|
import numpy
|
|
|
|
from facefusion import state_manager
|
|
from facefusion.face_analyser import get_many_faces, get_one_face
|
|
from facefusion.types import Face, FaceSelectorOrder, Gender, Race, Score, VisionFrame
|
|
|
|
|
|
def select_faces(reference_vision_frame : VisionFrame, target_vision_frame : VisionFrame) -> List[Face]:
	# Resolve the faces to process from the target frame, honoring the configured selector mode.
	face_selector_mode = state_manager.get_item('face_selector_mode')
	target_faces = get_many_faces([ target_vision_frame ])

	if face_selector_mode == 'many':
		return sort_and_filter_faces(target_faces)

	if face_selector_mode == 'one':
		one_face = get_one_face(sort_and_filter_faces(target_faces))
		if one_face:
			return [ one_face ]

	if face_selector_mode == 'reference':
		reference_faces = sort_and_filter_faces(get_many_faces([ reference_vision_frame ]))
		reference_face = get_one_face(reference_faces, state_manager.get_item('reference_face_position'))
		if reference_face:
			return find_match_faces([ reference_face ], target_faces, state_manager.get_item('reference_face_distance'))

	return []
|
|
|
|
|
|
def find_match_faces(reference_faces : List[Face], target_faces : List[Face], face_distance : float) -> List[Face]:
	# Collect every target face whose distance to any truthy reference face stays below face_distance.
	match_faces : List[Face] = []

	for reference_face in filter(None, reference_faces):
		match_faces.extend(target_face for target_face in target_faces if compare_faces(target_face, reference_face, face_distance))

	return match_faces
|
|
|
|
|
|
def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
	# Map the raw distance (expected in [ 0, 2 ]) onto [ 0, 1 ] before testing it against the threshold.
	raw_face_distance = calculate_face_distance(face, reference_face)
	normalized_face_distance = float(numpy.interp(raw_face_distance, [ 0, 2 ], [ 0, 1 ]))
	return normalized_face_distance < face_distance
|
|
|
|
|
|
def calculate_face_distance(face : 'Face', reference_face : 'Face') -> float:
	# 1 - dot(a, b) over the normalized embeddings; fall back to 0 when either face lacks an embedding_norm.
	has_embeddings = hasattr(face, 'embedding_norm') and hasattr(reference_face, 'embedding_norm')
	if not has_embeddings:
		return 0
	return 1 - numpy.dot(face.embedding_norm, reference_face.embedding_norm)
|
|
|
|
|
|
def sort_and_filter_faces(faces : List[Face]) -> List[Face]:
	# Apply the configured selector order, then the gender / race / age filters in sequence.
	if not faces:
		return faces

	face_selector_order = state_manager.get_item('face_selector_order')
	face_selector_gender = state_manager.get_item('face_selector_gender')
	face_selector_race = state_manager.get_item('face_selector_race')
	face_selector_age_start = state_manager.get_item('face_selector_age_start')
	face_selector_age_end = state_manager.get_item('face_selector_age_end')

	if face_selector_order:
		faces = sort_faces_by_order(faces, face_selector_order)
	if face_selector_gender:
		faces = filter_faces_by_gender(faces, face_selector_gender)
	if face_selector_race:
		faces = filter_faces_by_race(faces, face_selector_race)
	if face_selector_age_start or face_selector_age_end:
		faces = filter_faces_by_age(faces, face_selector_age_start, face_selector_age_end)
	return faces
|
|
|
|
|
|
def sort_faces_by_order(faces : List[Face], order : FaceSelectorOrder) -> List[Face]:
	# Dispatch table mapping each selector order onto a sort key and direction.
	sort_set =\
	{
		'left-right': (get_bounding_box_left, False),
		'right-left': (get_bounding_box_left, True),
		'top-bottom': (get_bounding_box_top, False),
		'bottom-top': (get_bounding_box_top, True),
		'small-large': (get_bounding_box_area, False),
		'large-small': (get_bounding_box_area, True),
		'best-worst': (get_face_detector_score, True),
		'worst-best': (get_face_detector_score, False)
	}

	if order in sort_set:
		sort_key, sort_reverse = sort_set.get(order)
		return sorted(faces, key = sort_key, reverse = sort_reverse)
	# unknown orders pass the faces through unchanged
	return faces
|
|
|
|
|
|
def get_bounding_box_left(face : 'Face') -> float:
	# Left edge (x1) of the bounding box, used as a sort key.
	x1 = face.bounding_box[0]
	return x1
|
|
|
|
|
|
def get_bounding_box_top(face : 'Face') -> float:
	# Top edge (y1) of the bounding box, used as a sort key.
	y1 = face.bounding_box[1]
	return y1
|
|
|
|
|
|
def get_bounding_box_area(face : 'Face') -> float:
	# Width times height of the [ x1, y1, x2, y2 ] bounding box, used as a sort key.
	width = face.bounding_box[2] - face.bounding_box[0]
	height = face.bounding_box[3] - face.bounding_box[1]
	return width * height
|
|
|
|
|
|
def get_face_detector_score(face : 'Face') -> 'Score':
	# Detector confidence used as a sort key; None when the score set carries no detector entry.
	score_set = face.score_set
	return score_set.get('detector')
|
|
|
|
|
|
def filter_faces_by_gender(faces : 'List[Face]', gender : 'Gender') -> 'List[Face]':
	# Keep only the faces whose classified gender equals the requested one.
	return [ face for face in faces if face.gender == gender ]
|
|
|
|
|
|
def filter_faces_by_age(faces : 'List[Face]', face_selector_age_start : int, face_selector_age_end : int) -> 'List[Face]':
	# Keep only the faces whose age range overlaps the selector range (end bound exclusive, like range()).
	age = range(face_selector_age_start, face_selector_age_end)
	return [ face for face in faces if not set(face.age).isdisjoint(age) ]
|
|
|
|
|
|
def filter_faces_by_race(faces : 'List[Face]', race : 'Race') -> 'List[Face]':
	# Keep only the faces whose classified race equals the requested one.
	return [ face for face in faces if face.race == race ]
|