merge stuff

This commit is contained in:
henryruhs
2026-03-17 14:55:36 +01:00
parent 0d0c27b117
commit 3acb71c44e
3 changed files with 252 additions and 109 deletions
-60
View File
@@ -1,60 +0,0 @@
# Naming Convention Violations
## Variables using `xxx_frame` instead of `xxx_vision_frame`
- `source_frame_resize` → `source_vision_frame_resize` in `facefusion/vision.py:290`
- `target_frame_resize` → `target_vision_frame_resize` in `facefusion/vision.py:291`
## Variables using `xxx_mask` instead of `xxx_vision_mask`
### box_mask → box_vision_mask
- `facefusion/face_masker.py:192`
- `facefusion/processors/modules/face_debugger/core.py:131`
- `facefusion/processors/modules/expression_restorer/core.py:158`
- `facefusion/processors/modules/age_modifier/core.py:179,205`
- `facefusion/processors/modules/face_enhancer/core.py:346`
- `facefusion/processors/modules/face_editor/core.py:208`
- `facefusion/processors/modules/deep_swapper/core.py:331`
- `facefusion/processors/modules/face_swapper/core.py:591`
- `facefusion/processors/modules/lip_syncer/core.py:189`
### occlusion_mask → occlusion_vision_mask
- `facefusion/face_masker.py:221,222`
- `facefusion/processors/modules/face_debugger/core.py:135`
- `facefusion/processors/modules/expression_restorer/core.py:165`
- `facefusion/processors/modules/age_modifier/core.py:186,212,214`
- `facefusion/processors/modules/face_enhancer/core.py:353`
- `facefusion/processors/modules/deep_swapper/core.py:338`
- `facefusion/processors/modules/face_swapper/core.py:595`
- `facefusion/processors/modules/lip_syncer/core.py:184`
### area_mask → area_vision_mask
- `facefusion/face_masker.py:235,237`
- `facefusion/processors/modules/face_debugger/core.py:140`
- `facefusion/processors/modules/deep_swapper/core.py:350`
- `facefusion/processors/modules/face_swapper/core.py:608`
- `facefusion/processors/modules/lip_syncer/core.py:197`
### region_mask → region_vision_mask
- `facefusion/face_masker.py:250,252,253`
- `facefusion/processors/modules/face_debugger/core.py:144`
- `facefusion/processors/modules/deep_swapper/core.py:354`
- `facefusion/processors/modules/face_swapper/core.py:612`
### crop_mask → crop_vision_mask
- `facefusion/processors/modules/face_debugger/core.py:147,148,149`
- `facefusion/processors/modules/expression_restorer/core.py:172,173`
- `facefusion/processors/modules/age_modifier/core.py:197,224,225`
- `facefusion/processors/modules/face_enhancer/core.py:360,361`
- `facefusion/processors/modules/deep_swapper/core.py:357,405,406,407,408`
- `facefusion/processors/modules/face_swapper/core.py:615,616`
- `facefusion/processors/modules/lip_syncer/core.py:206,207`
### temp_mask → temp_vision_mask
- `facefusion/face_masker.py:216,217,218`
@@ -29,6 +29,41 @@ from facefusion.vision import match_frame_color, read_static_image, read_static_
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
return\
{
'fran':
{
'__metadata__':
{
'vendor': 'ry-lu',
'license': 'mit',
'year': 2024
},
'hashes':
{
'age_modifier':
{
'url': resolve_download_url('models-3.6.0', 'fran.hash'),
'path': resolve_relative_path('../.assets/models/fran.hash')
}
},
'sources':
{
'age_modifier':
{
'url': resolve_download_url('models-3.6.0', 'fran.onnx'),
'path': resolve_relative_path('../.assets/models/fran.onnx')
}
},
'templates':
{
'target': 'ffhq_512',
},
'sizes':
{
'target': (1024, 1024),
},
'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
},
'styleganex_age':
{
'__metadata__':
@@ -62,7 +97,9 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'target': (256, 256),
'target_with_background': (384, 384)
}
},
'mean': [ 0.5, 0.5, 0.5 ],
'standard_deviation': [ 0.5, 0.5, 0.5 ]
}
}
@@ -92,7 +129,7 @@ def register_args(program : ArgumentParser) -> None:
group_processors.add_argument(
'--age-modifier-model',
help = translator.get('help.model', __package__),
default = config.get_str_value('processors', 'age_modifier_model', 'styleganex_age'),
default = config.get_str_value('processors', 'age_modifier_model', 'fran'),
choices = age_modifier_choices.age_modifier_models
),
group_processors.add_argument(
@@ -151,32 +188,57 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFra
model_sizes = get_model_options().get('sizes')
face_landmark_5 = target_face.landmark_set.get('5/68').copy()
crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_templates.get('target'), model_sizes.get('target'))
extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 0.875)
extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_templates.get('target_with_background'), model_sizes.get('target_with_background'))
extend_vision_frame_raw = extend_vision_frame.copy()
box_mask = create_box_mask(extend_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
crop_masks =\
[
box_mask
]
if 'occlusion' in state_manager.get_item('face_mask_types'):
occlusion_mask = create_occlusion_mask(crop_vision_frame)
temp_matrix = merge_matrix([ extend_affine_matrix, cv2.invertAffineTransform(affine_matrix) ])
occlusion_mask = cv2.warpAffine(occlusion_mask, temp_matrix, model_sizes.get('target_with_background'))
crop_masks.append(occlusion_mask)
if state_manager.get_item('age_modifier_model') == 'fran':
box_mask = create_box_mask(crop_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
crop_masks =\
[
box_mask
]
crop_vision_frame = prepare_vision_frame(crop_vision_frame)
extend_vision_frame = prepare_vision_frame(extend_vision_frame)
age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [ -100, 100 ], [ 2.5, -2.5 ])).astype(numpy.float32)
extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction)
extend_vision_frame = normalize_extend_frame(extend_vision_frame)
extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame)
extend_affine_matrix *= (model_sizes.get('target')[0] * 4) / model_sizes.get('target_with_background')[0]
crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
crop_mask = cv2.resize(crop_mask, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4))
paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, crop_mask, extend_affine_matrix)
return paste_vision_frame
if 'occlusion' in state_manager.get_item('face_mask_types'):
occlusion_mask = create_occlusion_mask(crop_vision_frame)
crop_masks.append(occlusion_mask)
crop_vision_frame = prepare_vision_frame(crop_vision_frame)
target_age = numpy.mean(target_face.age)
age_modifier_direction = numpy.array([ target_age, target_age + state_manager.get_item('age_modifier_direction') ], dtype = numpy.float32) / 100
age_modifier_direction = age_modifier_direction.clip(0, 1)
crop_vision_frame = forward(crop_vision_frame, crop_vision_frame, age_modifier_direction)
crop_vision_frame = normalize_vision_frame(crop_vision_frame)
crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
return paste_vision_frame
if state_manager.get_item('age_modifier_model') == 'styleganex_age':
extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 0.875)
extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_templates.get('target_with_background'), model_sizes.get('target_with_background'))
extend_vision_frame_raw = extend_vision_frame.copy()
box_mask = create_box_mask(extend_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
crop_masks =\
[
box_mask
]
if 'occlusion' in state_manager.get_item('face_mask_types'):
occlusion_mask = create_occlusion_mask(crop_vision_frame)
temp_matrix = merge_matrix([ extend_affine_matrix, cv2.invertAffineTransform(affine_matrix) ])
occlusion_mask = cv2.warpAffine(occlusion_mask, temp_matrix, model_sizes.get('target_with_background'))
crop_masks.append(occlusion_mask)
crop_vision_frame = prepare_vision_frame(crop_vision_frame)
extend_vision_frame = prepare_vision_frame(extend_vision_frame)
age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [ -100, 100 ], [ 2.5, -2.5 ])).astype(numpy.float32)
extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction)
extend_vision_frame = normalize_extend_frame(extend_vision_frame)
extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame)
extend_affine_matrix *= (model_sizes.get('target')[0] * 4) / model_sizes.get('target_with_background')[0]
crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
crop_mask = cv2.resize(crop_mask, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4))
paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, crop_mask, extend_affine_matrix)
return paste_vision_frame
return temp_vision_frame
def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame, age_modifier_direction : AgeModifierDirection) -> VisionFrame:
@@ -201,12 +263,24 @@ def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame,
def prepare_vision_frame(vision_frame : VisionFrame) -> VisionFrame:
model_mean = get_model_options().get('mean')
model_standard_deviation = get_model_options().get('standard_deviation')
vision_frame = vision_frame[:, :, ::-1] / 255.0
vision_frame = (vision_frame - 0.5) / 0.5
vision_frame = (vision_frame - model_mean) / model_standard_deviation
vision_frame = numpy.expand_dims(vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
return vision_frame
def normalize_vision_frame(vision_frame : VisionFrame) -> VisionFrame:
model_mean = get_model_options().get('mean')
model_standard_deviation = get_model_options().get('standard_deviation')
vision_frame = vision_frame.transpose(1, 2, 0)
vision_frame = vision_frame * model_standard_deviation + model_mean
vision_frame = vision_frame.clip(0, 1)
vision_frame = vision_frame[:, :, ::-1] * 255
return vision_frame
def normalize_extend_frame(extend_vision_frame : VisionFrame) -> VisionFrame:
model_sizes = get_model_options().get('sizes')
extend_vision_frame = numpy.clip(extend_vision_frame, -1, 1)
@@ -8,7 +8,7 @@ import numpy
import facefusion.capability_store
import facefusion.jobs.job_manager
from facefusion import config, content_analyser, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import is_macos
from facefusion.common_helper import is_macos, is_windows
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path
@@ -51,6 +51,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/ben_2.onnx')
}
},
'type': 'ben',
'size': (1024, 1024),
'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -79,6 +80,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/birefnet_general.onnx')
}
},
'type': 'birefnet',
'size': (1024, 1024),
'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -107,10 +109,69 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/birefnet_portrait.onnx')
}
},
'type': 'birefnet',
'size': (1024, 1024),
'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
},
'corridor_key_1024':
{
'__metadata__':
{
'vendor': 'nikopueringer',
'license': 'Non-Commercial',
'year': 2025
},
'hashes':
{
'background_remover':
{
'url': resolve_download_url('models-3.6.0', 'corridor_key_1024.hash'),
'path': resolve_relative_path('../.assets/models/corridor_key_1024.hash')
}
},
'sources':
{
'background_remover':
{
'url': resolve_download_url('models-3.6.0', 'corridor_key_1024.onnx'),
'path': resolve_relative_path('../.assets/models/corridor_key_1024.onnx')
}
},
'type': 'corridor_key',
'size': (1024, 1024),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
},
'corridor_key_2048':
{
'__metadata__':
{
'vendor': 'nikopueringer',
'license': 'Non-Commercial',
'year': 2025
},
'hashes':
{
'background_remover':
{
'url': resolve_download_url('models-3.6.0', 'corridor_key_2048.hash'),
'path': resolve_relative_path('../.assets/models/corridor_key_2048.hash')
}
},
'sources':
{
'background_remover':
{
'url': resolve_download_url('models-3.6.0', 'corridor_key_2048.onnx'),
'path': resolve_relative_path('../.assets/models/corridor_key_2048.onnx')
}
},
'type': 'corridor_key',
'size': (2048, 2048),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
},
'isnet_general':
{
'__metadata__':
@@ -135,6 +196,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/isnet_general.onnx')
}
},
'type': 'isnet',
'size': (1024, 1024),
'mean': [ 0.5, 0.5, 0.5 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -163,6 +225,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/modnet.onnx')
}
},
'type': 'modnet',
'size': (512, 512),
'mean': [ 0.5, 0.5, 0.5 ],
'standard_deviation': [ 0.5, 0.5, 0.5 ]
@@ -191,6 +254,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/ormbg.onnx')
}
},
'type': 'ormbg',
'size': (1024, 1024),
'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -219,6 +283,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/rmbg_1.4.onnx')
}
},
'type': 'rmbg',
'size': (1024, 1024),
'mean': [ 0.5, 0.5, 0.5 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -247,6 +312,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/rmbg_2.0.onnx')
}
},
'type': 'rmbg',
'size': (1024, 1024),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -275,6 +341,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/silueta.onnx')
}
},
'type': 'silueta',
'size': (320, 320),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -303,6 +370,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/u2net_cloth.onnx')
}
},
'type': 'u2net_cloth',
'size': (768, 768),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -331,6 +399,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/u2net_general.onnx')
}
},
'type': 'u2net',
'size': (320, 320),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -359,6 +428,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/u2net_human.onnx')
}
},
'type': 'u2net',
'size': (320, 320),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -387,6 +457,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/u2netp.onnx')
}
},
'type': 'u2netp',
'size': (320, 320),
'mean': [ 0.485, 0.456, 0.406 ],
'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -407,7 +478,9 @@ def clear_inference_pool() -> None:
def resolve_execution_providers() -> List[ExecutionProvider]:
if is_macos() and has_execution_provider('coreml'):
model_type = get_model_options().get('type')
if is_macos() and has_execution_provider('coreml') or is_windows() and has_execution_provider('directml') and model_type == 'corridor_key':
return [ 'cpu' ]
return state_manager.get_item('execution_providers')
@@ -425,14 +498,21 @@ def register_args(program : ArgumentParser) -> None:
group_processors.add_argument(
'--background-remover-model',
help = translator.get('help.model', __package__),
default = config.get_str_value('processors', 'background_remover_model', 'rmbg_2.0'),
default = config.get_str_value('processors', 'background_remover_model', 'modnet'),
choices = background_remover_choices.background_remover_models
),
group_processors.add_argument(
'--background-remover-color',
help = translator.get('help.color', __package__),
'--background-remover-fill-color',
help = translator.get('help.fill_color', __package__),
type = partial(sanitize_int_range, int_range = background_remover_choices.background_remover_color_range),
default = config.get_int_list('processors', 'background_remover_color', '0 0 0 0'),
default = config.get_int_list('processors', 'background_remover_fill_color', '0 0 0 0'),
nargs = '+'
),
group_processors.add_argument(
'--background-remover-despill-color',
help = translator.get('help.despill_color', __package__),
type = partial(sanitize_int_range, int_range = background_remover_choices.background_remover_color_range),
default = config.get_int_list('processors', 'background_remover_despill_color', '0 0 0 0'),
nargs = '+'
)
],
@@ -442,7 +522,8 @@ def register_args(program : ArgumentParser) -> None:
def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
apply_state_item('background_remover_model', args.get('background_remover_model'))
apply_state_item('background_remover_color', normalize_color(args.get('background_remover_color')))
apply_state_item('background_remover_fill_color', normalize_color(args.get('background_remover_fill_color')))
apply_state_item('background_remover_despill_color', normalize_color(args.get('background_remover_despill_color')))
def pre_check() -> bool:
@@ -474,16 +555,26 @@ def post_process() -> None:
def remove_background(temp_vision_frame : VisionFrame) -> Tuple[VisionFrame, Mask]:
temp_vision_mask = forward(prepare_temp_frame(temp_vision_frame))
temp_vision_mask = normalize_vision_mask(temp_vision_mask)
temp_vision_mask = cv2.resize(temp_vision_mask, temp_vision_frame.shape[:2][::-1])
temp_vision_frame = apply_background_color(temp_vision_frame, temp_vision_mask)
return temp_vision_frame, temp_vision_mask
model_type = get_model_options().get('type')
if model_type == 'corridor_key':
remove_vision_mask, remove_vision_frame = forward_corridor_key(prepare_temp_frame(temp_vision_frame))
remove_vision_frame = numpy.squeeze(remove_vision_frame).transpose(1, 2, 0)
remove_vision_frame = numpy.clip(remove_vision_frame * 255, 0, 255).astype(numpy.uint8)
temp_vision_frame = cv2.resize(remove_vision_frame[:, :, ::-1], temp_vision_frame.shape[:2][::-1])
else:
remove_vision_mask = forward(prepare_temp_frame(temp_vision_frame))
remove_vision_mask = normalize_vision_mask(remove_vision_mask)
remove_vision_mask = cv2.resize(remove_vision_mask, temp_vision_frame.shape[:2][::-1])
temp_vision_frame = apply_despill_color(temp_vision_frame)
temp_vision_frame = apply_fill_color(temp_vision_frame, remove_vision_mask)
return temp_vision_frame, remove_vision_mask
def forward(temp_vision_frame : VisionFrame) -> VisionFrame:
background_remover = get_inference_pool().get('background_remover')
model_name = state_manager.get_item('background_remover_model')
model_type = get_model_options().get('type')
with thread_semaphore():
remove_vision_frame = background_remover.run(None,
@@ -491,20 +582,42 @@ def forward(temp_vision_frame : VisionFrame) -> VisionFrame:
'input': temp_vision_frame
})[0]
if model_name == 'u2net_cloth':
if model_type == 'u2net_cloth':
remove_vision_frame = numpy.argmax(remove_vision_frame, axis = 1)
return remove_vision_frame
def forward_corridor_key(temp_vision_frame : VisionFrame) -> Tuple[Mask, VisionFrame]:
background_remover = get_inference_pool().get('background_remover')
with thread_semaphore():
remove_vision_mask, remove_vision_frame = background_remover.run(None,
{
'input': temp_vision_frame
})
return remove_vision_mask, remove_vision_frame
def prepare_temp_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
model_type = get_model_options().get('type')
model_size = get_model_options().get('size')
model_mean = get_model_options().get('mean')
model_standard_deviation = get_model_options().get('standard_deviation')
if model_type == 'corridor_key':
coarse_color = temp_vision_frame[:, :, ::-1].astype(numpy.float32) / 255.0
coarse_bias = coarse_color[:, :, 1] - numpy.maximum(coarse_color[:, :, 0], coarse_color[:, :, 2])
coarse_vision_mask = cv2.resize(1.0 - numpy.clip(coarse_bias * 2.0, 0, 1), model_size)[:, :, numpy.newaxis]
temp_vision_frame = cv2.resize(temp_vision_frame, model_size)
temp_vision_frame = temp_vision_frame[:, :, ::-1] / 255.0
temp_vision_frame = (temp_vision_frame - model_mean) / model_standard_deviation
if model_type == 'corridor_key':
temp_vision_frame = numpy.concatenate([ temp_vision_frame, coarse_vision_mask ], axis = 2)
temp_vision_frame = temp_vision_frame.transpose(2, 0, 1)
temp_vision_frame = numpy.expand_dims(temp_vision_frame, axis = 0).astype(numpy.float32)
return temp_vision_frame
@@ -516,16 +629,32 @@ def normalize_vision_mask(temp_vision_mask : Mask) -> Mask:
return temp_vision_mask
def apply_background_color(temp_vision_frame : VisionFrame, temp_vision_mask : Mask) -> VisionFrame:
background_remover_color = state_manager.get_item('background_remover_color')
def apply_fill_color(temp_vision_frame : VisionFrame, temp_vision_mask : Mask) -> VisionFrame:
background_remover_fill_color = state_manager.get_item('background_remover_fill_color')
temp_vision_mask = temp_vision_mask.astype(numpy.float32) / 255
temp_vision_mask = numpy.expand_dims(temp_vision_mask, axis = 2)
temp_vision_mask = (1 - temp_vision_mask) * background_remover_color[-1] / 255
color_frame = numpy.zeros_like(temp_vision_frame)
color_frame[:, :, 0] = background_remover_color[2]
color_frame[:, :, 1] = background_remover_color[1]
color_frame[:, :, 2] = background_remover_color[0]
temp_vision_frame = temp_vision_frame * (1 - temp_vision_mask) + color_frame * temp_vision_mask
temp_vision_mask = (1 - temp_vision_mask) * background_remover_fill_color[-1] / 255
fill_vision_frame = numpy.zeros_like(temp_vision_frame)
fill_vision_frame[:, :, 0] = background_remover_fill_color[2]
fill_vision_frame[:, :, 1] = background_remover_fill_color[1]
fill_vision_frame[:, :, 2] = background_remover_fill_color[0]
temp_vision_frame = temp_vision_frame * (1 - temp_vision_mask) + fill_vision_frame * temp_vision_mask
temp_vision_frame = temp_vision_frame.astype(numpy.uint8)
return temp_vision_frame
def apply_despill_color(temp_vision_frame : VisionFrame) -> VisionFrame:
background_remover_despill_color = state_manager.get_item('background_remover_despill_color')
temp_vision_frame = temp_vision_frame.astype(numpy.float32)
color_alpha = background_remover_despill_color[3] / 255.0
despill_vision_frame = numpy.zeros_like(temp_vision_frame)
despill_vision_frame[:, :, 0] = background_remover_despill_color[2]
despill_vision_frame[:, :, 1] = background_remover_despill_color[1]
despill_vision_frame[:, :, 2] = background_remover_despill_color[0]
color_weight = despill_vision_frame / numpy.maximum(numpy.max(background_remover_despill_color[:3]), 1)
color_limit = numpy.roll(temp_vision_frame, 1, 2) + numpy.roll(temp_vision_frame, -1, 2)
limit_vision_frame = numpy.minimum(temp_vision_frame, color_limit * 0.5)
temp_vision_frame = temp_vision_frame + (limit_vision_frame - temp_vision_frame) * color_alpha * color_weight
temp_vision_frame = temp_vision_frame.astype(numpy.uint8)
return temp_vision_frame