mirror of
https://github.com/facefusion/facefusion-labs.git
synced 2026-04-19 15:56:37 +02:00
some cleaning and add exporting
This commit is contained in:
@@ -28,9 +28,6 @@ disable = false
arcface_path = /assets/pretrained_models/arcface_w600k_r50.pt
landmarker_path = /assets/pretrained_models/landmark_203.pt
motion_extractor_path = /assets/pretrained_models/liveportrait_motion_extractor.pth
feature_extractor_path = /assets/pretrained_models/liveportrait_feature_extractor.pth
warping_network_path = /assets/pretrained_models/liveportrait_warping_model.pth
spade_generator_path = /assets/pretrained_models/liveportrait_spade_generator.pth

[training.losses]
weight_adversarial = 1
@@ -45,7 +42,6 @@ gamma = 0.2

[training.trainer]
max_epochs = 50
disable_discriminator = false

[training.output]
checkpoint_path = checkpoints/last.ckpt
@@ -0,0 +1,6 @@
|
||||
#!/usr/bin/env python3
"""Command-line entry point that exports the trained generator to ONNX.

All export settings (paths, opset version) are read from config.ini by
src.exporting.export(); this script only dispatches to it.
"""

from src.exporting import export


if __name__ == '__main__':
	export()
|
||||
@@ -0,0 +1,25 @@
|
||||
import configparser
from os import makedirs

import torch

from .generator import AdaptiveEmbeddingIntegrationNetwork

CONFIG = configparser.ConfigParser()
CONFIG.read('config.ini')

# Generator construction parameters — must match the architecture the
# checkpoint was trained with.
# 512 presumably matches the ArcFace w600k_r50 embedding size — TODO confirm.
ID_CHANNELS = 512
NUM_BLOCKS = 2


def export() -> None:
	"""Export the trained generator checkpoint to an ONNX model.

	Reads the [exporting] section of config.ini for the output directory,
	the source checkpoint path, the target ONNX path and the opset version,
	then traces the generator with dummy inputs and writes the ONNX file.
	"""
	directory_path = CONFIG.get('exporting', 'directory_path')
	source_path = CONFIG.get('exporting', 'source_path')
	target_path = CONFIG.get('exporting', 'target_path')
	opset_version = CONFIG.getint('exporting', 'opset_version')

	makedirs(directory_path, exist_ok = True)
	# NOTE(review): torch.load unpickles arbitrary objects — only run this on
	# checkpoints from a trusted source.
	state_dict = torch.load(source_path, map_location = 'cpu')['state_dict']['generator']
	model = AdaptiveEmbeddingIntegrationNetwork(ID_CHANNELS, NUM_BLOCKS)
	model.load_state_dict(state_dict)
	model.eval()
	# dummy inputs used purely for tracing: one id embedding, one 256x256 RGB frame
	source_tensor = torch.randn(1, ID_CHANNELS)
	target_tensor = torch.randn(1, 3, 256, 256)
	torch.onnx.export(model, (target_tensor, source_tensor), target_path, input_names = [ 'target', 'source' ], output_names = [ 'output' ], opset_version = opset_version)
|
||||
@@ -111,15 +111,17 @@ class AADLayer(nn.Module):
|
||||
|
||||
class AddBlocksSequential(nn.Sequential):
	"""Sequential container for AAD block groups.

	Child modules are arranged in groups of three; the first module of each
	group (an AADLayer in this file) consumes the running feature map together
	with the attribute and identity embeddings, while the remaining modules
	(activation / convolution) operate on a plain tensor.
	"""

	def forward(self, *inputs : Tuple[Tensor, Tensor, SourceEmbedding]) -> Tuple[Tuple[Tensor, Tensor, SourceEmbedding], ...]:
		# keep the embeddings — they are re-injected before every group of three
		_, attribute_embedding, id_embedding = inputs
		modules = self._modules.values()

		for module_index, module in enumerate(modules):
			# at the start of every group after the first, re-bundle the current
			# tensor with the embeddings so the next layer sees all three inputs
			if module_index % 3 == 0 and module_index > 0:
				inputs = (inputs, attribute_embedding, id_embedding) # type:ignore[assignment]

			if isinstance(inputs, torch.Tensor):
				inputs = module(inputs)
			else:
				inputs = module(*inputs)
		return inputs
|
||||
|
||||
|
||||
@@ -133,20 +135,21 @@ class AADResBlock(nn.Module):
|
||||
for i in range(num_blocks):
|
||||
intermediate_channels = input_channels if i < (num_blocks - 1) else output_channels
|
||||
primary_add_blocks.extend(
|
||||
[ #@todo indent
|
||||
[
|
||||
AADLayer(input_channels, attribute_channels, id_channels),
|
||||
nn.ReLU(inplace = True),
|
||||
nn.Conv2d(input_channels, intermediate_channels, kernel_size = 3, stride = 1, padding = 1, bias = False)
|
||||
])
|
||||
]
|
||||
)
|
||||
self.primary_add_blocks = AddBlocksSequential(*primary_add_blocks)
|
||||
|
||||
if input_channels != output_channels:
|
||||
auxiliary_add_blocks =\
|
||||
[ #@todo indent
|
||||
AADLayer(input_channels, attribute_channels, id_channels),
|
||||
nn.ReLU(inplace = True),
|
||||
nn.Conv2d(input_channels, output_channels, kernel_size = 3, stride = 1, padding = 1, bias = False)
|
||||
]
|
||||
[
|
||||
AADLayer(input_channels, attribute_channels, id_channels),
|
||||
nn.ReLU(inplace = True),
|
||||
nn.Conv2d(input_channels, output_channels, kernel_size = 3, stride = 1, padding = 1, bias = False)
|
||||
]
|
||||
self.auxiliary_add_blocks = AddBlocksSequential(*auxiliary_add_blocks)
|
||||
|
||||
def forward(self, feature_map : Tensor, attribute_embedding : Tensor, id_embedding : SourceEmbedding) -> Tensor:
|
||||
|
||||
Reference in New Issue
Block a user