mirror of https://github.com/facefusion/facefusion.git (synced 2026-04-29 13:05:59 +02:00)
commit a477b66d36: remove xml parsing, remove aitop
216 lines · 6.3 KiB · Python
import os
from functools import lru_cache
from typing import List

import onnxruntime
import pynvml

import facefusion.choices
from facefusion.filesystem import create_directory, is_directory
from facefusion.types import ExecutionDevice, ExecutionProvider, InferenceOptionSet, InferenceProvider

onnxruntime.set_default_logger_severity(3)


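# Check whether a given execution provider is available in the current ONNX Runtime build.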
def has_execution_provider(execution_provider : ExecutionProvider) -> bool:
	return execution_provider in get_available_execution_providers()


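# Map the providers reported by ONNX Runtime back to facefusion execution providers,
# preserving the order defined in facefusion.choices.execution_providers.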
def get_available_execution_providers() -> List[ExecutionProvider]:
	inference_session_providers = onnxruntime.get_available_providers()
	available_execution_providers : List[ExecutionProvider] = []

	for execution_provider, execution_provider_value in facefusion.choices.execution_provider_set.items():
		if execution_provider_value in inference_session_providers:
			index = facefusion.choices.execution_providers.index(execution_provider)
			available_execution_providers.insert(index, execution_provider)

	return available_execution_providers


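# Translate the selected execution providers into (provider, options) pairs for
# onnxruntime.InferenceSession, enabling engine and model caches where the cache
# directory exists or can be created.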
def create_inference_providers(execution_device_id : int, execution_providers : List[ExecutionProvider]) -> List[InferenceProvider]:
	inference_providers : List[InferenceProvider] = []
	cache_path = resolve_cache_path()

	for execution_provider in execution_providers:
		if execution_provider == 'cuda':
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_id': execution_device_id,
				'cudnn_conv_algo_search': resolve_cudnn_conv_algo_search()
			}))

		if execution_provider == 'tensorrt':
			inference_option_set : InferenceOptionSet =\
			{
				'device_id': execution_device_id
			}
			if is_directory(cache_path) or create_directory(cache_path):
				inference_option_set.update(
				{
					'trt_engine_cache_enable': True,
					'trt_engine_cache_path': cache_path,
					'trt_timing_cache_enable': True,
					'trt_timing_cache_path': cache_path,
					'trt_builder_optimization_level': 4
				})
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_option_set))

		if execution_provider in [ 'directml', 'rocm' ]:
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_id': execution_device_id
			}))

		if execution_provider == 'migraphx':
			inference_option_set =\
			{
				'device_id': execution_device_id
			}
			if is_directory(cache_path) or create_directory(cache_path):
				inference_option_set.update(
				{
					'migraphx_model_cache_dir': cache_path
				})
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_option_set))

		if execution_provider == 'coreml':
			inference_option_set =\
			{
				'SpecializationStrategy': 'FastPrediction'
			}
			if is_directory(cache_path) or create_directory(cache_path):
				inference_option_set.update(
				{
					'ModelCacheDirectory': cache_path
				})
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_option_set))

		if execution_provider == 'openvino':
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_type': resolve_openvino_device_type(execution_device_id),
				'precision': 'FP32'
			}))

		if execution_provider == 'qnn':
			inference_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_id': execution_device_id,
				'backend_type': 'htp'
			}))

	if 'cpu' in execution_providers:
		inference_providers.append(facefusion.choices.execution_provider_set.get('cpu'))

	return inference_providers


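# Scope provider caches to the ONNX Runtime version so engines built by one
# release are not reused by another.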
def resolve_cache_path() -> str:
	return os.path.join('.caches', onnxruntime.get_version_string())


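# Use the default cuDNN convolution search on GeForce GTX 16xx devices and the
# exhaustive search everywhere else.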
def resolve_cudnn_conv_algo_search() -> str:
	execution_devices = detect_static_execution_devices()
	product_names = ('GeForce GTX 1630', 'GeForce GTX 1650', 'GeForce GTX 1660')

	for execution_device in execution_devices:
		if execution_device.get('product').get('name').startswith(product_names):
			return 'DEFAULT'

	return 'EXHAUSTIVE'


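# OpenVINO addresses the first GPU as 'GPU' and additional devices as 'GPU.<index>'.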
def resolve_openvino_device_type(execution_device_id : int) -> str:
	if execution_device_id == 0:
		return 'GPU'
	return 'GPU.' + str(execution_device_id)


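# Convert the NVML integer CUDA version (e.g. 12040) into a 'major.minor' string (e.g. '12.4').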
def resolve_cuda_driver_version(cuda_driver_version : int) -> str:
	return '{}.{}'.format(cuda_driver_version // 1000, (cuda_driver_version % 1000) // 10)


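# Cached variant for device properties that do not change while the process runs.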
@lru_cache()
def detect_static_execution_devices() -> List[ExecutionDevice]:
	return detect_execution_devices()


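# Query NVIDIA devices via NVML; any NVML failure is swallowed, so this returns
# an empty list when no NVIDIA driver is present.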
def detect_execution_devices() -> List[ExecutionDevice]:
	execution_devices : List[ExecutionDevice] = []

	try:
		pynvml.nvmlInit()
		device_count = pynvml.nvmlDeviceGetCount()

		for device_id in range(device_count):
			handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
			product_name = pynvml.nvmlDeviceGetName(handle)
			driver_version = pynvml.nvmlSystemGetDriverVersion()
			cuda_driver_version = resolve_cuda_driver_version(pynvml.nvmlSystemGetCudaDriverVersion())
			memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
			utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
			temperature = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
			memory_total_mib = memory_info.total // (1024 * 1024)
			memory_free_mib = memory_info.free // (1024 * 1024)
			memory_used_mib = memory_info.used // (1024 * 1024)
			memory_percent = memory_used_mib / memory_total_mib * 100

			execution_devices.append(
			{
				'driver_version': driver_version,
				'framework':
				{
					'name': 'CUDA',
					'version': cuda_driver_version
				},
				'product':
				{
					'vendor': 'NVIDIA',
					'name': product_name.replace('NVIDIA', '').strip()
				},
				'video_memory':
				{
					'total':
					{
						'value': int(memory_total_mib),
						'unit': 'MiB'
					},
					'free':
					{
						'value': int(memory_free_mib),
						'unit': 'MiB'
					}
				},
				'temperature':
				{
					'gpu':
					{
						'value': int(temperature),
						'unit': 'C'
					},
					'memory': None
				},
				'utilization':
				{
					'gpu':
					{
						'value': int(utilization.gpu),
						'unit': '%'
					},
					'memory':
					{
						'value': int(memory_percent),
						'unit': '%'
					}
				}
			})

		pynvml.nvmlShutdown()
	except Exception:
		pass

	return execution_devices
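
# Example (illustrative, not part of this module): the (provider, options) pairs
# plug straight into an ONNX Runtime session; 'model.onnx' is a placeholder path.
#
#	providers = create_inference_providers(0, [ 'cuda', 'cpu' ])
#	session = onnxruntime.InferenceSession('model.onnx', providers = providers)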