mirror of
https://github.com/facefusion/facefusion.git
synced 2026-04-22 17:36:16 +02:00
a498f3d618
* remove insecure flag from curl * eliminate repeating definitions * limit processors and ui layouts by choices * follow couple of v4 standards * use more secure mkstemp * dynamic cache path for execution providers * fix benchmarker, prevent path traveling via job-id * fix order in execution provider choices * resort by priority * introduce support for QNN * close file description for Windows to stop crying * prevent ConnectionResetError under windows * needed for nested .caches directory as onnxruntime does not create it * different approach to silent asyncio * update dependencies * simplify the name to just inference providers * switch to trt_builder_optimization_level 4
25 lines
655 B
Python
25 lines
655 B
Python
from facefusion.execution import create_inference_providers, get_available_execution_providers, has_execution_provider
|
|
|
|
|
|
def test_has_execution_provider() -> None:
	# Table-driven check: 'cpu' must be reported as present, 'openvino' as absent.
	# Uses `is` because has_execution_provider() is expected to return a real bool.
	for execution_provider, expected in (('cpu', True), ('openvino', False)):
		assert has_execution_provider(execution_provider) is expected
|
|
|
|
|
|
def test_get_available_execution_providers() -> None:
	# The cpu provider is the universal fallback and must always be listed.
	available_execution_providers = get_available_execution_providers()

	assert 'cpu' in available_execution_providers
|
|
|
|
|
|
def test_create_inference_providers() -> None:
	# Expected ONNX Runtime provider list for device id 1 with cpu + cuda requested:
	# CUDA (with its provider options) is ordered before the CPU fallback.
	cuda_provider_options =\
	{
		'device_id': 1,
		'cudnn_conv_algo_search': 'EXHAUSTIVE'
	}
	expected_inference_providers =\
	[
		('CUDAExecutionProvider', cuda_provider_options),
		'CPUExecutionProvider'
	]

	assert create_inference_providers(1, [ 'cpu', 'cuda' ]) == expected_inference_providers