diff --git a/comfy_extras/nodes/nodes_audio.py b/comfy_extras/nodes/nodes_audio.py index 74dd18651..1352de6e1 100644 --- a/comfy_extras/nodes/nodes_audio.py +++ b/comfy_extras/nodes/nodes_audio.py @@ -34,7 +34,7 @@ class EmptyLatentAudio(IO.ComfyNode): ) @classmethod - def execute(cls, seconds, batch_size) -> IO.NodeOutput: + def execute(cls, seconds=47.6, batch_size=1) -> IO.NodeOutput: length = round((seconds * 44100 / 2048) / 2) * 2 latent = torch.zeros([batch_size, 64, length], device=comfy.model_management.intermediate_device()) return IO.NodeOutput({"samples": latent, "type": "audio"}) diff --git a/tests/inference/test_workflows.py b/tests/inference/test_workflows.py index 8442ef4dd..4c67f7366 100644 --- a/tests/inference/test_workflows.py +++ b/tests/inference/test_workflows.py @@ -60,7 +60,12 @@ def _generate_config_params(): @pytest.fixture(scope="function", autouse=False, params=_generate_config_params()) async def client(tmp_path_factory, request) -> AsyncGenerator[Any, Any]: config = default_configuration() + # skipping custom nodes should make test startup a little faster + config.disable_all_custom_nodes = True + # disabling pinned memory enables the compilation path — TODO confirm this coupling + config.disable_pinned_memory = True config.update(request.param) + # use ProcessPoolExecutor to respect various config settings async with Comfy(configuration=config, executor=ProcessPoolExecutor(max_workers=1)) as client: yield client