Fix interrupt messaging, add AMD and Intel Dockerfiles

doctorpangloss 2024-05-23 22:51:44 -07:00
parent af814c7390
commit 801ef2e3f0
4 changed files with 9 additions and 3 deletions

@@ -2,4 +2,8 @@ FROM nvcr.io/nvidia/pytorch:24.04-py3
 RUN pip install --no-cache --no-build-isolation git+https://github.com/hiddenswitch/ComfyUI.git
 EXPOSE 8188
 WORKDIR /workspace
+# tries to address https://github.com/pytorch/pytorch/issues/104801
+# and issues reported by importing nodes_canny
+ENV PYTORCH_CUDA_ALLOC_CONF="backend:cudaMallocAsync"
+RUN comfyui --quick-test-for-ci --cpu --cwd /workspace
 CMD ["/usr/local/bin/comfyui", "--listen"]

@@ -2,4 +2,5 @@ FROM rocm/pytorch:rocm6.0.2_ubuntu22.04_py3.10_pytorch_2.1.2
 RUN pip install --no-cache --no-build-isolation git+https://github.com/hiddenswitch/ComfyUI.git
 EXPOSE 8188
 WORKDIR /workspace
+RUN comfyui --quick-test-for-ci --cpu --cwd /workspace
 CMD ["/usr/local/bin/comfyui", "--listen"]

@@ -213,11 +213,11 @@ async def main():
     try:
         await run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server,
                   call_on_start=call_on_start)
-    except asyncio.CancelledError:
+    except (asyncio.CancelledError, KeyboardInterrupt):
         logging.debug("\nStopped server")
     finally:
         if distributed:
             await q.close()
         logging.debug("\nStopped server")
     cleanup_temp()
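
The change above widens the except clause so that a Ctrl+C arriving while the coroutine is awaited is logged the same way as task cancellation. A minimal, self-contained sketch (not the project's code) of that pattern:

import asyncio
import logging

async def serve_forever():
    try:
        # stand-in for the real server loop
        await asyncio.Event().wait()
    except (asyncio.CancelledError, KeyboardInterrupt):
        # cancellation of the task and a Ctrl+C delivered inside the coroutine
        # both land here, so shutdown is logged once either way
        logging.debug("\nStopped server")

if __name__ == "__main__":
    try:
        asyncio.run(serve_forever())
    except KeyboardInterrupt:
        # Ctrl+C can also propagate out of asyncio.run() itself
        pass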

@@ -2,4 +2,5 @@ FROM intel/intel-optimized-pytorch:2.3.0-pip-base
 RUN pip install --no-cache --no-build-isolation git+https://github.com/hiddenswitch/ComfyUI.git
 EXPOSE 8188
 WORKDIR /workspace
+RUN comfyui --quick-test-for-ci --cpu --cwd /workspace
 CMD ["/usr/local/bin/comfyui", "--listen"]