From dfe9dce676085fd6d289a911dbd3b270c4014a3a Mon Sep 17 00:00:00 2001
From: patientx
Date: Mon, 4 Aug 2025 10:48:27 +0300
Subject: [PATCH] added torch patch alongside the triton patch (from
 https://github.com/sfinktah/amd-torch)

---
 install-n.bat | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/install-n.bat b/install-n.bat
index 87d8d1485..b0b520995 100644
--- a/install-n.bat
+++ b/install-n.bat
@@ -56,9 +56,10 @@ if "%PY_MINOR%"=="12" (
 python -c "import sys; print(sys.version)"
 )
 
-:: patching triton
+:: patching triton & torch (from sfinktah; https://github.com/sfinktah/amd-torch)
 pip install --force-reinstall pypatch-url --quiet
 pypatch-url apply https://raw.githubusercontent.com/sfinktah/amd-torch/refs/heads/main/patches/triton-3.4.0+gita9c80202-cp311-cp311-win_amd64.patch -p 4 triton
+pypatch-url apply https://raw.githubusercontent.com/sfinktah/amd-torch/refs/heads/main/patches/torch-2.7.0+cu118-cp311-cp311-win_amd64.patch -p 4 torch
 
 echo :: %time:~0,8% :: - Installing flash-attention
 
@@ -137,3 +138,4 @@ set MIOPEN_FIND_MODE=2
 set MIOPEN_LOG_LEVEL=3
 
 .\zluda\zluda.exe -- python main.py --auto-launch --use-quad-cross-attention
+
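
Note (reviewer sketch, not part of the patch): a quick way to confirm both
pypatch-url applications landed is to re-import the patched wheels after
install-n.bat finishes. The version strings printed below come from the patch
filenames in the diff (triton 3.4.0+gita9c80202, torch 2.7.0+cu118); the
errorlevel checks are plain batch, assuming the same Python environment the
installer used is still active.

:: verify the patched triton wheel still imports and report its version
python -c "import triton; print('triton', triton.__version__)"
if errorlevel 1 (echo triton import failed after patching & exit /b 1)

:: verify the patched torch wheel still imports and report its CUDA build
python -c "import torch; print('torch', torch.__version__, 'cuda', torch.version.cuda)"
if errorlevel 1 (echo torch import failed after patching & exit /b 1)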