added torch patches for triton (from https://github.com/sfinktah/amd-torch)

patientx 2025-08-04 10:48:27 +03:00 committed by GitHub
parent 37415c40c1
commit dfe9dce676

@@ -56,9 +56,10 @@ if "%PY_MINOR%"=="12" (
 python -c "import sys; print(sys.version)"
 )
-:: patching triton
+:: patching triton & torch (from sfinktah ; https://github.com/sfinktah/amd-torch )
 pip install --force-reinstall pypatch-url --quiet
 pypatch-url apply https://raw.githubusercontent.com/sfinktah/amd-torch/refs/heads/main/patches/triton-3.4.0+gita9c80202-cp311-cp311-win_amd64.patch -p 4 triton
+pypatch-url apply https://raw.githubusercontent.com/sfinktah/amd-torch/refs/heads/main/patches/torch-2.7.0+cu118-cp311-cp311-win_amd64.patch -p 4 torch
 echo :: %time:~0,8% :: - Installing flash-attention
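
For reference, the patch step in this hunk can be run by hand in the same environment; a minimal sketch, assuming the Python 3.11 wheels named in the patch files (torch 2.7.0+cu118, triton 3.4.0+gita9c80202) are already installed. The version check at the top is an addition here, not part of the commit:

@echo off
:: Sanity-check that the installed wheels match the patch filenames
:: before applying (assumed check, not in the install script).
python -c "import torch, triton; print('torch', torch.__version__, '/ triton', triton.__version__)"
:: Same commands as the hunk above; -p 4 is presumably the usual patch
:: strip level, dropping four leading path components from the patch
:: headers so the hunks land inside the installed packages.
pip install --force-reinstall pypatch-url --quiet
pypatch-url apply https://raw.githubusercontent.com/sfinktah/amd-torch/refs/heads/main/patches/triton-3.4.0+gita9c80202-cp311-cp311-win_amd64.patch -p 4 triton
pypatch-url apply https://raw.githubusercontent.com/sfinktah/amd-torch/refs/heads/main/patches/torch-2.7.0+cu118-cp311-cp311-win_amd64.patch -p 4 torch
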
@@ -137,3 +138,4 @@ set MIOPEN_FIND_MODE=2
 set MIOPEN_LOG_LEVEL=3
 .\zluda\zluda.exe -- python main.py --auto-launch --use-quad-cross-attention
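
The launch hunk can likewise be lifted into a standalone launcher; a minimal sketch, assuming main.py is ComfyUI's entry point and zluda.exe sits in .\zluda as in the script. The comments on the MIOpen variables reflect MIOpen's documented settings, not anything stated in this commit:

@echo off
:: MIOpen settings carried over from the script: FIND_MODE=2 selects the
:: fast kernel-find mode; LOG_LEVEL=3 limits logging to errors.
set MIOPEN_FIND_MODE=2
set MIOPEN_LOG_LEVEL=3
:: Run through ZLUDA so the CUDA-built torch talks to the AMD GPU via HIP.
.\zluda\zluda.exe -- python main.py --auto-launch --use-quad-cross-attention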