     libswresample-dev
     libswscale-dev
     pciutils
+    python3-dev
   TORCH_INDEX: '--pre --index-url https://download.pytorch.org/whl/nightly/xpu'
   AGENT_TOOLSDIRECTORY: /tmp/xpu-tool

@@ -116,20 +117,36 @@ jobs:
     render_id: ${{ steps.runner-info.outputs.render_id }}
     hostname: ${{ steps.runner-info.outputs.hostname }}
     pytest_extra_args: ${{ steps.runner-info.outputs.pytest_extra_args }}
+    env:
+      VIRTUAL_ENV: ${{ github.workspace }}/.venv
     steps:
+      - name: Install uv and python-${{ env.python }}
+        uses: astral-sh/setup-uv@v6
+        with:
+          python-version: ${{ env.python }}
+      - name: Prepare environment
+        run: |
+          rm -rf ${{ env.VIRTUAL_ENV }}
+          uv venv ${{ env.VIRTUAL_ENV }}
       - id: getver
         run: |
           # We can't just `pip index version...` and get the last available
           # version as pytorch packages may have tricky dependencies. Instead
-          # we dry run install packages and get versions which would be installed.
+          # we install the packages and read back the versions which actually
+          # got installed. Note that --dry-run is not reliable here, as it does
+          # not perform a thorough check of package dependencies.
           # See: https://github.com/pytorch/pytorch/issues/154687
-          pip install --dry-run --ignore-installed $TORCH_INDEX \
+          uv pip install $TORCH_INDEX \
             torch torchvision torchaudio pytorch-triton-xpu >_log.txt

-          torch=$(cat _log.txt | grep "Would install" | sed -E "s/.*torch-([^ ]*).*/\1/")
-          torchvision=$(cat _log.txt | grep "Would install" | sed -E "s/.*torchvision-([^ ]*).*/\1/")
-          torchaudio=$(cat _log.txt | grep "Would install" | sed -E "s/.*torchaudio-([^ ]*).*/\1/")
-          triton=$(cat _log.txt | grep "Would install" | sed -E "s/.*pytorch-triton-xpu-([^ ]*).*/\1/")
+          torch=$(uv pip show torch | grep Version)
+          torchvision=$(uv pip show torchvision | grep Version)
+          torchaudio=$(uv pip show torchaudio | grep Version)
+          triton=$(uv pip show pytorch-triton-xpu | grep Version)
+          torch=${torch#Version: *}
+          torchvision=${torchvision#Version: *}
+          torchaudio=${torchaudio#Version: *}
+          triton=${triton#Version: *}
           echo "torch=$torch" | tee -a "$GITHUB_OUTPUT"
           echo "torchvision=$torchvision" | tee -a "$GITHUB_OUTPUT"
           echo "torchaudio=$torchaudio" | tee -a "$GITHUB_OUTPUT"
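
Note: the version-extraction pattern in the `getver` step above can be tried in isolation. A minimal shell sketch, where 'Version: 2.9.0.dev20250101+xpu' is a made-up placeholder for whatever `uv pip show torch | grep Version` actually prints on the runner:

    # Hypothetical output line; only the 'Version: ' prefix handling mirrors the step above.
    torch="Version: 2.9.0.dev20250101+xpu"
    torch=${torch#Version: *}   # strip the shortest prefix matching 'Version: *'
    echo "torch=$torch"         # -> torch=2.9.0.dev20250101+xpu
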
@@ -154,8 +171,12 @@ jobs:
     env:
       PYTORCH_DEBUG_XPU_FALLBACK: '1'
       TRANSFORMERS_TEST_DEVICE_SPEC: 'spec.py'
-      # enable pytest parallel run, and continue others if meets crash case such as segmentation fault
-      PYTEST_ADDOPTS: -rsf --timeout 600 --timeout_method=thread --dist worksteal ${{ needs.prepare.outputs.pytest_extra_args }}
+      # Using `--dist loadfile` is a must, as HF tests have complex setups
+      # including setUpClass and @first_run clauses; the 'loadfile' strategy
+      # minimizes the scope for race conditions. Besides, that's how HF
+      # Transformers recommends running tests and how they run in their own CI.
+      # See: https://github.com/huggingface/transformers/blob/v4.56.2/CONTRIBUTING.md?plain=1#L312
+      PYTEST_ADDOPTS: -rsf --timeout 600 --timeout_method=thread --dist loadfile ${{ needs.prepare.outputs.pytest_extra_args }}
     strategy:
       fail-fast: false
       max-parallel: 1
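
Note: with pytest-xdist, `--dist loadfile` keeps all tests from a given file on the same worker, so per-file setups such as setUpClass run inside a single process. pytest reads PYTEST_ADDOPTS from the environment and appends it to the command line, so the final invocation looks roughly like the sketch below (the worker count and test path are placeholders, not values from this workflow):

    export PYTEST_ADDOPTS="-rsf --timeout 600 --timeout_method=thread --dist loadfile"
    python -m pytest -n 4 tests/models/bert   # '-n 4' and the path are hypothetical
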
@@ -224,21 +245,9 @@ jobs:
           fi
       - name: Prepare OS environment
         run: |
-          # as jobs might run in parallel on the same system, apt-get might
-          # step into the lock hold by other job
-          start_time=$SECONDS
-          while ! sudo apt-get update; do
-            sleep 1;
-            if (( $SECONDS - start_time > 60 )); then false; fi
-          done
-          while ! sudo apt-get install -y $PACKAGES; do
-            sleep 1;
-            if (( $SECONDS - start_time > 60 )); then false; fi
-          done
-          while ! git lfs install; do
-            sleep 1;
-            if (( $SECONDS - start_time > 60 )); then false; fi
-          done
+          sudo apt-get update
+          sudo apt-get install -y $PACKAGES
+          git lfs install
       - name: Setup python-${{ env.python }}
         uses: actions/setup-python@v5
         with:
@@ -250,12 +259,17 @@ jobs:
           pip install -U pip wheel setuptools
       - name: Prepare pytorch and deps
         run: |
-          pip install junitparser
           pip install $TORCH_INDEX \
             torch==${{ needs.prepare.outputs.torch }} \
             torchvision==${{ needs.prepare.outputs.torchvision }} \
             torchaudio==${{ needs.prepare.outputs.torchaudio }} \
             pytorch-triton-xpu==${{needs.prepare.outputs.triton }}
+          pip install \
+            junitparser \
+            pytest \
+            pytest-timeout \
+            pytest-xdist \
+            pytest-shard
       - name: Prepare Transformers
         run: |
           pwd
@@ -281,8 +295,6 @@ jobs:
           xpu-smi discovery -y --json --dump -1
       - name: Sanity check installed packages
         run: |
-          # Use latest pytest
-          pip install -U pytest pytest-timeout pytest-xdist pytest-shard
           # These checks are to exit earlier if for any reason Transformers
           # reinstalled torch packages back to CUDA versions (not expected).
           pip show torch | grep Version | grep xpu
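
Note: the grep chain above only succeeds when the installed torch is an XPU build, since the second grep exits non-zero unless the 'Version:' line carries the xpu local version tag. Illustrative output, with a placeholder version string:

    $ pip show torch | grep Version
    Version: 2.9.0.dev20250101+xpu   # a non-XPU wheel (e.g. a +cu126 CUDA build) would fail the 'grep xpu'
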