 set -exo pipefail
 
 # Absolute path to the directory of this script.
-CDIR="$(cd "$(dirname "$0")" ; pwd -P)"
+CDIR="$(
+  cd "$(dirname "$0")"
+  pwd -P
+)"
 
 # Import utilities.
 source "${CDIR}/utils/run_tests_utils.sh"
 
-# Default option values. Can be overridden via commandline flags.
-LOGFILE=/tmp/pytorch_py_test.log
-MAX_GRAPH_SIZE=500
-GRAPH_CHECK_FREQUENCY=100
-VERBOSITY=2
-
-# Parse commandline flags:
-#   -L
-#      disable writing to the log file at $LOGFILE.
-#   -M max_graph_size
-#   -C graph_check_frequency
-#   -V verbosity
-#   -h
-#      print the help string
-while getopts 'LM:C:V:h' OPTION
-do
-  case $OPTION in
-    L)
-      LOGFILE=
-      ;;
-    M)
-      MAX_GRAPH_SIZE=$OPTARG
-      ;;
-    C)
-      GRAPH_CHECK_FREQUENCY=$OPTARG
-      ;;
-    V)
-      VERBOSITY=$OPTARG
-      ;;
-    h)
-      echo -e "Usage: $0 TEST_FILTER...\nwhere TEST_FILTERs are globs match .py test files. If no test filter is provided, runs all tests."
-      exit 0
-      ;;
-    \?) # This catches all invalid options.
-      echo "ERROR: Invalid commandline flag."
-      exit 1
-  esac
-done
+parse_options_to_vars $@
+
+# Consume the parsed commandline arguments.
 shift $(($OPTIND - 1))
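Note: `parse_options_to_vars` is defined in `utils/run_tests_utils.sh`, which is not part of this diff. Assuming it simply wraps the `getopts` loop deleted above (flag names and defaults taken from that deleted block, not from the actual utility file), a minimal sketch could look like:

# Sketch only -- the real helper lives in utils/run_tests_utils.sh and may differ.
function parse_options_to_vars {
  # Defaults mirror the ones removed from this script.
  LOGFILE=/tmp/pytorch_py_test.log
  MAX_GRAPH_SIZE=500
  GRAPH_CHECK_FREQUENCY=100
  VERBOSITY=2
  while getopts 'LM:C:V:h' OPTION; do
    case $OPTION in
      L) LOGFILE= ;;
      M) MAX_GRAPH_SIZE=$OPTARG ;;
      C) GRAPH_CHECK_FREQUENCY=$OPTARG ;;
      V) VERBOSITY=$OPTARG ;;
      h)
        echo -e "Usage: $0 TEST_FILTER...\nwhere TEST_FILTERs are globs matching .py test files."
        exit 0
        ;;
      \?) # Invalid option.
        echo "ERROR: Invalid commandline flag."
        exit 1
        ;;
    esac
  done
}

Because bash's OPTIND is global rather than local to the function, the caller's `shift $(($OPTIND - 1))` still consumes the parsed flags.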
 
 # Set the `CONTINUE_ON_ERROR` flag to `1` to make the CI tests continue on error.
@@ -62,35 +30,12 @@ export PYTORCH_TEST_WITH_SLOW=1
 export XLA_DUMP_FATAL_STACK=1
 export CPU_NUM_DEVICES=4
 
-TORCH_XLA_DIR=$(cd ~ ; dirname "$(python -c 'import torch_xla; print(torch_xla.__file__)')")
+TORCH_XLA_DIR=$(
+  cd ~
+  dirname "$(python -c 'import torch_xla; print(torch_xla.__file__)')"
+)
 COVERAGE_FILE="$CDIR/../.coverage"
 
-# Given $1 as a (possibly not normalized) test filepath, returns successfully
-# if it matches any of the space-separated globs $_TEST_FILTER. If
-# $_TEST_FILTER is empty, returns successfully.
-function test_is_selected {
-  if [[ -z "$_TEST_FILTER" ]]; then
-    return 0  # success
-  fi
-
-  # _TEST_FILTER is a space-separate list of globs. Loop through the
-  # list elements.
-  for _FILTER in $_TEST_FILTER; do
-    # realpath normalizes the paths (e.g. resolving `..` and relative paths)
-    # so that they can be compared.
-    case `realpath $1` in
-      `realpath $_FILTER`)
-        return 0  # success
-        ;;
-      *)
-        # No match
-        ;;
-    esac
-  done
-
-  return 1  # failure
-}
-
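For illustration, given the filter semantics described in the deleted comments above (space-separated globs compared after `realpath` normalization, with an empty filter selecting every test), a hypothetical invocation could look like:

# Hypothetical examples; the script path is assumed to be test/run_tests.sh.
./test/run_tests.sh test/test_operations.py test/test_metrics.py   # run only these two files
./test/run_tests.sh                                                 # no filter: run all tests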
 function run_coverage {
   if ! test_is_selected "$1"; then
     return
@@ -230,7 +175,7 @@ function run_xla_op_tests1 {
   run_dynamic "$CDIR/test_operations.py" "$@" --verbosity=$VERBOSITY
   run_dynamic "$CDIR/ds/test_dynamic_shapes.py"
   run_dynamic "$CDIR/ds/test_dynamic_shape_models.py" "$@" --verbosity=$VERBOSITY
-  run_eager_debug "$CDIR/test_operations.py" "$@" --verbosity=$VERBOSITY
+  run_eager_debug "$CDIR/test_operations.py" "$@" --verbosity=$VERBOSITY
   run_test "$CDIR/test_operations.py" "$@" --verbosity=$VERBOSITY
   run_test "$CDIR/test_xla_graph_execution.py" "$@" --verbosity=$VERBOSITY
   run_pt_xla_debug_level2 "$CDIR/test_xla_graph_execution.py" "$@" --verbosity=$VERBOSITY
@@ -263,7 +208,7 @@ function run_xla_op_tests1 {
   run_test "$CDIR/test_python_ops.py"
   run_test "$CDIR/test_ops.py"
   run_test "$CDIR/test_metrics.py"
-  if [ -f "/tmp/metrics.txt" ] ; then
+  if [ -f "/tmp/metrics.txt" ]; then
     rm /tmp/metrics.txt
   fi
   XLA_METRICS_FILE=/tmp/metrics.txt run_test "$CDIR/test_metrics.py"
@@ -371,7 +316,7 @@ function run_xla_op_tests3 {
   # Please keep PJRT_DEVICE and GPU_NUM_DEVICES explicit in the following test commands.
   echo "single-host-single-process"
   PJRT_DEVICE=CUDA GPU_NUM_DEVICES=1 python3 test/test_train_mp_imagenet.py --fake_data --batch_size=16 --num_epochs=1 --num_cores=1 --num_steps=25 --model=resnet18
-  PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18
+  PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18
 
   echo "single-host-multi-process"
   num_devices=$(nvidia-smi --list-gpus | wc -l)
@@ -488,16 +433,7 @@ function run_tests {
   fi
 }
 
-if [[ $# -ge 1 ]]; then
-  # There are positional arguments - set $_TEST_FILTER to them.
-  _TEST_FILTER=$@
-  # Sometimes a test may fail even if it doesn't match _TEST_FILTER. Therefore,
-  # we need to set this to be able to get to the test(s) we want to run.
-  CONTINUE_ON_ERROR=1
-else
-  # No positional argument - run all tests.
-  _TEST_FILTER=""
-fi
+set_test_filter $@
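Note: `set_test_filter` likewise comes from `utils/run_tests_utils.sh` and is not shown in this diff. Assuming it wraps the filter-selection logic deleted above, a minimal sketch could be:

# Sketch only -- the real helper lives in utils/run_tests_utils.sh and may differ.
function set_test_filter {
  if [[ $# -ge 1 ]]; then
    # Positional arguments become the test filter. A filtered run keeps going on
    # failures so the selected test(s) are always reached.
    _TEST_FILTER=$@
    CONTINUE_ON_ERROR=1
  else
    # No positional argument - run all tests.
    _TEST_FILTER=""
  fi
}

Since the assignments are not declared `local`, `_TEST_FILTER` and `CONTINUE_ON_ERROR` stay visible to `test_is_selected` and `run_tests` after the call.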
 
 if [ "$LOGFILE" != "" ]; then
   run_tests 2>&1 | tee $LOGFILE