TEST: using XFAIL instead of SKIP where it is required #2099

Draft · wants to merge 2 commits into base: main
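For context on the change: pytest.skip reports a test as not run at all, which is right when a test is genuinely not applicable, while pytest.xfail reports an expected failure, which is right when the functionality is believed broken; an xfail also becomes visible as an XPASS once the underlying bug is fixed. A minimal standalone sketch of the difference (not part of this diff):

import pytest


def test_not_applicable():
    # skip: reported as 's'; says nothing about whether the code works
    pytest.skip("feature not available in this environment")


def test_known_bug():
    # xfail: reported as 'x' (expected failure); the imperative form ends
    # the test immediately, just like skip, so the assert below never runs
    pytest.xfail("known bug: wrong result on this backend")
    assert 2 + 2 == 5  # unreachable after pytest.xfail()


# The marker form actually runs the body and reports XPASS if it passes,
# which is how fixed bugs become visible in test reports.
@pytest.mark.xfail(reason="known bug: wrong result on this backend")
def test_known_bug_marker():
    assert 2 + 2 == 5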
2 changes: 2 additions & 0 deletions daal4py/sklearn/monkeypatch/tests/test_patching.py
@@ -86,5 +86,7 @@ def test_patching(configuration):
        return
    for skip in TO_SKIP:
        if re.search(skip, configuration) is not None:
+            # TODO:
+            # check if xfail is needed instead.
            pytest.skip("SKIPPED", allow_module_level=False)
    raise ValueError("Test patching failed: " + configuration)
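A possible resolution of the TODO above would keep two lists: configurations that cannot run at all stay skipped, and configurations that run but hit known bugs get xfailed. A hedged sketch of that split; TO_XFAIL, check_configuration, and the patterns are hypothetical names, not part of this PR:

import re

import pytest

TO_SKIP = ["unsupported_.*"]  # cannot run at all in this environment
TO_XFAIL = ["known_bug_.*"]   # hypothetical: runs, but known to fail


def check_configuration(configuration):
    # skip what is not applicable; xfail what is expected to fail
    for pattern in TO_SKIP:
        if re.search(pattern, configuration) is not None:
            pytest.skip("not applicable: " + configuration)
    for pattern in TO_XFAIL:
        if re.search(pattern, configuration) is not None:
            pytest.xfail("known failure: " + configuration)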
2 changes: 1 addition & 1 deletion onedal/basic_statistics/tests/test_basic_statistics.py
@@ -214,7 +214,7 @@ def test_options_csr(queue, option, dtype):
fp32tol, fp64tol = tols

if result_option == "max":
pytest.skip("There is a bug in oneDAL's max computations on GPU")
pytest.xfail("There is a bug in oneDAL's max computations on GPU")

seed = 42
row_count, column_count = 20046, 4007
2 changes: 1 addition & 1 deletion onedal/common/tests/test_policy.py
@@ -49,7 +49,7 @@ def test_with_numpy_data(queue):
@pytest.mark.parametrize("memtype", get_memory_usm())
def test_with_usm_ndarray_data(queue, memtype):
if queue is None:
-        pytest.skip(
+        pytest.xfail(
"dpctl Memory object with queue=None uses cached default (gpu if available)"
)

10 changes: 5 additions & 5 deletions onedal/datatypes/tests/test_data.py
@@ -108,7 +108,7 @@ def _test_input_format_c_contiguous_numpy(queue, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_input_format_c_contiguous_numpy(queue, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
_test_input_format_c_contiguous_numpy(queue, dtype)


@@ -130,7 +130,7 @@ def _test_input_format_f_contiguous_numpy(queue, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_input_format_f_contiguous_numpy(queue, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
_test_input_format_f_contiguous_numpy(queue, dtype)


@@ -156,7 +156,7 @@ def _test_input_format_c_not_contiguous_numpy(queue, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_input_format_c_not_contiguous_numpy(queue, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
_test_input_format_c_not_contiguous_numpy(queue, dtype)


@@ -180,7 +180,7 @@ def _test_input_format_c_contiguous_pandas(queue, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_input_format_c_contiguous_pandas(queue, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
_test_input_format_c_contiguous_pandas(queue, dtype)


@@ -204,7 +204,7 @@ def _test_input_format_f_contiguous_pandas(queue, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_input_format_f_contiguous_pandas(queue, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
_test_input_format_f_contiguous_pandas(queue, dtype)


4 changes: 2 additions & 2 deletions onedal/ensemble/tests/test_random_forest.py
@@ -40,7 +40,7 @@ def test_rf_classifier(queue):
@pytest.mark.parametrize("queue", get_queues())
def test_rf_regression(queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("RF regressor predict for the GPU sycl_queue is buggy.")
pytest.xfail("RF regressor predict for the GPU sycl_queue is buggy.")
X, y = make_regression(
n_samples=100, n_features=4, n_informative=2, random_state=0, shuffle=False
)
@@ -84,7 +84,7 @@ def test_rf_regression_random_splitter(queue):
# splitter_mode selection only for GPU enabled.
# For CPU only `best` mode is supported.
if queue and queue.sycl_device.is_gpu:
pytest.skip("RF regressor predict for the GPU sycl_queue is buggy.")
pytest.xfail("RF regressor predict for the GPU sycl_queue is buggy.")
X, y = make_regression(
n_samples=100, n_features=4, n_informative=2, random_state=0, shuffle=False
)
2 changes: 1 addition & 1 deletion onedal/neighbors/tests/test_knn_classification.py
@@ -34,7 +34,7 @@ def test_iris(queue):
@pytest.mark.parametrize("queue", get_queues())
def test_pickle(queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("KNN classifier pickling for the GPU sycl_queue is buggy.")
pytest.xfail("KNN classifier pickling for the GPU sycl_queue is buggy.")
iris = datasets.load_iris()
clf = KNeighborsClassifier(2).fit(iris.data, iris.target, queue=queue)
expected = clf.predict(iris.data, queue=queue)
4 changes: 2 additions & 2 deletions onedal/primitives/tests/test_kernel_functions.py
@@ -29,7 +29,7 @@
@pytest.mark.parametrize("queue", get_queues())
def test_dense_self_linear_kernel(queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Linear kernel for the GPU sycl_queue is buggy.")
pytest.xfail("Linear kernel for the GPU sycl_queue is buggy.")
rng = np.random.RandomState(0)
X = np.array(5 * rng.random_sample((10, 4)))

@@ -56,7 +56,7 @@ def _test_dense_small_linear_kernel(queue, scale, shift, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_dense_small_linear_kernel(queue, scale, shift, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Linear kernel for the GPU sycl_queue is buggy.")
pytest.xfail("Linear kernel for the GPU sycl_queue is buggy.")
_test_dense_small_linear_kernel(queue, scale, shift, dtype)


4 changes: 2 additions & 2 deletions onedal/svm/tests/test_csr_svm.py
@@ -143,7 +143,7 @@ def _test_iris(queue, kernel):
@pytest.mark.parametrize("kernel", ["linear", "rbf", "poly", "sigmoid"])
def test_iris(queue, kernel):
if kernel == "rbf":
pytest.skip("RBF CSR SVM test failing in 2025.0.")
pytest.xfail("RBF CSR SVM test failing in 2025.0.")
_test_iris(queue, kernel)


@@ -163,7 +163,7 @@ def _test_diabetes(queue, kernel):
@pytest.mark.parametrize("kernel", ["linear", "rbf", "poly", "sigmoid"])
def test_diabetes(queue, kernel):
if kernel == "sigmoid":
pytest.skip("Sparse sigmoid kernel function is buggy.")
pytest.xfail("Sparse sigmoid kernel function is buggy.")
_test_diabetes(queue, kernel)


2 changes: 1 addition & 1 deletion onedal/svm/tests/test_nusvr.py
@@ -110,7 +110,7 @@ def _test_diabetes_compare_with_sklearn(queue, kernel):
@pytest.mark.parametrize("kernel", ["linear", "rbf", "poly", "sigmoid"])
def test_diabetes_compare_with_sklearn(queue, kernel):
if kernel == "sigmoid":
pytest.skip("Sparse sigmoid kernel function is buggy.")
pytest.xfail("Sparse sigmoid kernel function is buggy.")
_test_diabetes_compare_with_sklearn(queue, kernel)


4 changes: 2 additions & 2 deletions onedal/svm/tests/test_svc.py
@@ -47,7 +47,7 @@ def _test_libsvm_parameters(queue, array_constr, dtype):
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_libsvm_parameters(queue, array_constr, dtype):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
_test_libsvm_parameters(queue, array_constr, dtype)


@@ -76,7 +76,7 @@ def test_class_weight(queue):
@pytest.mark.parametrize("queue", get_queues())
def test_sample_weight(queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("Sporadic failures on GPU sycl_queue.")
pytest.xfail("Sporadic failures on GPU sycl_queue.")
X = np.array([[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 2]])
y = np.array([1, 1, 1, 2, 2, 2])

2 changes: 1 addition & 1 deletion onedal/svm/tests/test_svr.py
@@ -125,7 +125,7 @@ def _test_diabetes_compare_with_sklearn(queue, kernel):
@pytest.mark.parametrize("kernel", ["linear", "rbf", "poly", "sigmoid"])
def test_diabetes_compare_with_sklearn(queue, kernel):
if kernel == "sigmoid":
pytest.skip("Sparse sigmoid kernel function is buggy.")
pytest.xfail("Sparse sigmoid kernel function is buggy.")
_test_diabetes_compare_with_sklearn(queue, kernel)


4 changes: 2 additions & 2 deletions sklearnex/covariance/tests/test_incremental_covariance.py
@@ -40,7 +40,7 @@
def test_sklearnex_partial_fit_on_gold_data(dataframe, queue, dtype, assume_centered):
is_gpu = queue is not None and queue.sycl_device.is_gpu
if assume_centered and is_gpu and not daal_check_version((2025, "P", 0)):
-        pytest.skip(
+        pytest.xfail(
"Due to a bug on oneDAL side, means are not set to zero when assume_centered=True"
)
from sklearnex.covariance import IncrementalEmpiricalCovariance
@@ -151,7 +151,7 @@ def test_sklearnex_fit_on_random_data(
):
is_gpu = queue is not None and queue.sycl_device.is_gpu
if assume_centered and is_gpu and not daal_check_version((2025, "P", 0)):
-        pytest.skip(
+        pytest.xfail(
"Due to a bug on oneDAL side, means are not set to zero when assume_centered=True"
)
from sklearnex.covariance import IncrementalEmpiricalCovariance
6 changes: 3 additions & 3 deletions sklearnex/ensemble/tests/test_forest.py
@@ -64,7 +64,7 @@ def test_sklearnex_import_rf_classifier(dataframe, queue, block, trees, rows, sc
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_rf_regression(dataframe, queue):
if (not daal_check_version((2025, "P", 200))) and queue and queue.sycl_device.is_gpu:
pytest.skip("Skipping due to bug in histogram merges fixed in 2025.2.")
pytest.xfail("Skipping due to bug in histogram merges fixed in 2025.2.")
from sklearnex.ensemble import RandomForestRegressor

X, y = make_regression(n_features=4, n_informative=2, random_state=0, shuffle=False)
@@ -87,7 +87,7 @@ def test_sklearnex_import_rf_regression(dataframe, queue):
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_et_classifier(dataframe, queue):
if (not daal_check_version((2025, "P", 200))) and queue and queue.sycl_device.is_gpu:
pytest.skip("Skipping due to bug in histogram merges fixed in 2025.2.")
pytest.xfail("Skipping due to bug in histogram merges fixed in 2025.2.")
from sklearnex.ensemble import ExtraTreesClassifier

X, y = make_classification(
@@ -110,7 +110,7 @@ def test_sklearnex_import_et_classifier(dataframe, queue):
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_et_regression(dataframe, queue):
if (not daal_check_version((2025, "P", 200))) and queue and queue.sycl_device.is_gpu:
pytest.skip("Skipping due to bug in histogram merges fixed in 2025.2.")
pytest.xfail("Skipping due to bug in histogram merges fixed in 2025.2.")
from sklearnex.ensemble import ExtraTreesRegressor

X, y = make_regression(n_features=1, random_state=0, shuffle=False)
2 changes: 2 additions & 0 deletions sklearnex/spmd/decomposition/tests/test_pca_spmd.py
@@ -96,6 +96,8 @@ def test_pca_spmd_synthetic(
n_samples, n_features, n_components, whiten, dataframe, queue, dtype
):
# TODO: Resolve issues with batch fallback and lack of support for n_rows_rank < n_cols
+    # TODO:
+    # check if xfail is needed instead.
if n_components == "mle" or n_components == 3:
pytest.skip("Avoid error in case of batch fallback to sklearn")
if n_samples <= n_features:
8 changes: 4 additions & 4 deletions sklearnex/spmd/ensemble/tests/test_forest_spmd.py
@@ -91,7 +91,7 @@ def test_rfcls_spmd_gold(dataframe, queue):
spmd_result = spmd_model.predict(local_dpt_X_test)
batch_result = batch_model.predict(X_test)

pytest.skip("SPMD and batch random forest results not aligned")
pytest.xfail("SPMD and batch random forest results not aligned")
_spmd_assert_allclose(spmd_result, batch_result)


@@ -144,7 +144,7 @@ def test_rfcls_spmd_synthetic(
spmd_result = spmd_model.predict(local_dpt_X_test)
batch_result = batch_model.predict(X_test)

pytest.skip("SPMD and batch random forest results not aligned")
pytest.xfail("SPMD and batch random forest results not aligned")
_spmd_assert_allclose(spmd_result, batch_result)


@@ -208,7 +208,7 @@ def test_rfreg_spmd_gold(dataframe, queue):
spmd_result = spmd_model.predict(local_dpt_X_test)
batch_result = batch_model.predict(X_test)

pytest.skip("SPMD and batch random forest results not aligned")
pytest.xfail("SPMD and batch random forest results not aligned")
_spmd_assert_allclose(spmd_result, batch_result)


@@ -261,5 +261,5 @@ def test_rfreg_spmd_synthetic(
batch_result = batch_model.predict(X_test)

# TODO: remove skips when SPMD and batch are aligned
pytest.skip("SPMD and batch random forest results not aligned")
pytest.xfail("SPMD and batch random forest results not aligned")
_spmd_assert_allclose(spmd_result, batch_result)
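A caveat on the four changes above: an imperative pytest.xfail() call ends the test immediately, so _spmd_assert_allclose still never executes, exactly as with pytest.skip. If the intent is to run the comparison and notice when SPMD and batch become aligned, the marker form would do that. A self-contained sketch under that assumption, with stand-in arrays in place of the real predictions:

import numpy as np
import pytest
from numpy.testing import assert_allclose


@pytest.mark.xfail(
    reason="SPMD and batch random forest results not aligned",
    strict=True,  # an unexpected pass fails the run, forcing the mark's removal
)
def test_spmd_batch_alignment_sketch():
    spmd_result = np.array([1.0, 2.0, 3.1])   # stand-in for SPMD predictions
    batch_result = np.array([1.0, 2.0, 3.0])  # stand-in for batch predictions
    assert_allclose(spmd_result, batch_result)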
@@ -118,7 +118,7 @@ def test_logistic_spmd_gold(dataframe, queue):
def test_logistic_spmd_synthetic(n_samples, n_features, C, tol, dataframe, queue, dtype):
# TODO: Resolve numerical issues when n_rows_rank < n_cols
if n_samples <= n_features:
pytest.skip("Numerical issues when rank rows < columns")
pytest.xfail("Numerical issues when rank rows < columns")

# Import spmd and batch algo
from sklearnex.linear_model import LogisticRegression as LogisticRegression_Batch
8 changes: 4 additions & 4 deletions sklearnex/svm/tests/test_svm.py
@@ -28,7 +28,7 @@
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_svc(dataframe, queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("SVC fit for the GPU sycl_queue is buggy.")
pytest.xfail("SVC fit for the GPU sycl_queue is buggy.")
from sklearnex.svm import SVC

X = np.array([[-2, -1], [-1, -1], [-1, -2], [+1, +1], [+1, +2], [+2, +1]])
@@ -44,7 +44,7 @@ def test_sklearnex_import_svc(dataframe, queue):
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_nusvc(dataframe, queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("NuSVC fit for the GPU sycl_queue is buggy.")
pytest.xfail("NuSVC fit for the GPU sycl_queue is buggy.")
from sklearnex.svm import NuSVC

X = np.array([[-2, -1], [-1, -1], [-1, -2], [+1, +1], [+1, +2], [+2, +1]])
@@ -62,7 +62,7 @@ def test_sklearnex_import_nusvc(dataframe, queue):
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_svr(dataframe, queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("SVR fit for the GPU sycl_queue is buggy.")
pytest.xfail("SVR fit for the GPU sycl_queue is buggy.")
from sklearnex.svm import SVR

X = np.array([[-2, -1], [-1, -1], [-1, -2], [+1, +1], [+1, +2], [+2, +1]])
@@ -78,7 +78,7 @@ def test_sklearnex_import_svr(dataframe, queue):
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import_nusvr(dataframe, queue):
if queue and queue.sycl_device.is_gpu:
pytest.skip("NuSVR fit for the GPU sycl_queue is buggy.")
pytest.xfail("NuSVR fit for the GPU sycl_queue is buggy.")
from sklearnex.svm import NuSVR

X = np.array([[-2, -1], [-1, -1], [-1, -2], [+1, +1], [+1, +2], [+2, +1]])
12 changes: 6 additions & 6 deletions sklearnex/tests/test_run_to_run_stability.py
@@ -162,7 +162,7 @@ def test_standard_estimator_stability(estimator, method, dataframe, queue):
if estimator in ["LogisticRegression", "TSNE"]:
pytest.skip(f"stability not guaranteed for {estimator}")
if estimator in ["KMeans", "PCA"] and "score" in method and queue == None:
pytest.skip(f"variation observed in {estimator}.score")
pytest.xfail(f"variation observed in {estimator}.score")
if estimator in ["IncrementalEmpiricalCovariance"] and method == "mahalanobis":
pytest.skip("allowed fallback to sklearn occurs")
_skip_neighbors(estimator, method)
@@ -191,9 +191,9 @@ def test_special_estimator_stability(estimator, method, dataframe, queue):
if queue is None and estimator in ["LogisticRegression(solver='newton-cg')"]:
pytest.skip(f"stability not guaranteed for {estimator}")
if "KMeans" in estimator and method == "score" and queue == None:
pytest.skip(f"variation observed in KMeans.score")
pytest.xfail(f"variation observed in KMeans.score")
if estimator == "BasicStatistics()" and queue == None:
pytest.skip(f"BasicStatistics not deterministic")
pytest.xfail(f"BasicStatistics not deterministic")
if "NearestNeighbors" in estimator and "radius" in method:
pytest.skip(f"RadiusNeighbors estimator not implemented in sklearnex")
_skip_neighbors(estimator, method)
@@ -216,13 +216,13 @@ def test_special_estimator_stability(estimator, method, dataframe, queue):
@pytest.mark.parametrize("estimator, method", gen_models_info(SPARSE_INSTANCES))
def test_sparse_estimator_stability(estimator, method, dataframe, queue):
if "KMeans" in estimator and method in "score" and queue == None:
pytest.skip(f"variation observed in KMeans.{method}")
pytest.xfail(f"variation observed in KMeans.{method}")
if (
not daal_check_version((2025, "P", 0))
and "KMeans()" in estimator
and queue == None
):
pytest.skip(f"variation observed in KMeans.{method} in 2024.7 oneDAL")
pytest.xfail(f"variation observed in KMeans.{method} in 2024.7 oneDAL")
if "NearestNeighbors" in estimator and "radius" in method:
pytest.skip(f"RadiusNeighbors estimator not implemented in sklearnex")
_skip_neighbors(estimator, method)
@@ -247,7 +247,7 @@ def test_sparse_estimator_stability(estimator, method, dataframe, queue):
@pytest.mark.parametrize("estimator, method", gen_models_info(STABILITY_INSTANCES))
def test_other_estimator_stability(estimator, method, dataframe, queue):
if "KMeans" in estimator and method == "score" and queue == None:
pytest.skip(f"variation observed in KMeans.score")
pytest.xfail(f"variation observed in KMeans.score")
if "NearestNeighbors" in estimator and "radius" in method:
pytest.skip(f"RadiusNeighbors estimator not implemented in sklearnex")
_skip_neighbors(estimator, method)