From e1c1f7bc69ab2f415684657f84e958f66c764cd5 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:11:53 +0100 Subject: [PATCH 01/85] Testing TestPyPi Package --- .github/workflows/build_wheels.yml | 60 +++++++++++++++--------------- .github/workflows/pytest_cp36.yml | 15 +++++--- .github/workflows/pytest_cp37.yml | 15 +++++--- .github/workflows/pytest_cp38.yml | 15 +++++--- .github/workflows/pytest_cp39.yml | 14 ++++--- pyproject.toml | 6 +-- 6 files changed, 69 insertions(+), 56 deletions(-) diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index c0c1d232e..a0175f6bd 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -26,33 +26,33 @@ -# # name: Build - -# # on: -# # push: -# # branches-ignore: -# # - main -# # pull_request: -# # branches-ignore: -# # - main - -# # jobs: -# # build_wheels: -# # name: Build wheels on ${{ matrix.os }} -# # runs-on: ${{ matrix.os }} -# # strategy: -# # matrix: -# # os: [macos-10.15] -# # # os: [ubuntu-20.04, windows-2019, macos-10.15] - -# # steps: -# # - uses: actions/checkout@v2 - -# # - name: Build wheels -# # uses: pypa/cibuildwheel@v2.4.0 -# # # to supply options, put them in 'env', like: -# # # env: -# # # CIBW_SOME_OPTION: value -# # - uses: actions/upload-artifact@v2 -# # with: -# # path: ./wheelhouse/*.whl \ No newline at end of file +# name: Build + +# on: +# push: +# branches-ignore: +# - main +# pull_request: +# branches-ignore: +# - main + +# jobs: +# build_wheels: +# name: Build wheels on ${{ matrix.os }} +# runs-on: ${{ matrix.os }} +# strategy: +# matrix: +# os: [macos-10.15] +# # os: [ubuntu-20.04, windows-2019, macos-10.15] + +# steps: +# - uses: actions/checkout@v2 + +# - name: Build wheels +# uses: pypa/cibuildwheel@v2.4.0 +# # to supply options, put them in 'env', like: +# # env: +# # CIBW_SOME_OPTION: value +# - uses: actions/upload-artifact@v2 +# with: +# path: ./wheelhouse/*.whl \ No newline at end of file diff --git a/.github/workflows/pytest_cp36.yml b/.github/workflows/pytest_cp36.yml index 73a9805fb..270020916 100644 --- a/.github/workflows/pytest_cp36.yml +++ b/.github/workflows/pytest_cp36.yml @@ -31,7 +31,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -r requirements.txt + ################### Use this when you want to use local wheel installation through dist ################# + # pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -54,15 +55,17 @@ jobs: run: | # make clean # pip install . 
- make dist + ################## Use this when you want to use local wheel installation through dist ###################### + # make dist # pip install dist/* - python setup.py develop + ############################################################################# + # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_IOKR - pytest tests/tests_DIOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR + # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 5e96c027a..6ca6d6473 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -31,7 +31,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -r requirements.txt + ################### Use this when you want to use local wheel installation through dist ################# + # pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -54,15 +55,17 @@ jobs: run: | # make clean # pip install . - make dist + ################## Use this when you want to use local wheel installation through dist ###################### + # make dist # pip install dist/* - python setup.py develop + ############################################################################# + # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_IOKR - pytest tests/tests_DIOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR + # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 5d5ef1fc4..c18bc6c5f 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -31,7 +31,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -r requirements.txt + ################### Use this when you want to use local wheel installation through dist ################# + # pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -54,15 +55,17 @@ jobs: run: | # make clean # pip install . 
- make dist + ################## Use this when you want to use local wheel installation through dist ###################### + # make dist # pip install dist/* - python setup.py develop + ############################################################################# + # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_IOKR - pytest tests/tests_DIOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR + # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index fcd09e610..f5af1300c 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -31,7 +31,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -r requirements.txt + ################### Use this when you want to use local wheel installation through dist ################# + # pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -54,14 +55,17 @@ jobs: run: | # make clean # pip install . - make dist - pip install dist/* + ################## Use this when you want to use local wheel installation through dist ###################### + # make dist + # pip install dist/* + ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_DIOKR - # pytest tests/tests_IOKR tests/tests_DIOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR + # pytest tests/tests_IOKR diff --git a/pyproject.toml b/pyproject.toml index 68c89fb1e..31e5b4244 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,11 +8,11 @@ build-backend = "setuptools.build_meta" #skip = ["cp39-musllinux*"] # Skip for Windows -skip = ["pp*", "cp310-*"] +# skip = ["pp*", "cp310-*"] #Skip for MacOS # skip = ["pp*"] -#Skip for Ubuntu # 32bit cp310 results in error -# skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"] +# Skip for Ubuntu # 32bit cp310 results in error +skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"] # Build `universal2` and `arm64` wheels on an Intel runner. 
From 61de4e89e0f24cba626c63e1fda3574e33c02c9b Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:22:34 +0100 Subject: [PATCH 02/85] Testing TestPyPi Package --- .github/workflows/build_wheels.yml | 6 ++++-- .github/workflows/pytest_cp37.yml | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index a0175f6bd..91e592663 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -1,6 +1,8 @@ -# name: Build +name: Build -# on: [push, pull_request] +on: [push, pull_request] + branches-ignore: + - "main" # jobs: # build_wheels: diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 6ca6d6473..b955ddf3b 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -53,7 +53,7 @@ jobs: - name: Build and compile run: | - # make clean + make clean # pip install . ################## Use this when you want to use local wheel installation through dist ###################### # make dist From 2036e2360217eaee94cfbaa80a8243f519b98d4e Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:28:37 +0100 Subject: [PATCH 03/85] Testing TestPyPi Package --- .github/workflows/pytest_cp37.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index b955ddf3b..8941cf9c3 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -61,7 +61,7 @@ jobs: ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 - pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6 # pip install -i https://test.pypi.org/simple/ structured-predictions python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest From d67fcc470aa9eefb3bca28f52316646ea3f36203 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:40:09 +0100 Subject: [PATCH 04/85] Testing TestPyPi Package --- .github/workflows/pytest_cp37.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 8941cf9c3..000bec166 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -63,7 +63,7 @@ jobs: # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6 # pip install -i https://test.pypi.org/simple/ structured-predictions - python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py + # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest From 432e8d0fbbcb398d265ab31568a4f08aa17b9636 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:41:40 +0100 Subject: [PATCH 05/85] Testing TestPyPi Package --- .github/workflows/pytest_cp37.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 000bec166..2c4268ed0 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -61,7 +61,7 @@ jobs: 
############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 - pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6 + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest From 44435c7b8ad018ce5117cb6daf3860a06d4d3efc Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:46:34 +0100 Subject: [PATCH 06/85] Testing TestPyPi Package --- .github/workflows/pytest_cp37.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 2c4268ed0..d17493fe4 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -67,5 +67,6 @@ jobs: - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + # pytest tests/tests_DIOKR # pytest tests/tests_IOKR From eabd36ffbe7028c7386ffae252b07f6e58b06b12 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 14:54:08 +0100 Subject: [PATCH 07/85] Testing TestPyPi Package --- .github/workflows/pytest_cp37.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index d17493fe4..4db5f67fb 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -69,4 +69,4 @@ jobs: pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py # pytest tests/tests_DIOKR - # pytest tests/tests_IOKR + pytest tests/tests_IOKR From b3c76210e95cbdba2b8f8b9cc78c1aeb0882c8e0 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 15:05:01 +0100 Subject: [PATCH 08/85] Testing TestPyPi Package --- .github/workflows/pytest_cp37.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 4db5f67fb..33c0721c4 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - # os: [macos-latest, ubuntu-latest, windows-latest] + # os: [ubuntu-latest] + os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.7',] env: OS: ${{ matrix.os }} From fe1b0911ce652df587c08790e1d22bffa04b7d72 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 15:15:30 +0100 Subject: [PATCH 09/85] Testing TestPyPi Package --- .github/workflows/pytest_cp38.yml | 17 +++++++++-------- .github/workflows/pytest_cp39.yml | 17 +++++++++-------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index c18bc6c5f..36622eb9d 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - branches-ignore: - - "main" + # branches-ignore: + # - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - 
# os: [macos-latest, ubuntu-latest, windows-latest] + # os: [ubuntu-latest] + os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.8',] env: OS: ${{ matrix.os }} @@ -53,7 +53,7 @@ jobs: - name: Build and compile run: | - # make clean + make clean # pip install . ################## Use this when you want to use local wheel installation through dist ###################### # make dist @@ -63,9 +63,10 @@ jobs: # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions - python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py + # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR - # pytest tests/tests_IOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + # pytest tests/tests_DIOKR + pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index f5af1300c..9bc620aa9 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - branches-ignore: - - "main" + # branches-ignore: + # - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - # os: [macos-latest, ubuntu-latest, windows-latest] + # os: [ubuntu-latest] + os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.9',] env: OS: ${{ matrix.os }} @@ -53,7 +53,7 @@ jobs: - name: Build and compile run: | - # make clean + make clean # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### # make dist @@ -63,9 +63,10 @@ jobs: # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions - python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py + # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py tests/tests_IOKR tests/tests_DIOKR - # pytest tests/tests_IOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + # pytest tests/tests_DIOKR + pytest tests/tests_IOKR From fc1e639e705956ed7d738e2df2e52fc3cbe08bb3 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 15:45:06 +0100 Subject: [PATCH 10/85] Testing TestPyPi Package --- .github/workflows/pytest_cp38.yml | 2 +- .github/workflows/pytest_cp39.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 36622eb9d..64d2878a0 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -67,6 +67,6 @@ jobs: - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py # pytest tests/tests_DIOKR pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 9bc620aa9..7a19ae1de 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -67,6 +67,6 @@ jobs: - name: Test with pytest run: | pip install pytest - pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py # pytest tests/tests_DIOKR pytest tests/tests_IOKR From 2124ff2b403ca840703c0db6599c46b0246f5442 Mon Sep 17 00:00:00 2001 From: yohila Date: Sat, 11 Feb 2023 15:56:13 +0100 Subject: [PATCH 11/85] Testing TestPyPi Package --- .github/workflows/pytest_cp38.yml | 10 ++++------ .github/workflows/pytest_cp39.yml | 8 ++++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 64d2878a0..df7de4263 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -32,7 +32,7 @@ jobs: run: | python -m pip install --upgrade pip ################### Use this when you want to use local wheel installation through dist ################# - # pip install -r requirements.txt + pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -56,13 +56,11 @@ jobs: make clean # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### - # make dist - # pip install dist/* + make dist + pip install dist/* ############################################################################# # python setup.py develop - # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 - pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions - # pip install -i https://test.pypi.org/simple/ structured-predictions + # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 7a19ae1de..891c43f0e 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -32,7 +32,7 @@ jobs: run: | python -m pip install --upgrade pip ################### Use this when you want to use local wheel installation through dist ################# - # pip install -r requirements.txt + pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -56,12 +56,12 @@ jobs: make clean # pip install . ################## Use this when you want to use local wheel installation through dist ###################### - # make dist - # pip install dist/* + make dist + pip install dist/* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 - pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions + # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest From f0ad6bc0ceb2906eb3bf280acad758371be4ef07 Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 20:31:21 +0100 Subject: [PATCH 12/85] Checking Python 3.8 with version 0.0.6 --- .github/workflows/pytest_cp38.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index df7de4263..09990052b 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -32,7 +32,7 @@ jobs: run: | python -m pip install --upgrade pip ################### Use this when you want to use local wheel installation through dist ################# - pip install -r requirements.txt + # pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -56,11 +56,11 @@ jobs: make clean # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### - make dist - pip install dist/* + # make dist + # pip install dist/* ############################################################################# # python setup.py develop - # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6 # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | From e78f498902434e529b804b28bbd3322359d59b91 Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 21:25:34 +0100 Subject: [PATCH 13/85] Checking Pyton 3.8 and 3.9 with local wheel --- .github/workflows/build_wheels.yml | 3 ++- .github/workflows/pytest_cp37.yml | 4 ++-- .github/workflows/pytest_cp38.yml | 9 +++++---- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 91e592663..193125fb7 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -1,6 +1,7 @@ name: Build -on: [push, pull_request] +on: + push: branches-ignore: - "main" diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 33c0721c4..63b4b692b 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - # branches-ignore: - # - "main" + branches-ignore: + - "main" # # branches: [ main ] schedule: - cron: '0 0 1 * *' diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 09990052b..39b19eab5 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -32,7 +32,7 @@ jobs: run: | python -m pip install --upgrade pip ################### Use this when you want to use local wheel installation through dist ################# - # pip install -r requirements.txt + pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -56,11 +56,12 @@ jobs: make clean # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### - # make dist - # pip install dist/* + make dist + pip install dist/* ############################################################################# # python setup.py develop - pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6 + # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6 + # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest run: | From 6668df6e6f613b0eca37e92a1d304ce0f1f3e38d Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 21:39:26 +0100 Subject: [PATCH 14/85] Checking Pyton 3.9 with local wheel --- .github/workflows/build_wheels.yml | 42 +++++++++++++++--------------- .github/workflows/pytest_cp38.yml | 4 +-- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 193125fb7..d39e6eb85 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -5,27 +5,27 @@ on: branches-ignore: - "main" -# jobs: -# build_wheels: -# name: Build wheels on ${{ matrix.os }} -# runs-on: ${{ matrix.os }} -# strategy: -# matrix: -# # os: [ubuntu-20.04, windows-2019, macos-10.15] -# os: [windows-2019] - -# steps: -# - uses: actions/checkout@v2 - -# - name: Build wheels -# uses: pypa/cibuildwheel@v2.4.0 -# # to supply options, put them in 'env', like: -# # env: -# # CIBW_SOME_OPTION: value - -# - uses: actions/upload-artifact@v2 -# with: -# path: ./wheelhouse/*.whl +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + # os: [ubuntu-20.04, windows-2019, macos-10.15] + os: [windows-2019] + + steps: + - uses: actions/checkout@v2 + + - name: Build wheels + uses: pypa/cibuildwheel@v2.4.0 + # to supply options, put them in 'env', like: + # env: + # CIBW_SOME_OPTION: value + + - uses: actions/upload-artifact@v2 + with: + path: ./wheelhouse/*.whl diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 39b19eab5..a37a7e50a 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - # branches-ignore: - # - "main" + branches-ignore: + - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' From 26fc6097319f2ed1b4607e8c5b6679e4de1e6a64 Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 21:40:50 +0100 Subject: [PATCH 15/85] Checking Pyton 3.9 with local wheel only ubuntu --- .github/workflows/pytest_cp39.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 891c43f0e..e4c1f668c 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - # os: [ubuntu-latest] - os: [macos-latest, ubuntu-latest, windows-latest] + os: [ubuntu-latest] + # os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.9',] env: OS: ${{ matrix.os }} From 8ca90c463327555f7e915061a8826edc9ee8952b Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 21:59:54 +0100 Subject: [PATCH 16/85] Testing Pyton 3.9 with local wheel only ubuntu--OK3 --- 
.github/workflows/pytest_cp39.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index e4c1f668c..2f9ece30c 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -67,6 +67,6 @@ jobs: - name: Test with pytest run: | pip install pytest - # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py # pytest tests/tests_DIOKR - pytest tests/tests_IOKR + # pytest tests/tests_IOKR From 9ebfcf71733a37b8ec2949c70df66a6224c84fa6 Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 22:05:05 +0100 Subject: [PATCH 17/85] Testing Pyton 3.9 with local wheel only ubuntu--OK3--DIOKR --- .github/workflows/pytest_cp39.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 2f9ece30c..f2043d643 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -68,5 +68,5 @@ jobs: run: | pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_DIOKR + pytest tests/tests_DIOKR # pytest tests/tests_IOKR From 4d86a781fbd85bc5ba5515b048be5edf7b235451 Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 22:11:49 +0100 Subject: [PATCH 18/85] Testing Pyton 3.9 with local wheel only ubuntu--OK3--DIOKR --- .github/workflows/pytest_cp38.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index a37a7e50a..e8ce4e4ff 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - branches-ignore: - - "main" + # branches-ignore: + # - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' @@ -67,5 +67,5 @@ jobs: run: | pip install pytest # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_DIOKR + pytest tests/tests_DIOKR pytest tests/tests_IOKR From 58d9ac0f05dc0a76b38e3ffb0d3fe706cf30866b Mon Sep 17 00:00:00 2001 From: yohila Date: Sun, 12 Feb 2023 22:28:04 +0100 Subject: [PATCH 19/85] Testing Pyton 3.9 with local wheel only ubuntu--OK3--DIOKR --- .github/workflows/pytest_cp38.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index e8ce4e4ff..8e5b32ec0 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - # os: [ubuntu-latest] - os: [macos-latest, ubuntu-latest, windows-latest] + os: [ubuntu-latest] + # os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.8',] env: OS: ${{ matrix.os }} From 5248cce156f9398282a68ac031bbc392d02e0496 Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 13:25:10 +0100 Subject: [PATCH 20/85] Py 3.8, 3.9 Testing --- .github/workflows/pytest_cp37.yml | 4 ++-- .github/workflows/pytest_cp38.yml | 6 +++--- .github/workflows/pytest_cp39.yml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 63b4b692b..739889ae2 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -68,5 +68,5 @@ jobs: run: | pip install 
pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_DIOKR - pytest tests/tests_IOKR + pytest tests/tests_DIOKR + # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 8e5b32ec0..70dce7aa9 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -66,6 +66,6 @@ jobs: - name: Test with pytest run: | pip install pytest - # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - pytest tests/tests_DIOKR - pytest tests/tests_IOKR + pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py + # pytest tests/tests_DIOKR + # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index f2043d643..2f9ece30c 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -68,5 +68,5 @@ jobs: run: | pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - pytest tests/tests_DIOKR + # pytest tests/tests_DIOKR # pytest tests/tests_IOKR From 293ed3499babe746c21b0003024d0a03439e925c Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 13:36:55 +0100 Subject: [PATCH 21/85] Py 3.7 Testing --- .github/workflows/pytest_cp37.yml | 12 ++++++------ .github/workflows/pytest_cp38.yml | 4 ++-- .github/workflows/pytest_cp39.yml | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 739889ae2..b08d53431 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - branches-ignore: - - "main" + # branches-ignore: + # - "main" # # branches: [ main ] schedule: - cron: '0 0 1 * *' @@ -32,7 +32,7 @@ jobs: run: | python -m pip install --upgrade pip ################### Use this when you want to use local wheel installation through dist ################# - # pip install -r requirements.txt + pip install -r requirements.txt # - name: Lint with flake8 # run: | # pip install flake8 @@ -56,12 +56,12 @@ jobs: make clean # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### - # make dist - # pip install dist/* + make dist + pip install dist/* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 - pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions + # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions # pip install -i https://test.pypi.org/simple/ structured-predictions # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py - name: Test with pytest diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 70dce7aa9..63aca867a 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - # branches-ignore: - # - "main" + branches-ignore: + - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 2f9ece30c..f828007a5 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - # branches-ignore: - # - "main" + branches-ignore: + - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' From ccd6c8a3c66058cdfbf382ed9ece9c9bceef6c0e Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 13:43:39 +0100 Subject: [PATCH 22/85] Py 3.7 Testing --- .github/workflows/pytest_cp37.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index b08d53431..cdb067a04 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - # os: [ubuntu-latest] - os: [macos-latest, ubuntu-latest, windows-latest] + os: [ubuntu-latest] + # os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.7',] env: OS: ${{ matrix.os }} From 7c9659a11b7c2da4ece5036e745e02bfbc98a26b Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 13:51:13 +0100 Subject: [PATCH 23/85] Py 3.7 Testing IOKR DIOKR OK3 --- .github/workflows/pytest_cp37.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index cdb067a04..6601ee7f1 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -69,4 +69,4 @@ jobs: pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py pytest tests/tests_DIOKR - # pytest tests/tests_IOKR + pytest tests/tests_IOKR From 5324a478299d020006c1f0547403b0ff4f7381a3 Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 14:11:51 +0100 Subject: [PATCH 24/85] Py 3.8, 3.9 all OS Testing except IOKR --- .github/workflows/pytest_cp37.yml | 6 +++--- .github/workflows/pytest_cp38.yml | 10 +++++----- .github/workflows/pytest_cp39.yml | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 6601ee7f1..266a0a593 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - # branches-ignore: - # - "main" + branches-ignore: + - "main" # # branches: [ main ] 
schedule: - cron: '0 0 1 * *' @@ -69,4 +69,4 @@ jobs: pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py pytest tests/tests_DIOKR - pytest tests/tests_IOKR + # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index 63aca867a..e571b65fa 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - branches-ignore: - - "main" + # branches-ignore: + # - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - # os: [macos-latest, ubuntu-latest, windows-latest] + # os: [ubuntu-latest] + os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.8',] env: OS: ${{ matrix.os }} @@ -67,5 +67,5 @@ jobs: run: | pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_DIOKR + pytest tests/tests_DIOKR # pytest tests/tests_IOKR diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index f828007a5..4cfb8a142 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - branches-ignore: - - "main" + # branches-ignore: + # - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - # os: [macos-latest, ubuntu-latest, windows-latest] + # os: [ubuntu-latest] + os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.9',] env: OS: ${{ matrix.os }} @@ -68,5 +68,5 @@ jobs: run: | pip install pytest pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py - # pytest tests/tests_DIOKR + pytest tests/tests_DIOKR # pytest tests/tests_IOKR From 10dff7e09bbfb435fbbded6671f86b9363b553e2 Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 14:28:38 +0100 Subject: [PATCH 25/85] Py 3.9 win, mac Testing except IOKR --- .github/workflows/pytest_cp37.yml | 2 +- .github/workflows/pytest_cp38.yml | 4 ++-- .github/workflows/pytest_cp39.yml | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml index 266a0a593..846fc77ea 100644 --- a/.github/workflows/pytest_cp37.yml +++ b/.github/workflows/pytest_cp37.yml @@ -57,7 +57,7 @@ jobs: # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### make dist - pip install dist/* + pip install ./dist/* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml index e571b65fa..5f4c8be5d 100644 --- a/.github/workflows/pytest_cp38.yml +++ b/.github/workflows/pytest_cp38.yml @@ -2,8 +2,8 @@ name: pytesting on: push: - # branches-ignore: - # - "main" + branches-ignore: + - "main" # branches: [ main ] schedule: - cron: '0 0 1 * *' diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 4cfb8a142..4e1fd18af 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -16,8 +16,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - # os: [ubuntu-latest] - os: [macos-latest, ubuntu-latest, windows-latest] + os: [macos-latest, windows-latest] + # os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.9',] env: OS: ${{ matrix.os }} @@ -57,7 +57,7 @@ jobs: # pip install . ################## Use this when you want to use local wheel installation through dist ###################### make dist - pip install dist/* + pip install ./dist/* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 From b042c2ca9e1d380a6985d8fd7c5e70806226ba45 Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 14:46:01 +0100 Subject: [PATCH 26/85] Py 3.9 win Testing except IOKR --- .github/workflows/pytest_cp39.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 4e1fd18af..30b5a0a00 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -16,7 +16,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [macos-latest, windows-latest] + os: [windows-latest] # os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.9',] env: @@ -57,7 +57,7 @@ jobs: # pip install . ################## Use this when you want to use local wheel installation through dist ###################### make dist - pip install ./dist/* + pip install ../dist/* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 From 0b5ea86816f93756701995a8f182a648d9ca573c Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 14:58:21 +0100 Subject: [PATCH 27/85] Py 3.9 win Testing except IOKR --- .github/workflows/pytest_cp39.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 30b5a0a00..0b9fd195f 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -57,7 +57,7 @@ jobs: # pip install . 
################## Use this when you want to use local wheel installation through dist ###################### make dist - pip install ../dist/* + pip install ..\dist\* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 From 277959838f761aa2461e32783c9b4294885e6ab5 Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 15:33:27 +0100 Subject: [PATCH 28/85] Py 3.9 win Testing except IOKR --- .github/workflows/pytest_cp39.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 0b9fd195f..893d95ede 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -57,7 +57,7 @@ jobs: # pip install . ################## Use this when you want to use local wheel installation through dist ###################### make dist - pip install ..\dist\* + pip install dist\* ############################################################################# # python setup.py develop # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6 From a98020afc5a895aecf856c7f09161222b4824eff Mon Sep 17 00:00:00 2001 From: yohila Date: Thu, 23 Feb 2023 16:23:48 +0100 Subject: [PATCH 29/85] Py 3.9 mac Testing except IOKR --- .github/workflows/pytest_cp39.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml index 893d95ede..e24811dff 100644 --- a/.github/workflows/pytest_cp39.yml +++ b/.github/workflows/pytest_cp39.yml @@ -16,7 +16,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [windows-latest] + os: [macos-latest] # os: [macos-latest, ubuntu-latest, windows-latest] python: ['3.9',] env: @@ -57,7 +57,7 @@ jobs: # pip install . 
################## Use this when you want to use local wheel installation through dist ######################
         make dist
-        pip install dist\*
+        pip install dist/*
         #############################################################################
         # python setup.py develop
         # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6
         # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
         # pip install -i https://test.pypi.org/simple/ structured-predictions
         # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py
     - name: Test with pytest
       run: |
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py
         pytest tests/tests_DIOKR
         # pytest tests/tests_IOKR

From e83d00df6baf4d5a93583a6e06485057cb042140 Mon Sep 17 00:00:00 2001
From: yohila
Date: Fri, 24 Feb 2023 21:56:06 +0100
Subject: [PATCH 30/85] py 3.9 new mac

---
 .github/workflows/pytest_cp39.yml          |    1 +
 stpredictions/datasets/load_data.py        |    4 +-
 stpredictions/models/IOKR/Sketch.py        | 1369 ++++++++++
 stpredictions/models/IOKR/SketchedIOKR.py  |  200 ++
 stpredictions/models/IOKR/__init__.py      |    2 +
 stpredictions/models/OK3/_criterion.html   |  748 +++---
 stpredictions/models/OK3/_splitter.html    | 2492 +++++++++---------
 stpredictions/models/OK3/_tree.html        | 2778 +++++++++++----------
 tests/tests_IOKR/IOKR.py                   |   89 +
 tests/tests_IOKR/ISOKR.py                  |  110 +
 tests/tests_IOKR/SIOKR.py                  |  108 +
 tests/tests_IOKR/SISOKR.py                 |  113 +
 12 files changed, 5114 insertions(+), 2900 deletions(-)
 create mode 100644 stpredictions/models/IOKR/Sketch.py
 create mode 100644 stpredictions/models/IOKR/SketchedIOKR.py
 create mode 100644 tests/tests_IOKR/IOKR.py
 create mode 100644 tests/tests_IOKR/ISOKR.py
 create mode 100644 tests/tests_IOKR/SIOKR.py
 create mode 100644 tests/tests_IOKR/SISOKR.py

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index e24811dff..46089d297 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -69,4 +69,5 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py
         pytest tests/tests_DIOKR
+        python tests/tests_IOKR/ISOKR.py tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/stpredictions/datasets/load_data.py b/stpredictions/datasets/load_data.py
index 2305d5208..9403ba121 100644
--- a/stpredictions/datasets/load_data.py
+++ b/stpredictions/datasets/load_data.py
@@ -186,7 +186,7 @@ def load_bibtex_train_from_arff():
     """

     path_tr = join(project_root(), 'datasets/bibtex/bibtex-train.arff')
-    print(path_tr)
+    # print(path_tr)
     X_train, Y_train = load_from_arff(path_tr, label_count=159)

     return X_train, Y_train
@@ -195,7 +195,7 @@ def load_bibtex_test_from_arff():
     """

     path_tr = join(project_root(), 'datasets/bibtex/bibtex-test.arff')
-    print(path_tr)
+    # print(path_tr)
     X_test, Y_test = load_from_arff(path_tr, label_count=159)

     return X_test, Y_test
diff --git a/stpredictions/models/IOKR/Sketch.py b/stpredictions/models/IOKR/Sketch.py
new file mode 100644
index 000000000..1c79f2a69
--- /dev/null
+++ b/stpredictions/models/IOKR/Sketch.py
@@ -0,0 +1,1369 @@
import numpy as np


class Sketch:
    """
    Class of sketch matrices
    """

    def __init__(self, size):
        """
        Initialise a sketch matrix

        Parameters
        ----------
        size: tuple of ints
            Sketch matrix shape.
        """
        self.size = size


class SubSample(Sketch):
    """
    Class of sub-sampling sketch matrices
    """

    def __init__(self, size, probs=None, replace=False):
        """
        Initialise a sub-sampling sketch matrix

        Parameters
        ----------
        size: tuple of ints
            Sketch matrix shape.

        probs: 1-D array-like of floats, optional
            Probabilities of sampling. Default is None, leading to uniform sampling.

        replace: boolean, optional
            With or without replacement. Default is False, i.e. without replacement.
+ """ + super(SubSample, self).__init__(size) + self.indices = np.random.choice(self.size[1], self.size[0], replace=replace, p=probs) + if probs is None: + self.probs = (1.0 / self.size[1]) * np.ones(self.size[1]) + else: + self.probs = probs + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + res = np.sqrt(1.0 / self.size[0]) * x[self.indices] + res *= (1.0 / np.sqrt(self.probs[self.indices])) + return res + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) if right. + S.dot(K) otherwise. + """ + if Y is None: + Y = X.copy() + + if right: + Y_sampled = Y[self.indices] + res = np.sqrt(1.0 / self.size[0]) * kernel(X, Y_sampled) + res *= (1.0 / np.sqrt(self.probs[self.indices])) + return res + + else: + X_sampled = X[self.indices] + res = np.sqrt(1.0 / self.size[0]) * kernel(X_sampled, Y) + res *= (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. + """ + if right: + res = np.sqrt(1.0 / self.size[0]) * M[:, self.indices] + res *= (1.0 / np.sqrt(self.probs[self.indices])) + return res + + else: + res = np.sqrt(1.0 / self.size[0]) * M[self.indices, :] + res *= (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). + """ + X_sampled = X[self.indices] + res = (1.0 / self.size[0]) * kernel(X_sampled, X_sampled) + res *= (1.0 / np.sqrt(self.probs[self.indices])) + res *= (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). 
+ """ + res = (1.0 / self.size[0]) * M[self.indices, self.indices] + res *= (1.0 / np.sqrt(self.probs[self.indices])) + res *= (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + +class SubSampleRad(Sketch): + """ + Class of sub-sampling with Rademacher variables on each line sketch matrices + """ + + def __init__(self, size, probs=None, replace=True): + """ + Initialise a sub-sampling Rademacher sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. + + probs: 1-D array-like of floats, optionnal + Probabilies of sampling. Default is None, leading to Uniform sampling. + + replace: boolean, optionnal + With or without replacement. Default is True, i.e. with replacement. + """ + super(SubSampleRad, self).__init__(size) + self.indices = np.random.choice(self.size[1], self.size[0], replace=replace, p=probs) + if probs is None: + self.probs = (1.0 / self.size[1]) * np.ones(self.size[1]) + else: + self.probs = probs + self.rad = 2 * np.random.binomial(1, 0.5, self.size[0]) - 1 + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + res = np.sqrt(1.0 / self.size[0]) * x[self.indices] + res *= self.rad * (1.0 / np.sqrt(self.probs[self.indices])) + return res + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) if right. + S.dot(K) otherwise. + """ + if Y is None: + Y = X.copy() + + if right: + Y_sampled = Y[self.indices] + res = np.sqrt(1.0 / self.size[0]) * kernel(X, Y_sampled) + res *= self.rad * (1.0 / np.sqrt(self.probs[self.indices])) + return res + + else: + X_sampled = X[self.indices] + res = np.sqrt(1.0 / self.size[0]) * kernel(X_sampled, Y) + res *= np.reshape(self.rad, (self.size[0], -1)) * (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. 
+ """ + if right: + res = np.sqrt(1.0 / self.size[0]) * M[:, self.indices] + res *= self.rad * (1.0 / np.sqrt(self.probs[self.indices])) + return res + + else: + res = np.sqrt(1.0 / self.size[0]) * M[self.indices, :] + res *= np.reshape(self.rad, (self.size[0], -1)) * (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). + """ + X_sampled = X[self.indices] + res = (1.0 / self.size[0]) * kernel(X_sampled, X_sampled) + res *= self.rad * (1.0 / np.sqrt(self.probs[self.indices])) + res *= np.reshape(self.rad, (self.size[0], -1)) * (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). + """ + res = (1.0 / self.size[0]) * M[self.indices, self.indices] + res *= self.rad * (1.0 / np.sqrt(self.probs[self.indices])) + res *= np.reshape(self.rad, (self.size[0], -1)) * (1.0 / np.sqrt(np.reshape(self.probs[self.indices], (self.size[0], -1)))) + return res + + +class Accumulation(Sketch): + """ + Class of accumulation of Sub-Sample Rademacher sketch matrices + """ + def __init__(self, size, m, probs=None, replace=True): + """ + Initialise a sub-sampling Rademacher sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. + + probs: 1-D array-like of floats, optionnal + Probabilies of sampling. Default is None, leading to Uniform sampling. + + replace: boolean, optionnal + With or without replacement. Default is True, i.e. with replacement. + """ + super(Accumulation, self).__init__(size) + self.m = m + self.sketches = [] + for i in range(m): + self.sketches.append(SubSampleRad(size, probs, replace)) + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + res = np.zeros(self.size[0]) + for k in range(self.m): + res += self.sketches[k].multiply_vector(x) + res /= np.sqrt(self.m) + return res + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) if right. + S.dot(K) otherwise. 
+ """ + if Y is None: + Y = X.copy() + + if right: + res = np.zeros((X.shape[0], self.size[0])) + for k in range(self.m): + res += self.sketches[k].multiply_Gram_one_side(X, kernel, Y, right) + res /= np.sqrt(self.m) + return res + + else: + res = np.zeros((self.size[0], Y.shape[0])) + for k in range(self.m): + res += self.sketches[k].multiply_Gram_one_side(X, kernel, Y, right) + res /= np.sqrt(self.m) + return res + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. + """ + if right: + res = np.zeros((M.shape[0], self.size[0])) + for k in range(self.m): + res += self.sketches[k].multiply_matrix_one_side(M, right) + res /= np.sqrt(self.m) + return res + + else: + res = np.zeros((self.size[0], M.shape[1])) + for k in range(self.m): + res += self.sketches[k].multiply_matrix_one_side(M, right) + res /= np.sqrt(self.m) + return res + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). + """ + res = np.zeros((self.size[0], self.size[0])) + for k in range(self.m): + for l in range(self.m): + X_sampled_left = X[self.sketches[k].indices] + X_sampled_right = X[self.sketches[l].indices] + res_temp = (1.0 / self.size[0]) * kernel(X_sampled_left, X_sampled_right) + res_temp *= np.reshape(self.sketches[k].rad, (self.size[0], -1)) * (1.0 / np.sqrt(np.reshape(self.sketches[k].probs[self.sketches[k].indices], (self.size[0], -1)))) + res_temp *= self.sketches[l].rad * (1.0 / np.sqrt(self.sketches[l].probs[self.sketches[l].indices])) + res += res_temp + res /= self.m + return res + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). + """ + res = np.zeros((self.size[0], self.size[0])) + for k in range(self.m): + for l in range(self.m): + res_temp = (1.0 / self.size[0]) * M[self.sketches[k].indices, self.sketches[l].indices] + res_temp *= np.reshape(self.sketches[k].rad, (self.size[0], -1)) * (1.0 / np.sqrt(np.reshape(self.sketches[k].probs[self.sketches[k].indices], (self.size[0], -1)))) + res_temp *= self.sketches[l].rad * (1.0 / np.sqrt(self.sketches[l].probs[self.sketches[l].indices])) + res += res_temp + res /= self.m + return res + + +class SJLT(Sketch): + """ + Class of Sparse Johnson-Lindenstrauss Transform sketch matrices + """ + + def __init__(self, size, m=1): + """ + Initialise a sub-sampling sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. 
+
+        m: int
+            Number of non-zero elements in each column
+        """
+        super(SJLT, self).__init__(size)
+        s, n = size[0], size[1]
+        S = np.empty((0, n))
+        ss = int(s / m)
+        for i in range(m - 1):
+            idx0 = np.random.choice(ss, n)
+            idx1 = np.arange(n)
+            coefs = (2 * np.random.binomial(1, 0.5, n) - 1)
+            S_i = np.zeros((ss, n), dtype=float)
+            S_i[idx0, idx1] = coefs
+            S = np.vstack((S, S_i))
+        r = s % m
+        idx0 = np.random.choice(ss + r, n)
+        idx1 = np.arange(n)
+        coefs = (2 * np.random.binomial(1, 0.5, n) - 1)
+        S_i = np.zeros((ss + r, n), dtype=float)
+        S_i[idx0, idx1] = coefs
+        S = np.vstack((S, S_i))
+        S *= 1.0 / np.sqrt(m)
+        self.S = S.copy()
+
+
+    def multiply_vector(self, x):
+        """
+        Multiply sketch matrix with vector x
+
+        Parameters
+        ----------
+        x: 1-D array-like of size self.size[1]
+            Vector to compute multiplication with.
+
+        Returns
+        -------
+        res: 1-D array-like of size self.size[0]
+            S.dot(x).
+        """
+        return self.S.dot(x)
+
+
+    def multiply_Gram_one_side(self, X, kernel, Y=None, right=True):
+        """
+        Multiply sketch matrix with Gram matrix formed with X and Y and a kernel
+
+        Parameters
+        ----------
+        X: 2-D array-like
+            First input on which Gram matrix is computed
+
+        Y: 2-D array-like, optional.
+            Second input on which Gram matrix is computed. Default is None,
+            in this case Y=X.
+
+        kernel: function of 2 2-D array-like variables.
+            Compute Gram matrix K between X and Y.
+
+        right: boolean, optional.
+            If True, computation of K.dot(S.T) is performed.
+            Else, S.dot(K).
+            Default is True.
+
+        Returns
+        -------
+        res: 2-D array-like
+            K.dot(S.T) of shape (X.shape[0], self.size[0]) if right.
+            S.dot(K) of shape (self.size[0], Y.shape[0]) otherwise.
+        """
+        if Y is None:
+            Y = X.copy()
+
+        if right:
+            K = kernel(X, Y)
+            res = K.dot(self.S.T)
+            """
+            res = np.zeros((n, self.size[0]))
+            for i in range(Y.shape[0]):
+                col = kernel(X, Y[i].reshape(1, -1))
+                line = self.S[:, i]
+                res += np.reshape(col, (n, 1)).dot(np.reshape(line, (1, self.size[0])))
+            """
+            return res
+
+        else:
+            K = kernel(X, Y)
+            res = self.S.dot(K)
+            """
+            res = np.zeros((self.size[0], n))
+            for i in range(X.shape[0]):
+                col = self.S[:, i]
+                line = kernel(X[i].reshape(1, -1), Y)
+                res += np.reshape(col, (self.size[0], 1)).dot(np.reshape(line, (1, n)))
+            """
+            return res
+
+
+    def multiply_matrix_one_side(self, M, right=True):
+        """
+        Multiply sketch matrix with matrix M
+
+        Parameters
+        ----------
+        M: 2-D array-like
+            Matrix which is multiplied by S.
+
+        right: boolean, optional.
+            If True, computation of M.dot(S.T) is performed.
+            Else, S.dot(M).
+            Default is True.
+
+        Returns
+        -------
+        res: 2-D array-like
+            M.dot(S.T) of shape (M.shape[0], self.size[0]) if right.
+            S.dot(M) of shape (self.size[0], M.shape[1]) otherwise.
+        """
+        if right:
+            res = M.dot(self.S.T)
+            return res
+
+        else:
+            res = self.S.dot(M)
+            return res
+
+
+    def multiply_Gram_both_sides(self, X, kernel):
+        """
+        Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel
+
+        Parameters
+        ----------
+        X: 2-D array-like of shape (self.size[1], n_features)
+            Inputs on which Gram matrix is computed
+
+        kernel: function of 2 2-D array-like variables.
+            Compute Gram matrix K with inputs X.
+
+        Returns
+        -------
+        res: 2-D array-like of shape (self.size[0], self.size[0])
+            S.dot(K.dot(S.T)).
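+
+        Examples
+        --------
+        A minimal shape-check sketch (illustrative sizes; the linear kernel
+        below simply stands in for any kernel function):
+
+        >>> import numpy as np
+        >>> lin = lambda X, Y: X.dot(Y.T)
+        >>> S = SJLT((6, 20), m=2)
+        >>> X = np.random.normal(size=(20, 2))
+        >>> S.multiply_Gram_both_sides(X, lin).shape
+        (6, 6)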
+ """ + res = self.multiply_Gram_one_side(X, kernel, right=True) + return self.S.dot(res) + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). + """ + res = self.multiply_matrix_one_side(M, right=True) + return self.S.dot(res) + + +class Rademacher(Sketch): + """ + Class of Rademacher sketch matrices + """ + + def __init__(self, size): + """ + Initialise a sub-sampling sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. + """ + super(Rademacher, self).__init__(size) + self.S = (1 / np.sqrt(size[0])) * (2 * np.random.binomial(1, 0.5, size) - 1) + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + return self.S.dot(x) + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) of shape (self.size[1], self.size[0]) if right. + S.dot(K) of shape (self.size[0], self.size[1]) otherwise. + """ + if Y is None: + Y = X.copy() + + if right: + K = kernel(X, Y) + res = K.dot(self.S.T) + """ + res = np.zeros((n, self.size[0])) + for i in range(Y.shape[0]): + col = kernel(X, Y[i].reshape(1, -1)) + line = self.S[:, i] + res += np.reshape(col, (n, 1)).dot(np.reshape(line, (1, self.size[0]))) + """ + return res + + else: + K = kernel(X, Y) + res = self.S.dot(K) + """ + res = np.zeros((self.size[0], n)) + for i in range(X.shape[0]): + col = self.S[:, i] + line = kernel(X[i].reshape(1, -1), Y) + res += np.reshape(col, (self.size[0], 1)).dot(np.reshape(line, (1, n))) + """ + return res + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. + """ + if right: + res = M.dot(self.S.T) + return res + + else: + res = self.S.dot(M) + return res + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). 
+ """ + res = self.multiply_Gram_one_side(X, kernel, right=True) + return self.S.dot(res) + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). + """ + res = self.multiply_matrix_one_side(M, right=True) + return self.S.dot(res) + + +class Gaussian(Sketch): + """ + Class of Gaussian sketch matrices + """ + + def __init__(self, size): + """ + Initialise a sub-sampling sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. + """ + super(Gaussian, self).__init__(size) + self.S = (1 / np.sqrt(size[0])) * np.random.normal(size=self.size) + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + return self.S.dot(x) + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) of shape (self.size[1], self.size[0]) if right. + S.dot(K) of shape (self.size[0], self.size[1]) otherwise. + """ + if Y is None: + Y = X.copy() + + if right: + K = kernel(X, Y) + res = K.dot(self.S.T) + """ + res = np.zeros((n, self.size[0])) + for i in range(Y.shape[0]): + col = kernel(X, Y[i].reshape(1, -1)) + line = self.S[:, i] + res += np.reshape(col, (n, 1)).dot(np.reshape(line, (1, self.size[0]))) + """ + return res + + else: + K = kernel(X, Y) + res = self.S.dot(K) + """ + res = np.zeros((self.size[0], n)) + for i in range(X.shape[0]): + col = self.S[:, i] + line = kernel(X[i].reshape(1, -1), Y) + res += np.reshape(col, (self.size[0], 1)).dot(np.reshape(line, (1, n))) + """ + return res + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. + """ + if right: + res = M.dot(self.S.T) + return res + + else: + res = self.S.dot(M) + return res + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). 
+ """ + res = self.multiply_Gram_one_side(X, kernel, right=True) + return self.S.dot(res) + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). + """ + res = self.multiply_matrix_one_side(M, right=True) + return self.S.dot(res) + + +class pSparsified(Sketch): + """ + Class of Sp-Sparsified sketches implemented as product of Sub-Gaussian matrix and Sub-Sampling matrix + """ + + def __init__(self, size, p=None, type='Gaussian'): + """ + Initialise a sub-sampling sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. + + p: float, optionnal + Probability for an entry of the sketch matrix to being non-null. + Default is 1/size[1]. + + type: str, optionnal + Type of the p-Sparse sketch matrix, either 'Gaussian' or 'Rademacher'. + Default is 'Gaussian' + """ + super(pSparsified, self).__init__(size) + if p is None: + p = 20 / self.size[1] + self.p = p + self.type = type + B = np.random.binomial(1, self.p, self.size) + idx1 = np.where(B!=0)[1] + idx = np.argwhere(np.all(B[..., :] == 0, axis=0)) + B1 = np.delete(B, idx, axis=1) + B1 = B1.astype(float) + if type == 'Gaussian': + self.SG = np.random.normal(size=B1.shape) * B1.copy() + else: + self.SG = (2 * np.random.binomial(1, 0.5, B1.shape) - 1) * B1.copy() + self.indices = np.unique(idx1) + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + res = self.SG * x[self.indices] + return (1 / np.sqrt(self.size[0] * self.p)) * res + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) of shape (self.size[1], self.size[0]) if right. + S.dot(K) of shape (self.size[0], self.size[1]) otherwise. + """ + if Y is None: + Y = X.copy() + + if right: + Y_sampled = Y[self.indices] + res = kernel(X, Y_sampled).dot(self.SG.T) + return (1 / np.sqrt(self.size[0] * self.p)) * res + + else: + X_sampled = X[self.indices] + res = self.SG.dot(kernel(X_sampled, Y)) + return (1 / np.sqrt(self.size[0] * self.p)) * res + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. 
+ """ + if right: + res = M[:, self.indices].dot(self.SG.T) + return (1 / np.sqrt(self.size[0] * self.p)) * res + + else: + res = self.SG.dot(M[self.indices, :]) + return (1 / np.sqrt(self.size[0] * self.p)) * res + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). + """ + X_sampled = X[self.indices] + res = self.SG.dot(kernel(X_sampled, X_sampled)).dot(self.SG.T) + return (1 / self.size[0] * self.p) * res + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). + """ + res = self.SG.dot(M[np.ix_(self.indices, self.indices)]).dot(self.SG.T) + return (1 / self.size[0] * self.p) * res + + +class Incomplete(Sketch): + """ + Class of Incomplete Rademacher or Gaussian sketch matrices + """ + + def __init__(self, size, s2, type='Gaussian', probs=None): + """ + Initialise a sub-sampling sketch matrix + + Parameters + ---------- + size: tuple of ints + Sketch matrix shape. + + s2: int + Size of Sub-Sampling matrix + + probs: 1-D array-like of floats, optionnal + Probabilies of sampling. Default is None, leading to Uniform sampling. + """ + super(Incomplete, self).__init__(size) + if type == 'Rademacher': + self.S1 = Rademacher((size[0], s2)) + else: + self.S1 = Gaussian((size[0], s2)) + self.S2 = [] + self.S2 = SubSample((s2, size[1]), probs=probs, replace=False) + + + def multiply_vector(self, x): + """ + Multiply sketch matrix with vector x + + Parameters + ---------- + x: 1-D array-like of size self.size[1] + Vector to compute multiplication with. + + Returns + ------- + res: 1-D array-like of size self.size[0] + S.dot(x). + """ + s2 = self.S2.size[0] + res = np.zeros(s2) + res = self.S2.multiply_vector(x) + return self.S1.multiply_vector(res) + + + def multiply_Gram_one_side(self, X, kernel, Y=None, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + X: 2-D array-like + First input on which Gram matrix is computed + + Y: 2-D array-like, optionnal. + Second input on which Gram matrix is computed. Default is None, + in this case Y=X. + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K between X and Y. + + right: boolean, optionnal. + If True, computation of K.dot(S.T) is performed. + Else, S.dot(K). + Default is True. + + Returns + ------- + res: 2-D array-like + K.dot(S.T) of shape (self.size[1], self.size[0]) if right. + S.dot(K) of shape (self.size[0], self.size[1]) otherwise. 
+ """ + if Y is None: + Y = X.copy() + + s2 = self.S2.size[0] + + if right: + res = np.zeros((X.shape[0], s2)) + res = self.S2.multiply_Gram_one_side(X, kernel, Y, right) + return self.S1.multiply_matrix_one_side(res, right) + + else: + res = np.zeros((s2, Y.shape[0])) + res = self.S2.multiply_Gram_one_side(X, kernel, Y, right) + return self.S1.multiply_matrix_one_side(res, right) + + + def multiply_matrix_one_side(self, M, right=True): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + right: boolean, optionnal. + If True, computation of M.dot(S.T) is performed. + Else, S.dot(M). + Default is True. + + Returns + ------- + res: 2-D array-like + M.dot(S.T) of shape (M.shape[0], self.size[0]) if right. + S.dot(M) of shape (self.size[0], M.shape[1]) otherwise. + """ + s2 = self.S2.size[0] + + if right: + res = np.zeros((M.shape[0], s2)) + res = self.S2.multiply_matrix_one_side(M, right) + return self.S1.multiply_matrix_one_side(res, right) + + else: + res = np.zeros((s2, M.shape[1])) + res = self.S2.multiply_matrix_one_side(M, right) + return self.S1.multiply_matrix_one_side(res, right) + + + def multiply_Gram_both_sides(self, X, kernel): + """ + Multiply on both sides sketch matrix with Gram matrix formed with X and a kernel + + Parameters + ---------- + X: 2-D array-like of shape (self.size[1], n_features) + Inputs on which Gram matrix is computed + + kernel: function of 2 2-D array-like variables. + Compute Gram matrix K with inputs X. + + Returns + ------- + res: 2-D array-like of shape (self.size[0], self.size[0]) + S.dot(K.dot(S.T)). + """ + s2 = self.S2.size[0] + res = np.zeros((s2, s2)) + X_sampled_left = X[self.S2.indices] + X_sampled_right = X[self.S2.indices] + res_temp = (1.0 / s2) * kernel(X_sampled_left, X_sampled_right) + res_temp *= (1.0 / np.sqrt(np.reshape(self.S2.probs[self.S2.indices], (s2, -1)))) + res_temp *= (1.0 / np.sqrt(self.S2.probs[self.S2.indices])) + res = res_temp.copy() + return self.S1.multiply_matrix_both_sides(res) + + + def multiply_matrix_both_sides(self, M): + """ + Multiply sketch matrix with Gram matrix formed with X and Y and a kernel + + Parameters + ---------- + M: 2-D array-like + Matrix which is multiplied by S. + + Returns + ------- + res: 2-D array-like + S.dot(M.dot(S.T)) of shape (self.size[0], self.size[0]). 
+ """ + s2 = self.S2.size[0] + res = np.zeros((s2, s2)) + res_temp = (1.0 / s2) * M[self.S2.indices, self.S2.indices] + res_temp *= (1.0 / np.sqrt(self.S2.probs[self.S2.indices]).reshape((-1, 1))) + res_temp *= (1.0 / np.sqrt(self.S2.probs[self.S2.indices])) + res = res_temp.copy() + return self.S1.multiply_matrix_both_sides(res) \ No newline at end of file diff --git a/stpredictions/models/IOKR/SketchedIOKR.py b/stpredictions/models/IOKR/SketchedIOKR.py new file mode 100644 index 000000000..3b5525c93 --- /dev/null +++ b/stpredictions/models/IOKR/SketchedIOKR.py @@ -0,0 +1,200 @@ +import numpy as np +from time import time + + +class IOKR: + + def __init__(self): + self.X_tr = None + self.Y_tr = None + self.input_kernel = None + self.output_kernel = None + self.sy = None + self.M = None + self.fit_time = None + self.decode_time = None + self.verbose = 0 + + def fit(self, X, Y, L, input_kernel, output_kernel): + + t0 = time() + self.X_tr = X.copy() + self.Y_tr = Y.copy() + self.input_kernel = input_kernel + self.output_kernel = output_kernel + Kx = input_kernel(self.X_tr, Y=self.X_tr) + n = Kx.shape[0] + self.M = np.linalg.inv(Kx + n * L * np.eye(n)) + self.fit_time = time() - t0 + if self.verbose > 0: + print(f'Fitting time: {self.fit_time}') + + def predict(self, X_te, Y_c=None): + + if Y_c is None: + Y_c = self.Y_tr.copy() + t0 = time() + Kx = self.input_kernel(X_te, Y=self.X_tr) + Ky = self.output_kernel(self.Y_tr, Y=Y_c) + scores = Kx.dot(self.M).dot(Ky) + idx_pred = np.argmax(scores, axis=1) + self.decode_time = time() - t0 + if self.verbose > 0: + print(f'Decoding time: {self.decode_time}') + + return self.Y_tr[idx_pred] + + + +class SIOKR: + + def __init__(self): + self.X_tr = None + self.Y_tr = None + self.input_kernel = None + self.output_kernel = None + self.S = None + self.M = None + self.fit_time = None + self.decode_time = None + self.verbose = 0 + + def fit(self, X, Y, S, L, input_kernel, output_kernel, mu=1e-8): + + t0 = time() + self.S = S + self.X_tr = X.copy() + self.Y_tr = Y.copy() + self.input_kernel = input_kernel + self.output_kernel = output_kernel + n = X.shape[0] + s = S.size[0] + self.Y_tr = Y + SKST = S.multiply_Gram_both_sides(X, self.input_kernel) + KST = S.multiply_Gram_one_side(X, self.input_kernel, X) + B = KST.T.dot(KST) + n * L * SKST + B_inv = np.linalg.inv(B + mu * np.eye(s)) + self.M = B_inv.dot(KST.T) + self.fit_time = time() - t0 + if self.verbose > 0: + print(f'Fitting time: {self.fit_time}') + + def predict(self, X_te, Y_c=None): + + if Y_c is None: + Y_c = self.Y_tr.copy() + t0 = time() + K_te_trST = self.S.multiply_Gram_one_side(X_te, self.input_kernel, Y=self.X_tr) + Ky = self.output_kernel(self.Y_tr, Y=Y_c) + scores = (K_te_trST.dot(self.M)).dot(Ky) + idx_pred = np.argmax(scores, axis=1) + self.decode_time = time() - t0 + if self.verbose > 0: + print(f'Decoding time: {self.decode_time}') + + return self.Y_tr[idx_pred] + + + +class ISOKR: + + def __init__(self): + self.X_tr = None + self.Y_tr = None + self.input_kernel = None + self.output_kernel = None + self.S = None + self.KyST = None + self.SKyST_inv = None + self.M = None + self.fit_time = None + self.decode_time = None + self.verbose = 0 + + def fit(self, X, Y, S, L, input_kernel, output_kernel, mu=0): + + t0 = time() + self.S = S + self.X_tr = X.copy() + self.Y_tr = Y.copy() + self.input_kernel = input_kernel + self.output_kernel = output_kernel + Kx = self.input_kernel(X, X) + n = Kx.shape[0] + s = S.size[0] + self.M = np.linalg.inv(Kx + n * L * np.eye(n)) + self.KyST = 
S.multiply_Gram_one_side(Y, self.output_kernel, Y) + self.SKyST_inv = np.linalg.inv(S.multiply_Gram_both_sides(Y, self.output_kernel) + mu * np.eye(s)) + self.fit_time = time() - t0 + if self.verbose > 0: + print(f'Fitting time: {self.fit_time}') + + def predict(self, X_te, Y_c=None): + + if Y_c is None: + Y_c = self.Y_tr.copy() + t0 = time() + Kx = self.input_kernel(X_te, self.X_tr) + SKy = self.S.multiply_Gram_one_side(self.Y_tr, self.output_kernel, Y_c, right=False) + scores = Kx.dot(self.M).dot(self.KyST).dot(self.SKyST_inv).dot(SKy) + idx_pred = np.argmax(scores, axis=1) + self.decode_time = time() - t0 + if self.verbose > 0: + print(f'Decoding time: {self.decode_time}') + + return self.Y_tr[idx_pred] + + + +class SISOKR: + + def __init__(self): + self.X_tr = None + self.Y_tr = None + self.input_kernel = None + self.output_kernel = None + self.S_in = None + self.S_out = None + self.M = None + self.KyST = None + self.SKyST_inv = None + self.fit_time = None + self.decode_time = None + self.verbose = 0 + + def fit(self, X, Y, S_in, S_out, L, input_kernel, output_kernel, mu_in=1e-8, mu_out=0): + + t0 = time() + self.S_in = S_in + self.S_out = S_out + self.X_tr = X.copy() + self.Y_tr = Y.copy() + self.input_kernel = input_kernel + self.output_kernel = output_kernel + n = X.shape[0] + s_in = S_in.size[0] + s_out = S_out.size[0] + SKST = S_in.multiply_Gram_both_sides(X, self.input_kernel) + KST = S_in.multiply_Gram_one_side(X, self.input_kernel, X) + B = KST.T.dot(KST) + n * L * SKST + self.M = np.linalg.inv(B + mu_in * np.eye(s_in)).dot(KST.T) + self.KyST = S_out.multiply_Gram_one_side(Y, self.output_kernel, Y) + self.SKyST_inv = np.linalg.inv(S_out.multiply_Gram_both_sides(Y, self.output_kernel) + mu_out * np.eye(s_out)) + self.fit_time = time() - t0 + if self.verbose > 0: + print(f'Fitting time: {self.fit_time}') + + def predict(self, X_te, Y_c=None): + + if Y_c is None: + Y_c = self.Y_tr.copy() + t0 = time() + KxST = self.S_in.multiply_Gram_one_side(X_te, self.input_kernel, Y=self.X_tr) + SKy = self.S_out.multiply_Gram_one_side(self.Y_tr, self.output_kernel, Y_c, right=False) + scores = KxST.dot(self.M).dot(self.KyST).dot(self.SKyST_inv).dot(SKy) + idx_pred = np.argmax(scores, axis=1) + self.decode_time = time() - t0 + if self.verbose > 0: + print(f'Decoding time: {self.decode_time}') + + return self.Y_tr[idx_pred] \ No newline at end of file diff --git a/stpredictions/models/IOKR/__init__.py b/stpredictions/models/IOKR/__init__.py index a97e1ff6c..add1ad6a4 100644 --- a/stpredictions/models/IOKR/__init__.py +++ b/stpredictions/models/IOKR/__init__.py @@ -6,6 +6,8 @@ # All submodules and packages from stpredictions.models.IOKR.model import IOKR +from stpredictions.models.IOKR.Sketch import * +from stpredictions.models.IOKR.SketchedIOKR import * from stpredictions.models.IOKR.utils import * # __all__ = ['IOKR' , 'SGD', 'MyDataset',] diff --git a/stpredictions/models/OK3/_criterion.html b/stpredictions/models/OK3/_criterion.html index cb6de1453..6c315b268 100644 --- a/stpredictions/models/OK3/_criterion.html +++ b/stpredictions/models/OK3/_criterion.html @@ -286,6 +286,80 @@ .cython.score-252 {background-color: #FFFF09;} .cython.score-253 {background-color: #FFFF09;} .cython.score-254 {background-color: #FFFF09;} +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; 
background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.cython .hll { background-color: #ffffcc } +.cython { background: #f8f8f8; } +.cython .c { color: #3D7B7B; font-style: italic } /* Comment */ +.cython .err { border: 1px solid #FF0000 } /* Error */ +.cython .k { color: #008000; font-weight: bold } /* Keyword */ +.cython .o { color: #666666 } /* Operator */ +.cython .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.cython .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.cython .cp { color: #9C6500 } /* Comment.Preproc */ +.cython .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.cython .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.cython .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.cython .gd { color: #A00000 } /* Generic.Deleted */ +.cython .ge { font-style: italic } /* Generic.Emph */ +.cython .gr { color: #E40000 } /* Generic.Error */ +.cython .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.cython .gi { color: #008400 } /* Generic.Inserted */ +.cython .go { color: #717171 } /* Generic.Output */ +.cython .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.cython .gs { font-weight: bold } /* Generic.Strong */ +.cython .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.cython .gt { color: #0044DD } /* Generic.Traceback */ +.cython .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.cython .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.cython .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.cython .kp { color: #008000 } /* Keyword.Pseudo */ +.cython .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.cython .kt { color: #B00040 } /* Keyword.Type */ +.cython .m { color: #666666 } /* Literal.Number */ +.cython .s { color: #BA2121 } /* Literal.String */ +.cython .na { color: #687822 } /* Name.Attribute */ +.cython .nb { color: #008000 } /* Name.Builtin */ +.cython .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.cython .no { color: #880000 } /* Name.Constant */ +.cython .nd { color: #AA22FF } /* Name.Decorator */ +.cython .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.cython .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.cython .nf { color: #0000FF } /* Name.Function */ +.cython .nl { color: #767600 } /* Name.Label */ +.cython .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.cython .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.cython .nv { color: #19177C } /* Name.Variable */ +.cython .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.cython .w { color: #bbbbbb } /* Text.Whitespace */ +.cython .mb { color: #666666 } /* Literal.Number.Bin */ +.cython .mf { color: #666666 } /* Literal.Number.Float */ +.cython .mh { color: #666666 } /* Literal.Number.Hex */ +.cython .mi { color: #666666 } /* Literal.Number.Integer */ +.cython .mo { color: #666666 } /* Literal.Number.Oct */ +.cython .sa { color: #BA2121 } /* Literal.String.Affix */ +.cython .sb { color: #BA2121 } /* Literal.String.Backtick */ +.cython .sc { color: #BA2121 } /* Literal.String.Char */ +.cython .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.cython .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.cython .s2 { color: #BA2121 } /* Literal.String.Double */ +.cython .se { color: 
#AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.cython .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.cython .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.cython .sx { color: #008000 } /* Literal.String.Other */ +.cython .sr { color: #A45A77 } /* Literal.String.Regex */ +.cython .s1 { color: #BA2121 } /* Literal.String.Single */ +.cython .ss { color: #19177C } /* Literal.String.Symbol */ +.cython .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.cython .fm { color: #0000FF } /* Name.Function.Magic */ +.cython .vc { color: #19177C } /* Name.Variable.Class */ +.cython .vg { color: #19177C } /* Name.Variable.Global */ +.cython .vi { color: #19177C } /* Name.Variable.Instance */ +.cython .vm { color: #19177C } /* Name.Variable.Magic */ +.cython .il { color: #666666 } /* Literal.Number.Integer.Long */ @@ -295,37 +369,37 @@ Click on a line that starts with a "+" to see the C code that Cython generated for it.

Raw output: _criterion.c

-
+001: # cython: cdivision=True
+
+001: # cython: cdivision=True
  __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
 002: # cython: boundscheck=False
-
 003: # cython: wraparound=False
+
 002: # cython: boundscheck=False
+
 003: # cython: wraparound=False
 004: 
-
 005: from libc.stdlib cimport calloc
-
 006: from libc.stdlib cimport free
-
 007: from libc.string cimport memcpy
-
 008: from libc.string cimport memset
-
 009: from libc.math cimport fabs
+
 005: from libc.stdlib cimport calloc
+
 006: from libc.stdlib cimport free
+
 007: from libc.string cimport memcpy
+
 008: from libc.string cimport memset
+
 009: from libc.math cimport fabs
 010: 
-
+011: import numpy as np
+
+011: import numpy as np
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(1, 11, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
 012: cimport numpy as np
-
+013: np.import_array()
+
 012: cimport numpy as np
+
+013: np.import_array()
  __pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
 
 014: 
-
 015: from sklearn.tree._utils cimport log
-
 016: from sklearn.tree._utils cimport safe_realloc
-
 017: from sklearn.tree._utils cimport sizet_ptr_to_ndarray
-
 018: from sklearn.tree._utils cimport WeightedMedianCalculator
+
 015: from sklearn.tree._utils cimport log
+
 016: from sklearn.tree._utils cimport safe_realloc
+
 017: from sklearn.tree._utils cimport sizet_ptr_to_ndarray
+
 018: from sklearn.tree._utils cimport WeightedMedianCalculator
 019: 
-
 020: # from kernel import Kernel
+
 020: # from kernel import Kernel
 021: 
-
+022: cdef class Criterion:
+
+022: cdef class Criterion:
struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion {
   int (*init)(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *, __Pyx_memviewslice, __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *, double, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *, __pyx_t_7sklearn_4tree_5_tree_SIZE_t, __pyx_t_7sklearn_4tree_5_tree_SIZE_t);
   int (*reset)(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *);
@@ -339,13 +413,13 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_vtabptr_13stpredictions_6models_3OK3_10_criterion_Criterion;
 
-
 023:     """Interface for impurity criteria.
+
 023:     """Interface for impurity criteria.
 024: 
-
 025:     This object stores methods on how to calculate how good a split is using
-
 026:     different metrics.
-
 027:     """
+
 025:     This object stores methods on how to calculate how good a split is using
+
 026:     different metrics.
+
 027:     """
 028: 
-
+029:     def __dealloc__(self):
+
+029:     def __dealloc__(self):
/* Python wrapper */
 static void __pyx_pw_13stpredictions_6models_3OK3_10_criterion_9Criterion_1__dealloc__(PyObject *__pyx_v_self); /*proto*/
 static void __pyx_pw_13stpredictions_6models_3OK3_10_criterion_9Criterion_1__dealloc__(PyObject *__pyx_v_self) {
@@ -364,11 +438,11 @@
   /* function exit code */
   __Pyx_RefNannyFinishContext();
 }
-
 030:         """Destructor."""
+
 030:         """Destructor."""
 031: 
-
 032:         pass
+
 032:         pass
 033: 
-
+034:     def __getstate__(self):
+
+034:     def __getstate__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_10_criterion_9Criterion_3__getstate__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_10_criterion_9Criterion_3__getstate__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -397,7 +471,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+035:         return {}
+
+035:         return {}
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 35, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -405,7 +479,7 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 036: 
-
+037:     def __setstate__(self, d):
+
+037:     def __setstate__(self, d):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_10_criterion_9Criterion_5__setstate__(PyObject *__pyx_v_self, PyObject *__pyx_v_d); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_10_criterion_9Criterion_5__setstate__(PyObject *__pyx_v_self, PyObject *__pyx_v_d) {
@@ -430,9 +504,9 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 038:         pass
+
 038:         pass
 039: 
-
+040:     cdef int init(self, const DOUBLE_t[:, ::1] y, DOUBLE_t* sample_weight,
+
+040:     cdef int init(self, const DOUBLE_t[:, ::1] y, DOUBLE_t* sample_weight,
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_init(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_y, CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *__pyx_v_sample_weight, CYTHON_UNUSED double __pyx_v_weighted_n_samples, CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start, CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end) {
   int __pyx_r;
 
@@ -440,34 +514,34 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 041:                   double weighted_n_samples, SIZE_t* samples, SIZE_t start,
-
 042:                   SIZE_t end) nogil except -1:
-
 043:         """Placeholder for a method which will initialize the criterion.
+
 041:                   double weighted_n_samples, SIZE_t* samples, SIZE_t start,
+
 042:                   SIZE_t end) nogil except -1:
+
 043:         """Placeholder for a method which will initialize the criterion.
 044: 
-
 045:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 046:         or 0 otherwise.
+
 045:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 046:         or 0 otherwise.
 047: 
-
 048:         Parameters
-
 049:         ----------
-
 050:         y : array-like, dtype=DOUBLE_t
-
 051:             y is a buffer that stores values of the output Gramm matrix of the samples
-
 052:         sample_weight : array-like, dtype=DOUBLE_t
-
 053:             The weight of each sample
-
 054:         weighted_n_samples : double
-
 055:             The total weight of the samples being considered
-
 056:         samples : array-like, dtype=SIZE_t
-
 057:             Indices of the samples in X and y, where samples[start:end]
-
 058:             correspond to the samples in this node
-
 059:         start : SIZE_t
-
 060:             The first sample to be used on this node
-
 061:         end : SIZE_t
-
 062:             The last sample used on this node
+
 048:         Parameters
+
 049:         ----------
+
 050:         y : array-like, dtype=DOUBLE_t
+
 051:             y is a buffer that stores values of the output Gramm matrix of the samples
+
 052:         sample_weight : array-like, dtype=DOUBLE_t
+
 053:             The weight of each sample
+
 054:         weighted_n_samples : double
+
 055:             The total weight of the samples being considered
+
 056:         samples : array-like, dtype=SIZE_t
+
 057:             Indices of the samples in X and y, where samples[start:end]
+
 058:             correspond to the samples in this node
+
 059:         start : SIZE_t
+
 060:             The first sample to be used on this node
+
 061:         end : SIZE_t
+
 062:             The last sample used on this node
 063: 
-
 064:         """
+
 064:         """
 065: 
-
 066:         pass
+
 066:         pass
 067: 
-
+068:     cdef int reset(self) nogil except -1:
+
+068:     cdef int reset(self) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_reset(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self) {
   int __pyx_r;
 
@@ -475,14 +549,14 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 069:         """Reset the criterion at pos=start.
+
 069:         """Reset the criterion at pos=start.
 070: 
-
 071:         This method must be implemented by the subclass.
-
 072:         """
+
 071:         This method must be implemented by the subclass.
+
 072:         """
 073: 
-
 074:         pass
+
 074:         pass
 075: 
-
+076:     cdef int reverse_reset(self) nogil except -1:
+
+076:     cdef int reverse_reset(self) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_reverse_reset(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self) {
   int __pyx_r;
 
@@ -490,13 +564,13 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 077:         """Reset the criterion at pos=end.
+
 077:         """Reset the criterion at pos=end.
 078: 
-
 079:         This method must be implemented by the subclass.
-
 080:         """
-
 081:         pass
+
 079:         This method must be implemented by the subclass.
+
 080:         """
+
 081:         pass
 082: 
-
+083:     cdef int update(self, SIZE_t new_pos) nogil except -1:
+
+083:     cdef int update(self, SIZE_t new_pos) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_update(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self, CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_new_pos) {
   int __pyx_r;
 
@@ -504,21 +578,21 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 084:         """Updated statistics by moving samples[pos:new_pos] to the left child.
+
 084:         """Updated statistics by moving samples[pos:new_pos] to the left child.
 085: 
-
 086:         This updates the collected statistics by moving samples[pos:new_pos]
-
 087:         from the right child to the left child. It must be implemented by
-
 088:         the subclass.
+
 086:         This updates the collected statistics by moving samples[pos:new_pos]
+
 087:         from the right child to the left child. It must be implemented by
+
 088:         the subclass.
 089: 
-
 090:         Parameters
-
 091:         ----------
-
 092:         new_pos : SIZE_t
-
 093:             New starting index position of the samples in the right child
-
 094:         """
+
 090:         Parameters
+
 091:         ----------
+
 092:         new_pos : SIZE_t
+
 093:             New starting index position of the samples in the right child
+
 094:         """
 095: 
-
 096:         pass
+
 096:         pass
 097: 
-
+098:     cdef double node_impurity(self) nogil:
+
+098:     cdef double node_impurity(self) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_node_impurity(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self) {
   double __pyx_r;
 
@@ -526,58 +600,58 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 099:         """Placeholder for calculating the impurity of the node.
+
 099:         """Placeholder for calculating the impurity of the node.
 100: 
-
 101:         Placeholder for a method which will evaluate the impurity of
-
 102:         the current node, i.e. the impurity of samples[start:end]. This is the
-
 103:         primary function of the criterion class.
-
 104:         """
+
 101:         Placeholder for a method which will evaluate the impurity of
+
 102:         the current node, i.e. the impurity of samples[start:end]. This is the
+
 103:         primary function of the criterion class.
+
 104:         """
 105: 
-
 106:         pass
+
 106:         pass
 107: 
-
+108:     cdef void children_impurity(self, double* impurity_left,
+
+108:     cdef void children_impurity(self, double* impurity_left,
static void __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_children_impurity(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self, CYTHON_UNUSED double *__pyx_v_impurity_left, CYTHON_UNUSED double *__pyx_v_impurity_right) {
 
   /* function exit code */
 }
-
 109:                                 double* impurity_right) nogil:
-
 110:         """Placeholder for calculating the impurity of children.
+
 109:                                 double* impurity_right) nogil:
+
 110:         """Placeholder for calculating the impurity of children.
 111: 
-
 112:         Placeholder for a method which evaluates the impurity in
-
 113:         children nodes, i.e. the impurity of samples[start:pos] + the impurity
-
 114:         of samples[pos:end].
+
 112:         Placeholder for a method which evaluates the impurity in
+
 113:         children nodes, i.e. the impurity of samples[start:pos] + the impurity
+
 114:         of samples[pos:end].
 115: 
-
 116:         Parameters
-
 117:         ----------
-
 118:         impurity_left : double pointer
-
 119:             The memory address where the impurity of the left child should be
-
 120:             stored.
-
 121:         impurity_right : double pointer
-
 122:             The memory address where the impurity of the right child should be
-
 123:             stored
-
 124:         """
+
 116:         Parameters
+
 117:         ----------
+
 118:         impurity_left : double pointer
+
 119:             The memory address where the impurity of the left child should be
+
 120:             stored.
+
 121:         impurity_right : double pointer
+
 122:             The memory address where the impurity of the right child should be
+
 123:             stored
+
 124:         """
 125: 
-
 126:         pass
+
 126:         pass
 127: 
-
+128:     cdef void node_value(self, double* dest) nogil:
+
+128:     cdef void node_value(self, double* dest) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_node_value(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self, CYTHON_UNUSED double *__pyx_v_dest) {
 
   /* function exit code */
 }
-
 129:         """Placeholder for storing the node value.
+
 129:         """Placeholder for storing the node value.
 130: 
-
 131:         Placeholder for a method which will save the weighted
-
 132:         samples[start:end] into dest.
+
 131:         Placeholder for a method which will save the weighted 
+
 132:         samples[start:end] into dest.
 133: 
-
 134:         Parameters
-
 135:         ----------
-
 136:         dest : double pointer
-
 137:             The memory address where the node value should be stored.
-
 138:         """
+
 134:         Parameters
+
 135:         ----------
+
 136:         dest : double pointer
+
 137:             The memory address where the node value should be stored.
+
 138:         """
 139: 
-
 140:         pass
+
 140:         pass
 141: 
-
+142:     cdef double proxy_impurity_improvement(self) nogil:
+
+142:     cdef double proxy_impurity_improvement(self) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_proxy_impurity_improvement(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self) {
   double __pyx_r;
 
@@ -585,26 +659,26 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 143:         """Compute a proxy of the impurity reduction
+
 143:         """Compute a proxy of the impurity reduction
 144: 
-
 145:         This method is used to speed up the search for the best split.
-
 146:         It is a proxy quantity such that the split that maximizes this value
-
 147:         also maximizes the impurity improvement. It neglects all constant terms
-
 148:         of the impurity decrease for a given split.
+
 145:         This method is used to speed up the search for the best split.
+
 146:         It is a proxy quantity such that the split that maximizes this value
+
 147:         also maximizes the impurity improvement. It neglects all constant terms
+
 148:         of the impurity decrease for a given split.
 149: 
-
 150:         The absolute impurity improvement is only computed by the
-
 151:         impurity_improvement method once the best split has been found.
-
 152:         """
-
 153:         # cdef double impurity_left
-
 154:         # cdef double impurity_right
-
 155:         # self.children_impurity(&impurity_left, &impurity_right)
+
 150:         The absolute impurity improvement is only computed by the
+
 151:         impurity_improvement method once the best split has been found.
+
 152:         """
+
 153:         # cdef double impurity_left
+
 154:         # cdef double impurity_right
+
 155:         # self.children_impurity(&impurity_left, &impurity_right)
 156: 
-
 157:         # return (- self.weighted_n_right * impurity_right
-
 158:         #         - self.weighted_n_left * impurity_left)
+
 157:         # return (- self.weighted_n_right * impurity_right
+
 158:         #         - self.weighted_n_left * impurity_left)
 159: 
-
 160:         pass
+
 160:         pass
 161: 
-
+162:     cdef double impurity_improvement(self, double impurity) nogil:
+
+162:     cdef double impurity_improvement(self, double impurity) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_10_criterion_9Criterion_impurity_improvement(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *__pyx_v_self, double __pyx_v_impurity) {
   double __pyx_v_impurity_left;
   double __pyx_v_impurity_right;
@@ -614,32 +688,32 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 163:         """Compute the improvement in impurity
+
 163:         """Compute the improvement in impurity
 164: 
-
 165:         This method computes the improvement in impurity when a split occurs.
-
 166:         The weighted impurity improvement equation is the following:
+
 165:         This method computes the improvement in impurity when a split occurs.
+
 166:         The weighted impurity improvement equation is the following:
 167: 
-
 168:             N_t / N * (impurity - N_t_R / N_t * right_impurity
-
 169:                                 - N_t_L / N_t * left_impurity)
+
 168:             N_t / N * (impurity - N_t_R / N_t * right_impurity
+
 169:                                 - N_t_L / N_t * left_impurity)
 170: 
-
 171:         where N is the total number of samples, N_t is the number of samples
-
 172:         at the current node, N_t_L is the number of samples in the left child,
-
 173:         and N_t_R is the number of samples in the right child,
+
 171:         where N is the total number of samples, N_t is the number of samples
+
 172:         at the current node, N_t_L is the number of samples in the left child,
+
 173:         and N_t_R is the number of samples in the right child,
 174: 
-
 175:         Parameters
-
 176:         ----------
-
 177:         impurity : double
-
 178:             The initial impurity of the node before the split
+
 175:         Parameters
+
 176:         ----------
+
 177:         impurity : double
+
 178:             The initial impurity of the node before the split
 179: 
-
 180:         Return
-
 181:         ------
-
 182:         double : improvement in impurity after the split occurs
-
 183:         """
+
 180:         Return
+
 181:         ------
+
 182:         double : improvement in impurity after the split occurs
+
 183:         """
 184: 
-
 185:         cdef double impurity_left
-
 186:         cdef double impurity_right
+
 185:         cdef double impurity_left
+
 186:         cdef double impurity_right
 187: 
-
+188:         self.children_impurity(&impurity_left, &impurity_right)
+
+188:         self.children_impurity(&impurity_left, &impurity_right)
   /* "stpredictions/models/OK3/_criterion.pyx":188
  *         cdef double impurity_right
@@ -650,13 +724,13 @@
  */
   ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_vtab)->children_impurity(__pyx_v_self, (&__pyx_v_impurity_left), (&__pyx_v_impurity_right));
 
 189: 
-
+190:         return ((self.weighted_n_node_samples / self.weighted_n_samples) *
+
+190:         return ((self.weighted_n_node_samples / self.weighted_n_samples) *
  __pyx_r = ((__pyx_v_self->weighted_n_node_samples / __pyx_v_self->weighted_n_samples) * ((__pyx_v_impurity - ((__pyx_v_self->weighted_n_right / __pyx_v_self->weighted_n_node_samples) * __pyx_v_impurity_right)) - ((__pyx_v_self->weighted_n_left / __pyx_v_self->weighted_n_node_samples) * __pyx_v_impurity_left)));
   goto __pyx_L0;
-
 191:                 (impurity - (self.weighted_n_right /
-
 192:                              self.weighted_n_node_samples * impurity_right)
-
 193:                           - (self.weighted_n_left /
-
 194:                              self.weighted_n_node_samples * impurity_left)))
+
 191:                 (impurity - (self.weighted_n_right /
+
 192:                              self.weighted_n_node_samples * impurity_right)
+
 193:                           - (self.weighted_n_left /
+
 194:                              self.weighted_n_node_samples * impurity_left)))
 195: 
 196: 
 197: 
@@ -671,21 +745,21 @@
 206: 
 207: 
 208: 
-
+209: cdef class KernelizedRegressionCriterion(Criterion):
+
+209: cdef class KernelizedRegressionCriterion(Criterion):
struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion {
   struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion __pyx_base;
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_vtabptr_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion;
 
 210:     r"""Abstract kernelized output regression criterion.
 211: 
 212:     This handles cases where the target is a structured object and the Gram
 213:     matrix (the matrix of the kernel evaluated at the output samples) is given
 214:     as y. The impurity is evaluated by computing the variance of the target
 215:     values (embedded in a larger Hilbert space) left and right of the split point.
 216:     """
 217: 
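The "variance in a larger Hilbert space" never needs phi(y) explicitly: expanding ||phi(y_i) - mean||**2 leaves only Gram-matrix entries. A hedged NumPy sketch of the node impurity this criterion tracks (the helper name and unit sample weights are our assumptions):

    import numpy as np

    def kernel_variance(K, idx):
        # impurity = (1/n) * sum_i K[i,i]  -  (1/n**2) * sum_{i,j} K[i,j]
        sub = K[np.ix_(idx, idx)]
        n = len(idx)
        return np.trace(sub) / n - sub.sum() / n**2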
+218:     def __cinit__(self, SIZE_t n_samples):
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -749,55 +823,55 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
 219:         """Initialize parameters for this criterion.
 220: 
 221:         Parameters
 222:         ----------
 223:         n_samples : SIZE_t
 224:             The total number of samples to fit on
 225:         """
 226: 
 227:         # Default values
+228:         self.sample_weight = NULL
 __pyx_v_self->__pyx_base.sample_weight = NULL;

 229: 
+230:         self.samples = NULL
 __pyx_v_self->__pyx_base.samples = NULL;
+231:         self.start = 0
 __pyx_v_self->__pyx_base.start = 0;
+232:         self.pos = 0
 __pyx_v_self->__pyx_base.pos = 0;
+233:         self.end = 0
 __pyx_v_self->__pyx_base.end = 0;

 234: 
+235:         self.n_samples = n_samples
 __pyx_v_self->__pyx_base.n_samples = __pyx_v_n_samples;
+236:         self.n_node_samples = 0
 __pyx_v_self->__pyx_base.n_node_samples = 0;
+237:         self.weighted_n_node_samples = 0.0
 __pyx_v_self->__pyx_base.weighted_n_node_samples = 0.0;
+238:         self.weighted_n_left = 0.0
 __pyx_v_self->__pyx_base.weighted_n_left = 0.0;
+239:         self.weighted_n_right = 0.0
 __pyx_v_self->__pyx_base.weighted_n_right = 0.0;

 240: 
+241:         self.sum_diag_Gramm = 0.0
 __pyx_v_self->sum_diag_Gramm = 0.0;
+242:         self.sum_total_Gramm = 0.0
 __pyx_v_self->sum_total_Gramm = 0.0;

 243: 
+244:         self.sum_diag_Gramm_left = 0.0
 __pyx_v_self->sum_diag_Gramm_left = 0.0;
+245:         self.sum_diag_Gramm_right = 0.0
 __pyx_v_self->sum_diag_Gramm_right = 0.0;

 246: 
+247:         self.sum_total_Gramm_left = 0.0
 __pyx_v_self->sum_total_Gramm_left = 0.0;
+248:         self.sum_total_Gramm_right = 0.0
 __pyx_v_self->sum_total_Gramm_right = 0.0;

 249: 
 250: 
+251:     def __reduce__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_3__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_3__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -829,7 +903,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
+252:         return (type(self), (self.n_samples,), self.__getstate__())
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.n_samples); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 252, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -870,7 +944,7 @@
   __pyx_t_3 = 0;
   goto __pyx_L0;
 
 253: 
+254:     cdef int init(self, const DOUBLE_t[:, ::1] y, DOUBLE_t* sample_weight,
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_init(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self, __Pyx_memviewslice __pyx_v_y, __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *__pyx_v_sample_weight, double __pyx_v_weighted_n_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_i;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_j;
@@ -895,98 +969,98 @@
   __pyx_L0:;
   return __pyx_r;
 }
 255:                   double weighted_n_samples, SIZE_t* samples, SIZE_t start,
 256:                   SIZE_t end) nogil except -1:
 257:         """Initialize the criterion at node samples[start:end] and
 258:            children samples[start:start] and samples[start:end]."""
 259:         # Initialize fields
+260:         self.y = y
  __PYX_XDEC_MEMVIEW(&__pyx_v_self->__pyx_base.y, 0);
   __PYX_INC_MEMVIEW(&__pyx_v_y, 1);
   __pyx_v_self->__pyx_base.y = __pyx_v_y;
+261:         self.sample_weight = sample_weight
 __pyx_v_self->__pyx_base.sample_weight = __pyx_v_sample_weight;
+262:         self.samples = samples
 __pyx_v_self->__pyx_base.samples = __pyx_v_samples;
+263:         self.start = start
 __pyx_v_self->__pyx_base.start = __pyx_v_start;
+264:         self.end = end
 __pyx_v_self->__pyx_base.end = __pyx_v_end;
+265:         self.n_node_samples = end - start
 __pyx_v_self->__pyx_base.n_node_samples = (__pyx_v_end - __pyx_v_start);
+266:         self.weighted_n_samples = weighted_n_samples
 __pyx_v_self->__pyx_base.weighted_n_samples = __pyx_v_weighted_n_samples;
+267:         self.weighted_n_node_samples = 0.
 __pyx_v_self->__pyx_base.weighted_n_node_samples = 0.;
 
 268: 
 269:         cdef SIZE_t i
 270:         cdef SIZE_t j
 271:         cdef SIZE_t p
 272:         cdef SIZE_t q
+273:         cdef DOUBLE_t w_i = 1.0
 __pyx_v_w_i = 1.0;
+274:         cdef DOUBLE_t w_j = 1.0
 __pyx_v_w_j = 1.0;

 275: 
+276:         self.sum_diag_Gramm = 0.0
 __pyx_v_self->sum_diag_Gramm = 0.0;
+277:         self.sum_total_Gramm = 0.0
 __pyx_v_self->sum_total_Gramm = 0.0;

 278: 
+279:         self.sum_diag_Gramm_left = 0.0
 __pyx_v_self->sum_diag_Gramm_left = 0.0;
+280:         self.sum_diag_Gramm_right = 0.0
 __pyx_v_self->sum_diag_Gramm_right = 0.0;

 281: 
+282:         self.sum_total_Gramm_left = 0.0
 __pyx_v_self->sum_total_Gramm_left = 0.0;
+283:         self.sum_total_Gramm_right = 0.0
 __pyx_v_self->sum_total_Gramm_right = 0.0;
 
 284: 
+285:         for p in range(start, end):
 __pyx_t_1 = __pyx_v_end;
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = __pyx_v_start; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_p = __pyx_t_3;
+286:             i = samples[p]
   __pyx_v_i = (__pyx_v_samples[__pyx_v_p]);
 287:             # with gil:
 288:             #     print("print samples :",i)
 289: 
+290:             if sample_weight != NULL:
   __pyx_t_4 = ((__pyx_v_sample_weight != NULL) != 0);
    if (__pyx_t_4) {
/* … */
    }
+291:                 w_i = sample_weight[i]
     __pyx_v_w_i = (__pyx_v_sample_weight[__pyx_v_i]);

 292: 
+293:             self.weighted_n_node_samples += w_i
   __pyx_v_self->__pyx_base.weighted_n_node_samples = (__pyx_v_self->__pyx_base.weighted_n_node_samples + __pyx_v_w_i);

 294: 
+295:             self.sum_diag_Gramm += w_i * self.y[i,i]
   if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 295, __pyx_L1_error)}
    __pyx_t_5 = __pyx_v_i;
    __pyx_t_6 = __pyx_v_i;
    __pyx_v_self->sum_diag_Gramm = (__pyx_v_self->sum_diag_Gramm + (__pyx_v_w_i * (*((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=1 */ ((char *) (((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=0 */ (__pyx_v_self->__pyx_base.y.data + __pyx_t_5 * __pyx_v_self->__pyx_base.y.strides[0]) )) + __pyx_t_6)) )))));

 296: 
+297:             for q in range(start, end):
   __pyx_t_7 = __pyx_v_end;
    __pyx_t_8 = __pyx_t_7;
    for (__pyx_t_9 = __pyx_v_start; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
      __pyx_v_q = __pyx_t_9;
+298:                 j = samples[q]
     __pyx_v_j = (__pyx_v_samples[__pyx_v_q]);

 299: 
+300:                 if sample_weight != NULL:
     __pyx_t_4 = ((__pyx_v_sample_weight != NULL) != 0);
      if (__pyx_t_4) {
/* … */
      }
+301:                     w_j = sample_weight[j]
       __pyx_v_w_j = (__pyx_v_sample_weight[__pyx_v_j]);

 302: 
+303:                 self.sum_total_Gramm += w_i * w_j * self.y[i,j]
     if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 303, __pyx_L1_error)}
      __pyx_t_6 = __pyx_v_i;
      __pyx_t_5 = __pyx_v_j;
@@ -994,18 +1068,18 @@
     }
   }
 
 304: 
 305:         # Reset to pos=start
+306:         self.reset()
 __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base.reset(((struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self)); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 306, __pyx_L1_error)
 307:         # with gil:
 308:         #     print("print sum diag  :",self.sum_diag_Gramm)
 309:         #     print("print sum total :",self.sum_total_Gramm)
 310:         #     print("print weighted_n_node_samples :",self.weighted_n_node_samples)
+311:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 312: 
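In matrix form, the init pass above accumulates exactly two scalars over the node's samples. A hedged NumPy equivalent (K is the output Gram matrix passed as y, w the per-sample weights, idx = samples[start:end]; the helper name is hypothetical):

    import numpy as np

    def init_node_sums(K, w, idx):
        # sum_diag_Gramm  = sum_i   w_i * K[i, i]
        # sum_total_Gramm = sum_i,j w_i * w_j * K[i, j]
        wi = w[idx]
        sub = K[np.ix_(idx, idx)]
        return np.sum(wi * np.diag(sub)), wi @ sub @ wi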
+313:     cdef int reset(self) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_reset(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self) {
   int __pyx_r;
 /* … */
@@ -1013,33 +1087,33 @@
   __pyx_L0:;
   return __pyx_r;
 }
 314:         """Reset the criterion at pos=start."""
 315: 
+316:         self.sum_diag_Gramm_left = 0.0
 __pyx_v_self->sum_diag_Gramm_left = 0.0;
+317:         self.sum_diag_Gramm_right = self.sum_diag_Gramm
 __pyx_t_1 = __pyx_v_self->sum_diag_Gramm;
  __pyx_v_self->sum_diag_Gramm_right = __pyx_t_1;

 318: 
+319:         self.sum_total_Gramm_left = 0.0
 __pyx_v_self->sum_total_Gramm_left = 0.0;
+320:         self.sum_total_Gramm_right = self.sum_total_Gramm
 __pyx_t_1 = __pyx_v_self->sum_total_Gramm;
  __pyx_v_self->sum_total_Gramm_right = __pyx_t_1;

 321: 
+322:         self.weighted_n_left = 0.0
 __pyx_v_self->__pyx_base.weighted_n_left = 0.0;
+323:         self.weighted_n_right = self.weighted_n_node_samples
 __pyx_t_1 = __pyx_v_self->__pyx_base.weighted_n_node_samples;
  __pyx_v_self->__pyx_base.weighted_n_right = __pyx_t_1;
+324:         self.pos = self.start
 __pyx_t_2 = __pyx_v_self->__pyx_base.start;
  __pyx_v_self->__pyx_base.pos = __pyx_t_2;
+325:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 326: 
+327:     cdef int reverse_reset(self) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_reverse_reset(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self) {
   int __pyx_r;
 /* … */
@@ -1047,33 +1121,33 @@
   __pyx_L0:;
   return __pyx_r;
 }
 328:         """Reset the criterion at pos=end."""
 329: 
+330:         self.sum_diag_Gramm_right = 0.0
 __pyx_v_self->sum_diag_Gramm_right = 0.0;
+331:         self.sum_diag_Gramm_left = self.sum_diag_Gramm
 __pyx_t_1 = __pyx_v_self->sum_diag_Gramm;
  __pyx_v_self->sum_diag_Gramm_left = __pyx_t_1;

 332: 
+333:         self.sum_total_Gramm_right = 0.0
 __pyx_v_self->sum_total_Gramm_right = 0.0;
+334:         self.sum_total_Gramm_left = self.sum_total_Gramm
 __pyx_t_1 = __pyx_v_self->sum_total_Gramm;
  __pyx_v_self->sum_total_Gramm_left = __pyx_t_1;

 335: 
+336:         self.weighted_n_right = 0.0
 __pyx_v_self->__pyx_base.weighted_n_right = 0.0;
+337:         self.weighted_n_left = self.weighted_n_node_samples
 __pyx_t_1 = __pyx_v_self->__pyx_base.weighted_n_node_samples;
  __pyx_v_self->__pyx_base.weighted_n_left = __pyx_t_1;
+338:         self.pos = self.end
 __pyx_t_2 = __pyx_v_self->__pyx_base.end;
  __pyx_v_self->__pyx_base.pos = __pyx_t_2;
+339:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 340: 
+341:     cdef int update(self, SIZE_t new_pos) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_update(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_new_pos) {
   double *__pyx_v_sample_weight;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
@@ -1103,116 +1177,116 @@
   __pyx_L0:;
   return __pyx_r;
 }
 342:         """Update statistics by moving samples[pos:new_pos] to the left."""
 343: 
+344:         cdef double* sample_weight = self.sample_weight
 __pyx_t_1 = __pyx_v_self->__pyx_base.sample_weight;
  __pyx_v_sample_weight = __pyx_t_1;
+345:         cdef SIZE_t* samples = self.samples
 __pyx_t_2 = __pyx_v_self->__pyx_base.samples;
  __pyx_v_samples = __pyx_t_2;

 346: 
+347:         cdef SIZE_t start = self.start
 __pyx_t_3 = __pyx_v_self->__pyx_base.start;
  __pyx_v_start = __pyx_t_3;
+348:         cdef SIZE_t pos = self.pos
 __pyx_t_3 = __pyx_v_self->__pyx_base.pos;
  __pyx_v_pos = __pyx_t_3;
+349:         cdef SIZE_t end = self.end
 __pyx_t_3 = __pyx_v_self->__pyx_base.end;
  __pyx_v_end = __pyx_t_3;

 350: 
 351:         cdef SIZE_t i
 352:         cdef SIZE_t j
 353:         cdef SIZE_t p
 354:         cdef SIZE_t q
+355:         cdef DOUBLE_t w_i = 1.0
 __pyx_v_w_i = 1.0;
+356:         cdef DOUBLE_t w_j = 1.0
 __pyx_v_w_j = 1.0;
 
 357: 
 358:         # Update statistics up to new_pos
 359: 
+360:         for p in range(pos, new_pos):
 __pyx_t_3 = __pyx_v_new_pos;
  __pyx_t_4 = __pyx_t_3;
  for (__pyx_t_5 = __pyx_v_pos; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
    __pyx_v_p = __pyx_t_5;
+361:             i = samples[p]
   __pyx_v_i = (__pyx_v_samples[__pyx_v_p]);

 362: 
+363:             if sample_weight != NULL:
   __pyx_t_6 = ((__pyx_v_sample_weight != NULL) != 0);
    if (__pyx_t_6) {
/* … */
    }
+364:                 w_i = sample_weight[i]
     __pyx_v_w_i = (__pyx_v_sample_weight[__pyx_v_i]);

 365: 
+366:             self.sum_diag_Gramm_left += w_i * self.y[i,i]
   if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 366, __pyx_L1_error)}
    __pyx_t_7 = __pyx_v_i;
    __pyx_t_8 = __pyx_v_i;
    __pyx_v_self->sum_diag_Gramm_left = (__pyx_v_self->sum_diag_Gramm_left + (__pyx_v_w_i * (*((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=1 */ ((char *) (((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=0 */ (__pyx_v_self->__pyx_base.y.data + __pyx_t_7 * __pyx_v_self->__pyx_base.y.strides[0]) )) + __pyx_t_8)) )))));

 367: 
+368:             self.sum_diag_Gramm_right -= w_i * self.y[i,i]
   if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 368, __pyx_L1_error)}
    __pyx_t_8 = __pyx_v_i;
    __pyx_t_7 = __pyx_v_i;
    __pyx_v_self->sum_diag_Gramm_right = (__pyx_v_self->sum_diag_Gramm_right - (__pyx_v_w_i * (*((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=1 */ ((char *) (((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=0 */ (__pyx_v_self->__pyx_base.y.data + __pyx_t_8 * __pyx_v_self->__pyx_base.y.strides[0]) )) + __pyx_t_7)) )))));

 369: 
+370:             self.weighted_n_left += w_i
   __pyx_v_self->__pyx_base.weighted_n_left = (__pyx_v_self->__pyx_base.weighted_n_left + __pyx_v_w_i);

 371: 
+372:             self.weighted_n_right -= w_i
   __pyx_v_self->__pyx_base.weighted_n_right = (__pyx_v_self->__pyx_base.weighted_n_right - __pyx_v_w_i);

 373: 
+374:             for q in range(start, pos):
   __pyx_t_9 = __pyx_v_pos;
    __pyx_t_10 = __pyx_t_9;
    for (__pyx_t_11 = __pyx_v_start; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
      __pyx_v_q = __pyx_t_11;
+375:                 j = samples[q]
     __pyx_v_j = (__pyx_v_samples[__pyx_v_q]);

 376: 
+377:                 if sample_weight != NULL:
     __pyx_t_6 = ((__pyx_v_sample_weight != NULL) != 0);
      if (__pyx_t_6) {
/* … */
      }
+378:                     w_j = sample_weight[j]
       __pyx_v_w_j = (__pyx_v_sample_weight[__pyx_v_j]);

 379: 
+380:                 self.sum_total_Gramm_left += 2 * w_i * w_j * self.y[i,j]
     if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 380, __pyx_L1_error)}
      __pyx_t_7 = __pyx_v_i;
      __pyx_t_8 = __pyx_v_j;
      __pyx_v_self->sum_total_Gramm_left = (__pyx_v_self->sum_total_Gramm_left + (((2.0 * __pyx_v_w_i) * __pyx_v_w_j) * (*((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=1 */ ((char *) (((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=0 */ (__pyx_v_self->__pyx_base.y.data + __pyx_t_7 * __pyx_v_self->__pyx_base.y.strides[0]) )) + __pyx_t_8)) )))));
    }
 
 381: 
+382:             for q in range(pos, new_pos):
 __pyx_t_9 = __pyx_v_new_pos;
  __pyx_t_10 = __pyx_t_9;
  for (__pyx_t_11 = __pyx_v_pos; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
    __pyx_v_q = __pyx_t_11;
+383:                 j = samples[q]
   __pyx_v_j = (__pyx_v_samples[__pyx_v_q]);

 384: 
+385:                 if sample_weight != NULL:
   __pyx_t_6 = ((__pyx_v_sample_weight != NULL) != 0);
    if (__pyx_t_6) {
/* … */
    }
+386:                     w_j = sample_weight[j]
     __pyx_v_w_j = (__pyx_v_sample_weight[__pyx_v_j]);

 387: 
+388:                 self.sum_total_Gramm_left += w_i * w_j * self.y[i,j]
   if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 388, __pyx_L1_error)}
    __pyx_t_8 = __pyx_v_i;
    __pyx_t_7 = __pyx_v_j;
    __pyx_v_self->sum_total_Gramm_left = (__pyx_v_self->sum_total_Gramm_left + ((__pyx_v_w_i * __pyx_v_w_j) * (*((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=1 */ ((char *) (((__pyx_t_7sklearn_4tree_5_tree_DOUBLE_t const  *) ( /* dim=0 */ (__pyx_v_self->__pyx_base.y.data + __pyx_t_8 * __pyx_v_self->__pyx_base.y.strides[0]) )) + __pyx_t_7)) )))));

 389: 
+390:                 self.sum_total_Gramm_right -= w_i * w_j * self.y[i,j]
   if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 390, __pyx_L1_error)}
    __pyx_t_7 = __pyx_v_i;
    __pyx_t_8 = __pyx_v_j;
@@ -1220,39 +1294,39 @@
     }
   }
 
 391: 
+392:         for p in range(new_pos, end):
 __pyx_t_3 = __pyx_v_end;
  __pyx_t_4 = __pyx_t_3;
  for (__pyx_t_5 = __pyx_v_new_pos; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
    __pyx_v_p = __pyx_t_5;
+393:             i = samples[p]
   __pyx_v_i = (__pyx_v_samples[__pyx_v_p]);

 394: 
+395:             if sample_weight != NULL:
   __pyx_t_6 = ((__pyx_v_sample_weight != NULL) != 0);
    if (__pyx_t_6) {
/* … */
    }
+396:                 w_i = sample_weight[i]
     __pyx_v_w_i = (__pyx_v_sample_weight[__pyx_v_i]);

 397: 
+398:             for q in range(pos, new_pos):
   __pyx_t_9 = __pyx_v_new_pos;
    __pyx_t_10 = __pyx_t_9;
    for (__pyx_t_11 = __pyx_v_pos; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
      __pyx_v_q = __pyx_t_11;
+399:                 j = samples[q]
     __pyx_v_j = (__pyx_v_samples[__pyx_v_q]);

 400: 
+401:                 if sample_weight != NULL:
     __pyx_t_6 = ((__pyx_v_sample_weight != NULL) != 0);
      if (__pyx_t_6) {
/* … */
      }
+402:                     w_j = sample_weight[j]
       __pyx_v_w_j = (__pyx_v_sample_weight[__pyx_v_j]);

 403: 
+404:                 self.sum_total_Gramm_right -= 2 * w_i * w_j * self.y[i,j]
     if (unlikely(!__pyx_v_self->__pyx_base.y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(1, 404, __pyx_L1_error)}
      __pyx_t_8 = __pyx_v_i;
      __pyx_t_7 = __pyx_v_j;
@@ -1260,13 +1334,13 @@
     }
   }
 
 405: 
+406:         self.pos = new_pos
 __pyx_v_self->__pyx_base.pos = __pyx_v_new_pos;
+407:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 408: 
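update() is the hot path: rather than recomputing both children from scratch at every candidate position, it patches the four running sums when samples[pos:new_pos] move to the left child. A hedged pure-Python rendering of the same bookkeeping (the dict keys are our naming, not the module's):

    def update(K, w, samples, start, pos, new_pos, end, s):
        moved = samples[pos:new_pos]
        left = samples[start:pos]
        for i in moved:
            s['diag_left'] += w[i] * K[i, i]
            s['diag_right'] -= w[i] * K[i, i]
            s['w_left'] += w[i]
            s['w_right'] -= w[i]
            # cross terms with samples already on the left count twice
            for j in left:
                s['total_left'] += 2 * w[i] * w[j] * K[i, j]
            # pairs inside the moved block are enumerated in both orders,
            # so each (i, j) contribution is added (and removed) once here
            for j in moved:
                s['total_left'] += w[i] * w[j] * K[i, j]
                s['total_right'] -= w[i] * w[j] * K[i, j]
        for i in samples[new_pos:end]:
            for j in moved:
                s['total_right'] -= 2 * w[i] * w[j] * K[i, j]
        return new_pos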
+409:     cdef double node_impurity(self) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_node_impurity(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self) {
   double __pyx_r;
 
@@ -1274,16 +1348,16 @@
   __pyx_r = 0;
   return __pyx_r;
 }
 410:         pass
 411: 
+412:     cdef void children_impurity(self, double* impurity_left, double* impurity_right) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_children_impurity(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self, CYTHON_UNUSED double *__pyx_v_impurity_left, CYTHON_UNUSED double *__pyx_v_impurity_right) {
 
   /* function exit code */
 }
 413:         pass
 414: 
+415:     cdef void node_value(self, double* dest) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_10_criterion_29KernelizedRegressionCriterion_node_value(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion *__pyx_v_self, double *__pyx_v_dest) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_p;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_k;
@@ -1291,31 +1365,31 @@
 /* … */
   /* function exit code */
 }
 416:         """Compute the node value of samples[start:end] into dest."""
 417: 
 418:         cdef SIZE_t p
 419:         cdef SIZE_t k
+420:         cdef DOUBLE_t w = 1.0
 __pyx_v_w = 1.0;
 
 421: 
+422:         for p in range(self.start, self.end):
 __pyx_t_1 = __pyx_v_self->__pyx_base.end;
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = __pyx_v_self->__pyx_base.start; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_p = __pyx_t_3;

 423: 
+424:             k = self.samples[p]
   __pyx_v_k = (__pyx_v_self->__pyx_base.samples[__pyx_v_p]);

 425: 
+426:             if self.sample_weight != NULL:
   __pyx_t_4 = ((__pyx_v_self->__pyx_base.sample_weight != NULL) != 0);
    if (__pyx_t_4) {
/* … */
    }
+427:                 w = self.sample_weight[k]
     __pyx_v_w = (__pyx_v_self->__pyx_base.sample_weight[__pyx_v_k]);

 428: 
+429:             dest[k] = w / self.weighted_n_node_samples
   (__pyx_v_dest[__pyx_v_k]) = (__pyx_v_w / __pyx_v_self->__pyx_base.weighted_n_node_samples);
  }
 
 430: 
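node_value() stores no mean of raw targets (there may be no usable mean for structured outputs); each leaf instead keeps a weight per training sample, so the leaf's value in feature space is sum_k dest[k] * phi(y_k). A hedged Python rendering:

    def node_value(dest, samples, sample_weight, start, end, weighted_n):
        # dest has one slot per training sample; untouched slots stay 0
        for p in range(start, end):
            k = samples[p]
            w = sample_weight[k] if sample_weight is not None else 1.0
            dest[k] = w / weighted_n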
@@ -1333,7 +1407,7 @@
 442: 
 443: 
 444: 
+445: cdef class KernelizedMSE(KernelizedRegressionCriterion):
struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedMSE {
   struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedRegressionCriterion __pyx_base;
 };
@@ -1343,15 +1417,15 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_KernelizedMSE *__pyx_vtabptr_13stpredictions_6models_3OK3_10_criterion_KernelizedMSE;
 
 446:     """Mean squared error impurity criterion.
 447: 
 448:         var = \sum_i^n (phi(y_i) - phi(y)_bar) ** 2
 449:             = (\sum_i^n phi(y_i) ** 2) - n_samples * phi(y)_bar ** 2
 450: 
 451:         MSE = var_left + var_right
 452:     """
 453: 
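Expanding the docstring's identity in kernel terms (a derivation we add for clarity; unit weights, n samples, K the output Gram matrix):

    var = \sum_{i=1}^{n} \lVert \phi(y_i) - \bar{\phi} \rVert^2
        = \sum_{i=1}^{n} K_{ii} - \frac{1}{n} \sum_{i,j=1}^{n} K_{ij},
    \quad \text{with } \bar{\phi} = \frac{1}{n} \sum_{i=1}^{n} \phi(y_i).

Dividing by n gives exactly the per-sample impurity computed on line 460 below: sum_diag_Gramm / n - sum_total_Gramm / n**2.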
+454:     cdef double node_impurity(self) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_10_criterion_13KernelizedMSE_node_impurity(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedMSE *__pyx_v_self) {
   double __pyx_v_impurity;
   double __pyx_r;
@@ -1360,19 +1434,19 @@
   __pyx_L0:;
   return __pyx_r;
 }
 455:         """Evaluate the impurity of the current node, i.e. the impurity of
 456:            samples[start:end]."""
 457: 
 458:         cdef double impurity
 459: 
+460:         impurity = self.sum_diag_Gramm / self.weighted_n_node_samples - self.sum_total_Gramm / (self.weighted_n_node_samples)**2
 __pyx_v_impurity = ((__pyx_v_self->__pyx_base.sum_diag_Gramm / __pyx_v_self->__pyx_base.__pyx_base.weighted_n_node_samples) - (__pyx_v_self->__pyx_base.sum_total_Gramm / pow(__pyx_v_self->__pyx_base.__pyx_base.weighted_n_node_samples, 2.0)));

 461: 
+462:         return impurity
  __pyx_r = __pyx_v_impurity;
   goto __pyx_L0;
 
 463: 
+464:     cdef double proxy_impurity_improvement(self) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_10_criterion_13KernelizedMSE_proxy_impurity_improvement(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedMSE *__pyx_v_self) {
   double __pyx_v_proxy_impurity_left;
   double __pyx_v_proxy_impurity_right;
@@ -1382,27 +1456,27 @@
   __pyx_L0:;
   return __pyx_r;
 }
 465:         """Compute a proxy of the impurity reduction
 466: 
 467:         This method is used to speed up the search for the best split.
 468:         It is a proxy quantity such that the split that maximizes this value
 469:         also maximizes the impurity improvement. It neglects all constant terms
 470:         of the impurity decrease for a given split.
 471: 
 472:         The absolute impurity improvement is only computed by the
 473:         impurity_improvement method once the best split has been found.
 474:         """
 475: 
+476:         cdef double proxy_impurity_left = self.sum_diag_Gramm_left - self.sum_total_Gramm_left / self.weighted_n_left
 __pyx_v_proxy_impurity_left = (__pyx_v_self->__pyx_base.sum_diag_Gramm_left - (__pyx_v_self->__pyx_base.sum_total_Gramm_left / __pyx_v_self->__pyx_base.__pyx_base.weighted_n_left));
+477:         cdef double proxy_impurity_right = self.sum_diag_Gramm_right - self.sum_total_Gramm_right / self.weighted_n_right
 __pyx_v_proxy_impurity_right = (__pyx_v_self->__pyx_base.sum_diag_Gramm_right - (__pyx_v_self->__pyx_base.sum_total_Gramm_right / __pyx_v_self->__pyx_base.__pyx_base.weighted_n_right));
 
 478: 
+479:         return (- proxy_impurity_left - proxy_impurity_right)
 __pyx_r = ((-__pyx_v_proxy_impurity_left) - __pyx_v_proxy_impurity_right);
   goto __pyx_L0;
 
 480: 
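Why this proxy ranks splits correctly (a short argument we add; write D = sum_diag_Gramm, T = sum_total_Gramm): the exact weighted child impurity is

    w_L \, \mathrm{imp}_L + w_R \, \mathrm{imp}_R
      = (D_L + D_R) - \frac{T_L}{w_L} - \frac{T_R}{w_R},

and D_L + D_R = D is the same for every split position, so maximizing the proxy above (which equals T_L / w_L + T_R / w_R - D) is equivalent to minimizing the exact weighted child impurity.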
+481:     cdef void children_impurity(self, double* impurity_left, double* impurity_right) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_10_criterion_13KernelizedMSE_children_impurity(struct __pyx_obj_13stpredictions_6models_3OK3_10_criterion_KernelizedMSE *__pyx_v_self, double *__pyx_v_impurity_left, double *__pyx_v_impurity_right) {
   double __pyx_v_sum_diag_Gramm_left;
   double __pyx_v_sum_diag_Gramm_right;
@@ -1411,27 +1485,27 @@
 /* … */
   /* function exit code */
 }
 482:         """Evaluate the impurity in children nodes, i.e. the impurity of the
 483:            left child (samples[start:pos]) and the impurity of the right child
 484:            (samples[pos:end])."""
 485: 
+486:         cdef double sum_diag_Gramm_left = self.sum_diag_Gramm_left
 __pyx_t_1 = __pyx_v_self->__pyx_base.sum_diag_Gramm_left;
  __pyx_v_sum_diag_Gramm_left = __pyx_t_1;
+487:         cdef double sum_diag_Gramm_right = self.sum_diag_Gramm_right
 __pyx_t_1 = __pyx_v_self->__pyx_base.sum_diag_Gramm_right;
  __pyx_v_sum_diag_Gramm_right = __pyx_t_1;

 488: 
+489:         cdef double sum_total_Gramm_left = self.sum_total_Gramm_left
 __pyx_t_1 = __pyx_v_self->__pyx_base.sum_total_Gramm_left;
  __pyx_v_sum_total_Gramm_left = __pyx_t_1;
+490:         cdef double sum_total_Gramm_right = self.sum_total_Gramm_right
 __pyx_t_1 = __pyx_v_self->__pyx_base.sum_total_Gramm_right;
  __pyx_v_sum_total_Gramm_right = __pyx_t_1;
 
 491: 
+492:         impurity_left[0] = sum_diag_Gramm_left / self.weighted_n_left - sum_total_Gramm_left / (self.weighted_n_left)**2
 (__pyx_v_impurity_left[0]) = ((__pyx_v_sum_diag_Gramm_left / __pyx_v_self->__pyx_base.__pyx_base.weighted_n_left) - (__pyx_v_sum_total_Gramm_left / pow(__pyx_v_self->__pyx_base.__pyx_base.weighted_n_left, 2.0)));
+493:         impurity_right[0] = sum_diag_Gramm_right / self.weighted_n_right - sum_total_Gramm_right / (self.weighted_n_right)**2
 (__pyx_v_impurity_right[0]) = ((__pyx_v_sum_diag_Gramm_right / __pyx_v_self->__pyx_base.__pyx_base.weighted_n_right) - (__pyx_v_sum_total_Gramm_right / pow(__pyx_v_self->__pyx_base.__pyx_base.weighted_n_right, 2.0)));
 
 494: 
 495: 
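A quick numerical sanity check of the impurity formula on line 460 (and, per child, lines 492-493); the example is ours, with unit weights:

    >>> import numpy as np
    >>> K = np.eye(2)                    # two orthogonal outputs
    >>> K.trace() / 2 - K.sum() / 2**2
    0.5
    >>> K = np.ones((2, 2))              # two identical outputs
    >>> K.trace() / 2 - K.sum() / 2**2
    0.0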
diff --git a/stpredictions/models/OK3/_splitter.html b/stpredictions/models/OK3/_splitter.html
index 849439216..a6e8fdd15 100644
--- a/stpredictions/models/OK3/_splitter.html
+++ b/stpredictions/models/OK3/_splitter.html
@@ -286,6 +286,80 @@
 .cython.score-252 {background-color: #FFFF09;}
 .cython.score-253 {background-color: #FFFF09;}
 .cython.score-254 {background-color: #FFFF09;}
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+.cython .hll { background-color: #ffffcc }
+.cython { background: #f8f8f8; }
+.cython .c { color: #3D7B7B; font-style: italic } /* Comment */
+.cython .err { border: 1px solid #FF0000 } /* Error */
+.cython .k { color: #008000; font-weight: bold } /* Keyword */
+.cython .o { color: #666666 } /* Operator */
+.cython .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.cython .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.cython .cp { color: #9C6500 } /* Comment.Preproc */
+.cython .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.cython .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.cython .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.cython .gd { color: #A00000 } /* Generic.Deleted */
+.cython .ge { font-style: italic } /* Generic.Emph */
+.cython .gr { color: #E40000 } /* Generic.Error */
+.cython .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.cython .gi { color: #008400 } /* Generic.Inserted */
+.cython .go { color: #717171 } /* Generic.Output */
+.cython .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.cython .gs { font-weight: bold } /* Generic.Strong */
+.cython .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.cython .gt { color: #0044DD } /* Generic.Traceback */
+.cython .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.cython .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.cython .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.cython .kp { color: #008000 } /* Keyword.Pseudo */
+.cython .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.cython .kt { color: #B00040 } /* Keyword.Type */
+.cython .m { color: #666666 } /* Literal.Number */
+.cython .s { color: #BA2121 } /* Literal.String */
+.cython .na { color: #687822 } /* Name.Attribute */
+.cython .nb { color: #008000 } /* Name.Builtin */
+.cython .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+.cython .no { color: #880000 } /* Name.Constant */
+.cython .nd { color: #AA22FF } /* Name.Decorator */
+.cython .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.cython .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.cython .nf { color: #0000FF } /* Name.Function */
+.cython .nl { color: #767600 } /* Name.Label */
+.cython .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+.cython .nt { color: #008000; font-weight: bold } /* Name.Tag */
+.cython .nv { color: #19177C } /* Name.Variable */
+.cython .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+.cython .w { color: #bbbbbb } /* Text.Whitespace */
+.cython .mb { color: #666666 } /* Literal.Number.Bin */
+.cython .mf { color: #666666 } /* Literal.Number.Float */
+.cython .mh { color: #666666 } /* Literal.Number.Hex */
+.cython .mi { color: #666666 } /* Literal.Number.Integer */
+.cython .mo { color: #666666 } /* Literal.Number.Oct */
+.cython .sa { color: #BA2121 } /* Literal.String.Affix */
+.cython .sb { color: #BA2121 } /* Literal.String.Backtick */
+.cython .sc { color: #BA2121 } /* Literal.String.Char */
+.cython .dl { color: #BA2121 } /* Literal.String.Delimiter */
+.cython .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.cython .s2 { color: #BA2121 } /* Literal.String.Double */
+.cython .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.cython .sh { color: #BA2121 } /* Literal.String.Heredoc */
+.cython .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.cython .sx { color: #008000 } /* Literal.String.Other */
+.cython .sr { color: #A45A77 } /* Literal.String.Regex */
+.cython .s1 { color: #BA2121 } /* Literal.String.Single */
+.cython .ss { color: #19177C } /* Literal.String.Symbol */
+.cython .bp { color: #008000 } /* Name.Builtin.Pseudo */
+.cython .fm { color: #0000FF } /* Name.Function.Magic */
+.cython .vc { color: #19177C } /* Name.Variable.Class */
+.cython .vg { color: #19177C } /* Name.Variable.Global */
+.cython .vi { color: #19177C } /* Name.Variable.Instance */
+.cython .vm { color: #19177C } /* Name.Variable.Magic */
+.cython .il { color: #666666 } /* Literal.Number.Integer.Long */
@@ -295,31 +369,31 @@ Click on a line that starts with a "+" to see the C code that Cython generated for it.
Raw output: _splitter.c

+0001: # cython: cdivision=True
  __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 0002: # cython: boundscheck=False
 0003: # cython: wraparound=False
 0004: 
 0005: from _criterion cimport Criterion
 0006: 
 0007: from libc.stdlib cimport free
 0008: from libc.stdlib cimport qsort
 0009: from libc.string cimport memcpy
 0010: from libc.string cimport memset
 0011: 
+0012: import numpy as np
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 0013: cimport numpy as np
+0014: np.import_array()
  __pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 14, __pyx_L1_error)
 
 0015: 
+0016: from scipy.sparse import csc_matrix
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_csc_matrix);
@@ -334,13 +408,13 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
 0017: 
 0018: from sklearn.tree._utils cimport log
 0019: from sklearn.tree._utils cimport rand_int
 0020: from sklearn.tree._utils cimport rand_uniform
 0021: from sklearn.tree._utils cimport RAND_R_MAX
 0022: from sklearn.tree._utils cimport safe_realloc
 0023: 
+0024: cdef double INFINITY = np.inf
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_inf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
@@ -350,34 +424,34 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY = __pyx_t_4;
 
 0025: 
 0026: # Mitigate precision differences between 32 bit and 64 bit
+0027: cdef DTYPE_t FEATURE_THRESHOLD = 1e-7
  __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD = 1e-7;
 
 0028: 
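A hedged illustration of how such a tolerance is typically applied (the helper below is our assumption about the splitter internals, not a quote of them): a feature is treated as constant on a node when its value range does not exceed FEATURE_THRESHOLD.

    FEATURE_THRESHOLD = 1e-7

    def is_constant(values):
        # hypothetical check mirroring the constant-feature test
        return max(values) - min(values) <= FEATURE_THRESHOLD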
 0029: # Constant to switch between the non-zero value extraction algorithms
 0030: # in SparseSplitter
+0031: cdef DTYPE_t EXTRACT_NNZ_SWITCH = 0.1
  __pyx_v_13stpredictions_6models_3OK3_9_splitter_EXTRACT_NNZ_SWITCH = 0.1;
 
 0032: 
+0033: cdef inline void _init_split(SplitRecord* self, SIZE_t start_pos) nogil:
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter__init_split(struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord *__pyx_v_self, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start_pos) {
 /* … */
   /* function exit code */
 }
+0034:     self.impurity_left = INFINITY
 __pyx_v_self->impurity_left = __pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY;
+0035:     self.impurity_right = INFINITY
 __pyx_v_self->impurity_right = __pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY;
+0036:     self.pos = start_pos
 __pyx_v_self->pos = __pyx_v_start_pos;
+0037:     self.feature = 0
 __pyx_v_self->feature = 0;
+0038:     self.threshold = 0.
 __pyx_v_self->threshold = 0.;
+0039:     self.improvement = -INFINITY
 __pyx_v_self->improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
 
 0040: 
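_init_split resets a candidate record to a sentinel that any genuine split will beat. A hedged Python analogue of the struct and its initialization:

    from dataclasses import dataclass

    @dataclass
    class SplitRecord:
        impurity_left: float = float("inf")
        impurity_right: float = float("inf")
        pos: int = 0
        feature: int = 0
        threshold: float = 0.0
        improvement: float = float("-inf")  # any real split improves on this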
+0041: cdef class Splitter:
struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter {
   int (*init)(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *, PyObject *, __Pyx_memviewslice, __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *);
   int (*node_reset)(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *, __pyx_t_7sklearn_4tree_5_tree_SIZE_t, __pyx_t_7sklearn_4tree_5_tree_SIZE_t, double *);
@@ -387,13 +461,13 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_vtabptr_13stpredictions_6models_3OK3_9_splitter_Splitter;
 
 0042:     """Abstract splitter class.
 0043: 
 0044:     Splitters are called by tree builders to find the best splits on both
 0045:     sparse and dense data, one split at a time.
 0046:     """
 0047: 
+0048:     def __cinit__(self, Criterion criterion, SIZE_t max_features,
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -506,66 +580,66 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
 0049:                   SIZE_t min_samples_leaf, double min_weight_leaf,
 0050:                   object random_state):
 0051:         """
 0052:         Parameters
 0053:         ----------
 0054:         criterion : Criterion
 0055:             The criterion to measure the quality of a split.
 0056: 
 0057:         max_features : SIZE_t
 0058:             The maximal number of randomly selected features which can be
 0059:             considered for a split.
 0060: 
 0061:         min_samples_leaf : SIZE_t
 0062:             The minimal number of samples each leaf can have, where splits
 0063:             which would result in having fewer samples in a leaf are not
 0064:             considered.
 0065: 
 0066:         min_weight_leaf : double
 0067:             The minimal weight each leaf can have, where the weight is the sum
 0068:             of the weights of each sample in it.
 0069: 
 0070:         random_state : object
 0071:             The user-supplied random state to be used for pseudo-randomness
 0072:         """
 0073: 
+0074:         self.criterion = criterion
  __Pyx_INCREF(((PyObject *)__pyx_v_criterion));
   __Pyx_GIVEREF(((PyObject *)__pyx_v_criterion));
   __Pyx_GOTREF(__pyx_v_self->criterion);
   __Pyx_DECREF(((PyObject *)__pyx_v_self->criterion));
   __pyx_v_self->criterion = __pyx_v_criterion;
 
 0075: 
+0076:         self.samples = NULL
 __pyx_v_self->samples = NULL;
+0077:         self.n_samples = 0
 __pyx_v_self->n_samples = 0;
+0078:         self.features = NULL
 __pyx_v_self->features = NULL;
+0079:         self.n_features = 0
 __pyx_v_self->n_features = 0;
+0080:         self.feature_values = NULL
 __pyx_v_self->feature_values = NULL;
 
 0081: 
+0082:         self.sample_weight = NULL
 __pyx_v_self->sample_weight = NULL;
 
 0083: 
+0084:         self.max_features = max_features
 __pyx_v_self->max_features = __pyx_v_max_features;
+0085:         self.min_samples_leaf = min_samples_leaf
 __pyx_v_self->min_samples_leaf = __pyx_v_min_samples_leaf;
+0086:         self.min_weight_leaf = min_weight_leaf
 __pyx_v_self->min_weight_leaf = __pyx_v_min_weight_leaf;
+0087:         self.random_state = random_state
  __Pyx_INCREF(__pyx_v_random_state);
   __Pyx_GIVEREF(__pyx_v_random_state);
   __Pyx_GOTREF(__pyx_v_self->random_state);
   __Pyx_DECREF(__pyx_v_self->random_state);
   __pyx_v_self->random_state = __pyx_v_random_state;
 
 0088: 
+0089:     def __dealloc__(self):
/* Python wrapper */
 static void __pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
 static void __pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_3__dealloc__(PyObject *__pyx_v_self) {
@@ -584,18 +658,18 @@
   /* function exit code */
   __Pyx_RefNannyFinishContext();
 }
 0090:         """Destructor."""
 0091: 
+0092:         free(self.samples)
 free(__pyx_v_self->samples);
+0093:         free(self.features)
 free(__pyx_v_self->features);
+0094:         free(self.constant_features)
 free(__pyx_v_self->constant_features);
+0095:         free(self.feature_values)
 free(__pyx_v_self->feature_values);
 
 0096: 
+0097:     def __getstate__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_5__getstate__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_5__getstate__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -624,7 +698,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
+0098:         return {}
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -632,7 +706,7 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0099: 
+0100:     def __setstate__(self, d):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_7__setstate__(PyObject *__pyx_v_self, PyObject *__pyx_v_d); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_8Splitter_7__setstate__(PyObject *__pyx_v_self, PyObject *__pyx_v_d) {
@@ -657,9 +731,9 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
 0101:         pass
 0102: 
+0103:     cdef int init(self,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_init(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_self, PyObject *__pyx_v_X, __Pyx_memviewslice __pyx_v_y, __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *__pyx_v_sample_weight) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
@@ -685,31 +759,31 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
 0104:                    object X,
 0105:                    const DOUBLE_t[:, ::1] y,
 0106:                    DOUBLE_t* sample_weight) except -1:
 0107:         """Initialize the splitter.
 0108: 
 0109:         Take in the input data X, the target Y, and optional sample weights.
 0110: 
-
 0111:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 0112:         or 0 otherwise.
+
 0111:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 0112:         or 0 otherwise.
 0113: 
-
 0114:         Parameters
-
 0115:         ----------
-
 0116:         X : object
-
 0117:             This contains the inputs. Usually it is a 2d numpy array.
+
 0114:         Parameters
+
 0115:         ----------
+
 0116:         X : object
+
 0117:             This contains the inputs. Usually it is a 2d numpy array.
 0118: 
-
 0119:         y : ndarray, dtype=DOUBLE_t
-
 0120:             This is the vector of targets, or true labels, for the samples
+
 0119:         y : ndarray, dtype=DOUBLE_t
+
 0120:             This is the vector of targets, or true labels, for the samples
 0121: 
-
 0122:         sample_weight : DOUBLE_t*
-
 0123:             The weights of the samples, where higher weighted samples are fit
-
 0124:             closer than lower weighted samples. If not provided, all samples
-
 0125:             are assumed to have uniform weight.
-
 0126:         """
+
 0122:         sample_weight : DOUBLE_t*
+
 0123:             The weights of the samples, where higher weighted samples are fit
+
 0124:             closer than lower weighted samples. If not provided, all samples
+
 0125:             are assumed to have uniform weight.
+
 0126:         """
 0127: 
-
+0128:         self.rand_r_state = self.random_state.randint(0, RAND_R_MAX)
+
+0128:         self.rand_r_state = self.random_state.randint(0, RAND_R_MAX)
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->random_state, __pyx_n_s_randint); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_e_7sklearn_4tree_6_utils_RAND_R_MAX); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error)
@@ -764,7 +838,7 @@
   __pyx_t_7 = __Pyx_PyInt_As_npy_uint32(__pyx_t_1); if (unlikely((__pyx_t_7 == ((npy_uint32)-1)) && PyErr_Occurred())) __PYX_ERR(0, 128, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_self->rand_r_state = __pyx_t_7;
-
+0129:         cdef SIZE_t n_samples = X.shape[0]
+
+0129:         cdef SIZE_t n_samples = X.shape[0]
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 129, __pyx_L1_error)
@@ -774,25 +848,25 @@
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_v_n_samples = __pyx_t_8;
 
 0130: 
-
 0131:         # Create a new array which will be used to store nonzero
-
 0132:         # samples from the feature of interest
-
+0133:         cdef SIZE_t* samples = safe_realloc(&self.samples, n_samples)
+
 0131:         # Create a new array which will be used to store nonzero
+
 0132:         # samples from the feature of interest
+
+0133:         cdef SIZE_t* samples = safe_realloc(&self.samples, n_samples)
  __pyx_t_9 = __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->samples), __pyx_v_n_samples); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 133, __pyx_L1_error)
   __pyx_v_samples = __pyx_t_9;
 
 0134: 
-
 0135:         cdef SIZE_t i, j
-
+0136:         cdef double weighted_n_samples = 0.0
+
 0135:         cdef SIZE_t i, j
+
+0136:         cdef double weighted_n_samples = 0.0
  __pyx_v_weighted_n_samples = 0.0;
-
+0137:         j = 0
+
+0137:         j = 0
  __pyx_v_j = 0;
 
 0138: 
-
+0139:         for i in range(n_samples):
+
+0139:         for i in range(n_samples):
  __pyx_t_8 = __pyx_v_n_samples;
   __pyx_t_10 = __pyx_t_8;
   for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
     __pyx_v_i = __pyx_t_11;
-
 0140:             # Only work with positively weighted samples
-
+0141:             if sample_weight == NULL or sample_weight[i] != 0.0:
+
 0140:             # Only work with positively weighted samples
+
+0141:             if sample_weight == NULL or sample_weight[i] != 0.0:
    __pyx_t_13 = ((__pyx_v_sample_weight == NULL) != 0);
     if (!__pyx_t_13) {
     } else {
@@ -805,34 +879,34 @@
     if (__pyx_t_12) {
 /* … */
     }
-
+0142:                 samples[j] = i
+
+0142:                 samples[j] = i
      (__pyx_v_samples[__pyx_v_j]) = __pyx_v_i;
-
+0143:                 j += 1
+
+0143:                 j += 1
      __pyx_v_j = (__pyx_v_j + 1);
 
 0144: 
-
+0145:             if sample_weight != NULL:
+
+0145:             if sample_weight != NULL:
    __pyx_t_12 = ((__pyx_v_sample_weight != NULL) != 0);
     if (__pyx_t_12) {
 /* … */
       goto __pyx_L8;
     }
-
+0146:                 weighted_n_samples += sample_weight[i]
+
+0146:                 weighted_n_samples += sample_weight[i]
      __pyx_v_weighted_n_samples = (__pyx_v_weighted_n_samples + (__pyx_v_sample_weight[__pyx_v_i]));
-
 0147:             else:
-
+0148:                 weighted_n_samples += 1.0
+
 0147:             else:
+
+0148:                 weighted_n_samples += 1.0
    /*else*/ {
       __pyx_v_weighted_n_samples = (__pyx_v_weighted_n_samples + 1.0);
     }
     __pyx_L8:;
   }
 
 0149: 
-
 0150:         # Number of samples is number of positively weighted samples
-
+0151:         self.n_samples = j
+
 0150:         # Number of samples is number of positively weighted samples
+
+0151:         self.n_samples = j
  __pyx_v_self->n_samples = __pyx_v_j;
-
+0152:         self.weighted_n_samples = weighted_n_samples
+
+0152:         self.weighted_n_samples = weighted_n_samples
  __pyx_v_self->weighted_n_samples = __pyx_v_weighted_n_samples;
 
 0153: 
-
+0154:         cdef SIZE_t n_features = X.shape[1]
+
+0154:         cdef SIZE_t n_features = X.shape[1]
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 154, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error)
@@ -841,39 +915,39 @@
   __pyx_t_8 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 154, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_n_features = __pyx_t_8;
-
+0155:         cdef SIZE_t* features = safe_realloc(&self.features, n_features)
+
+0155:         cdef SIZE_t* features = safe_realloc(&self.features, n_features)
  __pyx_t_9 = __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->features), __pyx_v_n_features); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L1_error)
   __pyx_v_features = __pyx_t_9;
 
 0156: 
-
+0157:         for i in range(n_features):
+
+0157:         for i in range(n_features):
  __pyx_t_8 = __pyx_v_n_features;
   __pyx_t_10 = __pyx_t_8;
   for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
     __pyx_v_i = __pyx_t_11;
-
+0158:             features[i] = i
+
+0158:             features[i] = i
    (__pyx_v_features[__pyx_v_i]) = __pyx_v_i;
   }
 
 0159: 
-
+0160:         self.n_features = n_features
+
+0160:         self.n_features = n_features
  __pyx_v_self->n_features = __pyx_v_n_features;
 
 0161: 
-
+0162:         safe_realloc(&self.feature_values, n_samples)
+
+0162:         safe_realloc(&self.feature_values, n_samples)
  __pyx_fuse_0__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->feature_values), __pyx_v_n_samples); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L1_error)
-
+0163:         safe_realloc(&self.constant_features, n_features)
+
+0163:         safe_realloc(&self.constant_features, n_features)
  __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->constant_features), __pyx_v_n_features); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 163, __pyx_L1_error)
 
 0164: 
-
+0165:         self.y = y
+
+0165:         self.y = y
  __PYX_XDEC_MEMVIEW(&__pyx_v_self->y, 0);
   __PYX_INC_MEMVIEW(&__pyx_v_y, 0);
   __pyx_v_self->y = __pyx_v_y;
 
 0166: 
-
+0167:         self.sample_weight = sample_weight
+
+0167:         self.sample_weight = sample_weight
  __pyx_v_self->sample_weight = __pyx_v_sample_weight;
-
+0168:         return 0
+
+0168:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0169: 
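Lines 0128-0152 above seed the splitter's fast RNG from the numpy random_state and then keep only the positively weighted samples while accumulating their total weight. A minimal pure-Python sketch of that filtering pass (illustrative only, not the generated C; sample_weight=None stands in for the NULL pointer):

    def filter_samples(n_samples, sample_weight=None):
        samples = []
        weighted_n_samples = 0.0
        for i in range(n_samples):
            # Only work with positively weighted samples (lines 0141-0143)
            if sample_weight is None or sample_weight[i] != 0.0:
                samples.append(i)
            # Weight accrues for every index (lines 0145-0148);
            # a zero-weighted sample simply contributes 0.0
            weighted_n_samples += 1.0 if sample_weight is None else sample_weight[i]
        return samples, weighted_n_samples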
-
+0170:     cdef int node_reset(self, SIZE_t start, SIZE_t end,
+
+0170:     cdef int node_reset(self, SIZE_t start, SIZE_t end,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_node_reset(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_self, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end, double *__pyx_v_weighted_n_node_samples) {
   int __pyx_r;
 /* … */
@@ -892,45 +966,45 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0171:                         double* weighted_n_node_samples) nogil except -1:
-
 0172:         """Reset splitter on node samples[start:end].
+
 0171:                         double* weighted_n_node_samples) nogil except -1:
+
 0172:         """Reset splitter on node samples[start:end].
 0173: 
-
 0174:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 0175:         or 0 otherwise.
+
 0174:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 0175:         or 0 otherwise.
 0176: 
-
 0177:         Parameters
-
 0178:         ----------
-
 0179:         start : SIZE_t
-
 0180:             The index of the first sample to consider
-
 0181:         end : SIZE_t
-
 0182:             The index of the last sample to consider
-
 0183:         weighted_n_node_samples : double*
-
 0184:             The total weight of those samples
-
 0185:         """
+
 0177:         Parameters
+
 0178:         ----------
+
 0179:         start : SIZE_t
+
 0180:             The index of the first sample to consider
+
 0181:         end : SIZE_t
+
 0182:             The index of the last sample to consider
+
 0183:         weighted_n_node_samples : double*
+
 0184:             The total weight of those samples
+
 0185:         """
 0186: 
-
+0187:         self.start = start
+
+0187:         self.start = start
  __pyx_v_self->start = __pyx_v_start;
-
+0188:         self.end = end
+
+0188:         self.end = end
  __pyx_v_self->end = __pyx_v_end;
 
 0189: 
-
+0190:         self.criterion.init(self.y,
+
+0190:         self.criterion.init(self.y,
  if (unlikely(!__pyx_v_self->y.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 190, __pyx_L1_error)}
 /* … */
   __pyx_t_1 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->criterion->__pyx_vtab)->init(__pyx_v_self->criterion, __pyx_v_self->y, __pyx_v_self->sample_weight, __pyx_v_self->weighted_n_samples, __pyx_v_self->samples, __pyx_v_start, __pyx_v_end); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 190, __pyx_L1_error)
-
 0191:                             self.sample_weight,
-
 0192:                             self.weighted_n_samples,
-
 0193:                             self.samples,
-
 0194:                             start,
-
 0195:                             end)
+
 0191:                             self.sample_weight,
+
 0192:                             self.weighted_n_samples,
+
 0193:                             self.samples,
+
 0194:                             start,
+
 0195:                             end)
 0196: 
-
+0197:         weighted_n_node_samples[0] = self.criterion.weighted_n_node_samples
+
+0197:         weighted_n_node_samples[0] = self.criterion.weighted_n_node_samples
  __pyx_t_2 = __pyx_v_self->criterion->weighted_n_node_samples;
   (__pyx_v_weighted_n_node_samples[0]) = __pyx_t_2;
-
+0198:         return 0
+
+0198:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0199: 
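node_reset (lines 0187-0198) restricts the splitter to samples[start:end], re-initializes the criterion on that window, and reports the node's total weight through a double* out-parameter. A rough Python rendering of the same contract, with a return value in place of the pointer:

    def node_reset(splitter, start, end):
        splitter.start, splitter.end = start, end
        splitter.criterion.init(splitter.y, splitter.sample_weight,
                                splitter.weighted_n_samples, splitter.samples,
                                start, end)
        # the C code writes this through weighted_n_node_samples[0] instead
        return splitter.criterion.weighted_n_node_samples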
-
+0200:     cdef int node_split(self, double impurity, SplitRecord* split,
+
+0200:     cdef int node_split(self, double impurity, SplitRecord* split,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_node_split(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_self, CYTHON_UNUSED double __pyx_v_impurity, CYTHON_UNUSED struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord *__pyx_v_split, CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_n_constant_features) {
   int __pyx_r;
 
@@ -938,28 +1012,28 @@
   __pyx_r = 0;
   return __pyx_r;
 }
-
 0201:                         SIZE_t* n_constant_features) nogil except -1:
-
 0202:         """Find the best split on node samples[start:end].
+
 0201:                         SIZE_t* n_constant_features) nogil except -1:
+
 0202:         """Find the best split on node samples[start:end].
 0203: 
-
 0204:         This is a placeholder method. The majority of computation will be done
-
 0205:         here.
+
 0204:         This is a placeholder method. The majority of computation will be done
+
 0205:         here.
 0206: 
-
 0207:         It should return -1 upon errors.
-
 0208:         """
+
 0207:         It should return -1 upon errors.
+
 0208:         """
 0209: 
-
 0210:         pass
+
 0210:         pass
 0211: 
-
+0212:     cdef void node_value(self, double* dest) nogil:
+
+0212:     cdef void node_value(self, double* dest) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_node_value(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_self, double *__pyx_v_dest) {
 /* … */
   /* function exit code */
 }
-
 0213:         """Copy the value of node samples[start:end] into dest."""
+
 0213:         """Copy the value of node samples[start:end] into dest."""
 0214: 
-
+0215:         self.criterion.node_value(dest)
+
+0215:         self.criterion.node_value(dest)
  ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->criterion->__pyx_vtab)->node_value(__pyx_v_self->criterion, __pyx_v_dest);
 
 0216: 
-
+0217:     cdef double node_impurity(self) nogil:
+
+0217:     cdef double node_impurity(self) nogil:
static double __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_node_impurity(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_self) {
   double __pyx_r;
 /* … */
@@ -967,14 +1041,14 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0218:         """Return the impurity of the current node."""
+
 0218:         """Return the impurity of the current node."""
 0219: 
-
+0220:         return self.criterion.node_impurity()
+
+0220:         return self.criterion.node_impurity()
  __pyx_r = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->criterion->__pyx_vtab)->node_impurity(__pyx_v_self->criterion);
   goto __pyx_L0;
 
 0221: 
 0222: 
-
+0223: cdef class BaseDenseSplitter(Splitter):
+
+0223: cdef class BaseDenseSplitter(Splitter):
struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseDenseSplitter {
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter __pyx_base;
   __Pyx_memviewslice X;
@@ -986,11 +1060,11 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_BaseDenseSplitter *__pyx_vtabptr_13stpredictions_6models_3OK3_9_splitter_BaseDenseSplitter;
 
-
 0224:     cdef const DTYPE_t[:, :] X
+
 0224:     cdef const DTYPE_t[:, :] X
 0225: 
-
 0226:     cdef SIZE_t n_total_samples
+
 0226:     cdef SIZE_t n_total_samples
 0227: 
-
+0228:     cdef int init(self,
+
+0228:     cdef int init(self,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_17BaseDenseSplitter_init(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseDenseSplitter *__pyx_v_self, PyObject *__pyx_v_X, __Pyx_memviewslice __pyx_v_y, __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *__pyx_v_sample_weight) {
   int __pyx_r;
   __Pyx_RefNannyDeclarations
@@ -1005,31 +1079,31 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0229:                   object X,
-
 0230:                   const DOUBLE_t[:, ::1] y,
-
 0231:                   DOUBLE_t* sample_weight) except -1:
-
 0232:         """Initialize the splitter
+
 0229:                   object X,
+
 0230:                   const DOUBLE_t[:, ::1] y,
+
 0231:                   DOUBLE_t* sample_weight) except -1:
+
 0232:         """Initialize the splitter
 0233: 
-
 0234:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 0235:         or 0 otherwise.
-
 0236:         """
+
 0234:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 0235:         or 0 otherwise.
+
 0236:         """
 0237: 
-
 0238:         # Call parent init
-
+0239:         Splitter.init(self, X, y, sample_weight)
+
 0238:         # Call parent init
+
+0239:         Splitter.init(self, X, y, sample_weight)
  __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_init(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_self), __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 239, __pyx_L1_error)
 
 0240: 
-
+0241:         self.X = X
+
+0241:         self.X = X
  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7sklearn_4tree_5_tree_DTYPE_t__const__(__pyx_v_X, 0); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 241, __pyx_L1_error)
   __PYX_XDEC_MEMVIEW(&__pyx_v_self->X, 0);
   __pyx_v_self->X = __pyx_t_2;
   __pyx_t_2.memview = NULL;
   __pyx_t_2.data = NULL;
-
+0242:         return 0
+
+0242:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0243: 
 0244: 
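BaseDenseSplitter.init (lines 0228-0242) adds a single step on top of Splitter.init: it stores a typed memoryview of the dense X so node_split can index it without the GIL. Schematically (a sketch, not the generated code):

    def init(self, X, y, sample_weight):
        Splitter.init(self, X, y, sample_weight)  # parent allocates samples/features
        self.X = X  # kept as a const DTYPE_t[:, :] view in the Cython source
        return 0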
-
+0245: cdef class BestSplitter(BaseDenseSplitter):
+
+0245: cdef class BestSplitter(BaseDenseSplitter):
struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BestSplitter {
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseDenseSplitter __pyx_base;
 };
@@ -1039,8 +1113,8 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_BestSplitter *__pyx_vtabptr_13stpredictions_6models_3OK3_9_splitter_BestSplitter;
 
-
 0246:     """Splitter for finding the best split."""
-
+0247:     def __reduce__(self):
+
 0246:     """Splitter for finding the best split."""
+
+0247:     def __reduce__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_12BestSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_12BestSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -1072,7 +1146,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0248:         return (BestSplitter, (self.criterion,
+
+0248:         return (BestSplitter, (self.criterion,
  __Pyx_XDECREF(__pyx_r);
 /* … */
   __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 248, __pyx_L1_error)
@@ -1107,16 +1181,16 @@
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
-
+0249:                                self.max_features,
+
+0249:                                self.max_features,
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.max_features); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
-
+0250:                                self.min_samples_leaf,
+
+0250:                                self.min_samples_leaf,
  __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 250, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
-
+0251:                                self.min_weight_leaf,
+
+0251:                                self.min_weight_leaf,
  __pyx_t_3 = PyFloat_FromDouble(__pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 251, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
-
+0252:                                self.random_state), self.__getstate__())
+
+0252:                                self.random_state), self.__getstate__())
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_getstate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 252, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_1 = NULL;
@@ -1135,7 +1209,7 @@
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
 0253: 
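__reduce__ (lines 0247-0252) is the standard pickle hook: the class, its constructor arguments, and the __getstate__ payload (an empty dict, per line 0098). Laid out flat:

    def __reduce__(self):
        return (BestSplitter,
                (self.criterion, self.max_features, self.min_samples_leaf,
                 self.min_weight_leaf, self.random_state),
                self.__getstate__())  # {} from Splitter.__getstate__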
-
+0254:     cdef int node_split(self, double impurity, SplitRecord* split,
+
+0254:     cdef int node_split(self, double impurity, SplitRecord* split,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_12BestSplitter_node_split(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BestSplitter *__pyx_v_self, double __pyx_v_impurity, struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord *__pyx_v_split, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_n_constant_features) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start;
@@ -1179,92 +1253,92 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0255:                         SIZE_t* n_constant_features) nogil except -1:
-
 0256:         """Find the best split on node samples[start:end]
+
 0255:                         SIZE_t* n_constant_features) nogil except -1:
+
 0256:         """Find the best split on node samples[start:end]
 0257: 
-
 0258:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 0259:         or 0 otherwise.
-
 0260:         """
-
 0261:         # Find the best split
-
+0262:         cdef SIZE_t* samples = self.samples
+
 0258:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 0259:         or 0 otherwise.
+
 0260:         """
+
 0261:         # Find the best split
+
+0262:         cdef SIZE_t* samples = self.samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.samples;
   __pyx_v_samples = __pyx_t_1;
-
+0263:         cdef SIZE_t start = self.start
+
+0263:         cdef SIZE_t start = self.start
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.start;
   __pyx_v_start = __pyx_t_2;
-
+0264:         cdef SIZE_t end = self.end
+
+0264:         cdef SIZE_t end = self.end
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.end;
   __pyx_v_end = __pyx_t_2;
 
 0265: 
-
+0266:         cdef SIZE_t* features = self.features
+
+0266:         cdef SIZE_t* features = self.features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.features;
   __pyx_v_features = __pyx_t_1;
-
+0267:         cdef SIZE_t* constant_features = self.constant_features
+
+0267:         cdef SIZE_t* constant_features = self.constant_features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.constant_features;
   __pyx_v_constant_features = __pyx_t_1;
-
+0268:         cdef SIZE_t n_features = self.n_features
+
+0268:         cdef SIZE_t n_features = self.n_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.n_features;
   __pyx_v_n_features = __pyx_t_2;
 
 0269: 
-
+0270:         cdef DTYPE_t* Xf = self.feature_values
+
+0270:         cdef DTYPE_t* Xf = self.feature_values
  __pyx_t_3 = __pyx_v_self->__pyx_base.__pyx_base.feature_values;
   __pyx_v_Xf = __pyx_t_3;
-
+0271:         cdef SIZE_t max_features = self.max_features
+
+0271:         cdef SIZE_t max_features = self.max_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.max_features;
   __pyx_v_max_features = __pyx_t_2;
-
+0272:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
+
+0272:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf;
   __pyx_v_min_samples_leaf = __pyx_t_2;
-
+0273:         cdef double min_weight_leaf = self.min_weight_leaf
+
+0273:         cdef double min_weight_leaf = self.min_weight_leaf
  __pyx_t_4 = __pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf;
   __pyx_v_min_weight_leaf = __pyx_t_4;
-
+0274:         cdef UINT32_t* random_state = &self.rand_r_state
+
+0274:         cdef UINT32_t* random_state = &self.rand_r_state
  __pyx_v_random_state = (&__pyx_v_self->__pyx_base.__pyx_base.rand_r_state);
 
 0275: 
-
 0276:         cdef SplitRecord best, current
-
+0277:         cdef double current_proxy_improvement = -INFINITY
+
 0276:         cdef SplitRecord best, current
+
+0277:         cdef double current_proxy_improvement = -INFINITY
  __pyx_v_current_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
-
+0278:         cdef double best_proxy_improvement = -INFINITY
+
+0278:         cdef double best_proxy_improvement = -INFINITY
  __pyx_v_best_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
 
 0279: 
-
+0280:         cdef SIZE_t f_i = n_features
+
+0280:         cdef SIZE_t f_i = n_features
  __pyx_v_f_i = __pyx_v_n_features;
-
 0281:         cdef SIZE_t f_j
-
 0282:         cdef SIZE_t p
-
 0283:         cdef SIZE_t feature_idx_offset
-
 0284:         cdef SIZE_t feature_offset
-
 0285:         cdef SIZE_t i
-
 0286:         cdef SIZE_t j
+
 0281:         cdef SIZE_t f_j
+
 0282:         cdef SIZE_t p
+
 0283:         cdef SIZE_t feature_idx_offset
+
 0284:         cdef SIZE_t feature_offset
+
 0285:         cdef SIZE_t i
+
 0286:         cdef SIZE_t j
 0287: 
-
+0288:         cdef SIZE_t n_visited_features = 0
+
+0288:         cdef SIZE_t n_visited_features = 0
  __pyx_v_n_visited_features = 0;
-
 0289:         # Number of features discovered to be constant during the split search
-
+0290:         cdef SIZE_t n_found_constants = 0
+
 0289:         # Number of features discovered to be constant during the split search
+
+0290:         cdef SIZE_t n_found_constants = 0
  __pyx_v_n_found_constants = 0;
-
 0291:         # Number of features known to be constant and drawn without replacement
-
+0292:         cdef SIZE_t n_drawn_constants = 0
+
 0291:         # Number of features known to be constant and drawn without replacement
+
+0292:         cdef SIZE_t n_drawn_constants = 0
  __pyx_v_n_drawn_constants = 0;
-
+0293:         cdef SIZE_t n_known_constants = n_constant_features[0]
+
+0293:         cdef SIZE_t n_known_constants = n_constant_features[0]
  __pyx_v_n_known_constants = (__pyx_v_n_constant_features[0]);
-
 0294:         # n_total_constants = n_known_constants + n_found_constants
-
+0295:         cdef SIZE_t n_total_constants = n_known_constants
+
 0294:         # n_total_constants = n_known_constants + n_found_constants
+
+0295:         cdef SIZE_t n_total_constants = n_known_constants
  __pyx_v_n_total_constants = __pyx_v_n_known_constants;
-
 0296:         cdef DTYPE_t current_feature_value
-
 0297:         cdef SIZE_t partition_end
+
 0296:         cdef DTYPE_t current_feature_value
+
 0297:         cdef SIZE_t partition_end
 0298: 
-
+0299:         _init_split(&best, end)
+
+0299:         _init_split(&best, end)
  __pyx_f_13stpredictions_6models_3OK3_9_splitter__init_split((&__pyx_v_best), __pyx_v_end);
 
 0300: 
-
 0301:         # Sample up to max_features without replacement using a
-
 0302:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
-
 0303:         # `f_j` to compute a permutation of the `features` array).
-
 0304:         #
-
 0305:         # Skip the CPU intensive evaluation of the impurity criterion for
-
 0306:         # features that were already detected as constant (hence not suitable
-
 0307:         # for good splitting) by ancestor nodes and save the information on
-
 0308:         # newly discovered constant features to spare computation on descendant
-
 0309:         # nodes.
-
+0310:         while (f_i > n_total_constants and  # Stop early if remaining features
+
 0301:         # Sample up to max_features without replacement using a
+
 0302:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
+
 0303:         # `f_j` to compute a permutation of the `features` array).
+
 0304:         #
+
 0305:         # Skip the CPU intensive evaluation of the impurity criterion for
+
 0306:         # features that were already detected as constant (hence not suitable
+
 0307:         # for good splitting) by ancestor nodes and save the information on
+
 0308:         # newly discovered constant features to spare computation on descendant
+
 0309:         # nodes.
+
+0310:         while (f_i > n_total_constants and  # Stop early if remaining features
  while (1) {
     __pyx_t_6 = ((__pyx_v_f_i > __pyx_v_n_total_constants) != 0);
     if (__pyx_t_6) {
@@ -1272,122 +1346,122 @@
       __pyx_t_5 = __pyx_t_6;
       goto __pyx_L5_bool_binop_done;
     }
-
 0311:                                             # are constant
-
+0312:                 (n_visited_features < max_features or
+
 0311:                                             # are constant
+
+0312:                 (n_visited_features < max_features or
    __pyx_t_6 = ((__pyx_v_n_visited_features < __pyx_v_max_features) != 0);
     if (!__pyx_t_6) {
     } else {
       __pyx_t_5 = __pyx_t_6;
       goto __pyx_L5_bool_binop_done;
     }
-
 0313:                  # At least one drawn feature must be non-constant
-
+0314:                  n_visited_features <= n_found_constants + n_drawn_constants)):
+
 0313:                  # At least one drawn feature must be non-constant
+
+0314:                  n_visited_features <= n_found_constants + n_drawn_constants)):
    __pyx_t_6 = ((__pyx_v_n_visited_features <= (__pyx_v_n_found_constants + __pyx_v_n_drawn_constants)) != 0);
     __pyx_t_5 = __pyx_t_6;
     __pyx_L5_bool_binop_done:;
     if (!__pyx_t_5) break;
 
 0315: 
-
+0316:             n_visited_features += 1
+
+0316:             n_visited_features += 1
    __pyx_v_n_visited_features = (__pyx_v_n_visited_features + 1);
 
 0317: 
-
 0318:             # Loop invariant: elements of features in
-
 0319:             # - [:n_drawn_constant[ holds drawn and known constant features;
-
 0320:             # - [n_drawn_constant:n_known_constant[ holds known constant
-
 0321:             #   features that haven't been drawn yet;
-
 0322:             # - [n_known_constant:n_total_constant[ holds newly found constant
-
 0323:             #   features;
-
 0324:             # - [n_total_constant:f_i[ holds features that haven't been drawn
-
 0325:             #   yet and aren't constant apriori.
-
 0326:             # - [f_i:n_features[ holds features that have been drawn
-
 0327:             #   and aren't constant.
+
 0318:             # Loop invariant: elements of features in
+
 0319:             # - [:n_drawn_constant[ holds drawn and known constant features;
+
 0320:             # - [n_drawn_constant:n_known_constant[ holds known constant
+
 0321:             #   features that haven't been drawn yet;
+
 0322:             # - [n_known_constant:n_total_constant[ holds newly found constant
+
 0323:             #   features;
+
 0324:             # - [n_total_constant:f_i[ holds features that haven't been drawn
+
 0325:             #   yet and aren't constant a priori.
+
 0326:             # - [f_i:n_features[ holds features that have been drawn
+
 0327:             #   and aren't constant.
 0328: 
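One iteration of the Fisher-Yates draw with its constant-feature bookkeeping (lines 0330-0341) condenses to the sketch below; random.randrange stands in for the rand_int helper, and both draw from the half-open interval [lo, hi):

    import random

    def draw_feature(features, f_i, n_drawn_constants, n_known_constants,
                     n_found_constants):
        f_j = random.randrange(n_drawn_constants, f_i - n_found_constants)
        if f_j < n_known_constants:
            # a known constant: swap it into the drawn-constants prefix
            features[n_drawn_constants], features[f_j] = \
                features[f_j], features[n_drawn_constants]
            return None, n_drawn_constants + 1
        f_j += n_found_constants  # shift past newly found constants
        return features[f_j], n_drawn_constants  # candidate feature to evaluate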
-
 0329:             # Draw a feature at random
-
+0330:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
+
 0329:             # Draw a feature at random
+
+0330:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
    __pyx_v_f_j = __pyx_f_7sklearn_4tree_6_utils_rand_int(__pyx_v_n_drawn_constants, (__pyx_v_f_i - __pyx_v_n_found_constants), __pyx_v_random_state);
-
 0331:                            random_state)
+
 0331:                            random_state)
 0332: 
-
+0333:             if f_j < n_known_constants:
+
+0333:             if f_j < n_known_constants:
    __pyx_t_5 = ((__pyx_v_f_j < __pyx_v_n_known_constants) != 0);
     if (__pyx_t_5) {
 /* … */
       goto __pyx_L8;
     }
-
 0334:                 # f_j in the interval [n_drawn_constants, n_known_constants[
-
+0335:                 features[n_drawn_constants], features[f_j] = features[f_j], features[n_drawn_constants]
+
 0334:                 # f_j in the interval [n_drawn_constants, n_known_constants[
+
+0335:                 features[n_drawn_constants], features[f_j] = features[f_j], features[n_drawn_constants]
      __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
       __pyx_t_7 = (__pyx_v_features[__pyx_v_n_drawn_constants]);
       (__pyx_v_features[__pyx_v_n_drawn_constants]) = __pyx_t_2;
       (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_7;
 
 0336: 
-
+0337:                 n_drawn_constants += 1
+
+0337:                 n_drawn_constants += 1
      __pyx_v_n_drawn_constants = (__pyx_v_n_drawn_constants + 1);
 
 0338: 
-
 0339:             else:
-
 0340:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
-
+0341:                 f_j += n_found_constants
+
 0339:             else:
+
 0340:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
+
+0341:                 f_j += n_found_constants
    /*else*/ {
       __pyx_v_f_j = (__pyx_v_f_j + __pyx_v_n_found_constants);
-
 0342:                 # f_j in the interval [n_total_constants, f_i[
-
+0343:                 current.feature = features[f_j]
+
 0342:                 # f_j in the interval [n_total_constants, f_i[
+
+0343:                 current.feature = features[f_j]
      __pyx_v_current.feature = (__pyx_v_features[__pyx_v_f_j]);
 
 0344: 
-
 0345:                 # Sort samples along that feature by
-
 0346:                 # copying the values into an array and
-
 0347:                 # sorting the array in a manner which utilizes the cache more
-
 0348:                 # effectively.
-
+0349:                 for i in range(start, end):
+
 0345:                 # Sort samples along that feature by
+
 0346:                 # copying the values into an array and
+
 0347:                 # sorting the array in a manner which utilizes the cache more
+
 0348:                 # effectively.
+
+0349:                 for i in range(start, end):
      __pyx_t_7 = __pyx_v_end;
       __pyx_t_2 = __pyx_t_7;
       for (__pyx_t_8 = __pyx_v_start; __pyx_t_8 < __pyx_t_2; __pyx_t_8+=1) {
         __pyx_v_i = __pyx_t_8;
-
+0350:                     Xf[i] = self.X[samples[i], current.feature]
+
+0350:                     Xf[i] = self.X[samples[i], current.feature]
        if (unlikely(!__pyx_v_self->__pyx_base.X.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 350, __pyx_L1_error)}
         __pyx_t_9 = (__pyx_v_samples[__pyx_v_i]);
         __pyx_t_10 = __pyx_v_current.feature;
         (__pyx_v_Xf[__pyx_v_i]) = (*((__pyx_t_7sklearn_4tree_5_tree_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_self->__pyx_base.X.data + __pyx_t_9 * __pyx_v_self->__pyx_base.X.strides[0]) ) + __pyx_t_10 * __pyx_v_self->__pyx_base.X.strides[1]) )));
       }
 
 0351: 
-
+0352:                 sort(Xf + start, samples + start, end - start)
+
+0352:                 sort(Xf + start, samples + start, end - start)
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_sort((__pyx_v_Xf + __pyx_v_start), (__pyx_v_samples + __pyx_v_start), (__pyx_v_end - __pyx_v_start));
 
 0353: 
-
+0354:                 if Xf[end - 1] <= Xf[start] + FEATURE_THRESHOLD:
+
+0354:                 if Xf[end - 1] <= Xf[start] + FEATURE_THRESHOLD:
      __pyx_t_5 = (((__pyx_v_Xf[(__pyx_v_end - 1)]) <= ((__pyx_v_Xf[__pyx_v_start]) + __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD)) != 0);
       if (__pyx_t_5) {
 /* … */
         goto __pyx_L11;
       }
-
+0355:                     features[f_j], features[n_total_constants] = features[n_total_constants], features[f_j]
+
+0355:                     features[f_j], features[n_total_constants] = features[n_total_constants], features[f_j]
        __pyx_t_7 = (__pyx_v_features[__pyx_v_n_total_constants]);
         __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_7;
         (__pyx_v_features[__pyx_v_n_total_constants]) = __pyx_t_2;
 
 0356: 
-
+0357:                     n_found_constants += 1
+
+0357:                     n_found_constants += 1
        __pyx_v_n_found_constants = (__pyx_v_n_found_constants + 1);
-
+0358:                     n_total_constants += 1
+
+0358:                     n_total_constants += 1
        __pyx_v_n_total_constants = (__pyx_v_n_total_constants + 1);
 
 0359: 
-
 0360:                 else:
-
+0361:                     f_i -= 1
+
 0360:                 else:
+
+0361:                     f_i -= 1
      /*else*/ {
         __pyx_v_f_i = (__pyx_v_f_i - 1);
-
+0362:                     features[f_i], features[f_j] = features[f_j], features[f_i]
+
+0362:                     features[f_i], features[f_j] = features[f_j], features[f_i]
        __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
         __pyx_t_7 = (__pyx_v_features[__pyx_v_f_i]);
         (__pyx_v_features[__pyx_v_f_i]) = __pyx_t_2;
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_7;
 
 0363: 
-
 0364:                     # Evaluate all splits
-
+0365:                     self.criterion.reset()
+
 0364:                     # Evaluate all splits
+
+0365:                     self.criterion.reset()
        __pyx_t_11 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 365, __pyx_L1_error)
-
+0366:                     p = start
+
+0366:                     p = start
        __pyx_v_p = __pyx_v_start;
 
 0367: 
-
+0368:                     while p < end:
+
+0368:                     while p < end:
        while (1) {
           __pyx_t_5 = ((__pyx_v_p < __pyx_v_end) != 0);
           if (!__pyx_t_5) break;
-
+0369:                         while (p + 1 < end and
+
+0369:                         while (p + 1 < end and
          while (1) {
             __pyx_t_6 = (((__pyx_v_p + 1) < __pyx_v_end) != 0);
             if (__pyx_t_6) {
@@ -1395,23 +1469,23 @@
               __pyx_t_5 = __pyx_t_6;
               goto __pyx_L16_bool_binop_done;
             }
-
+0370:                                Xf[p + 1] <= Xf[p] + FEATURE_THRESHOLD):
+
+0370:                                Xf[p + 1] <= Xf[p] + FEATURE_THRESHOLD):
            __pyx_t_6 = (((__pyx_v_Xf[(__pyx_v_p + 1)]) <= ((__pyx_v_Xf[__pyx_v_p]) + __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD)) != 0);
             __pyx_t_5 = __pyx_t_6;
             __pyx_L16_bool_binop_done:;
             if (!__pyx_t_5) break;
-
+0371:                             p += 1
+
+0371:                             p += 1
            __pyx_v_p = (__pyx_v_p + 1);
           }
 
 0372: 
-
 0373:                         # (p + 1 >= end) or (X[samples[p + 1], current.feature] >
-
 0374:                         #                    X[samples[p], current.feature])
-
+0375:                         p += 1
+
 0373:                         # (p + 1 >= end) or (X[samples[p + 1], current.feature] >
+
 0374:                         #                    X[samples[p], current.feature])
+
+0375:                         p += 1
          __pyx_v_p = (__pyx_v_p + 1);
-
 0376:                         # (p >= end) or (X[samples[p], current.feature] >
-
 0377:                         #                X[samples[p - 1], current.feature])
+
 0376:                         # (p >= end) or (X[samples[p], current.feature] >
+
 0377:                         #                X[samples[p - 1], current.feature])
 0378: 
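Lines 0369-0375 advance p past runs of values that sit within FEATURE_THRESHOLD of each other, so every candidate split falls between two genuinely distinct feature values. Condensed (evaluate_split_at is a hypothetical stand-in for lines 0380-0412):

    while p < end:
        while p + 1 < end and xf[p + 1] <= xf[p] + FEATURE_THRESHOLD:
            p += 1
        p += 1  # now p == end, or xf[p] > xf[p - 1] + FEATURE_THRESHOLD
        if p < end:
            evaluate_split_at(p)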
-
+0379:                         if p < end:
+
+0379:                         if p < end:
          __pyx_t_5 = ((__pyx_v_p < __pyx_v_end) != 0);
           if (__pyx_t_5) {
 /* … */
@@ -1423,11 +1497,11 @@
     }
     __pyx_L8:;
   }
-
+0380:                             current.pos = p
+
+0380:                             current.pos = p
            __pyx_v_current.pos = __pyx_v_p;
 
 0381: 
-
 0382:                             # Reject if min_samples_leaf is not guaranteed
-
+0383:                             if (((current.pos - start) < min_samples_leaf) or
+
 0382:                             # Reject if min_samples_leaf is not guaranteed
+
+0383:                             if (((current.pos - start) < min_samples_leaf) or
            __pyx_t_6 = (((__pyx_v_current.pos - __pyx_v_start) < __pyx_v_min_samples_leaf) != 0);
             if (!__pyx_t_6) {
             } else {
@@ -1438,18 +1512,18 @@
             if (__pyx_t_5) {
 /* … */
             }
-
+0384:                                     ((end - current.pos) < min_samples_leaf)):
+
+0384:                                     ((end - current.pos) < min_samples_leaf)):
            __pyx_t_6 = (((__pyx_v_end - __pyx_v_current.pos) < __pyx_v_min_samples_leaf) != 0);
             __pyx_t_5 = __pyx_t_6;
             __pyx_L20_bool_binop_done:;
-
+0385:                                 continue
+
+0385:                                 continue
              goto __pyx_L12_continue;
 
 0386: 
-
+0387:                             self.criterion.update(current.pos)
+
+0387:                             self.criterion.update(current.pos)
            __pyx_t_11 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_current.pos); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 387, __pyx_L1_error)
 
 0388: 
-
 0389:                             # Reject if min_weight_leaf is not satisfied
-
+0390:                             if ((self.criterion.weighted_n_left < min_weight_leaf) or
+
 0389:                             # Reject if min_weight_leaf is not satisfied
+
+0390:                             if ((self.criterion.weighted_n_left < min_weight_leaf) or
            __pyx_t_6 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_left < __pyx_v_min_weight_leaf) != 0);
             if (!__pyx_t_6) {
             } else {
@@ -1460,34 +1534,34 @@
             if (__pyx_t_5) {
 /* … */
             }
-
+0391:                                     (self.criterion.weighted_n_right < min_weight_leaf)):
+
+0391:                                     (self.criterion.weighted_n_right < min_weight_leaf)):
            __pyx_t_6 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_right < __pyx_v_min_weight_leaf) != 0);
             __pyx_t_5 = __pyx_t_6;
             __pyx_L23_bool_binop_done:;
-
+0392:                                 continue
+
+0392:                                 continue
              goto __pyx_L12_continue;
 
 0393: 
-
+0394:                             current_proxy_improvement = self.criterion.proxy_impurity_improvement()
+
+0394:                             current_proxy_improvement = self.criterion.proxy_impurity_improvement()
            __pyx_v_current_proxy_improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->proxy_impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion);
 
 0395: 
-
 0396:                             # with gil:
-
 0397:                             #     print("feature ", current.feature)
-
 0398:                             #     print("threshold ", current.threshold)
-
 0399:                             #     print("proxy ", current_proxy_improvement)
-
 0400:                             #     print()
+
 0396:                             # with gil:
+
 0397:                             #     print("feature ", current.feature)
+
 0398:                             #     print("threshold ", current.threshold)
+
 0399:                             #     print("proxy ", current_proxy_improvement)
+
 0400:                             #     print()
 0401: 
-
+0402:                             if current_proxy_improvement > best_proxy_improvement:
+
+0402:                             if current_proxy_improvement > best_proxy_improvement:
            __pyx_t_5 = ((__pyx_v_current_proxy_improvement > __pyx_v_best_proxy_improvement) != 0);
             if (__pyx_t_5) {
 /* … */
             }
-
+0403:                                 best_proxy_improvement = current_proxy_improvement
+
+0403:                                 best_proxy_improvement = current_proxy_improvement
              __pyx_v_best_proxy_improvement = __pyx_v_current_proxy_improvement;
-
 0404:                                 # sum of halves is used to avoid infinite value
-
+0405:                                 current.threshold = Xf[p - 1] / 2.0 + Xf[p] / 2.0
+
 0404:                                 # sum of halves is used to avoid infinite value
+
+0405:                                 current.threshold = Xf[p - 1] / 2.0 + Xf[p] / 2.0
              __pyx_v_current.threshold = (((__pyx_v_Xf[(__pyx_v_p - 1)]) / 2.0) + ((__pyx_v_Xf[__pyx_v_p]) / 2.0));
 
 0406: 
-
+0407:                                 if ((current.threshold == Xf[p]) or
+
+0407:                                 if ((current.threshold == Xf[p]) or
              __pyx_t_6 = ((__pyx_v_current.threshold == (__pyx_v_Xf[__pyx_v_p])) != 0);
               if (!__pyx_t_6) {
               } else {
@@ -1498,39 +1572,39 @@
               if (__pyx_t_5) {
 /* … */
               }
-
+0408:                                     (current.threshold == INFINITY) or
+
+0408:                                     (current.threshold == INFINITY) or
              __pyx_t_6 = ((__pyx_v_current.threshold == __pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY) != 0);
               if (!__pyx_t_6) {
               } else {
                 __pyx_t_5 = __pyx_t_6;
                 goto __pyx_L27_bool_binop_done;
               }
-
+0409:                                     (current.threshold == -INFINITY)):
+
+0409:                                     (current.threshold == -INFINITY)):
              __pyx_t_6 = ((__pyx_v_current.threshold == (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY)) != 0);
               __pyx_t_5 = __pyx_t_6;
               __pyx_L27_bool_binop_done:;
-
+0410:                                     current.threshold = Xf[p - 1]
+
+0410:                                     current.threshold = Xf[p - 1]
                __pyx_v_current.threshold = (__pyx_v_Xf[(__pyx_v_p - 1)]);
 
 0411: 
-
+0412:                                 best = current  # copy
+
+0412:                                 best = current  # copy
              __pyx_v_best = __pyx_v_current;
 
 0413: 
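The midpoint in line 0405 is computed as a sum of halves so that two large finite values cannot overflow to +inf, and lines 0407-0410 clamp the result back to the left value whenever rounding or infinities make it collide with Xf[p]. With hypothetical numbers:

    import math

    xf_left, xf_right = 2.0, 3.0
    threshold = xf_left / 2.0 + xf_right / 2.0   # 2.5; never overflows to inf
    if threshold == xf_right or math.isinf(threshold):
        threshold = xf_left                      # fall back to the left value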
-
 0414:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
-
+0415:         if best.pos < end:
+
 0414:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
+
+0415:         if best.pos < end:
  __pyx_t_5 = ((__pyx_v_best.pos < __pyx_v_end) != 0);
   if (__pyx_t_5) {
 /* … */
   }
-
+0416:             partition_end = end
+
+0416:             partition_end = end
    __pyx_v_partition_end = __pyx_v_end;
-
+0417:             p = start
+
+0417:             p = start
    __pyx_v_p = __pyx_v_start;
 
 0418: 
-
+0419:             while p < partition_end:
+
+0419:             while p < partition_end:
    while (1) {
       __pyx_t_5 = ((__pyx_v_p < __pyx_v_partition_end) != 0);
       if (!__pyx_t_5) break;
-
+0420:                 if self.X[samples[p], best.feature] <= best.threshold:
+
+0420:                 if self.X[samples[p], best.feature] <= best.threshold:
      if (unlikely(!__pyx_v_self->__pyx_base.X.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 420, __pyx_L1_error)}
       __pyx_t_10 = (__pyx_v_samples[__pyx_v_p]);
       __pyx_t_9 = __pyx_v_best.feature;
@@ -1539,15 +1613,15 @@
 /* … */
         goto __pyx_L33;
       }
-
+0421:                     p += 1
+
+0421:                     p += 1
        __pyx_v_p = (__pyx_v_p + 1);
 
 0422: 
-
 0423:                 else:
-
+0424:                     partition_end -= 1
+
 0423:                 else:
+
+0424:                     partition_end -= 1
      /*else*/ {
         __pyx_v_partition_end = (__pyx_v_partition_end - 1);
 
 0425: 
-
+0426:                     samples[p], samples[partition_end] = samples[partition_end], samples[p]
+
+0426:                     samples[p], samples[partition_end] = samples[partition_end], samples[p]
        __pyx_t_7 = (__pyx_v_samples[__pyx_v_partition_end]);
         __pyx_t_2 = (__pyx_v_samples[__pyx_v_p]);
         (__pyx_v_samples[__pyx_v_p]) = __pyx_t_7;
@@ -1556,80 +1630,80 @@
       __pyx_L33:;
     }
 
 0427: 
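The reorganize step (lines 0415-0426) is a two-pointer partition: samples whose best.feature value is at or below the threshold stay on the left, the rest are swapped to the tail. On plain lists:

    def partition(samples, X, start, end, feature, threshold):
        p, partition_end = start, end
        while p < partition_end:
            if X[samples[p]][feature] <= threshold:
                p += 1
            else:
                partition_end -= 1
                samples[p], samples[partition_end] = \
                    samples[partition_end], samples[p]
        return p  # samples[start:p] now satisfy the split condition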
-
+0428:             self.criterion.reset()
+
+0428:             self.criterion.reset()
    __pyx_t_11 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 428, __pyx_L1_error)
-
+0429:             self.criterion.update(best.pos)
+
+0429:             self.criterion.update(best.pos)
    __pyx_t_11 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_best.pos); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 429, __pyx_L1_error)
-
+0430:             best.improvement = self.criterion.impurity_improvement(impurity)
+
+0430:             best.improvement = self.criterion.impurity_improvement(impurity)
    __pyx_v_best.improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_impurity);
-
+0431:             self.criterion.children_impurity(&best.impurity_left,
+
+0431:             self.criterion.children_impurity(&best.impurity_left,
    ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->children_impurity(__pyx_v_self->__pyx_base.__pyx_base.criterion, (&__pyx_v_best.impurity_left), (&__pyx_v_best.impurity_right));
-
 0432:                                              &best.impurity_right)
+
 0432:                                              &best.impurity_right)
 0433: 
-
 0434:         # Respect invariant for constant features: the original order of
-
 0435:         # element in features[:n_known_constants] must be preserved for sibling
-
 0436:         # and child nodes
-
+0437:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
+
 0434:         # Respect invariant for constant features: the original order of
+
 0435:         # element in features[:n_known_constants] must be preserved for sibling
+
 0436:         # and child nodes
+
+0437:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
  (void)(memcpy(__pyx_v_features, __pyx_v_constant_features, ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_known_constants)));
 
 0438: 
-
 0439:         # Copy newly found constant features
-
+0440:         memcpy(constant_features + n_known_constants,
+
 0439:         # Copy newly found constant features
+
+0440:         memcpy(constant_features + n_known_constants,
  (void)(memcpy((__pyx_v_constant_features + __pyx_v_n_known_constants), (__pyx_v_features + __pyx_v_n_known_constants), ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_found_constants)));
-
 0441:                features + n_known_constants,
-
 0442:                sizeof(SIZE_t) * n_found_constants)
+
 0441:                features + n_known_constants,
+
 0442:                sizeof(SIZE_t) * n_found_constants)
 0443: 
-
 0444:         # Return values
-
+0445:         split[0] = best
+
 0444:         # Return values
+
+0445:         split[0] = best
  (__pyx_v_split[0]) = __pyx_v_best;
-
+0446:         n_constant_features[0] = n_total_constants
+
+0446:         n_constant_features[0] = n_total_constants
  (__pyx_v_n_constant_features[0]) = __pyx_v_n_total_constants;
-
+0447:         return 0
+
+0447:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0448: 
 0449: 
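The two memcpy calls in lines 0437-0442 maintain the constant-feature cache across nodes: the first restores the original ordering of the already-known constants, the second appends the constants discovered at this node so descendants can skip them. The list-slice equivalent:

    features[:n_known_constants] = constant_features[:n_known_constants]
    constant_features[n_known_constants:n_known_constants + n_found_constants] = \
        features[n_known_constants:n_known_constants + n_found_constants]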
-
 0450: # Sort n-element arrays pointed to by Xf and samples, simultaneously,
-
 0451: # by the values in Xf. Algorithm: Introsort (Musser, SP&E, 1997).
-
+0452: cdef inline void sort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
+
 0450: # Sort n-element arrays pointed to by Xf and samples, simultaneously,
+
 0451: # by the values in Xf. Algorithm: Introsort (Musser, SP&E, 1997).
+
+0452: cdef inline void sort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_sort(__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n) {
   int __pyx_v_maxd;
 /* … */
   /* function exit code */
   __pyx_L0:;
 }
-
+0453:     if n == 0:
+
+0453:     if n == 0:
  __pyx_t_1 = ((__pyx_v_n == 0) != 0);
   if (__pyx_t_1) {
 /* … */
   }
-
+0454:       return
+
+0454:       return
    goto __pyx_L0;
-
+0455:     cdef int maxd = 2 * <int>log(n)
+
+0455:     cdef int maxd = 2 * <int>log(n)
  __pyx_v_maxd = (2 * ((int)__pyx_f_7sklearn_4tree_6_utils_log(__pyx_v_n)));
-
+0456:     introsort(Xf, samples, n, maxd)
+
+0456:     introsort(Xf, samples, n, maxd)
  __pyx_f_13stpredictions_6models_3OK3_9_splitter_introsort(__pyx_v_Xf, __pyx_v_samples, __pyx_v_n, __pyx_v_maxd);
 
 0457: 
 0458: 
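sort() (lines 0453-0456) is the introsort entry point: it sets a recursion-depth budget of 2 * <int>log(n) and delegates to introsort, which falls back to heapsort once the budget is spent (line 0494). A sketch, assuming the tree-utils log helper is base-2 as in sklearn:

    import math

    def sort(xf, samples, n):
        if n == 0:
            return
        maxd = 2 * int(math.log2(n))  # depth budget before heapsort fallback
        introsort(xf, samples, n, maxd)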
-
+0459: cdef inline void swap(DTYPE_t* Xf, SIZE_t* samples,
+
+0459: cdef inline void swap(DTYPE_t* Xf, SIZE_t* samples,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_swap(__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_i, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_j) {
 /* … */
   /* function exit code */
 }
-
 0460:         SIZE_t i, SIZE_t j) nogil:
-
 0461:     # Helper for sort
-
+0462:     Xf[i], Xf[j] = Xf[j], Xf[i]
+
 0460:         SIZE_t i, SIZE_t j) nogil:
+
 0461:     # Helper for sort
+
+0462:     Xf[i], Xf[j] = Xf[j], Xf[i]
  __pyx_t_1 = (__pyx_v_Xf[__pyx_v_j]);
   __pyx_t_2 = (__pyx_v_Xf[__pyx_v_i]);
   (__pyx_v_Xf[__pyx_v_i]) = __pyx_t_1;
   (__pyx_v_Xf[__pyx_v_j]) = __pyx_t_2;
-
+0463:     samples[i], samples[j] = samples[j], samples[i]
+
+0463:     samples[i], samples[j] = samples[j], samples[i]
  __pyx_t_3 = (__pyx_v_samples[__pyx_v_j]);
   __pyx_t_4 = (__pyx_v_samples[__pyx_v_i]);
   (__pyx_v_samples[__pyx_v_i]) = __pyx_t_3;
   (__pyx_v_samples[__pyx_v_j]) = __pyx_t_4;
 
 0464: 
 0465: 
-
+0466: cdef inline DTYPE_t median3(DTYPE_t* Xf, SIZE_t n) nogil:
+
+0466: cdef inline DTYPE_t median3(DTYPE_t* Xf, SIZE_t n) nogil:
static CYTHON_INLINE __pyx_t_7sklearn_4tree_5_tree_DTYPE_t __pyx_f_13stpredictions_6models_3OK3_9_splitter_median3(__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n) {
   __pyx_t_7sklearn_4tree_5_tree_DTYPE_t __pyx_v_a;
   __pyx_t_7sklearn_4tree_5_tree_DTYPE_t __pyx_v_b;
@@ -1640,69 +1714,69 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0467:     # Median of three pivot selection, after Bentley and McIlroy (1993).
-
 0468:     # Engineering a sort function. SP&E. Requires 8/3 comparisons on average.
-
+0469:     cdef DTYPE_t a = Xf[0], b = Xf[n / 2], c = Xf[n - 1]
+
 0467:     # Median of three pivot selection, after Bentley and McIlroy (1993).
+
 0468:     # Engineering a sort function. SP&E. Requires 8/3 comparisons on average.
+
+0469:     cdef DTYPE_t a = Xf[0], b = Xf[n / 2], c = Xf[n - 1]
  __pyx_v_a = (__pyx_v_Xf[0]);
   __pyx_v_b = (__pyx_v_Xf[(__pyx_v_n / 2)]);
   __pyx_v_c = (__pyx_v_Xf[(__pyx_v_n - 1)]);
-
+0470:     if a < b:
+
+0470:     if a < b:
  __pyx_t_1 = ((__pyx_v_a < __pyx_v_b) != 0);
   if (__pyx_t_1) {
 /* … */
   }
-
+0471:         if b < c:
+
+0471:         if b < c:
    __pyx_t_1 = ((__pyx_v_b < __pyx_v_c) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0472:             return b
+
+0472:             return b
      __pyx_r = __pyx_v_b;
       goto __pyx_L0;
-
+0473:         elif a < c:
+
+0473:         elif a < c:
    __pyx_t_1 = ((__pyx_v_a < __pyx_v_c) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0474:             return c
+
+0474:             return c
      __pyx_r = __pyx_v_c;
       goto __pyx_L0;
-
 0475:         else:
-
+0476:             return a
+
 0475:         else:
+
+0476:             return a
    /*else*/ {
       __pyx_r = __pyx_v_a;
       goto __pyx_L0;
     }
-
+0477:     elif b < c:
+
+0477:     elif b < c:
  __pyx_t_1 = ((__pyx_v_b < __pyx_v_c) != 0);
   if (__pyx_t_1) {
 /* … */
   }
-
+0478:         if a < c:
+
+0478:         if a < c:
    __pyx_t_1 = ((__pyx_v_a < __pyx_v_c) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0479:             return a
+
+0479:             return a
      __pyx_r = __pyx_v_a;
       goto __pyx_L0;
-
 0480:         else:
-
+0481:             return c
+
 0480:         else:
+
+0481:             return c
    /*else*/ {
       __pyx_r = __pyx_v_c;
       goto __pyx_L0;
     }
-
 0482:     else:
-
+0483:         return b
+
 0482:     else:
+
+0483:         return b
  /*else*/ {
     __pyx_r = __pyx_v_b;
     goto __pyx_L0;
   }
 
 0484: 
 0485: 
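
The median3 above is small enough to transcribe directly; a Python sketch with illustrative names (not the module's API):

    def median3(Xf, n):
        # Median-of-three pivot: the median of the first, middle and last
        # feature values, per Bentley & McIlroy's sort-engineering scheme.
        a, b, c = Xf[0], Xf[n // 2], Xf[n - 1]
        if a < b:
            if b < c:
                return b
            elif a < c:
                return c
            else:
                return a
        elif b < c:
            if a < c:
                return a
            else:
                return c
        else:
            return b
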
-
 0486: # Introsort with median of 3 pivot selection and 3-way partition function
-
 0487: # (robust to repeated elements, e.g. lots of zero features).
-
+0488: cdef void introsort(DTYPE_t* Xf, SIZE_t *samples,
+
 0486: # Introsort with median of 3 pivot selection and 3-way partition function
+
 0487: # (robust to repeated elements, e.g. lots of zero features).
+
+0488: cdef void introsort(DTYPE_t* Xf, SIZE_t *samples,
static void __pyx_f_13stpredictions_6models_3OK3_9_splitter_introsort(__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n, int __pyx_v_maxd) {
   __pyx_t_7sklearn_4tree_5_tree_DTYPE_t __pyx_v_pivot;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_i;
@@ -1712,81 +1786,81 @@
   /* function exit code */
   __pyx_L0:;
 }
-
 0489:                     SIZE_t n, int maxd) nogil:
-
 0490:     cdef DTYPE_t pivot
-
 0491:     cdef SIZE_t i, l, r
+
 0489:                     SIZE_t n, int maxd) nogil:
+
 0490:     cdef DTYPE_t pivot
+
 0491:     cdef SIZE_t i, l, r
 0492: 
-
+0493:     while n > 1:
+
+0493:     while n > 1:
  while (1) {
     __pyx_t_1 = ((__pyx_v_n > 1) != 0);
     if (!__pyx_t_1) break;
-
+0494:         if maxd <= 0:   # max depth limit exceeded ("gone quadratic")
+
+0494:         if maxd <= 0:   # max depth limit exceeded ("gone quadratic")
    __pyx_t_1 = ((__pyx_v_maxd <= 0) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0495:             heapsort(Xf, samples, n)
+
+0495:             heapsort(Xf, samples, n)
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_heapsort(__pyx_v_Xf, __pyx_v_samples, __pyx_v_n);
-
+0496:             return
+
+0496:             return
      goto __pyx_L0;
-
+0497:         maxd -= 1
+
+0497:         maxd -= 1
    __pyx_v_maxd = (__pyx_v_maxd - 1);
 
 0498: 
-
+0499:         pivot = median3(Xf, n)
+
+0499:         pivot = median3(Xf, n)
    __pyx_v_pivot = __pyx_f_13stpredictions_6models_3OK3_9_splitter_median3(__pyx_v_Xf, __pyx_v_n);
 
 0500: 
-
 0501:         # Three-way partition.
-
+0502:         i = l = 0
+
 0501:         # Three-way partition.
+
+0502:         i = l = 0
    __pyx_v_i = 0;
     __pyx_v_l = 0;
-
+0503:         r = n
+
+0503:         r = n
    __pyx_v_r = __pyx_v_n;
-
+0504:         while i < r:
+
+0504:         while i < r:
    while (1) {
       __pyx_t_1 = ((__pyx_v_i < __pyx_v_r) != 0);
       if (!__pyx_t_1) break;
-
+0505:             if Xf[i] < pivot:
+
+0505:             if Xf[i] < pivot:
      __pyx_t_1 = (((__pyx_v_Xf[__pyx_v_i]) < __pyx_v_pivot) != 0);
       if (__pyx_t_1) {
 /* … */
         goto __pyx_L8;
       }
-
+0506:                 swap(Xf, samples, i, l)
+
+0506:                 swap(Xf, samples, i, l)
        __pyx_f_13stpredictions_6models_3OK3_9_splitter_swap(__pyx_v_Xf, __pyx_v_samples, __pyx_v_i, __pyx_v_l);
-
+0507:                 i += 1
+
+0507:                 i += 1
        __pyx_v_i = (__pyx_v_i + 1);
-
+0508:                 l += 1
+
+0508:                 l += 1
        __pyx_v_l = (__pyx_v_l + 1);
-
+0509:             elif Xf[i] > pivot:
+
+0509:             elif Xf[i] > pivot:
      __pyx_t_1 = (((__pyx_v_Xf[__pyx_v_i]) > __pyx_v_pivot) != 0);
       if (__pyx_t_1) {
 /* … */
         goto __pyx_L8;
       }
-
+0510:                 r -= 1
+
+0510:                 r -= 1
        __pyx_v_r = (__pyx_v_r - 1);
-
+0511:                 swap(Xf, samples, i, r)
+
+0511:                 swap(Xf, samples, i, r)
        __pyx_f_13stpredictions_6models_3OK3_9_splitter_swap(__pyx_v_Xf, __pyx_v_samples, __pyx_v_i, __pyx_v_r);
-
 0512:             else:
-
+0513:                 i += 1
+
 0512:             else:
+
+0513:                 i += 1
      /*else*/ {
         __pyx_v_i = (__pyx_v_i + 1);
       }
       __pyx_L8:;
     }
 
 0514: 
-
+0515:         introsort(Xf, samples, l, maxd)
+
+0515:         introsort(Xf, samples, l, maxd)
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_introsort(__pyx_v_Xf, __pyx_v_samples, __pyx_v_l, __pyx_v_maxd);
-
+0516:         Xf += r
+
+0516:         Xf += r
    __pyx_v_Xf = (__pyx_v_Xf + __pyx_v_r);
-
+0517:         samples += r
+
+0517:         samples += r
    __pyx_v_samples = (__pyx_v_samples + __pyx_v_r);
-
+0518:         n -= r
+
+0518:         n -= r
    __pyx_v_n = (__pyx_v_n - __pyx_v_r);
   }
 
 0519: 
 0520: 
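
A Python sketch of the introsort above, reusing the swap and median3 sketches earlier in this section and the heapsort sketched after sift_down below; an explicit offset lo stands in for the Cython pointer arithmetic (Xf += r, samples += r):

    def introsort(Xf, samples, lo, n, maxd):
        # Sort Xf[lo:lo+n] (and samples alongside) in place.
        while n > 1:
            if maxd <= 0:
                # Depth budget exhausted ("gone quadratic"): fall back to
                # heapsort, which is O(n log n) worst case.
                heapsort(Xf, samples, lo, n)
                return
            maxd -= 1
            pivot = median3(Xf[lo:lo + n], n)

            # Three-way partition into [< pivot | == pivot | > pivot]; robust
            # to many repeated values such as all-zero feature columns.
            i = l = lo
            r = lo + n
            while i < r:
                if Xf[i] < pivot:
                    swap(Xf, samples, i, l)
                    i += 1
                    l += 1
                elif Xf[i] > pivot:
                    r -= 1
                    swap(Xf, samples, i, r)
                else:
                    i += 1

            introsort(Xf, samples, lo, l - lo, maxd)  # recurse on the < block
            n = lo + n - r                            # then loop on the > block
            lo = r
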
-
+0521: cdef inline void sift_down(DTYPE_t* Xf, SIZE_t* samples,
+
+0521: cdef inline void sift_down(DTYPE_t* Xf, SIZE_t* samples,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_sift_down(__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_child;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_maxind;
@@ -1794,21 +1868,21 @@
 /* … */
   /* function exit code */
 }
-
 0522:                            SIZE_t start, SIZE_t end) nogil:
-
 0523:     # Restore heap order in Xf[start:end] by moving the max element to start.
-
 0524:     cdef SIZE_t child, maxind, root
+
 0522:                            SIZE_t start, SIZE_t end) nogil:
+
 0523:     # Restore heap order in Xf[start:end] by moving the max element to start.
+
 0524:     cdef SIZE_t child, maxind, root
 0525: 
-
+0526:     root = start
+
+0526:     root = start
  __pyx_v_root = __pyx_v_start;
-
+0527:     while True:
+
+0527:     while True:
  while (1) {
-
+0528:         child = root * 2 + 1
+
+0528:         child = root * 2 + 1
    __pyx_v_child = ((__pyx_v_root * 2) + 1);
 
 0529: 
-
 0530:         # find max of root, left child, right child
-
+0531:         maxind = root
+
 0530:         # find max of root, left child, right child
+
+0531:         maxind = root
    __pyx_v_maxind = __pyx_v_root;
-
+0532:         if child < end and Xf[maxind] < Xf[child]:
+
+0532:         if child < end and Xf[maxind] < Xf[child]:
    __pyx_t_2 = ((__pyx_v_child < __pyx_v_end) != 0);
     if (__pyx_t_2) {
     } else {
@@ -1821,9 +1895,9 @@
     if (__pyx_t_1) {
 /* … */
     }
-
+0533:             maxind = child
+
+0533:             maxind = child
      __pyx_v_maxind = __pyx_v_child;
-
+0534:         if child + 1 < end and Xf[maxind] < Xf[child + 1]:
+
+0534:         if child + 1 < end and Xf[maxind] < Xf[child + 1]:
    __pyx_t_2 = (((__pyx_v_child + 1) < __pyx_v_end) != 0);
     if (__pyx_t_2) {
     } else {
@@ -1836,74 +1910,74 @@
     if (__pyx_t_1) {
 /* … */
     }
-
+0535:             maxind = child + 1
+
+0535:             maxind = child + 1
      __pyx_v_maxind = (__pyx_v_child + 1);
 
 0536: 
-
+0537:         if maxind == root:
+
+0537:         if maxind == root:
    __pyx_t_1 = ((__pyx_v_maxind == __pyx_v_root) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0538:             break
+
+0538:             break
      goto __pyx_L4_break;
-
 0539:         else:
-
+0540:             swap(Xf, samples, root, maxind)
+
 0539:         else:
+
+0540:             swap(Xf, samples, root, maxind)
    /*else*/ {
       __pyx_f_13stpredictions_6models_3OK3_9_splitter_swap(__pyx_v_Xf, __pyx_v_samples, __pyx_v_root, __pyx_v_maxind);
-
+0541:             root = maxind
+
+0541:             root = maxind
      __pyx_v_root = __pyx_v_maxind;
     }
   }
   __pyx_L4_break:;
 
 0542: 
 0543: 
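
A sketch of sift_down in the same style, with window-relative indices start/end and the absolute offset lo used above:

    def sift_down(Xf, samples, lo, start, end):
        # Restore max-heap order in the window Xf[lo:lo+end] by sinking the
        # element at `start` until neither child is larger.
        root = start
        while True:
            child = root * 2 + 1
            maxind = root
            if child < end and Xf[lo + maxind] < Xf[lo + child]:
                maxind = child
            if child + 1 < end and Xf[lo + maxind] < Xf[lo + child + 1]:
                maxind = child + 1
            if maxind == root:
                break
            swap(Xf, samples, lo + root, lo + maxind)
            root = maxind
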
-
+0544: cdef void heapsort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
+
+0544: cdef void heapsort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_9_splitter_heapsort(__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end;
 /* … */
   /* function exit code */
 }
-
 0545:     cdef SIZE_t start, end
+
 0545:     cdef SIZE_t start, end
 0546: 
-
 0547:     # heapify
-
+0548:     start = (n - 2) / 2
+
 0547:     # heapify
+
+0548:     start = (n - 2) / 2
  __pyx_v_start = ((__pyx_v_n - 2) / 2);
-
+0549:     end = n
+
+0549:     end = n
  __pyx_v_end = __pyx_v_n;
-
+0550:     while True:
+
+0550:     while True:
  while (1) {
-
+0551:         sift_down(Xf, samples, start, end)
+
+0551:         sift_down(Xf, samples, start, end)
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_sift_down(__pyx_v_Xf, __pyx_v_samples, __pyx_v_start, __pyx_v_end);
-
+0552:         if start == 0:
+
+0552:         if start == 0:
    __pyx_t_1 = ((__pyx_v_start == 0) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0553:             break
+
+0553:             break
      goto __pyx_L4_break;
-
+0554:         start -= 1
+
+0554:         start -= 1
    __pyx_v_start = (__pyx_v_start - 1);
   }
   __pyx_L4_break:;
 
 0555: 
-
 0556:     # sort by shrinking the heap, putting the max element immediately after it
-
+0557:     end = n - 1
+
 0556:     # sort by shrinking the heap, putting the max element immediately after it
+
+0557:     end = n - 1
  __pyx_v_end = (__pyx_v_n - 1);
-
+0558:     while end > 0:
+
+0558:     while end > 0:
  while (1) {
     __pyx_t_1 = ((__pyx_v_end > 0) != 0);
     if (!__pyx_t_1) break;
-
+0559:         swap(Xf, samples, 0, end)
+
+0559:         swap(Xf, samples, 0, end)
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_swap(__pyx_v_Xf, __pyx_v_samples, 0, __pyx_v_end);
-
+0560:         sift_down(Xf, samples, 0, end)
+
+0560:         sift_down(Xf, samples, 0, end)
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_sift_down(__pyx_v_Xf, __pyx_v_samples, 0, __pyx_v_end);
-
+0561:         end = end - 1
+
+0561:         end = end - 1
    __pyx_v_end = (__pyx_v_end - 1);
   }
 
 0562: 
 0563: 
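
Completing the trio, a heapsort sketch plus a small end-to-end check that exercises all of the sketches together (values are illustrative):

    def heapsort(Xf, samples, lo, n):
        if n < 2:
            return  # introsort only delegates windows with n > 1
        # Heapify bottom-up, then repeatedly move the max just past the
        # shrinking heap.
        start, end = (n - 2) // 2, n
        while True:
            sift_down(Xf, samples, lo, start, end)
            if start == 0:
                break
            start -= 1
        end = n - 1
        while end > 0:
            swap(Xf, samples, lo, lo + end)
            sift_down(Xf, samples, lo, 0, end)
            end -= 1

    Xf = [3.0, 1.0, 2.0, 1.0]
    samples = [10, 11, 12, 13]
    introsort(Xf, samples, 0, len(Xf), maxd=4)
    # Xf is now sorted ascending; samples carries the matching permutation.
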
-
+0564: cdef class RandomSplitter(BaseDenseSplitter):
+
+0564: cdef class RandomSplitter(BaseDenseSplitter):
struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_RandomSplitter {
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseDenseSplitter __pyx_base;
 };
@@ -1913,8 +1987,8 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_RandomSplitter *__pyx_vtabptr_13stpredictions_6models_3OK3_9_splitter_RandomSplitter;
 
-
 0565:     """Splitter for finding the best random split."""
-
+0566:     def __reduce__(self):
+
 0565:     """Splitter for finding the best random split."""
+
+0566:     def __reduce__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_14RandomSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_14RandomSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -1946,7 +2020,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0567:         return (RandomSplitter, (self.criterion,
+
+0567:         return (RandomSplitter, (self.criterion,
  __Pyx_XDECREF(__pyx_r);
 /* … */
   __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 567, __pyx_L1_error)
@@ -1981,16 +2055,16 @@
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
-
+0568:                                  self.max_features,
+
+0568:                                  self.max_features,
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.max_features); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 568, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
-
+0569:                                  self.min_samples_leaf,
+
+0569:                                  self.min_samples_leaf,
  __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 569, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
-
+0570:                                  self.min_weight_leaf,
+
+0570:                                  self.min_weight_leaf,
  __pyx_t_3 = PyFloat_FromDouble(__pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 570, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
-
+0571:                                  self.random_state), self.__getstate__())
+
+0571:                                  self.random_state), self.__getstate__())
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_getstate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 571, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_1 = NULL;
@@ -2009,7 +2083,7 @@
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
 0572: 
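
For context, __reduce__ above follows the standard (callable, args, state) pickle protocol: unpickling calls RandomSplitter(criterion, max_features, min_samples_leaf, min_weight_leaf, random_state) and then restores the __getstate__() payload. A hypothetical stand-in class showing the same round trip:

    import pickle

    class Stub:
        def __init__(self, max_features):
            self.max_features = max_features
            self.fitted = False

        def __getstate__(self):
            return {'fitted': self.fitted}

        def __setstate__(self, state):
            self.fitted = state['fitted']

        def __reduce__(self):
            # Same shape as RandomSplitter.__reduce__: rebuild via the
            # constructor, then re-apply the pickled state.
            return (Stub, (self.max_features,), self.__getstate__())

    s = Stub(5)
    s.fitted = True
    t = pickle.loads(pickle.dumps(s))
    assert t.max_features == 5 and t.fitted
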
-
+0573:     cdef int node_split(self, double impurity, SplitRecord* split,
+
+0573:     cdef int node_split(self, double impurity, SplitRecord* split,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_14RandomSplitter_node_split(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_RandomSplitter *__pyx_v_self, double __pyx_v_impurity, struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord *__pyx_v_split, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_n_constant_features) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start;
@@ -2055,90 +2129,90 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0574:                         SIZE_t* n_constant_features) nogil except -1:
-
 0575:         """Find the best random split on node samples[start:end]
+
 0574:                         SIZE_t* n_constant_features) nogil except -1:
+
 0575:         """Find the best random split on node samples[start:end]
 0576: 
-
 0577:         Returns -1 in case of failure to allocate memory (and raises MemoryError)
-
 0578:         or 0 otherwise.
-
 0579:         """
-
 0580:         # Draw random splits and pick the best
-
+0581:         cdef SIZE_t* samples = self.samples
+
 0577:         Returns -1 in case of failure to allocate memory (and raises MemoryError)
+
 0578:         or 0 otherwise.
+
 0579:         """
+
 0580:         # Draw random splits and pick the best
+
+0581:         cdef SIZE_t* samples = self.samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.samples;
   __pyx_v_samples = __pyx_t_1;
-
+0582:         cdef SIZE_t start = self.start
+
+0582:         cdef SIZE_t start = self.start
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.start;
   __pyx_v_start = __pyx_t_2;
-
+0583:         cdef SIZE_t end = self.end
+
+0583:         cdef SIZE_t end = self.end
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.end;
   __pyx_v_end = __pyx_t_2;
 
 0584: 
-
+0585:         cdef SIZE_t* features = self.features
+
+0585:         cdef SIZE_t* features = self.features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.features;
   __pyx_v_features = __pyx_t_1;
-
+0586:         cdef SIZE_t* constant_features = self.constant_features
+
+0586:         cdef SIZE_t* constant_features = self.constant_features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.constant_features;
   __pyx_v_constant_features = __pyx_t_1;
-
+0587:         cdef SIZE_t n_features = self.n_features
+
+0587:         cdef SIZE_t n_features = self.n_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.n_features;
   __pyx_v_n_features = __pyx_t_2;
 
 0588: 
-
+0589:         cdef DTYPE_t* Xf = self.feature_values
+
+0589:         cdef DTYPE_t* Xf = self.feature_values
  __pyx_t_3 = __pyx_v_self->__pyx_base.__pyx_base.feature_values;
   __pyx_v_Xf = __pyx_t_3;
-
+0590:         cdef SIZE_t max_features = self.max_features
+
+0590:         cdef SIZE_t max_features = self.max_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.max_features;
   __pyx_v_max_features = __pyx_t_2;
-
+0591:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
+
+0591:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf;
   __pyx_v_min_samples_leaf = __pyx_t_2;
-
+0592:         cdef double min_weight_leaf = self.min_weight_leaf
+
+0592:         cdef double min_weight_leaf = self.min_weight_leaf
  __pyx_t_4 = __pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf;
   __pyx_v_min_weight_leaf = __pyx_t_4;
-
+0593:         cdef UINT32_t* random_state = &self.rand_r_state
+
+0593:         cdef UINT32_t* random_state = &self.rand_r_state
  __pyx_v_random_state = (&__pyx_v_self->__pyx_base.__pyx_base.rand_r_state);
 
 0594: 
-
 0595:         cdef SplitRecord best, current
-
+0596:         cdef double current_proxy_improvement = - INFINITY
+
 0595:         cdef SplitRecord best, current
+
+0596:         cdef double current_proxy_improvement = - INFINITY
  __pyx_v_current_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
-
+0597:         cdef double best_proxy_improvement = - INFINITY
+
+0597:         cdef double best_proxy_improvement = - INFINITY
  __pyx_v_best_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
 
 0598: 
-
+0599:         cdef SIZE_t f_i = n_features
+
+0599:         cdef SIZE_t f_i = n_features
  __pyx_v_f_i = __pyx_v_n_features;
-
 0600:         cdef SIZE_t f_j
-
 0601:         cdef SIZE_t p
-
 0602:         cdef SIZE_t partition_end
-
 0603:         cdef SIZE_t feature_stride
-
 0604:         # Number of features discovered to be constant during the split search
-
+0605:         cdef SIZE_t n_found_constants = 0
+
 0600:         cdef SIZE_t f_j
+
 0601:         cdef SIZE_t p
+
 0602:         cdef SIZE_t partition_end
+
 0603:         cdef SIZE_t feature_stride
+
 0604:         # Number of features discovered to be constant during the split search
+
+0605:         cdef SIZE_t n_found_constants = 0
  __pyx_v_n_found_constants = 0;
-
 0606:         # Number of features known to be constant and drawn without replacement
-
+0607:         cdef SIZE_t n_drawn_constants = 0
+
 0606:         # Number of features known to be constant and drawn without replacement
+
+0607:         cdef SIZE_t n_drawn_constants = 0
  __pyx_v_n_drawn_constants = 0;
-
+0608:         cdef SIZE_t n_known_constants = n_constant_features[0]
+
+0608:         cdef SIZE_t n_known_constants = n_constant_features[0]
  __pyx_v_n_known_constants = (__pyx_v_n_constant_features[0]);
-
 0609:         # n_total_constants = n_known_constants + n_found_constants
-
+0610:         cdef SIZE_t n_total_constants = n_known_constants
+
 0609:         # n_total_constants = n_known_constants + n_found_constants
+
+0610:         cdef SIZE_t n_total_constants = n_known_constants
  __pyx_v_n_total_constants = __pyx_v_n_known_constants;
-
+0611:         cdef SIZE_t n_visited_features = 0
+
+0611:         cdef SIZE_t n_visited_features = 0
  __pyx_v_n_visited_features = 0;
-
 0612:         cdef DTYPE_t min_feature_value
-
 0613:         cdef DTYPE_t max_feature_value
-
 0614:         cdef DTYPE_t current_feature_value
+
 0612:         cdef DTYPE_t min_feature_value
+
 0613:         cdef DTYPE_t max_feature_value
+
 0614:         cdef DTYPE_t current_feature_value
 0615: 
-
+0616:         _init_split(&best, end)
+
+0616:         _init_split(&best, end)
  __pyx_f_13stpredictions_6models_3OK3_9_splitter__init_split((&__pyx_v_best), __pyx_v_end);
 
 0617: 
-
 0618:         # Sample up to max_features without replacement using a
-
 0619:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
-
 0620:         # `f_j` to compute a permutation of the `features` array).
-
 0621:         #
-
 0622:         # Skip the CPU intensive evaluation of the impurity criterion for
-
 0623:         # features that were already detected as constant (hence not suitable
-
 0624:         # for good splitting) by ancestor nodes and save the information on
-
 0625:         # newly discovered constant features to spare computation on descendant
-
 0626:         # nodes.
-
+0627:         while (f_i > n_total_constants and  # Stop early if remaining features
+
 0618:         # Sample up to max_features without replacement using a
+
 0619:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
+
 0620:         # `f_j` to compute a permutation of the `features` array).
+
 0621:         #
+
 0622:         # Skip the CPU intensive evaluation of the impurity criterion for
+
 0623:         # features that were already detected as constant (hence not suitable
+
 0624:         # for good splitting) by ancestor nodes and save the information on
+
 0625:         # newly discovered constant features to spare computation on descendant
+
 0626:         # nodes.
+
+0627:         while (f_i > n_total_constants and  # Stop early if remaining features
  while (1) {
     __pyx_t_6 = ((__pyx_v_f_i > __pyx_v_n_total_constants) != 0);
     if (__pyx_t_6) {
@@ -2146,176 +2220,176 @@
       __pyx_t_5 = __pyx_t_6;
       goto __pyx_L5_bool_binop_done;
     }
-
 0628:                                             # are constant
-
+0629:                 (n_visited_features < max_features or
+
 0628:                                             # are constant
+
+0629:                 (n_visited_features < max_features or
    __pyx_t_6 = ((__pyx_v_n_visited_features < __pyx_v_max_features) != 0);
     if (!__pyx_t_6) {
     } else {
       __pyx_t_5 = __pyx_t_6;
       goto __pyx_L5_bool_binop_done;
     }
-
 0630:                  # At least one drawn feature must be non-constant
-
+0631:                  n_visited_features <= n_found_constants + n_drawn_constants)):
+
 0630:                  # At least one drawn feature must be non-constant
+
+0631:                  n_visited_features <= n_found_constants + n_drawn_constants)):
    __pyx_t_6 = ((__pyx_v_n_visited_features <= (__pyx_v_n_found_constants + __pyx_v_n_drawn_constants)) != 0);
     __pyx_t_5 = __pyx_t_6;
     __pyx_L5_bool_binop_done:;
     if (!__pyx_t_5) break;
-
+0632:             n_visited_features += 1
+
+0632:             n_visited_features += 1
    __pyx_v_n_visited_features = (__pyx_v_n_visited_features + 1);
 
 0633: 
-
 0634:             # Loop invariant: elements of features in
-
 0635:             # - [:n_drawn_constant[ holds drawn and known constant features;
-
 0636:             # - [n_drawn_constant:n_known_constant[ holds known constant
-
 0637:             #   features that haven't been drawn yet;
-
 0638:             # - [n_known_constant:n_total_constant[ holds newly found constant
-
 0639:             #   features;
-
 0640:             # - [n_total_constant:f_i[ holds features that haven't been drawn
-
 0641:             #   yet and aren't constant a priori.
-
 0642:             # - [f_i:n_features[ holds features that have been drawn
-
 0643:             #   and aren't constant.
+
 0634:             # Loop invariant: elements of features in
+
 0635:             # - [:n_drawn_constant[ holds drawn and known constant features;
+
 0636:             # - [n_drawn_constant:n_known_constant[ holds known constant
+
 0637:             #   features that haven't been drawn yet;
+
 0638:             # - [n_known_constant:n_total_constant[ holds newly found constant
+
 0639:             #   features;
+
 0640:             # - [n_total_constant:f_i[ holds features that haven't been drawn
+
 0641:             #   yet and aren't constant a priori.
+
 0642:             # - [f_i:n_features[ holds features that have been drawn
+
 0643:             #   and aren't constant.
 0644: 
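
A schematic Python rendering of the constrained draw that follows, with random.randrange standing in for sklearn's rand_int; it honors the interval invariant documented above (drawn constants first, then known constants, then newly found constants, then undrawn candidates):

    import random

    def draw_candidate_feature(features, f_i, n_drawn_constants,
                               n_known_constants, n_found_constants):
        f_j = random.randrange(n_drawn_constants, f_i - n_found_constants)
        if f_j < n_known_constants:
            # Drew a known-constant feature: rotate it into the
            # drawn-constant prefix and report no candidate.
            features[n_drawn_constants], features[f_j] = \
                features[f_j], features[n_drawn_constants]
            return None, n_drawn_constants + 1
        # Otherwise skip over the newly-found-constant block.
        f_j += n_found_constants
        return features[f_j], n_drawn_constants
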
-
 0645:             # Draw a feature at random
-
+0646:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
+
 0645:             # Draw a feature at random
+
+0646:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
    __pyx_v_f_j = __pyx_f_7sklearn_4tree_6_utils_rand_int(__pyx_v_n_drawn_constants, (__pyx_v_f_i - __pyx_v_n_found_constants), __pyx_v_random_state);
-
 0647:                            random_state)
+
 0647:                            random_state)
 0648: 
-
+0649:             if f_j < n_known_constants:
+
+0649:             if f_j < n_known_constants:
    __pyx_t_5 = ((__pyx_v_f_j < __pyx_v_n_known_constants) != 0);
     if (__pyx_t_5) {
 /* … */
       goto __pyx_L8;
     }
-
 0650:                 # f_j in the interval [n_drawn_constants, n_known_constants[
-
+0651:                 features[n_drawn_constants], features[f_j] = features[f_j], features[n_drawn_constants]
+
 0650:                 # f_j in the interval [n_drawn_constants, n_known_constants[
+
+0651:                 features[n_drawn_constants], features[f_j] = features[f_j], features[n_drawn_constants]
      __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
       __pyx_t_7 = (__pyx_v_features[__pyx_v_n_drawn_constants]);
       (__pyx_v_features[__pyx_v_n_drawn_constants]) = __pyx_t_2;
       (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_7;
-
+0652:                 n_drawn_constants += 1
+
+0652:                 n_drawn_constants += 1
      __pyx_v_n_drawn_constants = (__pyx_v_n_drawn_constants + 1);
 
 0653: 
-
 0654:             else:
-
 0655:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
-
+0656:                 f_j += n_found_constants
+
 0654:             else:
+
 0655:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
+
+0656:                 f_j += n_found_constants
    /*else*/ {
       __pyx_v_f_j = (__pyx_v_f_j + __pyx_v_n_found_constants);
-
 0657:                 # f_j in the interval [n_total_constants, f_i[
+
 0657:                 # f_j in the interval [n_total_constants, f_i[
 0658: 
-
+0659:                 current.feature = features[f_j]
+
+0659:                 current.feature = features[f_j]
      __pyx_v_current.feature = (__pyx_v_features[__pyx_v_f_j]);
 
 0660: 
-
 0661:                 # Find min, max
-
+0662:                 min_feature_value = self.X[samples[start], current.feature]
+
 0661:                 # Find min, max
+
+0662:                 min_feature_value = self.X[samples[start], current.feature]
      if (unlikely(!__pyx_v_self->__pyx_base.X.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 662, __pyx_L1_error)}
       __pyx_t_8 = (__pyx_v_samples[__pyx_v_start]);
       __pyx_t_9 = __pyx_v_current.feature;
       __pyx_v_min_feature_value = (*((__pyx_t_7sklearn_4tree_5_tree_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_self->__pyx_base.X.data + __pyx_t_8 * __pyx_v_self->__pyx_base.X.strides[0]) ) + __pyx_t_9 * __pyx_v_self->__pyx_base.X.strides[1]) )));
-
+0663:                 max_feature_value = min_feature_value
+
+0663:                 max_feature_value = min_feature_value
      __pyx_v_max_feature_value = __pyx_v_min_feature_value;
-
+0664:                 Xf[start] = min_feature_value
+
+0664:                 Xf[start] = min_feature_value
      (__pyx_v_Xf[__pyx_v_start]) = __pyx_v_min_feature_value;
 
 0665: 
-
+0666:                 for p in range(start + 1, end):
+
+0666:                 for p in range(start + 1, end):
      __pyx_t_7 = __pyx_v_end;
       __pyx_t_2 = __pyx_t_7;
       for (__pyx_t_10 = (__pyx_v_start + 1); __pyx_t_10 < __pyx_t_2; __pyx_t_10+=1) {
         __pyx_v_p = __pyx_t_10;
-
+0667:                     current_feature_value = self.X[samples[p], current.feature]
+
+0667:                     current_feature_value = self.X[samples[p], current.feature]
        if (unlikely(!__pyx_v_self->__pyx_base.X.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 667, __pyx_L1_error)}
         __pyx_t_9 = (__pyx_v_samples[__pyx_v_p]);
         __pyx_t_8 = __pyx_v_current.feature;
         __pyx_v_current_feature_value = (*((__pyx_t_7sklearn_4tree_5_tree_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_self->__pyx_base.X.data + __pyx_t_9 * __pyx_v_self->__pyx_base.X.strides[0]) ) + __pyx_t_8 * __pyx_v_self->__pyx_base.X.strides[1]) )));
-
+0668:                     Xf[p] = current_feature_value
+
+0668:                     Xf[p] = current_feature_value
        (__pyx_v_Xf[__pyx_v_p]) = __pyx_v_current_feature_value;
 
 0669: 
-
+0670:                     if current_feature_value < min_feature_value:
+
+0670:                     if current_feature_value < min_feature_value:
        __pyx_t_5 = ((__pyx_v_current_feature_value < __pyx_v_min_feature_value) != 0);
         if (__pyx_t_5) {
 /* … */
           goto __pyx_L11;
         }
-
+0671:                         min_feature_value = current_feature_value
+
+0671:                         min_feature_value = current_feature_value
          __pyx_v_min_feature_value = __pyx_v_current_feature_value;
-
+0672:                     elif current_feature_value > max_feature_value:
+
+0672:                     elif current_feature_value > max_feature_value:
        __pyx_t_5 = ((__pyx_v_current_feature_value > __pyx_v_max_feature_value) != 0);
         if (__pyx_t_5) {
 /* … */
         }
         __pyx_L11:;
       }
-
+0673:                         max_feature_value = current_feature_value
+
+0673:                         max_feature_value = current_feature_value
          __pyx_v_max_feature_value = __pyx_v_current_feature_value;
 
 0674: 
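
The scan above caches the feature column into Xf while tracking its range; equivalently, under the assumption that X behaves like a row-indexable 2-D array:

    def feature_range(X, samples, start, end, feature, Xf):
        # One pass: copy the column values for this node's samples into Xf
        # and return (min, max), which decide whether the feature is
        # (near-)constant on the node.
        lo = hi = X[samples[start]][feature]
        Xf[start] = lo
        for p in range(start + 1, end):
            v = X[samples[p]][feature]
            Xf[p] = v
            if v < lo:
                lo = v
            elif v > hi:
                hi = v
        return lo, hi
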
-
+0675:                 if max_feature_value <= min_feature_value + FEATURE_THRESHOLD:
+
+0675:                 if max_feature_value <= min_feature_value + FEATURE_THRESHOLD:
      __pyx_t_5 = ((__pyx_v_max_feature_value <= (__pyx_v_min_feature_value + __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD)) != 0);
       if (__pyx_t_5) {
 /* … */
         goto __pyx_L12;
       }
-
+0676:                     features[f_j], features[n_total_constants] = features[n_total_constants], current.feature
+
+0676:                     features[f_j], features[n_total_constants] = features[n_total_constants], current.feature
        __pyx_t_7 = (__pyx_v_features[__pyx_v_n_total_constants]);
         __pyx_t_2 = __pyx_v_current.feature;
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_7;
         (__pyx_v_features[__pyx_v_n_total_constants]) = __pyx_t_2;
 
 0677: 
-
+0678:                     n_found_constants += 1
+
+0678:                     n_found_constants += 1
        __pyx_v_n_found_constants = (__pyx_v_n_found_constants + 1);
-
+0679:                     n_total_constants += 1
+
+0679:                     n_total_constants += 1
        __pyx_v_n_total_constants = (__pyx_v_n_total_constants + 1);
 
 0680: 
-
 0681:                 else:
-
+0682:                     f_i -= 1
+
 0681:                 else:
+
+0682:                     f_i -= 1
      /*else*/ {
         __pyx_v_f_i = (__pyx_v_f_i - 1);
-
+0683:                     features[f_i], features[f_j] = features[f_j], features[f_i]
+
+0683:                     features[f_i], features[f_j] = features[f_j], features[f_i]
        __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
         __pyx_t_7 = (__pyx_v_features[__pyx_v_f_i]);
         (__pyx_v_features[__pyx_v_f_i]) = __pyx_t_2;
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_7;
 
 0684: 
-
 0685:                     # Draw a random threshold
-
+0686:                     current.threshold = rand_uniform(min_feature_value,
+
 0685:                     # Draw a random threshold
+
+0686:                     current.threshold = rand_uniform(min_feature_value,
        __pyx_v_current.threshold = __pyx_f_7sklearn_4tree_6_utils_rand_uniform(__pyx_v_min_feature_value, __pyx_v_max_feature_value, __pyx_v_random_state);
-
 0687:                                                      max_feature_value,
-
 0688:                                                      random_state)
+
 0687:                                                      max_feature_value,
+
 0688:                                                      random_state)
 0689: 
-
+0690:                     if current.threshold == max_feature_value:
+
+0690:                     if current.threshold == max_feature_value:
        __pyx_t_5 = ((__pyx_v_current.threshold == __pyx_v_max_feature_value) != 0);
         if (__pyx_t_5) {
 /* … */
         }
-
+0691:                         current.threshold = min_feature_value
+
+0691:                         current.threshold = min_feature_value
          __pyx_v_current.threshold = __pyx_v_min_feature_value;
 
 0692: 
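
The clamp above guards a floating-point edge case: if the uniform draw lands exactly on max_feature_value, every sample would satisfy Xf <= threshold and the right child would be empty, so the threshold snaps to the minimum instead. As a sketch, with random.uniform in place of rand_uniform:

    import random

    def draw_threshold(min_value, max_value):
        threshold = random.uniform(min_value, max_value)
        if threshold == max_value:
            threshold = min_value  # keep the split non-degenerate
        return threshold
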
-
 0693:                     # Partition
-
+0694:                     p, partition_end = start, end
+
 0693:                     # Partition
+
+0694:                     p, partition_end = start, end
        __pyx_t_7 = __pyx_v_start;
         __pyx_t_2 = __pyx_v_end;
         __pyx_v_p = __pyx_t_7;
         __pyx_v_partition_end = __pyx_t_2;
-
+0695:                     while p < partition_end:
+
+0695:                     while p < partition_end:
        while (1) {
           __pyx_t_5 = ((__pyx_v_p < __pyx_v_partition_end) != 0);
           if (!__pyx_t_5) break;
-
+0696:                         if Xf[p] <= current.threshold:
+
+0696:                         if Xf[p] <= current.threshold:
          __pyx_t_5 = (((__pyx_v_Xf[__pyx_v_p]) <= __pyx_v_current.threshold) != 0);
           if (__pyx_t_5) {
 /* … */
             goto __pyx_L16;
           }
-
+0697:                             p += 1
+
+0697:                             p += 1
            __pyx_v_p = (__pyx_v_p + 1);
-
 0698:                         else:
-
+0699:                             partition_end -= 1
+
 0698:                         else:
+
+0699:                             partition_end -= 1
          /*else*/ {
             __pyx_v_partition_end = (__pyx_v_partition_end - 1);
 
 0700: 
-
+0701:                             Xf[p], Xf[partition_end] = Xf[partition_end], Xf[p]
+
+0701:                             Xf[p], Xf[partition_end] = Xf[partition_end], Xf[p]
            __pyx_t_11 = (__pyx_v_Xf[__pyx_v_partition_end]);
             __pyx_t_12 = (__pyx_v_Xf[__pyx_v_p]);
             (__pyx_v_Xf[__pyx_v_p]) = __pyx_t_11;
             (__pyx_v_Xf[__pyx_v_partition_end]) = __pyx_t_12;
-
+0702:                             samples[p], samples[partition_end] = samples[partition_end], samples[p]
+
+0702:                             samples[p], samples[partition_end] = samples[partition_end], samples[p]
            __pyx_t_2 = (__pyx_v_samples[__pyx_v_partition_end]);
             __pyx_t_7 = (__pyx_v_samples[__pyx_v_p]);
             (__pyx_v_samples[__pyx_v_p]) = __pyx_t_2;
@@ -2324,11 +2398,11 @@
           __pyx_L16:;
         }
 
 0703: 
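
The loop above is a two-pointer partition; in Python terms, assuming parallel lists as before:

    def partition(Xf, samples, start, end, threshold):
        # Move samples with Xf[p] <= threshold to the left and the rest to
        # the right; the returned position is the split point (current.pos).
        p, partition_end = start, end
        while p < partition_end:
            if Xf[p] <= threshold:
                p += 1
            else:
                partition_end -= 1
                Xf[p], Xf[partition_end] = Xf[partition_end], Xf[p]
                samples[p], samples[partition_end] = \
                    samples[partition_end], samples[p]
        return partition_end
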
-
+0704:                     current.pos = partition_end
+
+0704:                     current.pos = partition_end
        __pyx_v_current.pos = __pyx_v_partition_end;
 
 0705: 
-
 0706:                     # Reject if min_samples_leaf is not guaranteed
-
+0707:                     if (((current.pos - start) < min_samples_leaf) or
+
 0706:                     # Reject if min_samples_leaf is not guaranteed
+
+0707:                     if (((current.pos - start) < min_samples_leaf) or
        __pyx_t_6 = (((__pyx_v_current.pos - __pyx_v_start) < __pyx_v_min_samples_leaf) != 0);
         if (!__pyx_t_6) {
         } else {
@@ -2339,21 +2413,21 @@
         if (__pyx_t_5) {
 /* … */
         }
-
+0708:                             ((end - current.pos) < min_samples_leaf)):
+
+0708:                             ((end - current.pos) < min_samples_leaf)):
        __pyx_t_6 = (((__pyx_v_end - __pyx_v_current.pos) < __pyx_v_min_samples_leaf) != 0);
         __pyx_t_5 = __pyx_t_6;
         __pyx_L18_bool_binop_done:;
-
+0709:                         continue
+
+0709:                         continue
          goto __pyx_L3_continue;
 
 0710: 
-
 0711:                     # Evaluate split
-
+0712:                     self.criterion.reset()
+
 0711:                     # Evaluate split
+
+0712:                     self.criterion.reset()
        __pyx_t_13 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_13 == ((int)-1))) __PYX_ERR(0, 712, __pyx_L1_error)
-
+0713:                     self.criterion.update(current.pos)
+
+0713:                     self.criterion.update(current.pos)
        __pyx_t_13 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_current.pos); if (unlikely(__pyx_t_13 == ((int)-1))) __PYX_ERR(0, 713, __pyx_L1_error)
 
 0714: 
-
 0715:                     # Reject if min_weight_leaf is not satisfied
-
+0716:                     if ((self.criterion.weighted_n_left < min_weight_leaf) or
+
 0715:                     # Reject if min_weight_leaf is not satisfied
+
+0716:                     if ((self.criterion.weighted_n_left < min_weight_leaf) or
        __pyx_t_6 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_left < __pyx_v_min_weight_leaf) != 0);
         if (!__pyx_t_6) {
         } else {
@@ -2364,17 +2438,17 @@
         if (__pyx_t_5) {
 /* … */
         }
-
+0717:                             (self.criterion.weighted_n_right < min_weight_leaf)):
+
+0717:                             (self.criterion.weighted_n_right < min_weight_leaf)):
        __pyx_t_6 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_right < __pyx_v_min_weight_leaf) != 0);
         __pyx_t_5 = __pyx_t_6;
         __pyx_L21_bool_binop_done:;
-
+0718:                         continue
+
+0718:                         continue
          goto __pyx_L3_continue;
 
 0719: 
-
+0720:                     current_proxy_improvement = self.criterion.proxy_impurity_improvement()
+
+0720:                     current_proxy_improvement = self.criterion.proxy_impurity_improvement()
        __pyx_v_current_proxy_improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->proxy_impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion);
 
 0721: 
-
+0722:                     if current_proxy_improvement > best_proxy_improvement:
+
+0722:                     if current_proxy_improvement > best_proxy_improvement:
        __pyx_t_5 = ((__pyx_v_current_proxy_improvement > __pyx_v_best_proxy_improvement) != 0);
         if (__pyx_t_5) {
 /* … */
@@ -2385,33 +2459,33 @@
     __pyx_L8:;
     __pyx_L3_continue:;
   }
-
+0723:                         best_proxy_improvement = current_proxy_improvement
+
+0723:                         best_proxy_improvement = current_proxy_improvement
          __pyx_v_best_proxy_improvement = __pyx_v_current_proxy_improvement;
-
+0724:                         best = current  # copy
+
+0724:                         best = current  # copy
          __pyx_v_best = __pyx_v_current;
 
 0725: 
-
 0726:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
-
+0727:         if best.pos < end:
+
 0726:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
+
+0727:         if best.pos < end:
  __pyx_t_5 = ((__pyx_v_best.pos < __pyx_v_end) != 0);
   if (__pyx_t_5) {
 /* … */
   }
-
+0728:             if current.feature != best.feature:
+
+0728:             if current.feature != best.feature:
    __pyx_t_5 = ((__pyx_v_current.feature != __pyx_v_best.feature) != 0);
     if (__pyx_t_5) {
 /* … */
     }
-
+0729:                 p, partition_end = start, end
+
+0729:                 p, partition_end = start, end
      __pyx_t_7 = __pyx_v_start;
       __pyx_t_2 = __pyx_v_end;
       __pyx_v_p = __pyx_t_7;
       __pyx_v_partition_end = __pyx_t_2;
 
 0730: 
-
+0731:                 while p < partition_end:
+
+0731:                 while p < partition_end:
      while (1) {
         __pyx_t_5 = ((__pyx_v_p < __pyx_v_partition_end) != 0);
         if (!__pyx_t_5) break;
-
+0732:                     if self.X[samples[p], best.feature] <= best.threshold:
+
+0732:                     if self.X[samples[p], best.feature] <= best.threshold:
        if (unlikely(!__pyx_v_self->__pyx_base.X.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 732, __pyx_L1_error)}
         __pyx_t_8 = (__pyx_v_samples[__pyx_v_p]);
         __pyx_t_9 = __pyx_v_best.feature;
@@ -2420,14 +2494,14 @@
 /* … */
           goto __pyx_L28;
         }
-
+0733:                         p += 1
+
+0733:                         p += 1
          __pyx_v_p = (__pyx_v_p + 1);
-
 0734:                     else:
-
+0735:                         partition_end -= 1
+
 0734:                     else:
+
+0735:                         partition_end -= 1
        /*else*/ {
           __pyx_v_partition_end = (__pyx_v_partition_end - 1);
 
 0736: 
-
+0737:                         samples[p], samples[partition_end] = samples[partition_end], samples[p]
+
+0737:                         samples[p], samples[partition_end] = samples[partition_end], samples[p]
          __pyx_t_2 = (__pyx_v_samples[__pyx_v_partition_end]);
           __pyx_t_7 = (__pyx_v_samples[__pyx_v_p]);
           (__pyx_v_samples[__pyx_v_p]) = __pyx_t_2;
@@ -2436,39 +2510,39 @@
         __pyx_L28:;
       }
 
 0738: 
-
+0739:             self.criterion.reset()
+
+0739:             self.criterion.reset()
    __pyx_t_13 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_13 == ((int)-1))) __PYX_ERR(0, 739, __pyx_L1_error)
-
+0740:             self.criterion.update(best.pos)
+
+0740:             self.criterion.update(best.pos)
    __pyx_t_13 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_best.pos); if (unlikely(__pyx_t_13 == ((int)-1))) __PYX_ERR(0, 740, __pyx_L1_error)
-
+0741:             best.improvement = self.criterion.impurity_improvement(impurity)
+
+0741:             best.improvement = self.criterion.impurity_improvement(impurity)
    __pyx_v_best.improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_impurity);
-
+0742:             self.criterion.children_impurity(&best.impurity_left,
+
+0742:             self.criterion.children_impurity(&best.impurity_left,
    ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->children_impurity(__pyx_v_self->__pyx_base.__pyx_base.criterion, (&__pyx_v_best.impurity_left), (&__pyx_v_best.impurity_right));
-
 0743:                                              &best.impurity_right)
+
 0743:                                              &best.impurity_right)
 0744: 
-
 0745:         # Respect invariant for constant features: the original order of
-
 0746:         # elements in features[:n_known_constants] must be preserved for sibling
-
 0747:         # and child nodes
-
+0748:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
+
 0745:         # Respect invariant for constant features: the original order of
+
 0746:         # elements in features[:n_known_constants] must be preserved for sibling
+
 0747:         # and child nodes
+
+0748:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
  (void)(memcpy(__pyx_v_features, __pyx_v_constant_features, ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_known_constants)));
 
 0749: 
-
 0750:         # Copy newly found constant features
-
+0751:         memcpy(constant_features + n_known_constants,
+
 0750:         # Copy newly found constant features
+
+0751:         memcpy(constant_features + n_known_constants,
  (void)(memcpy((__pyx_v_constant_features + __pyx_v_n_known_constants), (__pyx_v_features + __pyx_v_n_known_constants), ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_found_constants)));
-
 0752:                features + n_known_constants,
-
 0753:                sizeof(SIZE_t) * n_found_constants)
+
 0752:                features + n_known_constants,
+
 0753:                sizeof(SIZE_t) * n_found_constants)
 0754: 
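
The two memcpy calls above keep the constant-feature bookkeeping consistent across the tree: restore the stable order of the known-constant prefix, then append the constants discovered at this node. In list form:

    def restore_constant_features(features, constant_features,
                                  n_known_constants, n_found_constants):
        # Sibling and child nodes rely on features[:n_known_constants]
        # keeping its original order.
        features[:n_known_constants] = constant_features[:n_known_constants]
        hi = n_known_constants + n_found_constants
        constant_features[n_known_constants:hi] = \
            features[n_known_constants:hi]
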
-
 0755:         # Return values
-
+0756:         split[0] = best
+
 0755:         # Return values
+
+0756:         split[0] = best
  (__pyx_v_split[0]) = __pyx_v_best;
-
+0757:         n_constant_features[0] = n_total_constants
+
+0757:         n_constant_features[0] = n_total_constants
  (__pyx_v_n_constant_features[0]) = __pyx_v_n_total_constants;
-
+0758:         return 0
+
+0758:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0759: 
 0760: 
-
+0761: cdef class BaseSparseSplitter(Splitter):
+
+0761: cdef class BaseSparseSplitter(Splitter):
struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter {
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter __pyx_base;
   __pyx_t_7sklearn_4tree_5_tree_DTYPE_t *X_data;
@@ -2488,17 +2562,17 @@
 static CYTHON_INLINE __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter__partition(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *, double, __pyx_t_7sklearn_4tree_5_tree_SIZE_t, __pyx_t_7sklearn_4tree_5_tree_SIZE_t, __pyx_t_7sklearn_4tree_5_tree_SIZE_t);
 static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_extract_nnz(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *, __pyx_t_7sklearn_4tree_5_tree_SIZE_t, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *, int *);
 
-
 0762:     # The sparse splitter works only with csc sparse matrix format
-
 0763:     cdef DTYPE_t* X_data
-
 0764:     cdef INT32_t* X_indices
-
 0765:     cdef INT32_t* X_indptr
+
 0762:     # The sparse splitter works only with csc sparse matrix format
+
 0763:     cdef DTYPE_t* X_data
+
 0764:     cdef INT32_t* X_indices
+
 0765:     cdef INT32_t* X_indptr
 0766: 
-
 0767:     cdef SIZE_t n_total_samples
+
 0767:     cdef SIZE_t n_total_samples
 0768: 
-
 0769:     cdef SIZE_t* index_to_samples
-
 0770:     cdef SIZE_t* sorted_samples
+
 0769:     cdef SIZE_t* index_to_samples
+
 0770:     cdef SIZE_t* sorted_samples
 0771: 
-
+0772:     def __cinit__(self, Criterion criterion, SIZE_t max_features,
+
+0772:     def __cinit__(self, Criterion criterion, SIZE_t max_features,
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -2611,26 +2685,26 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0773:                   SIZE_t min_samples_leaf, double min_weight_leaf,
-
 0774:                   object random_state):
-
 0775:         # Parent __cinit__ is automatically called
+
 0773:                   SIZE_t min_samples_leaf, double min_weight_leaf,
+
 0774:                   object random_state):
+
 0775:         # Parent __cinit__ is automatically called
 0776: 
-
+0777:         self.X_data = NULL
+
+0777:         self.X_data = NULL
  __pyx_v_self->X_data = NULL;
-
+0778:         self.X_indices = NULL
+
+0778:         self.X_indices = NULL
  __pyx_v_self->X_indices = NULL;
-
+0779:         self.X_indptr = NULL
+
+0779:         self.X_indptr = NULL
  __pyx_v_self->X_indptr = NULL;
 
 0780: 
-
+0781:         self.n_total_samples = 0
+
+0781:         self.n_total_samples = 0
  __pyx_v_self->n_total_samples = 0;
 
 0782: 
-
+0783:         self.index_to_samples = NULL
+
+0783:         self.index_to_samples = NULL
  __pyx_v_self->index_to_samples = NULL;
-
+0784:         self.sorted_samples = NULL
+
+0784:         self.sorted_samples = NULL
  __pyx_v_self->sorted_samples = NULL;
 
 0785: 
-
+0786:     def __dealloc__(self):
+
+0786:     def __dealloc__(self):
/* Python wrapper */
 static void __pyx_pw_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
 static void __pyx_pw_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_3__dealloc__(PyObject *__pyx_v_self) {
@@ -2649,13 +2723,13 @@
   /* function exit code */
   __Pyx_RefNannyFinishContext();
 }
-
 0787:         """Deallocate memory."""
-
+0788:         free(self.index_to_samples)
+
 0787:         """Deallocate memory."""
+
+0788:         free(self.index_to_samples)
  free(__pyx_v_self->index_to_samples);
-
+0789:         free(self.sorted_samples)
+
+0789:         free(self.sorted_samples)
  free(__pyx_v_self->sorted_samples);
 
 0790: 
-
+0791:     cdef int init(self,
+
+0791:     cdef int init(self,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_init(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *__pyx_v_self, PyObject *__pyx_v_X, __Pyx_memviewslice __pyx_v_y, __pyx_t_7sklearn_4tree_5_tree_DOUBLE_t *__pyx_v_sample_weight) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n_samples;
@@ -2713,19 +2787,19 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0792:                   object X,
-
 0793:                   const DOUBLE_t[:, ::1] y,
-
 0794:                   DOUBLE_t* sample_weight) except -1:
-
 0795:         """Initialize the splitter
+
 0792:                   object X,
+
 0793:                   const DOUBLE_t[:, ::1] y,
+
 0794:                   DOUBLE_t* sample_weight) except -1:
+
 0795:         """Initialize the splitter
 0796: 
-
 0797:         Returns -1 in case of failure to allocate memory (and raises MemoryError)
-
 0798:         or 0 otherwise.
-
 0799:         """
-
 0800:         # Call parent init
-
+0801:         Splitter.init(self, X, y, sample_weight)
+
 0797:         Returns -1 in case of failure to allocate memory (and raises MemoryError)
+
 0798:         or 0 otherwise.
+
 0799:         """
+
 0800:         # Call parent init
+
+0801:         Splitter.init(self, X, y, sample_weight)
  __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_9_splitter_8Splitter_init(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_self), __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 801, __pyx_L1_error)
 
 0802: 
-
+0803:         if not isinstance(X, csc_matrix):
+
+0803:         if not isinstance(X, csc_matrix):
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_csc_matrix); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 803, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = PyObject_IsInstance(__pyx_v_X, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 803, __pyx_L1_error)
@@ -2734,7 +2808,7 @@
   if (unlikely(__pyx_t_4)) {
 /* … */
   }
-
+0804:             raise ValueError("X should be in csc format")
+
+0804:             raise ValueError("X should be in csc format")
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 804, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     __Pyx_Raise(__pyx_t_2, 0, 0, 0);
@@ -2745,15 +2819,15 @@
   __Pyx_GOTREF(__pyx_tuple__5);
   __Pyx_GIVEREF(__pyx_tuple__5);
 
 0805: 
-
+0806:         cdef SIZE_t* samples = self.samples
+
+0806:         cdef SIZE_t* samples = self.samples
  __pyx_t_5 = __pyx_v_self->__pyx_base.samples;
   __pyx_v_samples = __pyx_t_5;
-
+0807:         cdef SIZE_t n_samples = self.n_samples
+
+0807:         cdef SIZE_t n_samples = self.n_samples
  __pyx_t_6 = __pyx_v_self->__pyx_base.n_samples;
   __pyx_v_n_samples = __pyx_t_6;
 
 0808: 
-
 0809:         # Initialize X
-
+0810:         cdef np.ndarray[dtype=DTYPE_t, ndim=1] data = X.data
+
 0809:         # Initialize X
+
+0810:         cdef np.ndarray[dtype=DTYPE_t, ndim=1] data = X.data
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 810, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 810, __pyx_L1_error)
@@ -2769,7 +2843,7 @@
   __pyx_t_7 = 0;
   __pyx_v_data = ((PyArrayObject *)__pyx_t_2);
   __pyx_t_2 = 0;
-
+0811:         cdef np.ndarray[dtype=INT32_t, ndim=1] indices = X.indices
+
+0811:         cdef np.ndarray[dtype=INT32_t, ndim=1] indices = X.indices
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 811, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 811, __pyx_L1_error)
@@ -2785,7 +2859,7 @@
   __pyx_t_8 = 0;
   __pyx_v_indices = ((PyArrayObject *)__pyx_t_2);
   __pyx_t_2 = 0;
-
+0812:         cdef np.ndarray[dtype=INT32_t, ndim=1] indptr = X.indptr
+
+0812:         cdef np.ndarray[dtype=INT32_t, ndim=1] indptr = X.indptr
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indptr); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 812, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 812, __pyx_L1_error)
@@ -2801,7 +2875,7 @@
   __pyx_t_9 = 0;
   __pyx_v_indptr = ((PyArrayObject *)__pyx_t_2);
   __pyx_t_2 = 0;
-
+0813:         cdef SIZE_t n_total_samples = X.shape[0]
+
+0813:         cdef SIZE_t n_total_samples = X.shape[0]
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 813, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_10 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 813, __pyx_L1_error)
@@ -2811,47 +2885,47 @@
   __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
   __pyx_v_n_total_samples = __pyx_t_6;
 
 0814: 
-
+0815:         self.X_data = <DTYPE_t*> data.data
+
+0815:         self.X_data = <DTYPE_t*> data.data
  __pyx_v_self->X_data = ((__pyx_t_7sklearn_4tree_5_tree_DTYPE_t *)__pyx_v_data->data);
-
+0816:         self.X_indices = <INT32_t*> indices.data
+
+0816:         self.X_indices = <INT32_t*> indices.data
  __pyx_v_self->X_indices = ((__pyx_t_7sklearn_4tree_5_tree_INT32_t *)__pyx_v_indices->data);
-
+0817:         self.X_indptr = <INT32_t*> indptr.data
+
+0817:         self.X_indptr = <INT32_t*> indptr.data
  __pyx_v_self->X_indptr = ((__pyx_t_7sklearn_4tree_5_tree_INT32_t *)__pyx_v_indptr->data);
-
+0818:         self.n_total_samples = n_total_samples
+
+0818:         self.n_total_samples = n_total_samples
  __pyx_v_self->n_total_samples = __pyx_v_n_total_samples;
 
 0819: 
-
 0820:         # Initialize auxiliary array used to perform split
-
+0821:         safe_realloc(&self.index_to_samples, n_total_samples)
+
 0820:         # Initialize auxiliary array used to perform split
+
+0821:         safe_realloc(&self.index_to_samples, n_total_samples)
  __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->index_to_samples), __pyx_v_n_total_samples); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 821, __pyx_L1_error)
-
+0822:         safe_realloc(&self.sorted_samples, n_samples)
+
+0822:         safe_realloc(&self.sorted_samples, n_samples)
  __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->sorted_samples), __pyx_v_n_samples); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 822, __pyx_L1_error)
 
 0823: 
-
+0824:         cdef SIZE_t* index_to_samples = self.index_to_samples
+
+0824:         cdef SIZE_t* index_to_samples = self.index_to_samples
  __pyx_t_5 = __pyx_v_self->index_to_samples;
   __pyx_v_index_to_samples = __pyx_t_5;
-
 0825:         cdef SIZE_t p
-
+0826:         for p in range(n_total_samples):
+
 0825:         cdef SIZE_t p
+
+0826:         for p in range(n_total_samples):
  __pyx_t_6 = __pyx_v_n_total_samples;
   __pyx_t_11 = __pyx_t_6;
   for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
     __pyx_v_p = __pyx_t_12;
-
+0827:             index_to_samples[p] = -1
+
+0827:             index_to_samples[p] = -1
    (__pyx_v_index_to_samples[__pyx_v_p]) = -1;
   }
 
 0828: 
-
+0829:         for p in range(n_samples):
+
+0829:         for p in range(n_samples):
  __pyx_t_6 = __pyx_v_n_samples;
   __pyx_t_11 = __pyx_t_6;
   for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
     __pyx_v_p = __pyx_t_12;
-
+0830:             index_to_samples[samples[p]] = p
+
+0830:             index_to_samples[samples[p]] = p
    (__pyx_v_index_to_samples[(__pyx_v_samples[__pyx_v_p])]) = __pyx_v_p;
   }
-
+0831:         return 0
+
+0831:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0832: 
-
+0833:     cdef inline SIZE_t _partition(self, double threshold,
+
+0833:     cdef inline SIZE_t _partition(self, double threshold,
static CYTHON_INLINE __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter__partition(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *__pyx_v_self, double __pyx_v_threshold, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end_negative, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start_positive, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_zero_pos) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_p;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_partition_end;
@@ -2864,88 +2938,88 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0834:                                   SIZE_t end_negative, SIZE_t start_positive,
-
 0835:                                   SIZE_t zero_pos) nogil:
-
 0836:         """Partition samples[start:end] based on threshold."""
+
 0834:                                   SIZE_t end_negative, SIZE_t start_positive,
+
 0835:                                   SIZE_t zero_pos) nogil:
+
 0836:         """Partition samples[start:end] based on threshold."""
 0837: 
-
 0838:         cdef SIZE_t p
-
 0839:         cdef SIZE_t partition_end
+
 0838:         cdef SIZE_t p
+
 0839:         cdef SIZE_t partition_end
 0840: 
-
+0841:         cdef DTYPE_t* Xf = self.feature_values
+
+0841:         cdef DTYPE_t* Xf = self.feature_values
  __pyx_t_1 = __pyx_v_self->__pyx_base.feature_values;
   __pyx_v_Xf = __pyx_t_1;
-
+0842:         cdef SIZE_t* samples = self.samples
+
+0842:         cdef SIZE_t* samples = self.samples
  __pyx_t_2 = __pyx_v_self->__pyx_base.samples;
   __pyx_v_samples = __pyx_t_2;
-
+0843:         cdef SIZE_t* index_to_samples = self.index_to_samples
+
+0843:         cdef SIZE_t* index_to_samples = self.index_to_samples
  __pyx_t_2 = __pyx_v_self->index_to_samples;
   __pyx_v_index_to_samples = __pyx_t_2;
 
 0844: 
-
+0845:         if threshold < 0.:
+
+0845:         if threshold < 0.:
  __pyx_t_3 = ((__pyx_v_threshold < 0.) != 0);
   if (__pyx_t_3) {
 /* … */
     goto __pyx_L3;
   }
-
+0846:             p = self.start
+
+0846:             p = self.start
    __pyx_t_4 = __pyx_v_self->__pyx_base.start;
     __pyx_v_p = __pyx_t_4;
-
+0847:             partition_end = end_negative
+
+0847:             partition_end = end_negative
    __pyx_v_partition_end = __pyx_v_end_negative;
-
+0848:         elif threshold > 0.:
+
+0848:         elif threshold > 0.:
  __pyx_t_3 = ((__pyx_v_threshold > 0.) != 0);
   if (__pyx_t_3) {
 /* … */
     goto __pyx_L3;
   }
-
+0849:             p = start_positive
+
+0849:             p = start_positive
    __pyx_v_p = __pyx_v_start_positive;
-
+0850:             partition_end = self.end
+
+0850:             partition_end = self.end
    __pyx_t_4 = __pyx_v_self->__pyx_base.end;
     __pyx_v_partition_end = __pyx_t_4;
-
 0851:         else:
-
 0852:             # Data are already split
-
+0853:             return zero_pos
+
 0851:         else:
+
 0852:             # Data are already split
+
+0853:             return zero_pos
  /*else*/ {
     __pyx_r = __pyx_v_zero_pos;
     goto __pyx_L0;
   }
   __pyx_L3:;
 
 0854: 
-
+0855:         while p < partition_end:
+
+0855:         while p < partition_end:
  while (1) {
     __pyx_t_3 = ((__pyx_v_p < __pyx_v_partition_end) != 0);
     if (!__pyx_t_3) break;
-
+0856:             if Xf[p] <= threshold:
+
+0856:             if Xf[p] <= threshold:
    __pyx_t_3 = (((__pyx_v_Xf[__pyx_v_p]) <= __pyx_v_threshold) != 0);
     if (__pyx_t_3) {
 /* … */
       goto __pyx_L6;
     }
-
+0857:                 p += 1
+
+0857:                 p += 1
      __pyx_v_p = (__pyx_v_p + 1);
 
 0858: 
-
 0859:             else:
-
+0860:                 partition_end -= 1
+
 0859:             else:
+
+0860:                 partition_end -= 1
    /*else*/ {
       __pyx_v_partition_end = (__pyx_v_partition_end - 1);
 
 0861: 
-
+0862:                 Xf[p], Xf[partition_end] = Xf[partition_end], Xf[p]
+
+0862:                 Xf[p], Xf[partition_end] = Xf[partition_end], Xf[p]
      __pyx_t_5 = (__pyx_v_Xf[__pyx_v_partition_end]);
       __pyx_t_6 = (__pyx_v_Xf[__pyx_v_p]);
       (__pyx_v_Xf[__pyx_v_p]) = __pyx_t_5;
       (__pyx_v_Xf[__pyx_v_partition_end]) = __pyx_t_6;
-
+0863:                 sparse_swap(index_to_samples, samples, p, partition_end)
+
+0863:                 sparse_swap(index_to_samples, samples, p, partition_end)
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_sparse_swap(__pyx_v_index_to_samples, __pyx_v_samples, __pyx_v_p, __pyx_v_partition_end);
     }
     __pyx_L6:;
   }
 
 0864: 
-
+0865:         return partition_end
+
+0865:         return partition_end
  __pyx_r = __pyx_v_partition_end;
   goto __pyx_L0;
 
 0866: 
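
For readers skimming the annotated source above: `_partition` is a single-pass,
in-place partition. Because zeros are already grouped between the negative block
[start, end_negative) and the positive block [start_positive, end), a negative
threshold only needs to sweep the negative block, a positive threshold only the
positive block, and a zero threshold needs no work at all. A minimal plain-Python
sketch of the same logic (`sparse_swap` mirrors the helper defined later in this
file):

    def sparse_swap(index_to_samples, samples, pos_1, pos_2):
        """Swap two slots of samples and keep index_to_samples consistent."""
        samples[pos_1], samples[pos_2] = samples[pos_2], samples[pos_1]
        index_to_samples[samples[pos_1]] = pos_1
        index_to_samples[samples[pos_2]] = pos_2

    def partition(Xf, samples, index_to_samples, threshold,
                  start, end, end_negative, start_positive, zero_pos):
        """Move entries with Xf[p] <= threshold to the left; return split point."""
        if threshold < 0.0:
            p, partition_end = start, end_negative      # only negatives can move
        elif threshold > 0.0:
            p, partition_end = start_positive, end      # only positives can move
        else:
            return zero_pos                             # data already split at zero
        while p < partition_end:
            if Xf[p] <= threshold:
                p += 1
            else:
                partition_end -= 1
                Xf[p], Xf[partition_end] = Xf[partition_end], Xf[p]
                sparse_swap(index_to_samples, samples, p, partition_end)
        return partition_end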
-
+0867:     cdef inline void extract_nnz(self, SIZE_t feature,
+
+0867:     cdef inline void extract_nnz(self, SIZE_t feature,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_extract_nnz(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *__pyx_v_self, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_feature, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_end_negative, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_start_positive, int *__pyx_v_is_samples_sorted) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_indptr_start;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_indptr_end;
@@ -2954,84 +3028,84 @@
 /* … */
   /* function exit code */
 }
-
 0868:                                  SIZE_t* end_negative, SIZE_t* start_positive,
-
 0869:                                  bint* is_samples_sorted) nogil:
-
 0870:         """Extract and partition values for a given feature.
+
 0868:                                  SIZE_t* end_negative, SIZE_t* start_positive,
+
 0869:                                  bint* is_samples_sorted) nogil:
+
 0870:         """Extract and partition values for a given feature.
 0871: 
-
 0872:         The extracted values are partitioned between negative values
-
 0873:         Xf[start:end_negative[0]] and positive values Xf[start_positive[0]:end].
-
 0874:         The samples and index_to_samples are modified according to this
-
 0875:         partition.
+
 0872:         The extracted values are partitioned between negative values
+
 0873:         Xf[start:end_negative[0]] and positive values Xf[start_positive[0]:end].
+
 0874:         The samples and index_to_samples are modified according to this
+
 0875:         partition.
 0876: 
-
 0877:         The extraction corresponds to the intersection between the arrays
-
 0878:         X_indices[indptr_start:indptr_end] and samples[start:end].
-
 0879:         This is done efficiently using either an index_to_samples based approach
-
 0880:         or binary search based approach.
+
 0877:         The extraction corresponds to the intersection between the arrays
+
 0878:         X_indices[indptr_start:indptr_end] and samples[start:end].
+
 0879:         This is done efficiently using either an index_to_samples based approach
+
 0880:         or binary search based approach.
 0881: 
-
 0882:         Parameters
-
 0883:         ----------
-
 0884:         feature : SIZE_t,
-
 0885:             Index of the feature we want to extract non zero values.
+
 0882:         Parameters
+
 0883:         ----------
+
 0884:         feature : SIZE_t,
+
 0885:             Index of the feature we want to extract non zero values.
 0886: 
 0887: 
-
 0888:         end_negative, start_positive : SIZE_t*, SIZE_t*,
-
 0889:             Return extracted non zero values in self.samples[start:end] where
-
 0890:             negative values are in self.feature_values[start:end_negative[0]]
-
 0891:             and positive values are in
-
 0892:             self.feature_values[start_positive[0]:end].
+
 0888:         end_negative, start_positive : SIZE_t*, SIZE_t*,
+
 0889:             Return extracted non zero values in self.samples[start:end] where
+
 0890:             negative values are in self.feature_values[start:end_negative[0]]
+
 0891:             and positive values are in
+
 0892:             self.feature_values[start_positive[0]:end].
 0893: 
-
 0894:         is_samples_sorted : bint*,
-
 0895:             If is_samples_sorted, then self.sorted_samples[start:end] will be
-
 0896:             the sorted version of self.samples[start:end].
+
 0894:         is_samples_sorted : bint*,
+
 0895:             If is_samples_sorted, then self.sorted_samples[start:end] will be
+
 0896:             the sorted version of self.samples[start:end].
 0897: 
-
 0898:         """
-
+0899:         cdef SIZE_t indptr_start = self.X_indptr[feature],
+
 0898:         """
+
+0899:         cdef SIZE_t indptr_start = self.X_indptr[feature],
  __pyx_v_indptr_start = (__pyx_v_self->X_indptr[__pyx_v_feature]);
-
+0900:         cdef SIZE_t indptr_end = self.X_indptr[feature + 1]
+
+0900:         cdef SIZE_t indptr_end = self.X_indptr[feature + 1]
  __pyx_v_indptr_end = (__pyx_v_self->X_indptr[(__pyx_v_feature + 1)]);
-
+0901:         cdef SIZE_t n_indices = <SIZE_t>(indptr_end - indptr_start)
+
+0901:         cdef SIZE_t n_indices = <SIZE_t>(indptr_end - indptr_start)
  __pyx_v_n_indices = ((__pyx_t_7sklearn_4tree_5_tree_SIZE_t)(__pyx_v_indptr_end - __pyx_v_indptr_start));
-
+0902:         cdef SIZE_t n_samples = self.end - self.start
+
+0902:         cdef SIZE_t n_samples = self.end - self.start
  __pyx_v_n_samples = (__pyx_v_self->__pyx_base.end - __pyx_v_self->__pyx_base.start);
 
 0903: 
-
 0904:         # Use binary search if n_samples * log(n_indices) <
-
 0905:         # n_indices and index_to_samples approach otherwise.
-
 0906:         # O(n_samples * log(n_indices)) is the running time of binary
-
 0907:         # search and O(n_indices) is the running time of index_to_samples
-
 0908:         # approach.
-
+0909:         if ((1 - is_samples_sorted[0]) * n_samples * log(n_samples) +
+
 0904:         # Use binary search if n_samples * log(n_indices) <
+
 0905:         # n_indices and index_to_samples approach otherwise.
+
 0906:         # O(n_samples * log(n_indices)) is the running time of binary
+
 0907:         # search and O(n_indices) is the running time of index_to_samples
+
 0908:         # approach.
+
+0909:         if ((1 - is_samples_sorted[0]) * n_samples * log(n_samples) +
  if (__pyx_t_1) {
 /* … */
     goto __pyx_L3;
   }
-
+0910:                 n_samples * log(n_indices) < EXTRACT_NNZ_SWITCH * n_indices):
+
+0910:                 n_samples * log(n_indices) < EXTRACT_NNZ_SWITCH * n_indices):
  __pyx_t_1 = ((((((1 - (__pyx_v_is_samples_sorted[0])) * __pyx_v_n_samples) * __pyx_f_7sklearn_4tree_6_utils_log(__pyx_v_n_samples)) + (__pyx_v_n_samples * __pyx_f_7sklearn_4tree_6_utils_log(__pyx_v_n_indices))) < (__pyx_v_13stpredictions_6models_3OK3_9_splitter_EXTRACT_NNZ_SWITCH * __pyx_v_n_indices)) != 0);
-
+0911:             extract_nnz_binary_search(self.X_indices, self.X_data,
+
+0911:             extract_nnz_binary_search(self.X_indices, self.X_data,
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_extract_nnz_binary_search(__pyx_v_self->X_indices, __pyx_v_self->X_data, __pyx_v_indptr_start, __pyx_v_indptr_end, __pyx_v_self->__pyx_base.samples, __pyx_v_self->__pyx_base.start, __pyx_v_self->__pyx_base.end, __pyx_v_self->index_to_samples, __pyx_v_self->__pyx_base.feature_values, __pyx_v_end_negative, __pyx_v_start_positive, __pyx_v_self->sorted_samples, __pyx_v_is_samples_sorted);
-
 0912:                                       indptr_start, indptr_end,
-
 0913:                                       self.samples, self.start, self.end,
-
 0914:                                       self.index_to_samples,
-
 0915:                                       self.feature_values,
-
 0916:                                       end_negative, start_positive,
-
 0917:                                       self.sorted_samples, is_samples_sorted)
+
 0912:                                       indptr_start, indptr_end,
+
 0913:                                       self.samples, self.start, self.end,
+
 0914:                                       self.index_to_samples,
+
 0915:                                       self.feature_values,
+
 0916:                                       end_negative, start_positive,
+
 0917:                                       self.sorted_samples, is_samples_sorted)
 0918: 
-
 0919:         # Using an index to samples technique to extract non zero values
-
 0920:         # index_to_samples is a mapping from X_indices to samples
-
 0921:         else:
-
+0922:             extract_nnz_index_to_samples(self.X_indices, self.X_data,
+
 0919:         # Using an index to samples technique to extract non zero values
+
 0920:         # index_to_samples is a mapping from X_indices to samples
+
 0921:         else:
+
+0922:             extract_nnz_index_to_samples(self.X_indices, self.X_data,
  /*else*/ {
 /* … */
     __pyx_f_13stpredictions_6models_3OK3_9_splitter_extract_nnz_index_to_samples(__pyx_v_self->X_indices, __pyx_v_self->X_data, __pyx_v_indptr_start, __pyx_v_indptr_end, __pyx_v_self->__pyx_base.samples, __pyx_v_self->__pyx_base.start, __pyx_v_self->__pyx_base.end, __pyx_v_self->index_to_samples, __pyx_v_self->__pyx_base.feature_values, __pyx_v_end_negative, __pyx_v_start_positive);
   }
   __pyx_L3:;
-
 0923:                                          indptr_start, indptr_end,
-
 0924:                                          self.samples, self.start, self.end,
-
 0925:                                          self.index_to_samples,
-
 0926:                                          self.feature_values,
-
 0927:                                          end_negative, start_positive)
+
 0923:                                          indptr_start, indptr_end,
+
 0924:                                          self.samples, self.start, self.end,
+
 0925:                                          self.index_to_samples,
+
 0926:                                          self.feature_values,
+
 0927:                                          end_negative, start_positive)
 0928: 
 0929: 
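
The branch above encodes a small cost model: sorting the node's samples once costs
roughly n_samples * log(n_samples), each membership test by binary search costs
log(n_indices), while the index_to_samples scan is linear in n_indices. A hedged
sketch of just the decision rule (EXTRACT_NNZ_SWITCH is assumed to be 0.1, the
value used by the sklearn splitter this module derives from; n_samples and
n_indices are assumed positive):

    from math import log

    EXTRACT_NNZ_SWITCH = 0.1  # assumed tuning constant

    def choose_extraction(n_samples, n_indices, is_samples_sorted):
        """Pick the cheaper of the two extract_nnz_* strategies."""
        sort_cost = 0.0 if is_samples_sorted else n_samples * log(n_samples)
        search_cost = n_samples * log(n_indices)
        if sort_cost + search_cost < EXTRACT_NNZ_SWITCH * n_indices:
            return "binary_search"
        return "index_to_samples"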
-
+0930: cdef int compare_SIZE_t(const void* a, const void* b) nogil:
+
+0930: cdef int compare_SIZE_t(const void* a, const void* b) nogil:
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_compare_SIZE_t(void const *__pyx_v_a, void const *__pyx_v_b) {
   int __pyx_r;
 /* … */
@@ -3039,68 +3113,68 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0931:     """Comparison function for sort."""
-
+0932:     return <int>((<SIZE_t*>a)[0] - (<SIZE_t*>b)[0])
+
 0931:     """Comparison function for sort."""
+
+0932:     return <int>((<SIZE_t*>a)[0] - (<SIZE_t*>b)[0])
  __pyx_r = ((int)((((__pyx_t_7sklearn_4tree_5_tree_SIZE_t *)__pyx_v_a)[0]) - (((__pyx_t_7sklearn_4tree_5_tree_SIZE_t *)__pyx_v_b)[0])));
   goto __pyx_L0;
 
 0933: 
 0934: 
-
+0935: cdef inline void binary_search(INT32_t* sorted_array,
+
+0935: cdef inline void binary_search(INT32_t* sorted_array,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_binary_search(__pyx_t_7sklearn_4tree_5_tree_INT32_t *__pyx_v_sorted_array, __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_start, __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_end, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_value, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_index, __pyx_t_7sklearn_4tree_5_tree_INT32_t *__pyx_v_new_start) {
   __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_pivot;
 /* … */
   /* function exit code */
 }
-
 0936:                                INT32_t start, INT32_t end,
-
 0937:                                SIZE_t value, SIZE_t* index,
-
 0938:                                INT32_t* new_start) nogil:
-
 0939:     """Return the index of value in the sorted array.
+
 0936:                                INT32_t start, INT32_t end,
+
 0937:                                SIZE_t value, SIZE_t* index,
+
 0938:                                INT32_t* new_start) nogil:
+
 0939:     """Return the index of value in the sorted array.
 0940: 
-
 0941:     If not found, return -1. new_start is the last pivot + 1
-
 0942:     """
-
 0943:     cdef INT32_t pivot
-
+0944:     index[0] = -1
+
 0941:     If not found, return -1. new_start is the last pivot + 1
+
 0942:     """
+
 0943:     cdef INT32_t pivot
+
+0944:     index[0] = -1
  (__pyx_v_index[0]) = -1;
-
+0945:     while start < end:
+
+0945:     while start < end:
  while (1) {
     __pyx_t_1 = ((__pyx_v_start < __pyx_v_end) != 0);
     if (!__pyx_t_1) break;
-
+0946:         pivot = start + (end - start) / 2
+
+0946:         pivot = start + (end - start) / 2
    __pyx_v_pivot = (__pyx_v_start + ((__pyx_v_end - __pyx_v_start) / 2));
 
 0947: 
-
+0948:         if sorted_array[pivot] == value:
+
+0948:         if sorted_array[pivot] == value:
    __pyx_t_1 = (((__pyx_v_sorted_array[__pyx_v_pivot]) == __pyx_v_value) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
+0949:             index[0] = pivot
+
+0949:             index[0] = pivot
      (__pyx_v_index[0]) = __pyx_v_pivot;
-
+0950:             start = pivot + 1
+
+0950:             start = pivot + 1
      __pyx_v_start = (__pyx_v_pivot + 1);
-
+0951:             break
+
+0951:             break
      goto __pyx_L4_break;
 
 0952: 
-
+0953:         if sorted_array[pivot] < value:
+
+0953:         if sorted_array[pivot] < value:
    __pyx_t_1 = (((__pyx_v_sorted_array[__pyx_v_pivot]) < __pyx_v_value) != 0);
     if (__pyx_t_1) {
 /* … */
       goto __pyx_L6;
     }
-
+0954:             start = pivot + 1
+
+0954:             start = pivot + 1
      __pyx_v_start = (__pyx_v_pivot + 1);
-
 0955:         else:
-
+0956:             end = pivot
+
 0955:         else:
+
+0956:             end = pivot
    /*else*/ {
       __pyx_v_end = __pyx_v_pivot;
     }
     __pyx_L6:;
   }
   __pyx_L4_break:;
-
+0957:     new_start[0] = start
+
+0957:     new_start[0] = start
  (__pyx_v_new_start[0]) = __pyx_v_start;
 
 0958: 
 0959: 
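
One detail worth calling out in the routine above: unlike a textbook binary
search it also reports the last pivot + 1 through new_start, so a caller that
looks up increasing values (as extract_nnz_binary_search does) keeps shrinking
its window instead of rescanning from the left. A plain-Python sketch:

    def binary_search(sorted_array, start, end, value):
        """Return (index, new_start); index is -1 when value is absent."""
        index = -1
        while start < end:
            pivot = start + (end - start) // 2
            if sorted_array[pivot] == value:
                index = pivot
                start = pivot + 1
                break
            if sorted_array[pivot] < value:
                start = pivot + 1
            else:
                end = pivot
        return index, start  # start doubles as new_start for the next lookup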
-
+0960: cdef inline void extract_nnz_index_to_samples(INT32_t* X_indices,
+
+0960: cdef inline void extract_nnz_index_to_samples(INT32_t* X_indices,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_extract_nnz_index_to_samples(__pyx_t_7sklearn_4tree_5_tree_INT32_t *__pyx_v_X_indices, __pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_X_data, __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_indptr_start, __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_indptr_end, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_index_to_samples, __pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_end_negative, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_start_positive) {
   __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_k;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_index;
@@ -3109,33 +3183,33 @@
 /* … */
   /* function exit code */
 }
-
 0961:                                               DTYPE_t* X_data,
-
 0962:                                               INT32_t indptr_start,
-
 0963:                                               INT32_t indptr_end,
-
 0964:                                               SIZE_t* samples,
-
 0965:                                               SIZE_t start,
-
 0966:                                               SIZE_t end,
-
 0967:                                               SIZE_t* index_to_samples,
-
 0968:                                               DTYPE_t* Xf,
-
 0969:                                               SIZE_t* end_negative,
-
 0970:                                               SIZE_t* start_positive) nogil:
-
 0971:     """Extract and partition values for a feature using index_to_samples.
+
 0961:                                               DTYPE_t* X_data,
+
 0962:                                               INT32_t indptr_start,
+
 0963:                                               INT32_t indptr_end,
+
 0964:                                               SIZE_t* samples,
+
 0965:                                               SIZE_t start,
+
 0966:                                               SIZE_t end,
+
 0967:                                               SIZE_t* index_to_samples,
+
 0968:                                               DTYPE_t* Xf,
+
 0969:                                               SIZE_t* end_negative,
+
 0970:                                               SIZE_t* start_positive) nogil:
+
 0971:     """Extract and partition values for a feature using index_to_samples.
 0972: 
-
 0973:     Complexity is O(indptr_end - indptr_start).
-
 0974:     """
-
 0975:     cdef INT32_t k
-
 0976:     cdef SIZE_t index
-
+0977:     cdef SIZE_t end_negative_ = start
+
 0973:     Complexity is O(indptr_end - indptr_start).
+
 0974:     """
+
 0975:     cdef INT32_t k
+
 0976:     cdef SIZE_t index
+
+0977:     cdef SIZE_t end_negative_ = start
  __pyx_v_end_negative_ = __pyx_v_start;
-
+0978:     cdef SIZE_t start_positive_ = end
+
+0978:     cdef SIZE_t start_positive_ = end
  __pyx_v_start_positive_ = __pyx_v_end;
 
 0979: 
-
+0980:     for k in range(indptr_start, indptr_end):
+
+0980:     for k in range(indptr_start, indptr_end):
  __pyx_t_1 = __pyx_v_indptr_end;
   __pyx_t_2 = __pyx_t_1;
   for (__pyx_t_3 = __pyx_v_indptr_start; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
     __pyx_v_k = __pyx_t_3;
-
+0981:         if start <= index_to_samples[X_indices[k]] < end:
+
+0981:         if start <= index_to_samples[X_indices[k]] < end:
    __pyx_t_4 = (__pyx_v_start <= (__pyx_v_index_to_samples[(__pyx_v_X_indices[__pyx_v_k])]));
     if (__pyx_t_4) {
       __pyx_t_4 = ((__pyx_v_index_to_samples[(__pyx_v_X_indices[__pyx_v_k])]) < __pyx_v_end);
@@ -3145,45 +3219,45 @@
 /* … */
     }
   }
-
+0982:             if X_data[k] > 0:
+
+0982:             if X_data[k] > 0:
      __pyx_t_5 = (((__pyx_v_X_data[__pyx_v_k]) > 0.0) != 0);
       if (__pyx_t_5) {
 /* … */
         goto __pyx_L6;
       }
-
+0983:                 start_positive_ -= 1
+
+0983:                 start_positive_ -= 1
        __pyx_v_start_positive_ = (__pyx_v_start_positive_ - 1);
-
+0984:                 Xf[start_positive_] = X_data[k]
+
+0984:                 Xf[start_positive_] = X_data[k]
        (__pyx_v_Xf[__pyx_v_start_positive_]) = (__pyx_v_X_data[__pyx_v_k]);
-
+0985:                 index = index_to_samples[X_indices[k]]
+
+0985:                 index = index_to_samples[X_indices[k]]
        __pyx_v_index = (__pyx_v_index_to_samples[(__pyx_v_X_indices[__pyx_v_k])]);
-
+0986:                 sparse_swap(index_to_samples, samples, index, start_positive_)
+
+0986:                 sparse_swap(index_to_samples, samples, index, start_positive_)
        __pyx_f_13stpredictions_6models_3OK3_9_splitter_sparse_swap(__pyx_v_index_to_samples, __pyx_v_samples, __pyx_v_index, __pyx_v_start_positive_);
 
 0987: 
 0988: 
-
+0989:             elif X_data[k] < 0:
+
+0989:             elif X_data[k] < 0:
      __pyx_t_5 = (((__pyx_v_X_data[__pyx_v_k]) < 0.0) != 0);
       if (__pyx_t_5) {
 /* … */
       }
       __pyx_L6:;
-
+0990:                 Xf[end_negative_] = X_data[k]
+
+0990:                 Xf[end_negative_] = X_data[k]
        (__pyx_v_Xf[__pyx_v_end_negative_]) = (__pyx_v_X_data[__pyx_v_k]);
-
+0991:                 index = index_to_samples[X_indices[k]]
+
+0991:                 index = index_to_samples[X_indices[k]]
        __pyx_v_index = (__pyx_v_index_to_samples[(__pyx_v_X_indices[__pyx_v_k])]);
-
+0992:                 sparse_swap(index_to_samples, samples, index, end_negative_)
+
+0992:                 sparse_swap(index_to_samples, samples, index, end_negative_)
        __pyx_f_13stpredictions_6models_3OK3_9_splitter_sparse_swap(__pyx_v_index_to_samples, __pyx_v_samples, __pyx_v_index, __pyx_v_end_negative_);
-
+0993:                 end_negative_ += 1
+
+0993:                 end_negative_ += 1
        __pyx_v_end_negative_ = (__pyx_v_end_negative_ + 1);
 
 0994: 
-
 0995:     # Returned values
-
+0996:     end_negative[0] = end_negative_
+
 0995:     # Returned values
+
+0996:     end_negative[0] = end_negative_
  (__pyx_v_end_negative[0]) = __pyx_v_end_negative_;
-
+0997:     start_positive[0] = start_positive_
+
+0997:     start_positive[0] = start_positive_
  (__pyx_v_start_positive[0]) = __pyx_v_start_positive_;
 
 0998: 
 0999: 
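
In words, the routine above walks the feature's CSC column exactly once; any
entry whose row lies inside the current node (checked through index_to_samples)
is copied into Xf, negatives packed from the left edge and positives from the
right edge, with sparse_swap keeping samples and index_to_samples consistent.
A sketch under the same names (reusing sparse_swap from the partition sketch
earlier):

    def extract_nnz_index_to_samples(X_indices, X_data, indptr_start, indptr_end,
                                     samples, start, end, index_to_samples, Xf):
        """O(indptr_end - indptr_start); returns (end_negative, start_positive)."""
        end_negative_, start_positive_ = start, end
        for k in range(indptr_start, indptr_end):
            if start <= index_to_samples[X_indices[k]] < end:
                index = index_to_samples[X_indices[k]]
                if X_data[k] > 0:
                    start_positive_ -= 1
                    Xf[start_positive_] = X_data[k]
                    sparse_swap(index_to_samples, samples, index, start_positive_)
                elif X_data[k] < 0:
                    Xf[end_negative_] = X_data[k]
                    sparse_swap(index_to_samples, samples, index, end_negative_)
                    end_negative_ += 1
        return end_negative_, start_positive_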
-
+1000: cdef inline void extract_nnz_binary_search(INT32_t* X_indices,
+
+1000: cdef inline void extract_nnz_binary_search(INT32_t* X_indices,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_extract_nnz_binary_search(__pyx_t_7sklearn_4tree_5_tree_INT32_t *__pyx_v_X_indices, __pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_X_data, __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_indptr_start, __pyx_t_7sklearn_4tree_5_tree_INT32_t __pyx_v_indptr_end, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_end, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_index_to_samples, __pyx_t_7sklearn_4tree_5_tree_DTYPE_t *__pyx_v_Xf, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_end_negative, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_start_positive, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_sorted_samples, int *__pyx_v_is_samples_sorted) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_n_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_p;
@@ -3194,45 +3268,45 @@
 /* … */
   /* function exit code */
 }
-
 1001:                                            DTYPE_t* X_data,
-
 1002:                                            INT32_t indptr_start,
-
 1003:                                            INT32_t indptr_end,
-
 1004:                                            SIZE_t* samples,
-
 1005:                                            SIZE_t start,
-
 1006:                                            SIZE_t end,
-
 1007:                                            SIZE_t* index_to_samples,
-
 1008:                                            DTYPE_t* Xf,
-
 1009:                                            SIZE_t* end_negative,
-
 1010:                                            SIZE_t* start_positive,
-
 1011:                                            SIZE_t* sorted_samples,
-
 1012:                                            bint* is_samples_sorted) nogil:
-
 1013:     """Extract and partition values for a given feature using binary search.
+
 1001:                                            DTYPE_t* X_data,
+
 1002:                                            INT32_t indptr_start,
+
 1003:                                            INT32_t indptr_end,
+
 1004:                                            SIZE_t* samples,
+
 1005:                                            SIZE_t start,
+
 1006:                                            SIZE_t end,
+
 1007:                                            SIZE_t* index_to_samples,
+
 1008:                                            DTYPE_t* Xf,
+
 1009:                                            SIZE_t* end_negative,
+
 1010:                                            SIZE_t* start_positive,
+
 1011:                                            SIZE_t* sorted_samples,
+
 1012:                                            bint* is_samples_sorted) nogil:
+
 1013:     """Extract and partition values for a given feature using binary search.
 1014: 
-
 1015:     If n_samples = end - start and n_indices = indptr_end - indptr_start,
-
 1016:     the complexity is
+
 1015:     If n_samples = end - start and n_indices = indptr_end - indptr_start,
+
 1016:     the complexity is
 1017: 
-
 1018:         O((1 - is_samples_sorted[0]) * n_samples * log(n_samples) +
-
 1019:           n_samples * log(n_indices)).
-
 1020:     """
-
 1021:     cdef SIZE_t n_samples
+
 1018:         O((1 - is_samples_sorted[0]) * n_samples * log(n_samples) +
+
 1019:           n_samples * log(n_indices)).
+
 1020:     """
+
 1021:     cdef SIZE_t n_samples
 1022: 
-
+1023:     if not is_samples_sorted[0]:
+
+1023:     if not is_samples_sorted[0]:
  __pyx_t_1 = ((!((__pyx_v_is_samples_sorted[0]) != 0)) != 0);
   if (__pyx_t_1) {
 /* … */
   }
-
+1024:         n_samples = end - start
+
+1024:         n_samples = end - start
    __pyx_v_n_samples = (__pyx_v_end - __pyx_v_start);
-
+1025:         memcpy(sorted_samples + start, samples + start,
+
+1025:         memcpy(sorted_samples + start, samples + start,
    (void)(memcpy((__pyx_v_sorted_samples + __pyx_v_start), (__pyx_v_samples + __pyx_v_start), (__pyx_v_n_samples * (sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)))));
-
 1026:                n_samples * sizeof(SIZE_t))
-
+1027:         qsort(sorted_samples + start, n_samples, sizeof(SIZE_t),
+
 1026:                n_samples * sizeof(SIZE_t))
+
+1027:         qsort(sorted_samples + start, n_samples, sizeof(SIZE_t),
    qsort((__pyx_v_sorted_samples + __pyx_v_start), __pyx_v_n_samples, (sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)), __pyx_f_13stpredictions_6models_3OK3_9_splitter_compare_SIZE_t);
-
 1028:               compare_SIZE_t)
-
+1029:         is_samples_sorted[0] = 1
+
 1028:               compare_SIZE_t)
+
+1029:         is_samples_sorted[0] = 1
    (__pyx_v_is_samples_sorted[0]) = 1;
 
 1030: 
-
+1031:     while (indptr_start < indptr_end and
+
+1031:     while (indptr_start < indptr_end and
  while (1) {
     __pyx_t_2 = ((__pyx_v_indptr_start < __pyx_v_indptr_end) != 0);
     if (__pyx_t_2) {
@@ -3240,16 +3314,16 @@
       __pyx_t_1 = __pyx_t_2;
       goto __pyx_L6_bool_binop_done;
     }
-
+1032:            sorted_samples[start] > X_indices[indptr_start]):
+
+1032:            sorted_samples[start] > X_indices[indptr_start]):
    __pyx_t_2 = (((__pyx_v_sorted_samples[__pyx_v_start]) > (__pyx_v_X_indices[__pyx_v_indptr_start])) != 0);
     __pyx_t_1 = __pyx_t_2;
     __pyx_L6_bool_binop_done:;
     if (!__pyx_t_1) break;
-
+1033:         indptr_start += 1
+
+1033:         indptr_start += 1
    __pyx_v_indptr_start = (__pyx_v_indptr_start + 1);
   }
 
 1034: 
-
+1035:     while (indptr_start < indptr_end and
+
+1035:     while (indptr_start < indptr_end and
  while (1) {
     __pyx_t_2 = ((__pyx_v_indptr_start < __pyx_v_indptr_end) != 0);
     if (__pyx_t_2) {
@@ -3257,25 +3331,25 @@
       __pyx_t_1 = __pyx_t_2;
       goto __pyx_L10_bool_binop_done;
     }
-
+1036:            sorted_samples[end - 1] < X_indices[indptr_end - 1]):
+
+1036:            sorted_samples[end - 1] < X_indices[indptr_end - 1]):
    __pyx_t_2 = (((__pyx_v_sorted_samples[(__pyx_v_end - 1)]) < (__pyx_v_X_indices[(__pyx_v_indptr_end - 1)])) != 0);
     __pyx_t_1 = __pyx_t_2;
     __pyx_L10_bool_binop_done:;
     if (!__pyx_t_1) break;
-
+1037:         indptr_end -= 1
+
+1037:         indptr_end -= 1
    __pyx_v_indptr_end = (__pyx_v_indptr_end - 1);
   }
 
 1038: 
-
+1039:     cdef SIZE_t p = start
+
+1039:     cdef SIZE_t p = start
  __pyx_v_p = __pyx_v_start;
-
 1040:     cdef SIZE_t index
-
 1041:     cdef SIZE_t k
-
+1042:     cdef SIZE_t end_negative_ = start
+
 1040:     cdef SIZE_t index
+
 1041:     cdef SIZE_t k
+
+1042:     cdef SIZE_t end_negative_ = start
  __pyx_v_end_negative_ = __pyx_v_start;
-
+1043:     cdef SIZE_t start_positive_ = end
+
+1043:     cdef SIZE_t start_positive_ = end
  __pyx_v_start_positive_ = __pyx_v_end;
 
 1044: 
-
+1045:     while (p < end and indptr_start < indptr_end):
+
+1045:     while (p < end and indptr_start < indptr_end):
  while (1) {
     __pyx_t_2 = ((__pyx_v_p < __pyx_v_end) != 0);
     if (__pyx_t_2) {
@@ -3287,78 +3361,78 @@
     __pyx_t_1 = __pyx_t_2;
     __pyx_L14_bool_binop_done:;
     if (!__pyx_t_1) break;
-
 1046:         # Find index of sorted_samples[p] in X_indices
-
+1047:         binary_search(X_indices, indptr_start, indptr_end,
+
 1046:         # Find index of sorted_samples[p] in X_indices
+
+1047:         binary_search(X_indices, indptr_start, indptr_end,
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_binary_search(__pyx_v_X_indices, __pyx_v_indptr_start, __pyx_v_indptr_end, (__pyx_v_sorted_samples[__pyx_v_p]), (&__pyx_v_k), (&__pyx_v_indptr_start));
-
 1048:                       sorted_samples[p], &k, &indptr_start)
+
 1048:                       sorted_samples[p], &k, &indptr_start)
 1049: 
-
+1050:         if k != -1:
+
+1050:         if k != -1:
    __pyx_t_1 = ((__pyx_v_k != -1L) != 0);
     if (__pyx_t_1) {
 /* … */
     }
-
 1051:              # If k != -1, we have found a non zero value
+
 1051:              # If k != -1, we have found a non zero value
 1052: 
-
+1053:             if X_data[k] > 0:
+
+1053:             if X_data[k] > 0:
      __pyx_t_1 = (((__pyx_v_X_data[__pyx_v_k]) > 0.0) != 0);
       if (__pyx_t_1) {
 /* … */
         goto __pyx_L17;
       }
-
+1054:                 start_positive_ -= 1
+
+1054:                 start_positive_ -= 1
        __pyx_v_start_positive_ = (__pyx_v_start_positive_ - 1);
-
+1055:                 Xf[start_positive_] = X_data[k]
+
+1055:                 Xf[start_positive_] = X_data[k]
        (__pyx_v_Xf[__pyx_v_start_positive_]) = (__pyx_v_X_data[__pyx_v_k]);
-
+1056:                 index = index_to_samples[X_indices[k]]
+
+1056:                 index = index_to_samples[X_indices[k]]
        __pyx_v_index = (__pyx_v_index_to_samples[(__pyx_v_X_indices[__pyx_v_k])]);
-
+1057:                 sparse_swap(index_to_samples, samples, index, start_positive_)
+
+1057:                 sparse_swap(index_to_samples, samples, index, start_positive_)
        __pyx_f_13stpredictions_6models_3OK3_9_splitter_sparse_swap(__pyx_v_index_to_samples, __pyx_v_samples, __pyx_v_index, __pyx_v_start_positive_);
 
 1058: 
 1059: 
-
+1060:             elif X_data[k] < 0:
+
+1060:             elif X_data[k] < 0:
      __pyx_t_1 = (((__pyx_v_X_data[__pyx_v_k]) < 0.0) != 0);
       if (__pyx_t_1) {
 /* … */
       }
       __pyx_L17:;
-
+1061:                 Xf[end_negative_] = X_data[k]
+
+1061:                 Xf[end_negative_] = X_data[k]
        (__pyx_v_Xf[__pyx_v_end_negative_]) = (__pyx_v_X_data[__pyx_v_k]);
-
+1062:                 index = index_to_samples[X_indices[k]]
+
+1062:                 index = index_to_samples[X_indices[k]]
        __pyx_v_index = (__pyx_v_index_to_samples[(__pyx_v_X_indices[__pyx_v_k])]);
-
+1063:                 sparse_swap(index_to_samples, samples, index, end_negative_)
+
+1063:                 sparse_swap(index_to_samples, samples, index, end_negative_)
        __pyx_f_13stpredictions_6models_3OK3_9_splitter_sparse_swap(__pyx_v_index_to_samples, __pyx_v_samples, __pyx_v_index, __pyx_v_end_negative_);
-
+1064:                 end_negative_ += 1
+
+1064:                 end_negative_ += 1
        __pyx_v_end_negative_ = (__pyx_v_end_negative_ + 1);
-
+1065:         p += 1
+
+1065:         p += 1
    __pyx_v_p = (__pyx_v_p + 1);
   }
 
 1066: 
-
 1067:     # Returned values
-
+1068:     end_negative[0] = end_negative_
+
 1067:     # Returned values
+
+1068:     end_negative[0] = end_negative_
  (__pyx_v_end_negative[0]) = __pyx_v_end_negative_;
-
+1069:     start_positive[0] = start_positive_
+
+1069:     start_positive[0] = start_positive_
  (__pyx_v_start_positive[0]) = __pyx_v_start_positive_;
 
 1070: 
 1071: 
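
The binary-search variant above first copies samples[start:end] into
sorted_samples and sorts it (cached across features via is_samples_sorted),
trims [indptr_start, indptr_end) to the range that can overlap the node, then
binary-searches each sorted sample; hits are partitioned exactly as in the
linear variant. A condensed sketch (binary_search and sparse_swap as in the
earlier sketches; a one-element list stands in for the bint* flag):

    def extract_nnz_binary_search(X_indices, X_data, indptr_start, indptr_end,
                                  samples, start, end, index_to_samples, Xf,
                                  sorted_samples, is_samples_sorted):
        if not is_samples_sorted[0]:
            sorted_samples[start:end] = sorted(samples[start:end])
            is_samples_sorted[0] = True
        # Trim the column to the window that can intersect the node's samples.
        while (indptr_start < indptr_end
               and sorted_samples[start] > X_indices[indptr_start]):
            indptr_start += 1
        while (indptr_start < indptr_end
               and sorted_samples[end - 1] < X_indices[indptr_end - 1]):
            indptr_end -= 1
        p, end_negative_, start_positive_ = start, start, end
        while p < end and indptr_start < indptr_end:
            k, indptr_start = binary_search(X_indices, indptr_start,
                                            indptr_end, sorted_samples[p])
            if k != -1:  # found a non zero value for this sample
                index = index_to_samples[X_indices[k]]
                if X_data[k] > 0:
                    start_positive_ -= 1
                    Xf[start_positive_] = X_data[k]
                    sparse_swap(index_to_samples, samples, index, start_positive_)
                elif X_data[k] < 0:
                    Xf[end_negative_] = X_data[k]
                    sparse_swap(index_to_samples, samples, index, end_negative_)
                    end_negative_ += 1
            p += 1
        return end_negative_, start_positive_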
-
+1072: cdef inline void sparse_swap(SIZE_t* index_to_samples, SIZE_t* samples,
+
+1072: cdef inline void sparse_swap(SIZE_t* index_to_samples, SIZE_t* samples,
static CYTHON_INLINE void __pyx_f_13stpredictions_6models_3OK3_9_splitter_sparse_swap(__pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_index_to_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_pos_1, __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_pos_2) {
 /* … */
   /* function exit code */
 }
-
 1073:                              SIZE_t pos_1, SIZE_t pos_2) nogil:
-
 1074:     """Swap sample pos_1 and pos_2 preserving sparse invariant."""
-
+1075:     samples[pos_1], samples[pos_2] =  samples[pos_2], samples[pos_1]
+
 1073:                              SIZE_t pos_1, SIZE_t pos_2) nogil:
+
 1074:     """Swap sample pos_1 and pos_2 preserving sparse invariant."""
+
+1075:     samples[pos_1], samples[pos_2] =  samples[pos_2], samples[pos_1]
  __pyx_t_1 = (__pyx_v_samples[__pyx_v_pos_2]);
   __pyx_t_2 = (__pyx_v_samples[__pyx_v_pos_1]);
   (__pyx_v_samples[__pyx_v_pos_1]) = __pyx_t_1;
   (__pyx_v_samples[__pyx_v_pos_2]) = __pyx_t_2;
-
+1076:     index_to_samples[samples[pos_1]] = pos_1
+
+1076:     index_to_samples[samples[pos_1]] = pos_1
  (__pyx_v_index_to_samples[(__pyx_v_samples[__pyx_v_pos_1])]) = __pyx_v_pos_1;
-
+1077:     index_to_samples[samples[pos_2]] = pos_2
+
+1077:     index_to_samples[samples[pos_2]] = pos_2
  (__pyx_v_index_to_samples[(__pyx_v_samples[__pyx_v_pos_2])]) = __pyx_v_pos_2;
 
 1078: 
 1079: 
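
sparse_swap is the bookkeeping primitive everything above relies on: after any
swap inside samples it restores the invariant index_to_samples[samples[i]] == i
for the two touched slots. A quick usage check with the sketch from earlier
(a dict stands in for the C index array):

    samples = [7, 3, 5]
    index_to_samples = {7: 0, 3: 1, 5: 2}

    sparse_swap(index_to_samples, samples, 0, 2)
    assert samples == [5, 3, 7]
    assert all(index_to_samples[s] == i for i, s in enumerate(samples))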
-
+1080: cdef class BestSparseSplitter(BaseSparseSplitter):
+
+1080: cdef class BestSparseSplitter(BaseSparseSplitter):
struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BestSparseSplitter {
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter __pyx_base;
 };
@@ -3368,9 +3442,9 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_BestSparseSplitter *__pyx_vtabptr_13stpredictions_6models_3OK3_9_splitter_BestSparseSplitter;
 
-
 1081:     """Splitter for finding the best split, using the sparse data."""
+
 1081:     """Splitter for finding the best split, using the sparse data."""
 1082: 
-
+1083:     def __reduce__(self):
+
+1083:     def __reduce__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_18BestSparseSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_18BestSparseSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -3402,7 +3476,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+1084:         return (BestSparseSplitter, (self.criterion,
+
+1084:         return (BestSparseSplitter, (self.criterion,
  __Pyx_XDECREF(__pyx_r);
 /* … */
   __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1084, __pyx_L1_error)
@@ -3437,16 +3511,16 @@
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
-
+1085:                                      self.max_features,
+
+1085:                                      self.max_features,
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.max_features); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1085, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
-
+1086:                                      self.min_samples_leaf,
+
+1086:                                      self.min_samples_leaf,
  __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1086, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
-
+1087:                                      self.min_weight_leaf,
+
+1087:                                      self.min_weight_leaf,
  __pyx_t_3 = PyFloat_FromDouble(__pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1087, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
-
+1088:                                      self.random_state), self.__getstate__())
+
+1088:                                      self.random_state), self.__getstate__())
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_getstate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1088, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_1 = NULL;
@@ -3465,7 +3539,7 @@
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
 1089: 
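
The __reduce__ above follows the standard pickle protocol for extension types:
it returns a (callable, args, state) triple, so unpickling reconstructs the
splitter as BestSparseSplitter(criterion, max_features, min_samples_leaf,
min_weight_leaf, random_state) and then applies the captured state. A hedged
pure-Python illustration of the same pattern (the class body is a stand-in,
not the Cython type):

    class Splitter:
        def __init__(self, criterion, max_features, min_samples_leaf,
                     min_weight_leaf, random_state):
            self.criterion = criterion
            self.max_features = max_features
            self.min_samples_leaf = min_samples_leaf
            self.min_weight_leaf = min_weight_leaf
            self.random_state = random_state

        def __getstate__(self):
            return {}  # nothing beyond the constructor args to restore

        def __reduce__(self):
            return (type(self),
                    (self.criterion, self.max_features, self.min_samples_leaf,
                     self.min_weight_leaf, self.random_state),
                    self.__getstate__())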
-
+1090:     cdef int node_split(self, double impurity, SplitRecord* split,
+
+1090:     cdef int node_split(self, double impurity, SplitRecord* split,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BestSparseSplitter_node_split(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BestSparseSplitter *__pyx_v_self, double __pyx_v_impurity, struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord *__pyx_v_split, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_n_constant_features) {
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start;
@@ -3517,111 +3591,111 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 1091:                         SIZE_t* n_constant_features) nogil except -1:
-
 1092:         """Find the best split on node samples[start:end], using sparse features
+
 1091:                         SIZE_t* n_constant_features) nogil except -1:
+
 1092:         """Find the best split on node samples[start:end], using sparse features
 1093: 
-
 1094:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 1095:         or 0 otherwise.
-
 1096:         """
-
 1097:         # Find the best split
-
+1098:         cdef SIZE_t* samples = self.samples
+
 1094:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 1095:         or 0 otherwise.
+
 1096:         """
+
 1097:         # Find the best split
+
+1098:         cdef SIZE_t* samples = self.samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.samples;
   __pyx_v_samples = __pyx_t_1;
-
+1099:         cdef SIZE_t start = self.start
+
+1099:         cdef SIZE_t start = self.start
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.start;
   __pyx_v_start = __pyx_t_2;
-
+1100:         cdef SIZE_t end = self.end
+
+1100:         cdef SIZE_t end = self.end
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.end;
   __pyx_v_end = __pyx_t_2;
 
 1101: 
-
+1102:         cdef INT32_t* X_indices = self.X_indices
+
+1102:         cdef INT32_t* X_indices = self.X_indices
  __pyx_t_3 = __pyx_v_self->__pyx_base.X_indices;
   __pyx_v_X_indices = __pyx_t_3;
-
+1103:         cdef INT32_t* X_indptr = self.X_indptr
+
+1103:         cdef INT32_t* X_indptr = self.X_indptr
  __pyx_t_3 = __pyx_v_self->__pyx_base.X_indptr;
   __pyx_v_X_indptr = __pyx_t_3;
-
+1104:         cdef DTYPE_t* X_data = self.X_data
+
+1104:         cdef DTYPE_t* X_data = self.X_data
  __pyx_t_4 = __pyx_v_self->__pyx_base.X_data;
   __pyx_v_X_data = __pyx_t_4;
 
 1105: 
-
+1106:         cdef SIZE_t* features = self.features
+
+1106:         cdef SIZE_t* features = self.features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.features;
   __pyx_v_features = __pyx_t_1;
-
+1107:         cdef SIZE_t* constant_features = self.constant_features
+
+1107:         cdef SIZE_t* constant_features = self.constant_features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.constant_features;
   __pyx_v_constant_features = __pyx_t_1;
-
+1108:         cdef SIZE_t n_features = self.n_features
+
+1108:         cdef SIZE_t n_features = self.n_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.n_features;
   __pyx_v_n_features = __pyx_t_2;
 
 1109: 
-
+1110:         cdef DTYPE_t* Xf = self.feature_values
+
+1110:         cdef DTYPE_t* Xf = self.feature_values
  __pyx_t_4 = __pyx_v_self->__pyx_base.__pyx_base.feature_values;
   __pyx_v_Xf = __pyx_t_4;
-
+1111:         cdef SIZE_t* sorted_samples = self.sorted_samples
+
+1111:         cdef SIZE_t* sorted_samples = self.sorted_samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.sorted_samples;
   __pyx_v_sorted_samples = __pyx_t_1;
-
+1112:         cdef SIZE_t* index_to_samples = self.index_to_samples
+
+1112:         cdef SIZE_t* index_to_samples = self.index_to_samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.index_to_samples;
   __pyx_v_index_to_samples = __pyx_t_1;
-
+1113:         cdef SIZE_t max_features = self.max_features
+
+1113:         cdef SIZE_t max_features = self.max_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.max_features;
   __pyx_v_max_features = __pyx_t_2;
-
+1114:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
+
+1114:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf;
   __pyx_v_min_samples_leaf = __pyx_t_2;
-
+1115:         cdef double min_weight_leaf = self.min_weight_leaf
+
+1115:         cdef double min_weight_leaf = self.min_weight_leaf
  __pyx_t_5 = __pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf;
   __pyx_v_min_weight_leaf = __pyx_t_5;
-
+1116:         cdef UINT32_t* random_state = &self.rand_r_state
+
+1116:         cdef UINT32_t* random_state = &self.rand_r_state
  __pyx_v_random_state = (&__pyx_v_self->__pyx_base.__pyx_base.rand_r_state);
 
 1117: 
-
 1118:         cdef SplitRecord best, current
-
+1119:         _init_split(&best, end)
+
 1118:         cdef SplitRecord best, current
+
+1119:         _init_split(&best, end)
  __pyx_f_13stpredictions_6models_3OK3_9_splitter__init_split((&__pyx_v_best), __pyx_v_end);
-
+1120:         cdef double current_proxy_improvement = - INFINITY
+
+1120:         cdef double current_proxy_improvement = - INFINITY
  __pyx_v_current_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
-
+1121:         cdef double best_proxy_improvement = - INFINITY
+
+1121:         cdef double best_proxy_improvement = - INFINITY
  __pyx_v_best_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
 
 1122: 
-
+1123:         cdef SIZE_t f_i = n_features
+
+1123:         cdef SIZE_t f_i = n_features
  __pyx_v_f_i = __pyx_v_n_features;
-
 1124:         cdef SIZE_t f_j, p
-
+1125:         cdef SIZE_t n_visited_features = 0
+
 1124:         cdef SIZE_t f_j, p
+
+1125:         cdef SIZE_t n_visited_features = 0
  __pyx_v_n_visited_features = 0;
-
 1126:         # Number of features discovered to be constant during the split search
-
+1127:         cdef SIZE_t n_found_constants = 0
+
 1126:         # Number of features discovered to be constant during the split search
+
+1127:         cdef SIZE_t n_found_constants = 0
  __pyx_v_n_found_constants = 0;
-
 1128:         # Number of features known to be constant and drawn without replacement
-
+1129:         cdef SIZE_t n_drawn_constants = 0
+
 1128:         # Number of features known to be constant and drawn without replacement
+
+1129:         cdef SIZE_t n_drawn_constants = 0
  __pyx_v_n_drawn_constants = 0;
-
+1130:         cdef SIZE_t n_known_constants = n_constant_features[0]
+
+1130:         cdef SIZE_t n_known_constants = n_constant_features[0]
  __pyx_v_n_known_constants = (__pyx_v_n_constant_features[0]);
-
 1131:         # n_total_constants = n_known_constants + n_found_constants
-
+1132:         cdef SIZE_t n_total_constants = n_known_constants
+
 1131:         # n_total_constants = n_known_constants + n_found_constants
+
+1132:         cdef SIZE_t n_total_constants = n_known_constants
  __pyx_v_n_total_constants = __pyx_v_n_known_constants;
-
 1133:         cdef DTYPE_t current_feature_value
+
 1133:         cdef DTYPE_t current_feature_value
 1134: 
-
 1135:         cdef SIZE_t p_next
-
 1136:         cdef SIZE_t p_prev
-
+1137:         cdef bint is_samples_sorted = 0  # indicates if sorted_samples is
+
 1135:         cdef SIZE_t p_next
+
 1136:         cdef SIZE_t p_prev
+
+1137:         cdef bint is_samples_sorted = 0  # indicates if sorted_samples is
  __pyx_v_is_samples_sorted = 0;
-
 1138:                                          # initialized
+
 1138:                                          # initialized
 1139: 
-
 1140:         # We assume implicitly that end_positive = end and
-
 1141:         # start_negative = start
-
 1142:         cdef SIZE_t start_positive
-
 1143:         cdef SIZE_t end_negative
+
 1140:         # We assume implicitly that end_positive = end and
+
 1141:         # start_negative = start
+
 1142:         cdef SIZE_t start_positive
+
 1143:         cdef SIZE_t end_negative
 1144: 
-
 1145:         # Sample up to max_features without replacement using a
-
 1146:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
-
 1147:         # `f_j` to compute a permutation of the `features` array).
-
 1148:         #
-
 1149:         # Skip the CPU intensive evaluation of the impurity criterion for
-
 1150:         # features that were already detected as constant (hence not suitable
-
 1151:         # for good splitting) by ancestor nodes and save the information on
-
 1152:         # newly discovered constant features to spare computation on descendant
-
 1153:         # nodes.
-
+1154:         while (f_i > n_total_constants and  # Stop early if remaining features
+
 1145:         # Sample up to max_features without replacement using a
+
 1146:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
+
 1147:         # `f_j` to compute a permutation of the `features` array).
+
 1148:         #
+
 1149:         # Skip the CPU intensive evaluation of the impurity criterion for
+
 1150:         # features that were already detected as constant (hence not suitable
+
 1151:         # for good splitting) by ancestor nodes and save the information on
+
 1152:         # newly discovered constant features to spare computation on descendant
+
 1153:         # nodes.
+
+1154:         while (f_i > n_total_constants and  # Stop early if remaining features
  while (1) {
     __pyx_t_7 = ((__pyx_v_f_i > __pyx_v_n_total_constants) != 0);
     if (__pyx_t_7) {
@@ -3629,169 +3703,169 @@
       __pyx_t_6 = __pyx_t_7;
       goto __pyx_L5_bool_binop_done;
     }
-
 1155:                                             # are constant
-
+1156:                 (n_visited_features < max_features or
+
 1155:                                             # are constant
+
+1156:                 (n_visited_features < max_features or
    __pyx_t_7 = ((__pyx_v_n_visited_features < __pyx_v_max_features) != 0);
     if (!__pyx_t_7) {
     } else {
       __pyx_t_6 = __pyx_t_7;
       goto __pyx_L5_bool_binop_done;
     }
-
 1157:                  # At least one drawn feature must be non constant
-
+1158:                  n_visited_features <= n_found_constants + n_drawn_constants)):
+
 1157:                  # At least one drawn feature must be non constant
+
+1158:                  n_visited_features <= n_found_constants + n_drawn_constants)):
    __pyx_t_7 = ((__pyx_v_n_visited_features <= (__pyx_v_n_found_constants + __pyx_v_n_drawn_constants)) != 0);
     __pyx_t_6 = __pyx_t_7;
     __pyx_L5_bool_binop_done:;
     if (!__pyx_t_6) break;
 
 1159: 
-
+1160:             n_visited_features += 1
+
+1160:             n_visited_features += 1
    __pyx_v_n_visited_features = (__pyx_v_n_visited_features + 1);
 
 1161: 
-
 1162:             # Loop invariant: elements of features in
-
 1163:             # - [:n_drawn_constant[ holds drawn and known constant features;
-
 1164:             # - [n_drawn_constant:n_known_constant[ holds known constant
-
 1165:             #   features that haven't been drawn yet;
-
 1166:             # - [n_known_constant:n_total_constant[ holds newly found constant
-
 1167:             #   features;
-
 1168:             # - [n_total_constant:f_i[ holds features that haven't been drawn
-
 1169:             #   yet and aren't constant apriori.
-
 1170:             # - [f_i:n_features[ holds features that have been drawn
-
 1171:             #   and aren't constant.
+
 1162:             # Loop invariant: elements of features in
+
 1163:             # - [:n_drawn_constant[ holds drawn and known constant features;
+
 1164:             # - [n_drawn_constant:n_known_constant[ holds known constant
+
 1165:             #   features that haven't been drawn yet;
+
 1166:             # - [n_known_constant:n_total_constant[ holds newly found constant
+
 1167:             #   features;
+
 1168:             # - [n_total_constant:f_i[ holds features that haven't been drawn
+
 1169:             #   yet and aren't constant apriori.
+
 1170:             # - [f_i:n_features[ holds features that have been drawn
+
 1171:             #   and aren't constant.
 1172: 
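
The invariant spelled out above is what makes the constant-feature bookkeeping
cheap: features stays partitioned into [drawn constants | known-but-undrawn
constants | newly found constants | undrawn candidates | drawn non-constants],
and every draw is one Fisher-Yates step restricted to the candidate region. A
simplified sketch of a single draw (rand_int(lo, hi), drawing uniformly from
[lo, hi), is an assumed helper):

    import random

    def rand_int(lo, hi):
        return random.randrange(lo, hi)

    def draw_feature(features, f_i, n_drawn_constants, n_known_constants,
                     n_found_constants):
        """One draw; returns (feature or None, new n_drawn_constants)."""
        f_j = rand_int(n_drawn_constants, f_i - n_found_constants)
        if f_j < n_known_constants:
            # Drew a known constant: move it into the drawn-constants prefix.
            features[f_j], features[n_drawn_constants] = (
                features[n_drawn_constants], features[f_j])
            return None, n_drawn_constants + 1
        # Otherwise skip the newly-found-constants block and use the feature.
        f_j += n_found_constants
        return features[f_j], n_drawn_constants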
-
 1173:             # Draw a feature at random
-
+1174:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
+
 1173:             # Draw a feature at random
+
+1174:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
    __pyx_v_f_j = __pyx_f_7sklearn_4tree_6_utils_rand_int(__pyx_v_n_drawn_constants, (__pyx_v_f_i - __pyx_v_n_found_constants), __pyx_v_random_state);
-
 1175:                            random_state)
+
 1175:                            random_state)
 1176: 
-
+1177:             if f_j < n_known_constants:
+
+1177:             if f_j < n_known_constants:
    __pyx_t_6 = ((__pyx_v_f_j < __pyx_v_n_known_constants) != 0);
     if (__pyx_t_6) {
 /* … */
       goto __pyx_L8;
     }
-
 1178:                 # f_j in the interval [n_drawn_constants, n_known_constants[
-
+1179:                 features[f_j], features[n_drawn_constants] = features[n_drawn_constants], features[f_j]
+
 1178:                 # f_j in the interval [n_drawn_constants, n_known_constants[
+
+1179:                 features[f_j], features[n_drawn_constants] = features[n_drawn_constants], features[f_j]
      __pyx_t_2 = (__pyx_v_features[__pyx_v_n_drawn_constants]);
       __pyx_t_8 = (__pyx_v_features[__pyx_v_f_j]);
       (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_2;
       (__pyx_v_features[__pyx_v_n_drawn_constants]) = __pyx_t_8;
 
 1180: 
-
+1181:                 n_drawn_constants += 1
+
+1181:                 n_drawn_constants += 1
      __pyx_v_n_drawn_constants = (__pyx_v_n_drawn_constants + 1);
 
 1182: 
-
 1183:             else:
-
 1184:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
-
+1185:                 f_j += n_found_constants
+
 1183:             else:
+
 1184:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
+
+1185:                 f_j += n_found_constants
    /*else*/ {
       __pyx_v_f_j = (__pyx_v_f_j + __pyx_v_n_found_constants);
-
 1186:                 # f_j in the interval [n_total_constants, f_i[
+
 1186:                 # f_j in the interval [n_total_constants, f_i[
 1187: 
-
+1188:                 current.feature = features[f_j]
+
+1188:                 current.feature = features[f_j]
      __pyx_v_current.feature = (__pyx_v_features[__pyx_v_f_j]);
-
+1189:                 self.extract_nnz(current.feature,
+
+1189:                 self.extract_nnz(current.feature,
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_extract_nnz(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_current.feature, (&__pyx_v_end_negative), (&__pyx_v_start_positive), (&__pyx_v_is_samples_sorted));
-
 1190:                                  &end_negative, &start_positive,
-
 1191:                                  &is_samples_sorted)
+
 1190:                                  &end_negative, &start_positive,
+
 1191:                                  &is_samples_sorted)
 1192: 
-
 1193:                 # Sort the positive and negative parts of `Xf`
-
+1194:                 sort(Xf + start, samples + start, end_negative - start)
+
 1193:                 # Sort the positive and negative parts of `Xf`
+
+1194:                 sort(Xf + start, samples + start, end_negative - start)
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_sort((__pyx_v_Xf + __pyx_v_start), (__pyx_v_samples + __pyx_v_start), (__pyx_v_end_negative - __pyx_v_start));
-
+1195:                 sort(Xf + start_positive, samples + start_positive,
+
+1195:                 sort(Xf + start_positive, samples + start_positive,
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_sort((__pyx_v_Xf + __pyx_v_start_positive), (__pyx_v_samples + __pyx_v_start_positive), (__pyx_v_end - __pyx_v_start_positive));
-
 1196:                      end - start_positive)
+
 1196:                      end - start_positive)
 1197: 
-
 1198:                 # Update index_to_samples to take into account the sort
-
+1199:                 for p in range(start, end_negative):
+
 1198:                 # Update index_to_samples to take into account the sort
+
+1199:                 for p in range(start, end_negative):
      __pyx_t_8 = __pyx_v_end_negative;
       __pyx_t_2 = __pyx_t_8;
       for (__pyx_t_9 = __pyx_v_start; __pyx_t_9 < __pyx_t_2; __pyx_t_9+=1) {
         __pyx_v_p = __pyx_t_9;
-
+1200:                     index_to_samples[samples[p]] = p
+
+1200:                     index_to_samples[samples[p]] = p
        (__pyx_v_index_to_samples[(__pyx_v_samples[__pyx_v_p])]) = __pyx_v_p;
       }
-
+1201:                 for p in range(start_positive, end):
+
+1201:                 for p in range(start_positive, end):
      __pyx_t_8 = __pyx_v_end;
       __pyx_t_2 = __pyx_t_8;
       for (__pyx_t_9 = __pyx_v_start_positive; __pyx_t_9 < __pyx_t_2; __pyx_t_9+=1) {
         __pyx_v_p = __pyx_t_9;
-
+1202:                     index_to_samples[samples[p]] = p
+
+1202:                     index_to_samples[samples[p]] = p
        (__pyx_v_index_to_samples[(__pyx_v_samples[__pyx_v_p])]) = __pyx_v_p;
       }
 
 1203: 
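The two loops at lines 1199-1202 above restore the invariant that index_to_samples is the inverse permutation of samples on every position the sorts may have moved. A minimal Python sketch of that bookkeeping, assuming plain list buffers (names mirror the Cython locals; this is not the module's API):

    def rebuild_inverse_mapping(samples, index_to_samples, start, end_negative,
                                start_positive, end):
        # Only [start, end_negative[ and [start_positive, end[ were sorted,
        # so only those positions need their inverse entries refreshed.
        for p in range(start, end_negative):
            index_to_samples[samples[p]] = p
        for p in range(start_positive, end):
            index_to_samples[samples[p]] = p
        # Invariant the rest of the splitter relies on:
        assert all(index_to_samples[samples[p]] == p
                   for p in range(start, end_negative))
        assert all(index_to_samples[samples[p]] == p
                   for p in range(start_positive, end))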
-
 1204:                 # Add one or two zeros in Xf, if there is any
-
+1205:                 if end_negative < start_positive:
+
 1204:                 # Add one or two zeros in Xf, if there is any
+
+1205:                 if end_negative < start_positive:
      __pyx_t_6 = ((__pyx_v_end_negative < __pyx_v_start_positive) != 0);
       if (__pyx_t_6) {
 /* … */
       }
-
+1206:                     start_positive -= 1
+
+1206:                     start_positive -= 1
        __pyx_v_start_positive = (__pyx_v_start_positive - 1);
-
+1207:                     Xf[start_positive] = 0.
+
+1207:                     Xf[start_positive] = 0.
        (__pyx_v_Xf[__pyx_v_start_positive]) = 0.;
 
 1208: 
-
+1209:                     if end_negative != start_positive:
+
+1209:                     if end_negative != start_positive:
        __pyx_t_6 = ((__pyx_v_end_negative != __pyx_v_start_positive) != 0);
         if (__pyx_t_6) {
 /* … */
         }
-
+1210:                         Xf[end_negative] = 0.
+
+1210:                         Xf[end_negative] = 0.
          (__pyx_v_Xf[__pyx_v_end_negative]) = 0.;
-
+1211:                         end_negative += 1
+
+1211:                         end_negative += 1
          __pyx_v_end_negative = (__pyx_v_end_negative + 1);
 
 1212: 
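The branch at lines 1205-1211 above materializes the implicit zeros of the current sparse column: the explicit nonzeros occupy the negative run [start, end_negative[ and the positive run [start_positive, end[, and any gap between the two runs stands for zeros. A hedged Python sketch of the same bookkeeping, assuming a list-like Xf buffer (not the module's API):

    def add_implicit_zeros(Xf, end_negative, start_positive):
        # A gap between the negative and positive runs means the column has
        # implicit zeros; write one or two literal 0.0 sentinels so the split
        # search can place a threshold on either side of zero.
        if end_negative < start_positive:
            start_positive -= 1
            Xf[start_positive] = 0.0
            if end_negative != start_positive:  # gap wide enough for a second zero
                Xf[end_negative] = 0.0
                end_negative += 1
        return end_negative, start_positive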
-
+1213:                 if Xf[end - 1] <= Xf[start] + FEATURE_THRESHOLD:
+
+1213:                 if Xf[end - 1] <= Xf[start] + FEATURE_THRESHOLD:
      __pyx_t_6 = (((__pyx_v_Xf[(__pyx_v_end - 1)]) <= ((__pyx_v_Xf[__pyx_v_start]) + __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD)) != 0);
       if (__pyx_t_6) {
 /* … */
         goto __pyx_L15;
       }
-
+1214:                     features[f_j], features[n_total_constants] = features[n_total_constants], features[f_j]
+
+1214:                     features[f_j], features[n_total_constants] = features[n_total_constants], features[f_j]
        __pyx_t_8 = (__pyx_v_features[__pyx_v_n_total_constants]);
         __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_8;
         (__pyx_v_features[__pyx_v_n_total_constants]) = __pyx_t_2;
 
 1215: 
-
+1216:                     n_found_constants += 1
+
+1216:                     n_found_constants += 1
        __pyx_v_n_found_constants = (__pyx_v_n_found_constants + 1);
-
+1217:                     n_total_constants += 1
+
+1217:                     n_total_constants += 1
        __pyx_v_n_total_constants = (__pyx_v_n_total_constants + 1);
 
 1218: 
-
 1219:                 else:
-
+1220:                     f_i -= 1
+
 1219:                 else:
+
+1220:                     f_i -= 1
      /*else*/ {
         __pyx_v_f_i = (__pyx_v_f_i - 1);
-
+1221:                     features[f_i], features[f_j] = features[f_j], features[f_i]
+
+1221:                     features[f_i], features[f_j] = features[f_j], features[f_i]
        __pyx_t_2 = (__pyx_v_features[__pyx_v_f_j]);
         __pyx_t_8 = (__pyx_v_features[__pyx_v_f_i]);
         (__pyx_v_features[__pyx_v_f_i]) = __pyx_t_2;
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_8;
 
 1222: 
-
 1223:                     # Evaluate all splits
-
+1224:                     self.criterion.reset()
+
 1223:                     # Evaluate all splits
+
+1224:                     self.criterion.reset()
        __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1224, __pyx_L1_error)
-
+1225:                     p = start
+
+1225:                     p = start
        __pyx_v_p = __pyx_v_start;
 
 1226: 
-
+1227:                     while p < end:
+
+1227:                     while p < end:
        while (1) {
           __pyx_t_6 = ((__pyx_v_p < __pyx_v_end) != 0);
           if (!__pyx_t_6) break;
-
+1228:                         if p + 1 != end_negative:
+
+1228:                         if p + 1 != end_negative:
          __pyx_t_6 = (((__pyx_v_p + 1) != __pyx_v_end_negative) != 0);
           if (__pyx_t_6) {
 /* … */
             goto __pyx_L18;
           }
-
+1229:                             p_next = p + 1
+
+1229:                             p_next = p + 1
            __pyx_v_p_next = (__pyx_v_p + 1);
-
 1230:                         else:
-
+1231:                             p_next = start_positive
+
 1230:                         else:
+
+1231:                             p_next = start_positive
          /*else*/ {
             __pyx_v_p_next = __pyx_v_start_positive;
           }
           __pyx_L18:;
 
 1232: 
-
+1233:                         while (p_next < end and
+
+1233:                         while (p_next < end and
          while (1) {
             __pyx_t_7 = ((__pyx_v_p_next < __pyx_v_end) != 0);
             if (__pyx_t_7) {
@@ -3799,23 +3873,23 @@
               __pyx_t_6 = __pyx_t_7;
               goto __pyx_L21_bool_binop_done;
             }
-
+1234:                                Xf[p_next] <= Xf[p] + FEATURE_THRESHOLD):
+
+1234:                                Xf[p_next] <= Xf[p] + FEATURE_THRESHOLD):
            __pyx_t_7 = (((__pyx_v_Xf[__pyx_v_p_next]) <= ((__pyx_v_Xf[__pyx_v_p]) + __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD)) != 0);
             __pyx_t_6 = __pyx_t_7;
             __pyx_L21_bool_binop_done:;
             if (!__pyx_t_6) break;
-
+1235:                             p = p_next
+
+1235:                             p = p_next
            __pyx_v_p = __pyx_v_p_next;
-
+1236:                             if p + 1 != end_negative:
+
+1236:                             if p + 1 != end_negative:
            __pyx_t_6 = (((__pyx_v_p + 1) != __pyx_v_end_negative) != 0);
             if (__pyx_t_6) {
 /* … */
               goto __pyx_L23;
             }
-
+1237:                                 p_next = p + 1
+
+1237:                                 p_next = p + 1
              __pyx_v_p_next = (__pyx_v_p + 1);
-
 1238:                             else:
-
+1239:                                 p_next = start_positive
+
 1238:                             else:
+
+1239:                                 p_next = start_positive
            /*else*/ {
               __pyx_v_p_next = __pyx_v_start_positive;
             }
@@ -3823,17 +3897,17 @@
           }
 
 1240: 
 1241: 
-
 1242:                         # (p_next >= end) or (X[samples[p_next], current.feature] >
-
 1243:                         #                     X[samples[p], current.feature])
-
+1244:                         p_prev = p
+
 1242:                         # (p_next >= end) or (X[samples[p_next], current.feature] >
+
 1243:                         #                     X[samples[p], current.feature])
+
+1244:                         p_prev = p
          __pyx_v_p_prev = __pyx_v_p;
-
+1245:                         p = p_next
+
+1245:                         p = p_next
          __pyx_v_p = __pyx_v_p_next;
-
 1246:                         # (p >= end) or (X[samples[p], current.feature] >
-
 1247:                         #                X[samples[p_prev], current.feature])
+
 1246:                         # (p >= end) or (X[samples[p], current.feature] >
+
 1247:                         #                X[samples[p_prev], current.feature])
 1248: 
 1249: 
-
+1250:                         if p < end:
+
+1250:                         if p < end:
          __pyx_t_6 = ((__pyx_v_p < __pyx_v_end) != 0);
           if (__pyx_t_6) {
 /* … */
@@ -3845,11 +3919,11 @@
     }
     __pyx_L8:;
   }
-
+1251:                             current.pos = p
+
+1251:                             current.pos = p
            __pyx_v_current.pos = __pyx_v_p;
 
 1252: 
-
 1253:                             # Reject if min_samples_leaf is not guaranteed
-
+1254:                             if (((current.pos - start) < min_samples_leaf) or
+
 1253:                             # Reject if min_samples_leaf is not guaranteed
+
+1254:                             if (((current.pos - start) < min_samples_leaf) or
            __pyx_t_7 = (((__pyx_v_current.pos - __pyx_v_start) < __pyx_v_min_samples_leaf) != 0);
             if (!__pyx_t_7) {
             } else {
@@ -3860,18 +3934,18 @@
             if (__pyx_t_6) {
 /* … */
             }
-
+1255:                                     ((end - current.pos) < min_samples_leaf)):
+
+1255:                                     ((end - current.pos) < min_samples_leaf)):
            __pyx_t_7 = (((__pyx_v_end - __pyx_v_current.pos) < __pyx_v_min_samples_leaf) != 0);
             __pyx_t_6 = __pyx_t_7;
             __pyx_L26_bool_binop_done:;
-
+1256:                                 continue
+
+1256:                                 continue
              goto __pyx_L16_continue;
 
 1257: 
-
+1258:                             self.criterion.update(current.pos)
+
+1258:                             self.criterion.update(current.pos)
            __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_current.pos); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1258, __pyx_L1_error)
 
 1259: 
-
 1260:                             # Reject if min_weight_leaf is not satisfied
-
+1261:                             if ((self.criterion.weighted_n_left < min_weight_leaf) or
+
 1260:                             # Reject if min_weight_leaf is not satisfied
+
+1261:                             if ((self.criterion.weighted_n_left < min_weight_leaf) or
            __pyx_t_7 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_left < __pyx_v_min_weight_leaf) != 0);
             if (!__pyx_t_7) {
             } else {
@@ -3882,28 +3956,28 @@
             if (__pyx_t_6) {
 /* … */
             }
-
+1262:                                     (self.criterion.weighted_n_right < min_weight_leaf)):
+
+1262:                                     (self.criterion.weighted_n_right < min_weight_leaf)):
            __pyx_t_7 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_right < __pyx_v_min_weight_leaf) != 0);
             __pyx_t_6 = __pyx_t_7;
             __pyx_L29_bool_binop_done:;
-
+1263:                                 continue
+
+1263:                                 continue
              goto __pyx_L16_continue;
 
 1264: 
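The two `continue` guards at lines 1254-1263 above skip any candidate position whose children would violate the leaf constraints. An illustrative predicate under assumed scalar inputs (names mirror the Cython locals; not the module's API):

    def reject_candidate(pos, start, end, min_samples_leaf,
                         weighted_n_left, weighted_n_right, min_weight_leaf):
        # Too few samples on either side of the candidate split?
        too_small = ((pos - start) < min_samples_leaf or
                     (end - pos) < min_samples_leaf)
        # Too little total sample weight on either side?
        too_light = (weighted_n_left < min_weight_leaf or
                     weighted_n_right < min_weight_leaf)
        return too_small or too_light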
-
+1265:                             current_proxy_improvement = self.criterion.proxy_impurity_improvement()
+
+1265:                             current_proxy_improvement = self.criterion.proxy_impurity_improvement()
            __pyx_v_current_proxy_improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->proxy_impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion);
 
 1266: 
-
+1267:                             if current_proxy_improvement > best_proxy_improvement:
+
+1267:                             if current_proxy_improvement > best_proxy_improvement:
            __pyx_t_6 = ((__pyx_v_current_proxy_improvement > __pyx_v_best_proxy_improvement) != 0);
             if (__pyx_t_6) {
 /* … */
             }
-
+1268:                                 best_proxy_improvement = current_proxy_improvement
+
+1268:                                 best_proxy_improvement = current_proxy_improvement
              __pyx_v_best_proxy_improvement = __pyx_v_current_proxy_improvement;
-
 1269:                                 # sum of halves used to avoid infinite values
-
+1270:                                 current.threshold = Xf[p_prev] / 2.0 + Xf[p] / 2.0
+
 1269:                                 # sum of halves used to avoid infinite values
+
+1270:                                 current.threshold = Xf[p_prev] / 2.0 + Xf[p] / 2.0
              __pyx_v_current.threshold = (((__pyx_v_Xf[__pyx_v_p_prev]) / 2.0) + ((__pyx_v_Xf[__pyx_v_p]) / 2.0));
 
 1271: 
-
+1272:                                 if ((current.threshold == Xf[p]) or
+
+1272:                                 if ((current.threshold == Xf[p]) or
              __pyx_t_7 = ((__pyx_v_current.threshold == (__pyx_v_Xf[__pyx_v_p])) != 0);
               if (!__pyx_t_7) {
               } else {
@@ -3914,70 +3988,70 @@
               if (__pyx_t_6) {
 /* … */
               }
-
+1273:                                     (current.threshold == INFINITY) or
+
+1273:                                     (current.threshold == INFINITY) or
              __pyx_t_7 = ((__pyx_v_current.threshold == __pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY) != 0);
               if (!__pyx_t_7) {
               } else {
                 __pyx_t_6 = __pyx_t_7;
                 goto __pyx_L33_bool_binop_done;
               }
-
+1274:                                     (current.threshold == -INFINITY)):
+
+1274:                                     (current.threshold == -INFINITY)):
              __pyx_t_7 = ((__pyx_v_current.threshold == (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY)) != 0);
               __pyx_t_6 = __pyx_t_7;
               __pyx_L33_bool_binop_done:;
-
+1275:                                     current.threshold = Xf[p_prev]
+
+1275:                                     current.threshold = Xf[p_prev]
                __pyx_v_current.threshold = (__pyx_v_Xf[__pyx_v_p_prev]);
 
 1276: 
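Line 1270 above halves each operand before adding ("sum of halves") because, with float32 feature values, (a + b) / 2 can overflow to infinity even when the midpoint itself is representable; the guard at lines 1272-1275 then falls back to Xf[p_prev] if rounding still lands on Xf[p] or on an infinity. A small demonstration of the overflow point, assuming NumPy is available:

    import numpy as np

    a = np.float32(3.0e38)               # near the float32 maximum (~3.4e38)
    b = np.float32(3.0e38)
    with np.errstate(over="ignore"):
        naive = (a + b) / np.float32(2)  # a + b overflows first -> inf
    safe = a / np.float32(2) + b / np.float32(2)
    print(naive, safe)                   # inf 3e+38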
-
+1277:                                 best = current
+
+1277:                                 best = current
              __pyx_v_best = __pyx_v_current;
 
 1278: 
-
 1279:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
-
+1280:         if best.pos < end:
+
 1279:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
+
+1280:         if best.pos < end:
  __pyx_t_6 = ((__pyx_v_best.pos < __pyx_v_end) != 0);
   if (__pyx_t_6) {
 /* … */
   }
-
+1281:             self.extract_nnz(best.feature, &end_negative, &start_positive,
+
+1281:             self.extract_nnz(best.feature, &end_negative, &start_positive,
    __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_extract_nnz(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_best.feature, (&__pyx_v_end_negative), (&__pyx_v_start_positive), (&__pyx_v_is_samples_sorted));
-
 1282:                              &is_samples_sorted)
+
 1282:                              &is_samples_sorted)
 1283: 
-
+1284:             self._partition(best.threshold, end_negative, start_positive,
+
+1284:             self._partition(best.threshold, end_negative, start_positive,
    (void)(__pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter__partition(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_best.threshold, __pyx_v_end_negative, __pyx_v_start_positive, __pyx_v_best.pos));
-
 1285:                             best.pos)
+
 1285:                             best.pos)
 1286: 
-
+1287:             self.criterion.reset()
+
+1287:             self.criterion.reset()
    __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1287, __pyx_L1_error)
-
+1288:             self.criterion.update(best.pos)
+
+1288:             self.criterion.update(best.pos)
    __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_best.pos); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1288, __pyx_L1_error)
-
+1289:             best.improvement = self.criterion.impurity_improvement(impurity)
+
+1289:             best.improvement = self.criterion.impurity_improvement(impurity)
    __pyx_v_best.improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_impurity);
-
+1290:             self.criterion.children_impurity(&best.impurity_left,
+
+1290:             self.criterion.children_impurity(&best.impurity_left,
    ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->children_impurity(__pyx_v_self->__pyx_base.__pyx_base.criterion, (&__pyx_v_best.impurity_left), (&__pyx_v_best.impurity_right));
-
 1291:                                              &best.impurity_right)
+
 1291:                                              &best.impurity_right)
 1292: 
-
 1293:         # Respect invariant for constant features: the original order of
-
 1294:         # elements in features[:n_known_constants] must be preserved for sibling
-
 1295:         # and child nodes
-
+1296:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
+
 1293:         # Respect invariant for constant features: the original order of
+
 1294:         # elements in features[:n_known_constants] must be preserved for sibling
+
 1295:         # and child nodes
+
+1296:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
  (void)(memcpy(__pyx_v_features, __pyx_v_constant_features, ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_known_constants)));
 
 1297: 
-
 1298:         # Copy newly found constant features
-
+1299:         memcpy(constant_features + n_known_constants,
+
 1298:         # Copy newly found constant features
+
+1299:         memcpy(constant_features + n_known_constants,
  (void)(memcpy((__pyx_v_constant_features + __pyx_v_n_known_constants), (__pyx_v_features + __pyx_v_n_known_constants), ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_found_constants)));
-
 1300:                features + n_known_constants,
-
 1301:                sizeof(SIZE_t) * n_found_constants)
+
 1300:                features + n_known_constants,
+
 1301:                sizeof(SIZE_t) * n_found_constants)
 1302: 
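The two memcpy calls at lines 1296-1301 above keep the constant-feature bookkeeping consistent across the tree: the ancestor's ordering of known constants is restored in `features`, and the constants discovered at this node are appended to `constant_features` for descendant nodes to skip. A rough Python equivalent over lists (sketch only; the generated code copies raw SIZE_t buffers):

    def sync_constant_features(features, constant_features,
                               n_known_constants, n_found_constants):
        # Restore the original order of already-known constant features.
        features[:n_known_constants] = constant_features[:n_known_constants]
        # Append the newly found constants right after the known ones.
        n_total = n_known_constants + n_found_constants
        constant_features[n_known_constants:n_total] = \
            features[n_known_constants:n_total]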
-
 1303:         # Return values
-
+1304:         split[0] = best
+
 1303:         # Return values
+
+1304:         split[0] = best
  (__pyx_v_split[0]) = __pyx_v_best;
-
+1305:         n_constant_features[0] = n_total_constants
+
+1305:         n_constant_features[0] = n_total_constants
  (__pyx_v_n_constant_features[0]) = __pyx_v_n_total_constants;
-
+1306:         return 0
+
+1306:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 1307: 
 1308: 
-
+1309: cdef class RandomSparseSplitter(BaseSparseSplitter):
+
+1309: cdef class RandomSparseSplitter(BaseSparseSplitter):
struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_RandomSparseSplitter {
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter __pyx_base;
 };
@@ -3987,9 +4061,9 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_RandomSparseSplitter *__pyx_vtabptr_13stpredictions_6models_3OK3_9_splitter_RandomSparseSplitter;
 
-
 1310:     """Splitter for finding a random split, using the sparse data."""
+
 1310:     """Splitter for finding a random split, using the sparse data."""
 1311: 
-
+1312:     def __reduce__(self):
+
+1312:     def __reduce__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_20RandomSparseSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_9_splitter_20RandomSparseSplitter_1__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
@@ -4021,7 +4095,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+1313:         return (RandomSparseSplitter, (self.criterion,
+
+1313:         return (RandomSparseSplitter, (self.criterion,
  __Pyx_XDECREF(__pyx_r);
 /* … */
   __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1313, __pyx_L1_error)
@@ -4056,16 +4130,16 @@
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
-
+1314:                                        self.max_features,
+
+1314:                                        self.max_features,
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.max_features); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1314, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
-
+1315:                                        self.min_samples_leaf,
+
+1315:                                        self.min_samples_leaf,
  __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1315, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
-
+1316:                                        self.min_weight_leaf,
+
+1316:                                        self.min_weight_leaf,
  __pyx_t_3 = PyFloat_FromDouble(__pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1316, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
-
+1317:                                        self.random_state), self.__getstate__())
+
+1317:                                        self.random_state), self.__getstate__())
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_getstate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1317, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_1 = NULL;
@@ -4084,7 +4158,7 @@
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
 1318: 
-
+1319:     cdef int node_split(self, double impurity, SplitRecord* split,
+
+1319:     cdef int node_split(self, double impurity, SplitRecord* split,
static int __pyx_f_13stpredictions_6models_3OK3_9_splitter_20RandomSparseSplitter_node_split(struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_RandomSparseSplitter *__pyx_v_self, double __pyx_v_impurity, struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord *__pyx_v_split, __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_n_constant_features) {
   CYTHON_UNUSED __pyx_t_7sklearn_4tree_5_tree_SIZE_t *__pyx_v_samples;
   __pyx_t_7sklearn_4tree_5_tree_SIZE_t __pyx_v_start;
@@ -4137,114 +4211,114 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 1320:                         SIZE_t* n_constant_features) nogil except -1:
-
 1321:         """Find a random split on node samples[start:end], using sparse features
+
 1320:                         SIZE_t* n_constant_features) nogil except -1:
+
 1321:         """Find a random split on node samples[start:end], using sparse features
 1322: 
-
 1323:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 1324:         or 0 otherwise.
-
 1325:         """
-
 1326:         # Find the best split
-
+1327:         cdef SIZE_t* samples = self.samples
+
 1323:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 1324:         or 0 otherwise.
+
 1325:         """
+
 1326:         # Find the best split
+
+1327:         cdef SIZE_t* samples = self.samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.samples;
   __pyx_v_samples = __pyx_t_1;
-
+1328:         cdef SIZE_t start = self.start
+
+1328:         cdef SIZE_t start = self.start
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.start;
   __pyx_v_start = __pyx_t_2;
-
+1329:         cdef SIZE_t end = self.end
+
+1329:         cdef SIZE_t end = self.end
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.end;
   __pyx_v_end = __pyx_t_2;
 
 1330: 
-
+1331:         cdef INT32_t* X_indices = self.X_indices
+
+1331:         cdef INT32_t* X_indices = self.X_indices
  __pyx_t_3 = __pyx_v_self->__pyx_base.X_indices;
   __pyx_v_X_indices = __pyx_t_3;
-
+1332:         cdef INT32_t* X_indptr = self.X_indptr
+
+1332:         cdef INT32_t* X_indptr = self.X_indptr
  __pyx_t_3 = __pyx_v_self->__pyx_base.X_indptr;
   __pyx_v_X_indptr = __pyx_t_3;
-
+1333:         cdef DTYPE_t* X_data = self.X_data
+
+1333:         cdef DTYPE_t* X_data = self.X_data
  __pyx_t_4 = __pyx_v_self->__pyx_base.X_data;
   __pyx_v_X_data = __pyx_t_4;
 
 1334: 
-
+1335:         cdef SIZE_t* features = self.features
+
+1335:         cdef SIZE_t* features = self.features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.features;
   __pyx_v_features = __pyx_t_1;
-
+1336:         cdef SIZE_t* constant_features = self.constant_features
+
+1336:         cdef SIZE_t* constant_features = self.constant_features
  __pyx_t_1 = __pyx_v_self->__pyx_base.__pyx_base.constant_features;
   __pyx_v_constant_features = __pyx_t_1;
-
+1337:         cdef SIZE_t n_features = self.n_features
+
+1337:         cdef SIZE_t n_features = self.n_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.n_features;
   __pyx_v_n_features = __pyx_t_2;
 
 1338: 
-
+1339:         cdef DTYPE_t* Xf = self.feature_values
+
+1339:         cdef DTYPE_t* Xf = self.feature_values
  __pyx_t_4 = __pyx_v_self->__pyx_base.__pyx_base.feature_values;
   __pyx_v_Xf = __pyx_t_4;
-
+1340:         cdef SIZE_t* sorted_samples = self.sorted_samples
+
+1340:         cdef SIZE_t* sorted_samples = self.sorted_samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.sorted_samples;
   __pyx_v_sorted_samples = __pyx_t_1;
-
+1341:         cdef SIZE_t* index_to_samples = self.index_to_samples
+
+1341:         cdef SIZE_t* index_to_samples = self.index_to_samples
  __pyx_t_1 = __pyx_v_self->__pyx_base.index_to_samples;
   __pyx_v_index_to_samples = __pyx_t_1;
-
+1342:         cdef SIZE_t max_features = self.max_features
+
+1342:         cdef SIZE_t max_features = self.max_features
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.max_features;
   __pyx_v_max_features = __pyx_t_2;
-
+1343:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
+
+1343:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
  __pyx_t_2 = __pyx_v_self->__pyx_base.__pyx_base.min_samples_leaf;
   __pyx_v_min_samples_leaf = __pyx_t_2;
-
+1344:         cdef double min_weight_leaf = self.min_weight_leaf
+
+1344:         cdef double min_weight_leaf = self.min_weight_leaf
  __pyx_t_5 = __pyx_v_self->__pyx_base.__pyx_base.min_weight_leaf;
   __pyx_v_min_weight_leaf = __pyx_t_5;
-
+1345:         cdef UINT32_t* random_state = &self.rand_r_state
+
+1345:         cdef UINT32_t* random_state = &self.rand_r_state
  __pyx_v_random_state = (&__pyx_v_self->__pyx_base.__pyx_base.rand_r_state);
 
 1346: 
-
 1347:         cdef SplitRecord best, current
-
+1348:         _init_split(&best, end)
+
 1347:         cdef SplitRecord best, current
+
+1348:         _init_split(&best, end)
  __pyx_f_13stpredictions_6models_3OK3_9_splitter__init_split((&__pyx_v_best), __pyx_v_end);
-
+1349:         cdef double current_proxy_improvement = - INFINITY
+
+1349:         cdef double current_proxy_improvement = - INFINITY
  __pyx_v_current_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
-
+1350:         cdef double best_proxy_improvement = - INFINITY
+
+1350:         cdef double best_proxy_improvement = - INFINITY
  __pyx_v_best_proxy_improvement = (-__pyx_v_13stpredictions_6models_3OK3_9_splitter_INFINITY);
 
 1351: 
-
 1352:         cdef DTYPE_t current_feature_value
+
 1352:         cdef DTYPE_t current_feature_value
 1353: 
-
+1354:         cdef SIZE_t f_i = n_features
+
+1354:         cdef SIZE_t f_i = n_features
  __pyx_v_f_i = __pyx_v_n_features;
-
 1355:         cdef SIZE_t f_j, p
-
+1356:         cdef SIZE_t n_visited_features = 0
+
 1355:         cdef SIZE_t f_j, p
+
+1356:         cdef SIZE_t n_visited_features = 0
  __pyx_v_n_visited_features = 0;
-
 1357:         # Number of features discovered to be constant during the split search
-
+1358:         cdef SIZE_t n_found_constants = 0
+
 1357:         # Number of features discovered to be constant during the split search
+
+1358:         cdef SIZE_t n_found_constants = 0
  __pyx_v_n_found_constants = 0;
-
 1359:         # Number of features known to be constant and drawn without replacement
-
+1360:         cdef SIZE_t n_drawn_constants = 0
+
 1359:         # Number of features known to be constant and drawn without replacement
+
+1360:         cdef SIZE_t n_drawn_constants = 0
  __pyx_v_n_drawn_constants = 0;
-
+1361:         cdef SIZE_t n_known_constants = n_constant_features[0]
+
+1361:         cdef SIZE_t n_known_constants = n_constant_features[0]
  __pyx_v_n_known_constants = (__pyx_v_n_constant_features[0]);
-
 1362:         # n_total_constants = n_known_constants + n_found_constants
-
+1363:         cdef SIZE_t n_total_constants = n_known_constants
+
 1362:         # n_total_constants = n_known_constants + n_found_constants
+
+1363:         cdef SIZE_t n_total_constants = n_known_constants
  __pyx_v_n_total_constants = __pyx_v_n_known_constants;
-
 1364:         cdef SIZE_t partition_end
+
 1364:         cdef SIZE_t partition_end
 1365: 
-
 1366:         cdef DTYPE_t min_feature_value
-
 1367:         cdef DTYPE_t max_feature_value
+
 1366:         cdef DTYPE_t min_feature_value
+
 1367:         cdef DTYPE_t max_feature_value
 1368: 
-
+1369:         cdef bint is_samples_sorted = 0  # indicates that sorted_samples is
+
+1369:         cdef bint is_samples_sorted = 0  # indicates that sorted_samples is
  __pyx_v_is_samples_sorted = 0;
-
 1370:                                          # initialized
+
 1370:                                          # initialized
 1371: 
-
 1372:         # We assume implicitly that end_positive = end and
-
 1373:         # start_negative = start
-
 1374:         cdef SIZE_t start_positive
-
 1375:         cdef SIZE_t end_negative
+
 1372:         # We assume implicitly that end_positive = end and
+
 1373:         # start_negative = start
+
 1374:         cdef SIZE_t start_positive
+
 1375:         cdef SIZE_t end_negative
 1376: 
-
 1377:         # Sample up to max_features without replacement using a
-
 1378:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
-
 1379:         # `f_j` to compute a permutation of the `features` array).
-
 1380:         #
-
 1381:         # Skip the CPU intensive evaluation of the impurity criterion for
-
 1382:         # features that were already detected as constant (hence not suitable
-
 1383:         # for good splitting) by ancestor nodes and save the information on
-
 1384:         # newly discovered constant features to spare computation on descendant
-
 1385:         # nodes.
-
+1386:         while (f_i > n_total_constants and  # Stop early if remaining features
+
 1377:         # Sample up to max_features without replacement using a
+
 1378:         # Fisher-Yates-based algorithm (using the local variables `f_i` and
+
 1379:         # `f_j` to compute a permutation of the `features` array).
+
 1380:         #
+
 1381:         # Skip the CPU intensive evaluation of the impurity criterion for
+
 1382:         # features that were already detected as constant (hence not suitable
+
 1383:         # for good splitting) by ancestor nodes and save the information on
+
 1384:         # newly discovered constant features to spare computation on descendant
+
 1385:         # nodes.
+
+1386:         while (f_i > n_total_constants and  # Stop early if remaining features
  while (1) {
     __pyx_t_7 = ((__pyx_v_f_i > __pyx_v_n_total_constants) != 0);
     if (__pyx_t_7) {
@@ -4252,202 +4326,202 @@
       __pyx_t_6 = __pyx_t_7;
       goto __pyx_L5_bool_binop_done;
     }
-
 1387:                                             # are constant
-
+1388:                 (n_visited_features < max_features or
+
 1387:                                             # are constant
+
+1388:                 (n_visited_features < max_features or
    __pyx_t_7 = ((__pyx_v_n_visited_features < __pyx_v_max_features) != 0);
     if (!__pyx_t_7) {
     } else {
       __pyx_t_6 = __pyx_t_7;
       goto __pyx_L5_bool_binop_done;
     }
-
 1389:                  # At least one drawn feature must be non-constant
-
+1390:                  n_visited_features <= n_found_constants + n_drawn_constants)):
+
 1389:                  # At least one drawn feature must be non-constant
+
+1390:                  n_visited_features <= n_found_constants + n_drawn_constants)):
    __pyx_t_7 = ((__pyx_v_n_visited_features <= (__pyx_v_n_found_constants + __pyx_v_n_drawn_constants)) != 0);
     __pyx_t_6 = __pyx_t_7;
     __pyx_L5_bool_binop_done:;
     if (!__pyx_t_6) break;
 
 1391: 
-
+1392:             n_visited_features += 1
+
+1392:             n_visited_features += 1
    __pyx_v_n_visited_features = (__pyx_v_n_visited_features + 1);
 
 1393: 
-
 1394:             # Loop invariant: elements of features in
-
 1395:             # - [:n_drawn_constant[ holds drawn and known constant features;
-
 1396:             # - [n_drawn_constant:n_known_constant[ holds known constant
-
 1397:             #   features that haven't been drawn yet;
-
 1398:             # - [n_known_constant:n_total_constant[ holds newly found constant
-
 1399:             #   features;
-
 1400:             # - [n_total_constant:f_i[ holds features that haven't been drawn
-
 1401:             #   yet and aren't constant a priori.
-
 1402:             # - [f_i:n_features[ holds features that have been drawn
-
 1403:             #   and aren't constant.
+
 1394:             # Loop invariant: elements of features in
+
 1395:             # - [:n_drawn_constant[ holds drawn and known constant features;
+
 1396:             # - [n_drawn_constant:n_known_constant[ holds known constant
+
 1397:             #   features that haven't been drawn yet;
+
 1398:             # - [n_known_constant:n_total_constant[ holds newly found constant
+
 1399:             #   features;
+
 1400:             # - [n_total_constant:f_i[ holds features that haven't been drawn
+
 1401:             #   yet and aren't constant a priori.
+
 1402:             # - [f_i:n_features[ holds features that have been drawn
+
 1403:             #   and aren't constant.
 1404: 
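The comment block above (lines 1377-1403) describes a Fisher-Yates-style partial shuffle of `features` that draws without replacement while skipping constants cheaply. A hedged Python sketch of one draw (lines 1406-1417), with random.randrange standing in for the C-level rand_int (illustrative only, not the module's API):

    import random

    def draw_feature(features, f_i, n_drawn_constants,
                     n_known_constants, n_found_constants):
        # Draw f_j uniformly from the undrawn region, excluding the block of
        # constants found at this node: [n_drawn_constants, f_i - n_found_constants[.
        f_j = random.randrange(n_drawn_constants, f_i - n_found_constants)
        if f_j < n_known_constants:
            # Known constant: swap it into the drawn-constants prefix; no
            # candidate feature is produced on this iteration.
            features[f_j], features[n_drawn_constants] = (
                features[n_drawn_constants], features[f_j])
            return None, n_drawn_constants + 1
        # Otherwise shift past the newly found constants so f_j lands in
        # [n_total_constants, f_i[, the undrawn non-constant region.
        return f_j + n_found_constants, n_drawn_constants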
-
 1405:             # Draw a feature at random
-
+1406:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
+
 1405:             # Draw a feature at random
+
+1406:             f_j = rand_int(n_drawn_constants, f_i - n_found_constants,
    __pyx_v_f_j = __pyx_f_7sklearn_4tree_6_utils_rand_int(__pyx_v_n_drawn_constants, (__pyx_v_f_i - __pyx_v_n_found_constants), __pyx_v_random_state);
-
 1407:                            random_state)
+
 1407:                            random_state)
 1408: 
-
+1409:             if f_j < n_known_constants:
+
+1409:             if f_j < n_known_constants:
    __pyx_t_6 = ((__pyx_v_f_j < __pyx_v_n_known_constants) != 0);
     if (__pyx_t_6) {
 /* … */
       goto __pyx_L8;
     }
-
 1410:                 # f_j in the interval [n_drawn_constants, n_known_constants[
-
+1411:                 features[f_j], features[n_drawn_constants] = features[n_drawn_constants], features[f_j]
+
 1410:                 # f_j in the interval [n_drawn_constants, n_known_constants[
+
+1411:                 features[f_j], features[n_drawn_constants] = features[n_drawn_constants], features[f_j]
      __pyx_t_2 = (__pyx_v_features[__pyx_v_n_drawn_constants]);
       __pyx_t_8 = (__pyx_v_features[__pyx_v_f_j]);
       (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_2;
       (__pyx_v_features[__pyx_v_n_drawn_constants]) = __pyx_t_8;
 
 1412: 
-
+1413:                 n_drawn_constants += 1
+
+1413:                 n_drawn_constants += 1
      __pyx_v_n_drawn_constants = (__pyx_v_n_drawn_constants + 1);
 
 1414: 
-
 1415:             else:
-
 1416:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
-
+1417:                 f_j += n_found_constants
+
 1415:             else:
+
 1416:                 # f_j in the interval [n_known_constants, f_i - n_found_constants[
+
+1417:                 f_j += n_found_constants
    /*else*/ {
       __pyx_v_f_j = (__pyx_v_f_j + __pyx_v_n_found_constants);
-
 1418:                 # f_j in the interval [n_total_constants, f_i[
+
 1418:                 # f_j in the interval [n_total_constants, f_i[
 1419: 
-
+1420:                 current.feature = features[f_j]
+
+1420:                 current.feature = features[f_j]
      __pyx_v_current.feature = (__pyx_v_features[__pyx_v_f_j]);
 
 1421: 
-
+1422:                 self.extract_nnz(current.feature,
+
+1422:                 self.extract_nnz(current.feature,
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_extract_nnz(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_current.feature, (&__pyx_v_end_negative), (&__pyx_v_start_positive), (&__pyx_v_is_samples_sorted));
-
 1423:                                  &end_negative, &start_positive,
-
 1424:                                  &is_samples_sorted)
+
 1423:                                  &end_negative, &start_positive,
+
 1424:                                  &is_samples_sorted)
 1425: 
-
 1426:                 # Add one or two zeros in Xf, if there is any
-
+1427:                 if end_negative < start_positive:
+
 1426:                 # Add one or two zeros in Xf, if there is any
+
+1427:                 if end_negative < start_positive:
      __pyx_t_6 = ((__pyx_v_end_negative < __pyx_v_start_positive) != 0);
       if (__pyx_t_6) {
 /* … */
       }
-
+1428:                     start_positive -= 1
+
+1428:                     start_positive -= 1
        __pyx_v_start_positive = (__pyx_v_start_positive - 1);
-
+1429:                     Xf[start_positive] = 0.
+
+1429:                     Xf[start_positive] = 0.
        (__pyx_v_Xf[__pyx_v_start_positive]) = 0.;
 
 1430: 
-
+1431:                     if end_negative != start_positive:
+
+1431:                     if end_negative != start_positive:
        __pyx_t_6 = ((__pyx_v_end_negative != __pyx_v_start_positive) != 0);
         if (__pyx_t_6) {
 /* … */
         }
-
+1432:                         Xf[end_negative] = 0.
+
+1432:                         Xf[end_negative] = 0.
          (__pyx_v_Xf[__pyx_v_end_negative]) = 0.;
-
+1433:                         end_negative += 1
+
+1433:                         end_negative += 1
          __pyx_v_end_negative = (__pyx_v_end_negative + 1);
 
 1434: 
-
 1435:                 # Find min, max in Xf[start:end_negative]
-
+1436:                 min_feature_value = Xf[start]
+
 1435:                 # Find min, max in Xf[start:end_negative]
+
+1436:                 min_feature_value = Xf[start]
      __pyx_v_min_feature_value = (__pyx_v_Xf[__pyx_v_start]);
-
+1437:                 max_feature_value = min_feature_value
+
+1437:                 max_feature_value = min_feature_value
      __pyx_v_max_feature_value = __pyx_v_min_feature_value;
 
 1438: 
-
+1439:                 for p in range(start, end_negative):
+
+1439:                 for p in range(start, end_negative):
      __pyx_t_8 = __pyx_v_end_negative;
       __pyx_t_2 = __pyx_t_8;
       for (__pyx_t_9 = __pyx_v_start; __pyx_t_9 < __pyx_t_2; __pyx_t_9+=1) {
         __pyx_v_p = __pyx_t_9;
-
+1440:                     current_feature_value = Xf[p]
+
+1440:                     current_feature_value = Xf[p]
        __pyx_v_current_feature_value = (__pyx_v_Xf[__pyx_v_p]);
 
 1441: 
-
+1442:                     if current_feature_value < min_feature_value:
+
+1442:                     if current_feature_value < min_feature_value:
        __pyx_t_6 = ((__pyx_v_current_feature_value < __pyx_v_min_feature_value) != 0);
         if (__pyx_t_6) {
 /* … */
           goto __pyx_L13;
         }
-
+1443:                         min_feature_value = current_feature_value
+
+1443:                         min_feature_value = current_feature_value
          __pyx_v_min_feature_value = __pyx_v_current_feature_value;
-
+1444:                     elif current_feature_value > max_feature_value:
+
+1444:                     elif current_feature_value > max_feature_value:
        __pyx_t_6 = ((__pyx_v_current_feature_value > __pyx_v_max_feature_value) != 0);
         if (__pyx_t_6) {
 /* … */
         }
         __pyx_L13:;
       }
-
+1445:                         max_feature_value = current_feature_value
+
+1445:                         max_feature_value = current_feature_value
          __pyx_v_max_feature_value = __pyx_v_current_feature_value;
 
 1446: 
-
 1447:                 # Update min, max given Xf[start_positive:end]
-
+1448:                 for p in range(start_positive, end):
+
 1447:                 # Update min, max given Xf[start_positive:end]
+
+1448:                 for p in range(start_positive, end):
      __pyx_t_8 = __pyx_v_end;
       __pyx_t_2 = __pyx_t_8;
       for (__pyx_t_9 = __pyx_v_start_positive; __pyx_t_9 < __pyx_t_2; __pyx_t_9+=1) {
         __pyx_v_p = __pyx_t_9;
-
+1449:                     current_feature_value = Xf[p]
+
+1449:                     current_feature_value = Xf[p]
        __pyx_v_current_feature_value = (__pyx_v_Xf[__pyx_v_p]);
 
 1450: 
-
+1451:                     if current_feature_value < min_feature_value:
+
+1451:                     if current_feature_value < min_feature_value:
        __pyx_t_6 = ((__pyx_v_current_feature_value < __pyx_v_min_feature_value) != 0);
         if (__pyx_t_6) {
 /* … */
           goto __pyx_L16;
         }
-
+1452:                         min_feature_value = current_feature_value
+
+1452:                         min_feature_value = current_feature_value
          __pyx_v_min_feature_value = __pyx_v_current_feature_value;
-
+1453:                     elif current_feature_value > max_feature_value:
+
+1453:                     elif current_feature_value > max_feature_value:
        __pyx_t_6 = ((__pyx_v_current_feature_value > __pyx_v_max_feature_value) != 0);
         if (__pyx_t_6) {
 /* … */
         }
         __pyx_L16:;
       }
-
+1454:                         max_feature_value = current_feature_value
+
+1454:                         max_feature_value = current_feature_value
          __pyx_v_max_feature_value = __pyx_v_current_feature_value;
 
 1455: 
-
+1456:                 if max_feature_value <= min_feature_value + FEATURE_THRESHOLD:
+
+1456:                 if max_feature_value <= min_feature_value + FEATURE_THRESHOLD:
      __pyx_t_6 = ((__pyx_v_max_feature_value <= (__pyx_v_min_feature_value + __pyx_v_13stpredictions_6models_3OK3_9_splitter_FEATURE_THRESHOLD)) != 0);
       if (__pyx_t_6) {
 /* … */
         goto __pyx_L17;
       }
-
+1457:                     features[f_j] = features[n_total_constants]
+
+1457:                     features[f_j] = features[n_total_constants]
        (__pyx_v_features[__pyx_v_f_j]) = (__pyx_v_features[__pyx_v_n_total_constants]);
-
+1458:                     features[n_total_constants] = current.feature
+
+1458:                     features[n_total_constants] = current.feature
        __pyx_t_8 = __pyx_v_current.feature;
         (__pyx_v_features[__pyx_v_n_total_constants]) = __pyx_t_8;
 
 1459: 
-
+1460:                     n_found_constants += 1
+
+1460:                     n_found_constants += 1
        __pyx_v_n_found_constants = (__pyx_v_n_found_constants + 1);
-
+1461:                     n_total_constants += 1
+
+1461:                     n_total_constants += 1
        __pyx_v_n_total_constants = (__pyx_v_n_total_constants + 1);
 
 1462: 
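Line 1456 above declares the drawn feature constant on this node when its observed value range is within FEATURE_THRESHOLD, folding it into the constant-features block instead of evaluating splits. Restated as a predicate (the 1e-7 value is an assumption carried over from scikit-learn's _splitter.pyx, from which this module is derived; it is not shown in this diff):

    FEATURE_THRESHOLD = 1e-7  # assumed value, defined elsewhere in the module

    def is_node_constant(min_feature_value, max_feature_value):
        # Treat a near-zero value range as constant to absorb float noise.
        return max_feature_value <= min_feature_value + FEATURE_THRESHOLD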
-
 1463:                 else:
-
+1464:                     f_i -= 1
+
 1463:                 else:
+
+1464:                     f_i -= 1
      /*else*/ {
         __pyx_v_f_i = (__pyx_v_f_i - 1);
-
+1465:                     features[f_i], features[f_j] = features[f_j], features[f_i]
+
+1465:                     features[f_i], features[f_j] = features[f_j], features[f_i]
        __pyx_t_8 = (__pyx_v_features[__pyx_v_f_j]);
         __pyx_t_2 = (__pyx_v_features[__pyx_v_f_i]);
         (__pyx_v_features[__pyx_v_f_i]) = __pyx_t_8;
         (__pyx_v_features[__pyx_v_f_j]) = __pyx_t_2;
 
 1466: 
-
 1467:                     # Draw a random threshold
-
+1468:                     current.threshold = rand_uniform(min_feature_value,
+
 1467:                     # Draw a random threshold
+
+1468:                     current.threshold = rand_uniform(min_feature_value,
        __pyx_v_current.threshold = __pyx_f_7sklearn_4tree_6_utils_rand_uniform(__pyx_v_min_feature_value, __pyx_v_max_feature_value, __pyx_v_random_state);
-
 1469:                                                      max_feature_value,
-
 1470:                                                      random_state)
+
 1469:                                                      max_feature_value,
+
 1470:                                                      random_state)
 1471: 
-
+1472:                     if current.threshold == max_feature_value:
+
+1472:                     if current.threshold == max_feature_value:
        __pyx_t_6 = ((__pyx_v_current.threshold == __pyx_v_max_feature_value) != 0);
         if (__pyx_t_6) {
 /* … */
         }
-
+1473:                         current.threshold = min_feature_value
+
+1473:                         current.threshold = min_feature_value
          __pyx_v_current.threshold = __pyx_v_min_feature_value;
 
 1474: 
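Lines 1468-1473 above draw the random threshold uniformly between the node's min and max feature values, then clamp a draw that lands exactly on the maximum back to the minimum; otherwise every sample would satisfy x <= threshold and the split would be trivial. A sketch with random.uniform standing in for the C-level rand_uniform (illustrative only):

    import random

    def draw_threshold(min_feature_value, max_feature_value):
        threshold = random.uniform(min_feature_value, max_feature_value)
        if threshold == max_feature_value:
            # Degenerate draw: all samples would go left. Fall back to the
            # minimum so maximum-valued samples land in the right child.
            threshold = min_feature_value
        return threshold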
-
 1475:                     # Partition
-
+1476:                     current.pos = self._partition(current.threshold,
+
 1475:                     # Partition
+
+1476:                     current.pos = self._partition(current.threshold,
        __pyx_v_current.pos = __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter__partition(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_current.threshold, __pyx_v_end_negative, __pyx_v_start_positive, (__pyx_v_start_positive + ((__pyx_v_Xf[__pyx_v_start_positive]) == 0.)));
-
 1477:                                                   end_negative,
-
 1478:                                                   start_positive,
-
 1479:                                                   start_positive +
-
 1480:                                                   (Xf[start_positive] == 0.))
+
 1477:                                                   end_negative,
+
 1478:                                                   start_positive,
+
 1479:                                                   start_positive +
+
 1480:                                                   (Xf[start_positive] == 0.))
 1481: 
-
 1482:                     # Reject if min_samples_leaf is not guaranteed
-
+1483:                     if (((current.pos - start) < min_samples_leaf) or
+
 1482:                     # Reject if min_samples_leaf is not guaranteed
+
+1483:                     if (((current.pos - start) < min_samples_leaf) or
        __pyx_t_7 = (((__pyx_v_current.pos - __pyx_v_start) < __pyx_v_min_samples_leaf) != 0);
         if (!__pyx_t_7) {
         } else {
@@ -4458,21 +4532,21 @@
         if (__pyx_t_6) {
 /* … */
         }
-
+1484:                             ((end - current.pos) < min_samples_leaf)):
+
+1484:                             ((end - current.pos) < min_samples_leaf)):
        __pyx_t_7 = (((__pyx_v_end - __pyx_v_current.pos) < __pyx_v_min_samples_leaf) != 0);
         __pyx_t_6 = __pyx_t_7;
         __pyx_L20_bool_binop_done:;
-
+1485:                         continue
+
+1485:                         continue
          goto __pyx_L3_continue;
 
 1486: 
-
 1487:                     # Evaluate split
-
+1488:                     self.criterion.reset()
+
 1487:                     # Evaluate split
+
+1488:                     self.criterion.reset()
        __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1488, __pyx_L1_error)
-
+1489:                     self.criterion.update(current.pos)
+
+1489:                     self.criterion.update(current.pos)
        __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_current.pos); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1489, __pyx_L1_error)
 
 1490: 
-
 1491:                     # Reject if min_weight_leaf is not satisfied
-
+1492:                     if ((self.criterion.weighted_n_left < min_weight_leaf) or
+
 1491:                     # Reject if min_weight_leaf is not satisfied
+
+1492:                     if ((self.criterion.weighted_n_left < min_weight_leaf) or
        __pyx_t_7 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_left < __pyx_v_min_weight_leaf) != 0);
         if (!__pyx_t_7) {
         } else {
@@ -4483,17 +4557,17 @@
         if (__pyx_t_6) {
 /* … */
         }
-
+1493:                             (self.criterion.weighted_n_right < min_weight_leaf)):
+
+1493:                             (self.criterion.weighted_n_right < min_weight_leaf)):
        __pyx_t_7 = ((__pyx_v_self->__pyx_base.__pyx_base.criterion->weighted_n_right < __pyx_v_min_weight_leaf) != 0);
         __pyx_t_6 = __pyx_t_7;
         __pyx_L23_bool_binop_done:;
-
+1494:                         continue
+
+1494:                         continue
          goto __pyx_L3_continue;
 
 1495: 
-
+1496:                     current_proxy_improvement = self.criterion.proxy_impurity_improvement()
+
+1496:                     current_proxy_improvement = self.criterion.proxy_impurity_improvement()
        __pyx_v_current_proxy_improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->proxy_impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion);
 
 1497: 
-
+1498:                     if current_proxy_improvement > best_proxy_improvement:
+
+1498:                     if current_proxy_improvement > best_proxy_improvement:
        __pyx_t_6 = ((__pyx_v_current_proxy_improvement > __pyx_v_best_proxy_improvement) != 0);
         if (__pyx_t_6) {
 /* … */
@@ -4504,64 +4578,64 @@
     __pyx_L8:;
     __pyx_L3_continue:;
   }
-
+1499:                         best_proxy_improvement = current_proxy_improvement
+
+1499:                         best_proxy_improvement = current_proxy_improvement
          __pyx_v_best_proxy_improvement = __pyx_v_current_proxy_improvement;
-
+1500:                         current.improvement = self.criterion.impurity_improvement(impurity)
+
+1500:                         current.improvement = self.criterion.impurity_improvement(impurity)
          __pyx_v_current.improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_impurity);
 
 1501: 
-
+1502:                         self.criterion.children_impurity(&current.impurity_left,
+
+1502:                         self.criterion.children_impurity(&current.impurity_left,
          ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->children_impurity(__pyx_v_self->__pyx_base.__pyx_base.criterion, (&__pyx_v_current.impurity_left), (&__pyx_v_current.impurity_right));
-
 1503:                                                          &current.impurity_right)
-
+1504:                         best = current
+
 1503:                                                          &current.impurity_right)
+
+1504:                         best = current
          __pyx_v_best = __pyx_v_current;
 
 1505: 
-
 1506:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
-
+1507:         if best.pos < end:
+
 1506:         # Reorganize into samples[start:best.pos] + samples[best.pos:end]
+
+1507:         if best.pos < end:
  __pyx_t_6 = ((__pyx_v_best.pos < __pyx_v_end) != 0);
   if (__pyx_t_6) {
 /* … */
   }
-
+1508:             if current.feature != best.feature:
+
+1508:             if current.feature != best.feature:
    __pyx_t_6 = ((__pyx_v_current.feature != __pyx_v_best.feature) != 0);
     if (__pyx_t_6) {
 /* … */
     }
-
+1509:                 self.extract_nnz(best.feature, &end_negative, &start_positive,
+
+1509:                 self.extract_nnz(best.feature, &end_negative, &start_positive,
      __pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter_extract_nnz(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_best.feature, (&__pyx_v_end_negative), (&__pyx_v_start_positive), (&__pyx_v_is_samples_sorted));
-
 1510:                                  &is_samples_sorted)
+
 1510:                                  &is_samples_sorted)
 1511: 
-
+1512:                 self._partition(best.threshold, end_negative, start_positive,
+
+1512:                 self._partition(best.threshold, end_negative, start_positive,
      (void)(__pyx_f_13stpredictions_6models_3OK3_9_splitter_18BaseSparseSplitter__partition(((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_BaseSparseSplitter *)__pyx_v_self), __pyx_v_best.threshold, __pyx_v_end_negative, __pyx_v_start_positive, __pyx_v_best.pos));
-
 1513:                                 best.pos)
+
 1513:                                 best.pos)
 1514: 
-
+1515:             self.criterion.reset()
+
+1515:             self.criterion.reset()
    __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->reset(__pyx_v_self->__pyx_base.__pyx_base.criterion); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1515, __pyx_L1_error)
-
+1516:             self.criterion.update(best.pos)
+
+1516:             self.criterion.update(best.pos)
    __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->update(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_best.pos); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 1516, __pyx_L1_error)
-
+1517:             best.improvement = self.criterion.impurity_improvement(impurity)
+
+1517:             best.improvement = self.criterion.impurity_improvement(impurity)
    __pyx_v_best.improvement = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->impurity_improvement(__pyx_v_self->__pyx_base.__pyx_base.criterion, __pyx_v_impurity);
-
+1518:             self.criterion.children_impurity(&best.impurity_left,
+
+1518:             self.criterion.children_impurity(&best.impurity_left,
    ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_10_criterion_Criterion *)__pyx_v_self->__pyx_base.__pyx_base.criterion->__pyx_vtab)->children_impurity(__pyx_v_self->__pyx_base.__pyx_base.criterion, (&__pyx_v_best.impurity_left), (&__pyx_v_best.impurity_right));
-
 1519:                                              &best.impurity_right)
+
 1519:                                              &best.impurity_right)
 1520: 
-
 1521:         # Respect invariant for constant features: the original order of
-
 1522:         # elements in features[:n_known_constants] must be preserved for sibling
-
 1523:         # and child nodes
-
+1524:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
+
 1521:         # Respect invariant for constant features: the original order of
+
 1522:         # elements in features[:n_known_constants] must be preserved for sibling
+
 1523:         # and child nodes
+
+1524:         memcpy(features, constant_features, sizeof(SIZE_t) * n_known_constants)
  (void)(memcpy(__pyx_v_features, __pyx_v_constant_features, ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_known_constants)));
 
 1525: 
-
 1526:         # Copy newly found constant features
-
+1527:         memcpy(constant_features + n_known_constants,
+
 1526:         # Copy newly found constant features
+
+1527:         memcpy(constant_features + n_known_constants,
  (void)(memcpy((__pyx_v_constant_features + __pyx_v_n_known_constants), (__pyx_v_features + __pyx_v_n_known_constants), ((sizeof(__pyx_t_7sklearn_4tree_5_tree_SIZE_t)) * __pyx_v_n_found_constants)));
-
 1528:                features + n_known_constants,
-
 1529:                sizeof(SIZE_t) * n_found_constants)
+
 1528:                features + n_known_constants,
+
 1529:                sizeof(SIZE_t) * n_found_constants)
 1530: 
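The two memcpy calls above maintain the constant-feature bookkeeping: the first restores the original ordering of the already-known constant features, the second appends the constants newly discovered at this node. A small pure-Python analogue of the same invariant (list slices in place of raw memcpy; names illustrative):

    # Hedged sketch: the constant-features invariant with Python lists.
    def sync_constant_features(features, constant_features,
                               n_known_constants, n_found_constants):
        # Restore the original order of the previously known constants so
        # sibling and child nodes see them in the same order.
        features[:n_known_constants] = constant_features[:n_known_constants]
        # Record the constants newly found while splitting this node.
        stop = n_known_constants + n_found_constants
        constant_features[n_known_constants:stop] = features[n_known_constants:stop]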
-
 1531:         # Return values
-
+1532:         split[0] = best
+
 1531:         # Return values
+
+1532:         split[0] = best
  (__pyx_v_split[0]) = __pyx_v_best;
-
+1533:         n_constant_features[0] = n_total_constants
+
+1533:         n_constant_features[0] = n_total_constants
  (__pyx_v_n_constant_features[0]) = __pyx_v_n_total_constants;
-
+1534:         return 0
+
+1534:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
diff --git a/stpredictions/models/OK3/_tree.html b/stpredictions/models/OK3/_tree.html
index 5835ded8c..fbb0dd242 100644
--- a/stpredictions/models/OK3/_tree.html
+++ b/stpredictions/models/OK3/_tree.html
@@ -286,6 +286,80 @@
 .cython.score-252 {background-color: #FFFF09;}
 .cython.score-253 {background-color: #FFFF09;}
 .cython.score-254 {background-color: #FFFF09;}
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+.cython .hll { background-color: #ffffcc }
+.cython { background: #f8f8f8; }
+.cython .c { color: #3D7B7B; font-style: italic } /* Comment */
+.cython .err { border: 1px solid #FF0000 } /* Error */
+.cython .k { color: #008000; font-weight: bold } /* Keyword */
+.cython .o { color: #666666 } /* Operator */
+.cython .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.cython .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.cython .cp { color: #9C6500 } /* Comment.Preproc */
+.cython .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.cython .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.cython .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.cython .gd { color: #A00000 } /* Generic.Deleted */
+.cython .ge { font-style: italic } /* Generic.Emph */
+.cython .gr { color: #E40000 } /* Generic.Error */
+.cython .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.cython .gi { color: #008400 } /* Generic.Inserted */
+.cython .go { color: #717171 } /* Generic.Output */
+.cython .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.cython .gs { font-weight: bold } /* Generic.Strong */
+.cython .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.cython .gt { color: #0044DD } /* Generic.Traceback */
+.cython .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.cython .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.cython .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.cython .kp { color: #008000 } /* Keyword.Pseudo */
+.cython .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.cython .kt { color: #B00040 } /* Keyword.Type */
+.cython .m { color: #666666 } /* Literal.Number */
+.cython .s { color: #BA2121 } /* Literal.String */
+.cython .na { color: #687822 } /* Name.Attribute */
+.cython .nb { color: #008000 } /* Name.Builtin */
+.cython .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+.cython .no { color: #880000 } /* Name.Constant */
+.cython .nd { color: #AA22FF } /* Name.Decorator */
+.cython .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.cython .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.cython .nf { color: #0000FF } /* Name.Function */
+.cython .nl { color: #767600 } /* Name.Label */
+.cython .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+.cython .nt { color: #008000; font-weight: bold } /* Name.Tag */
+.cython .nv { color: #19177C } /* Name.Variable */
+.cython .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+.cython .w { color: #bbbbbb } /* Text.Whitespace */
+.cython .mb { color: #666666 } /* Literal.Number.Bin */
+.cython .mf { color: #666666 } /* Literal.Number.Float */
+.cython .mh { color: #666666 } /* Literal.Number.Hex */
+.cython .mi { color: #666666 } /* Literal.Number.Integer */
+.cython .mo { color: #666666 } /* Literal.Number.Oct */
+.cython .sa { color: #BA2121 } /* Literal.String.Affix */
+.cython .sb { color: #BA2121 } /* Literal.String.Backtick */
+.cython .sc { color: #BA2121 } /* Literal.String.Char */
+.cython .dl { color: #BA2121 } /* Literal.String.Delimiter */
+.cython .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.cython .s2 { color: #BA2121 } /* Literal.String.Double */
+.cython .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.cython .sh { color: #BA2121 } /* Literal.String.Heredoc */
+.cython .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.cython .sx { color: #008000 } /* Literal.String.Other */
+.cython .sr { color: #A45A77 } /* Literal.String.Regex */
+.cython .s1 { color: #BA2121 } /* Literal.String.Single */
+.cython .ss { color: #19177C } /* Literal.String.Symbol */
+.cython .bp { color: #008000 } /* Name.Builtin.Pseudo */
+.cython .fm { color: #0000FF } /* Name.Function.Magic */
+.cython .vc { color: #19177C } /* Name.Variable.Class */
+.cython .vg { color: #19177C } /* Name.Variable.Global */
+.cython .vi { color: #19177C } /* Name.Variable.Instance */
+.cython .vm { color: #19177C } /* Name.Variable.Magic */
+.cython .il { color: #666666 } /* Literal.Number.Integer.Long */
@@ -295,44 +369,44 @@
 Click on a line that starts with a "+" to see the C code that Cython generated for it.

Raw output: _tree.c

-
+0001: # cython: cdivision=True
+
+0001: # cython: cdivision=True
  __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
 0002: # cython: boundscheck=False
-
 0003: # cython: wraparound=False
+
 0002: # cython: boundscheck=False
+
 0003: # cython: wraparound=False
 0004: 
-
 0005: from cpython cimport Py_INCREF, PyObject, PyTypeObject
+
 0005: from cpython cimport Py_INCREF, PyObject, PyTypeObject
 0006: 
-
 0007: from libc.stdlib cimport free
-
 0008: from libc.math cimport fabs
-
 0009: from libc.string cimport memcpy
-
 0010: from libc.string cimport memset
-
 0011: from libc.stdint cimport SIZE_MAX
+
 0007: from libc.stdlib cimport free
+
 0008: from libc.math cimport fabs
+
 0009: from libc.string cimport memcpy
+
 0010: from libc.string cimport memset
+
 0011: from libc.stdint cimport SIZE_MAX
 0012: 
-
+0013: import numpy as np
+
+0013: import numpy as np
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 13, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
 0014: cimport numpy as np
-
+0015: np.import_array()
+
 0014: cimport numpy as np
+
+0015: np.import_array()
  __pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 15, __pyx_L1_error)
 
 0016: 
-
+0017: import warnings
+
+0017: import warnings
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_warnings, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_warnings, __pyx_t_1) < 0) __PYX_ERR(0, 17, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 0018: 
-
+0019: import itertools
+
+0019: import itertools
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_itertools, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_itertools, __pyx_t_1) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 0020: 
-
+0021: from scipy.sparse import issparse
+
+0021: from scipy.sparse import issparse
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_issparse);
@@ -346,7 +420,7 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_issparse, __pyx_t_1) < 0) __PYX_ERR(0, 21, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
+0022: from scipy.sparse import csc_matrix
+
+0022: from scipy.sparse import csc_matrix
  __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_INCREF(__pyx_n_s_csc_matrix);
@@ -360,7 +434,7 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_csc_matrix, __pyx_t_3) < 0) __PYX_ERR(0, 22, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0023: from scipy.sparse import csr_matrix
+
+0023: from scipy.sparse import csr_matrix
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_csr_matrix);
@@ -375,14 +449,14 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
 0024: 
-
 0025: from sklearn.tree._utils cimport Stack
-
 0026: from sklearn.tree._utils cimport StackRecord
-
 0027: from sklearn.tree._utils cimport PriorityHeap
-
 0028: from sklearn.tree._utils cimport PriorityHeapRecord
-
 0029: from sklearn.tree._utils cimport safe_realloc
-
 0030: from sklearn.tree._utils cimport sizet_ptr_to_ndarray
+
 0025: from sklearn.tree._utils cimport Stack
+
 0026: from sklearn.tree._utils cimport StackRecord
+
 0027: from sklearn.tree._utils cimport PriorityHeap
+
 0028: from sklearn.tree._utils cimport PriorityHeapRecord
+
 0029: from sklearn.tree._utils cimport safe_realloc
+
 0030: from sklearn.tree._utils cimport sizet_ptr_to_ndarray
 0031: 
-
+0032: from _criterion import Criterion
+
+0032: from _criterion import Criterion
  __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_INCREF(__pyx_n_s_Criterion);
@@ -396,7 +470,7 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_Criterion, __pyx_t_3) < 0) __PYX_ERR(0, 32, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0033: from _criterion import KernelizedMSE
+
+0033: from _criterion import KernelizedMSE
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_KernelizedMSE);
@@ -411,18 +485,18 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
 0034: 
-
 0035: cdef extern from "numpy/arrayobject.h":
-
 0036:     object PyArray_NewFromDescr(PyTypeObject* subtype, np.dtype descr,
-
 0037:                                 int nd, np.npy_intp* dims,
-
 0038:                                 np.npy_intp* strides,
-
 0039:                                 void* data, int flags, object obj)
-
 0040:     int PyArray_SetBaseObject(np.ndarray arr, PyObject* obj)
+
 0035: cdef extern from "numpy/arrayobject.h":
+
 0036:     object PyArray_NewFromDescr(PyTypeObject* subtype, np.dtype descr,
+
 0037:                                 int nd, np.npy_intp* dims,
+
 0038:                                 np.npy_intp* strides,
+
 0039:                                 void* data, int flags, object obj)
+
 0040:     int PyArray_SetBaseObject(np.ndarray arr, PyObject* obj)
 0041: 
-
 0042: # =============================================================================
-
 0043: # Types and constants
-
 0044: # =============================================================================
+
 0042: # =============================================================================
+
 0043: # Types and constants
+
 0044: # =============================================================================
 0045: 
-
+0046: from numpy import float32 as DTYPE
+
+0046: from numpy import float32 as DTYPE
  __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_INCREF(__pyx_n_s_float32);
@@ -436,7 +510,7 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_3) < 0) __PYX_ERR(0, 46, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0047: from numpy import float64 as DOUBLE
+
+0047: from numpy import float64 as DOUBLE
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 47, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_float64);
@@ -451,7 +525,7 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
 0048: 
-
+0049: cdef double INFINITY = np.inf
+
+0049: cdef double INFINITY = np.inf
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 49, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_inf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L1_error)
@@ -460,7 +534,7 @@
   __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_13stpredictions_6models_3OK3_5_tree_INFINITY = __pyx_t_4;
-
+0050: cdef double EPSILON = np.finfo('double').eps
+
+0050: cdef double EPSILON = np.finfo('double').eps
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 50, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_finfo); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error)
@@ -480,41 +554,41 @@
   __Pyx_GOTREF(__pyx_tuple__48);
   __Pyx_GIVEREF(__pyx_tuple__48);
 
 0051: 
-
 0052: # Some handy constants (BestFirstTreeBuilder)
-
+0053: cdef int IS_FIRST = 1
+
 0052: # Some handy constants (BestFirstTreeBuilder)
+
+0053: cdef int IS_FIRST = 1
  __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_FIRST = 1;
-
+0054: cdef int IS_NOT_FIRST = 0
+
+0054: cdef int IS_NOT_FIRST = 0
  __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_NOT_FIRST = 0;
-
+0055: cdef int IS_LEFT = 1
+
+0055: cdef int IS_LEFT = 1
  __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_LEFT = 1;
-
+0056: cdef int IS_NOT_LEFT = 0
+
+0056: cdef int IS_NOT_LEFT = 0
  __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_NOT_LEFT = 0;
 
 0057: 
-
+0058: TREE_LEAF = -1
+
+0058: TREE_LEAF = -1
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_TREE_LEAF, __pyx_int_neg_1) < 0) __PYX_ERR(0, 58, __pyx_L1_error)
-
+0059: TREE_UNDEFINED = -2
+
+0059: TREE_UNDEFINED = -2
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_TREE_UNDEFINED, __pyx_int_neg_2) < 0) __PYX_ERR(0, 59, __pyx_L1_error)
-
+0060: cdef SIZE_t _TREE_LEAF = TREE_LEAF
+
+0060: cdef SIZE_t _TREE_LEAF = TREE_LEAF
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_TREE_LEAF); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 60, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_5 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_3); if (unlikely((__pyx_t_5 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF = __pyx_t_5;
-
+0061: cdef SIZE_t _TREE_UNDEFINED = TREE_UNDEFINED
+
+0061: cdef SIZE_t _TREE_UNDEFINED = TREE_UNDEFINED
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_TREE_UNDEFINED); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_5 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_3); if (unlikely((__pyx_t_5 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 61, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED = __pyx_t_5;
-
+0062: cdef SIZE_t INITIAL_STACK_SIZE = 10
+
+0062: cdef SIZE_t INITIAL_STACK_SIZE = 10
  __pyx_v_13stpredictions_6models_3OK3_5_tree_INITIAL_STACK_SIZE = 10;
 
 0063: 
-
 0064: # Build the corresponding numpy dtype for Node.
-
 0065: # This works by casting `dummy` to an array of Node of length 1, which numpy
-
 0066: # can construct a `dtype`-object for. See https://stackoverflow.com/q/62448946
-
 0067: # for a more detailed explanation.
-
 0068: cdef Node dummy;
-
+0069: NODE_DTYPE = np.asarray(<Node[:1]>(&dummy)).dtype
+
 0064: # Build the corresponding numpy dtype for Node.
+
 0065: # This works by casting `dummy` to an array of Node of length 1, which numpy
+
 0066: # can construct a `dtype`-object for. See https://stackoverflow.com/q/62448946
+
 0067: # for a more detailed explanation.
+
 0068: cdef Node dummy;
+
+0069: NODE_DTYPE = np.asarray(<Node[:1]>(&dummy)).dtype
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_asarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
@@ -544,11 +618,11 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_NODE_DTYPE, __pyx_t_1) < 0) __PYX_ERR(0, 69, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 0070: 
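The `dummy` cast above lets NumPy derive a structured dtype directly from the C `Node` struct, so the node array can be exposed to Python without hand-maintaining a field list. For readers without the Cython context, a hand-written equivalent looks roughly like this (field layout assumed from the usual Node definition, not taken from this patch):

    import numpy as np

    # Hedged sketch: an explicit structured dtype approximating what
    # np.asarray(<Node[:1]>(&dummy)).dtype yields for the Node struct.
    NODE_DTYPE_SKETCH = np.dtype([
        ('left_child', np.intp),
        ('right_child', np.intp),
        ('feature', np.intp),
        ('threshold', np.float64),
        ('impurity', np.float64),
        ('n_node_samples', np.intp),
        ('weighted_n_node_samples', np.float64),
    ])
    nodes = np.zeros(8, dtype=NODE_DTYPE_SKETCH)  # storage for 8 tree nodes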
-
 0071: # =============================================================================
-
 0072: # TreeBuilder
-
 0073: # =============================================================================
+
 0071: # =============================================================================
+
 0072: # TreeBuilder
+
 0073: # =============================================================================
 0074: 
-
+0075: cdef class TreeBuilder:
+
+0075: cdef class TreeBuilder:
struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_TreeBuilder {
   PyObject *(*build)(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *, PyObject *, PyArrayObject *, int __pyx_skip_dispatch, struct __pyx_opt_args_13stpredictions_6models_3OK3_5_tree_11TreeBuilder_build *__pyx_optional_args);
   PyObject *(*_check_input)(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *, PyObject *, PyArrayObject *, PyArrayObject *);
@@ -556,9 +630,9 @@
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_TreeBuilder *__pyx_vtabptr_13stpredictions_6models_3OK3_5_tree_TreeBuilder;
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_11TreeBuilder__check_input(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *, PyObject *, PyArrayObject *, PyArrayObject *);
 
-
 0076:     """Interface for different tree building strategies."""
+
 0076:     """Interface for different tree building strategies."""
 0077: 
-
+0078:     cpdef build(self, Tree tree, object X, np.ndarray y,
+
+0078:     cpdef build(self, Tree tree, object X, np.ndarray y,
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_11TreeBuilder_1build(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_11TreeBuilder_build(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *__pyx_v_self, CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_tree, CYTHON_UNUSED PyObject *__pyx_v_X, CYTHON_UNUSED PyArrayObject *__pyx_v_y, int __pyx_skip_dispatch, struct __pyx_opt_args_13stpredictions_6models_3OK3_5_tree_11TreeBuilder_build *__pyx_optional_args) {
 /* … */
@@ -709,7 +783,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0079:                 np.ndarray sample_weight=None):
+
+0079:                 np.ndarray sample_weight=None):
  PyArrayObject *__pyx_v_sample_weight = ((PyArrayObject *)Py_None);
   PyObject *__pyx_r = NULL;
   __Pyx_RefNannyDeclarations
@@ -791,10 +865,10 @@
   if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) __PYX_ERR(0, 78, __pyx_L1_error)
   if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sample_weight), __pyx_ptype_5numpy_ndarray, 1, "sample_weight", 0))) __PYX_ERR(0, 79, __pyx_L1_error)
   __pyx_r = __pyx_pf_13stpredictions_6models_3OK3_5_tree_11TreeBuilder_build(((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *)__pyx_v_self), __pyx_v_tree, __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight);
-
 0080:         """Build a decision tree from the training set (X, y)."""
-
 0081:         pass
+
 0080:         """Build a decision tree from the training set (X, y)."""
+
 0081:         pass
 0082: 
-
+0083:     cdef inline _check_input(self, object X, np.ndarray y,
+
+0083:     cdef inline _check_input(self, object X, np.ndarray y,
static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_11TreeBuilder__check_input(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *__pyx_v_self, PyObject *__pyx_v_X, PyArrayObject *__pyx_v_y, PyArrayObject *__pyx_v_sample_weight) {
   PyObject *__pyx_r = NULL;
   __Pyx_RefNannyDeclarations
@@ -819,9 +893,9 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0084:                              np.ndarray sample_weight):
-
 0085:         """Check input dtype, layout and format"""
-
+0086:         if issparse(X):
+
 0084:                              np.ndarray sample_weight):
+
 0085:         """Check input dtype, layout and format"""
+
+0086:         if issparse(X):
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_issparse); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = NULL;
@@ -845,7 +919,7 @@
 /* … */
     goto __pyx_L3;
   }
-
+0087:             X = X.tocsc()
+
+0087:             X = X.tocsc()
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_tocsc); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_3 = NULL;
@@ -865,7 +939,7 @@
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_1);
     __pyx_t_1 = 0;
-
+0088:             X.sort_indices()
+
+0088:             X.sort_indices()
    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_sort_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_3 = NULL;
@@ -885,7 +959,7 @@
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 0089: 
-
+0090:             if X.data.dtype != DTYPE:
+
+0090:             if X.data.dtype != DTYPE:
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error)
@@ -901,7 +975,7 @@
     if (__pyx_t_4) {
 /* … */
     }
-
+0091:                 X.data = np.ascontiguousarray(X.data, dtype=DTYPE)
+
+0091:                 X.data = np.ascontiguousarray(X.data, dtype=DTYPE)
      __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_3);
       __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error)
@@ -928,7 +1002,7 @@
       if (__Pyx_PyObject_SetAttrStr(__pyx_v_X, __pyx_n_s_data, __pyx_t_5) < 0) __PYX_ERR(0, 91, __pyx_L1_error)
       __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
 
 0092: 
-
+0093:             if X.indices.dtype != np.int32 or X.indptr.dtype != np.int32:
+
+0093:             if X.indices.dtype != np.int32 or X.indptr.dtype != np.int32:
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indices); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 93, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error)
@@ -969,7 +1043,7 @@
     if (unlikely(__pyx_t_4)) {
 /* … */
     }
-
+0094:                 raise ValueError("No support for np.int64 index based "
+
+0094:                 raise ValueError("No support for np.int64 index based "
      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 94, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_5);
       __Pyx_Raise(__pyx_t_5, 0, 0, 0);
@@ -979,9 +1053,9 @@
   __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_No_support_for_np_int64_index_ba); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 94, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_tuple_);
   __Pyx_GIVEREF(__pyx_tuple_);
-
 0095:                                  "sparse matrices")
+
 0095:                                  "sparse matrices")
 0096: 
-
+0097:         elif X.dtype != DTYPE:
+
+0097:         elif X.dtype != DTYPE:
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 97, __pyx_L1_error)
@@ -995,8 +1069,8 @@
 /* … */
   }
   __pyx_L3:;
-
 0098:             # since we have to copy we will make it fortran for efficiency
-
+0099:             X = np.asfortranarray(X, dtype=DTYPE)
+
 0098:             # since we have to copy we will make it fortran for efficiency
+
+0099:             X = np.asfortranarray(X, dtype=DTYPE)
    __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asfortranarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 99, __pyx_L1_error)
@@ -1021,7 +1095,7 @@
     __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_1);
     __pyx_t_1 = 0;
 
 0100: 
-
+0101:         if y.dtype != DOUBLE or not y.flags.contiguous:
+
+0101:         if y.dtype != DOUBLE or not y.flags.contiguous:
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_y), __pyx_n_s_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_DOUBLE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error)
@@ -1049,7 +1123,7 @@
   if (__pyx_t_4) {
 /* … */
   }
-
+0102:             y = np.ascontiguousarray(y, dtype=DOUBLE)
+
+0102:             y = np.ascontiguousarray(y, dtype=DOUBLE)
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 102, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error)
@@ -1075,7 +1149,7 @@
     __Pyx_DECREF_SET(__pyx_v_y, ((PyArrayObject *)__pyx_t_3));
     __pyx_t_3 = 0;
 
 0103: 
-
+0104:         if (sample_weight is not None and
+
+0104:         if (sample_weight is not None and
  __pyx_t_7 = (((PyObject *)__pyx_v_sample_weight) != Py_None);
   __pyx_t_6 = (__pyx_t_7 != 0);
   if (__pyx_t_6) {
@@ -1087,7 +1161,7 @@
   if (__pyx_t_4) {
 /* … */
   }
-
+0105:             (sample_weight.dtype != DOUBLE or
+
+0105:             (sample_weight.dtype != DOUBLE or
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_sample_weight), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 105, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_DOUBLE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 105, __pyx_L1_error)
@@ -1102,7 +1176,7 @@
     __pyx_t_4 = __pyx_t_6;
     goto __pyx_L12_bool_binop_done;
   }
-
+0106:             not sample_weight.flags.contiguous)):
+
+0106:             not sample_weight.flags.contiguous)):
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_sample_weight), __pyx_n_s_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 106, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_contiguous); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 106, __pyx_L1_error)
@@ -1113,7 +1187,7 @@
   __pyx_t_7 = ((!__pyx_t_6) != 0);
   __pyx_t_4 = __pyx_t_7;
   __pyx_L12_bool_binop_done:;
-
+0107:                 sample_weight = np.asarray(sample_weight, dtype=DOUBLE,
+
+0107:                 sample_weight = np.asarray(sample_weight, dtype=DOUBLE,
    __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 107, __pyx_L1_error)
@@ -1139,9 +1213,9 @@
     if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 107, __pyx_L1_error)
     __Pyx_DECREF_SET(__pyx_v_sample_weight, ((PyArrayObject *)__pyx_t_2));
     __pyx_t_2 = 0;
-
 0108:                                            order="C")
+
 0108:                                            order="C")
 0109: 
-
+0110:         return X, y, sample_weight
+
+0110:         return X, y, sample_weight
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 110, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
@@ -1158,9 +1232,9 @@
   __pyx_t_2 = 0;
   goto __pyx_L0;
 
 0111: 
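The `_check_input` path above normalizes everything the builders rely on: CSC layout with sorted indices and float32 data for sparse X, Fortran-ordered float32 for dense X, and C-contiguous float64 for y and sample_weight. A hedged NumPy/SciPy sketch of the same normalization, outside the extension type:

    import numpy as np
    from scipy.sparse import issparse

    DTYPE, DOUBLE = np.float32, np.float64

    # Hedged sketch mirroring the dtype/layout checks performed above.
    def check_input(X, y, sample_weight=None):
        if issparse(X):
            X = X.tocsc()
            X.sort_indices()
            if X.data.dtype != DTYPE:
                X.data = np.ascontiguousarray(X.data, dtype=DTYPE)
            if X.indices.dtype != np.int32 or X.indptr.dtype != np.int32:
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        elif X.dtype != DTYPE:
            # Since a copy is needed anyway, make it Fortran-ordered.
            X = np.asfortranarray(X, dtype=DTYPE)
        if y.dtype != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if sample_weight is not None and (
                sample_weight.dtype != DOUBLE
                or not sample_weight.flags.contiguous):
            sample_weight = np.asarray(sample_weight, dtype=DOUBLE, order="C")
        return X, y, sample_weight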
-
 0112: # Depth first builder ---------------------------------------------------------
+
 0112: # Depth first builder ---------------------------------------------------------
 0113: 
-
+0114: cdef class DepthFirstTreeBuilder(TreeBuilder):
+
+0114: cdef class DepthFirstTreeBuilder(TreeBuilder):
struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_DepthFirstTreeBuilder {
   struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder __pyx_base;
 };
@@ -1170,9 +1244,9 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_DepthFirstTreeBuilder *__pyx_vtabptr_13stpredictions_6models_3OK3_5_tree_DepthFirstTreeBuilder;
 
-
 0115:     """Build a decision tree in depth-first fashion."""
+
 0115:     """Build a decision tree in depth-first fashion."""
 0116: 
-
+0117:     def __cinit__(self, Splitter splitter, SIZE_t min_samples_split,
+
+0117:     def __cinit__(self, Splitter splitter, SIZE_t min_samples_split,
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_21DepthFirstTreeBuilder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_21DepthFirstTreeBuilder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -1307,29 +1381,29 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0118:                   SIZE_t min_samples_leaf, double min_weight_leaf,
-
 0119:                   SIZE_t max_depth, double min_impurity_decrease,
-
 0120:                   double min_impurity_split):
-
+0121:         self.splitter = splitter
+
 0118:                   SIZE_t min_samples_leaf, double min_weight_leaf,
+
 0119:                   SIZE_t max_depth, double min_impurity_decrease,
+
 0120:                   double min_impurity_split):
+
+0121:         self.splitter = splitter
  __Pyx_INCREF(((PyObject *)__pyx_v_splitter));
   __Pyx_GIVEREF(((PyObject *)__pyx_v_splitter));
   __Pyx_GOTREF(__pyx_v_self->__pyx_base.splitter);
   __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx_base.splitter));
   __pyx_v_self->__pyx_base.splitter = __pyx_v_splitter;
-
+0122:         self.min_samples_split = min_samples_split
+
+0122:         self.min_samples_split = min_samples_split
  __pyx_v_self->__pyx_base.min_samples_split = __pyx_v_min_samples_split;
-
+0123:         self.min_samples_leaf = min_samples_leaf
+
+0123:         self.min_samples_leaf = min_samples_leaf
  __pyx_v_self->__pyx_base.min_samples_leaf = __pyx_v_min_samples_leaf;
-
+0124:         self.min_weight_leaf = min_weight_leaf
+
+0124:         self.min_weight_leaf = min_weight_leaf
  __pyx_v_self->__pyx_base.min_weight_leaf = __pyx_v_min_weight_leaf;
-
+0125:         self.max_depth = max_depth
+
+0125:         self.max_depth = max_depth
  __pyx_v_self->__pyx_base.max_depth = __pyx_v_max_depth;
-
+0126:         self.min_impurity_decrease = min_impurity_decrease
+
+0126:         self.min_impurity_decrease = min_impurity_decrease
  __pyx_v_self->__pyx_base.min_impurity_decrease = __pyx_v_min_impurity_decrease;
-
+0127:         self.min_impurity_split = min_impurity_split
+
+0127:         self.min_impurity_split = min_impurity_split
  __pyx_v_self->__pyx_base.min_impurity_split = __pyx_v_min_impurity_split;
 
 0128: 
-
+0129:     cpdef build(self, Tree tree, object X, np.ndarray y,
+
+0129:     cpdef build(self, Tree tree, object X, np.ndarray y,
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_21DepthFirstTreeBuilder_3build(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_21DepthFirstTreeBuilder_build(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_DepthFirstTreeBuilder *__pyx_v_self, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_tree, PyObject *__pyx_v_X, PyArrayObject *__pyx_v_y, int __pyx_skip_dispatch, struct __pyx_opt_args_13stpredictions_6models_3OK3_5_tree_21DepthFirstTreeBuilder_build *__pyx_optional_args) {
 /* … */
@@ -1491,7 +1565,7 @@
   int __pyx_n;
   PyArrayObject *sample_weight;
 };
-
+0130:                 np.ndarray sample_weight=None):
+
+0130:                 np.ndarray sample_weight=None):
  PyArrayObject *__pyx_v_sample_weight = ((PyArrayObject *)Py_None);
   __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *__pyx_v_sample_weight_ptr;
   int __pyx_v_init_capacity;
@@ -1603,10 +1677,10 @@
   if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) __PYX_ERR(0, 129, __pyx_L1_error)
   if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sample_weight), __pyx_ptype_5numpy_ndarray, 1, "sample_weight", 0))) __PYX_ERR(0, 130, __pyx_L1_error)
   __pyx_r = __pyx_pf_13stpredictions_6models_3OK3_5_tree_21DepthFirstTreeBuilder_2build(((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_DepthFirstTreeBuilder *)__pyx_v_self), __pyx_v_tree, __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight);
-
 0131:         """Build a decision tree from the training set (X, y)."""
+
 0131:         """Build a decision tree from the training set (X, y)."""
 0132: 
-
 0133:         # check input
-
+0134:         X, y, sample_weight = self._check_input(X, y, sample_weight)
+
 0133:         # check input
+
+0134:         X, y, sample_weight = self._check_input(X, y, sample_weight)
  __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_5_tree_11TreeBuilder__check_input(((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *)__pyx_v_self), __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
@@ -1671,98 +1745,98 @@
   __Pyx_DECREF_SET(__pyx_v_sample_weight, ((PyArrayObject *)__pyx_t_6));
   __pyx_t_6 = 0;
 
 0135: 
-
+0136:         cdef DOUBLE_t* sample_weight_ptr = NULL
+
+0136:         cdef DOUBLE_t* sample_weight_ptr = NULL
  __pyx_v_sample_weight_ptr = NULL;
-
+0137:         if sample_weight is not None:
+
+0137:         if sample_weight is not None:
  __pyx_t_8 = (((PyObject *)__pyx_v_sample_weight) != Py_None);
   __pyx_t_9 = (__pyx_t_8 != 0);
   if (__pyx_t_9) {
 /* … */
   }
-
+0138:             sample_weight_ptr = <DOUBLE_t*> sample_weight.data
+
+0138:             sample_weight_ptr = <DOUBLE_t*> sample_weight.data
    __pyx_v_sample_weight_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *)__pyx_v_sample_weight->data);
 
 0139: 
-
 0140:         # Initial capacity
-
 0141:         cdef int init_capacity
+
 0140:         # Initial capacity
+
 0141:         cdef int init_capacity
 0142: 
-
+0143:         if tree.max_depth <= 10:
+
+0143:         if tree.max_depth <= 10:
  __pyx_t_9 = ((__pyx_v_tree->max_depth <= 10) != 0);
   if (__pyx_t_9) {
 /* … */
     goto __pyx_L6;
   }
-
+0144:             init_capacity = (2 ** (tree.max_depth + 1)) - 1
+
+0144:             init_capacity = (2 ** (tree.max_depth + 1)) - 1
    __pyx_v_init_capacity = (__Pyx_pow_long(2, (__pyx_v_tree->max_depth + 1)) - 1);
-
 0145:         else:
-
+0146:             init_capacity = 2047
+
 0145:         else:
+
+0146:             init_capacity = 2047
  /*else*/ {
     __pyx_v_init_capacity = 0x7FF;
   }
   __pyx_L6:;
 
 0147: 
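The capacity heuristic above pre-allocates a perfect binary tree when the depth bound is small: a tree of depth d holds at most 2^(d+1) - 1 nodes, and 2047 is exactly that bound for d = 10, the cap used for deeper trees. In Python terms:

    # Hedged sketch of the pre-allocation heuristic above.
    def initial_capacity(max_depth):
        # A perfect binary tree of depth d has 2**(d+1) - 1 nodes.
        return 2 ** (max_depth + 1) - 1 if max_depth <= 10 else 2047

    assert initial_capacity(10) == 2047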
-
+0148:         tree._resize(init_capacity)
+
+0148:         tree._resize(init_capacity)
  __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_resize(__pyx_v_tree, __pyx_v_init_capacity); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 148, __pyx_L1_error)
 
 0149: 
-
 0150:         # Parameters
-
+0151:         cdef Splitter splitter = self.splitter
+
 0150:         # Parameters
+
+0151:         cdef Splitter splitter = self.splitter
  __pyx_t_1 = ((PyObject *)__pyx_v_self->__pyx_base.splitter);
   __Pyx_INCREF(__pyx_t_1);
   __pyx_v_splitter = ((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_t_1);
   __pyx_t_1 = 0;
-
+0152:         cdef SIZE_t max_depth = self.max_depth
+
+0152:         cdef SIZE_t max_depth = self.max_depth
  __pyx_t_10 = __pyx_v_self->__pyx_base.max_depth;
   __pyx_v_max_depth = __pyx_t_10;
-
+0153:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
+
+0153:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
  __pyx_t_10 = __pyx_v_self->__pyx_base.min_samples_leaf;
   __pyx_v_min_samples_leaf = __pyx_t_10;
-
+0154:         cdef double min_weight_leaf = self.min_weight_leaf
+
+0154:         cdef double min_weight_leaf = self.min_weight_leaf
  __pyx_t_11 = __pyx_v_self->__pyx_base.min_weight_leaf;
   __pyx_v_min_weight_leaf = __pyx_t_11;
-
+0155:         cdef SIZE_t min_samples_split = self.min_samples_split
+
+0155:         cdef SIZE_t min_samples_split = self.min_samples_split
  __pyx_t_10 = __pyx_v_self->__pyx_base.min_samples_split;
   __pyx_v_min_samples_split = __pyx_t_10;
-
+0156:         cdef double min_impurity_decrease = self.min_impurity_decrease
+
+0156:         cdef double min_impurity_decrease = self.min_impurity_decrease
  __pyx_t_11 = __pyx_v_self->__pyx_base.min_impurity_decrease;
   __pyx_v_min_impurity_decrease = __pyx_t_11;
-
+0157:         cdef double min_impurity_split = self.min_impurity_split
+
+0157:         cdef double min_impurity_split = self.min_impurity_split
  __pyx_t_11 = __pyx_v_self->__pyx_base.min_impurity_split;
   __pyx_v_min_impurity_split = __pyx_t_11;
 
 0158: 
-
 0159:         # Recursive partition (without actual recursion)
-
+0160:         splitter.init(X, y, sample_weight_ptr)
+
 0159:         # Recursive partition (without actual recursion)
+
+0160:         splitter.init(X, y, sample_weight_ptr)
  __pyx_t_12 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_nn___pyx_t_7sklearn_4tree_5_tree_DOUBLE_t__const__(((PyObject *)__pyx_v_y), 0); if (unlikely(!__pyx_t_12.memview)) __PYX_ERR(0, 160, __pyx_L1_error)
   __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->init(__pyx_v_splitter, __pyx_v_X, __pyx_t_12, __pyx_v_sample_weight_ptr); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 160, __pyx_L1_error)
   __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1);
   __pyx_t_12.memview = NULL;
   __pyx_t_12.data = NULL;
 
 0161: 
-
 0162:         cdef SIZE_t start
-
 0163:         cdef SIZE_t end
-
 0164:         cdef SIZE_t depth
-
 0165:         cdef SIZE_t parent
-
 0166:         cdef bint is_left
-
+0167:         cdef SIZE_t n_node_samples = splitter.n_samples
+
 0162:         cdef SIZE_t start
+
 0163:         cdef SIZE_t end
+
 0164:         cdef SIZE_t depth
+
 0165:         cdef SIZE_t parent
+
 0166:         cdef bint is_left
+
+0167:         cdef SIZE_t n_node_samples = splitter.n_samples
  __pyx_t_13 = __pyx_v_splitter->n_samples;
   __pyx_v_n_node_samples = __pyx_t_13;
-
+0168:         cdef double weighted_n_samples = splitter.weighted_n_samples
+
+0168:         cdef double weighted_n_samples = splitter.weighted_n_samples
  __pyx_t_11 = __pyx_v_splitter->weighted_n_samples;
   __pyx_v_weighted_n_samples = __pyx_t_11;
-
 0169:         cdef double weighted_n_node_samples
-
 0170:         cdef SplitRecord split
-
 0171:         cdef SIZE_t node_id
+
 0169:         cdef double weighted_n_node_samples
+
 0170:         cdef SplitRecord split
+
 0171:         cdef SIZE_t node_id
 0172: 
-
+0173:         cdef double impurity = INFINITY
+
+0173:         cdef double impurity = INFINITY
  __pyx_v_impurity = __pyx_v_13stpredictions_6models_3OK3_5_tree_INFINITY;
-
 0174:         cdef SIZE_t n_constant_features
-
 0175:         cdef bint is_leaf
-
+0176:         cdef bint first = 1
+
 0174:         cdef SIZE_t n_constant_features
+
 0175:         cdef bint is_leaf
+
+0176:         cdef bint first = 1
  __pyx_v_first = 1;
-
+0177:         cdef SIZE_t max_depth_seen = -1
+
+0177:         cdef SIZE_t max_depth_seen = -1
  __pyx_v_max_depth_seen = -1;
-
+0178:         cdef int rc = 0
+
+0178:         cdef int rc = 0
  __pyx_v_rc = 0;
 
 0179: 
-
+0180:         cdef Stack stack = Stack(INITIAL_STACK_SIZE)
+
+0180:         cdef Stack stack = Stack(INITIAL_STACK_SIZE)
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_13stpredictions_6models_3OK3_5_tree_INITIAL_STACK_SIZE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 180, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_6 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_7sklearn_4tree_6_utils_Stack), __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 180, __pyx_L1_error)
@@ -1770,9 +1844,9 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_stack = ((struct __pyx_obj_7sklearn_4tree_6_utils_Stack *)__pyx_t_6);
   __pyx_t_6 = 0;
-
 0181:         cdef StackRecord stack_record
+
 0181:         cdef StackRecord stack_record
 0182: 
-
+0183:         with nogil:
+
+0183:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -1799,17 +1873,17 @@
         __pyx_L9:;
       }
   }
-
 0184:             # push root node onto stack
-
+0185:             rc = stack.push(0, n_node_samples, 0, _TREE_UNDEFINED, 0, INFINITY, 0)
+
 0184:             # push root node onto stack
+
+0185:             rc = stack.push(0, n_node_samples, 0, _TREE_UNDEFINED, 0, INFINITY, 0)
        __pyx_t_5 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, 0, __pyx_v_n_node_samples, 0, __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED, 0, __pyx_v_13stpredictions_6models_3OK3_5_tree_INFINITY, 0); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 185, __pyx_L8_error)
         __pyx_v_rc = __pyx_t_5;
-
+0186:             if rc == -1:
+
+0186:             if rc == -1:
        __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
         if (__pyx_t_9) {
 /* … */
         }
-
 0187:                 # got return code -1 - out-of-memory
-
+0188:                 with gil:
+
 0187:                 # got return code -1 - out-of-memory
+
+0188:                 with gil:
          {
               #ifdef WITH_THREAD
               PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -1825,105 +1899,105 @@
                 }
               }
           }
-
+0189:                     raise MemoryError()
+
+0189:                     raise MemoryError()
                PyErr_NoMemory(); __PYX_ERR(0, 189, __pyx_L12_error)
               }
 
 0190: 
-
+0191:             while not stack.is_empty():
+
+0191:             while not stack.is_empty():
        while (1) {
           __pyx_t_9 = ((!(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->is_empty(__pyx_v_stack) != 0)) != 0);
           if (!__pyx_t_9) break;
-
+0192:                 stack.pop(&stack_record)
+
+0192:                 stack.pop(&stack_record)
          (void)(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->pop(__pyx_v_stack, (&__pyx_v_stack_record)));
 
 0193: 
-
+0194:                 start = stack_record.start
+
+0194:                 start = stack_record.start
          __pyx_t_14 = __pyx_v_stack_record.start;
           __pyx_v_start = __pyx_t_14;
-
+0195:                 end = stack_record.end
+
+0195:                 end = stack_record.end
          __pyx_t_14 = __pyx_v_stack_record.end;
           __pyx_v_end = __pyx_t_14;
-
+0196:                 depth = stack_record.depth
+
+0196:                 depth = stack_record.depth
          __pyx_t_14 = __pyx_v_stack_record.depth;
           __pyx_v_depth = __pyx_t_14;
-
+0197:                 parent = stack_record.parent
+
+0197:                 parent = stack_record.parent
          __pyx_t_14 = __pyx_v_stack_record.parent;
           __pyx_v_parent = __pyx_t_14;
-
+0198:                 is_left = stack_record.is_left
+
+0198:                 is_left = stack_record.is_left
          __pyx_t_9 = __pyx_v_stack_record.is_left;
           __pyx_v_is_left = __pyx_t_9;
-
+0199:                 impurity = stack_record.impurity
+
+0199:                 impurity = stack_record.impurity
          __pyx_t_11 = __pyx_v_stack_record.impurity;
           __pyx_v_impurity = __pyx_t_11;
-
+0200:                 n_constant_features = stack_record.n_constant_features
+
+0200:                 n_constant_features = stack_record.n_constant_features
          __pyx_t_14 = __pyx_v_stack_record.n_constant_features;
           __pyx_v_n_constant_features = __pyx_t_14;
 
 0201: 
-
+0202:                 n_node_samples = end - start
+
+0202:                 n_node_samples = end - start
          __pyx_v_n_node_samples = (__pyx_v_end - __pyx_v_start);
-
+0203:                 splitter.node_reset(start, end, &weighted_n_node_samples)
+
+0203:                 splitter.node_reset(start, end, &weighted_n_node_samples)
          __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_reset(__pyx_v_splitter, __pyx_v_start, __pyx_v_end, (&__pyx_v_weighted_n_node_samples)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 203, __pyx_L8_error)
 
 0204: 
-
+0205:                 is_leaf = (depth >= max_depth or
+
+0205:                 is_leaf = (depth >= max_depth or
          __pyx_t_8 = ((__pyx_v_depth >= __pyx_v_max_depth) != 0);
           if (!__pyx_t_8) {
           } else {
             __pyx_t_9 = __pyx_t_8;
             goto __pyx_L16_bool_binop_done;
           }
-
+0206:                            n_node_samples < min_samples_split or
+
+0206:                            n_node_samples < min_samples_split or
          __pyx_t_8 = ((__pyx_v_n_node_samples < __pyx_v_min_samples_split) != 0);
           if (!__pyx_t_8) {
           } else {
             __pyx_t_9 = __pyx_t_8;
             goto __pyx_L16_bool_binop_done;
           }
-
+0207:                            n_node_samples < 2 * min_samples_leaf or
+
+0207:                            n_node_samples < 2 * min_samples_leaf or
          __pyx_t_8 = ((__pyx_v_n_node_samples < (2 * __pyx_v_min_samples_leaf)) != 0);
           if (!__pyx_t_8) {
           } else {
             __pyx_t_9 = __pyx_t_8;
             goto __pyx_L16_bool_binop_done;
           }
-
+0208:                            weighted_n_node_samples < 2 * min_weight_leaf)
+
+0208:                            weighted_n_node_samples < 2 * min_weight_leaf)
          __pyx_t_8 = ((__pyx_v_weighted_n_node_samples < (2.0 * __pyx_v_min_weight_leaf)) != 0);
           __pyx_t_9 = __pyx_t_8;
           __pyx_L16_bool_binop_done:;
           __pyx_v_is_leaf = __pyx_t_9;
 
 0209: 
-
+0210:                 if first:
+
+0210:                 if first:
          __pyx_t_9 = (__pyx_v_first != 0);
           if (__pyx_t_9) {
 /* … */
           }
-
+0211:                     impurity = splitter.node_impurity()
+
+0211:                     impurity = splitter.node_impurity()
            __pyx_v_impurity = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_impurity(__pyx_v_splitter);
-
+0212:                     first = 0
+
+0212:                     first = 0
            __pyx_v_first = 0;
 
 0213: 
-
+0214:                 is_leaf = (is_leaf or
+
+0214:                 is_leaf = (is_leaf or
          __pyx_t_8 = (__pyx_v_is_leaf != 0);
           if (!__pyx_t_8) {
           } else {
             __pyx_t_9 = __pyx_t_8;
             goto __pyx_L21_bool_binop_done;
           }
-
+0215:                            (impurity <= min_impurity_split))
+
+0215:                            (impurity <= min_impurity_split))
          __pyx_t_8 = ((__pyx_v_impurity <= __pyx_v_min_impurity_split) != 0);
           __pyx_t_9 = __pyx_t_8;
           __pyx_L21_bool_binop_done:;
           __pyx_v_is_leaf = __pyx_t_9;
 
 0216: 
-
+0217:                 if not is_leaf:
+
+0217:                 if not is_leaf:
          __pyx_t_9 = ((!(__pyx_v_is_leaf != 0)) != 0);
           if (__pyx_t_9) {
 /* … */
           }
-
+0218:                     splitter.node_split(impurity, &split, &n_constant_features)
+
+0218:                     splitter.node_split(impurity, &split, &n_constant_features)
            __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_split(__pyx_v_splitter, __pyx_v_impurity, (&__pyx_v_split), (&__pyx_v_n_constant_features)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 218, __pyx_L8_error)
-
 0219:                     # If EPSILON=0 in the below comparison, float precision
-
 0220:                     # issues stop splitting, producing trees that are
-
 0221:                     # dissimilar to v0.18
-
+0222:                     is_leaf = (is_leaf or split.pos >= end or
+
 0219:                     # If EPSILON=0 in the below comparison, float precision
+
 0220:                     # issues stop splitting, producing trees that are
+
 0221:                     # dissimilar to v0.18
+
+0222:                     is_leaf = (is_leaf or split.pos >= end or
            __pyx_t_8 = (__pyx_v_is_leaf != 0);
             if (!__pyx_t_8) {
             } else {
@@ -1936,116 +2010,116 @@
               __pyx_t_9 = __pyx_t_8;
               goto __pyx_L24_bool_binop_done;
             }
-
+0223:                                (split.improvement + EPSILON <
+
+0223:                                (split.improvement + EPSILON <
            __pyx_t_8 = (((__pyx_v_split.improvement + __pyx_v_13stpredictions_6models_3OK3_5_tree_EPSILON) < __pyx_v_min_impurity_decrease) != 0);
             __pyx_t_9 = __pyx_t_8;
             __pyx_L24_bool_binop_done:;
             __pyx_v_is_leaf = __pyx_t_9;
-
 0224:                                 min_impurity_decrease))
+
 0224:                                 min_impurity_decrease))
 0225: 
-
+0226:                 node_id = tree._add_node(parent, is_left, is_leaf, split.feature,
+
+0226:                 node_id = tree._add_node(parent, is_left, is_leaf, split.feature,
          __pyx_t_10 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_add_node(__pyx_v_tree, __pyx_v_parent, __pyx_v_is_left, __pyx_v_is_leaf, __pyx_v_split.feature, __pyx_v_split.threshold, __pyx_v_impurity, __pyx_v_n_node_samples, __pyx_v_weighted_n_node_samples); if (unlikely(__pyx_t_10 == ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)-1))) __PYX_ERR(0, 226, __pyx_L8_error)
           __pyx_v_node_id = __pyx_t_10;
-
 0227:                                          split.threshold, impurity, n_node_samples,
-
 0228:                                          weighted_n_node_samples)
+
 0227:                                          split.threshold, impurity, n_node_samples,
+
 0228:                                          weighted_n_node_samples)
 0229: 
-
+0230:                 if node_id == SIZE_MAX:
+
+0230:                 if node_id == SIZE_MAX:
          __pyx_t_9 = ((__pyx_v_node_id == SIZE_MAX) != 0);
           if (__pyx_t_9) {
 /* … */
           }
-
+0231:                     rc = -1
+
+0231:                     rc = -1
            __pyx_v_rc = -1;
-
+0232:                     break
+
+0232:                     break
            goto __pyx_L15_break;
 
 0233: 
-
 0234:                 # Store value for all nodes, to facilitate tree/model
-
 0235:                 # inspection and interpretation
-
+0236:                 splitter.node_value(tree.value + node_id * y.shape[0])
+
 0234:                 # Store value for all nodes, to facilitate tree/model
+
 0235:                 # inspection and interpretation
+
+0236:                 splitter.node_value(tree.value + node_id * y.shape[0])
          ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_value(__pyx_v_splitter, (__pyx_v_tree->value + (__pyx_v_node_id * (__pyx_v_y->dimensions[0]))));
 
 0237: 
-
+0238:                 if not is_leaf:
+
+0238:                 if not is_leaf:
          __pyx_t_9 = ((!(__pyx_v_is_leaf != 0)) != 0);
           if (__pyx_t_9) {
 /* … */
           }
-
 0239:                     # Push right child on stack
-
+0240:                     rc = stack.push(split.pos, end, depth + 1, node_id, 0,
+
 0239:                     # Push right child on stack
+
+0240:                     rc = stack.push(split.pos, end, depth + 1, node_id, 0,
            __pyx_t_5 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, __pyx_v_split.pos, __pyx_v_end, (__pyx_v_depth + 1), __pyx_v_node_id, 0, __pyx_v_split.impurity_right, __pyx_v_n_constant_features); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 240, __pyx_L8_error)
             __pyx_v_rc = __pyx_t_5;
-
 0241:                                     split.impurity_right, n_constant_features)
-
+0242:                     if rc == -1:
+
 0241:                                     split.impurity_right, n_constant_features)
+
+0242:                     if rc == -1:
            __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_9) {
 /* … */
             }
-
+0243:                         break
+
+0243:                         break
              goto __pyx_L15_break;
 
 0244: 
-
 0245:                     # Push left child on stack
-
+0246:                     rc = stack.push(start, split.pos, depth + 1, node_id, 1,
+
 0245:                     # Push left child on stack
+
+0246:                     rc = stack.push(start, split.pos, depth + 1, node_id, 1,
            __pyx_t_5 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, __pyx_v_start, __pyx_v_split.pos, (__pyx_v_depth + 1), __pyx_v_node_id, 1, __pyx_v_split.impurity_left, __pyx_v_n_constant_features); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 246, __pyx_L8_error)
             __pyx_v_rc = __pyx_t_5;
-
 0247:                                     split.impurity_left, n_constant_features)
-
+0248:                     if rc == -1:
+
 0247:                                     split.impurity_left, n_constant_features)
+
+0248:                     if rc == -1:
            __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_9) {
 /* … */
             }
-
+0249:                         break
+
+0249:                         break
              goto __pyx_L15_break;
 
 0250: 
-
+0251:                 if depth > max_depth_seen:
+
+0251:                 if depth > max_depth_seen:
          __pyx_t_9 = ((__pyx_v_depth > __pyx_v_max_depth_seen) != 0);
           if (__pyx_t_9) {
 /* … */
           }
         }
         __pyx_L15_break:;
-
+0252:                     max_depth_seen = depth
+
+0252:                     max_depth_seen = depth
            __pyx_v_max_depth_seen = __pyx_v_depth;
 
 0253: 
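Stripped of the C scaffolding, the nogil section above is an iterative depth-first traversal over an explicit stack. A condensed, hedged Python rendering (names illustrative; `splitter` and `tree` stand in for the Cython extension types, `p` bundles the hyper-parameters, and the memory-error handling and final `_resize_c` are omitted):

    INFINITY = float("inf")
    EPSILON = 2.220446049250313e-16   # np.finfo('double').eps

    # Hedged sketch: the depth-first build loop above, in plain Python.
    def build_depth_first(tree, splitter, p):
        stack = [(0, splitter.n_samples, 0, None, False, INFINITY, 0)]
        first, max_depth_seen = True, -1
        while stack:
            start, end, depth, parent, is_left, impurity, n_const = stack.pop()
            n_node = end - start
            weighted = splitter.node_reset(start, end)
            is_leaf = (depth >= p.max_depth
                       or n_node < p.min_samples_split
                       or n_node < 2 * p.min_samples_leaf
                       or weighted < 2 * p.min_weight_leaf)
            if first:                       # root impurity is computed lazily
                impurity, first = splitter.node_impurity(), False
            is_leaf = is_leaf or impurity <= p.min_impurity_split
            split = None
            if not is_leaf:
                split, n_const = splitter.node_split(impurity, n_const)
                # EPSILON keeps float-precision ties from stopping splits.
                is_leaf = (split.pos >= end
                           or split.improvement + EPSILON < p.min_impurity_decrease)
            node_id = tree.add_node(parent, is_left, is_leaf, split,
                                    impurity, n_node, weighted)
            if not is_leaf:
                # Push the right child first so the left child (LIFO) is next.
                stack.append((split.pos, end, depth + 1, node_id, False,
                              split.impurity_right, n_const))
                stack.append((start, split.pos, depth + 1, node_id, True,
                              split.impurity_left, n_const))
            max_depth_seen = max(max_depth_seen, depth)
        tree.max_depth = max_depth_seen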
-
+0254:             if rc >= 0:
+
+0254:             if rc >= 0:
        __pyx_t_9 = ((__pyx_v_rc >= 0) != 0);
         if (__pyx_t_9) {
 /* … */
         }
-
+0255:                 rc = tree._resize_c(tree.node_count)
+
+0255:                 rc = tree._resize_c(tree.node_count)
          __pyx_t_15.__pyx_n = 1;
           __pyx_t_15.capacity = __pyx_v_tree->node_count;
           __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_resize_c(__pyx_v_tree, &__pyx_t_15); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 255, __pyx_L8_error)
           __pyx_v_rc = __pyx_t_5;
 
 0256: 
-
+0257:             if rc >= 0:
+
+0257:             if rc >= 0:
        __pyx_t_9 = ((__pyx_v_rc >= 0) != 0);
         if (__pyx_t_9) {
 /* … */
         }
       }
-
+0258:                 tree.max_depth = max_depth_seen
+
+0258:                 tree.max_depth = max_depth_seen
          __pyx_v_tree->max_depth = __pyx_v_max_depth_seen;
 
 0259: 
-
+0260:         if rc == -1:
+
+0260:         if rc == -1:
  __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
   if (unlikely(__pyx_t_9)) {
 /* … */
   }
-
+0261:             raise MemoryError()
+
+0261:             raise MemoryError()
    PyErr_NoMemory(); __PYX_ERR(0, 261, __pyx_L1_error)
 
 0262: 
 0263: 
-
 0264:         # feed the tree attribute 'K_y'
+
 0264:         # feed the tree attribute 'K_y'
 0265: 
-
+0266:         tree.K_y = y
+
+0266:         tree.K_y = y
  __Pyx_INCREF(((PyObject *)__pyx_v_y));
   __Pyx_GIVEREF(((PyObject *)__pyx_v_y));
   __Pyx_GOTREF(__pyx_v_tree->K_y);
   __Pyx_DECREF(((PyObject *)__pyx_v_tree->K_y));
   __pyx_v_tree->K_y = __pyx_v_y;
 
 0267: 
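
The loop that ends here replaces recursion with an explicit stack: the right child is pushed before the left, so the left partition is popped and processed first, and a push failure (rc == -1) aborts the build with a MemoryError. A hedged Python sketch of that pattern, with an assumed `split_fn` callback standing in for the splitter:

    def build_depth_first(n_samples, split_fn):
        # Records are (start, end, depth); split_fn returns (is_leaf, pos).
        stack = [(0, n_samples, 0)]
        max_depth_seen = -1
        while stack:
            start, end, depth = stack.pop()
            is_leaf, pos = split_fn(start, end, depth)
            if not is_leaf:
                stack.append((pos, end, depth + 1))    # right child first
                stack.append((start, pos, depth + 1))  # left popped next
            if depth > max_depth_seen:
                max_depth_seen = depth
        return max_depth_seen
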
-
 0268: # Best first builder ----------------------------------------------------------
+
 0268: # Best first builder ----------------------------------------------------------
 0269: 
-
+0270: cdef inline int _add_to_frontier(PriorityHeapRecord* rec,
+
+0270: cdef inline int _add_to_frontier(PriorityHeapRecord* rec,
static CYTHON_INLINE int __pyx_f_13stpredictions_6models_3OK3_5_tree__add_to_frontier(struct __pyx_t_7sklearn_4tree_6_utils_PriorityHeapRecord *__pyx_v_rec, struct __pyx_obj_7sklearn_4tree_6_utils_PriorityHeap *__pyx_v_frontier) {
   int __pyx_r;
 /* … */
@@ -2064,21 +2138,21 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0271:                                  PriorityHeap frontier) nogil except -1:
-
 0272:     """Adds record ``rec`` to the priority queue ``frontier``
+
 0271:                                  PriorityHeap frontier) nogil except -1:
+
 0272:     """Adds record ``rec`` to the priority queue ``frontier``
 0273: 
-
 0274:     Returns -1 in case of failure to allocate memory (and raises MemoryError)
-
 0275:     or 0 otherwise.
-
 0276:     """
-
+0277:     return frontier.push(rec.node_id, rec.start, rec.end, rec.pos, rec.depth,
+
 0274:     Returns -1 in case of failure to allocate memory (and raises MemoryError)
+
 0275:     or 0 otherwise.
+
 0276:     """
+
+0277:     return frontier.push(rec.node_id, rec.start, rec.end, rec.pos, rec.depth,
  __pyx_t_1 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_PriorityHeap *)__pyx_v_frontier->__pyx_vtab)->push(__pyx_v_frontier, __pyx_v_rec->node_id, __pyx_v_rec->start, __pyx_v_rec->end, __pyx_v_rec->pos, __pyx_v_rec->depth, __pyx_v_rec->is_leaf, __pyx_v_rec->improvement, __pyx_v_rec->impurity, __pyx_v_rec->impurity_left, __pyx_v_rec->impurity_right); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 277, __pyx_L1_error)
   __pyx_r = __pyx_t_1;
   goto __pyx_L0;
-
 0278:                          rec.is_leaf, rec.improvement, rec.impurity,
-
 0279:                          rec.impurity_left, rec.impurity_right)
+
 0278:                          rec.is_leaf, rec.improvement, rec.impurity,
+
 0279:                          rec.impurity_left, rec.impurity_right)
 0280: 
 0281: 
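
`_add_to_frontier` above simply forwards the record's fields to `PriorityHeap.push` and propagates -1 on allocation failure. A Python analogue of the same idea, assuming dict-based records (heapq is a min-heap, so the improvement is negated to pop the best node first):

    import heapq, itertools

    _tiebreak = itertools.count()

    def add_to_frontier(rec, frontier):
        # Negated improvement => highest improvement pops first; the
        # counter breaks ties between records with equal improvement.
        heapq.heappush(frontier, (-rec["improvement"], next(_tiebreak), rec))
        return 0
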
-
+0282: cdef class BestFirstTreeBuilder(TreeBuilder):
+
+0282: cdef class BestFirstTreeBuilder(TreeBuilder):
struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder {
   struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder __pyx_base;
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t max_leaf_nodes;
@@ -2091,14 +2165,14 @@
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder *__pyx_vtabptr_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder;
 static CYTHON_INLINE int __pyx_f_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder__add_split_node(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder *, struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t, double, int, int, struct __pyx_t_13stpredictions_6models_3OK3_5_tree_Node *, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t, struct __pyx_t_7sklearn_4tree_6_utils_PriorityHeapRecord *, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t);
 
-
 0283:     """Build a decision tree in best-first fashion.
+
 0283:     """Build a decision tree in best-first fashion.
 0284: 
-
 0285:     The best node to expand is given by the node at the frontier that has the
-
 0286:     highest impurity improvement.
-
 0287:     """
-
 0288:     cdef SIZE_t max_leaf_nodes
+
 0285:     The best node to expand is given by the node at the frontier that has the
+
 0286:     highest impurity improvement.
+
 0287:     """
+
 0288:     cdef SIZE_t max_leaf_nodes
 0289: 
-
+0290:     def __cinit__(self, Splitter splitter, SIZE_t min_samples_split,
+
+0290:     def __cinit__(self, Splitter splitter, SIZE_t min_samples_split,
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -2249,32 +2323,32 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0291:                   SIZE_t min_samples_leaf,  min_weight_leaf,
-
 0292:                   SIZE_t max_depth, SIZE_t max_leaf_nodes,
-
 0293:                   double min_impurity_decrease, double min_impurity_split):
-
+0294:         self.splitter = splitter
+
 0291:                   SIZE_t min_samples_leaf,  min_weight_leaf,
+
 0292:                   SIZE_t max_depth, SIZE_t max_leaf_nodes,
+
 0293:                   double min_impurity_decrease, double min_impurity_split):
+
+0294:         self.splitter = splitter
  __Pyx_INCREF(((PyObject *)__pyx_v_splitter));
   __Pyx_GIVEREF(((PyObject *)__pyx_v_splitter));
   __Pyx_GOTREF(__pyx_v_self->__pyx_base.splitter);
   __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx_base.splitter));
   __pyx_v_self->__pyx_base.splitter = __pyx_v_splitter;
-
+0295:         self.min_samples_split = min_samples_split
+
+0295:         self.min_samples_split = min_samples_split
  __pyx_v_self->__pyx_base.min_samples_split = __pyx_v_min_samples_split;
-
+0296:         self.min_samples_leaf = min_samples_leaf
+
+0296:         self.min_samples_leaf = min_samples_leaf
  __pyx_v_self->__pyx_base.min_samples_leaf = __pyx_v_min_samples_leaf;
-
+0297:         self.min_weight_leaf = min_weight_leaf
+
+0297:         self.min_weight_leaf = min_weight_leaf
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_min_weight_leaf); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 297, __pyx_L1_error)
   __pyx_v_self->__pyx_base.min_weight_leaf = __pyx_t_1;
-
+0298:         self.max_depth = max_depth
+
+0298:         self.max_depth = max_depth
  __pyx_v_self->__pyx_base.max_depth = __pyx_v_max_depth;
-
+0299:         self.max_leaf_nodes = max_leaf_nodes
+
+0299:         self.max_leaf_nodes = max_leaf_nodes
  __pyx_v_self->max_leaf_nodes = __pyx_v_max_leaf_nodes;
-
+0300:         self.min_impurity_decrease = min_impurity_decrease
+
+0300:         self.min_impurity_decrease = min_impurity_decrease
  __pyx_v_self->__pyx_base.min_impurity_decrease = __pyx_v_min_impurity_decrease;
-
+0301:         self.min_impurity_split = min_impurity_split
+
+0301:         self.min_impurity_split = min_impurity_split
  __pyx_v_self->__pyx_base.min_impurity_split = __pyx_v_min_impurity_split;
 
 0302: 
-
+0303:     cpdef build(self, Tree tree, object X, np.ndarray y,
+
+0303:     cpdef build(self, Tree tree, object X, np.ndarray y,
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder_3build(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder_build(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder *__pyx_v_self, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_tree, PyObject *__pyx_v_X, PyArrayObject *__pyx_v_y, int __pyx_skip_dispatch, struct __pyx_opt_args_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder_build *__pyx_optional_args) {
 /* … */
@@ -2436,7 +2510,7 @@
   int __pyx_n;
   PyArrayObject *sample_weight;
 };
-
+0304:                 np.ndarray sample_weight=None):
+
+0304:                 np.ndarray sample_weight=None):
  PyArrayObject *__pyx_v_sample_weight = ((PyArrayObject *)Py_None);
   __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *__pyx_v_sample_weight_ptr;
   struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_splitter = 0;
@@ -2538,10 +2612,10 @@
   if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) __PYX_ERR(0, 303, __pyx_L1_error)
   if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sample_weight), __pyx_ptype_5numpy_ndarray, 1, "sample_weight", 0))) __PYX_ERR(0, 304, __pyx_L1_error)
   __pyx_r = __pyx_pf_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder_2build(((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder *)__pyx_v_self), __pyx_v_tree, __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight);
-
 0305:         """Build a decision tree from the training set (X, y)."""
+
 0305:         """Build a decision tree from the training set (X, y)."""
 0306: 
-
 0307:         # check input
-
+0308:         X, y, sample_weight = self._check_input(X, y, sample_weight)
+
 0307:         # check input
+
+0308:         X, y, sample_weight = self._check_input(X, y, sample_weight)
  __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_5_tree_11TreeBuilder__check_input(((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_TreeBuilder *)__pyx_v_self), __pyx_v_X, __pyx_v_y, __pyx_v_sample_weight); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 308, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
@@ -2606,45 +2680,45 @@
   __Pyx_DECREF_SET(__pyx_v_sample_weight, ((PyArrayObject *)__pyx_t_6));
   __pyx_t_6 = 0;
 
 0309: 
-
+0310:         cdef DOUBLE_t* sample_weight_ptr = NULL
+
+0310:         cdef DOUBLE_t* sample_weight_ptr = NULL
  __pyx_v_sample_weight_ptr = NULL;
-
+0311:         if sample_weight is not None:
+
+0311:         if sample_weight is not None:
  __pyx_t_8 = (((PyObject *)__pyx_v_sample_weight) != Py_None);
   __pyx_t_9 = (__pyx_t_8 != 0);
   if (__pyx_t_9) {
 /* … */
   }
-
+0312:             sample_weight_ptr = <DOUBLE_t*> sample_weight.data
+
+0312:             sample_weight_ptr = <DOUBLE_t*> sample_weight.data
    __pyx_v_sample_weight_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *)__pyx_v_sample_weight->data);
 
 0313: 
-
 0314:         # Parameters
-
+0315:         cdef Splitter splitter = self.splitter
+
 0314:         # Parameters
+
+0315:         cdef Splitter splitter = self.splitter
  __pyx_t_1 = ((PyObject *)__pyx_v_self->__pyx_base.splitter);
   __Pyx_INCREF(__pyx_t_1);
   __pyx_v_splitter = ((struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_t_1);
   __pyx_t_1 = 0;
-
+0316:         cdef SIZE_t max_leaf_nodes = self.max_leaf_nodes
+
+0316:         cdef SIZE_t max_leaf_nodes = self.max_leaf_nodes
  __pyx_t_10 = __pyx_v_self->max_leaf_nodes;
   __pyx_v_max_leaf_nodes = __pyx_t_10;
-
+0317:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
+
+0317:         cdef SIZE_t min_samples_leaf = self.min_samples_leaf
  __pyx_t_10 = __pyx_v_self->__pyx_base.min_samples_leaf;
   __pyx_v_min_samples_leaf = __pyx_t_10;
-
+0318:         cdef double min_weight_leaf = self.min_weight_leaf
+
+0318:         cdef double min_weight_leaf = self.min_weight_leaf
  __pyx_t_11 = __pyx_v_self->__pyx_base.min_weight_leaf;
   __pyx_v_min_weight_leaf = __pyx_t_11;
-
+0319:         cdef SIZE_t min_samples_split = self.min_samples_split
+
+0319:         cdef SIZE_t min_samples_split = self.min_samples_split
  __pyx_t_10 = __pyx_v_self->__pyx_base.min_samples_split;
   __pyx_v_min_samples_split = __pyx_t_10;
 
 0320: 
-
 0321:         # Recursive partition (without actual recursion)
-
+0322:         splitter.init(X, y, sample_weight_ptr)
+
 0321:         # Recursive partition (without actual recursion)
+
+0322:         splitter.init(X, y, sample_weight_ptr)
  __pyx_t_12 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_nn___pyx_t_7sklearn_4tree_5_tree_DOUBLE_t__const__(((PyObject *)__pyx_v_y), 0); if (unlikely(!__pyx_t_12.memview)) __PYX_ERR(0, 322, __pyx_L1_error)
   __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->init(__pyx_v_splitter, __pyx_v_X, __pyx_t_12, __pyx_v_sample_weight_ptr); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 322, __pyx_L1_error)
   __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1);
   __pyx_t_12.memview = NULL;
   __pyx_t_12.data = NULL;
 
 0323: 
-
+0324:         cdef PriorityHeap frontier = PriorityHeap(INITIAL_STACK_SIZE)
+
+0324:         cdef PriorityHeap frontier = PriorityHeap(INITIAL_STACK_SIZE)
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_13stpredictions_6models_3OK3_5_tree_INITIAL_STACK_SIZE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_6 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_7sklearn_4tree_6_utils_PriorityHeap), __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 324, __pyx_L1_error)
@@ -2652,29 +2726,29 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_frontier = ((struct __pyx_obj_7sklearn_4tree_6_utils_PriorityHeap *)__pyx_t_6);
   __pyx_t_6 = 0;
-
 0325:         cdef PriorityHeapRecord record
-
 0326:         cdef PriorityHeapRecord split_node_left
-
 0327:         cdef PriorityHeapRecord split_node_right
+
 0325:         cdef PriorityHeapRecord record
+
 0326:         cdef PriorityHeapRecord split_node_left
+
 0327:         cdef PriorityHeapRecord split_node_right
 0328: 
-
+0329:         cdef SIZE_t n_node_samples = splitter.n_samples
+
+0329:         cdef SIZE_t n_node_samples = splitter.n_samples
  __pyx_t_13 = __pyx_v_splitter->n_samples;
   __pyx_v_n_node_samples = __pyx_t_13;
-
+0330:         cdef SIZE_t max_split_nodes = max_leaf_nodes - 1
+
+0330:         cdef SIZE_t max_split_nodes = max_leaf_nodes - 1
  __pyx_v_max_split_nodes = (__pyx_v_max_leaf_nodes - 1);
-
 0331:         cdef bint is_leaf
-
+0332:         cdef SIZE_t max_depth_seen = -1
+
 0331:         cdef bint is_leaf
+
+0332:         cdef SIZE_t max_depth_seen = -1
  __pyx_v_max_depth_seen = -1;
-
+0333:         cdef int rc = 0
+
+0333:         cdef int rc = 0
  __pyx_v_rc = 0;
-
 0334:         cdef Node* node
+
 0334:         cdef Node* node
 0335: 
-
 0336:         # Initial capacity
-
+0337:         cdef SIZE_t init_capacity = max_split_nodes + max_leaf_nodes
+
 0336:         # Initial capacity
+
+0337:         cdef SIZE_t init_capacity = max_split_nodes + max_leaf_nodes
  __pyx_v_init_capacity = (__pyx_v_max_split_nodes + __pyx_v_max_leaf_nodes);
-
+0338:         tree._resize(init_capacity)
+
+0338:         tree._resize(init_capacity)
  __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_resize(__pyx_v_tree, __pyx_v_init_capacity); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 338, __pyx_L1_error)
 
 0339: 
-
+0340:         with nogil:
+
+0340:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -2701,28 +2775,28 @@
         __pyx_L8:;
       }
   }
-
 0341:             # add root to frontier
-
+0342:             rc = self._add_split_node(splitter, tree, 0, n_node_samples,
+
 0341:             # add root to frontier
+
+0342:             rc = self._add_split_node(splitter, tree, 0, n_node_samples,
        __pyx_t_5 = __pyx_f_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder__add_split_node(__pyx_v_self, __pyx_v_splitter, __pyx_v_tree, 0, __pyx_v_n_node_samples, __pyx_v_13stpredictions_6models_3OK3_5_tree_INFINITY, __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_FIRST, __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_LEFT, NULL, 0, (&__pyx_v_split_node_left), (__pyx_v_y->dimensions[0])); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 342, __pyx_L7_error)
         __pyx_v_rc = __pyx_t_5;
-
 0343:                                       INFINITY, IS_FIRST, IS_LEFT, NULL, 0,
-
 0344:                                       &split_node_left,
-
 0345:                                       y.shape[0])
-
+0346:             if rc >= 0:
+
 0343:                                       INFINITY, IS_FIRST, IS_LEFT, NULL, 0,
+
 0344:                                       &split_node_left,
+
 0345:                                       y.shape[0])
+
+0346:             if rc >= 0:
        __pyx_t_9 = ((__pyx_v_rc >= 0) != 0);
         if (__pyx_t_9) {
 /* … */
         }
-
+0347:                 rc = _add_to_frontier(&split_node_left, frontier)
+
+0347:                 rc = _add_to_frontier(&split_node_left, frontier)
          __pyx_t_5 = __pyx_f_13stpredictions_6models_3OK3_5_tree__add_to_frontier((&__pyx_v_split_node_left), __pyx_v_frontier); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 347, __pyx_L7_error)
           __pyx_v_rc = __pyx_t_5;
 
 0348: 
-
+0349:             if rc == -1:
+
+0349:             if rc == -1:
        __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
         if (__pyx_t_9) {
 /* … */
         }
-
+0350:                 with gil:
+
+0350:                 with gil:
          {
               #ifdef WITH_THREAD
               PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -2738,20 +2812,20 @@
                 }
               }
           }
-
+0351:                     raise MemoryError()
+
+0351:                     raise MemoryError()
                PyErr_NoMemory(); __PYX_ERR(0, 351, __pyx_L12_error)
               }
 
 0352: 
-
+0353:             while not frontier.is_empty():
+
+0353:             while not frontier.is_empty():
        while (1) {
           __pyx_t_9 = ((!(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_PriorityHeap *)__pyx_v_frontier->__pyx_vtab)->is_empty(__pyx_v_frontier) != 0)) != 0);
           if (!__pyx_t_9) break;
-
+0354:                 frontier.pop(&record)
+
+0354:                 frontier.pop(&record)
          (void)(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_PriorityHeap *)__pyx_v_frontier->__pyx_vtab)->pop(__pyx_v_frontier, (&__pyx_v_record)));
 
 0355: 
-
+0356:                 node = &tree.nodes[record.node_id]
+
+0356:                 node = &tree.nodes[record.node_id]
          __pyx_v_node = (&(__pyx_v_tree->nodes[__pyx_v_record.node_id]));
-
+0357:                 is_leaf = (record.is_leaf or max_split_nodes <= 0)
+
+0357:                 is_leaf = (record.is_leaf or max_split_nodes <= 0)
          __pyx_t_8 = (__pyx_v_record.is_leaf != 0);
           if (!__pyx_t_8) {
           } else {
@@ -2763,138 +2837,138 @@
           __pyx_L16_bool_binop_done:;
           __pyx_v_is_leaf = __pyx_t_9;
 
 0358: 
-
+0359:                 if is_leaf:
+
+0359:                 if is_leaf:
          __pyx_t_9 = (__pyx_v_is_leaf != 0);
           if (__pyx_t_9) {
 /* … */
             goto __pyx_L18;
           }
-
 0360:                     # Node is not expandable; set node as leaf
-
+0361:                     node.left_child = _TREE_LEAF
+
 0360:                     # Node is not expandable; set node as leaf
+
+0361:                     node.left_child = _TREE_LEAF
            __pyx_v_node->left_child = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF;
-
+0362:                     node.right_child = _TREE_LEAF
+
+0362:                     node.right_child = _TREE_LEAF
            __pyx_v_node->right_child = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF;
-
+0363:                     node.feature = _TREE_UNDEFINED
+
+0363:                     node.feature = _TREE_UNDEFINED
            __pyx_v_node->feature = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED;
-
+0364:                     node.threshold = _TREE_UNDEFINED
+
+0364:                     node.threshold = _TREE_UNDEFINED
            __pyx_v_node->threshold = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED;
 
 0365: 
-
 0366:                 else:
-
 0367:                     # Node is expandable
+
 0366:                 else:
+
 0367:                     # Node is expandable
 0368: 
-
 0369:                     # Decrement number of split nodes available
-
+0370:                     max_split_nodes -= 1
+
 0369:                     # Decrement number of split nodes available
+
+0370:                     max_split_nodes -= 1
          /*else*/ {
             __pyx_v_max_split_nodes = (__pyx_v_max_split_nodes - 1);
 
 0371: 
-
 0372:                     # Compute left split node
-
+0373:                     rc = self._add_split_node(splitter, tree,
+
 0372:                     # Compute left split node
+
+0373:                     rc = self._add_split_node(splitter, tree,
            __pyx_t_5 = __pyx_f_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder__add_split_node(__pyx_v_self, __pyx_v_splitter, __pyx_v_tree, __pyx_v_record.start, __pyx_v_record.pos, __pyx_v_record.impurity_left, __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_NOT_FIRST, __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_LEFT, __pyx_v_node, (__pyx_v_record.depth + 1), (&__pyx_v_split_node_left), (__pyx_v_y->dimensions[0])); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 373, __pyx_L7_error)
             __pyx_v_rc = __pyx_t_5;
-
 0374:                                               record.start, record.pos,
-
 0375:                                               record.impurity_left,
-
 0376:                                               IS_NOT_FIRST, IS_LEFT, node,
-
 0377:                                               record.depth + 1,
-
 0378:                                               &split_node_left,
-
 0379:                                               y.shape[0])
-
+0380:                     if rc == -1:
+
 0374:                                               record.start, record.pos,
+
 0375:                                               record.impurity_left,
+
 0376:                                               IS_NOT_FIRST, IS_LEFT, node,
+
 0377:                                               record.depth + 1,
+
 0378:                                               &split_node_left,
+
 0379:                                               y.shape[0])
+
+0380:                     if rc == -1:
            __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_9) {
 /* … */
             }
-
+0381:                         break
+
+0381:                         break
              goto __pyx_L15_break;
 
 0382: 
-
 0383:                     # tree.nodes may have changed
-
+0384:                     node = &tree.nodes[record.node_id]
+
 0383:                     # tree.nodes may have changed
+
+0384:                     node = &tree.nodes[record.node_id]
            __pyx_v_node = (&(__pyx_v_tree->nodes[__pyx_v_record.node_id]));
 
 0385: 
-
 0386:                     # Compute right split node
-
+0387:                     rc = self._add_split_node(splitter, tree, record.pos,
+
 0386:                     # Compute right split node
+
+0387:                     rc = self._add_split_node(splitter, tree, record.pos,
            __pyx_t_5 = __pyx_f_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder__add_split_node(__pyx_v_self, __pyx_v_splitter, __pyx_v_tree, __pyx_v_record.pos, __pyx_v_record.end, __pyx_v_record.impurity_right, __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_NOT_FIRST, __pyx_v_13stpredictions_6models_3OK3_5_tree_IS_NOT_LEFT, __pyx_v_node, (__pyx_v_record.depth + 1), (&__pyx_v_split_node_right), (__pyx_v_y->dimensions[0])); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 387, __pyx_L7_error)
             __pyx_v_rc = __pyx_t_5;
-
 0388:                                               record.end,
-
 0389:                                               record.impurity_right,
-
 0390:                                               IS_NOT_FIRST, IS_NOT_LEFT, node,
-
 0391:                                               record.depth + 1,
-
 0392:                                               &split_node_right,
-
 0393:                                               y.shape[0])
-
+0394:                     if rc == -1:
+
 0388:                                               record.end,
+
 0389:                                               record.impurity_right,
+
 0390:                                               IS_NOT_FIRST, IS_NOT_LEFT, node,
+
 0391:                                               record.depth + 1,
+
 0392:                                               &split_node_right,
+
 0393:                                               y.shape[0])
+
+0394:                     if rc == -1:
            __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_9) {
 /* … */
             }
-
+0395:                         break
+
+0395:                         break
              goto __pyx_L15_break;
 
 0396: 
-
 0397:                     # Add nodes to queue
-
+0398:                     rc = _add_to_frontier(&split_node_left, frontier)
+
 0397:                     # Add nodes to queue
+
+0398:                     rc = _add_to_frontier(&split_node_left, frontier)
            __pyx_t_5 = __pyx_f_13stpredictions_6models_3OK3_5_tree__add_to_frontier((&__pyx_v_split_node_left), __pyx_v_frontier); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 398, __pyx_L7_error)
             __pyx_v_rc = __pyx_t_5;
-
+0399:                     if rc == -1:
+
+0399:                     if rc == -1:
            __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_9) {
 /* … */
             }
-
+0400:                         break
+
+0400:                         break
              goto __pyx_L15_break;
 
 0401: 
-
+0402:                     rc = _add_to_frontier(&split_node_right, frontier)
+
+0402:                     rc = _add_to_frontier(&split_node_right, frontier)
            __pyx_t_5 = __pyx_f_13stpredictions_6models_3OK3_5_tree__add_to_frontier((&__pyx_v_split_node_right), __pyx_v_frontier); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 402, __pyx_L7_error)
             __pyx_v_rc = __pyx_t_5;
-
+0403:                     if rc == -1:
+
+0403:                     if rc == -1:
            __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_9) {
 /* … */
             }
           }
           __pyx_L18:;
-
+0404:                         break
+
+0404:                         break
              goto __pyx_L15_break;
 
 0405: 
-
+0406:                 if record.depth > max_depth_seen:
+
+0406:                 if record.depth > max_depth_seen:
          __pyx_t_9 = ((__pyx_v_record.depth > __pyx_v_max_depth_seen) != 0);
           if (__pyx_t_9) {
 /* … */
           }
         }
         __pyx_L15_break:;
-
+0407:                     max_depth_seen = record.depth
+
+0407:                     max_depth_seen = record.depth
            __pyx_t_14 = __pyx_v_record.depth;
             __pyx_v_max_depth_seen = __pyx_t_14;
 
 0408: 
-
+0409:             if rc >= 0:
+
+0409:             if rc >= 0:
        __pyx_t_9 = ((__pyx_v_rc >= 0) != 0);
         if (__pyx_t_9) {
 /* … */
         }
-
+0410:                 rc = tree._resize_c(tree.node_count)
+
+0410:                 rc = tree._resize_c(tree.node_count)
          __pyx_t_15.__pyx_n = 1;
           __pyx_t_15.capacity = __pyx_v_tree->node_count;
           __pyx_t_5 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_resize_c(__pyx_v_tree, &__pyx_t_15); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 410, __pyx_L7_error)
           __pyx_v_rc = __pyx_t_5;
 
 0411: 
-
+0412:             if rc >= 0:
+
+0412:             if rc >= 0:
        __pyx_t_9 = ((__pyx_v_rc >= 0) != 0);
         if (__pyx_t_9) {
 /* … */
         }
       }
-
+0413:                 tree.max_depth = max_depth_seen
+
+0413:                 tree.max_depth = max_depth_seen
          __pyx_v_tree->max_depth = __pyx_v_max_depth_seen;
 
 0414: 
-
+0415:         if rc == -1:
+
+0415:         if rc == -1:
  __pyx_t_9 = ((__pyx_v_rc == -1L) != 0);
   if (unlikely(__pyx_t_9)) {
 /* … */
   }
-
+0416:             raise MemoryError()
+
+0416:             raise MemoryError()
    PyErr_NoMemory(); __PYX_ERR(0, 416, __pyx_L1_error)
 
 0417: 
 0418: 
-
 0419:         # feed the tree attribute 'K_y'
+
 0419:         # feed the tree attribute 'K_y'
 0420: 
-
+0421:         tree.K_y = y
+
+0421:         tree.K_y = y
  __Pyx_INCREF(((PyObject *)__pyx_v_y));
   __Pyx_GIVEREF(((PyObject *)__pyx_v_y));
   __Pyx_GOTREF(__pyx_v_tree->K_y);
@@ -2902,7 +2976,7 @@
   __pyx_v_tree->K_y = __pyx_v_y;
 
 0422: 
 0423: 
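
Both builders finish by storing the training output Gram matrix on `tree.K_y`; here `y` is already that [n_train_samples, n_train_samples] matrix. Purely for illustration, such a matrix would be computed from raw outputs like this under a linear-kernel assumption (the actual kernel is whatever produced `y` upstream):

    import numpy as np

    def output_gram(Y):
        # K_y[i, j] = <y_i, y_j>; a linear-kernel stand-in, not the
        # library's code path.
        Y = np.asarray(Y, dtype=float)
        return Y @ Y.T
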
-
+0424:     cdef inline int _add_split_node(self, Splitter splitter, Tree tree,
+
+0424:     cdef inline int _add_split_node(self, Splitter splitter, Tree tree,
static CYTHON_INLINE int __pyx_f_13stpredictions_6models_3OK3_5_tree_20BestFirstTreeBuilder__add_split_node(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_BestFirstTreeBuilder *__pyx_v_self, struct __pyx_obj_13stpredictions_6models_3OK3_9_splitter_Splitter *__pyx_v_splitter, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_tree, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_start, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_end, double __pyx_v_impurity, int __pyx_v_is_first, int __pyx_v_is_left, struct __pyx_t_13stpredictions_6models_3OK3_5_tree_Node *__pyx_v_parent, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_depth, struct __pyx_t_7sklearn_4tree_6_utils_PriorityHeapRecord *__pyx_v_res, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_n_samples) {
   struct __pyx_t_13stpredictions_6models_3OK3_9_splitter_SplitRecord __pyx_v_split;
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_node_id;
@@ -2930,88 +3004,88 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0425:                                     SIZE_t start, SIZE_t end, double impurity,
-
 0426:                                     bint is_first, bint is_left, Node* parent,
-
 0427:                                     SIZE_t depth,
-
 0428:                                     PriorityHeapRecord* res,
-
 0429:                                     SIZE_t n_samples) nogil except -1:
-
 0430:         """Adds node w/ partition ``[start, end)`` to the frontier. """
-
 0431:         cdef SplitRecord split
-
 0432:         cdef SIZE_t node_id
-
 0433:         cdef SIZE_t n_node_samples
-
+0434:         cdef SIZE_t n_constant_features = 0
+
 0425:                                     SIZE_t start, SIZE_t end, double impurity,
+
 0426:                                     bint is_first, bint is_left, Node* parent,
+
 0427:                                     SIZE_t depth,
+
 0428:                                     PriorityHeapRecord* res,
+
 0429:                                     SIZE_t n_samples) nogil except -1:
+
 0430:         """Adds node w/ partition ``[start, end)`` to the frontier. """
+
 0431:         cdef SplitRecord split
+
 0432:         cdef SIZE_t node_id
+
 0433:         cdef SIZE_t n_node_samples
+
+0434:         cdef SIZE_t n_constant_features = 0
  __pyx_v_n_constant_features = 0;
-
+0435:         cdef double weighted_n_samples = splitter.weighted_n_samples
+
+0435:         cdef double weighted_n_samples = splitter.weighted_n_samples
  __pyx_t_1 = __pyx_v_splitter->weighted_n_samples;
   __pyx_v_weighted_n_samples = __pyx_t_1;
-
+0436:         cdef double min_impurity_decrease = self.min_impurity_decrease
+
+0436:         cdef double min_impurity_decrease = self.min_impurity_decrease
  __pyx_t_1 = __pyx_v_self->__pyx_base.min_impurity_decrease;
   __pyx_v_min_impurity_decrease = __pyx_t_1;
-
+0437:         cdef double min_impurity_split = self.min_impurity_split
+
+0437:         cdef double min_impurity_split = self.min_impurity_split
  __pyx_t_1 = __pyx_v_self->__pyx_base.min_impurity_split;
   __pyx_v_min_impurity_split = __pyx_t_1;
-
 0438:         cdef double weighted_n_node_samples
-
 0439:         cdef bint is_leaf
-
 0440:         cdef SIZE_t n_left, n_right
-
 0441:         cdef double imp_diff
+
 0438:         cdef double weighted_n_node_samples
+
 0439:         cdef bint is_leaf
+
 0440:         cdef SIZE_t n_left, n_right
+
 0441:         cdef double imp_diff
 0442: 
-
+0443:         splitter.node_reset(start, end, &weighted_n_node_samples)
+
+0443:         splitter.node_reset(start, end, &weighted_n_node_samples)
  __pyx_t_2 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_reset(__pyx_v_splitter, __pyx_v_start, __pyx_v_end, (&__pyx_v_weighted_n_node_samples)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 443, __pyx_L1_error)
 
 0444: 
-
+0445:         if is_first:
+
+0445:         if is_first:
  __pyx_t_3 = (__pyx_v_is_first != 0);
   if (__pyx_t_3) {
 /* … */
   }
-
+0446:             impurity = splitter.node_impurity()
+
+0446:             impurity = splitter.node_impurity()
    __pyx_v_impurity = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_impurity(__pyx_v_splitter);
 
 0447: 
-
+0448:         n_node_samples = end - start
+
+0448:         n_node_samples = end - start
  __pyx_v_n_node_samples = (__pyx_v_end - __pyx_v_start);
-
+0449:         is_leaf = (depth >= self.max_depth or
+
+0449:         is_leaf = (depth >= self.max_depth or
  __pyx_t_4 = ((__pyx_v_depth >= __pyx_v_self->__pyx_base.max_depth) != 0);
   if (!__pyx_t_4) {
   } else {
     __pyx_t_3 = __pyx_t_4;
     goto __pyx_L4_bool_binop_done;
   }
-
+0450:                    n_node_samples < self.min_samples_split or
+
+0450:                    n_node_samples < self.min_samples_split or
  __pyx_t_4 = ((__pyx_v_n_node_samples < __pyx_v_self->__pyx_base.min_samples_split) != 0);
   if (!__pyx_t_4) {
   } else {
     __pyx_t_3 = __pyx_t_4;
     goto __pyx_L4_bool_binop_done;
   }
-
+0451:                    n_node_samples < 2 * self.min_samples_leaf or
+
+0451:                    n_node_samples < 2 * self.min_samples_leaf or
  __pyx_t_4 = ((__pyx_v_n_node_samples < (2 * __pyx_v_self->__pyx_base.min_samples_leaf)) != 0);
   if (!__pyx_t_4) {
   } else {
     __pyx_t_3 = __pyx_t_4;
     goto __pyx_L4_bool_binop_done;
   }
-
+0452:                    weighted_n_node_samples < 2 * self.min_weight_leaf or
+
+0452:                    weighted_n_node_samples < 2 * self.min_weight_leaf or
  __pyx_t_4 = ((__pyx_v_weighted_n_node_samples < (2.0 * __pyx_v_self->__pyx_base.min_weight_leaf)) != 0);
   if (!__pyx_t_4) {
   } else {
     __pyx_t_3 = __pyx_t_4;
     goto __pyx_L4_bool_binop_done;
   }
-
+0453:                    impurity <= min_impurity_split)
+
+0453:                    impurity <= min_impurity_split)
  __pyx_t_4 = ((__pyx_v_impurity <= __pyx_v_min_impurity_split) != 0);
   __pyx_t_3 = __pyx_t_4;
   __pyx_L4_bool_binop_done:;
   __pyx_v_is_leaf = __pyx_t_3;
 
 0454: 
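
The pre-split leaf test above (source lines 0449-0453) is a plain disjunction of depth, sample-count, weight, and impurity guards. Spelled out as a standalone predicate, with `params` an assumed container for the builder's hyper-parameters:

    def pre_split_is_leaf(depth, n_node_samples, weighted_n_node_samples,
                          impurity, params):
        # Transcription of the stopping rule above; any one guard
        # firing seals the node as a leaf before a split is attempted.
        return (depth >= params.max_depth
                or n_node_samples < params.min_samples_split
                or n_node_samples < 2 * params.min_samples_leaf
                or weighted_n_node_samples < 2 * params.min_weight_leaf
                or impurity <= params.min_impurity_split)
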
-
+0455:         if not is_leaf:
+
+0455:         if not is_leaf:
  __pyx_t_3 = ((!(__pyx_v_is_leaf != 0)) != 0);
   if (__pyx_t_3) {
 /* … */
   }
-
+0456:             splitter.node_split(impurity, &split, &n_constant_features)
+
+0456:             splitter.node_split(impurity, &split, &n_constant_features)
    __pyx_t_2 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_split(__pyx_v_splitter, __pyx_v_impurity, (&__pyx_v_split), (&__pyx_v_n_constant_features)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 456, __pyx_L1_error)
-
 0457:             # If EPSILON=0 in the below comparison, float precision issues stop
-
 0458:             # splitting early, producing trees that are dissimilar to v0.18
-
+0459:             is_leaf = (is_leaf or split.pos >= end or
+
 0457:             # If EPSILON=0 in the below comparison, float precision issues stop
+
 0458:             # splitting early, producing trees that are dissimilar to v0.18
+
+0459:             is_leaf = (is_leaf or split.pos >= end or
    __pyx_t_4 = (__pyx_v_is_leaf != 0);
     if (!__pyx_t_4) {
     } else {
@@ -3024,98 +3098,98 @@
       __pyx_t_3 = __pyx_t_4;
       goto __pyx_L10_bool_binop_done;
     }
-
+0460:                        split.improvement + EPSILON < min_impurity_decrease)
+
+0460:                        split.improvement + EPSILON < min_impurity_decrease)
    __pyx_t_4 = (((__pyx_v_split.improvement + __pyx_v_13stpredictions_6models_3OK3_5_tree_EPSILON) < __pyx_v_min_impurity_decrease) != 0);
     __pyx_t_3 = __pyx_t_4;
     __pyx_L10_bool_binop_done:;
     __pyx_v_is_leaf = __pyx_t_3;
 
 0461: 
-
+0462:         node_id = tree._add_node(parent - tree.nodes
+
+0462:         node_id = tree._add_node(parent - tree.nodes
    __pyx_t_5 = (__pyx_v_parent - __pyx_v_tree->nodes);
   } else {
 /* … */
   __pyx_t_6 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_add_node(__pyx_v_tree, __pyx_t_5, __pyx_v_is_left, __pyx_v_is_leaf, __pyx_v_split.feature, __pyx_v_split.threshold, __pyx_v_impurity, __pyx_v_n_node_samples, __pyx_v_weighted_n_node_samples); if (unlikely(__pyx_t_6 == ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)-1))) __PYX_ERR(0, 462, __pyx_L1_error)
   __pyx_v_node_id = __pyx_t_6;
-
+0463:                                  if parent != NULL
+
+0463:                                  if parent != NULL
  if (((__pyx_v_parent != NULL) != 0)) {
-
+0464:                                  else _TREE_UNDEFINED,
+
+0464:                                  else _TREE_UNDEFINED,
    __pyx_t_5 = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED;
   }
-
 0465:                                  is_left, is_leaf,
-
 0466:                                  split.feature, split.threshold, impurity, n_node_samples,
-
 0467:                                  weighted_n_node_samples)
-
+0468:         if node_id == SIZE_MAX:
+
 0465:                                  is_left, is_leaf,
+
 0466:                                  split.feature, split.threshold, impurity, n_node_samples,
+
 0467:                                  weighted_n_node_samples)
+
+0468:         if node_id == SIZE_MAX:
  __pyx_t_3 = ((__pyx_v_node_id == SIZE_MAX) != 0);
   if (__pyx_t_3) {
 /* … */
   }
-
+0469:             return -1
+
+0469:             return -1
    __pyx_r = -1;
     goto __pyx_L0;
 
 0470: 
-
 0471:         # compute values also for split nodes (might become leaves later).
-
+0472:         splitter.node_value(tree.value + node_id * n_samples)
+
 0471:         # compute values also for split nodes (might become leaves later).
+
+0472:         splitter.node_value(tree.value + node_id * n_samples)
  ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_9_splitter_Splitter *)__pyx_v_splitter->__pyx_vtab)->node_value(__pyx_v_splitter, (__pyx_v_tree->value + (__pyx_v_node_id * __pyx_v_n_samples)));
 
 0473: 
-
+0474:         res.node_id = node_id
+
+0474:         res.node_id = node_id
  __pyx_v_res->node_id = __pyx_v_node_id;
-
+0475:         res.start = start
+
+0475:         res.start = start
  __pyx_v_res->start = __pyx_v_start;
-
+0476:         res.end = end
+
+0476:         res.end = end
  __pyx_v_res->end = __pyx_v_end;
-
+0477:         res.depth = depth
+
+0477:         res.depth = depth
  __pyx_v_res->depth = __pyx_v_depth;
-
+0478:         res.impurity = impurity
+
+0478:         res.impurity = impurity
  __pyx_v_res->impurity = __pyx_v_impurity;
 
 0479: 
-
+0480:         if not is_leaf:
+
+0480:         if not is_leaf:
  __pyx_t_3 = ((!(__pyx_v_is_leaf != 0)) != 0);
   if (__pyx_t_3) {
 /* … */
     goto __pyx_L14;
   }
-
 0481:             # is split node
-
+0482:             res.pos = split.pos
+
 0481:             # is split node
+
+0482:             res.pos = split.pos
    __pyx_t_7 = __pyx_v_split.pos;
     __pyx_v_res->pos = __pyx_t_7;
-
+0483:             res.is_leaf = 0
+
+0483:             res.is_leaf = 0
    __pyx_v_res->is_leaf = 0;
-
+0484:             res.improvement = split.improvement
+
+0484:             res.improvement = split.improvement
    __pyx_t_1 = __pyx_v_split.improvement;
     __pyx_v_res->improvement = __pyx_t_1;
-
+0485:             res.impurity_left = split.impurity_left
+
+0485:             res.impurity_left = split.impurity_left
    __pyx_t_1 = __pyx_v_split.impurity_left;
     __pyx_v_res->impurity_left = __pyx_t_1;
-
+0486:             res.impurity_right = split.impurity_right
+
+0486:             res.impurity_right = split.impurity_right
    __pyx_t_1 = __pyx_v_split.impurity_right;
     __pyx_v_res->impurity_right = __pyx_t_1;
 
 0487: 
-
 0488:         else:
-
 0489:             # is leaf => 0 improvement
-
+0490:             res.pos = end
+
 0488:         else:
+
 0489:             # is leaf => 0 improvement
+
+0490:             res.pos = end
  /*else*/ {
     __pyx_v_res->pos = __pyx_v_end;
-
+0491:             res.is_leaf = 1
+
+0491:             res.is_leaf = 1
    __pyx_v_res->is_leaf = 1;
-
+0492:             res.improvement = 0.0
+
+0492:             res.improvement = 0.0
    __pyx_v_res->improvement = 0.0;
-
+0493:             res.impurity_left = impurity
+
+0493:             res.impurity_left = impurity
    __pyx_v_res->impurity_left = __pyx_v_impurity;
-
+0494:             res.impurity_right = impurity
+
+0494:             res.impurity_right = impurity
    __pyx_v_res->impurity_right = __pyx_v_impurity;
   }
   __pyx_L14:;
 
 0495: 
-
+0496:         return 0
+
+0496:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0497: 
 0498: 
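
Note how the result record is filled on the two branches above: a split node carries the split position and improvement, while a leaf gets pos == end and zero improvement, so the frontier never prefers it and it is never expanded again. An assumed dict-based analogue of PriorityHeapRecord makes the symmetry explicit:

    def fill_record(res, split, is_leaf, start, end, depth, impurity, node_id):
        res.update(node_id=node_id, start=start, end=end,
                   depth=depth, impurity=impurity)
        if not is_leaf:
            res.update(pos=split["pos"], is_leaf=False,
                       improvement=split["improvement"],
                       impurity_left=split["impurity_left"],
                       impurity_right=split["impurity_right"])
        else:
            # leaf => 0 improvement, mirroring source lines 0490-0494 above
            res.update(pos=end, is_leaf=True, improvement=0.0,
                       impurity_left=impurity, impurity_right=impurity)
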
-
 0499: # =============================================================================
-
 0500: # Tree
-
 0501: # =============================================================================
+
 0499: # =============================================================================
+
 0500: # Tree
+
 0501: # =============================================================================
 0502: 
-
+0503: cdef class Tree:
+
+0503: cdef class Tree:
struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree {
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t (*_add_node)(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t, int, int, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t, double, double, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t, double);
   int (*_resize)(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t);
@@ -3138,74 +3212,74 @@
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__decision_path_dense(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *, PyObject *);
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__decision_path_sparse_csr(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *, PyObject *);
 
-
 0504:     """Array-based representation of a binary decision tree.
+
 0504:     """Array-based representation of a binary decision tree.
 0505: 
-
 0506:     The binary tree is represented as a number of parallel arrays. The i-th
-
 0507:     element of each array holds information about the node `i`. Node 0 is the
-
 0508:     tree's root. You can find a detailed description of all arrays in
-
 0509:     `_tree.pxd`. NOTE: Some of the arrays only apply to either leaves or split
-
 0510:     nodes, resp. In this case the values of nodes of the other type are
-
 0511:     arbitrary!
+
 0506:     The binary tree is represented as a number of parallel arrays. The i-th
+
 0507:     element of each array holds information about the node `i`. Node 0 is the
+
 0508:     tree's root. You can find a detailed description of all arrays in
+
 0509:     `_tree.pxd`. NOTE: Some of the arrays only apply to either leaves or split
+
 0510:     nodes, resp. In this case the values of nodes of the other type are
+
 0511:     arbitrary!
 0512: 
-
 0513:     Attributes
-
 0514:     ----------
-
 0515:     node_count : int
-
 0516:         The number of nodes (internal nodes + leaves) in the tree.
+
 0513:     Attributes
+
 0514:     ----------
+
 0515:     node_count : int
+
 0516:         The number of nodes (internal nodes + leaves) in the tree.
 0517: 
-
 0518:     capacity : int
-
 0519:         The current capacity (i.e., size) of the arrays, which is at least as
-
 0520:         great as `node_count`.
+
 0518:     capacity : int
+
 0519:         The current capacity (i.e., size) of the arrays, which is at least as
+
 0520:         great as `node_count`.
 0521: 
-
 0522:     max_depth : int
-
 0523:         The depth of the tree, i.e. the maximum depth of its leaves.
-
 0524: 
-
 0525:     value : array of double, shape [node_count, n_train_samples]
-
 0526:         Gives, for each node, the weighted list of training samples
-
 0527:         falling in the leaf/leaves below that node.
-
 0528:         (Roughly the inverse of the array given by the 'apply' function.)
-
 0529: 
-
 0530:     K_y : array of double, shape [n_train_samples, n_train_samples]
-
 0531:         The training output Gram matrix (used to compute the predictions)
+
 0522:     max_depth : int
+
 0523:         The depth of the tree, i.e. the maximum depth of its leaves.
+
 0524:     
+
 0525:     value : array of double, shape [node_count, n_train_samples]
+
 0526:         Gives, for each node, the weighted list of training samples
+
 0527:         falling in the leaf/leaves below that node.
+
 0528:         (Roughly the inverse of the array given by the 'apply' function.)
+
 0529: 
+
 0530:     K_y : array of double, shape [n_train_samples, n_train_samples]
+
 0531:         The training output Gram matrix (used to compute the predictions)
 0532: 
-
 0533:     y : array of double, shape [n_train_samples, output_vector_length]
-
 0534:         The training output matrix
+
 0533:     y : array of double, shape [n_train_samples, output_vector_length]
+
 0534:         The training output matrix
 0535: 
-
 0536:     children_left : array of int, shape [node_count]
-
 0537:         children_left[i] holds the node id of the left child of node i.
-
 0538:         For leaves, children_left[i] == TREE_LEAF. Otherwise,
-
 0539:         children_left[i] > i. This child handles the case where
-
 0540:         X[:, feature[i]] <= threshold[i].
+
 0536:     children_left : array of int, shape [node_count]
+
 0537:         children_left[i] holds the node id of the left child of node i.
+
 0538:         For leaves, children_left[i] == TREE_LEAF. Otherwise,
+
 0539:         children_left[i] > i. This child handles the case where
+
 0540:         X[:, feature[i]] <= threshold[i].
 0541: 
-
 0542:     children_right : array of int, shape [node_count]
-
 0543:         children_right[i] holds the node id of the right child of node i.
-
 0544:         For leaves, children_right[i] == TREE_LEAF. Otherwise,
-
 0545:         children_right[i] > i. This child handles the case where
-
 0546:         X[:, feature[i]] > threshold[i].
+
 0542:     children_right : array of int, shape [node_count]
+
 0543:         children_right[i] holds the node id of the right child of node i.
+
 0544:         For leaves, children_right[i] == TREE_LEAF. Otherwise,
+
 0545:         children_right[i] > i. This child handles the case where
+
 0546:         X[:, feature[i]] > threshold[i].
 0547: 
-
 0548:     feature : array of int, shape [node_count]
-
 0549:         feature[i] holds the feature to split on, for the internal node i.
+
 0548:     feature : array of int, shape [node_count]
+
 0549:         feature[i] holds the feature to split on, for the internal node i.
 0550: 
-
 0551:     threshold : array of double, shape [node_count]
-
 0552:         threshold[i] holds the threshold for the internal node i.
+
 0551:     threshold : array of double, shape [node_count]
+
 0552:         threshold[i] holds the threshold for the internal node i.
 0553: 
-
 0554:     impurity : array of double, shape [node_count]
-
 0555:         impurity[i] holds the impurity (i.e., the value of the splitting
-
 0556:         criterion) at node i.
+
 0554:     impurity : array of double, shape [node_count]
+
 0555:         impurity[i] holds the impurity (i.e., the value of the splitting
+
 0556:         criterion) at node i.
 0557: 
-
 0558:     n_node_samples : array of int, shape [node_count]
-
 0559:         n_node_samples[i] holds the number of training samples reaching node i.
+
 0558:     n_node_samples : array of int, shape [node_count]
+
 0559:         n_node_samples[i] holds the number of training samples reaching node i.
 0560: 
-
 0561:     weighted_n_node_samples : array of int, shape [node_count]
-
 0562:         weighted_n_node_samples[i] holds the weighted number of training samples
-
 0563:         reaching node i.
-
 0564:     """
-
 0565:     # Wrap for outside world.
-
 0566:     # WARNING: these reference the current `nodes` buffers, which
-
 0567:     # must not be freed by a subsequent memory allocation.
-
 0568:     # (i.e. through `_resize` or `__setstate__`)
+
 0561:     weighted_n_node_samples : array of int, shape [node_count]
+
 0562:         weighted_n_node_samples[i] holds the weighted number of training samples
+
 0563:         reaching node i.
+
 0564:     """
+
 0565:     # Wrap for outside world.
+
 0566:     # WARNING: these reference the current `nodes` buffers, which
+
 0567:     # must not be freed by a subsequent memory allocation.
+
 0568:     # (i.e. through `_resize` or `__setstate__`)
 0569: 
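
Given the parallel-array layout the docstring describes, routing one sample to its leaf is a short loop over children_left/children_right. A hedged sketch, assuming array attributes matching the properties defined below and the -1 TREE_LEAF sentinel:

    def apply_one(tree, x):
        # Descend from the root: go left while x[feature] <= threshold,
        # right otherwise; leaves have children_left == TREE_LEAF (-1).
        node = 0
        while tree.children_left[node] != -1:
            if x[tree.feature[node]] <= tree.threshold[node]:
                node = tree.children_left[node]
            else:
                node = tree.children_right[node]
        return node
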
-
 0570:     property children_left:
-
+0571:         def __get__(self):
+
 0570:     property children_left:
+
+0571:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_13children_left_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_13children_left_1__get__(PyObject *__pyx_v_self) {
@@ -3235,7 +3309,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0572:             return self._get_node_ndarray()['left_child'][:self.node_count]
+
+0572:             return self._get_node_ndarray()['left_child'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 572, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3249,8 +3323,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0573: 
-
 0574:     property children_right:
-
+0575:         def __get__(self):
+
 0574:     property children_right:
+
+0575:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_14children_right_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_14children_right_1__get__(PyObject *__pyx_v_self) {
@@ -3280,7 +3354,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0576:             return self._get_node_ndarray()['right_child'][:self.node_count]
+
+0576:             return self._get_node_ndarray()['right_child'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 576, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3294,8 +3368,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0577: 
-
 0578:     property n_leaves:
-
+0579:         def __get__(self):
+
 0578:     property n_leaves:
+
+0579:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_8n_leaves_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_8n_leaves_1__get__(PyObject *__pyx_v_self) {
@@ -3331,7 +3405,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0580:             return np.sum(np.logical_and(
+
+0580:             return np.sum(np.logical_and(
  __Pyx_XDECREF(__pyx_r);
   __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 580, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
@@ -3343,13 +3417,13 @@
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_logical_and); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 580, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
+0581:                 self.children_left == -1,
+
+0581:                 self.children_left == -1,
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_children_left); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 581, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_6 = __Pyx_PyInt_EqObjC(__pyx_t_4, __pyx_int_neg_1, -1L, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 581, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_6);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
+0582:                 self.children_right == -1))
+
+0582:                 self.children_right == -1))
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_children_right); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 582, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_7 = __Pyx_PyInt_EqObjC(__pyx_t_4, __pyx_int_neg_1, -1L, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 582, __pyx_L1_error)
@@ -3423,8 +3497,8 @@
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
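
The n_leaves property above counts leaves by testing both child arrays against the -1 sentinel at once. The equivalent standalone computation:

    import numpy as np

    def n_leaves(children_left, children_right):
        # A node is a leaf exactly when both child ids are TREE_LEAF (-1).
        return int(np.sum(np.logical_and(np.asarray(children_left) == -1,
                                         np.asarray(children_right) == -1)))
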
-
 0583:     property feature:
-
+0584:         def __get__(self):
+
 0583:     property feature:
+
+0584:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_7feature_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_7feature_1__get__(PyObject *__pyx_v_self) {
@@ -3454,7 +3528,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0585:             return self._get_node_ndarray()['feature'][:self.node_count]
+
+0585:             return self._get_node_ndarray()['feature'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 585, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3468,8 +3542,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0586: 
-
 0587:     property threshold:
-
+0588:         def __get__(self):
+
 0587:     property threshold:
+
+0588:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_9threshold_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_9threshold_1__get__(PyObject *__pyx_v_self) {
@@ -3499,7 +3573,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0589:             return self._get_node_ndarray()['threshold'][:self.node_count]
+
+0589:             return self._get_node_ndarray()['threshold'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 589, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3513,8 +3587,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0590: 
-
 0591:     property impurity:
-
+0592:         def __get__(self):
+
 0591:     property impurity:
+
+0592:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_8impurity_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_8impurity_1__get__(PyObject *__pyx_v_self) {
@@ -3544,7 +3618,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0593:             return self._get_node_ndarray()['impurity'][:self.node_count]
+
+0593:             return self._get_node_ndarray()['impurity'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 593, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3558,8 +3632,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0594: 
-
 0595:     property n_node_samples:
-
+0596:         def __get__(self):
+
 0595:     property n_node_samples:
+
+0596:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_14n_node_samples_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_14n_node_samples_1__get__(PyObject *__pyx_v_self) {
@@ -3589,7 +3663,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0597:             return self._get_node_ndarray()['n_node_samples'][:self.node_count]
+
+0597:             return self._get_node_ndarray()['n_node_samples'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 597, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3603,8 +3677,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0598: 
-
 0599:     property weighted_n_node_samples:
-
+0600:         def __get__(self):
+
 0599:     property weighted_n_node_samples:
+
+0600:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_23weighted_n_node_samples_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_23weighted_n_node_samples_1__get__(PyObject *__pyx_v_self) {
@@ -3634,7 +3708,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0601:             return self._get_node_ndarray()['weighted_n_node_samples'][:self.node_count]
+
+0601:             return self._get_node_ndarray()['weighted_n_node_samples'][:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 601, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3648,8 +3722,8 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0602: 
-
 0603:     property value:
-
+0604:         def __get__(self):
+
 0603:     property value:
+
+0604:         def __get__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_5value_1__get__(PyObject *__pyx_v_self); /*proto*/
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_5value_1__get__(PyObject *__pyx_v_self) {
@@ -3679,7 +3753,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+0605:             return self._get_value_ndarray()[:self.node_count]
+
+0605:             return self._get_value_ndarray()[:self.node_count]
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_value_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 605, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3690,7 +3764,7 @@
   __pyx_t_2 = 0;
   goto __pyx_L0;
 
 0606: 
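Each `property` block above (`feature`, `threshold`, `impurity`, `n_node_samples`, `weighted_n_node_samples`, `value`) slices its backing array to the first `node_count` entries, so callers never see the unused tail of the over-allocated buffer. A rough pure-Python sketch of that pattern (class and field names here are illustrative, not the real API):

import numpy as np

class NodeStore:
    # Sketch: expose one field of an over-allocated record array,
    # trimmed to the number of nodes actually in use.
    def __init__(self, capacity):
        self._nodes = np.zeros(capacity,
                               dtype=[("feature", np.intp), ("threshold", np.float64)])
        self.node_count = 0

    @property
    def feature(self):
        # Same idea as self._get_node_ndarray()['feature'][:self.node_count].
        return self._nodes["feature"][:self.node_count]

store = NodeStore(capacity=8)
store.node_count = 3
print(store.feature.shape)  # (3,) even though capacity is 8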
-
+0607:     def __cinit__(self, int n_features, int n_samples):
+
+0607:     def __cinit__(self, int n_features, int n_samples):
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -3774,19 +3848,19 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0608:         """Constructor."""
-
 0609:         # Input/Output layout
-
+0610:         self.n_features = n_features
+
 0608:         """Constructor."""
+
 0609:         # Input/Output layout
+
+0610:         self.n_features = n_features
  __pyx_v_self->n_features = __pyx_v_n_features;
 
 0611: 
-
 0612:         # Inner structures
-
+0613:         self.max_depth = 0
+
 0612:         # Inner structures
+
+0613:         self.max_depth = 0
  __pyx_v_self->max_depth = 0;
-
+0614:         self.node_count = 0
+
+0614:         self.node_count = 0
  __pyx_v_self->node_count = 0;
-
+0615:         self.capacity = 0
+
+0615:         self.capacity = 0
  __pyx_v_self->capacity = 0;
-
+0616:         self.K_y = np.zeros((n_samples,), dtype=DOUBLE)
+
+0616:         self.K_y = np.zeros((n_samples,), dtype=DOUBLE)
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 616, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 616, __pyx_L1_error)
@@ -3821,18 +3895,18 @@
   __Pyx_DECREF(((PyObject *)__pyx_v_self->K_y));
   __pyx_v_self->K_y = ((PyArrayObject *)__pyx_t_4);
   __pyx_t_4 = 0;
-
+0617:         self.y = None
+
+0617:         self.y = None
  __Pyx_INCREF(Py_None);
   __Pyx_GIVEREF(Py_None);
   __Pyx_GOTREF(__pyx_v_self->y);
   __Pyx_DECREF(((PyObject *)__pyx_v_self->y));
   __pyx_v_self->y = ((PyArrayObject *)Py_None);
-
+0618:         self.nodes = NULL
+
+0618:         self.nodes = NULL
  __pyx_v_self->nodes = NULL;
-
+0619:         self.value = NULL
+
+0619:         self.value = NULL
  __pyx_v_self->value = NULL;
 
 0620: 
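`__cinit__` above only establishes the layout: counters start at zero, `K_y` becomes a zero-filled double array of length `n_samples`, and the raw `nodes`/`value` buffers stay NULL until the first `_resize_c` call allocates them. A hedged Python mirror of that initial state (`TreeSketch` is a toy stand-in, not the real class):

import numpy as np

class TreeSketch:
    def __init__(self, n_features, n_samples):
        self.n_features = n_features
        self.max_depth = 0
        self.node_count = 0
        self.capacity = 0
        # One double per training sample, zero-initialised (line 616).
        self.K_y = np.zeros((n_samples,), dtype=np.float64)
        self.y = None
        self.nodes = None  # stands in for the NULL C pointer
        self.value = None  # allocated later by _resize_c

t = TreeSketch(n_features=4, n_samples=10)
print(t.capacity, t.K_y.shape)  # 0 (10,)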
-
+0621:     def __dealloc__(self):
+
+0621:     def __dealloc__(self):
/* Python wrapper */
 static void __pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
 static void __pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_3__dealloc__(PyObject *__pyx_v_self) {
@@ -3851,14 +3925,14 @@
   /* function exit code */
   __Pyx_RefNannyFinishContext();
 }
-
 0622:         """Destructor."""
-
 0623:         # Free all inner structures
-
+0624:         free(self.nodes)
+
 0622:         """Destructor."""
+
 0623:         # Free all inner structures
+
+0624:         free(self.nodes)
  free(__pyx_v_self->nodes);
-
+0625:         free(self.value)
+
+0625:         free(self.value)
  free(__pyx_v_self->value);
 
 0626: 
-
+0627:     def __reduce__(self):
+
+0627:     def __reduce__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_5__reduce__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static char __pyx_doc_13stpredictions_6models_3OK3_5_tree_4Tree_4__reduce__[] = "Reduce re-implementation, for pickling.";
@@ -3891,8 +3965,8 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0628:         """Reduce re-implementation, for pickling."""
-
+0629:         return (Tree, (self.n_features,self.K_y.shape[0]), self.__getstate__())
+
 0628:         """Reduce re-implementation, for pickling."""
+
+0629:         return (Tree, (self.n_features,self.K_y.shape[0]), self.__getstate__())
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->n_features); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 629, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
@@ -3938,7 +4012,7 @@
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
 0630: 
-
+0631:     def __getstate__(self):
+
+0631:     def __getstate__(self):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_7__getstate__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
 static char __pyx_doc_13stpredictions_6models_3OK3_5_tree_4Tree_6__getstate__[] = "Getstate re-implementation, for pickling.";
@@ -3970,50 +4044,50 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0632:         """Getstate re-implementation, for pickling."""
-
+0633:         d = {}
+
 0632:         """Getstate re-implementation, for pickling."""
+
+0633:         d = {}
  __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 633, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_d = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
-
 0634:         # capacity is inferred during the __setstate__ using nodes
-
+0635:         d["max_depth"] = self.max_depth
+
 0634:         # capacity is inferred during the __setstate__ using nodes
+
+0635:         d["max_depth"] = self.max_depth
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->max_depth); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 635, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (unlikely(PyDict_SetItem(__pyx_v_d, __pyx_n_s_max_depth, __pyx_t_1) < 0)) __PYX_ERR(0, 635, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0636:         d["node_count"] = self.node_count
+
+0636:         d["node_count"] = self.node_count
  __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->node_count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 636, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (unlikely(PyDict_SetItem(__pyx_v_d, __pyx_n_s_node_count, __pyx_t_1) < 0)) __PYX_ERR(0, 636, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0637:         d["nodes"] = self._get_node_ndarray()
+
+0637:         d["nodes"] = self._get_node_ndarray()
  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_node_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 637, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (unlikely(PyDict_SetItem(__pyx_v_d, __pyx_n_s_nodes, __pyx_t_1) < 0)) __PYX_ERR(0, 637, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0638:         d["values"] = self._get_value_ndarray()
+
+0638:         d["values"] = self._get_value_ndarray()
  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_value_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 638, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (unlikely(PyDict_SetItem(__pyx_v_d, __pyx_n_s_values, __pyx_t_1) < 0)) __PYX_ERR(0, 638, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0639:         d["K_y"] = self.K_y
+
+0639:         d["K_y"] = self.K_y
  __pyx_t_1 = ((PyObject *)__pyx_v_self->K_y);
   __Pyx_INCREF(__pyx_t_1);
   if (unlikely(PyDict_SetItem(__pyx_v_d, __pyx_n_s_K_y, __pyx_t_1) < 0)) __PYX_ERR(0, 639, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0640:         d["y"] = self.y
+
+0640:         d["y"] = self.y
  __pyx_t_1 = ((PyObject *)__pyx_v_self->y);
   __Pyx_INCREF(__pyx_t_1);
   if (unlikely(PyDict_SetItem(__pyx_v_d, __pyx_n_s_y, __pyx_t_1) < 0)) __PYX_ERR(0, 640, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+0641:         return d
+
+0641:         return d
  __Pyx_XDECREF(__pyx_r);
   __Pyx_INCREF(__pyx_v_d);
   __pyx_r = __pyx_v_d;
   goto __pyx_L0;
 
 0642: 
-
+0643:     def __setstate__(self, d):
+
+0643:     def __setstate__(self, d):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_9__setstate__(PyObject *__pyx_v_self, PyObject *__pyx_v_d); /*proto*/
 static char __pyx_doc_13stpredictions_6models_3OK3_5_tree_4Tree_8__setstate__[] = "Setstate re-implementation, for unpickling.";
@@ -4055,20 +4129,20 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0644:         """Setstate re-implementation, for unpickling."""
-
+0645:         self.max_depth = d["max_depth"]
+
 0644:         """Setstate re-implementation, for unpickling."""
+
+0645:         self.max_depth = d["max_depth"]
  __pyx_t_1 = __Pyx_PyObject_Dict_GetItem(__pyx_v_d, __pyx_n_s_max_depth); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 645, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_2 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 645, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_self->max_depth = __pyx_t_2;
-
+0646:         self.node_count = d["node_count"]
+
+0646:         self.node_count = d["node_count"]
  __pyx_t_1 = __Pyx_PyObject_Dict_GetItem(__pyx_v_d, __pyx_n_s_node_count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 646, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_2 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 646, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_self->node_count = __pyx_t_2;
-
+0647:         self.K_y = d["K_y"]
+
+0647:         self.K_y = d["K_y"]
  __pyx_t_1 = __Pyx_PyObject_Dict_GetItem(__pyx_v_d, __pyx_n_s_K_y); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 647, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 647, __pyx_L1_error)
@@ -4077,7 +4151,7 @@
   __Pyx_DECREF(((PyObject *)__pyx_v_self->K_y));
   __pyx_v_self->K_y = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
-
+0648:         self.y = d["y"]
+
+0648:         self.y = d["y"]
  __pyx_t_1 = __Pyx_PyObject_Dict_GetItem(__pyx_v_d, __pyx_n_s_y); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 648, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 648, __pyx_L1_error)
@@ -4087,13 +4161,13 @@
   __pyx_v_self->y = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
 0649: 
-
+0650:         if 'nodes' not in d:
+
+0650:         if 'nodes' not in d:
  __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_n_s_nodes, __pyx_v_d, Py_NE)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 650, __pyx_L1_error)
   __pyx_t_4 = (__pyx_t_3 != 0);
   if (unlikely(__pyx_t_4)) {
 /* … */
   }
-
+0651:             raise ValueError('You have loaded Tree version which '
+
+0651:             raise ValueError('You have loaded Tree version which '
    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 651, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
@@ -4103,20 +4177,20 @@
   __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_You_have_loaded_Tree_version_whi); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 651, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_tuple__6);
   __Pyx_GIVEREF(__pyx_tuple__6);
-
 0652:                              'cannot be imported')
+
 0652:                              'cannot be imported')
 0653: 
-
+0654:         node_ndarray = d['nodes']
+
+0654:         node_ndarray = d['nodes']
  __pyx_t_1 = __Pyx_PyObject_Dict_GetItem(__pyx_v_d, __pyx_n_s_nodes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 654, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_node_ndarray = __pyx_t_1;
   __pyx_t_1 = 0;
-
+0655:         value_ndarray = d['values']
+
+0655:         value_ndarray = d['values']
  __pyx_t_1 = __Pyx_PyObject_Dict_GetItem(__pyx_v_d, __pyx_n_s_values); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 655, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_value_ndarray = __pyx_t_1;
   __pyx_t_1 = 0;
 
 0656: 
-
+0657:         value_shape = (node_ndarray.shape[0], self.K_y.shape[0])
+
+0657:         value_shape = (node_ndarray.shape[0], self.K_y.shape[0])
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_node_ndarray, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 657, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 657, __pyx_L1_error)
@@ -4134,7 +4208,7 @@
   __pyx_t_1 = 0;
   __pyx_v_value_shape = ((PyObject*)__pyx_t_6);
   __pyx_t_6 = 0;
-
+0658:         if (node_ndarray.ndim != 1 or
+
+0658:         if (node_ndarray.ndim != 1 or
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_node_ndarray, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 658, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_6);
   __pyx_t_1 = __Pyx_PyInt_NeObjC(__pyx_t_6, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 658, __pyx_L1_error)
@@ -4151,7 +4225,7 @@
   if (unlikely(__pyx_t_4)) {
 /* … */
   }
-
+0659:                 node_ndarray.dtype != NODE_DTYPE or
+
+0659:                 node_ndarray.dtype != NODE_DTYPE or
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_node_ndarray, __pyx_n_s_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 659, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_NODE_DTYPE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 659, __pyx_L1_error)
@@ -4166,7 +4240,7 @@
     __pyx_t_4 = __pyx_t_3;
     goto __pyx_L5_bool_binop_done;
   }
-
+0660:                 not node_ndarray.flags.c_contiguous or
+
+0660:                 not node_ndarray.flags.c_contiguous or
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_node_ndarray, __pyx_n_s_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 660, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_c_contiguous); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 660, __pyx_L1_error)
@@ -4180,7 +4254,7 @@
     __pyx_t_4 = __pyx_t_7;
     goto __pyx_L5_bool_binop_done;
   }
-
+0661:                 value_ndarray.shape != value_shape or
+
+0661:                 value_ndarray.shape != value_shape or
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_value_ndarray, __pyx_n_s_shape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 661, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_6);
   __pyx_t_5 = PyObject_RichCompare(__pyx_t_6, __pyx_v_value_shape, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 661, __pyx_L1_error)
@@ -4192,7 +4266,7 @@
     __pyx_t_4 = __pyx_t_7;
     goto __pyx_L5_bool_binop_done;
   }
-
+0662:                 not value_ndarray.flags.c_contiguous or
+
+0662:                 not value_ndarray.flags.c_contiguous or
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_value_ndarray, __pyx_n_s_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 662, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_c_contiguous); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 662, __pyx_L1_error)
@@ -4206,7 +4280,7 @@
     __pyx_t_4 = __pyx_t_3;
     goto __pyx_L5_bool_binop_done;
   }
-
+0663:                 value_ndarray.dtype != np.float64):
+
+0663:                 value_ndarray.dtype != np.float64):
  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_value_ndarray, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 663, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_6);
   __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 663, __pyx_L1_error)
@@ -4221,7 +4295,7 @@
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   __pyx_t_4 = __pyx_t_3;
   __pyx_L5_bool_binop_done:;
-
+0664:             raise ValueError('Did not recognise loaded array layout')
+
+0664:             raise ValueError('Did not recognise loaded array layout')
    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 664, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __Pyx_Raise(__pyx_t_5, 0, 0, 0);
@@ -4232,7 +4306,7 @@
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
 
 0665: 
-
+0666:         self.capacity = node_ndarray.shape[0]
+
+0666:         self.capacity = node_ndarray.shape[0]
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_node_ndarray, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 666, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 666, __pyx_L1_error)
@@ -4241,7 +4315,7 @@
   __pyx_t_2 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_2 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 666, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_self->capacity = __pyx_t_2;
-
+0667:         if self._resize_c(self.capacity) != 0:
+
+0667:         if self._resize_c(self.capacity) != 0:
  __pyx_t_9.__pyx_n = 1;
   __pyx_t_9.capacity = __pyx_v_self->capacity;
   __pyx_t_8 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_resize_c(__pyx_v_self, &__pyx_t_9); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 667, __pyx_L1_error)
@@ -4249,7 +4323,7 @@
   if (unlikely(__pyx_t_4)) {
 /* … */
   }
-
+0668:             raise MemoryError("resizing tree to %d" % self.capacity)
+
+0668:             raise MemoryError("resizing tree to %d" % self.capacity)
    __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->capacity); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 668, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_resizing_tree_to_d, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 668, __pyx_L1_error)
@@ -4261,14 +4335,14 @@
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     __PYX_ERR(0, 668, __pyx_L1_error)
-
+0669:         nodes = memcpy(self.nodes, (<np.ndarray> node_ndarray).data,
+
+0669:         nodes = memcpy(self.nodes, (<np.ndarray> node_ndarray).data,
  __pyx_v_nodes = memcpy(__pyx_v_self->nodes, ((PyArrayObject *)__pyx_v_node_ndarray)->data, (__pyx_v_self->capacity * (sizeof(struct __pyx_t_13stpredictions_6models_3OK3_5_tree_Node))));
-
 0670:                        self.capacity * sizeof(Node))
-
+0671:         value = memcpy(self.value, (<np.ndarray> value_ndarray).data,
+
 0670:                        self.capacity * sizeof(Node))
+
+0671:         value = memcpy(self.value, (<np.ndarray> value_ndarray).data,
  __pyx_v_value = memcpy(__pyx_v_self->value, ((PyArrayObject *)__pyx_v_value_ndarray)->data, ((__pyx_v_self->capacity * (__pyx_v_self->K_y->dimensions[0])) * (sizeof(double))));
-
 0672:                        self.capacity * self.K_y.shape[0] * sizeof(double))
+
 0672:                        self.capacity * self.K_y.shape[0] * sizeof(double))
 0673: 
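Taken together, `__reduce__`, `__getstate__` and `__setstate__` above implement the standard pickle protocol: pickling stores `(Tree, (n_features, n_samples), state_dict)`, and unpickling rebuilds the object, validates the dtype, shape and C-contiguity of the saved arrays, then memcpy's them back into freshly resized buffers. A minimal sketch of the same round trip on a toy class (all names illustrative, not the real Tree API):

import pickle
import numpy as np

class Toy:
    def __init__(self, n):
        self.n = n
        self.nodes = np.zeros(n, dtype=np.float64)

    def __reduce__(self):
        # Like line 629: (callable, constructor args, state for __setstate__).
        return (Toy, (self.n,), self.__getstate__())

    def __getstate__(self):
        return {"nodes": self.nodes}

    def __setstate__(self, d):
        if "nodes" not in d:
            raise ValueError("unrecognised state")
        arr = d["nodes"]
        # Mirror the layout checks made before the memcpy (lines 658-664).
        if arr.ndim != 1 or arr.dtype != np.float64 or not arr.flags.c_contiguous:
            raise ValueError("Did not recognise loaded array layout")
        self.nodes = arr.copy()

t = Toy(3)
t.nodes[:] = [1.0, 2.0, 3.0]
t2 = pickle.loads(pickle.dumps(t))
print(t2.nodes)  # [1. 2. 3.]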
-
+0674:     cdef int _resize(self, SIZE_t capacity) nogil except -1:
+
+0674:     cdef int _resize(self, SIZE_t capacity) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__resize(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_capacity) {
   int __pyx_r;
   #ifdef WITH_THREAD
@@ -4292,7 +4366,7 @@
   #endif
   return __pyx_r;
 }
-
+0675:         """Resize all inner arrays to `capacity`; if `capacity` == -1, then
+
+0675:         """Resize all inner arrays to `capacity`; if `capacity` == -1, then
  /*try:*/ {
 /* … */
   /*finally:*/ {
@@ -4310,12 +4384,12 @@
     }
     __pyx_L5:;
   }
-
 0676:            double the size of the inner arrays.
+
 0676:            double the size of the inner arrays.
 0677: 
-
 0678:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 0679:         or 0 otherwise.
-
 0680:         """
-
+0681:         if self._resize_c(capacity) != 0:
+
 0678:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 0679:         or 0 otherwise.
+
 0680:         """
+
+0681:         if self._resize_c(capacity) != 0:
    __pyx_t_2.__pyx_n = 1;
     __pyx_t_2.capacity = __pyx_v_capacity;
     __pyx_t_1 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_resize_c(__pyx_v_self, &__pyx_t_2); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 681, __pyx_L4_error)
@@ -4324,8 +4398,8 @@
 /* … */
     }
   }
-
 0682:             # Acquire gil only if we need to raise
-
+0683:             with gil:
+
 0682:             # Acquire gil only if we need to raise
+
+0683:             with gil:
      {
           #ifdef WITH_THREAD
           PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -4341,11 +4415,11 @@
             }
           }
       }
-
+0684:                 raise MemoryError()
+
+0684:                 raise MemoryError()
            PyErr_NoMemory(); __PYX_ERR(0, 684, __pyx_L8_error)
           }
 
 0685: 
-
+0686:     cdef int _resize_c(self, SIZE_t capacity=SIZE_MAX) nogil except -1:
+
+0686:     cdef int _resize_c(self, SIZE_t capacity=SIZE_MAX) nogil except -1:
static int __pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__resize_c(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, struct __pyx_opt_args_13stpredictions_6models_3OK3_5_tree_4Tree__resize_c *__pyx_optional_args) {
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_capacity = __pyx_k__8;
   int __pyx_r;
@@ -4372,12 +4446,12 @@
 }
 /* … */
   __pyx_k__8 = SIZE_MAX;
-
 0687:         """Guts of _resize
+
 0687:         """Guts of _resize
 0688: 
-
 0689:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
-
 0690:         or 0 otherwise.
-
 0691:         """
-
+0692:         if capacity == self.capacity and self.nodes != NULL:
+
 0689:         Returns -1 in case of failure to allocate memory (and raise MemoryError)
+
 0690:         or 0 otherwise.
+
 0691:         """
+
+0692:         if capacity == self.capacity and self.nodes != NULL:
  __pyx_t_2 = ((__pyx_v_capacity == __pyx_v_self->capacity) != 0);
   if (__pyx_t_2) {
   } else {
@@ -4390,37 +4464,37 @@
   if (__pyx_t_1) {
 /* … */
   }
-
+0693:             return 0
+
+0693:             return 0
    __pyx_r = 0;
     goto __pyx_L0;
 
 0694: 
-
+0695:         if capacity == SIZE_MAX:
+
+0695:         if capacity == SIZE_MAX:
  __pyx_t_1 = ((__pyx_v_capacity == SIZE_MAX) != 0);
   if (__pyx_t_1) {
 /* … */
   }
-
+0696:             if self.capacity == 0:
+
+0696:             if self.capacity == 0:
    __pyx_t_1 = ((__pyx_v_self->capacity == 0) != 0);
     if (__pyx_t_1) {
 /* … */
       goto __pyx_L7;
     }
-
+0697:                 capacity = 3  # default initial value
+
+0697:                 capacity = 3  # default initial value
      __pyx_v_capacity = 3;
-
 0698:             else:
-
+0699:                 capacity = 2 * self.capacity
+
 0698:             else:
+
+0699:                 capacity = 2 * self.capacity
    /*else*/ {
       __pyx_v_capacity = (2 * __pyx_v_self->capacity);
     }
     __pyx_L7:;
 
 0700: 
-
+0701:         safe_realloc(&self.nodes, capacity)
+
+0701:         safe_realloc(&self.nodes, capacity)
  __pyx_fuse_6__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->nodes), __pyx_v_capacity); if (unlikely(__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 701, __pyx_L1_error)
-
+0702:         safe_realloc(&self.value, capacity * self.K_y.shape[0])
+
+0702:         safe_realloc(&self.value, capacity * self.K_y.shape[0])
  __pyx_fuse_4__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_self->value), (__pyx_v_capacity * (__pyx_v_self->K_y->dimensions[0]))); if (unlikely(__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 702, __pyx_L1_error)
 
 0703: 
-
 0704:         # value memory is initialised to 0 to enable classifier argmax
-
+0705:         if capacity > self.capacity:
+
 0704:         # value memory is initialised to 0 to enable classifier argmax
+
+0705:         if capacity > self.capacity:
   /* "stpredictions/models/OK3/_tree.pyx":705
  * 
@@ -4440,27 +4514,27 @@
  *                    (capacity - self.capacity) * self.K_y.shape[0] *
  */
   }
-
+0706:             memset(<void*>(self.value + self.capacity * self.K_y.shape[0]), 0,
+
+0706:             memset(<void*>(self.value + self.capacity * self.K_y.shape[0]), 0,
    (void)(memset(((void *)(__pyx_v_self->value + (__pyx_v_self->capacity * (__pyx_v_self->K_y->dimensions[0])))), 0, (((__pyx_v_capacity - __pyx_v_self->capacity) * (__pyx_v_self->K_y->dimensions[0])) * (sizeof(double)))));
-
 0707:                    (capacity - self.capacity) * self.K_y.shape[0] *
-
 0708:                    sizeof(double))
+
 0707:                    (capacity - self.capacity) * self.K_y.shape[0] *
+
 0708:                    sizeof(double))
 0709: 
-
 0710:         # if capacity is smaller than node_count, adjust the counter
-
+0711:         if capacity < self.node_count:
+
 0710:         # if capacity is smaller than node_count, adjust the counter
+
+0711:         if capacity < self.node_count:
  __pyx_t_1 = ((__pyx_v_capacity < __pyx_v_self->node_count) != 0);
   if (__pyx_t_1) {
 /* … */
   }
-
+0712:             self.node_count = capacity
+
+0712:             self.node_count = capacity
    __pyx_v_self->node_count = __pyx_v_capacity;
 
 0713: 
-
+0714:         self.capacity = capacity
+
+0714:         self.capacity = capacity
  __pyx_v_self->capacity = __pyx_v_capacity;
-
+0715:         return 0
+
+0715:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 0716: 
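`_resize_c` above implements amortised growth: when called with the default `capacity == SIZE_MAX` it starts at 3 nodes and doubles thereafter, reallocates both the `nodes` and `value` buffers, zero-fills the newly added `value` memory, and clamps `node_count` when shrinking. A small Python sketch of just the growth rule (illustrative only):

SIZE_MAX = 2**64 - 1  # sentinel meaning "pick a size for me", as in _resize_c

def next_capacity(current, requested=SIZE_MAX):
    # Growth rule from lines 695-699: start at 3, then double.
    if requested != SIZE_MAX:
        return requested
    return 3 if current == 0 else 2 * current

cap = 0
sizes = []
for _ in range(5):
    cap = next_capacity(cap)
    sizes.append(cap)
print(sizes)  # [3, 6, 12, 24, 48]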
-
+0717:     cdef SIZE_t _add_node(self, SIZE_t parent, bint is_left, bint is_leaf,
+
+0717:     cdef SIZE_t _add_node(self, SIZE_t parent, bint is_left, bint is_leaf,
static __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__add_node(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_parent, int __pyx_v_is_left, int __pyx_v_is_leaf, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_feature, double __pyx_v_threshold, double __pyx_v_impurity, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_n_node_samples, double __pyx_v_weighted_n_node_samples) {
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_node_id;
   struct __pyx_t_13stpredictions_6models_3OK3_5_tree_Node *__pyx_v_node;
@@ -4481,96 +4555,96 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 0718:                           SIZE_t feature, double threshold, double impurity,
-
 0719:                           SIZE_t n_node_samples,
-
 0720:                           double weighted_n_node_samples) nogil except -1:
-
 0721:         """Add a node to the tree.
+
 0718:                           SIZE_t feature, double threshold, double impurity,
+
 0719:                           SIZE_t n_node_samples,
+
 0720:                           double weighted_n_node_samples) nogil except -1:
+
 0721:         """Add a node to the tree.
 0722: 
-
 0723:         The new node registers itself as the child of its parent.
+
 0723:         The new node registers itself as the child of its parent.
 0724: 
-
 0725:         Returns (size_t)(-1) on error.
-
 0726:         """
-
+0727:         cdef SIZE_t node_id = self.node_count
+
 0725:         Returns (size_t)(-1) on error.
+
 0726:         """
+
+0727:         cdef SIZE_t node_id = self.node_count
  __pyx_t_1 = __pyx_v_self->node_count;
   __pyx_v_node_id = __pyx_t_1;
 
 0728: 
-
+0729:         if node_id >= self.capacity:
+
+0729:         if node_id >= self.capacity:
  __pyx_t_2 = ((__pyx_v_node_id >= __pyx_v_self->capacity) != 0);
   if (__pyx_t_2) {
 /* … */
   }
-
+0730:             if self._resize_c() != 0:
+
+0730:             if self._resize_c() != 0:
    __pyx_t_3 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_resize_c(__pyx_v_self, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 730, __pyx_L1_error)
     __pyx_t_2 = ((__pyx_t_3 != 0) != 0);
     if (__pyx_t_2) {
 /* … */
     }
-
+0731:                 return SIZE_MAX
+
+0731:                 return SIZE_MAX
      __pyx_r = SIZE_MAX;
       goto __pyx_L0;
 
 0732: 
-
+0733:         cdef Node* node = &self.nodes[node_id]
+
+0733:         cdef Node* node = &self.nodes[node_id]
  __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node_id]));
-
+0734:         node.impurity = impurity
+
+0734:         node.impurity = impurity
  __pyx_v_node->impurity = __pyx_v_impurity;
-
+0735:         node.n_node_samples = n_node_samples
+
+0735:         node.n_node_samples = n_node_samples
  __pyx_v_node->n_node_samples = __pyx_v_n_node_samples;
-
+0736:         node.weighted_n_node_samples = weighted_n_node_samples
+
+0736:         node.weighted_n_node_samples = weighted_n_node_samples
  __pyx_v_node->weighted_n_node_samples = __pyx_v_weighted_n_node_samples;
 
 0737: 
-
+0738:         if parent != _TREE_UNDEFINED:
+
+0738:         if parent != _TREE_UNDEFINED:
  __pyx_t_2 = ((__pyx_v_parent != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED) != 0);
   if (__pyx_t_2) {
 /* … */
   }
-
+0739:             if is_left:
+
+0739:             if is_left:
    __pyx_t_2 = (__pyx_v_is_left != 0);
     if (__pyx_t_2) {
 /* … */
       goto __pyx_L6;
     }
-
+0740:                 self.nodes[parent].left_child = node_id
+
+0740:                 self.nodes[parent].left_child = node_id
      (__pyx_v_self->nodes[__pyx_v_parent]).left_child = __pyx_v_node_id;
-
 0741:             else:
-
+0742:                 self.nodes[parent].right_child = node_id
+
 0741:             else:
+
+0742:                 self.nodes[parent].right_child = node_id
    /*else*/ {
       (__pyx_v_self->nodes[__pyx_v_parent]).right_child = __pyx_v_node_id;
     }
     __pyx_L6:;
 
 0743: 
-
+0744:         if is_leaf:
+
+0744:         if is_leaf:
  __pyx_t_2 = (__pyx_v_is_leaf != 0);
   if (__pyx_t_2) {
 /* … */
     goto __pyx_L7;
   }
-
+0745:             node.left_child = _TREE_LEAF
+
+0745:             node.left_child = _TREE_LEAF
    __pyx_v_node->left_child = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF;
-
+0746:             node.right_child = _TREE_LEAF
+
+0746:             node.right_child = _TREE_LEAF
    __pyx_v_node->right_child = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF;
-
+0747:             node.feature = _TREE_UNDEFINED
+
+0747:             node.feature = _TREE_UNDEFINED
    __pyx_v_node->feature = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED;
-
+0748:             node.threshold = _TREE_UNDEFINED
+
+0748:             node.threshold = _TREE_UNDEFINED
    __pyx_v_node->threshold = __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED;
 
 0749: 
-
 0750:         else:
-
 0751:             # left_child and right_child will be set later
-
+0752:             node.feature = feature
+
 0750:         else:
+
 0751:             # left_child and right_child will be set later
+
+0752:             node.feature = feature
  /*else*/ {
     __pyx_v_node->feature = __pyx_v_feature;
-
+0753:             node.threshold = threshold
+
+0753:             node.threshold = threshold
    __pyx_v_node->threshold = __pyx_v_threshold;
   }
   __pyx_L7:;
 
 0754: 
-
+0755:         self.node_count += 1
+
+0755:         self.node_count += 1
  __pyx_v_self->node_count = (__pyx_v_self->node_count + 1);
 
 0756: 
-
+0757:         return node_id
+
+0757:         return node_id
  __pyx_r = __pyx_v_node_id;
   goto __pyx_L0;
 
 0758: 
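`_add_node` above appends a node at index `node_count`, growing storage on demand, wires the parent's left or right child pointer to the new id, and marks leaves by setting both children to `_TREE_LEAF`. A hedged pure-Python rendering of that bookkeeping (dicts stand in for the C Node struct):

TREE_LEAF = -1
TREE_UNDEFINED = -2

def add_node(nodes, parent, is_left, is_leaf, feature=None, threshold=None):
    # Sketch of Tree._add_node: returns the new node id.
    node_id = len(nodes)
    node = {"left_child": TREE_UNDEFINED, "right_child": TREE_UNDEFINED,
            "feature": TREE_UNDEFINED, "threshold": TREE_UNDEFINED}
    nodes.append(node)
    # The new node registers itself as the child of its parent (lines 738-742).
    if parent != TREE_UNDEFINED:
        key = "left_child" if is_left else "right_child"
        nodes[parent][key] = node_id
    if is_leaf:
        node["left_child"] = TREE_LEAF
        node["right_child"] = TREE_LEAF
    else:
        # left_child and right_child will be set later (lines 750-753).
        node["feature"] = feature
        node["threshold"] = threshold
    return node_id

nodes = []
root = add_node(nodes, TREE_UNDEFINED, False, False, feature=0, threshold=0.5)
leaf = add_node(nodes, root, True, True)
print(root, leaf, nodes[root]["left_child"])  # 0 1 1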
-
+0759:     cpdef np.ndarray predict(self, object X):
+
+0759:     cpdef np.ndarray predict(self, object X):
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_11predict(PyObject *__pyx_v_self, PyObject *__pyx_v_X); /*proto*/
 static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree_predict(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X, int __pyx_skip_dispatch) {
   PyArrayObject *__pyx_v_ex_to_leaf = NULL;
@@ -4681,33 +4755,33 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0760:         """Returns the weighted training samples falling in the leaves X falls in.
-
 0761:         Each row holds positive weights for the training indices in the same leaf.
-
 0762:         (the prediction in the Hilbert space is the weighted mean of these samples' outputs)
-
 0763: 
-
 0764:         Parameters
-
 0765:         ----------
-
 0766:         X : {array-like, sparse matrix} of shape (n_samples, n_features)
-
 0767:             The input samples.
-
 0768: 
-
 0769:         Returns
-
 0770:         -------
-
 0771:          A (n_test_samples, n_train_samples) array
-
 0772:         """
-
 0773:         # get the leaves X falls in
-
+0774:         ex_to_leaf = self.apply(X)
+
 0760:         """Returns the weighted training samples falling in the leaves X falls in.
+
 0761:         Each row holds positive weights for the training indices in the same leaf.
+
 0762:         (the prediction in the Hilbert space is the weighted mean of these samples' outputs)
+
 0763:         
+
 0764:         Parameters
+
 0765:         ----------
+
 0766:         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+
 0767:             The input samples.
+
 0768:         
+
 0769:         Returns
+
 0770:         -------
+
 0771:          A (n_test_samples, n_train_samples) array
+
 0772:         """
+
 0773:         # get the leaves X falls in
+
+0774:         ex_to_leaf = self.apply(X)
  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->apply(__pyx_v_self, __pyx_v_X, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 774, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_ex_to_leaf = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
-
 0775:         # get the list of the training examples in each leaf
-
+0776:         leaf_to_train_exs = self._get_value_ndarray()
+
 0775:         # get the list of the training examples in each leaf
+
+0776:         leaf_to_train_exs = self._get_value_ndarray()
  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_value_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 776, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_leaf_to_train_exs = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
 0777: 
-
+0778:         out = np.zeros((X.shape[0], leaf_to_train_exs.shape[1]), dtype=DOUBLE)
+
+0778:         out = np.zeros((X.shape[0], leaf_to_train_exs.shape[1]), dtype=DOUBLE)
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 778, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 778, __pyx_L1_error)
@@ -4746,8 +4820,8 @@
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __pyx_v_out = __pyx_t_3;
   __pyx_t_3 = 0;
-
 0779:         # assign the right list of training samples to the right input
-
+0780:         for ex in range(X.shape[0]):
+
 0779:         # assign the right list of training samples to the right input
+
+0780:         for ex in range(X.shape[0]):
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 780, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 780, __pyx_L1_error)
@@ -4801,7 +4875,7 @@
 /* … */
   }
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
+0781:             out[ex] = leaf_to_train_exs[ex_to_leaf[ex]]
+
+0781:             out[ex] = leaf_to_train_exs[ex_to_leaf[ex]]
    __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_ex_to_leaf), __pyx_v_ex); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 781, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_3);
     __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_leaf_to_train_exs), __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 781, __pyx_L1_error)
@@ -4810,14 +4884,14 @@
     if (unlikely(PyObject_SetItem(__pyx_v_out, __pyx_v_ex, __pyx_t_1) < 0)) __PYX_ERR(0, 781, __pyx_L1_error)
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 0782: 
-
+0783:         return out
+
+0783:         return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
   if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 783, __pyx_L1_error)
   __Pyx_INCREF(__pyx_v_out);
   __pyx_r = ((PyArrayObject *)__pyx_v_out);
   goto __pyx_L0;
 
 0784: 
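`predict` above routes each test sample to its leaf via `apply(X)` and then copies that leaf's row of training-sample weights into the output, so the Hilbert-space prediction is the weighted mean of the training outputs in the leaf. A NumPy sketch of the gather performed by lines 774-781 (toy shapes, hypothetical data):

import numpy as np

# Toy setup: 4 tree nodes, 5 training samples, 3 test samples.
leaf_to_train_exs = np.array([
    [0.0, 0.0, 0.0, 0.0, 0.0],        # node 0: internal, unused
    [0.5, 0.5, 0.0, 0.0, 0.0],        # node 1: leaf weights
    [0.0, 0.0, 0.0, 0.0, 0.0],        # node 2: internal, unused
    [0.0, 0.0, 1 / 3, 1 / 3, 1 / 3],  # node 3: leaf weights
])
ex_to_leaf = np.array([1, 3, 1])  # stand-in for self.apply(X)

# Vectorised form of the loop on lines 780-781:
# out[ex] = leaf_to_train_exs[ex_to_leaf[ex]]
out = leaf_to_train_exs[ex_to_leaf]
print(out.shape)  # (3, 5): one weight row per test sample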
-
+0785:     cpdef np.ndarray decode_tree(self, np.ndarray K_cand_train, np.ndarray sq_norms_cand, object criterion, str kernel, SIZE_t return_top_k):
+
+0785:     cpdef np.ndarray decode_tree(self, np.ndarray K_cand_train, np.ndarray sq_norms_cand, object criterion, str kernel, SIZE_t return_top_k):
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_13decode_tree(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree_decode_tree(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyArrayObject *__pyx_v_K_cand_train, PyArrayObject *__pyx_v_sq_norms_cand, PyObject *__pyx_v_criterion, PyObject *__pyx_v_kernel, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_return_top_k, int __pyx_skip_dispatch) {
   PyArrayObject *__pyx_v_y_train = NULL;
@@ -5099,45 +5173,45 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0786:         """shape (node_count,)
+
 0786:         """shape (node_count,)
 0787: 
-
 0788:         Decode using the search for the output the closer to the mean of the
-
 0789:         input's leaf in the embedding Hilbert space
-
 0790:         corresponds to the KernelizedMSE criterion
-
 0791: 
-
 0792:         out[i] is the index of the example whose
-
 0793:         output has been chosen to represent the output of the leaf i
-
 0794:         (if i is a leaf, otherwise it is -1).
-
 0795: 
-
 0796:         Parameters
-
 0797:         ----------
-
 0798:         K_cand_train : array of shape (n_candidates, n_train_samples)
-
 0799:             The Kernel matrix between the candidates outputs and the training outputs.
-
 0800: 
-
 0801:         sq_norms_cand : array of shape (n_candidates,)
-
 0802:             The list of the kernel values of the candidates computed againt themselves
-
 0803:             (square L2 norm in the Hilbert space).
-
 0804: 
-
 0805:         criterion : {"mse"}, default="mse"
-
 0806:             The function to measure the quality of a split (in the Hilbert space).
-
 0807: 
-
 0808:         kernel : string
-
 0809:             The type of kernel to use to compare the output data.
-
 0810:             Used only to check wether we want to do classic classification or regression or a general case.
-
 0811: 
-
 0812:         return_top_k : int (>0)
-
 0813:             The number of output to return for each leaf (the size of the set of the best candidates outputs)
-
 0814: 
-
 0815:         Returns
-
 0816:         -------
-
 0817:         An array of shape (node_count * return_top_k, n_candidates)
-
 0818:             describing for each LEAF the indices in candidates of the selected output(s),
-
 0819:             minimizing the "distance" with the "true" predisction in the Hilbert space.
-
 0820: 
-
 0821:         Note :
-
 0822:             The returned array has an arbitrary value of -1 for the lines corresponding to non-leaf nodes.
-
 0823:         """
-
+0824:         if isinstance(criterion, KernelizedMSE):
+
 0788:         Decode by searching for the output closest to the mean of the
+
 0789:         input's leaf in the embedding Hilbert space
+
 0790:         (corresponds to the KernelizedMSE criterion)
+
 0791:         
+
 0792:         out[i] is the index of the example whose 
+
 0793:         output has been chosen to represent the output of the leaf i 
+
 0794:         (if i is a leaf, otherwise it is -1).
+
 0795:         
+
 0796:         Parameters
+
 0797:         ----------
+
 0798:         K_cand_train : array of shape (n_candidates, n_train_samples)
+
 0799:             The Kernel matrix between the candidate outputs and the training outputs.
+
 0800:         
+
 0801:         sq_norms_cand : array of shape (n_candidates,)
+
 0802:             The list of the kernel values of the candidates computed against themselves
+
 0803:             (square L2 norm in the Hilbert space).
+
 0804:         
+
 0805:         criterion : {"mse"}, default="mse"
+
 0806:             The function to measure the quality of a split (in the Hilbert space).
+
 0807:         
+
 0808:         kernel : string
+
 0809:             The type of kernel to use to compare the output data. 
+
 0810:             Used only to check whether we want to do classic classification or regression or a general case.
+
 0811:         
+
 0812:         return_top_k : int (>0)
+
 0813:             The number of outputs to return for each leaf (the size of the set of best candidate outputs)
+
 0814:         
+
 0815:         Returns
+
 0816:         -------
+
 0817:         An array of shape (node_count * return_top_k, n_candidates)
+
 0818:             describing for each LEAF the indices in candidates of the selected output(s), 
+
 0819:             minimizing the "distance" to the "true" prediction in the Hilbert space.
+
 0820:             
+
 0821:         Note:
+
 0822:             The returned array has an arbitrary value of -1 for the lines corresponding to non-leaf nodes.
+
 0823:         """
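For the `gini_clf` kernel, the branch below enumerates every label combination with `itertools.product(*classes)`, scores each candidate by its weighted agreement with the training outputs of the leaf, and keeps the `return_top_k` best with `np.argpartition`. A simplified sketch of that scoring for a single leaf (toy data; the real code also has a faster per-output argmax path when only the single best candidate is needed):

import itertools
import numpy as np

# Toy leaf: weights over 4 training samples with 2-output labels.
leaf_weights = np.array([0.25, 0.25, 0.25, 0.25])
y_train = np.array([[0, 1], [0, 1], [1, 0], [0, 0]])
classes = [np.array([0, 1]), np.array([0, 1])]
return_top_k = 2

candidates = list(itertools.product(*classes))
# Score each candidate as on line 873: weighted mean agreement per sample.
value = np.array([
    sum(leaf_weights[ex] * (y_train[ex] == cand).mean()
        for ex in range(len(y_train)))
    for cand in candidates
])
# Keep the k best-scoring candidates, as on line 875.
ind_top = np.argpartition(value, -return_top_k)[-return_top_k:]
print([candidates[i] for i in ind_top])  # e.g. [(0, 0), (0, 1)]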
+
+0824:         if isinstance(criterion, KernelizedMSE):
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_KernelizedMSE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 824, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_8 = PyObject_IsInstance(__pyx_v_criterion, __pyx_t_1); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 824, __pyx_L1_error)
@@ -5147,8 +5221,8 @@
 /* … */
   }
 
 0825: 
-
 0826:             # Special case of classification: EXHAUSTIVE search
-
+0827:             if kernel == "gini_clf":
+
 0826:             # Special case of classification: EXHAUSTIVE search
+
+0827:             if kernel == "gini_clf":
    __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_kernel, __pyx_n_s_gini_clf, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 827, __pyx_L1_error)
     __pyx_t_8 = (__pyx_t_9 != 0);
     if (__pyx_t_8) {
@@ -5156,28 +5230,28 @@
       goto __pyx_L4;
     }
 
 0828: 
-
 0829:                 # search for the best label combination among all the possible ones
+
 0829:                 # search for the best label combination among all the possible ones
 0830: 
-
+0831:                 y_train = self.y
+
+0831:                 y_train = self.y
      __pyx_t_1 = ((PyObject *)__pyx_v_self->y);
       __Pyx_INCREF(__pyx_t_1);
       __pyx_v_y_train = ((PyArrayObject *)__pyx_t_1);
       __pyx_t_1 = 0;
-
+0832:                 n_outputs = y_train.shape[1]
+
+0832:                 n_outputs = y_train.shape[1]
      __pyx_v_n_outputs = (__pyx_v_y_train->dimensions[1]);
 
 0833: 
-
+0834:                 classes = []
+
+0834:                 classes = []
      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 834, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_1);
       __pyx_v_classes = ((PyObject*)__pyx_t_1);
       __pyx_t_1 = 0;
-
+0835:                 n_classes = []
+
+0835:                 n_classes = []
      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 835, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_1);
       __pyx_v_n_classes = __pyx_t_1;
       __pyx_t_1 = 0;
 
 0836: 
-
+0837:                 y_train_encoded = np.zeros((y_train.shape[0], n_outputs), dtype=int)
+
+0837:                 y_train_encoded = np.zeros((y_train.shape[0], n_outputs), dtype=int)
      __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 837, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_1);
       __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 837, __pyx_L1_error)
@@ -5211,12 +5285,12 @@
       __pyx_v_y_train_encoded = __pyx_t_1;
       __pyx_t_1 = 0;
 
 0838: 
-
+0839:                 for l in range(n_outputs):
+
+0839:                 for l in range(n_outputs):
      __pyx_t_10 = __pyx_v_n_outputs;
       __pyx_t_11 = __pyx_t_10;
       for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
         __pyx_v_l = __pyx_t_12;
-
+0840:                     classes_l, y_train_encoded[:, l] = np.unique(y_train[:, l], return_inverse=True)
+
+0840:                     classes_l, y_train_encoded[:, l] = np.unique(y_train[:, l], return_inverse=True)
        __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 840, __pyx_L1_error)
         __Pyx_GOTREF(__pyx_t_1);
         __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_unique); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 840, __pyx_L1_error)
@@ -5313,9 +5387,9 @@
   __pyx_slice__9 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__9)) __PYX_ERR(0, 840, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_slice__9);
   __Pyx_GIVEREF(__pyx_slice__9);
-
+0841:                     classes.append(classes_l)
+
+0841:                     classes.append(classes_l)
        __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_classes, __pyx_v_classes_l); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 841, __pyx_L1_error)
-
+0842:                     n_classes.append(classes_l.shape[0])
+
+0842:                     n_classes.append(classes_l.shape[0])
        __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_classes_l, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 842, __pyx_L1_error)
         __Pyx_GOTREF(__pyx_t_2);
         __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 842, __pyx_L1_error)
@@ -5325,7 +5399,7 @@
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       }
 
 0843: 
-
+0844:                 n_classes = np.array(n_classes, dtype=np.intp)
+
+0844:                 n_classes = np.array(n_classes, dtype=np.intp)
      __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 844, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_4);
       __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 844, __pyx_L1_error)
@@ -5355,13 +5429,13 @@
 
 0845: 
 0846: 
 0847: 
-
+0848:                 leaf_to_train_exs = self._get_value_ndarray()
+
+0848:                 leaf_to_train_exs = self._get_value_ndarray()
      __pyx_t_7 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_value_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 848, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_v_leaf_to_train_exs = ((PyArrayObject *)__pyx_t_7);
       __pyx_t_7 = 0;
 
 0849: 
-
+0850:                 out = np.ones((self.node_count*return_top_k,n_outputs), dtype=np.intp) * (-1)
+
+0850:                 out = np.ones((self.node_count*return_top_k,n_outputs), dtype=np.intp) * (-1)
      __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 850, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_ones); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 850, __pyx_L1_error)
@@ -5404,10 +5478,10 @@
       __pyx_v_out = __pyx_t_2;
       __pyx_t_2 = 0;
 
 0851: 
-
+0852:                 nb_candidates = 1
+
+0852:                 nb_candidates = 1
      __Pyx_INCREF(__pyx_int_1);
       __pyx_v_nb_candidates = __pyx_int_1;
-
+0853:                 for nb_classes in n_classes:
+
+0853:                 for nb_classes in n_classes:
      if (likely(PyList_CheckExact(__pyx_v_n_classes)) || PyTuple_CheckExact(__pyx_v_n_classes)) {
         __pyx_t_2 = __pyx_v_n_classes; __Pyx_INCREF(__pyx_t_2); __pyx_t_15 = 0;
         __pyx_t_16 = NULL;
@@ -5452,13 +5526,13 @@
 /* … */
       }
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
+0854:                     nb_candidates *= nb_classes
+
+0854:                     nb_candidates *= nb_classes
        __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_nb_candidates, __pyx_v_nb_classes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 854, __pyx_L1_error)
         __Pyx_GOTREF(__pyx_t_3);
         __Pyx_DECREF_SET(__pyx_v_nb_candidates, __pyx_t_3);
         __pyx_t_3 = 0;
-
 0855:                 # array to store the value of the criterion to minimize, for each candidate
-
+0856:                 value = np.zeros((nb_candidates,), dtype=np.float64)
+
 0855:                 # array to store the value of the criterion to minimize, for each candidate
+
+0856:                 value = np.zeros((nb_candidates,), dtype=np.float64)
      __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 856, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_2);
       __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 856, __pyx_L1_error)
@@ -5491,11 +5565,11 @@
       __pyx_v_value = __pyx_t_7;
       __pyx_t_7 = 0;
 
 0857: 
-
+0858:                 recherche_exhaustive_equivalente = False
+
+0858:                 recherche_exhaustive_equivalente = False
      __pyx_v_recherche_exhaustive_equivalente = 0;
 
 0859: 
-
 0860:                 # node k
-
+0861:                 for k in range(self.node_count):
+
 0860:                 # node k
+
+0861:                 for k in range(self.node_count):
      __pyx_t_7 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->node_count); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 861, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 861, __pyx_L1_error)
@@ -5546,15 +5620,15 @@
 /* … */
       }
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-
 0862:                     # only consider the leaves when computing an output
-
+0863:                     if self.nodes[k].left_child == _TREE_LEAF:
+
 0862:                     # only consider the leaves when computing an output
+
+0863:                     if self.nodes[k].left_child == _TREE_LEAF:
        __pyx_t_17 = __Pyx_PyIndex_AsSsize_t(__pyx_v_k); if (unlikely((__pyx_t_17 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 863, __pyx_L1_error)
         __pyx_t_8 = (((__pyx_v_self->nodes[__pyx_t_17]).left_child == __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
         if (__pyx_t_8) {
 /* … */
         }
 
 0864: 
-
+0865:                         if recherche_exhaustive_equivalente or return_top_k > 1: # n_outputs loops over the classes of each output, nested inside the product --> slow
+
+0865:                         if recherche_exhaustive_equivalente or return_top_k > 1: # n_outputs loops over the classes of each output, nested inside the product --> slow
          __pyx_t_9 = (__pyx_v_recherche_exhaustive_equivalente != 0);
           if (!__pyx_t_9) {
           } else {
@@ -5569,7 +5643,7 @@
             goto __pyx_L14;
           }
 
 0866: 
-
+0867:                             for ind, candidate in enumerate(list(itertools.product(*classes))):
+
+0867:                             for ind, candidate in enumerate(list(itertools.product(*classes))):
            __Pyx_INCREF(__pyx_int_0);
             __pyx_t_2 = __pyx_int_0;
             __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_itertools); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 867, __pyx_L1_error)
@@ -5610,11 +5684,11 @@
             __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
             __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
 0868: 
-
 0869:                                 # la valeur a minimiser est k(candidate,candidate) - 2 * moyenne_des_Kernel(candidate,train_exs_in_same_leaf)
-
 0870:                                 # dans le cas de gini, k(candidate,candidate) est toujours égal à 1 peu importe candidate
-
 0871:                                 # on peut donc plutôt maximiser la quantité somme_des_Kernel(candidate,train_exs_in_same_leaf)
+
 0869:                                 # la valeur a minimiser est k(candidate,candidate) - 2 * moyenne_des_Kernel(candidate,train_exs_in_same_leaf)
+
 0870:                                 # dans le cas de gini, k(candidate,candidate) est toujours égal à 1 peu importe candidate
+
 0871:                                 # so we can instead maximize the quantity sum_of_kernels(candidate,train_exs_in_same_leaf)
 0872: 
-
+0873:                                 value[ind] = np.sum([ leaf_to_train_exs[k,ex] * (y_train[ex] == candidate).mean() for ex in range(leaf_to_train_exs.shape[1])])
+
+0873:                                 value[ind] = np.sum([ leaf_to_train_exs[k,ex] * (y_train[ex] == candidate).mean() for ex in range(leaf_to_train_exs.shape[1])])
              __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 873, __pyx_L1_error)
               __Pyx_GOTREF(__pyx_t_3);
               __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_sum); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 873, __pyx_L1_error)
@@ -5687,7 +5761,7 @@
               if (unlikely(PyObject_SetItem(__pyx_v_value, __pyx_v_ind, __pyx_t_4) < 0)) __PYX_ERR(0, 873, __pyx_L1_error)
               __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
 
 0874: 
-
+0875:                             ind_top_candidates = np.argpartition(value, - return_top_k)[- return_top_k:]
+
+0875:                             ind_top_candidates = np.argpartition(value, - return_top_k)[- return_top_k:]
            __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 875, __pyx_L1_error)
             __Pyx_GOTREF(__pyx_t_1);
             __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_argpartition); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 875, __pyx_L1_error)
@@ -5748,7 +5822,7 @@
             __Pyx_XDECREF_SET(__pyx_v_ind_top_candidates, __pyx_t_4);
             __pyx_t_4 = 0;
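
A minimal, self-contained illustration of the `np.argpartition` selection on line 875 (toy scores; `return_top_k` is just a local variable here): it yields the indices of the `return_top_k` largest scores in linear time, without fully sorting `value`.

```python
import numpy as np

value = np.array([0.2, 0.9, 0.1, 0.7, 0.4])   # toy candidate scores
return_top_k = 2
# indices of the two largest entries, in no particular order
ind_top_candidates = np.argpartition(value, -return_top_k)[-return_top_k:]
print(sorted(ind_top_candidates))             # [1, 3]
```
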
 
 0876: 
-
+0877:                             top_candidates = list(itertools.product(*classes))[ind_top_candidates]
+
+0877:                             top_candidates = list(itertools.product(*classes))[ind_top_candidates]
            __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_itertools); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 877, __pyx_L1_error)
             __Pyx_GOTREF(__pyx_t_4);
             __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_product); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 877, __pyx_L1_error)
@@ -5768,7 +5842,7 @@
             __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
             __Pyx_XDECREF_SET(__pyx_v_top_candidates, __pyx_t_3);
             __pyx_t_3 = 0;
-
+0878:                             top_candidates = np.array(top_candidates, dtype=int)
+
+0878:                             top_candidates = np.array(top_candidates, dtype=int)
            __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 878, __pyx_L1_error)
             __Pyx_GOTREF(__pyx_t_3);
             __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 878, __pyx_L1_error)
@@ -5790,7 +5864,7 @@
             __Pyx_DECREF_SET(__pyx_v_top_candidates, __pyx_t_1);
             __pyx_t_1 = 0;
 
 0879: 
-
+0880:                             out[k*return_top_k : (k+1)*return_top_k] = top_candidates
+
+0880:                             out[k*return_top_k : (k+1)*return_top_k] = top_candidates
            __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_return_top_k); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 880, __pyx_L1_error)
             __Pyx_GOTREF(__pyx_t_1);
             __pyx_t_2 = PyNumber_Multiply(__pyx_v_k, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 880, __pyx_L1_error)
@@ -5808,16 +5882,16 @@
             __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
             __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
 
 0881: 
-
 0882:                         else:
+
 0882:                         else:
 0883: 
-
+0884:                             for l in range(n_outputs):
+
+0884:                             for l in range(n_outputs):
          /*else*/ {
             __pyx_t_10 = __pyx_v_n_outputs;
             __pyx_t_11 = __pyx_t_10;
             for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
               __pyx_v_l = __pyx_t_12;
 
 0885: 
-
+0886:                                 major_class = np.argmax( [ np.sum( leaf_to_train_exs[k, np.where( y_train[:,l] == class_i )[0] ] ) for class_i in classes[l] ] )
+
+0886:                                 major_class = np.argmax( [ np.sum( leaf_to_train_exs[k, np.where( y_train[:,l] == class_i )[0] ] ) for class_i in classes[l] ] )
              __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 886, __pyx_L1_error)
               __Pyx_GOTREF(__pyx_t_2);
               __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_argmax); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 886, __pyx_L1_error)
@@ -5960,7 +6034,7 @@
               __Pyx_XDECREF_SET(__pyx_v_major_class, __pyx_t_4);
               __pyx_t_4 = 0;
 
 0887: 
-
+0888:                                 out[k,l] = classes[l][ major_class ]
+
+0888:                                 out[k,l] = classes[l][ major_class ]
              __pyx_t_4 = __Pyx_PyObject_GetItem(PyList_GET_ITEM(__pyx_v_classes, __pyx_v_l), __pyx_v_major_class); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 888, __pyx_L1_error)
               __Pyx_GOTREF(__pyx_t_4);
               __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_l); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 888, __pyx_L1_error)
@@ -5980,8 +6054,8 @@
           }
           __pyx_L14:;
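
When no exhaustive search is requested (the `else` branch, source lines 884-888), each output is decoded independently by a leaf-weighted majority vote. A hedged toy reconstruction, with shapes assumed from the annotated source:

```python
import numpy as np

leaf_to_train_exs = np.array([[0.2, 0.5, 0.3]])  # one leaf, three samples
y_train = np.array([[0], [1], [1]])              # a single output
classes = [np.array([0, 1])]
k, n_outputs = 0, 1
out = np.empty((1, n_outputs), dtype=int)
for l in range(n_outputs):
    # each class is scored by the total leaf weight of the samples carrying it
    major_class = np.argmax(
        [np.sum(leaf_to_train_exs[k, np.where(y_train[:, l] == class_i)[0]])
         for class_i in classes[l]])
    out[k, l] = classes[l][major_class]
print(out)  # [[1]] -- class 1 carries weight 0.8 vs 0.2 for class 0
```
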
 
 0889: 
-
 0890:             # Special case of regression: EXACT search
-
+0891:             elif kernel == "mse_reg":
+
 0890:             # Special case of regression: EXACT search
+
+0891:             elif kernel == "mse_reg":
    __pyx_t_8 = (__Pyx_PyString_Equals(__pyx_v_kernel, __pyx_n_s_mse_reg, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 891, __pyx_L1_error)
     __pyx_t_9 = (__pyx_t_8 != 0);
     if (__pyx_t_9) {
@@ -5989,58 +6063,58 @@
       goto __pyx_L4;
     }
 
 0892: 
-
 0893:                 # search for the best combination of labels among all possible ones
-
 0894:                 # with an MSE criterion, and therefore a linear kernel,
-
 0895:                 # the exact solution (argmin_y [k(y,y) - 2 mean_i(k(y,y_leaf_i))]) can be computed:
-
 0896:                 # it is the mean of the outputs in each leaf
-
 0897:                 #
-
 0898:                 # We cannot search for the k best candidates because the candidate search set for regression is infinite (R^d)
+
 0893:                 # search for the best combination of labels among all possible ones
+
 0894:                 # with an MSE criterion, and therefore a linear kernel,
+
 0895:                 # the exact solution (argmin_y [k(y,y) - 2 mean_i(k(y,y_leaf_i))]) can be computed:
+
 0896:                 # it is the mean of the outputs in each leaf
+
 0897:                 #
+
 0898:                 # We cannot search for the k best candidates because the candidate search set for regression is infinite (R^d)
 0899: 
-
+0900:                 y_train = self.y
+
+0900:                 y_train = self.y
      __pyx_t_7 = ((PyObject *)__pyx_v_self->y);
       __Pyx_INCREF(__pyx_t_7);
       __pyx_v_y_train = ((PyArrayObject *)__pyx_t_7);
       __pyx_t_7 = 0;
-
+0901:                 n_outputs = y_train.shape[1]
+
+0901:                 n_outputs = y_train.shape[1]
      __pyx_v_n_outputs = (__pyx_v_y_train->dimensions[1]);
 
 0902: 
-
+0903:                 leaf_to_train_exs = self._get_value_ndarray()
+
+0903:                 leaf_to_train_exs = self._get_value_ndarray()
      __pyx_t_7 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_value_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 903, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_v_leaf_to_train_exs = ((PyArrayObject *)__pyx_t_7);
       __pyx_t_7 = 0;
 
 0904: 
-
+0905:                 out = leaf_to_train_exs @ y_train
+
+0905:                 out = leaf_to_train_exs @ y_train
      __pyx_t_7 = __Pyx_PyNumber_MatrixMultiply(((PyObject *)__pyx_v_leaf_to_train_exs), ((PyObject *)__pyx_v_y_train)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 905, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_v_out = __pyx_t_7;
       __pyx_t_7 = 0;
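
The single matrix product on line 905 is the whole decoding step for regression: row k of `leaf_to_train_exs` holds the (normalized) weights of the training examples in leaf k, so `leaf_to_train_exs @ y_train` is exactly the per-leaf weighted mean of the training outputs. A toy check:

```python
import numpy as np

leaf_to_train_exs = np.array([[0.5, 0.5, 0.0],   # leaf 0: samples 0 and 1
                              [0.0, 0.0, 1.0]])  # leaf 1: sample 2 only
y_train = np.array([[1.0, 2.0],
                    [3.0, 4.0],
                    [5.0, 6.0]])
out = leaf_to_train_exs @ y_train
print(out)  # [[2. 3.] [5. 6.]] -- the weighted mean output of each leaf
```
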
 
 0906: 
-
 0907:                 # out = np.ones((self.node_count,n_outputs), dtype=y_train.dtype) * (-1)
+
 0907:                 # out = np.ones((self.node_count,n_outputs), dtype=y_train.dtype) * (-1)
 0908: 
-
 0909:                 # # node k
-
 0910:                 # for k in range(self.node_count):
-
 0911:                 #     # only consider the leaves, where an output is computed
-
 0912:                 #     if self.nodes[k].left_child == _TREE_LEAF:
+
 0909:                 # # node k
+
 0910:                 # for k in range(self.node_count):
+
 0911:                 #     # only consider the leaves, where an output is computed
+
 0912:                 #     if self.nodes[k].left_child == _TREE_LEAF:
 0913: 
-
 0914:                 #         out[k] = np.sum(np.array([ leaf_to_train_exs[k,ex] * y_train[ex] for ex in range(leaf_to_train_exs.shape[1])]), axis=0)
+
 0914:                 #         out[k] = np.sum(np.array([ leaf_to_train_exs[k,ex] * y_train[ex] for ex in range(leaf_to_train_exs.shape[1])]), axis=0)
 0915: 
 0916: 
-
 0917:             # In this else branch, a Gram matrix of candidates has therefore been provided
-
 0918:             else: # general case: neither classification nor regression, but a search for the argmin within the provided candidate set
+
 0917:             # In this else branch, a Gram matrix of candidates has therefore been provided
+
 0918:             else: # general case: neither classification nor regression, but a search for the argmin within the provided candidate set
 0919: 
-
 0920:                 # the candidates are given as a Gram matrix of the candidates against the training examples (+ against themselves).
+
 0920:                 # the candidates are given as a Gram matrix of the candidates against the training examples (+ against themselves).
 0921: 
-
 0922:                 # we return the index of the candidate that best represents the leaf (the training examples are not checked; they should be included among the candidates)
+
 0922:                 # we return the index of the candidate that best represents the leaf (the training examples are not checked; they should be included among the candidates)
 0923: 
-
+0924:                 leaf_to_train_exs = self._get_value_ndarray()
+
+0924:                 leaf_to_train_exs = self._get_value_ndarray()
    /*else*/ {
       __pyx_t_7 = ((PyObject *)((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_self->__pyx_vtab)->_get_value_ndarray(__pyx_v_self)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 924, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_v_leaf_to_train_exs = ((PyArrayObject *)__pyx_t_7);
       __pyx_t_7 = 0;
 
 0925: 
-
+0926:                 out = np.ones((self.node_count*return_top_k,), dtype=np.intp) * (-1)
+
+0926:                 out = np.ones((self.node_count*return_top_k,), dtype=np.intp) * (-1)
      __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 926, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_7);
       __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_ones); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 926, __pyx_L1_error)
@@ -6078,8 +6152,8 @@
       __pyx_v_out = __pyx_t_2;
       __pyx_t_2 = 0;
 
 0927: 
-
 0928:                 # array to store the value of the criterion to minimize, for each candidate
-
+0929:                 value = np.zeros((K_cand_train.shape[0],), dtype=np.float64)
+
 0928:                 # array to store the value of the criterion to minimize, for each candidate
+
+0929:                 value = np.zeros((K_cand_train.shape[0],), dtype=np.float64)
      __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 929, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_2);
       __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 929, __pyx_L1_error)
@@ -6114,8 +6188,8 @@
       __pyx_v_value = __pyx_t_3;
       __pyx_t_3 = 0;
 
 0930: 
-
 0931:                 # node k
-
+0932:                 for k in range(self.node_count):
+
 0931:                 # node k
+
+0932:                 for k in range(self.node_count):
      __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_self->node_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 932, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_3);
       __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 932, __pyx_L1_error)
@@ -6168,15 +6242,15 @@
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     }
     __pyx_L4:;
-
 0933:                     # only consider the leaves, where an output is computed
-
+0934:                     if self.nodes[k].left_child == _TREE_LEAF:
+
 0933:                     # only consider the leaves, where an output is computed
+
+0934:                     if self.nodes[k].left_child == _TREE_LEAF:
        __pyx_t_17 = __Pyx_PyIndex_AsSsize_t(__pyx_v_k); if (unlikely((__pyx_t_17 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 934, __pyx_L1_error)
         __pyx_t_9 = (((__pyx_v_self->nodes[__pyx_t_17]).left_child == __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
         if (__pyx_t_9) {
 /* … */
         }
-
 0935:                         # for each candidate, compute k[candidate,candidate] - 2/self.n_node_samples * sum_i=0^self.n_node_samples k[candidate,i]
-
+0936:                         for candidate in range(K_cand_train.shape[0]):
+
 0935:                         # for each candidate, compute k[candidate,candidate] - 2/self.n_node_samples * sum_i=0^self.n_node_samples k[candidate,i]
+
+0936:                         for candidate in range(K_cand_train.shape[0]):
          __pyx_t_7 = __Pyx_PyInt_From_Py_intptr_t((__pyx_v_K_cand_train->dimensions[0])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 936, __pyx_L1_error)
           __Pyx_GOTREF(__pyx_t_7);
           __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 936, __pyx_L1_error)
@@ -6228,7 +6302,7 @@
           }
           __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
 
 0937: 
-
+0938:                             value[candidate] = sq_norms_cand[candidate] - 2 * np.sum([leaf_to_train_exs[k,ex] * K_cand_train[candidate,ex] for ex in range(leaf_to_train_exs.shape[1])])
+
+0938:                             value[candidate] = sq_norms_cand[candidate] - 2 * np.sum([leaf_to_train_exs[k,ex] * K_cand_train[candidate,ex] for ex in range(leaf_to_train_exs.shape[1])])
            __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_sq_norms_cand), __pyx_v_candidate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 938, __pyx_L1_error)
             __Pyx_GOTREF(__pyx_t_2);
             __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 938, __pyx_L1_error)
@@ -6301,8 +6375,8 @@
             if (unlikely(PyObject_SetItem(__pyx_v_value, __pyx_v_candidate, __pyx_t_1) < 0)) __PYX_ERR(0, 938, __pyx_L1_error)
             __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
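
The scoring loop on lines 936-938 only ever touches kernel evaluations, so it stays valid for any output kernel. A hedged, vectorized NumPy equivalent on toy data (`sq_norms_cand[c]` = k(c,c), `K_cand_train[c, ex]` = k(candidate c, training example ex), `leaf_weights` = row k of `leaf_to_train_exs`; all values are made up):

```python
import numpy as np

rng = np.random.default_rng(0)
n_cand, n_train = 4, 5
K_cand_train = rng.random((n_cand, n_train))
sq_norms_cand = 1.0 + rng.random(n_cand)
leaf_weights = rng.random(n_train)
# value[c] = k(c,c) - 2 * <embedding of c, mean embedding of the leaf>
value = sq_norms_cand - 2 * K_cand_train @ leaf_weights
print(np.argmin(value))  # index of the candidate closest to the leaf
```
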
 
 0939: 
-
 0940:                         # pick the input ex* that gave the smallest value
-
+0941:                         ind_top_candidates = np.argpartition(value, return_top_k)[:return_top_k]
+
 0940:                         # pick the input ex* that gave the smallest value
+
+0941:                         ind_top_candidates = np.argpartition(value, return_top_k)[:return_top_k]
          __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 941, __pyx_L1_error)
           __Pyx_GOTREF(__pyx_t_1);
           __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_argpartition); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 941, __pyx_L1_error)
@@ -6363,7 +6437,7 @@
           __Pyx_XDECREF_SET(__pyx_v_ind_top_candidates, __pyx_t_5);
           __pyx_t_5 = 0;
 
 0942: 
-
+0943:                         out[k*return_top_k : (k+1)*return_top_k] = ind_top_candidates
+
+0943:                         out[k*return_top_k : (k+1)*return_top_k] = ind_top_candidates
          __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_return_top_k); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 943, __pyx_L1_error)
           __Pyx_GOTREF(__pyx_t_5);
           __pyx_t_7 = PyNumber_Multiply(__pyx_v_k, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 943, __pyx_L1_error)
@@ -6382,7 +6456,7 @@
           __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 0944: 
 0945: 
-
+0946:             return out
+
+0946:             return out
    __Pyx_XDECREF(((PyObject *)__pyx_r));
     if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 946, __pyx_L1_error)
     __Pyx_INCREF(__pyx_v_out);
@@ -6390,8 +6464,8 @@
     goto __pyx_L0;
 
 0947: 
 0948: 
-
 0949:         else:
-
+0950:             raise NotImplementedError('only the "KernelizedMSE" criterion is supported')
+
 0949:         else:
+
+0950:             raise NotImplementedError('only the "KernelizedMSE" criterion is supported')
  /*else*/ {
     __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_NotImplementedError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 950, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_3);
@@ -6405,7 +6479,7 @@
   __Pyx_GIVEREF(__pyx_tuple__10);
 
 0951: 
 0952: 
-
+0953:     cpdef np.ndarray apply(self, object X):
+
+0953:     cpdef np.ndarray apply(self, object X):
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_15apply(PyObject *__pyx_v_self, PyObject *__pyx_v_X); /*proto*/
 static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree_apply(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X, int __pyx_skip_dispatch) {
   PyArrayObject *__pyx_r = NULL;
@@ -6508,8 +6582,8 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0954:         """Finds the terminal region (=leaf node) for each sample in X."""
-
+0955:         if issparse(X):
+
 0954:         """Finds the terminal region (=leaf node) for each sample in X."""
+
+0955:         if issparse(X):
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_issparse); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 955, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = NULL;
@@ -6532,7 +6606,7 @@
   if (__pyx_t_5) {
 /* … */
   }
-
+0956:             if type(X) == csc_matrix:
+
+0956:             if type(X) == csc_matrix:
    __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_csc_matrix); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 956, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __pyx_t_2 = PyObject_RichCompare(((PyObject *)Py_TYPE(__pyx_v_X)), __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 956, __pyx_L1_error)
@@ -6542,7 +6616,7 @@
     if (__pyx_t_5) {
 /* … */
     }
-
+0957:                 return self._apply_sparse_csr(X.tocsr())
+
+0957:                 return self._apply_sparse_csr(X.tocsr())
      __Pyx_XDECREF(((PyObject *)__pyx_r));
       __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_tocsr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 957, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_1);
@@ -6567,8 +6641,8 @@
       __pyx_r = ((PyArrayObject *)__pyx_t_1);
       __pyx_t_1 = 0;
       goto __pyx_L0;
-
 0958:             else:
-
+0959:                 return self._apply_sparse_csr(X)
+
 0958:             else:
+
+0959:                 return self._apply_sparse_csr(X)
    /*else*/ {
       __Pyx_XDECREF(((PyObject *)__pyx_r));
       __pyx_t_1 = ((PyObject *)__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__apply_sparse_csr(__pyx_v_self, __pyx_v_X)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 959, __pyx_L1_error)
@@ -6577,8 +6651,8 @@
       __pyx_t_1 = 0;
       goto __pyx_L0;
     }
-
 0960:         else:
-
+0961:             return self._apply_dense(X)
+
 0960:         else:
+
+0961:             return self._apply_dense(X)
  /*else*/ {
     __Pyx_XDECREF(((PyObject *)__pyx_r));
     __pyx_t_1 = ((PyObject *)__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__apply_dense(__pyx_v_self, __pyx_v_X)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 961, __pyx_L1_error)
@@ -6588,7 +6662,7 @@
     goto __pyx_L0;
   }
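
The dispatch in `apply` (lines 955-961) boils down to the following hedged pseudo-Python (`tree` stands for a fitted `Tree` instance; `_apply_sparse_csr` and `_apply_dense` are the internal methods annotated below). CSC input is converted with `tocsr()` first because the sparse traversal needs row-wise (`indptr`-based) access:

```python
from scipy.sparse import issparse, csc_matrix

def apply(tree, X):
    if issparse(X):
        if type(X) == csc_matrix:
            return tree._apply_sparse_csr(X.tocsr())
        return tree._apply_sparse_csr(X)   # already row-oriented
    return tree._apply_dense(X)
```
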
 
 0962: 
-
+0963:     cdef inline np.ndarray _apply_dense(self, object X):
+
+0963:     cdef inline np.ndarray _apply_dense(self, object X):
static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__apply_dense(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X) {
   __Pyx_memviewslice __pyx_v_X_ndarray = { 0, 0, { 0 }, { 0 }, { 0 } };
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_n_samples;
@@ -6632,27 +6706,27 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 0964:         """Finds the terminal region (=leaf node) for each sample in X."""
+
 0964:         """Finds the terminal region (=leaf node) for each sample in X."""
 0965: 
-
 0966:         # Check input
-
+0967:         if not isinstance(X, np.ndarray):
+
 0966:         # Check input
+
+0967:         if not isinstance(X, np.ndarray):
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_X, __pyx_ptype_5numpy_ndarray); 
   __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
   if (unlikely(__pyx_t_2)) {
 /* … */
   }
-
+0968:             raise ValueError("X should be in np.ndarray format, got %s"
+
+0968:             raise ValueError("X should be in np.ndarray format, got %s"
    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 968, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __Pyx_Raise(__pyx_t_4, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
     __PYX_ERR(0, 968, __pyx_L1_error)
-
+0969:                              % type(X))
+
+0969:                              % type(X))
    __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_should_be_in_np_ndarray_format, ((PyObject *)Py_TYPE(__pyx_v_X))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 969, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_3);
 
 0970: 
-
+0971:         if X.dtype != DTYPE:
+
+0971:         if X.dtype != DTYPE:
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 971, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 971, __pyx_L1_error)
@@ -6665,7 +6739,7 @@
   if (unlikely(__pyx_t_2)) {
 /* … */
   }
-
+0972:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
+
+0972:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 972, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_dtype_should_be_np_float32_got, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 972, __pyx_L1_error)
@@ -6678,13 +6752,13 @@
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
     __PYX_ERR(0, 972, __pyx_L1_error)
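
Both validation checks (lines 967-972) are hard errors, not conversions, so a caller must cast beforehand. A hedged caller-side fix:

```python
import numpy as np

X = np.array([[0.1, 2.0]], dtype=np.float64)
# _apply_dense requires a float32 ndarray ("X.dtype should be np.float32")
X32 = np.ascontiguousarray(X, dtype=np.float32)
```
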
 
 0973: 
-
 0974:         # Extract input
-
+0975:         cdef const DTYPE_t[:, :] X_ndarray = X
+
 0974:         # Extract input
+
+0975:         cdef const DTYPE_t[:, :] X_ndarray = X
  __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_13stpredictions_6models_3OK3_5_tree_DTYPE_t__const__(__pyx_v_X, 0); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 975, __pyx_L1_error)
   __pyx_v_X_ndarray = __pyx_t_6;
   __pyx_t_6.memview = NULL;
   __pyx_t_6.data = NULL;
-
+0976:         cdef SIZE_t n_samples = X.shape[0]
+
+0976:         cdef SIZE_t n_samples = X.shape[0]
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 976, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 976, __pyx_L1_error)
@@ -6694,8 +6768,8 @@
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __pyx_v_n_samples = __pyx_t_7;
 
 0977: 
-
 0978:         # Initialize output
-
+0979:         cdef np.ndarray[SIZE_t] out = np.zeros((n_samples,), dtype=np.intp)
+
 0978:         # Initialize output
+
+0979:         cdef np.ndarray[SIZE_t] out = np.zeros((n_samples,), dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 979, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 979, __pyx_L1_error)
@@ -6740,16 +6814,16 @@
   __pyx_t_10 = 0;
   __pyx_v_out = ((PyArrayObject *)__pyx_t_9);
   __pyx_t_9 = 0;
-
+0980:         cdef SIZE_t* out_ptr = <SIZE_t*> out.data
+
+0980:         cdef SIZE_t* out_ptr = <SIZE_t*> out.data
  __pyx_v_out_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *)__pyx_v_out->data);
 
 0981: 
-
 0982:         # Initialize auxiliary data-structure
-
+0983:         cdef Node* node = NULL
+
 0982:         # Initialize auxiliary data-structure
+
+0983:         cdef Node* node = NULL
  __pyx_v_node = NULL;
-
+0984:         cdef SIZE_t i = 0
+
+0984:         cdef SIZE_t i = 0
  __pyx_v_i = 0;
 
 0985: 
-
+0986:         with nogil:
+
+0986:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -6769,21 +6843,21 @@
         __pyx_L7:;
       }
   }
-
+0987:             for i in range(n_samples):
+
+0987:             for i in range(n_samples):
        __pyx_t_7 = __pyx_v_n_samples;
         __pyx_t_11 = __pyx_t_7;
         for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
           __pyx_v_i = __pyx_t_12;
-
+0988:                 node = self.nodes
+
+0988:                 node = self.nodes
          __pyx_t_13 = __pyx_v_self->nodes;
           __pyx_v_node = __pyx_t_13;
-
 0989:                 # While node not a leaf
-
+0990:                 while node.left_child != _TREE_LEAF:
+
 0989:                 # While node not a leaf
+
+0990:                 while node.left_child != _TREE_LEAF:
          while (1) {
             __pyx_t_2 = ((__pyx_v_node->left_child != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
             if (!__pyx_t_2) break;
-
 0991:                     # ... and node.right_child != _TREE_LEAF:
-
+0992:                     if X_ndarray[i, node.feature] <= node.threshold:
+
 0991:                     # ... and node.right_child != _TREE_LEAF:
+
+0992:                     if X_ndarray[i, node.feature] <= node.threshold:
            __pyx_t_14 = __pyx_v_i;
             __pyx_t_15 = __pyx_v_node->feature;
             __pyx_t_2 = (((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X_ndarray.data + __pyx_t_14 * __pyx_v_X_ndarray.strides[0]) ) + __pyx_t_15 * __pyx_v_X_ndarray.strides[1]) ))) <= __pyx_v_node->threshold) != 0);
@@ -6791,28 +6865,28 @@
 /* … */
               goto __pyx_L12;
             }
-
+0993:                         node = &self.nodes[node.left_child]
+
+0993:                         node = &self.nodes[node.left_child]
              __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->left_child]));
-
 0994:                     else:
-
+0995:                         node = &self.nodes[node.right_child]
+
 0994:                     else:
+
+0995:                         node = &self.nodes[node.right_child]
            /*else*/ {
               __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->right_child]));
             }
             __pyx_L12:;
           }
 
 0996: 
-
+0997:                 out_ptr[i] = <SIZE_t>(node - self.nodes)  # node offset
+
+0997:                 out_ptr[i] = <SIZE_t>(node - self.nodes)  # node offset
          (__pyx_v_out_ptr[__pyx_v_i]) = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)(__pyx_v_node - __pyx_v_self->nodes));
         }
       }
 
 0998: 
-
+0999:         return out
+
+0999:         return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
   __Pyx_INCREF(((PyObject *)__pyx_v_out));
   __pyx_r = ((PyArrayObject *)__pyx_v_out);
   goto __pyx_L0;
 
 1000: 
-
+1001:     cdef inline np.ndarray _apply_sparse_csr(self, object X):
+
+1001:     cdef inline np.ndarray _apply_sparse_csr(self, object X):
static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__apply_sparse_csr(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X) {
   PyArrayObject *__pyx_v_X_data_ndarray = 0;
   PyArrayObject *__pyx_v_X_indices_ndarray = 0;
@@ -6891,10 +6965,10 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1002:         """Finds the terminal region (=leaf node) for each sample in sparse X.
-
 1003:         """
-
 1004:         # Check input
-
+1005:         if not isinstance(X, csr_matrix):
+
 1002:         """Finds the terminal region (=leaf node) for each sample in sparse X.
+
 1003:         """
+
 1004:         # Check input
+
+1005:         if not isinstance(X, csr_matrix):
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_csr_matrix); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1005, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = PyObject_IsInstance(__pyx_v_X, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 1005, __pyx_L1_error)
@@ -6903,18 +6977,18 @@
   if (unlikely(__pyx_t_3)) {
 /* … */
   }
-
+1006:             raise ValueError("X should be in csr_matrix format, got %s"
+
+1006:             raise ValueError("X should be in csr_matrix format, got %s"
    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1006, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     __Pyx_Raise(__pyx_t_4, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
     __PYX_ERR(0, 1006, __pyx_L1_error)
-
+1007:                              % type(X))
+
+1007:                              % type(X))
    __pyx_t_1 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_should_be_in_csr_matrix_format, ((PyObject *)Py_TYPE(__pyx_v_X))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1007, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
 
 1008: 
-
+1009:         if X.dtype != DTYPE:
+
+1009:         if X.dtype != DTYPE:
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1009, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1009, __pyx_L1_error)
@@ -6927,7 +7001,7 @@
   if (unlikely(__pyx_t_3)) {
 /* … */
   }
-
+1010:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
+
+1010:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1010, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_1 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_dtype_should_be_np_float32_got, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1010, __pyx_L1_error)
@@ -6940,8 +7014,8 @@
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
     __PYX_ERR(0, 1010, __pyx_L1_error)
 
 1011: 
-
 1012:         # Extract input
-
+1013:         cdef np.ndarray[ndim=1, dtype=DTYPE_t] X_data_ndarray = X.data
+
 1012:         # Extract input
+
+1013:         cdef np.ndarray[ndim=1, dtype=DTYPE_t] X_data_ndarray = X.data
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1013, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1013, __pyx_L1_error)
@@ -6957,7 +7031,7 @@
   __pyx_t_6 = 0;
   __pyx_v_X_data_ndarray = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
-
+1014:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indices_ndarray  = X.indices
+
+1014:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indices_ndarray  = X.indices
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indices); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1014, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1014, __pyx_L1_error)
@@ -6973,7 +7047,7 @@
   __pyx_t_7 = 0;
   __pyx_v_X_indices_ndarray = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
-
+1015:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indptr_ndarray  = X.indptr
+
+1015:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indptr_ndarray  = X.indptr
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1015, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1015, __pyx_L1_error)
@@ -6990,14 +7064,14 @@
   __pyx_v_X_indptr_ndarray = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
 
 1016: 
-
+1017:         cdef DTYPE_t* X_data = <DTYPE_t*>X_data_ndarray.data
+
+1017:         cdef DTYPE_t* X_data = <DTYPE_t*>X_data_ndarray.data
  __pyx_v_X_data = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_DTYPE_t *)__pyx_v_X_data_ndarray->data);
-
+1018:         cdef INT32_t* X_indices = <INT32_t*>X_indices_ndarray.data
+
+1018:         cdef INT32_t* X_indices = <INT32_t*>X_indices_ndarray.data
  __pyx_v_X_indices = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_INT32_t *)__pyx_v_X_indices_ndarray->data);
-
+1019:         cdef INT32_t* X_indptr = <INT32_t*>X_indptr_ndarray.data
+
+1019:         cdef INT32_t* X_indptr = <INT32_t*>X_indptr_ndarray.data
  __pyx_v_X_indptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_INT32_t *)__pyx_v_X_indptr_ndarray->data);
 
 1020: 
-
+1021:         cdef SIZE_t n_samples = X.shape[0]
+
+1021:         cdef SIZE_t n_samples = X.shape[0]
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1021, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1021, __pyx_L1_error)
@@ -7006,7 +7080,7 @@
   __pyx_t_9 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_9 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1021, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_n_samples = __pyx_t_9;
-
+1022:         cdef SIZE_t n_features = X.shape[1]
+
+1022:         cdef SIZE_t n_features = X.shape[1]
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1022, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1022, __pyx_L1_error)
@@ -7016,8 +7090,8 @@
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   __pyx_v_n_features = __pyx_t_9;
 
 1023: 
-
 1024:         # Initialize output
-
+1025:         cdef np.ndarray[SIZE_t, ndim=1] out = np.zeros((n_samples,),
+
 1024:         # Initialize output
+
+1025:         cdef np.ndarray[SIZE_t, ndim=1] out = np.zeros((n_samples,),
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1025, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1025, __pyx_L1_error)
@@ -7054,7 +7128,7 @@
   __pyx_t_12 = 0;
   __pyx_v_out = ((PyArrayObject *)__pyx_t_11);
   __pyx_t_11 = 0;
-
+1026:                                                        dtype=np.intp)
+
+1026:                                                        dtype=np.intp)
  __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1026, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1026, __pyx_L1_error)
@@ -7064,33 +7138,33 @@
   __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
   if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_11) < 0) __PYX_ERR(0, 1026, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
-
+1027:         cdef SIZE_t* out_ptr = <SIZE_t*> out.data
+
+1027:         cdef SIZE_t* out_ptr = <SIZE_t*> out.data
  __pyx_v_out_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *)__pyx_v_out->data);
 
 1028: 
-
 1029:         # Initialize auxiliary data-structure
-
+1030:         cdef DTYPE_t feature_value = 0.
+
 1029:         # Initialize auxiliary data-structure
+
+1030:         cdef DTYPE_t feature_value = 0.
  __pyx_v_feature_value = 0.;
-
+1031:         cdef Node* node = NULL
+
+1031:         cdef Node* node = NULL
  __pyx_v_node = NULL;
-
+1032:         cdef DTYPE_t* X_sample = NULL
+
+1032:         cdef DTYPE_t* X_sample = NULL
  __pyx_v_X_sample = NULL;
-
+1033:         cdef SIZE_t i = 0
+
+1033:         cdef SIZE_t i = 0
  __pyx_v_i = 0;
-
+1034:         cdef INT32_t k = 0
+
+1034:         cdef INT32_t k = 0
  __pyx_v_k = 0;
 
 1035: 
-
 1036:         # feature_to_sample as a data structure records the last seen sample
-
 1037:         # for each feature; functionally, it is an efficient way to identify
-
 1038:         # which features are nonzero in the present sample.
-
+1039:         cdef SIZE_t* feature_to_sample = NULL
+
 1036:         # feature_to_sample as a data structure records the last seen sample
+
 1037:         # for each feature; functionally, it is an efficient way to identify
+
 1038:         # which features are nonzero in the present sample.
+
+1039:         cdef SIZE_t* feature_to_sample = NULL
  __pyx_v_feature_to_sample = NULL;
 
 1040: 
-
+1041:         safe_realloc(&X_sample, n_features)
+
+1041:         safe_realloc(&X_sample, n_features)
  __pyx_fuse_0__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_X_sample), __pyx_v_n_features); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1041, __pyx_L1_error)
-
+1042:         safe_realloc(&feature_to_sample, n_features)
+
+1042:         safe_realloc(&feature_to_sample, n_features)
  __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_feature_to_sample), __pyx_v_n_features); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1042, __pyx_L1_error)
 
 1043: 
-
+1044:         with nogil:
+
+1044:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -7110,85 +7184,85 @@
         __pyx_L7:;
       }
   }
-
+1045:             memset(feature_to_sample, -1, n_features * sizeof(SIZE_t))
+
+1045:             memset(feature_to_sample, -1, n_features * sizeof(SIZE_t))
        (void)(memset(__pyx_v_feature_to_sample, -1, (__pyx_v_n_features * (sizeof(__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)))));
 
 1046: 
-
+1047:             for i in range(n_samples):
+
+1047:             for i in range(n_samples):
        __pyx_t_9 = __pyx_v_n_samples;
         __pyx_t_13 = __pyx_t_9;
         for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) {
           __pyx_v_i = __pyx_t_14;
-
+1048:                 node = self.nodes
+
+1048:                 node = self.nodes
          __pyx_t_15 = __pyx_v_self->nodes;
           __pyx_v_node = __pyx_t_15;
 
 1049: 
-
+1050:                 for k in range(X_indptr[i], X_indptr[i + 1]):
+
+1050:                 for k in range(X_indptr[i], X_indptr[i + 1]):
          __pyx_t_16 = (__pyx_v_X_indptr[(__pyx_v_i + 1)]);
           __pyx_t_17 = __pyx_t_16;
           for (__pyx_t_18 = (__pyx_v_X_indptr[__pyx_v_i]); __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) {
             __pyx_v_k = __pyx_t_18;
-
+1051:                     feature_to_sample[X_indices[k]] = i
+
+1051:                     feature_to_sample[X_indices[k]] = i
            (__pyx_v_feature_to_sample[(__pyx_v_X_indices[__pyx_v_k])]) = __pyx_v_i;
-
+1052:                     X_sample[X_indices[k]] = X_data[k]
+
+1052:                     X_sample[X_indices[k]] = X_data[k]
            (__pyx_v_X_sample[(__pyx_v_X_indices[__pyx_v_k])]) = (__pyx_v_X_data[__pyx_v_k]);
           }
 
 1053: 
-
 1054:                 # While node not a leaf
-
+1055:                 while node.left_child != _TREE_LEAF:
+
 1054:                 # While node not a leaf
+
+1055:                 while node.left_child != _TREE_LEAF:
          while (1) {
             __pyx_t_3 = ((__pyx_v_node->left_child != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
             if (!__pyx_t_3) break;
-
 1056:                     # ... and node.right_child != _TREE_LEAF:
-
+1057:                     if feature_to_sample[node.feature] == i:
+
 1056:                     # ... and node.right_child != _TREE_LEAF:
+
+1057:                     if feature_to_sample[node.feature] == i:
            __pyx_t_3 = (((__pyx_v_feature_to_sample[__pyx_v_node->feature]) == __pyx_v_i) != 0);
             if (__pyx_t_3) {
 /* … */
               goto __pyx_L14;
             }
-
+1058:                         feature_value = X_sample[node.feature]
+
+1058:                         feature_value = X_sample[node.feature]
              __pyx_v_feature_value = (__pyx_v_X_sample[__pyx_v_node->feature]);
 
 1059: 
-
 1060:                     else:
-
+1061:                         feature_value = 0.
+
 1060:                     else:
+
+1061:                         feature_value = 0.
            /*else*/ {
               __pyx_v_feature_value = 0.;
             }
             __pyx_L14:;
 
 1062: 
-
+1063:                     if feature_value <= node.threshold:
+
+1063:                     if feature_value <= node.threshold:
            __pyx_t_3 = ((__pyx_v_feature_value <= __pyx_v_node->threshold) != 0);
             if (__pyx_t_3) {
 /* … */
               goto __pyx_L15;
             }
-
+1064:                         node = &self.nodes[node.left_child]
+
+1064:                         node = &self.nodes[node.left_child]
              __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->left_child]));
-
 1065:                     else:
-
+1066:                         node = &self.nodes[node.right_child]
+
 1065:                     else:
+
+1066:                         node = &self.nodes[node.right_child]
            /*else*/ {
               __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->right_child]));
             }
             __pyx_L15:;
           }
 
 1067: 
-
+1068:                 out_ptr[i] = <SIZE_t>(node - self.nodes)  # node offset
+
+1068:                 out_ptr[i] = <SIZE_t>(node - self.nodes)  # node offset
          (__pyx_v_out_ptr[__pyx_v_i]) = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)(__pyx_v_node - __pyx_v_self->nodes));
         }
 
 1069: 
-
 1070:             # Free auxiliary arrays
-
+1071:             free(X_sample)
+
 1070:             # Free auxiliary arrays
+
+1071:             free(X_sample)
        free(__pyx_v_X_sample);
-
+1072:             free(feature_to_sample)
+
+1072:             free(feature_to_sample)
        free(__pyx_v_feature_to_sample);
       }
 
 1073: 
-
+1074:         return out
+
+1074:         return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
   __Pyx_INCREF(((PyObject *)__pyx_v_out));
   __pyx_r = ((PyArrayObject *)__pyx_v_out);
   goto __pyx_L0;
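
The comments on lines 1036-1038 describe the key trick of the sparse path: `feature_to_sample[f]` stores the index of the last sample whose row had a nonzero at feature f, so "is feature f nonzero in the current row?" becomes a single equality test, with no per-row densification. A toy sketch under those assumptions:

```python
import numpy as np
from scipy.sparse import csr_matrix

X = csr_matrix(np.array([[0.0, 2.0],
                         [3.0, 0.0]], dtype=np.float32))
n_samples, n_features = X.shape
feature_to_sample = np.full(n_features, -1)
X_sample = np.zeros(n_features, dtype=np.float32)
for i in range(n_samples):
    # scatter row i's nonzeros (lines 1050-1052)
    for k in range(X.indptr[i], X.indptr[i + 1]):
        feature_to_sample[X.indices[k]] = i
        X_sample[X.indices[k]] = X.data[k]
    # during traversal (lines 1057-1061): read feature f of the current row
    f = 1
    feature_value = X_sample[f] if feature_to_sample[f] == i else 0.0
    print(i, feature_value)  # row 0 -> 2.0; row 1 -> 0.0 (stale entry ignored)
```
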
 
 1075: 
-
+1076:     cpdef object decision_path(self, object X):
+
+1076:     cpdef object decision_path(self, object X):
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_17decision_path(PyObject *__pyx_v_self, PyObject *__pyx_v_X); /*proto*/
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree_decision_path(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X, int __pyx_skip_dispatch) {
   PyObject *__pyx_r = NULL;
@@ -7290,8 +7364,8 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1077:         """Finds the decision path (=node) for each sample in X."""
-
+1078:         if issparse(X):
+
 1077:         """Finds the decision path (=node) for each sample in X."""
+
+1078:         if issparse(X):
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_issparse); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1078, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = NULL;
@@ -7314,15 +7388,15 @@
   if (__pyx_t_5) {
 /* … */
   }
-
+1079:             return self._decision_path_sparse_csr(X)
+
+1079:             return self._decision_path_sparse_csr(X)
    __Pyx_XDECREF(__pyx_r);
     __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__decision_path_sparse_csr(__pyx_v_self, __pyx_v_X); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1079, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __pyx_r = __pyx_t_1;
     __pyx_t_1 = 0;
     goto __pyx_L0;
-
 1080:         else:
-
+1081:             return self._decision_path_dense(X)
+
 1080:         else:
+
+1081:             return self._decision_path_dense(X)
  /*else*/ {
     __Pyx_XDECREF(__pyx_r);
     __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__decision_path_dense(__pyx_v_self, __pyx_v_X); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1081, __pyx_L1_error)
@@ -7332,7 +7406,7 @@
     goto __pyx_L0;
   }
 
 1082: 
-
+1083:     cdef inline object _decision_path_dense(self, object X):
+
+1083:     cdef inline object _decision_path_dense(self, object X):
static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__decision_path_dense(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X) {
   __Pyx_memviewslice __pyx_v_X_ndarray = { 0, 0, { 0 }, { 0 }, { 0 } };
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_n_samples;
@@ -7400,27 +7474,27 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1084:         """Finds the decision path (=node) for each sample in X."""
+
 1084:         """Finds the decision path (=node) for each sample in X."""
 1085: 
-
 1086:         # Check input
-
+1087:         if not isinstance(X, np.ndarray):
+
 1086:         # Check input
+
+1087:         if not isinstance(X, np.ndarray):
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_X, __pyx_ptype_5numpy_ndarray); 
   __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
   if (unlikely(__pyx_t_2)) {
 /* … */
   }
-
+1088:             raise ValueError("X should be in np.ndarray format, got %s"
+
+1088:             raise ValueError("X should be in np.ndarray format, got %s"
    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1088, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __Pyx_Raise(__pyx_t_4, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
     __PYX_ERR(0, 1088, __pyx_L1_error)
-
+1089:                              % type(X))
+
+1089:                              % type(X))
    __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_should_be_in_np_ndarray_format, ((PyObject *)Py_TYPE(__pyx_v_X))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1089, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_3);
 
 1090: 
-
+1091:         if X.dtype != DTYPE:
+
+1091:         if X.dtype != DTYPE:
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1091, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1091, __pyx_L1_error)
@@ -7433,7 +7507,7 @@
   if (unlikely(__pyx_t_2)) {
 /* … */
   }
-
+1092:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
+
+1092:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1092, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_dtype_should_be_np_float32_got, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1092, __pyx_L1_error)
@@ -7446,13 +7520,13 @@
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
     __PYX_ERR(0, 1092, __pyx_L1_error)
 
 1093: 
-
 1094:         # Extract input
-
+1095:         cdef const DTYPE_t[:, :] X_ndarray = X
+
 1094:         # Extract input
+
+1095:         cdef const DTYPE_t[:, :] X_ndarray = X
  __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_13stpredictions_6models_3OK3_5_tree_DTYPE_t__const__(__pyx_v_X, 0); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 1095, __pyx_L1_error)
   __pyx_v_X_ndarray = __pyx_t_6;
   __pyx_t_6.memview = NULL;
   __pyx_t_6.data = NULL;
-
+1096:         cdef SIZE_t n_samples = X.shape[0]
+
+1096:         cdef SIZE_t n_samples = X.shape[0]
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1096, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1096, __pyx_L1_error)
@@ -7462,8 +7536,8 @@
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __pyx_v_n_samples = __pyx_t_7;
 
 1097: 
-
 1098:         # Initialize output
-
+1099:         cdef np.ndarray[SIZE_t] indptr = np.zeros(n_samples + 1, dtype=np.intp)
+
 1098:         # Initialize output
+
+1099:         cdef np.ndarray[SIZE_t] indptr = np.zeros(n_samples + 1, dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1099, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1099, __pyx_L1_error)
@@ -7503,7 +7577,7 @@
   __pyx_t_10 = 0;
   __pyx_v_indptr = ((PyArrayObject *)__pyx_t_9);
   __pyx_t_9 = 0;
-
+1100:         cdef SIZE_t* indptr_ptr = <SIZE_t*> indptr.data
+
+1100:         cdef SIZE_t* indptr_ptr = <SIZE_t*> indptr.data
   /* "stpredictions/models/OK3/_tree.pyx":1100
  *         # Initialize output
@@ -7514,7 +7588,7 @@
  */
   __pyx_v_indptr_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *)__pyx_v_indptr->data);
 
 1101: 
-
+1102:         cdef np.ndarray[SIZE_t] indices = np.zeros(n_samples *
+
+1102:         cdef np.ndarray[SIZE_t] indices = np.zeros(n_samples *
  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1102, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_9);
   __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1102, __pyx_L1_error)
@@ -7548,8 +7622,8 @@
   __pyx_t_11 = 0;
   __pyx_v_indices = ((PyArrayObject *)__pyx_t_8);
   __pyx_t_8 = 0;
-
 1103:                                                    (1 + self.max_depth),
-
+1104:                                                    dtype=np.intp)
+
 1103:                                                    (1 + self.max_depth),
+
+1104:                                                    dtype=np.intp)
  __pyx_t_9 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1104, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_9);
   __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1104, __pyx_L1_error)
@@ -7559,16 +7633,16 @@
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_dtype, __pyx_t_8) < 0) __PYX_ERR(0, 1104, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-
+1105:         cdef SIZE_t* indices_ptr = <SIZE_t*> indices.data
+
+1105:         cdef SIZE_t* indices_ptr = <SIZE_t*> indices.data
  __pyx_v_indices_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *)__pyx_v_indices->data);
 
 1106: 
-
 1107:         # Initialize auxiliary data-structure
-
+1108:         cdef Node* node = NULL
+
 1107:         # Initialize auxiliary data-structure
+
+1108:         cdef Node* node = NULL
  __pyx_v_node = NULL;
-
+1109:         cdef SIZE_t i = 0
+
+1109:         cdef SIZE_t i = 0
  __pyx_v_i = 0;
 
 1110: 
-
+1111:         with nogil:
+
+1111:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -7588,30 +7662,30 @@
         __pyx_L7:;
       }
   }
-
+1112:             for i in range(n_samples):
+
+1112:             for i in range(n_samples):
        __pyx_t_7 = __pyx_v_n_samples;
         __pyx_t_12 = __pyx_t_7;
         for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
           __pyx_v_i = __pyx_t_13;
-
+1113:                 node = self.nodes
+
+1113:                 node = self.nodes
          __pyx_t_14 = __pyx_v_self->nodes;
           __pyx_v_node = __pyx_t_14;
-
+1114:                 indptr_ptr[i + 1] = indptr_ptr[i]
+
+1114:                 indptr_ptr[i + 1] = indptr_ptr[i]
          (__pyx_v_indptr_ptr[(__pyx_v_i + 1)]) = (__pyx_v_indptr_ptr[__pyx_v_i]);
 
 1115: 
-
 1116:                 # Add all external nodes
-
+1117:                 while node.left_child != _TREE_LEAF:
+
 1116:                 # Add all external nodes
+
+1117:                 while node.left_child != _TREE_LEAF:
          while (1) {
             __pyx_t_2 = ((__pyx_v_node->left_child != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
             if (!__pyx_t_2) break;
-
 1118:                     # ... and node.right_child != _TREE_LEAF:
-
+1119:                     indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
+
 1118:                     # ... and node.right_child != _TREE_LEAF:
+
+1119:                     indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
            (__pyx_v_indices_ptr[(__pyx_v_indptr_ptr[(__pyx_v_i + 1)])]) = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)(__pyx_v_node - __pyx_v_self->nodes));
-
+1120:                     indptr_ptr[i + 1] += 1
+
+1120:                     indptr_ptr[i + 1] += 1
            __pyx_t_15 = (__pyx_v_i + 1);
             (__pyx_v_indptr_ptr[__pyx_t_15]) = ((__pyx_v_indptr_ptr[__pyx_t_15]) + 1);
 
 1121: 
-
+1122:                     if X_ndarray[i, node.feature] <= node.threshold:
+
+1122:                     if X_ndarray[i, node.feature] <= node.threshold:
            __pyx_t_16 = __pyx_v_i;
             __pyx_t_17 = __pyx_v_node->feature;
             __pyx_t_2 = (((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X_ndarray.data + __pyx_t_16 * __pyx_v_X_ndarray.strides[0]) ) + __pyx_t_17 * __pyx_v_X_ndarray.strides[1]) ))) <= __pyx_v_node->threshold) != 0);
@@ -7619,26 +7693,26 @@
 /* … */
               goto __pyx_L12;
             }
-
+1123:                         node = &self.nodes[node.left_child]
+
+1123:                         node = &self.nodes[node.left_child]
              __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->left_child]));
-
 1124:                     else:
-
+1125:                         node = &self.nodes[node.right_child]
+
 1124:                     else:
+
+1125:                         node = &self.nodes[node.right_child]
            /*else*/ {
               __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->right_child]));
             }
             __pyx_L12:;
           }
 
 1126: 
-
 1127:                 # Add the leaf node
-
+1128:                 indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
+
 1127:                 # Add the leaf node
+
+1128:                 indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
          (__pyx_v_indices_ptr[(__pyx_v_indptr_ptr[(__pyx_v_i + 1)])]) = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)(__pyx_v_node - __pyx_v_self->nodes));
-
+1129:                 indptr_ptr[i + 1] += 1
+
+1129:                 indptr_ptr[i + 1] += 1
          __pyx_t_15 = (__pyx_v_i + 1);
           (__pyx_v_indptr_ptr[__pyx_t_15]) = ((__pyx_v_indptr_ptr[__pyx_t_15]) + 1);
         }
       }
 
 1130: 
-
+1131:         indices = indices[:indptr[n_samples]]
+
+1131:         indices = indices[:indptr[n_samples]]
  __pyx_t_17 = __pyx_v_n_samples;
   __pyx_t_8 = __Pyx_PyInt_From_Py_intptr_t((*__Pyx_BufPtrStrided1d(__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *, __pyx_pybuffernd_indptr.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_indptr.diminfo[0].strides))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1131, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_8);
@@ -7670,7 +7744,7 @@
   __pyx_t_11 = 0;
   __Pyx_DECREF_SET(__pyx_v_indices, ((PyArrayObject *)__pyx_t_8));
   __pyx_t_8 = 0;
-
+1132:         cdef np.ndarray[SIZE_t] data = np.ones(shape=len(indices),
+
+1132:         cdef np.ndarray[SIZE_t] data = np.ones(shape=len(indices),
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1132, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_8);
   __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_ones); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1132, __pyx_L1_error)
@@ -7701,7 +7775,7 @@
   __pyx_t_23 = 0;
   __pyx_v_data = ((PyArrayObject *)__pyx_t_3);
   __pyx_t_3 = 0;
-
+1133:                                                dtype=np.intp)
+
+1133:                                                dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1133, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_intp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1133, __pyx_L1_error)
@@ -7709,7 +7783,7 @@
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 1132, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
+1134:         out = csr_matrix((data, indices, indptr),
+
+1134:         out = csr_matrix((data, indices, indptr),
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_csr_matrix); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1134, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1134, __pyx_L1_error)
@@ -7736,7 +7810,7 @@
   __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
   __pyx_v_out = __pyx_t_24;
   __pyx_t_24 = 0;
-
+1135:                          shape=(n_samples, self.node_count))
+
+1135:                          shape=(n_samples, self.node_count))
  __pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1135, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_8);
   __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_n_samples); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1135, __pyx_L1_error)
@@ -7754,13 +7828,13 @@
   if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_shape, __pyx_t_24) < 0) __PYX_ERR(0, 1135, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_24); __pyx_t_24 = 0;
 
 1136: 
-
+1137:         return out
+
+1137:         return out
  __Pyx_XDECREF(__pyx_r);
   __Pyx_INCREF(__pyx_v_out);
   __pyx_r = __pyx_v_out;
   goto __pyx_L0;
 
 1138: 
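Lines 1131-1137 above trim `indices` down to the entries actually written, pair it with a vector of ones, and hand the `(data, indices, indptr)` triple to `csr_matrix`. A minimal standalone sketch of that assembly, with toy traversal results assumed purely for illustration:

import numpy as np
from scipy.sparse import csr_matrix

# Hypothetical traversal result: sample 0 visited nodes 0, 1, 3 and
# sample 1 visited nodes 0, 2 in a tree with 5 nodes.
n_samples, node_count = 2, 5
indptr = np.array([0, 3, 5], dtype=np.intp)         # row i spans indptr[i]:indptr[i+1]
indices = np.array([0, 1, 3, 0, 2], dtype=np.intp)  # visited node ids
data = np.ones(len(indices), dtype=np.intp)         # one 1 per visited node

out = csr_matrix((data, indices, indptr), shape=(n_samples, node_count))
print(out.toarray())
# [[1 1 0 1 0]
#  [1 0 1 0 0]]
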
-
+1139:     cdef inline object _decision_path_sparse_csr(self, object X):
+
+1139:     cdef inline object _decision_path_sparse_csr(self, object X):
static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__decision_path_sparse_csr(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, PyObject *__pyx_v_X) {
   PyArrayObject *__pyx_v_X_data_ndarray = 0;
   PyArrayObject *__pyx_v_X_indices_ndarray = 0;
@@ -7863,10 +7937,10 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1140:         """Finds the decision path (=node) for each sample in X."""
+
 1140:         """Finds the decision path (=node) for each sample in X."""
 1141: 
-
 1142:         # Check input
-
+1143:         if not isinstance(X, csr_matrix):
+
 1142:         # Check input
+
+1143:         if not isinstance(X, csr_matrix):
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_csr_matrix); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1143, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = PyObject_IsInstance(__pyx_v_X, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 1143, __pyx_L1_error)
@@ -7875,18 +7949,18 @@
   if (unlikely(__pyx_t_3)) {
 /* … */
   }
-
+1144:             raise ValueError("X should be in csr_matrix format, got %s"
+
+1144:             raise ValueError("X should be in csr_matrix format, got %s"
    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1144, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     __Pyx_Raise(__pyx_t_4, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
     __PYX_ERR(0, 1144, __pyx_L1_error)
-
+1145:                              % type(X))
+
+1145:                              % type(X))
    __pyx_t_1 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_should_be_in_csr_matrix_format, ((PyObject *)Py_TYPE(__pyx_v_X))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1145, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
 
 1146: 
-
+1147:         if X.dtype != DTYPE:
+
+1147:         if X.dtype != DTYPE:
  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1147, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1147, __pyx_L1_error)
@@ -7899,7 +7973,7 @@
   if (unlikely(__pyx_t_3)) {
 /* … */
   }
-
+1148:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
+
+1148:             raise ValueError("X.dtype should be np.float32, got %s" % X.dtype)
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1148, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_1 = __Pyx_PyString_FormatSafe(__pyx_kp_s_X_dtype_should_be_np_float32_got, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1148, __pyx_L1_error)
@@ -7912,8 +7986,8 @@
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
     __PYX_ERR(0, 1148, __pyx_L1_error)
 
 1149: 
-
 1150:         # Extract input
-
+1151:         cdef np.ndarray[ndim=1, dtype=DTYPE_t] X_data_ndarray = X.data
+
 1150:         # Extract input
+
+1151:         cdef np.ndarray[ndim=1, dtype=DTYPE_t] X_data_ndarray = X.data
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1151, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1151, __pyx_L1_error)
@@ -7929,7 +8003,7 @@
   __pyx_t_6 = 0;
   __pyx_v_X_data_ndarray = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
-
+1152:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indices_ndarray  = X.indices
+
+1152:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indices_ndarray  = X.indices
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indices); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1152, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1152, __pyx_L1_error)
@@ -7945,7 +8019,7 @@
   __pyx_t_7 = 0;
   __pyx_v_X_indices_ndarray = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
-
+1153:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indptr_ndarray  = X.indptr
+
+1153:         cdef np.ndarray[ndim=1, dtype=INT32_t] X_indptr_ndarray  = X.indptr
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_indptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1153, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1153, __pyx_L1_error)
@@ -7962,14 +8036,14 @@
   __pyx_v_X_indptr_ndarray = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
 
 1154: 
-
+1155:         cdef DTYPE_t* X_data = <DTYPE_t*>X_data_ndarray.data
+
+1155:         cdef DTYPE_t* X_data = <DTYPE_t*>X_data_ndarray.data
  __pyx_v_X_data = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_DTYPE_t *)__pyx_v_X_data_ndarray->data);
-
+1156:         cdef INT32_t* X_indices = <INT32_t*>X_indices_ndarray.data
+
+1156:         cdef INT32_t* X_indices = <INT32_t*>X_indices_ndarray.data
  __pyx_v_X_indices = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_INT32_t *)__pyx_v_X_indices_ndarray->data);
-
+1157:         cdef INT32_t* X_indptr = <INT32_t*>X_indptr_ndarray.data
+
+1157:         cdef INT32_t* X_indptr = <INT32_t*>X_indptr_ndarray.data
  __pyx_v_X_indptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_INT32_t *)__pyx_v_X_indptr_ndarray->data);
 
 1158: 
-
+1159:         cdef SIZE_t n_samples = X.shape[0]
+
+1159:         cdef SIZE_t n_samples = X.shape[0]
  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1159, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1159, __pyx_L1_error)
@@ -7978,7 +8052,7 @@
   __pyx_t_9 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_9 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1159, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_n_samples = __pyx_t_9;
-
+1160:         cdef SIZE_t n_features = X.shape[1]
+
+1160:         cdef SIZE_t n_features = X.shape[1]
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1160, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1160, __pyx_L1_error)
@@ -7988,8 +8062,8 @@
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   __pyx_v_n_features = __pyx_t_9;
 
 1161: 
-
 1162:         # Initialize output
-
+1163:         cdef np.ndarray[SIZE_t] indptr = np.zeros(n_samples + 1, dtype=np.intp)
+
 1162:         # Initialize output
+
+1163:         cdef np.ndarray[SIZE_t] indptr = np.zeros(n_samples + 1, dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1163, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1163, __pyx_L1_error)
@@ -8029,7 +8103,7 @@
   __pyx_t_12 = 0;
   __pyx_v_indptr = ((PyArrayObject *)__pyx_t_11);
   __pyx_t_11 = 0;
-
+1164:         cdef SIZE_t* indptr_ptr = <SIZE_t*> indptr.data
+
+1164:         cdef SIZE_t* indptr_ptr = <SIZE_t*> indptr.data
   /* "stpredictions/models/OK3/_tree.pyx":1164
  *         # Initialize output
@@ -8040,7 +8114,7 @@
  */
   __pyx_v_indptr_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *)__pyx_v_indptr->data);
 
 1165: 
-
+1166:         cdef np.ndarray[SIZE_t] indices = np.zeros(n_samples *
+
+1166:         cdef np.ndarray[SIZE_t] indices = np.zeros(n_samples *
  __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_n_s_np); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1166, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_11);
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1166, __pyx_L1_error)
@@ -8074,8 +8148,8 @@
   __pyx_t_13 = 0;
   __pyx_v_indices = ((PyArrayObject *)__pyx_t_10);
   __pyx_t_10 = 0;
-
 1167:                                                    (1 + self.max_depth),
-
+1168:                                                    dtype=np.intp)
+
 1167:                                                    (1 + self.max_depth),
+
+1168:                                                    dtype=np.intp)
  __pyx_t_11 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1168, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_11);
   __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1168, __pyx_L1_error)
@@ -8085,33 +8159,33 @@
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_dtype, __pyx_t_10) < 0) __PYX_ERR(0, 1168, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-
+1169:         cdef SIZE_t* indices_ptr = <SIZE_t*> indices.data
+
+1169:         cdef SIZE_t* indices_ptr = <SIZE_t*> indices.data
  __pyx_v_indices_ptr = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *)__pyx_v_indices->data);
 
 1170: 
-
 1171:         # Initialize auxiliary data-structure
-
+1172:         cdef DTYPE_t feature_value = 0.
+
 1171:         # Initialize auxiliary data-structure
+
+1172:         cdef DTYPE_t feature_value = 0.
  __pyx_v_feature_value = 0.;
-
+1173:         cdef Node* node = NULL
+
+1173:         cdef Node* node = NULL
  __pyx_v_node = NULL;
-
+1174:         cdef DTYPE_t* X_sample = NULL
+
+1174:         cdef DTYPE_t* X_sample = NULL
  __pyx_v_X_sample = NULL;
-
+1175:         cdef SIZE_t i = 0
+
+1175:         cdef SIZE_t i = 0
  __pyx_v_i = 0;
-
+1176:         cdef INT32_t k = 0
+
+1176:         cdef INT32_t k = 0
  __pyx_v_k = 0;
 
 1177: 
-
 1178:         # feature_to_sample as a data structure records the last seen sample
-
 1179:         # for each feature; functionally, it is an efficient way to identify
-
 1180:         # which features are nonzero in the present sample.
-
+1181:         cdef SIZE_t* feature_to_sample = NULL
+
 1178:         # feature_to_sample as a data structure records the last seen sample
+
 1179:         # for each feature; functionally, it is an efficient way to identify
+
 1180:         # which features are nonzero in the present sample.
+
+1181:         cdef SIZE_t* feature_to_sample = NULL
  __pyx_v_feature_to_sample = NULL;
 
 1182: 
-
+1183:         safe_realloc(&X_sample, n_features)
+
+1183:         safe_realloc(&X_sample, n_features)
  __pyx_fuse_0__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_X_sample), __pyx_v_n_features); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1183, __pyx_L1_error)
-
+1184:         safe_realloc(&feature_to_sample, n_features)
+
+1184:         safe_realloc(&feature_to_sample, n_features)
  __pyx_fuse_1__pyx_f_7sklearn_4tree_6_utils_safe_realloc((&__pyx_v_feature_to_sample), __pyx_v_n_features); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1184, __pyx_L1_error)
 
 1185: 
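The `feature_to_sample` trick annotated above (lines 1178-1184) avoids re-zeroing `X_sample` between rows: every write records which sample last touched a feature, so a stale slot is simply read as 0. A plain-Python sketch over hypothetical CSR inputs (not the generated code):

import numpy as np

X_indptr = np.array([0, 2, 3], dtype=np.int32)    # two samples
X_indices = np.array([1, 4, 1], dtype=np.int32)   # nonzero feature ids
X_data = np.array([0.5, 2.0, 7.0], dtype=np.float32)
n_features = 5

X_sample = np.empty(n_features, dtype=np.float32)
feature_to_sample = np.full(n_features, -1, dtype=np.intp)

def fetch(feature, i):
    # Mirrors lines 1204-1208: a slot counts only if this sample wrote it.
    return X_sample[feature] if feature_to_sample[feature] == i else 0.0

for i in range(len(X_indptr) - 1):
    for k in range(X_indptr[i], X_indptr[i + 1]):
        feature_to_sample[X_indices[k]] = i
        X_sample[X_indices[k]] = X_data[k]
    print(fetch(1, i), fetch(4, i))
# sample 0: 0.5 2.0
# sample 1: 7.0 0.0   <- feature 4 is stale for sample 1, so it reads as 0
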
-
+1186:         with nogil:
+
+1186:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -8131,92 +8205,92 @@
         __pyx_L7:;
       }
   }
-
+1187:             memset(feature_to_sample, -1, n_features * sizeof(SIZE_t))
+
+1187:             memset(feature_to_sample, -1, n_features * sizeof(SIZE_t))
        (void)(memset(__pyx_v_feature_to_sample, -1, (__pyx_v_n_features * (sizeof(__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)))));
 
 1188: 
-
+1189:             for i in range(n_samples):
+
+1189:             for i in range(n_samples):
        __pyx_t_9 = __pyx_v_n_samples;
         __pyx_t_14 = __pyx_t_9;
         for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
           __pyx_v_i = __pyx_t_15;
-
+1190:                 node = self.nodes
+
+1190:                 node = self.nodes
          __pyx_t_16 = __pyx_v_self->nodes;
           __pyx_v_node = __pyx_t_16;
-
+1191:                 indptr_ptr[i + 1] = indptr_ptr[i]
+
+1191:                 indptr_ptr[i + 1] = indptr_ptr[i]
          (__pyx_v_indptr_ptr[(__pyx_v_i + 1)]) = (__pyx_v_indptr_ptr[__pyx_v_i]);
 
 1192: 
-
+1193:                 for k in range(X_indptr[i], X_indptr[i + 1]):
+
+1193:                 for k in range(X_indptr[i], X_indptr[i + 1]):
          __pyx_t_17 = (__pyx_v_X_indptr[(__pyx_v_i + 1)]);
           __pyx_t_18 = __pyx_t_17;
           for (__pyx_t_19 = (__pyx_v_X_indptr[__pyx_v_i]); __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) {
             __pyx_v_k = __pyx_t_19;
-
+1194:                     feature_to_sample[X_indices[k]] = i
+
+1194:                     feature_to_sample[X_indices[k]] = i
            (__pyx_v_feature_to_sample[(__pyx_v_X_indices[__pyx_v_k])]) = __pyx_v_i;
-
+1195:                     X_sample[X_indices[k]] = X_data[k]
+
+1195:                     X_sample[X_indices[k]] = X_data[k]
            (__pyx_v_X_sample[(__pyx_v_X_indices[__pyx_v_k])]) = (__pyx_v_X_data[__pyx_v_k]);
           }
 
 1196: 
-
 1197:                 # While node not a leaf
-
+1198:                 while node.left_child != _TREE_LEAF:
+
 1197:                 # While node not a leaf
+
+1198:                 while node.left_child != _TREE_LEAF:
          while (1) {
             __pyx_t_3 = ((__pyx_v_node->left_child != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
             if (!__pyx_t_3) break;
-
 1199:                     # ... and node.right_child != _TREE_LEAF:
+
 1199:                     # ... and node.right_child != _TREE_LEAF:
 1200: 
-
+1201:                     indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
+
+1201:                     indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
            (__pyx_v_indices_ptr[(__pyx_v_indptr_ptr[(__pyx_v_i + 1)])]) = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)(__pyx_v_node - __pyx_v_self->nodes));
-
+1202:                     indptr_ptr[i + 1] += 1
+
+1202:                     indptr_ptr[i + 1] += 1
            __pyx_t_20 = (__pyx_v_i + 1);
             (__pyx_v_indptr_ptr[__pyx_t_20]) = ((__pyx_v_indptr_ptr[__pyx_t_20]) + 1);
 
 1203: 
-
+1204:                     if feature_to_sample[node.feature] == i:
+
+1204:                     if feature_to_sample[node.feature] == i:
            __pyx_t_3 = (((__pyx_v_feature_to_sample[__pyx_v_node->feature]) == __pyx_v_i) != 0);
             if (__pyx_t_3) {
 /* … */
               goto __pyx_L14;
             }
-
+1205:                         feature_value = X_sample[node.feature]
+
+1205:                         feature_value = X_sample[node.feature]
              __pyx_v_feature_value = (__pyx_v_X_sample[__pyx_v_node->feature]);
 
 1206: 
-
 1207:                     else:
-
+1208:                         feature_value = 0.
+
 1207:                     else:
+
+1208:                         feature_value = 0.
            /*else*/ {
               __pyx_v_feature_value = 0.;
             }
             __pyx_L14:;
 
 1209: 
-
+1210:                     if feature_value <= node.threshold:
+
+1210:                     if feature_value <= node.threshold:
            __pyx_t_3 = ((__pyx_v_feature_value <= __pyx_v_node->threshold) != 0);
             if (__pyx_t_3) {
 /* … */
               goto __pyx_L15;
             }
-
+1211:                         node = &self.nodes[node.left_child]
+
+1211:                         node = &self.nodes[node.left_child]
              __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->left_child]));
-
 1212:                     else:
-
+1213:                         node = &self.nodes[node.right_child]
+
 1212:                     else:
+
+1213:                         node = &self.nodes[node.right_child]
            /*else*/ {
               __pyx_v_node = (&(__pyx_v_self->nodes[__pyx_v_node->right_child]));
             }
             __pyx_L15:;
           }
 
 1214: 
-
 1215:                 # Add the leaf node
-
+1216:                 indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
+
 1215:                 # Add the leaf node
+
+1216:                 indices_ptr[indptr_ptr[i + 1]] = <SIZE_t>(node - self.nodes)
          (__pyx_v_indices_ptr[(__pyx_v_indptr_ptr[(__pyx_v_i + 1)])]) = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)(__pyx_v_node - __pyx_v_self->nodes));
-
+1217:                 indptr_ptr[i + 1] += 1
+
+1217:                 indptr_ptr[i + 1] += 1
          __pyx_t_20 = (__pyx_v_i + 1);
           (__pyx_v_indptr_ptr[__pyx_t_20]) = ((__pyx_v_indptr_ptr[__pyx_t_20]) + 1);
         }
 
 1218: 
-
 1219:             # Free auxiliary arrays
-
+1220:             free(X_sample)
+
 1219:             # Free auxiliary arrays
+
+1220:             free(X_sample)
        free(__pyx_v_X_sample);
-
+1221:             free(feature_to_sample)
+
+1221:             free(feature_to_sample)
        free(__pyx_v_feature_to_sample);
       }
 
 1222: 
-
+1223:         indices = indices[:indptr[n_samples]]
+
+1223:         indices = indices[:indptr[n_samples]]
  __pyx_t_21 = __pyx_v_n_samples;
   __pyx_t_10 = __Pyx_PyInt_From_Py_intptr_t((*__Pyx_BufPtrStrided1d(__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *, __pyx_pybuffernd_indptr.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_indptr.diminfo[0].strides))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1223, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_10);
@@ -8248,7 +8322,7 @@
   __pyx_t_13 = 0;
   __Pyx_DECREF_SET(__pyx_v_indices, ((PyArrayObject *)__pyx_t_10));
   __pyx_t_10 = 0;
-
+1224:         cdef np.ndarray[SIZE_t] data = np.ones(shape=len(indices),
+
+1224:         cdef np.ndarray[SIZE_t] data = np.ones(shape=len(indices),
  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1224, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_10);
   __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_ones); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 1224, __pyx_L1_error)
@@ -8279,7 +8353,7 @@
   __pyx_t_27 = 0;
   __pyx_v_data = ((PyArrayObject *)__pyx_t_5);
   __pyx_t_5 = 0;
-
+1225:                                                dtype=np.intp)
+
+1225:                                                dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1225, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_intp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1225, __pyx_L1_error)
@@ -8287,7 +8361,7 @@
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   if (PyDict_SetItem(__pyx_t_10, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 1224, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-
+1226:         out = csr_matrix((data, indices, indptr),
+
+1226:         out = csr_matrix((data, indices, indptr),
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_csr_matrix); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1226, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_10 = PyTuple_New(3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1226, __pyx_L1_error)
@@ -8314,7 +8388,7 @@
   __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
   __pyx_v_out = __pyx_t_28;
   __pyx_t_28 = 0;
-
+1227:                          shape=(n_samples, self.node_count))
+
+1227:                          shape=(n_samples, self.node_count))
  __pyx_t_10 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 1227, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_10);
   __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_n_samples); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1227, __pyx_L1_error)
@@ -8332,14 +8406,14 @@
   if (PyDict_SetItem(__pyx_t_10, __pyx_n_s_shape, __pyx_t_28) < 0) __PYX_ERR(0, 1227, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_28); __pyx_t_28 = 0;
 
 1228: 
-
+1229:         return out
+
+1229:         return out
  __Pyx_XDECREF(__pyx_r);
   __Pyx_INCREF(__pyx_v_out);
   __pyx_r = __pyx_v_out;
   goto __pyx_L0;
 
 1230: 
 1231: 
-
+1232:     cpdef compute_feature_importances(self, normalize=True):
+
+1232:     cpdef compute_feature_importances(self, normalize=True):
static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_4Tree_19compute_feature_importances(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree_compute_feature_importances(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self, int __pyx_skip_dispatch, struct __pyx_opt_args_13stpredictions_6models_3OK3_5_tree_4Tree_compute_feature_importances *__pyx_optional_args) {
   PyObject *__pyx_v_normalize = ((PyObject *)Py_True);
@@ -8516,22 +8590,22 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1233:         """Computes the importance of each feature (aka variable)."""
-
 1234:         cdef Node* left
-
 1235:         cdef Node* right
-
+1236:         cdef Node* nodes = self.nodes
+
 1233:         """Computes the importance of each feature (aka variable)."""
+
 1234:         cdef Node* left
+
 1235:         cdef Node* right
+
+1236:         cdef Node* nodes = self.nodes
  __pyx_t_5 = __pyx_v_self->nodes;
   __pyx_v_nodes = __pyx_t_5;
-
+1237:         cdef Node* node = nodes
+
+1237:         cdef Node* node = nodes
  __pyx_v_node = __pyx_v_nodes;
-
+1238:         cdef Node* end_node = node + self.node_count
+
+1238:         cdef Node* end_node = node + self.node_count
  __pyx_v_end_node = (__pyx_v_node + __pyx_v_self->node_count);
 
 1239: 
-
+1240:         cdef double normalizer = 0.
+
+1240:         cdef double normalizer = 0.
  __pyx_v_normalizer = 0.;
 
 1241: 
-
 1242:         cdef np.ndarray[np.float64_t, ndim=1] importances
-
+1243:         importances = np.zeros((self.n_features,))
+
 1242:         cdef np.ndarray[np.float64_t, ndim=1] importances
+
+1243:         importances = np.zeros((self.n_features,))
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1243, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1243, __pyx_L1_error)
@@ -8582,10 +8656,10 @@
   __pyx_t_6 = 0;
   __pyx_v_importances = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
-
+1244:         cdef DOUBLE_t* importance_data = <DOUBLE_t*>importances.data
+
+1244:         cdef DOUBLE_t* importance_data = <DOUBLE_t*>importances.data
  __pyx_v_importance_data = ((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *)__pyx_v_importances->data);
 
 1245: 
-
+1246:         with nogil:
+
+1246:         with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -8605,34 +8679,34 @@
         __pyx_L5:;
       }
   }
-
+1247:             while node != end_node:
+
+1247:             while node != end_node:
        while (1) {
           __pyx_t_11 = ((__pyx_v_node != __pyx_v_end_node) != 0);
           if (!__pyx_t_11) break;
-
+1248:                 if node.left_child != _TREE_LEAF:
+
+1248:                 if node.left_child != _TREE_LEAF:
          __pyx_t_11 = ((__pyx_v_node->left_child != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
           if (__pyx_t_11) {
 /* … */
           }
-
 1249:                     # ... and node.right_child != _TREE_LEAF:
-
+1250:                     left = &nodes[node.left_child]
+
 1249:                     # ... and node.right_child != _TREE_LEAF:
+
+1250:                     left = &nodes[node.left_child]
            __pyx_v_left = (&(__pyx_v_nodes[__pyx_v_node->left_child]));
-
+1251:                     right = &nodes[node.right_child]
+
+1251:                     right = &nodes[node.right_child]
            __pyx_v_right = (&(__pyx_v_nodes[__pyx_v_node->right_child]));
 
 1252: 
-
+1253:                     importance_data[node.feature] += (
+
+1253:                     importance_data[node.feature] += (
            __pyx_t_12 = __pyx_v_node->feature;
 /* … */
             (__pyx_v_importance_data[__pyx_t_12]) = ((__pyx_v_importance_data[__pyx_t_12]) + (((__pyx_v_node->weighted_n_node_samples * __pyx_v_node->impurity) - (__pyx_v_left->weighted_n_node_samples * __pyx_v_left->impurity)) - (__pyx_v_right->weighted_n_node_samples * __pyx_v_right->impurity)));
-
 1254:                         node.weighted_n_node_samples * node.impurity -
-
 1255:                         left.weighted_n_node_samples * left.impurity -
-
 1256:                         right.weighted_n_node_samples * right.impurity)
-
+1257:                 node += 1
+
 1254:                         node.weighted_n_node_samples * node.impurity -
+
 1255:                         left.weighted_n_node_samples * left.impurity -
+
 1256:                         right.weighted_n_node_samples * right.impurity)
+
+1257:                 node += 1
          __pyx_v_node = (__pyx_v_node + 1);
         }
       }
 
 1258: 
-
+1259:         importances /= nodes[0].weighted_n_node_samples
+
+1259:         importances /= nodes[0].weighted_n_node_samples
  __pyx_t_1 = PyFloat_FromDouble((__pyx_v_nodes[0]).weighted_n_node_samples); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1259, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_3 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_importances), __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1259, __pyx_L1_error)
@@ -8661,12 +8735,12 @@
   __Pyx_DECREF_SET(__pyx_v_importances, ((PyArrayObject *)__pyx_t_3));
   __pyx_t_3 = 0;
 
 1260: 
-
+1261:         if normalize:
+
+1261:         if normalize:
  __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_normalize); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1261, __pyx_L1_error)
   if (__pyx_t_11) {
 /* … */
   }
-
+1262:             normalizer = np.sum(importances)
+
+1262:             normalizer = np.sum(importances)
    __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1262, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1262, __pyx_L1_error)
@@ -8691,13 +8765,13 @@
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_v_normalizer = __pyx_t_13;
 
 1263: 
-
+1264:             if normalizer > 0.0:
+
+1264:             if normalizer > 0.0:
    __pyx_t_11 = ((__pyx_v_normalizer > 0.0) != 0);
     if (__pyx_t_11) {
 /* … */
     }
-
 1265:                 # Avoid dividing by zero (e.g., when root is pure)
-
+1266:                 importances /= normalizer
+
 1265:                 # Avoid dividing by zero (e.g., when root is pure)
+
+1266:                 importances /= normalizer
      __pyx_t_3 = PyFloat_FromDouble(__pyx_v_normalizer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1266, __pyx_L1_error)
       __Pyx_GOTREF(__pyx_t_3);
       __pyx_t_4 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_importances), __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1266, __pyx_L1_error)
@@ -8726,13 +8800,13 @@
       __Pyx_DECREF_SET(__pyx_v_importances, ((PyArrayObject *)__pyx_t_4));
       __pyx_t_4 = 0;
 
 1267: 
-
+1268:         return importances
+
+1268:         return importances
  __Pyx_XDECREF(__pyx_r);
   __Pyx_INCREF(((PyObject *)__pyx_v_importances));
   __pyx_r = ((PyObject *)__pyx_v_importances);
   goto __pyx_L0;
 
 1269: 
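The loop at lines 1247-1257 adds each split's weighted impurity decrease to its feature's slot, line 1259 rescales by the root's weight, and lines 1261-1266 optionally normalize the vector to sum to one. A plain-NumPy restatement over hypothetical flat tree arrays (a sketch of the computation, not the cdef implementation):

import numpy as np

def feature_importances(feature, left, right, weighted_n, impurity,
                        n_features, normalize=True):
    importances = np.zeros(n_features)
    for node in range(len(feature)):
        l, r = left[node], right[node]
        if l != -1:  # -1 plays the role of _TREE_LEAF: only splits contribute
            importances[feature[node]] += (
                weighted_n[node] * impurity[node]
                - weighted_n[l] * impurity[l]
                - weighted_n[r] * impurity[r])
    importances /= weighted_n[0]          # scale by the root's sample weight
    if normalize:
        total = importances.sum()
        if total > 0.0:                   # avoid dividing by zero (pure root)
            importances /= total
    return importances
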
-
+1270:     cdef np.ndarray _get_value_ndarray(self):
+
+1270:     cdef np.ndarray _get_value_ndarray(self):
static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__get_value_ndarray(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self) {
   npy_intp __pyx_v_shape[2];
   PyArrayObject *__pyx_v_arr = 0;
@@ -8751,31 +8825,31 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1271:         """Wraps value as a 2-d NumPy array.
+
 1271:         """Wraps value as a 2-d NumPy array.
 1272: 
-
 1273:         The array keeps a reference to this Tree, which manages the underlying
-
 1274:         memory.
-
 1275:         """
-
 1276:         cdef np.npy_intp shape[2]
-
+1277:         shape[0] = <np.npy_intp> self.node_count
+
 1273:         The array keeps a reference to this Tree, which manages the underlying
+
 1274:         memory.
+
 1275:         """
+
 1276:         cdef np.npy_intp shape[2]
+
+1277:         shape[0] = <np.npy_intp> self.node_count
  (__pyx_v_shape[0]) = ((npy_intp)__pyx_v_self->node_count);
-
+1278:         shape[1] = <np.npy_intp> self.K_y.shape[0]
+
+1278:         shape[1] = <np.npy_intp> self.K_y.shape[0]
  (__pyx_v_shape[1]) = ((npy_intp)(__pyx_v_self->K_y->dimensions[0]));
-
 1279:         cdef np.ndarray arr
-
+1280:         arr = np.PyArray_SimpleNewFromData(2, shape, np.NPY_DOUBLE, self.value)
+
 1279:         cdef np.ndarray arr
+
+1280:         arr = np.PyArray_SimpleNewFromData(2, shape, np.NPY_DOUBLE, self.value)
  __pyx_t_1 = PyArray_SimpleNewFromData(2, __pyx_v_shape, NPY_DOUBLE, __pyx_v_self->value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1280, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1280, __pyx_L1_error)
   __pyx_v_arr = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
-
+1281:         Py_INCREF(self)
+
+1281:         Py_INCREF(self)
  Py_INCREF(((PyObject *)__pyx_v_self));
-
+1282:         if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
+
+1282:         if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
  __pyx_t_2 = ((PyArray_SetBaseObject(__pyx_v_arr, ((PyObject *)__pyx_v_self)) < 0) != 0);
   if (unlikely(__pyx_t_2)) {
 /* … */
   }
-
+1283:             raise ValueError("Can't initialize array.")
+
+1283:             raise ValueError("Can't initialize array.")
    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1283, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
@@ -8785,13 +8859,13 @@
   __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_t_initialize_array); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 1283, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_tuple__11);
   __Pyx_GIVEREF(__pyx_tuple__11);
-
+1284:         return arr
+
+1284:         return arr
  __Pyx_XDECREF(((PyObject *)__pyx_r));
   __Pyx_INCREF(((PyObject *)__pyx_v_arr));
   __pyx_r = __pyx_v_arr;
   goto __pyx_L0;
 
 1285: 
-
+1286:     cdef np.ndarray _get_node_ndarray(self):
+
+1286:     cdef np.ndarray _get_node_ndarray(self):
static PyArrayObject *__pyx_f_13stpredictions_6models_3OK3_5_tree_4Tree__get_node_ndarray(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_self) {
   npy_intp __pyx_v_shape[1];
   npy_intp __pyx_v_strides[1];
@@ -8812,50 +8886,50 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1287:         """Wraps nodes as a NumPy struct array.
+
 1287:         """Wraps nodes as a NumPy struct array.
 1288: 
-
 1289:         The array keeps a reference to this Tree, which manages the underlying
-
 1290:         memory. Individual fields are publicly accessible as properties of the
-
 1291:         Tree.
-
 1292:         """
-
 1293:         cdef np.npy_intp shape[1]
-
+1294:         shape[0] = <np.npy_intp> self.node_count
+
 1289:         The array keeps a reference to this Tree, which manages the underlying
+
 1290:         memory. Individual fields are publicly accessible as properties of the
+
 1291:         Tree.
+
 1292:         """
+
 1293:         cdef np.npy_intp shape[1]
+
+1294:         shape[0] = <np.npy_intp> self.node_count
  (__pyx_v_shape[0]) = ((npy_intp)__pyx_v_self->node_count);
-
 1295:         cdef np.npy_intp strides[1]
-
+1296:         strides[0] = sizeof(Node)
+
 1295:         cdef np.npy_intp strides[1]
+
+1296:         strides[0] = sizeof(Node)
  (__pyx_v_strides[0]) = (sizeof(struct __pyx_t_13stpredictions_6models_3OK3_5_tree_Node));
-
 1297:         cdef np.ndarray arr
-
+1298:         Py_INCREF(NODE_DTYPE)
+
 1297:         cdef np.ndarray arr
+
+1298:         Py_INCREF(NODE_DTYPE)
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_NODE_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1298, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   Py_INCREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
+1299:         arr = PyArray_NewFromDescr(<PyTypeObject *> np.ndarray,
+
+1299:         arr = PyArray_NewFromDescr(<PyTypeObject *> np.ndarray,
  __pyx_t_2 = PyArray_NewFromDescr(((PyTypeObject *)__pyx_ptype_5numpy_ndarray), ((PyArray_Descr *)__pyx_t_1), 1, __pyx_v_shape, __pyx_v_strides, ((void *)__pyx_v_self->nodes), NPY_DEFAULT, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1299, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1299, __pyx_L1_error)
   __pyx_v_arr = ((PyArrayObject *)__pyx_t_2);
   __pyx_t_2 = 0;
-
+1300:                                    <np.dtype> NODE_DTYPE, 1, shape,
+
+1300:                                    <np.dtype> NODE_DTYPE, 1, shape,
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_NODE_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1300, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
-
 1301:                                    strides, <void*> self.nodes,
-
 1302:                                    np.NPY_DEFAULT, None)
-
+1303:         Py_INCREF(self)
+
 1301:                                    strides, <void*> self.nodes,
+
 1302:                                    np.NPY_DEFAULT, None)
+
+1303:         Py_INCREF(self)
  Py_INCREF(((PyObject *)__pyx_v_self));
-
+1304:         if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
+
+1304:         if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
  __pyx_t_3 = ((PyArray_SetBaseObject(__pyx_v_arr, ((PyObject *)__pyx_v_self)) < 0) != 0);
   if (unlikely(__pyx_t_3)) {
 /* … */
   }
-
+1305:             raise ValueError("Can't initialize array.")
+
+1305:             raise ValueError("Can't initialize array.")
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1305, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     __Pyx_Raise(__pyx_t_2, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     __PYX_ERR(0, 1305, __pyx_L1_error)
-
+1306:         return arr
+
+1306:         return arr
  __Pyx_XDECREF(((PyObject *)__pyx_r));
   __Pyx_INCREF(((PyObject *)__pyx_v_arr));
   __pyx_r = __pyx_v_arr;
@@ -8864,12 +8938,12 @@
 
 1308: 
 1309: 
 1310: 
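Both `_get_value_ndarray` and `_get_node_ndarray` follow the same ownership pattern: build an array over memory the Tree owns, then `Py_INCREF(self)` and `PyArray_SetBaseObject(arr, self)` so the array keeps the Tree alive for as long as the view exists. A rough pure-Python analogue of that idea (a sketch only; it uses `np.frombuffer` and the array's `base` attribute, not the C-API calls above):

import numpy as np

class Owner:
    """Stand-in for the Tree: it owns the raw storage."""
    def __init__(self):
        self.buf = bytearray(8 * 4)   # room for four float64 values

owner = Owner()
arr = np.frombuffer(owner.buf, dtype=np.float64)
# arr does not copy: it views owner.buf and retains it through arr.base,
# which plays the role PyArray_SetBaseObject plays in the generated code.
del owner                              # the buffer survives: arr still holds it
arr[0] = 1.0
print(arr[0])                          # 1.0
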
-
 1311: # =============================================================================
-
 1312: # Build Pruned Tree
-
 1313: # =============================================================================
+
 1311: # =============================================================================
+
 1312: # Build Pruned Tree
+
 1313: # =============================================================================
 1314: 
 1315: 
-
+1316: cdef class _CCPPruneController:
+
+1316: cdef class _CCPPruneController:
struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController {
   PyObject_HEAD
   struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__CCPPruneController *__pyx_vtab;
@@ -8882,10 +8956,10 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__CCPPruneController *__pyx_vtabptr_13stpredictions_6models_3OK3_5_tree__CCPPruneController;
 
-
 1317:     """Base class used by build_pruned_tree_ccp and ccp_pruning_path
-
 1318:     to control pruning.
-
 1319:     """
-
+1320:     cdef bint stop_pruning(self, DOUBLE_t effective_alpha) nogil:
+
 1317:     """Base class used by build_pruned_tree_ccp and ccp_pruning_path
+
 1318:     to control pruning.
+
 1319:     """
+
+1320:     cdef bint stop_pruning(self, DOUBLE_t effective_alpha) nogil:
static int __pyx_f_13stpredictions_6models_3OK3_5_tree_19_CCPPruneController_stop_pruning(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController *__pyx_v_self, CYTHON_UNUSED __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t __pyx_v_effective_alpha) {
   int __pyx_r;
 /* … */
@@ -8893,30 +8967,30 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 1321:         """Return 1 to stop pruning and 0 to continue pruning"""
-
+1322:         return 0
+
 1321:         """Return 1 to stop pruning and 0 to continue pruning"""
+
+1322:         return 0
  __pyx_r = 0;
   goto __pyx_L0;
 
 1323: 
-
+1324:     cdef void save_metrics(self, DOUBLE_t effective_alpha,
+
+1324:     cdef void save_metrics(self, DOUBLE_t effective_alpha,
static void __pyx_f_13stpredictions_6models_3OK3_5_tree_19_CCPPruneController_save_metrics(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController *__pyx_v_self, CYTHON_UNUSED __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t __pyx_v_effective_alpha, CYTHON_UNUSED __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t __pyx_v_subtree_impurities) {
 
   /* function exit code */
 }
-
 1325:                            DOUBLE_t subtree_impurities) nogil:
-
 1326:         """Save metrics when pruning"""
-
 1327:         pass
+
 1325:                            DOUBLE_t subtree_impurities) nogil:
+
 1326:         """Save metrics when pruning"""
+
 1327:         pass
 1328: 
-
+1329:     cdef void after_pruning(self, unsigned char[:] in_subtree) nogil:
+
+1329:     cdef void after_pruning(self, unsigned char[:] in_subtree) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_5_tree_19_CCPPruneController_after_pruning(CYTHON_UNUSED struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController *__pyx_v_self, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_in_subtree) {
 
   /* function exit code */
 }
-
 1330:         """Called after pruning"""
-
 1331:         pass
+
 1330:         """Called after pruning"""
+
 1331:         pass
 1332: 
 1333: 
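The `_CCPPruneController` hooks annotated above define the protocol the pruning loop talks to. A plain-Python sketch of the same interface (the real classes are `cdef` classes whose methods run `nogil`; this only mirrors the shape of the protocol):

class CCPPruneController:
    """Sketch of the pruning-controller protocol (names mirror lines 1316-1331)."""

    def stop_pruning(self, effective_alpha):
        # Return True (1) to stop pruning, False (0) to continue.
        return False

    def save_metrics(self, effective_alpha, subtree_impurities):
        # Record metrics for the subtree produced at this pruning step.
        pass

    def after_pruning(self, in_subtree):
        # Inspect the final node mask once pruning has finished.
        pass
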
-
+1334: cdef class _AlphaPruner(_CCPPruneController):
+
+1334: cdef class _AlphaPruner(_CCPPruneController):
struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__AlphaPruner {
   struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController __pyx_base;
   __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t ccp_alpha;
@@ -8928,11 +9002,11 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__AlphaPruner *__pyx_vtabptr_13stpredictions_6models_3OK3_5_tree__AlphaPruner;
 
-
 1335:     """Use alpha to control when to stop pruning."""
-
 1336:     cdef DOUBLE_t ccp_alpha
-
 1337:     cdef SIZE_t capacity
+
 1335:     """Use alpha to control when to stop pruning."""
+
 1336:     cdef DOUBLE_t ccp_alpha
+
 1337:     cdef SIZE_t capacity
 1338: 
-
+1339:     def __cinit__(self, DOUBLE_t ccp_alpha):
+
+1339:     def __cinit__(self, DOUBLE_t ccp_alpha):
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_12_AlphaPruner_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_12_AlphaPruner_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -8996,12 +9070,12 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+1340:         self.ccp_alpha = ccp_alpha
+
+1340:         self.ccp_alpha = ccp_alpha
  __pyx_v_self->ccp_alpha = __pyx_v_ccp_alpha;
-
+1341:         self.capacity = 0
+
+1341:         self.capacity = 0
  __pyx_v_self->capacity = 0;
 
 1342: 
-
+1343:     cdef bint stop_pruning(self, DOUBLE_t effective_alpha) nogil:
+
+1343:     cdef bint stop_pruning(self, DOUBLE_t effective_alpha) nogil:
static int __pyx_f_13stpredictions_6models_3OK3_5_tree_12_AlphaPruner_stop_pruning(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__AlphaPruner *__pyx_v_self, __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t __pyx_v_effective_alpha) {
   int __pyx_r;
 /* … */
@@ -9009,36 +9083,36 @@
   __pyx_L0:;
   return __pyx_r;
 }
-
 1344:         # The subtree on the previous iteration has the greatest ccp_alpha
-
 1345:         # less than or equal to self.ccp_alpha
-
+1346:         return self.ccp_alpha < effective_alpha
+
 1344:         # The subtree on the previous iteration has the greatest ccp_alpha
+
 1345:         # less than or equal to self.ccp_alpha
+
+1346:         return self.ccp_alpha < effective_alpha
  __pyx_r = (__pyx_v_self->ccp_alpha < __pyx_v_effective_alpha);
   goto __pyx_L0;
 
 1347: 
-
+1348:     cdef void after_pruning(self, unsigned char[:] in_subtree) nogil:
+
+1348:     cdef void after_pruning(self, unsigned char[:] in_subtree) nogil:
static void __pyx_f_13stpredictions_6models_3OK3_5_tree_12_AlphaPruner_after_pruning(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__AlphaPruner *__pyx_v_self, __Pyx_memviewslice __pyx_v_in_subtree) {
   Py_ssize_t __pyx_v_i;
 /* … */
   /* function exit code */
 }
-
 1349:         """Updates the number of leaves in subtree"""
-
+1350:         for i in range(in_subtree.shape[0]):
+
 1349:         """Updates the number of leaves in subtree"""
+
+1350:         for i in range(in_subtree.shape[0]):
  __pyx_t_1 = (__pyx_v_in_subtree.shape[0]);
   __pyx_t_2 = __pyx_t_1;
   for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
     __pyx_v_i = __pyx_t_3;
-
+1351:             if in_subtree[i]:
+
+1351:             if in_subtree[i]:
    __pyx_t_4 = __pyx_v_i;
     __pyx_t_5 = ((*((unsigned char *) ( /* dim=0 */ (__pyx_v_in_subtree.data + __pyx_t_4 * __pyx_v_in_subtree.strides[0]) ))) != 0);
     if (__pyx_t_5) {
 /* … */
     }
   }
-
+1352:                 self.capacity += 1
+
+1352:                 self.capacity += 1
      __pyx_v_self->capacity = (__pyx_v_self->capacity + 1);
 
 1353: 
 1354: 
-
+1355: cdef class _PathFinder(_CCPPruneController):
+
+1355: cdef class _PathFinder(_CCPPruneController):
struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__PathFinder {
   struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController __pyx_base;
   __Pyx_memviewslice ccp_alphas;
@@ -9051,12 +9125,12 @@
 };
 static struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__PathFinder *__pyx_vtabptr_13stpredictions_6models_3OK3_5_tree__PathFinder;
 
-
 1356:     """Record metrics used to return the cost complexity path."""
-
 1357:     cdef DOUBLE_t[:] ccp_alphas
-
 1358:     cdef DOUBLE_t[:] impurities
-
 1359:     cdef UINT32_t count
+
 1356:     """Record metrics used to return the cost complexity path."""
+
 1357:     cdef DOUBLE_t[:] ccp_alphas
+
 1358:     cdef DOUBLE_t[:] impurities
+
 1359:     cdef UINT32_t count
 1360: 
-
+1361:     def __cinit__(self,  int node_count):
+
+1361:     def __cinit__(self,  int node_count):
/* Python wrapper */
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_11_PathFinder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static int __pyx_pw_13stpredictions_6models_3OK3_5_tree_11_PathFinder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
@@ -9130,7 +9204,7 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
+1362:         self.ccp_alphas = np.zeros(shape=(node_count), dtype=np.float64)
+
+1362:         self.ccp_alphas = np.zeros(shape=(node_count), dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1362, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1362, __pyx_L1_error)
@@ -9159,7 +9233,7 @@
   __pyx_v_self->ccp_alphas = __pyx_t_5;
   __pyx_t_5.memview = NULL;
   __pyx_t_5.data = NULL;
-
+1363:         self.impurities = np.zeros(shape=(node_count), dtype=np.float64)
+
+1363:         self.impurities = np.zeros(shape=(node_count), dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1363, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1363, __pyx_L1_error)
@@ -9188,10 +9262,10 @@
   __pyx_v_self->impurities = __pyx_t_5;
   __pyx_t_5.memview = NULL;
   __pyx_t_5.data = NULL;
-
+1364:         self.count = 0
+
+1364:         self.count = 0
  __pyx_v_self->count = 0;
 
 1365: 
-
+1366:     cdef void save_metrics(self,
+
+1366:     cdef void save_metrics(self,
static void __pyx_f_13stpredictions_6models_3OK3_5_tree_11_PathFinder_save_metrics(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__PathFinder *__pyx_v_self, __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t __pyx_v_effective_alpha, __pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t __pyx_v_subtree_impurities) {
 /* … */
   /* function exit code */
@@ -9200,21 +9274,21 @@
   __Pyx_WriteUnraisable("stpredictions.models.OK3._tree._PathFinder.save_metrics", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
   __pyx_L0:;
 }
-
 1367:                            DOUBLE_t effective_alpha,
-
 1368:                            DOUBLE_t subtree_impurities) nogil:
-
+1369:         self.ccp_alphas[self.count] = effective_alpha
+
 1367:                            DOUBLE_t effective_alpha,
+
 1368:                            DOUBLE_t subtree_impurities) nogil:
+
+1369:         self.ccp_alphas[self.count] = effective_alpha
  if (unlikely(!__pyx_v_self->ccp_alphas.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 1369, __pyx_L1_error)}
   __pyx_t_1 = __pyx_v_self->count;
   *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_self->ccp_alphas.data + __pyx_t_1 * __pyx_v_self->ccp_alphas.strides[0]) )) = __pyx_v_effective_alpha;
-
+1370:         self.impurities[self.count] = subtree_impurities
+
+1370:         self.impurities[self.count] = subtree_impurities
  if (unlikely(!__pyx_v_self->impurities.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 1370, __pyx_L1_error)}
   __pyx_t_1 = __pyx_v_self->count;
   *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_self->impurities.data + __pyx_t_1 * __pyx_v_self->impurities.strides[0]) )) = __pyx_v_subtree_impurities;
-
+1371:         self.count += 1
+
+1371:         self.count += 1
  __pyx_v_self->count = (__pyx_v_self->count + 1);
 
 1372: 
 1373: 
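The two concrete controllers specialize those hooks in opposite directions: `_AlphaPruner` stops the loop once the effective alpha exceeds the requested `ccp_alpha` and then counts the surviving nodes, while `_PathFinder` never stops early and instead records every (alpha, impurity) pair. A standalone plain-Python sketch mirroring lines 1334-1371 (same method names as the protocol sketch earlier):

import numpy as np

class AlphaPruner:
    """Stop pruning at a target ccp_alpha (mirrors _AlphaPruner)."""
    def __init__(self, ccp_alpha):
        self.ccp_alpha = ccp_alpha
        self.capacity = 0

    def stop_pruning(self, effective_alpha):
        # The subtree from the previous iteration has the greatest
        # effective alpha that is <= self.ccp_alpha.
        return self.ccp_alpha < effective_alpha

    def save_metrics(self, effective_alpha, subtree_impurities):
        pass

    def after_pruning(self, in_subtree):
        # Count the nodes kept in the pruned subtree.
        self.capacity = int(np.count_nonzero(in_subtree))

class PathFinder:
    """Record the whole cost-complexity path (mirrors _PathFinder)."""
    def __init__(self, node_count):
        self.ccp_alphas = np.zeros(node_count)
        self.impurities = np.zeros(node_count)
        self.count = 0

    def stop_pruning(self, effective_alpha):
        return False

    def save_metrics(self, effective_alpha, subtree_impurities):
        self.ccp_alphas[self.count] = effective_alpha
        self.impurities[self.count] = subtree_impurities
        self.count += 1
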
-
+1374: cdef _cost_complexity_prune(unsigned char[:] leaves_in_subtree, # OUT
+
+1374: cdef _cost_complexity_prune(unsigned char[:] leaves_in_subtree, # OUT
static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree__cost_complexity_prune(__Pyx_memviewslice __pyx_v_leaves_in_subtree, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_orig_tree, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController *__pyx_v_controller) {
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_i;
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_n_nodes;
@@ -9277,32 +9351,32 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
-
 1375:                             Tree orig_tree,
-
 1376:                             _CCPPruneController controller):
-
 1377:     """Perform cost complexity pruning.
+
 1375:                             Tree orig_tree,
+
 1376:                             _CCPPruneController controller):
+
 1377:     """Perform cost complexity pruning.
 1378: 
-
 1379:     This function takes an already grown tree, `orig_tree` and outputs a
-
 1380:     boolean mask `leaves_in_subtree` to are the leaves in the pruned tree. The
-
 1381:     controller signals when the pruning should stop and is passed the
-
 1382:     metrics of the subtrees during the pruning process.
+
 1379:     This function takes an already grown tree, `orig_tree` and outputs a
+
 1380:     boolean mask `leaves_in_subtree` to are the leaves in the pruned tree. The
+
 1381:     controller signals when the pruning should stop and is passed the
+
 1382:     metrics of the subtrees during the pruning process.
 1383: 
-
 1384:     Parameters
-
 1385:     ----------
-
 1386:     leaves_in_subtree : unsigned char[:]
-
 1387:         Output for leaves of subtree
-
 1388:     orig_tree : Tree
-
 1389:         Original tree
-
 1390:     controller : _CCPPruneController
-
 1391:         Cost complexity controller
-
 1392:     """
+
 1384:     Parameters
+
 1385:     ----------
+
 1386:     leaves_in_subtree : unsigned char[:]
+
 1387:         Output for leaves of subtree
+
 1388:     orig_tree : Tree
+
 1389:         Original tree
+
 1390:     controller : _CCPPruneController
+
 1391:         Cost complexity controller
+
 1392:     """
 1393: 
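For each candidate split node t, the loop this function sets up evaluates the weakest-link quantity: with `r_node[t]` the node's own weighted risk, `r_branch[t]` the summed risk of the leaves below it, and `n_leaves[t]` the leaf count of its branch, the effective alpha is the cost increase per leaf removed. A small worked sketch (names follow the `cdef` block below; the formula is the standard cost-complexity one, stated here for orientation rather than lifted from the generated code):

def effective_alpha(r_node_t, r_branch_t, n_leaves_t):
    # Cost increase per leaf removed when collapsing node t to a leaf.
    return (r_node_t - r_branch_t) / (n_leaves_t - 1)

# A node with weighted risk 0.30 whose branch reaches 0.10 with 5 leaves:
print(effective_alpha(0.30, 0.10, 5))   # 0.05
# The candidate with the smallest effective alpha is pruned first; the
# controller's stop_pruning(effective_alpha) decides when to halt.
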
-
 1394:     cdef:
-
 1395:         SIZE_t i
-
+1396:         SIZE_t n_nodes = orig_tree.node_count
+
 1394:     cdef:
+
 1395:         SIZE_t i
+
+1396:         SIZE_t n_nodes = orig_tree.node_count
  __pyx_t_1 = __pyx_v_orig_tree->node_count;
   __pyx_v_n_nodes = __pyx_t_1;
-
 1397:         # prior probability using weighted samples
-
+1398:         DOUBLE_t[:] weighted_n_node_samples = orig_tree.weighted_n_node_samples
+
 1397:         # prior probability using weighted samples
+
+1398:         DOUBLE_t[:] weighted_n_node_samples = orig_tree.weighted_n_node_samples
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_orig_tree), __pyx_n_s_weighted_n_node_samples); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1398, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 1398, __pyx_L1_error)
@@ -9310,10 +9384,10 @@
   __pyx_v_weighted_n_node_samples = __pyx_t_3;
   __pyx_t_3.memview = NULL;
   __pyx_t_3.data = NULL;
-
+1399:         DOUBLE_t total_sum_weights = weighted_n_node_samples[0]
+
+1399:         DOUBLE_t total_sum_weights = weighted_n_node_samples[0]
  __pyx_t_4 = 0;
   __pyx_v_total_sum_weights = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_weighted_n_node_samples.data + __pyx_t_4 * __pyx_v_weighted_n_node_samples.strides[0]) )));
-
+1400:         DOUBLE_t[:] impurity = orig_tree.impurity
+
+1400:         DOUBLE_t[:] impurity = orig_tree.impurity
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_orig_tree), __pyx_n_s_impurity); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1400, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 1400, __pyx_L1_error)
@@ -9321,8 +9395,8 @@
   __pyx_v_impurity = __pyx_t_3;
   __pyx_t_3.memview = NULL;
   __pyx_t_3.data = NULL;
-
 1401:         # weighted impurity of each node
-
+1402:         DOUBLE_t[:] r_node = np.empty(shape=n_nodes, dtype=np.float64)
+
 1401:         # weighted impurity of each node
+
+1402:         DOUBLE_t[:] r_node = np.empty(shape=n_nodes, dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1402, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1402, __pyx_L1_error)
@@ -9351,7 +9425,7 @@
   __pyx_t_3.memview = NULL;
   __pyx_t_3.data = NULL;
 
 1403: 
-
+1404:         SIZE_t[:] child_l = orig_tree.children_left
+
+1404:         SIZE_t[:] child_l = orig_tree.children_left
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_orig_tree), __pyx_n_s_children_left); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1404, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t(__pyx_t_7, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 1404, __pyx_L1_error)
@@ -9359,7 +9433,7 @@
   __pyx_v_child_l = __pyx_t_8;
   __pyx_t_8.memview = NULL;
   __pyx_t_8.data = NULL;
-
+1405:         SIZE_t[:] child_r = orig_tree.children_right
+
+1405:         SIZE_t[:] child_r = orig_tree.children_right
  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_orig_tree), __pyx_n_s_children_right); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1405, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t(__pyx_t_7, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 1405, __pyx_L1_error)
@@ -9367,7 +9441,7 @@
   __pyx_v_child_r = __pyx_t_8;
   __pyx_t_8.memview = NULL;
   __pyx_t_8.data = NULL;
-
+1406:         SIZE_t[:] parent = np.zeros(shape=n_nodes, dtype=np.intp)
+
+1406:         SIZE_t[:] parent = np.zeros(shape=n_nodes, dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1406, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1406, __pyx_L1_error)
@@ -9396,8 +9470,8 @@
   __pyx_t_8.memview = NULL;
   __pyx_t_8.data = NULL;
 
 1407: 
-
 1408:         # Only uses the start and parent variables
-
+1409:         Stack stack = Stack(INITIAL_STACK_SIZE)
+
 1408:         # Only uses the start and parent variables
+
+1409:         Stack stack = Stack(INITIAL_STACK_SIZE)
  __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_13stpredictions_6models_3OK3_5_tree_INITIAL_STACK_SIZE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1409, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_6);
   __pyx_t_7 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_7sklearn_4tree_6_utils_Stack), __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1409, __pyx_L1_error)
@@ -9405,12 +9479,12 @@
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
   __pyx_v_stack = ((struct __pyx_obj_7sklearn_4tree_6_utils_Stack *)__pyx_t_7);
   __pyx_t_7 = 0;
-
 1410:         StackRecord stack_record
-
+1411:         int rc = 0
+
 1410:         StackRecord stack_record
+
+1411:         int rc = 0
  __pyx_v_rc = 0;
-
 1412:         SIZE_t node_idx
+
 1412:         SIZE_t node_idx
 1413: 
-
+1414:         SIZE_t[:] n_leaves = np.zeros(shape=n_nodes, dtype=np.intp)
+
+1414:         SIZE_t[:] n_leaves = np.zeros(shape=n_nodes, dtype=np.intp)
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1414, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1414, __pyx_L1_error)
@@ -9438,7 +9512,7 @@
   __pyx_v_n_leaves = __pyx_t_8;
   __pyx_t_8.memview = NULL;
   __pyx_t_8.data = NULL;
-
+1415:         DOUBLE_t[:] r_branch = np.zeros(shape=n_nodes, dtype=np.float64)
+
+1415:         DOUBLE_t[:] r_branch = np.zeros(shape=n_nodes, dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1415, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1415, __pyx_L1_error)
@@ -9466,12 +9540,12 @@
   __pyx_v_r_branch = __pyx_t_3;
   __pyx_t_3.memview = NULL;
   __pyx_t_3.data = NULL;
-
 1416:         DOUBLE_t current_r
-
 1417:         SIZE_t leaf_idx
-
 1418:         SIZE_t parent_idx
+
 1416:         DOUBLE_t current_r
+
 1417:         SIZE_t leaf_idx
+
 1418:         SIZE_t parent_idx
 1419: 
-
 1420:         # candidate nodes that can be pruned
-
+1421:         unsigned char[:] candidate_nodes = np.zeros(shape=n_nodes,
+
 1420:         # candidate nodes that can be pruned
+
+1421:         unsigned char[:] candidate_nodes = np.zeros(shape=n_nodes,
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1421, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1421, __pyx_L1_error)
@@ -9493,7 +9567,7 @@
   __pyx_v_candidate_nodes = __pyx_t_9;
   __pyx_t_9.memview = NULL;
   __pyx_t_9.data = NULL;
-+1422:                                                     dtype=np.uint8)
++1422:                                                     dtype=np.uint8)
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1422, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_uint8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1422, __pyx_L1_error)
@@ -9501,8 +9575,8 @@
   __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
   if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 1421, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- 1423:         # nodes in subtree
-+1424:         unsigned char[:] in_subtree = np.ones(shape=n_nodes, dtype=np.uint8)
+ 1423:         # nodes in subtree
++1424:         unsigned char[:] in_subtree = np.ones(shape=n_nodes, dtype=np.uint8)
  __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1424, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_6);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_ones); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1424, __pyx_L1_error)
@@ -9530,7 +9604,7 @@
   __pyx_v_in_subtree = __pyx_t_9;
   __pyx_t_9.memview = NULL;
   __pyx_t_9.data = NULL;
-+1425:         DOUBLE_t[:] g_node = np.zeros(shape=n_nodes, dtype=np.float64)
++1425:         DOUBLE_t[:] g_node = np.zeros(shape=n_nodes, dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1425, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1425, __pyx_L1_error)
@@ -9558,14 +9632,14 @@
   __pyx_v_g_node = __pyx_t_3;
   __pyx_t_3.memview = NULL;
   __pyx_t_3.data = NULL;
- 1426:         SIZE_t pruned_branch_node_idx
- 1427:         DOUBLE_t subtree_alpha
- 1428:         DOUBLE_t effective_alpha
- 1429:         SIZE_t child_l_idx
- 1430:         SIZE_t child_r_idx
- 1431:         SIZE_t n_pruned_leaves
- 1432:         DOUBLE_t r_diff
-+1433:         DOUBLE_t max_float64 = np.finfo(np.float64).max
+ 1426:         SIZE_t pruned_branch_node_idx
+ 1427:         DOUBLE_t subtree_alpha
+ 1428:         DOUBLE_t effective_alpha
+ 1429:         SIZE_t child_l_idx
+ 1430:         SIZE_t child_r_idx
+ 1431:         SIZE_t n_pruned_leaves
+ 1432:         DOUBLE_t r_diff
++1433:         DOUBLE_t max_float64 = np.finfo(np.float64).max
  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1433, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_7);
   __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_finfo); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1433, __pyx_L1_error)
@@ -9599,8 +9673,8 @@
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
   __pyx_v_max_float64 = __pyx_t_10;
 
 1434: 
- 1435:     # find parent node ids and leaves
-+1436:     with nogil:
+ 1435:     # find parent node ids and leaves
++1436:     with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -9628,29 +9702,29 @@
       }
   }
 
 1437: 
-+1438:         for i in range(r_node.shape[0]):
++1438:         for i in range(r_node.shape[0]):
        __pyx_t_11 = (__pyx_v_r_node.shape[0]);
         __pyx_t_12 = __pyx_t_11;
         for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_12; __pyx_t_1+=1) {
           __pyx_v_i = __pyx_t_1;
-+1439:             r_node[i] = (
++1439:             r_node[i] = (
          __pyx_t_14 = __pyx_v_i;
           *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_node.data + __pyx_t_14 * __pyx_v_r_node.strides[0]) )) = (((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_weighted_n_node_samples.data + __pyx_t_4 * __pyx_v_weighted_n_node_samples.strides[0]) ))) * (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_impurity.data + __pyx_t_13 * __pyx_v_impurity.strides[0]) )))) / __pyx_v_total_sum_weights);
         }
-+1440:                 weighted_n_node_samples[i] * impurity[i] / total_sum_weights)
++1440:                 weighted_n_node_samples[i] * impurity[i] / total_sum_weights)
          __pyx_t_4 = __pyx_v_i;
           __pyx_t_13 = __pyx_v_i;
 
 1441: 
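The hunk above regenerates the C for .pyx lines 1438-1440, which fill r_node with each node's weighted risk. A plain-NumPy sketch of the same computation follows; variable names mirror the .pyx source, and this is an illustration, not the shipped code.

import numpy as np

def node_risks(weighted_n_node_samples, impurity, total_sum_weights):
    # r_node[i]: impurity of node i, weighted by the share of training
    # samples that reach it (lines 1438-1440 above).
    return np.asarray(weighted_n_node_samples) * np.asarray(impurity) / total_sum_weights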
- 1442:         # Push root node, using StackRecord.start as node id
-+1443:         rc = stack.push(0, 0, 0, -1, 0, 0, 0)
+ 1442:         # Push root node, using StackRecord.start as node id
++1443:         rc = stack.push(0, 0, 0, -1, 0, 0, 0)
        __pyx_t_15 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, 0, 0, 0, -1, 0, 0.0, 0); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 1443, __pyx_L4_error)
         __pyx_v_rc = __pyx_t_15;
-+1444:         if rc == -1:
++1444:         if rc == -1:
        __pyx_t_16 = ((__pyx_v_rc == -1L) != 0);
         if (__pyx_t_16) {
 /* … */
         }
-+1445:             with gil:
++1445:             with gil:
          {
               #ifdef WITH_THREAD
               PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -9666,7 +9740,7 @@
                 }
               }
           }
-+1446:                 raise MemoryError("pruning tree")
++1446:                 raise MemoryError("pruning tree")
                __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1446, __pyx_L10_error)
                 __Pyx_GOTREF(__pyx_t_6);
                 __Pyx_Raise(__pyx_t_6, 0, 0, 0);
@@ -9678,42 +9752,42 @@
   __Pyx_GOTREF(__pyx_tuple__16);
   __Pyx_GIVEREF(__pyx_tuple__16);
 
 1447: 
-+1448:         while not stack.is_empty():
++1448:         while not stack.is_empty():
        while (1) {
           __pyx_t_16 = ((!(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->is_empty(__pyx_v_stack) != 0)) != 0);
           if (!__pyx_t_16) break;
-+1449:             stack.pop(&stack_record)
++1449:             stack.pop(&stack_record)
          (void)(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->pop(__pyx_v_stack, (&__pyx_v_stack_record)));
-+1450:             node_idx = stack_record.start
++1450:             node_idx = stack_record.start
          __pyx_t_17 = __pyx_v_stack_record.start;
           __pyx_v_node_idx = __pyx_t_17;
-+1451:             parent[node_idx] = stack_record.parent
++1451:             parent[node_idx] = stack_record.parent
          __pyx_t_17 = __pyx_v_stack_record.parent;
           __pyx_t_13 = __pyx_v_node_idx;
           *((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_parent.data + __pyx_t_13 * __pyx_v_parent.strides[0]) )) = __pyx_t_17;
-+1452:             if child_l[node_idx] == _TREE_LEAF:
++1452:             if child_l[node_idx] == _TREE_LEAF:
          __pyx_t_13 = __pyx_v_node_idx;
           __pyx_t_16 = (((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_child_l.data + __pyx_t_13 * __pyx_v_child_l.strides[0]) ))) == __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
           if (__pyx_t_16) {
 /* … */
             goto __pyx_L14;
           }
- 1453:                 # ... and child_r[node_idx] == _TREE_LEAF:
-+1454:                 leaves_in_subtree[node_idx] = 1
+ 1453:                 # ... and child_r[node_idx] == _TREE_LEAF:
++1454:                 leaves_in_subtree[node_idx] = 1
            __pyx_t_13 = __pyx_v_node_idx;
             *((unsigned char *) ( /* dim=0 */ (__pyx_v_leaves_in_subtree.data + __pyx_t_13 * __pyx_v_leaves_in_subtree.strides[0]) )) = 1;
- 1455:             else:
-+1456:                 rc = stack.push(child_l[node_idx], 0, 0, node_idx, 0, 0, 0)
+ 1455:             else:
++1456:                 rc = stack.push(child_l[node_idx], 0, 0, node_idx, 0, 0, 0)
          /*else*/ {
             __pyx_t_13 = __pyx_v_node_idx;
             __pyx_t_15 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_child_l.data + __pyx_t_13 * __pyx_v_child_l.strides[0]) ))), 0, 0, __pyx_v_node_idx, 0, 0.0, 0); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 1456, __pyx_L4_error)
             __pyx_v_rc = __pyx_t_15;
-+1457:                 if rc == -1:
++1457:                 if rc == -1:
            __pyx_t_16 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_16) {
 /* … */
             }
-+1458:                     with gil:
++1458:                     with gil:
              {
                   #ifdef WITH_THREAD
                   PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -9729,7 +9803,7 @@
                     }
                   }
               }
-+1459:                         raise MemoryError("pruning tree")
++1459:                         raise MemoryError("pruning tree")
                    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1459, __pyx_L19_error)
                     __Pyx_GOTREF(__pyx_t_6);
                     __Pyx_Raise(__pyx_t_6, 0, 0, 0);
@@ -9737,11 +9811,11 @@
                     __PYX_ERR(0, 1459, __pyx_L19_error)
                   }
 
 1460: 
-+1461:                 rc = stack.push(child_r[node_idx], 0, 0, node_idx, 0, 0, 0)
++1461:                 rc = stack.push(child_r[node_idx], 0, 0, node_idx, 0, 0, 0)
            __pyx_t_13 = __pyx_v_node_idx;
             __pyx_t_15 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_child_r.data + __pyx_t_13 * __pyx_v_child_r.strides[0]) ))), 0, 0, __pyx_v_node_idx, 0, 0.0, 0); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 1461, __pyx_L4_error)
             __pyx_v_rc = __pyx_t_15;
-+1462:                 if rc == -1:
++1462:                 if rc == -1:
            __pyx_t_16 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_16) {
 /* … */
@@ -9749,7 +9823,7 @@
           }
           __pyx_L14:;
         }
-+1463:                     with gil:
++1463:                     with gil:
              {
                   #ifdef WITH_THREAD
                   PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -9765,7 +9839,7 @@
                     }
                   }
               }
-+1464:                         raise MemoryError("pruning tree")
++1464:                         raise MemoryError("pruning tree")
                    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1464, __pyx_L25_error)
                     __Pyx_GOTREF(__pyx_t_6);
                     __Pyx_Raise(__pyx_t_6, 0, 0, 0);
@@ -9773,123 +9847,123 @@
                     __PYX_ERR(0, 1464, __pyx_L25_error)
                   }
 
 1465: 
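Lines 1443-1464 above are a stack-based traversal that records every node's parent and flags the leaves. The same pass in pure Python, using a list in place of sklearn's Stack; _TREE_LEAF is the -1 sentinel from the .pyx source, and the sketch is illustrative only.

_TREE_LEAF = -1

def find_parents_and_leaves(child_l, child_r, n_nodes):
    parent = [0] * n_nodes
    leaves_in_subtree = [0] * n_nodes
    stack = [(0, -1)]                        # (node id, parent id); the root has parent -1
    while stack:
        node_idx, parent_idx = stack.pop()
        parent[node_idx] = parent_idx
        if child_l[node_idx] == _TREE_LEAF:  # ... and child_r[node_idx] == _TREE_LEAF
            leaves_in_subtree[node_idx] = 1
        else:
            stack.append((child_l[node_idx], node_idx))
            stack.append((child_r[node_idx], node_idx))
    return parent, leaves_in_subtree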
- 1466:         # computes number of leaves in all branches and the overall impurity of
- 1467:         # the branch. The overall impurity is the sum of r_node in its leaves.
-+1468:         for leaf_idx in range(leaves_in_subtree.shape[0]):
+ 1466:         # computes number of leaves in all branches and the overall impurity of
+ 1467:         # the branch. The overall impurity is the sum of r_node in its leaves.
++1468:         for leaf_idx in range(leaves_in_subtree.shape[0]):
        __pyx_t_11 = (__pyx_v_leaves_in_subtree.shape[0]);
         __pyx_t_12 = __pyx_t_11;
         for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_12; __pyx_t_1+=1) {
           __pyx_v_leaf_idx = __pyx_t_1;
-+1469:             if not leaves_in_subtree[leaf_idx]:
++1469:             if not leaves_in_subtree[leaf_idx]:
          __pyx_t_13 = __pyx_v_leaf_idx;
           __pyx_t_16 = ((!((*((unsigned char *) ( /* dim=0 */ (__pyx_v_leaves_in_subtree.data + __pyx_t_13 * __pyx_v_leaves_in_subtree.strides[0]) ))) != 0)) != 0);
           if (__pyx_t_16) {
 /* … */
           }
-+1470:                 continue
++1470:                 continue
            goto __pyx_L27_continue;
-+1471:             r_branch[leaf_idx] = r_node[leaf_idx]
++1471:             r_branch[leaf_idx] = r_node[leaf_idx]
          __pyx_t_13 = __pyx_v_leaf_idx;
           __pyx_t_4 = __pyx_v_leaf_idx;
           *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_4 * __pyx_v_r_branch.strides[0]) )) = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_node.data + __pyx_t_13 * __pyx_v_r_node.strides[0]) )));
 
 1472: 
- 1473:             # bubble up values to ancestor nodes
-+1474:             current_r = r_node[leaf_idx]
+ 1473:             # bubble up values to ancestor nodes
++1474:             current_r = r_node[leaf_idx]
          __pyx_t_13 = __pyx_v_leaf_idx;
           __pyx_v_current_r = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_node.data + __pyx_t_13 * __pyx_v_r_node.strides[0]) )));
-+1475:             while leaf_idx != 0:
++1475:             while leaf_idx != 0:
          while (1) {
             __pyx_t_16 = ((__pyx_v_leaf_idx != 0) != 0);
             if (!__pyx_t_16) break;
-+1476:                 parent_idx = parent[leaf_idx]
++1476:                 parent_idx = parent[leaf_idx]
            __pyx_t_13 = __pyx_v_leaf_idx;
             __pyx_v_parent_idx = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_parent.data + __pyx_t_13 * __pyx_v_parent.strides[0]) )));
-+1477:                 r_branch[parent_idx] += current_r
++1477:                 r_branch[parent_idx] += current_r
            __pyx_t_13 = __pyx_v_parent_idx;
             *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_13 * __pyx_v_r_branch.strides[0]) )) += __pyx_v_current_r;
-+1478:                 n_leaves[parent_idx] += 1
++1478:                 n_leaves[parent_idx] += 1
            __pyx_t_13 = __pyx_v_parent_idx;
             *((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_n_leaves.data + __pyx_t_13 * __pyx_v_n_leaves.strides[0]) )) += 1;
-+1479:                 leaf_idx = parent_idx
++1479:                 leaf_idx = parent_idx
            __pyx_v_leaf_idx = __pyx_v_parent_idx;
           }
           __pyx_L27_continue:;
         }
 
 1480: 
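Lines 1468-1479 walk up from every leaf so that r_branch[i] accumulates the total leaf risk of the branch rooted at i and n_leaves[i] its leaf count. The same bubbling pass, sketched in plain Python (illustrative only):

def accumulate_branch_stats(parent, leaves_in_subtree, r_node):
    n_nodes = len(r_node)
    r_branch = [0.0] * n_nodes
    n_leaves = [0] * n_nodes
    for leaf_idx in range(n_nodes):
        if not leaves_in_subtree[leaf_idx]:
            continue
        r_branch[leaf_idx] = r_node[leaf_idx]
        current_r, node_idx = r_node[leaf_idx], leaf_idx
        while node_idx != 0:                  # bubble the leaf risk up to the root
            node_idx = parent[node_idx]
            r_branch[node_idx] += current_r
            n_leaves[node_idx] += 1
    return r_branch, n_leaves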
-+1481:         for i in range(leaves_in_subtree.shape[0]):
++1481:         for i in range(leaves_in_subtree.shape[0]):
        __pyx_t_11 = (__pyx_v_leaves_in_subtree.shape[0]);
         __pyx_t_12 = __pyx_t_11;
         for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_12; __pyx_t_1+=1) {
           __pyx_v_i = __pyx_t_1;
-+1482:             candidate_nodes[i] = not leaves_in_subtree[i]
++1482:             candidate_nodes[i] = not leaves_in_subtree[i]
          __pyx_t_13 = __pyx_v_i;
           __pyx_t_4 = __pyx_v_i;
           *((unsigned char *) ( /* dim=0 */ (__pyx_v_candidate_nodes.data + __pyx_t_4 * __pyx_v_candidate_nodes.strides[0]) )) = (!((*((unsigned char *) ( /* dim=0 */ (__pyx_v_leaves_in_subtree.data + __pyx_t_13 * __pyx_v_leaves_in_subtree.strides[0]) ))) != 0));
         }
 
 1483: 
- 1484:         # save metrics before pruning
-+1485:         controller.save_metrics(0.0, r_branch[0])
+ 1484:         # save metrics before pruning
++1485:         controller.save_metrics(0.0, r_branch[0])
        __pyx_t_13 = 0;
         ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__CCPPruneController *)__pyx_v_controller->__pyx_vtab)->save_metrics(__pyx_v_controller, 0.0, (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_13 * __pyx_v_r_branch.strides[0]) ))));
 
 1486: 
- 1487:         # while root node is not a leaf
-+1488:         while candidate_nodes[0]:
+ 1487:         # while root node is not a leaf
++1488:         while candidate_nodes[0]:
        while (1) {
           __pyx_t_13 = 0;
           __pyx_t_16 = ((*((unsigned char *) ( /* dim=0 */ (__pyx_v_candidate_nodes.data + __pyx_t_13 * __pyx_v_candidate_nodes.strides[0]) ))) != 0);
           if (!__pyx_t_16) break;
 
 1489: 
- 1490:             # computes ccp_alpha for subtrees and finds the minimal alpha
-+1491:             effective_alpha = max_float64
+ 1490:             # computes ccp_alpha for subtrees and finds the minimal alpha
++1491:             effective_alpha = max_float64
          __pyx_v_effective_alpha = __pyx_v_max_float64;
-+1492:             for i in range(n_nodes):
++1492:             for i in range(n_nodes):
          __pyx_t_1 = __pyx_v_n_nodes;
           __pyx_t_18 = __pyx_t_1;
           for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) {
             __pyx_v_i = __pyx_t_19;
-+1493:                 if not candidate_nodes[i]:
++1493:                 if not candidate_nodes[i]:
            __pyx_t_13 = __pyx_v_i;
             __pyx_t_16 = ((!((*((unsigned char *) ( /* dim=0 */ (__pyx_v_candidate_nodes.data + __pyx_t_13 * __pyx_v_candidate_nodes.strides[0]) ))) != 0)) != 0);
             if (__pyx_t_16) {
 /* … */
             }
-+1494:                     continue
++1494:                     continue
              goto __pyx_L36_continue;
-+1495:                 subtree_alpha = (r_node[i] - r_branch[i]) / (n_leaves[i] - 1)
++1495:                 subtree_alpha = (r_node[i] - r_branch[i]) / (n_leaves[i] - 1)
            __pyx_t_13 = __pyx_v_i;
             __pyx_t_4 = __pyx_v_i;
             __pyx_t_14 = __pyx_v_i;
             __pyx_v_subtree_alpha = (((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_node.data + __pyx_t_13 * __pyx_v_r_node.strides[0]) ))) - (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_4 * __pyx_v_r_branch.strides[0]) )))) / ((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_n_leaves.data + __pyx_t_14 * __pyx_v_n_leaves.strides[0]) ))) - 1));
-+1496:                 if subtree_alpha < effective_alpha:
++1496:                 if subtree_alpha < effective_alpha:
            __pyx_t_16 = ((__pyx_v_subtree_alpha < __pyx_v_effective_alpha) != 0);
             if (__pyx_t_16) {
 /* … */
             }
             __pyx_L36_continue:;
           }
-+1497:                     effective_alpha = subtree_alpha
++1497:                     effective_alpha = subtree_alpha
              __pyx_v_effective_alpha = __pyx_v_subtree_alpha;
-+1498:                     pruned_branch_node_idx = i
++1498:                     pruned_branch_node_idx = i
              __pyx_v_pruned_branch_node_idx = __pyx_v_i;
 
 1499: 
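Lines 1491-1498 are the weakest-link step of cost-complexity pruning: for every candidate internal node i, the effective alpha is (r_node[i] - r_branch[i]) / (n_leaves[i] - 1), and the node with the smallest value is pruned next. A minimal Python sketch, with names mirroring the source (illustrative only):

import math

def weakest_link(candidate_nodes, r_node, r_branch, n_leaves):
    effective_alpha = math.inf                # stands in for max_float64
    pruned_branch_node_idx = 0
    for i in range(len(candidate_nodes)):
        if not candidate_nodes[i]:
            continue
        subtree_alpha = (r_node[i] - r_branch[i]) / (n_leaves[i] - 1)
        if subtree_alpha < effective_alpha:
            effective_alpha = subtree_alpha
            pruned_branch_node_idx = i
    return effective_alpha, pruned_branch_node_idx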
-+1500:             if controller.stop_pruning(effective_alpha):
++1500:             if controller.stop_pruning(effective_alpha):
          __pyx_t_16 = (((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__CCPPruneController *)__pyx_v_controller->__pyx_vtab)->stop_pruning(__pyx_v_controller, __pyx_v_effective_alpha) != 0);
           if (__pyx_t_16) {
 /* … */
           }
-+1501:                 break
++1501:                 break
            goto __pyx_L35_break;
 
 1502: 
- 1503:             # stack uses only the start variable
-+1504:             rc = stack.push(pruned_branch_node_idx, 0, 0, 0, 0, 0, 0)
+ 1503:             # stack uses only the start variable
++1504:             rc = stack.push(pruned_branch_node_idx, 0, 0, 0, 0, 0, 0)
          __pyx_t_15 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, __pyx_v_pruned_branch_node_idx, 0, 0, 0, 0, 0.0, 0); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 1504, __pyx_L4_error)
           __pyx_v_rc = __pyx_t_15;
-+1505:             if rc == -1:
++1505:             if rc == -1:
          __pyx_t_16 = ((__pyx_v_rc == -1L) != 0);
           if (__pyx_t_16) {
 /* … */
           }
-+1506:                 with gil:
++1506:                 with gil:
            {
                 #ifdef WITH_THREAD
                 PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -9905,7 +9979,7 @@
                   }
                 }
             }
-+1507:                     raise MemoryError("pruning tree")
++1507:                     raise MemoryError("pruning tree")
                  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1507, __pyx_L45_error)
                   __Pyx_GOTREF(__pyx_t_6);
                   __Pyx_Raise(__pyx_t_6, 0, 0, 0);
@@ -9913,36 +9987,36 @@
                   __PYX_ERR(0, 1507, __pyx_L45_error)
                 }
 
 1508: 
- 1509:             # descendants of branch are not in subtree
-+1510:             while not stack.is_empty():
+ 1509:             # descendants of branch are not in subtree
++1510:             while not stack.is_empty():
          while (1) {
             __pyx_t_16 = ((!(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->is_empty(__pyx_v_stack) != 0)) != 0);
             if (!__pyx_t_16) break;
-+1511:                 stack.pop(&stack_record)
++1511:                 stack.pop(&stack_record)
            (void)(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->pop(__pyx_v_stack, (&__pyx_v_stack_record)));
-+1512:                 node_idx = stack_record.start
++1512:                 node_idx = stack_record.start
            __pyx_t_17 = __pyx_v_stack_record.start;
             __pyx_v_node_idx = __pyx_t_17;
 
 1513: 
-+1514:                 if not in_subtree[node_idx]:
++1514:                 if not in_subtree[node_idx]:
            __pyx_t_14 = __pyx_v_node_idx;
             __pyx_t_16 = ((!((*((unsigned char *) ( /* dim=0 */ (__pyx_v_in_subtree.data + __pyx_t_14 * __pyx_v_in_subtree.strides[0]) ))) != 0)) != 0);
             if (__pyx_t_16) {
 /* … */
             }
-+1515:                     continue # branch has already been marked for pruning
++1515:                     continue # branch has already been marked for pruning
              goto __pyx_L47_continue;
-+1516:                 candidate_nodes[node_idx] = 0
++1516:                 candidate_nodes[node_idx] = 0
            __pyx_t_14 = __pyx_v_node_idx;
             *((unsigned char *) ( /* dim=0 */ (__pyx_v_candidate_nodes.data + __pyx_t_14 * __pyx_v_candidate_nodes.strides[0]) )) = 0;
-+1517:                 leaves_in_subtree[node_idx] = 0
++1517:                 leaves_in_subtree[node_idx] = 0
            __pyx_t_14 = __pyx_v_node_idx;
             *((unsigned char *) ( /* dim=0 */ (__pyx_v_leaves_in_subtree.data + __pyx_t_14 * __pyx_v_leaves_in_subtree.strides[0]) )) = 0;
-+1518:                 in_subtree[node_idx] = 0
++1518:                 in_subtree[node_idx] = 0
            __pyx_t_14 = __pyx_v_node_idx;
             *((unsigned char *) ( /* dim=0 */ (__pyx_v_in_subtree.data + __pyx_t_14 * __pyx_v_in_subtree.strides[0]) )) = 0;
 
 1519: 
-+1520:                 if child_l[node_idx] != _TREE_LEAF:
++1520:                 if child_l[node_idx] != _TREE_LEAF:
            __pyx_t_14 = __pyx_v_node_idx;
             __pyx_t_16 = (((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_child_l.data + __pyx_t_14 * __pyx_v_child_l.strides[0]) ))) != __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_LEAF) != 0);
             if (__pyx_t_16) {
@@ -9950,17 +10024,17 @@
             }
             __pyx_L47_continue:;
           }
- 1521:                     # ... and child_r[node_idx] != _TREE_LEAF:
-+1522:                     rc = stack.push(child_l[node_idx], 0, 0, 0, 0, 0, 0)
+ 1521:                     # ... and child_r[node_idx] != _TREE_LEAF:
++1522:                     rc = stack.push(child_l[node_idx], 0, 0, 0, 0, 0, 0)
              __pyx_t_14 = __pyx_v_node_idx;
               __pyx_t_15 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_child_l.data + __pyx_t_14 * __pyx_v_child_l.strides[0]) ))), 0, 0, 0, 0, 0.0, 0); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 1522, __pyx_L4_error)
               __pyx_v_rc = __pyx_t_15;
-+1523:                     if rc == -1:
++1523:                     if rc == -1:
              __pyx_t_16 = ((__pyx_v_rc == -1L) != 0);
               if (__pyx_t_16) {
 /* … */
               }
-+1524:                         with gil:
++1524:                         with gil:
                {
                     #ifdef WITH_THREAD
                     PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -9976,23 +10050,23 @@
                       }
                     }
                 }
-+1525:                             raise MemoryError("pruning tree")
++1525:                             raise MemoryError("pruning tree")
                      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1525, __pyx_L55_error)
                       __Pyx_GOTREF(__pyx_t_6);
                       __Pyx_Raise(__pyx_t_6, 0, 0, 0);
                       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
                       __PYX_ERR(0, 1525, __pyx_L55_error)
                     }
-+1526:                     rc = stack.push(child_r[node_idx], 0, 0, 0, 0, 0, 0)
++1526:                     rc = stack.push(child_r[node_idx], 0, 0, 0, 0, 0, 0)
              __pyx_t_14 = __pyx_v_node_idx;
               __pyx_t_15 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_child_r.data + __pyx_t_14 * __pyx_v_child_r.strides[0]) ))), 0, 0, 0, 0, 0.0, 0); if (unlikely(__pyx_t_15 == ((int)-1))) __PYX_ERR(0, 1526, __pyx_L4_error)
               __pyx_v_rc = __pyx_t_15;
-+1527:                     if rc == -1:
++1527:                     if rc == -1:
              __pyx_t_16 = ((__pyx_v_rc == -1L) != 0);
               if (__pyx_t_16) {
 /* … */
               }
-+1528:                         with gil:
++1528:                         with gil:
                {
                     #ifdef WITH_THREAD
                     PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -10008,69 +10082,69 @@
                       }
                     }
                 }
-+1529:                             raise MemoryError("pruning tree")
++1529:                             raise MemoryError("pruning tree")
                      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1529, __pyx_L61_error)
                       __Pyx_GOTREF(__pyx_t_6);
                       __Pyx_Raise(__pyx_t_6, 0, 0, 0);
                       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
                       __PYX_ERR(0, 1529, __pyx_L61_error)
                     }
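Lines 1510-1529 sweep the chosen branch with the same stack and take all of its descendants out of the running, so they can no longer be pruning candidates. Sketched in Python (illustrative only; tree_leaf is the -1 sentinel):

def drop_descendants(root, child_l, child_r, in_subtree,
                     candidate_nodes, leaves_in_subtree, tree_leaf=-1):
    stack = [root]
    while stack:
        node_idx = stack.pop()
        if not in_subtree[node_idx]:
            continue                          # branch already marked for pruning
        candidate_nodes[node_idx] = 0
        leaves_in_subtree[node_idx] = 0
        in_subtree[node_idx] = 0
        if child_l[node_idx] != tree_leaf:    # ... and child_r[node_idx] != tree_leaf
            stack.append(child_l[node_idx])
            stack.append(child_r[node_idx])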
-+1530:             leaves_in_subtree[pruned_branch_node_idx] = 1
++1530:             leaves_in_subtree[pruned_branch_node_idx] = 1
          __pyx_t_14 = __pyx_v_pruned_branch_node_idx;
           *((unsigned char *) ( /* dim=0 */ (__pyx_v_leaves_in_subtree.data + __pyx_t_14 * __pyx_v_leaves_in_subtree.strides[0]) )) = 1;
-+1531:             in_subtree[pruned_branch_node_idx] = 1
++1531:             in_subtree[pruned_branch_node_idx] = 1
          __pyx_t_14 = __pyx_v_pruned_branch_node_idx;
           *((unsigned char *) ( /* dim=0 */ (__pyx_v_in_subtree.data + __pyx_t_14 * __pyx_v_in_subtree.strides[0]) )) = 1;
 
 1532: 
- 1533:             # updates number of leaves
-+1534:             n_pruned_leaves = n_leaves[pruned_branch_node_idx] - 1
+ 1533:             # updates number of leaves
++1534:             n_pruned_leaves = n_leaves[pruned_branch_node_idx] - 1
          __pyx_t_14 = __pyx_v_pruned_branch_node_idx;
           __pyx_v_n_pruned_leaves = ((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_n_leaves.data + __pyx_t_14 * __pyx_v_n_leaves.strides[0]) ))) - 1);
-+1535:             n_leaves[pruned_branch_node_idx] = 0
++1535:             n_leaves[pruned_branch_node_idx] = 0
          __pyx_t_14 = __pyx_v_pruned_branch_node_idx;
           *((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_n_leaves.data + __pyx_t_14 * __pyx_v_n_leaves.strides[0]) )) = 0;
 
 1536: 
- 1537:             # computes the increase in r_branch to bubble up
-+1538:             r_diff = r_node[pruned_branch_node_idx] - r_branch[pruned_branch_node_idx]
+ 1537:             # computes the increase in r_branch to bubble up
++1538:             r_diff = r_node[pruned_branch_node_idx] - r_branch[pruned_branch_node_idx]
          __pyx_t_14 = __pyx_v_pruned_branch_node_idx;
           __pyx_t_4 = __pyx_v_pruned_branch_node_idx;
           __pyx_v_r_diff = ((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_node.data + __pyx_t_14 * __pyx_v_r_node.strides[0]) ))) - (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_4 * __pyx_v_r_branch.strides[0]) ))));
-+1539:             r_branch[pruned_branch_node_idx] = r_node[pruned_branch_node_idx]
++1539:             r_branch[pruned_branch_node_idx] = r_node[pruned_branch_node_idx]
          __pyx_t_4 = __pyx_v_pruned_branch_node_idx;
           __pyx_t_14 = __pyx_v_pruned_branch_node_idx;
           *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_14 * __pyx_v_r_branch.strides[0]) )) = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_node.data + __pyx_t_4 * __pyx_v_r_node.strides[0]) )));
 
 1540: 
- 1541:             # bubble up values to ancestors
-+1542:             node_idx = parent[pruned_branch_node_idx]
+ 1541:             # bubble up values to ancestors
++1542:             node_idx = parent[pruned_branch_node_idx]
          __pyx_t_4 = __pyx_v_pruned_branch_node_idx;
           __pyx_v_node_idx = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_parent.data + __pyx_t_4 * __pyx_v_parent.strides[0]) )));
-+1543:             while node_idx != -1:
++1543:             while node_idx != -1:
          while (1) {
             __pyx_t_16 = ((__pyx_v_node_idx != -1L) != 0);
             if (!__pyx_t_16) break;
-+1544:                 n_leaves[node_idx] -= n_pruned_leaves
++1544:                 n_leaves[node_idx] -= n_pruned_leaves
            __pyx_t_4 = __pyx_v_node_idx;
             *((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_n_leaves.data + __pyx_t_4 * __pyx_v_n_leaves.strides[0]) )) -= __pyx_v_n_pruned_leaves;
-+1545:                 r_branch[node_idx] += r_diff
++1545:                 r_branch[node_idx] += r_diff
            __pyx_t_4 = __pyx_v_node_idx;
             *((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_4 * __pyx_v_r_branch.strides[0]) )) += __pyx_v_r_diff;
-+1546:                 node_idx = parent[node_idx]
++1546:                 node_idx = parent[node_idx]
            __pyx_t_4 = __pyx_v_node_idx;
             __pyx_v_node_idx = (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t *) ( /* dim=0 */ (__pyx_v_parent.data + __pyx_t_4 * __pyx_v_parent.strides[0]) )));
           }
 
 1547: 
-+1548:             controller.save_metrics(effective_alpha, r_branch[0])
++1548:             controller.save_metrics(effective_alpha, r_branch[0])
          __pyx_t_4 = 0;
           ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__CCPPruneController *)__pyx_v_controller->__pyx_vtab)->save_metrics(__pyx_v_controller, __pyx_v_effective_alpha, (*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_r_branch.data + __pyx_t_4 * __pyx_v_r_branch.strides[0]) ))));
         }
         __pyx_L35_break:;
 
 1549: 
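Lines 1530-1546 then turn the pruned branch root into a leaf and propagate the change to its ancestors: the branch loses n_leaves[p] - 1 leaves, and its risk rises by r_node[p] - r_branch[p]. The bookkeeping, sketched in Python (illustrative only):

def mark_pruned(p, parent, leaves_in_subtree, in_subtree,
                n_leaves, r_node, r_branch):
    leaves_in_subtree[p] = 1
    in_subtree[p] = 1
    n_pruned_leaves = n_leaves[p] - 1         # the branch collapses to a single leaf
    n_leaves[p] = 0
    r_diff = r_node[p] - r_branch[p]          # risk increase caused by the collapse
    r_branch[p] = r_node[p]
    node_idx = parent[p]
    while node_idx != -1:                     # bubble both deltas up to the root
        n_leaves[node_idx] -= n_pruned_leaves
        r_branch[node_idx] += r_diff
        node_idx = parent[node_idx]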
-+1550:         controller.after_pruning(in_subtree)
++1550:         controller.after_pruning(in_subtree)
        ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree__CCPPruneController *)__pyx_v_controller->__pyx_vtab)->after_pruning(__pyx_v_controller, __pyx_v_in_subtree);
       }
 
 1551: 
 1552: 
-+1553: def _build_pruned_tree_ccp(
++1553: def _build_pruned_tree_ccp(
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_1_build_pruned_tree_ccp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
 static char __pyx_doc_13stpredictions_6models_3OK3_5_tree__build_pruned_tree_ccp[] = "Build a pruned tree from the original tree using cost complexity\n    pruning.\n\n    The values and nodes from the original tree are copied into the pruned\n    tree.\n\n    Parameters\n    ----------\n    tree : Tree\n        Location to place the pruned tree\n    orig_tree : Tree\n        Original tree\n    ccp_alpha : positive double\n        Complexity parameter. The subtree with the largest cost complexity\n        that is smaller than ``ccp_alpha`` will be chosen. By default,\n        no pruning is performed.\n    ";
@@ -10190,32 +10264,32 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_build_pruned_tree_ccp, __pyx_t_1) < 0) __PYX_ERR(0, 1553, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_codeobj__50 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__49, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stpredictions_models_OK3__tree_p, __pyx_n_s_build_pruned_tree_ccp, 1553, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__50)) __PYX_ERR(0, 1553, __pyx_L1_error)
- 1554:     Tree tree, # OUT
- 1555:     Tree orig_tree,
- 1556:     DOUBLE_t ccp_alpha):
- 1557:     """Build a pruned tree from the original tree using cost complexity
- 1558:     pruning.
+ 1554:     Tree tree, # OUT
+ 1555:     Tree orig_tree,
+ 1556:     DOUBLE_t ccp_alpha):
+ 1557:     """Build a pruned tree from the original tree using cost complexity
+ 1558:     pruning.
 1559: 
- 1560:     The values and nodes from the original tree are copied into the pruned
- 1561:     tree.
+ 1560:     The values and nodes from the original tree are copied into the pruned
+ 1561:     tree.
 1562: 
- 1563:     Parameters
- 1564:     ----------
- 1565:     tree : Tree
- 1566:         Location to place the pruned tree
- 1567:     orig_tree : Tree
- 1568:         Original tree
- 1569:     ccp_alpha : positive double
- 1570:         Complexity parameter. The subtree with the largest cost complexity
- 1571:         that is smaller than ``ccp_alpha`` will be chosen. By default,
- 1572:         no pruning is performed.
- 1573:     """
+ 1563:     Parameters
+ 1564:     ----------
+ 1565:     tree : Tree
+ 1566:         Location to place the pruned tree
+ 1567:     orig_tree : Tree
+ 1568:         Original tree
+ 1569:     ccp_alpha : positive double
+ 1570:         Complexity parameter. The subtree with the largest cost complexity
+ 1571:         that is smaller than ``ccp_alpha`` will be chosen. By default,
+ 1572:         no pruning is performed.
+ 1573:     """
 1574: 
- 1575:     cdef:
-+1576:         SIZE_t n_nodes = orig_tree.node_count
+ 1575:     cdef:
++1576:         SIZE_t n_nodes = orig_tree.node_count
  __pyx_t_1 = __pyx_v_orig_tree->node_count;
   __pyx_v_n_nodes = __pyx_t_1;
-+1577:         unsigned char[:] leaves_in_subtree = np.zeros(
++1577:         unsigned char[:] leaves_in_subtree = np.zeros(
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1577, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1577, __pyx_L1_error)
@@ -10231,7 +10305,7 @@
   __pyx_v_leaves_in_subtree = __pyx_t_6;
   __pyx_t_6.memview = NULL;
   __pyx_t_6.data = NULL;
-+1578:             shape=n_nodes, dtype=np.uint8)
++1578:             shape=n_nodes, dtype=np.uint8)
  __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1578, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_n_nodes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1578, __pyx_L1_error)
@@ -10246,7 +10320,7 @@
   if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 1578, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
 
 1579: 
-+1580:     pruning_controller = _AlphaPruner(ccp_alpha=ccp_alpha)
++1580:     pruning_controller = _AlphaPruner(ccp_alpha=ccp_alpha)
  __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1580, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_5);
   __pyx_t_2 = PyFloat_FromDouble(__pyx_v_ccp_alpha); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1580, __pyx_L1_error)
@@ -10259,19 +10333,19 @@
   __pyx_v_pruning_controller = ((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__AlphaPruner *)__pyx_t_2);
   __pyx_t_2 = 0;
 
 1581: 
-+1582:     _cost_complexity_prune(leaves_in_subtree, orig_tree, pruning_controller)
++1582:     _cost_complexity_prune(leaves_in_subtree, orig_tree, pruning_controller)
  __pyx_t_2 = __pyx_f_13stpredictions_6models_3OK3_5_tree__cost_complexity_prune(__pyx_v_leaves_in_subtree, __pyx_v_orig_tree, ((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController *)__pyx_v_pruning_controller)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1582, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
 1583: 
-+1584:     _build_pruned_tree(tree, orig_tree, leaves_in_subtree,
++1584:     _build_pruned_tree(tree, orig_tree, leaves_in_subtree,
  __pyx_t_2 = __pyx_f_13stpredictions_6models_3OK3_5_tree__build_pruned_tree(__pyx_v_tree, __pyx_v_orig_tree, __pyx_v_leaves_in_subtree, __pyx_v_pruning_controller->capacity); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1584, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- 1585:                        pruning_controller.capacity)
+ 1585:                        pruning_controller.capacity)
 1586: 
 1587: 
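Hypothetical usage of the entry point defined above; the Tree constructor's arguments depend on the OK3 Tree API and are elided here, so this is a sketch rather than a verified call:

# from stpredictions.models.OK3._tree import Tree, _build_pruned_tree_ccp
# pruned = Tree(...)                  # hypothetical: an empty Tree to receive the result
# _build_pruned_tree_ccp(pruned, fitted_tree, 0.01)   # prune fitted_tree at ccp_alpha=0.01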
-+1588: def ccp_pruning_path(Tree orig_tree):
++1588: def ccp_pruning_path(Tree orig_tree):
/* Python wrapper */
 static PyObject *__pyx_pw_13stpredictions_6models_3OK3_5_tree_3ccp_pruning_path(PyObject *__pyx_self, PyObject *__pyx_v_orig_tree); /*proto*/
 static char __pyx_doc_13stpredictions_6models_3OK3_5_tree_2ccp_pruning_path[] = "Computes the cost complexity pruning path.\n\n    Parameters\n    ----------\n    tree : Tree\n        Original tree.\n\n    Returns\n    -------\n    path_info : dict\n        Information about pruning path with attributes:\n\n        ccp_alphas : ndarray\n            Effective alphas of subtree during pruning.\n\n        impurities : ndarray\n            Sum of the impurities of the subtree leaves for the\n            corresponding alpha value in ``ccp_alphas``.\n    ";
@@ -10334,27 +10408,27 @@
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_ccp_pruning_path, __pyx_t_1) < 0) __PYX_ERR(0, 1588, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_codeobj__52 = (PyObject*)__Pyx_PyCode_New(1, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__51, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stpredictions_models_OK3__tree_p, __pyx_n_s_ccp_pruning_path, 1588, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__52)) __PYX_ERR(0, 1588, __pyx_L1_error)
- 1589:     """Computes the cost complexity pruning path.
+ 1589:     """Computes the cost complexity pruning path.
 1590: 
- 1591:     Parameters
- 1592:     ----------
- 1593:     tree : Tree
- 1594:         Original tree.
+ 1591:     Parameters
+ 1592:     ----------
+ 1593:     tree : Tree
+ 1594:         Original tree.
 1595: 
- 1596:     Returns
- 1597:     -------
- 1598:     path_info : dict
- 1599:         Information about pruning path with attributes:
+ 1596:     Returns
+ 1597:     -------
+ 1598:     path_info : dict
+ 1599:         Information about pruning path with attributes:
 1600: 
- 1601:         ccp_alphas : ndarray
- 1602:             Effective alphas of subtree during pruning.
+ 1601:         ccp_alphas : ndarray
+ 1602:             Effective alphas of subtree during pruning.
 1603: 
- 1604:         impurities : ndarray
- 1605:             Sum of the impurities of the subtree leaves for the
- 1606:             corresponding alpha value in ``ccp_alphas``.
- 1607:     """
- 1608:     cdef:
-+1609:         unsigned char[:] leaves_in_subtree = np.zeros(
+ 1604:         impurities : ndarray
+ 1605:             Sum of the impurities of the subtree leaves for the
+ 1606:             corresponding alpha value in ``ccp_alphas``.
+ 1607:     """
+ 1608:     cdef:
++1609:         unsigned char[:] leaves_in_subtree = np.zeros(
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1609, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1609, __pyx_L1_error)
@@ -10370,7 +10444,7 @@
   __pyx_v_leaves_in_subtree = __pyx_t_5;
   __pyx_t_5.memview = NULL;
   __pyx_t_5.data = NULL;
-+1610:             shape=orig_tree.node_count, dtype=np.uint8)
++1610:             shape=orig_tree.node_count, dtype=np.uint8)
  __pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1610, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_orig_tree->node_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1610, __pyx_L1_error)
@@ -10385,7 +10459,7 @@
   if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 1610, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
 
 1611: 
-+1612:     path_finder = _PathFinder(orig_tree.node_count)
++1612:     path_finder = _PathFinder(orig_tree.node_count)
  __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_orig_tree->node_count); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1612, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_1 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_13stpredictions_6models_3OK3_5_tree__PathFinder), __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1612, __pyx_L1_error)
@@ -10394,16 +10468,16 @@
   __pyx_v_path_finder = ((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__PathFinder *)__pyx_t_1);
   __pyx_t_1 = 0;
 
 1613: 
-+1614:     _cost_complexity_prune(leaves_in_subtree, orig_tree, path_finder)
++1614:     _cost_complexity_prune(leaves_in_subtree, orig_tree, path_finder)
  __pyx_t_1 = __pyx_f_13stpredictions_6models_3OK3_5_tree__cost_complexity_prune(__pyx_v_leaves_in_subtree, __pyx_v_orig_tree, ((struct __pyx_obj_13stpredictions_6models_3OK3_5_tree__CCPPruneController *)__pyx_v_path_finder)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1614, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
 1615: 
- 1616:     cdef:
-+1617:         UINT32_t total_items = path_finder.count
+ 1616:     cdef:
++1617:         UINT32_t total_items = path_finder.count
  __pyx_t_6 = __pyx_v_path_finder->count;
   __pyx_v_total_items = __pyx_t_6;
-+1618:         np.ndarray ccp_alphas = np.empty(shape=total_items,
++1618:         np.ndarray ccp_alphas = np.empty(shape=total_items,
  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1618, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1618, __pyx_L1_error)
@@ -10423,7 +10497,7 @@
   if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1618, __pyx_L1_error)
   __pyx_v_ccp_alphas = ((PyArrayObject *)__pyx_t_3);
   __pyx_t_3 = 0;
-+1619:                                          dtype=np.float64)
++1619:                                          dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1619, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1619, __pyx_L1_error)
@@ -10431,7 +10505,7 @@
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 1618, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-+1620:         np.ndarray impurities = np.empty(shape=total_items,
++1620:         np.ndarray impurities = np.empty(shape=total_items,
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1620, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1620, __pyx_L1_error)
@@ -10451,7 +10525,7 @@
   if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1620, __pyx_L1_error)
   __pyx_v_impurities = ((PyArrayObject *)__pyx_t_2);
   __pyx_t_2 = 0;
-+1621:                                          dtype=np.float64)
++1621:                                          dtype=np.float64)
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1621, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_4);
   __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1621, __pyx_L1_error)
@@ -10459,32 +10533,32 @@
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 1620, __pyx_L1_error)
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-+1622:         UINT32_t count = 0
++1622:         UINT32_t count = 0
  __pyx_v_count = 0;
 
 1623: 
-+1624:     while count < total_items:
++1624:     while count < total_items:
  while (1) {
     __pyx_t_7 = ((__pyx_v_count < __pyx_v_total_items) != 0);
     if (!__pyx_t_7) break;
-+1625:         ccp_alphas[count] = path_finder.ccp_alphas[count]
++1625:         ccp_alphas[count] = path_finder.ccp_alphas[count]
    if (unlikely(!__pyx_v_path_finder->ccp_alphas.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 1625, __pyx_L1_error)}
     __pyx_t_8 = __pyx_v_count;
     __pyx_t_2 = PyFloat_FromDouble((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_path_finder->ccp_alphas.data + __pyx_t_8 * __pyx_v_path_finder->ccp_alphas.strides[0]) )))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1625, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     if (unlikely(__Pyx_SetItemInt(((PyObject *)__pyx_v_ccp_alphas), __pyx_v_count, __pyx_t_2, __pyx_t_13stpredictions_6models_3OK3_5_tree_UINT32_t, 0, __Pyx_PyInt_From_npy_uint32, 0, 0, 0) < 0)) __PYX_ERR(0, 1625, __pyx_L1_error)
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-+1626:         impurities[count] = path_finder.impurities[count]
++1626:         impurities[count] = path_finder.impurities[count]
    if (unlikely(!__pyx_v_path_finder->impurities.memview)) {PyErr_SetString(PyExc_AttributeError,"Memoryview is not initialized");__PYX_ERR(0, 1626, __pyx_L1_error)}
     __pyx_t_8 = __pyx_v_count;
     __pyx_t_2 = PyFloat_FromDouble((*((__pyx_t_13stpredictions_6models_3OK3_5_tree_DOUBLE_t *) ( /* dim=0 */ (__pyx_v_path_finder->impurities.data + __pyx_t_8 * __pyx_v_path_finder->impurities.strides[0]) )))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1626, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_2);
     if (unlikely(__Pyx_SetItemInt(((PyObject *)__pyx_v_impurities), __pyx_v_count, __pyx_t_2, __pyx_t_13stpredictions_6models_3OK3_5_tree_UINT32_t, 0, __Pyx_PyInt_From_npy_uint32, 0, 0, 0) < 0)) __PYX_ERR(0, 1626, __pyx_L1_error)
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-+1627:         count += 1
++1627:         count += 1
    __pyx_v_count = (__pyx_v_count + 1);
   }
 
 1628: 
-+1629:     return {'ccp_alphas': ccp_alphas, 'impurities': impurities}
++1629:     return {'ccp_alphas': ccp_alphas, 'impurities': impurities}
  __Pyx_XDECREF(__pyx_r);
   __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1629, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
@@ -10495,7 +10569,7 @@
   goto __pyx_L0;
 
 1630: 
 1631: 
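A sketch of consuming ccp_pruning_path, mirroring sklearn's cost_complexity_pruning_path; the dict layout comes from line 1629, and fitted_tree stands for an already-fitted OK3 Tree (assumed name):

# path = ccp_pruning_path(fitted_tree)
# for alpha, impurity in zip(path['ccp_alphas'], path['impurities']):
#     print(f"ccp_alpha={alpha:.6f}  total leaf impurity={impurity:.6f}")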
-+1632: cdef _build_pruned_tree(
++1632: cdef _build_pruned_tree(
static PyObject *__pyx_f_13stpredictions_6models_3OK3_5_tree__build_pruned_tree(struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_tree, struct __pyx_obj_13stpredictions_6models_3OK3_5_tree_Tree *__pyx_v_orig_tree, __Pyx_memviewslice __pyx_v_leaves_in_subtree, __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_capacity) {
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_orig_node_id;
   __pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t __pyx_v_new_node_id;
@@ -10528,47 +10602,47 @@
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
- 1633:     Tree tree, # OUT
- 1634:     Tree orig_tree,
- 1635:     const unsigned char[:] leaves_in_subtree,
- 1636:     SIZE_t capacity):
- 1637:     """Build a pruned tree.
+ 1633:     Tree tree, # OUT
+ 1634:     Tree orig_tree,
+ 1635:     const unsigned char[:] leaves_in_subtree,
+ 1636:     SIZE_t capacity):
+ 1637:     """Build a pruned tree.
 1638: 
- 1639:     Build a pruned tree from the original tree by transforming the nodes in
- 1640:     ``leaves_in_subtree`` into leaves.
+ 1639:     Build a pruned tree from the original tree by transforming the nodes in
+ 1640:     ``leaves_in_subtree`` into leaves.
 1641: 
- 1642:     Parameters
- 1643:     ----------
- 1644:     tree : Tree
- 1645:         Location to place the pruned tree
- 1646:     orig_tree : Tree
- 1647:         Original tree
- 1648:     leaves_in_subtree : unsigned char memoryview, shape=(node_count, )
- 1649:         Boolean mask for leaves to include in subtree
- 1650:     capacity : SIZE_t
- 1651:         Number of nodes to initially allocate in pruned tree
- 1652:     """
-+1653:     tree._resize(capacity)
+ 1642:     Parameters
+ 1643:     ----------
+ 1644:     tree : Tree
+ 1645:         Location to place the pruned tree
+ 1646:     orig_tree : Tree
+ 1647:         Original tree
+ 1648:     leaves_in_subtree : unsigned char memoryview, shape=(node_count, )
+ 1649:         Boolean mask for leaves to include in subtree
+ 1650:     capacity : SIZE_t
+ 1651:         Number of nodes to initially allocate in pruned tree
+ 1652:     """
++1653:     tree._resize(capacity)
  __pyx_t_1 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_resize(__pyx_v_tree, __pyx_v_capacity); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 1653, __pyx_L1_error)
 
 1654: 
- 1655:     cdef:
- 1656:         SIZE_t orig_node_id
- 1657:         SIZE_t new_node_id
- 1658:         SIZE_t depth
- 1659:         SIZE_t parent
- 1660:         bint is_left
- 1661:         bint is_leaf
+ 1655:     cdef:
+ 1656:         SIZE_t orig_node_id
+ 1657:         SIZE_t new_node_id
+ 1658:         SIZE_t depth
+ 1659:         SIZE_t parent
+ 1660:         bint is_left
+ 1661:         bint is_leaf
 1662: 
-+1663:         SIZE_t max_depth_seen = -1
++1663:         SIZE_t max_depth_seen = -1
  __pyx_v_max_depth_seen = -1;
-+1664:         int rc = 0
++1664:         int rc = 0
  __pyx_v_rc = 0;
- 1665:         Node* node
- 1666:         double* orig_value_ptr
- 1667:         double* new_value_ptr
+ 1665:         Node* node
+ 1666:         double* orig_value_ptr
+ 1667:         double* new_value_ptr
 1668: 
- 1669:         # Only uses the start, depth, parent, and is_left variables
-+1670:         Stack stack = Stack(INITIAL_STACK_SIZE)
+ 1669:         # Only uses the start, depth, parent, and is_left variables
++1670:         Stack stack = Stack(INITIAL_STACK_SIZE)
  __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_13stpredictions_6models_3OK3_5_tree_INITIAL_STACK_SIZE); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1670, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_7sklearn_4tree_6_utils_Stack), __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1670, __pyx_L1_error)
@@ -10576,9 +10650,9 @@
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_v_stack = ((struct __pyx_obj_7sklearn_4tree_6_utils_Stack *)__pyx_t_3);
   __pyx_t_3 = 0;
- 1671:         StackRecord stack_record
+ 1671:         StackRecord stack_record
 1672: 
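The body that follows (lines 1675-1713) is a DFS over the original tree that re-adds each kept node to the output tree and stops descending at nodes flagged in leaves_in_subtree. A high-level Python sketch, with add_node standing in for Tree._add_node (illustrative only):

_TREE_UNDEFINED = -2

def copy_pruned(orig_nodes, leaves_in_subtree, add_node):
    stack = [(0, 0, _TREE_UNDEFINED, False)]  # (orig id, depth, new parent, is_left)
    while stack:
        orig_id, depth, parent, is_left = stack.pop()
        node = orig_nodes[orig_id]
        is_leaf = bool(leaves_in_subtree[orig_id])
        new_id = add_node(parent, is_left, is_leaf, node)
        if not is_leaf:                       # keep walking below internal nodes
            stack.append((node.right_child, depth + 1, new_id, False))
            stack.append((node.left_child, depth + 1, new_id, True))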
-+1673:     with nogil:
++1673:     with nogil:
  {
       #ifdef WITH_THREAD
       PyThreadState *_save;
@@ -10605,16 +10679,16 @@
         __pyx_L5:;
       }
   }
- 1674:         # push root node onto stack
-+1675:         rc = stack.push(0, 0, 0, _TREE_UNDEFINED, 0, 0.0, 0)
+ 1674:         # push root node onto stack
++1675:         rc = stack.push(0, 0, 0, _TREE_UNDEFINED, 0, 0.0, 0)
        __pyx_t_1 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, 0, 0, 0, __pyx_v_13stpredictions_6models_3OK3_5_tree__TREE_UNDEFINED, 0, 0.0, 0); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 1675, __pyx_L4_error)
         __pyx_v_rc = __pyx_t_1;
-+1676:         if rc == -1:
++1676:         if rc == -1:
        __pyx_t_4 = ((__pyx_v_rc == -1L) != 0);
         if (__pyx_t_4) {
 /* … */
         }
-+1677:             with gil:
++1677:             with gil:
          {
               #ifdef WITH_THREAD
               PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
@@ -10630,7 +10704,7 @@
                 }
               }
           }
-+1678:                 raise MemoryError("pruning tree")
++1678:                 raise MemoryError("pruning tree")
                __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1678, __pyx_L8_error)
                 __Pyx_GOTREF(__pyx_t_3);
                 __Pyx_Raise(__pyx_t_3, 0, 0, 0);
@@ -10638,108 +10712,108 @@
                 __PYX_ERR(0, 1678, __pyx_L8_error)
               }
 
 1679: 
-+1680:         while not stack.is_empty():
++1680:         while not stack.is_empty():
        while (1) {
           __pyx_t_4 = ((!(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->is_empty(__pyx_v_stack) != 0)) != 0);
           if (!__pyx_t_4) break;
-+1681:             stack.pop(&stack_record)
++1681:             stack.pop(&stack_record)
          (void)(((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->pop(__pyx_v_stack, (&__pyx_v_stack_record)));
 
 1682: 
-+1683:             orig_node_id = stack_record.start
++1683:             orig_node_id = stack_record.start
          __pyx_t_5 = __pyx_v_stack_record.start;
           __pyx_v_orig_node_id = __pyx_t_5;
-+1684:             depth = stack_record.depth
++1684:             depth = stack_record.depth
          __pyx_t_5 = __pyx_v_stack_record.depth;
           __pyx_v_depth = __pyx_t_5;
-+1685:             parent = stack_record.parent
++1685:             parent = stack_record.parent
          __pyx_t_5 = __pyx_v_stack_record.parent;
           __pyx_v_parent = __pyx_t_5;
-+1686:             is_left = stack_record.is_left
++1686:             is_left = stack_record.is_left
          __pyx_t_4 = __pyx_v_stack_record.is_left;
           __pyx_v_is_left = __pyx_t_4;
 
 1687: 
-+1688:             is_leaf = leaves_in_subtree[orig_node_id]
++1688:             is_leaf = leaves_in_subtree[orig_node_id]
          __pyx_t_6 = __pyx_v_orig_node_id;
           __pyx_v_is_leaf = (*((unsigned char const  *) ( /* dim=0 */ (__pyx_v_leaves_in_subtree.data + __pyx_t_6 * __pyx_v_leaves_in_subtree.strides[0]) )));
-+1689:             node = &orig_tree.nodes[orig_node_id]
++1689:             node = &orig_tree.nodes[orig_node_id]
          __pyx_v_node = (&(__pyx_v_orig_tree->nodes[__pyx_v_orig_node_id]));
 
 1690: 
-+1691:             new_node_id = tree._add_node(
++1691:             new_node_id = tree._add_node(
          __pyx_t_7 = ((struct __pyx_vtabstruct_13stpredictions_6models_3OK3_5_tree_Tree *)__pyx_v_tree->__pyx_vtab)->_add_node(__pyx_v_tree, __pyx_v_parent, __pyx_v_is_left, __pyx_v_is_leaf, __pyx_v_node->feature, __pyx_v_node->threshold, __pyx_v_node->impurity, __pyx_v_node->n_node_samples, __pyx_v_node->weighted_n_node_samples); if (unlikely(__pyx_t_7 == ((__pyx_t_13stpredictions_6models_3OK3_5_tree_SIZE_t)-1))) __PYX_ERR(0, 1691, __pyx_L4_error)
           __pyx_v_new_node_id = __pyx_t_7;
-
 1692:                 parent, is_left, is_leaf, node.feature, node.threshold,
-
 1693:                 node.impurity, node.n_node_samples,
-
 1694:                 node.weighted_n_node_samples)
+
 1692:                 parent, is_left, is_leaf, node.feature, node.threshold,
+
 1693:                 node.impurity, node.n_node_samples,
+
 1694:                 node.weighted_n_node_samples)
 1695: 
-
+1696:             if new_node_id == SIZE_MAX:
+
+1696:             if new_node_id == SIZE_MAX:
          __pyx_t_4 = ((__pyx_v_new_node_id == SIZE_MAX) != 0);
           if (__pyx_t_4) {
 /* … */
           }
-
+1697:                 rc = -1
+
+1697:                 rc = -1
            __pyx_v_rc = -1;
-
+1698:                 break
+
+1698:                 break
            goto __pyx_L11_break;
 
 1699: 
-
 1700:             # copy value from original tree to new tree
-
+1701:             orig_value_ptr = orig_tree.value + tree.K_y.shape[0] * orig_node_id
+
 1700:             # copy value from original tree to new tree
+
+1701:             orig_value_ptr = orig_tree.value + tree.K_y.shape[0] * orig_node_id
          __pyx_v_orig_value_ptr = (__pyx_v_orig_tree->value + ((__pyx_v_tree->K_y->dimensions[0]) * __pyx_v_orig_node_id));
-
+1702:             new_value_ptr = tree.value + tree.K_y.shape[0] * new_node_id
+
+1702:             new_value_ptr = tree.value + tree.K_y.shape[0] * new_node_id
          __pyx_v_new_value_ptr = (__pyx_v_tree->value + ((__pyx_v_tree->K_y->dimensions[0]) * __pyx_v_new_node_id));
-
+1703:             memcpy(new_value_ptr, orig_value_ptr, sizeof(double) * tree.K_y.shape[0])
+
+1703:             memcpy(new_value_ptr, orig_value_ptr, sizeof(double) * tree.K_y.shape[0])
          (void)(memcpy(__pyx_v_new_value_ptr, __pyx_v_orig_value_ptr, ((sizeof(double)) * (__pyx_v_tree->K_y->dimensions[0]))));
 
 1704: 
-
+1705:             if not is_leaf:
+
+1705:             if not is_leaf:
          __pyx_t_4 = ((!(__pyx_v_is_leaf != 0)) != 0);
           if (__pyx_t_4) {
 /* … */
           }
-
 1706:                 # Push right child on stack
-
+1707:                 rc = stack.push(
+
 1706:                 # Push right child on stack
+
+1707:                 rc = stack.push(
            __pyx_t_1 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, __pyx_v_node->right_child, 0, (__pyx_v_depth + 1), __pyx_v_new_node_id, 0, 0.0, 0); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 1707, __pyx_L4_error)
             __pyx_v_rc = __pyx_t_1;
-
 1708:                     node.right_child, 0, depth + 1, new_node_id, 0, 0.0, 0)
-
+1709:                 if rc == -1:
+
 1708:                     node.right_child, 0, depth + 1, new_node_id, 0, 0.0, 0)
+
+1709:                 if rc == -1:
            __pyx_t_4 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_4) {
 /* … */
             }
-
+1710:                     break
+
+1710:                     break
              goto __pyx_L11_break;
 
 1711: 
-
 1712:                 # push left child on stack
-
+1713:                 rc = stack.push(
+
 1712:                 # push left child on stack
+
+1713:                 rc = stack.push(
            __pyx_t_1 = ((struct __pyx_vtabstruct_7sklearn_4tree_6_utils_Stack *)__pyx_v_stack->__pyx_vtab)->push(__pyx_v_stack, __pyx_v_node->left_child, 0, (__pyx_v_depth + 1), __pyx_v_new_node_id, 1, 0.0, 0); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 1713, __pyx_L4_error)
             __pyx_v_rc = __pyx_t_1;
-
 1714:                     node.left_child, 0, depth + 1, new_node_id, 1, 0.0, 0)
-
+1715:                 if rc == -1:
+
 1714:                     node.left_child, 0, depth + 1, new_node_id, 1, 0.0, 0)
+
+1715:                 if rc == -1:
            __pyx_t_4 = ((__pyx_v_rc == -1L) != 0);
             if (__pyx_t_4) {
 /* … */
             }
-
+1716:                     break
+
+1716:                     break
              goto __pyx_L11_break;
 
 1717: 
-
+1718:             if depth > max_depth_seen:
+
+1718:             if depth > max_depth_seen:
          __pyx_t_4 = ((__pyx_v_depth > __pyx_v_max_depth_seen) != 0);
           if (__pyx_t_4) {
 /* … */
           }
         }
         __pyx_L11_break:;
-
+1719:                 max_depth_seen = depth
+
+1719:                 max_depth_seen = depth
            __pyx_v_max_depth_seen = __pyx_v_depth;
 
 1720: 
-
+1721:         if rc >= 0:
+
+1721:         if rc >= 0:
        __pyx_t_4 = ((__pyx_v_rc >= 0) != 0);
         if (__pyx_t_4) {
 /* … */
         }
       }
-
+1722:             tree.max_depth = max_depth_seen
+
+1722:             tree.max_depth = max_depth_seen
          __pyx_v_tree->max_depth = __pyx_v_max_depth_seen;
 
 1723: 
-
+1724:     tree.K_y = orig_tree.K_y
+
+1724:     tree.K_y = orig_tree.K_y
  __pyx_t_3 = ((PyObject *)__pyx_v_orig_tree->K_y);
   __Pyx_INCREF(__pyx_t_3);
   __Pyx_GIVEREF(__pyx_t_3);
@@ -10747,7 +10821,7 @@
   __Pyx_DECREF(((PyObject *)__pyx_v_tree->K_y));
   __pyx_v_tree->K_y = ((PyArrayObject *)__pyx_t_3);
   __pyx_t_3 = 0;
-
+1725:     tree.y = orig_tree.y
+
+1725:     tree.y = orig_tree.y
  __pyx_t_3 = ((PyObject *)__pyx_v_orig_tree->y);
   __Pyx_INCREF(__pyx_t_3);
   __Pyx_GIVEREF(__pyx_t_3);
@@ -10755,12 +10829,12 @@
   __Pyx_DECREF(((PyObject *)__pyx_v_tree->y));
   __pyx_v_tree->y = ((PyArrayObject *)__pyx_t_3);
   __pyx_t_3 = 0;
-
+1726:     if rc == -1:
+
+1726:     if rc == -1:
  __pyx_t_4 = ((__pyx_v_rc == -1L) != 0);
   if (unlikely(__pyx_t_4)) {
 /* … */
   }
-
+1727:         raise MemoryError("pruning tree")
+
+1727:         raise MemoryError("pruning tree")
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1727, __pyx_L1_error)
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
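
For orientation, the annotated C above is Cython output for the stack-based pass that copies the kept nodes of orig_tree into the pruned tree. A minimal Python sketch of the same loop follows; SIZE_MAX, _TREE_UNDEFINED, and the field names are taken from the annotated source, while the function itself is illustrative and not part of the stpredictions API.

# Hedged sketch of the pruning pass shown in the generated C above.
SIZE_MAX = 2**64 - 1      # sentinel _add_node returns when allocation fails
_TREE_UNDEFINED = -2      # "no parent" sentinel, as in sklearn trees

def build_pruned_tree(tree, orig_tree, leaves_in_subtree):
    # (orig_node_id, depth, parent, is_left), seeded like the initial C push
    stack = [(0, 0, _TREE_UNDEFINED, False)]
    max_depth_seen = -1
    while stack:
        orig_node_id, depth, parent, is_left = stack.pop()
        is_leaf = bool(leaves_in_subtree[orig_node_id])
        node = orig_tree.nodes[orig_node_id]
        new_node_id = tree._add_node(
            parent, is_left, is_leaf, node.feature, node.threshold,
            node.impurity, node.n_node_samples, node.weighted_n_node_samples)
        # Python lists cannot fail to grow, so the C's rc/break bookkeeping
        # collapses into this single allocation check:
        if new_node_id == SIZE_MAX:
            raise MemoryError("pruning tree")
        # copy the K_y.shape[0] values attached to this node (the memcpy above)
        tree.value[new_node_id] = orig_tree.value[orig_node_id]
        if not is_leaf:
            # right child pushed first, then left, mirroring the C
            stack.append((node.right_child, depth + 1, new_node_id, False))
            stack.append((node.left_child, depth + 1, new_node_id, True))
        max_depth_seen = max(max_depth_seen, depth)
    tree.max_depth = max_depth_seen
    tree.K_y, tree.y = orig_tree.K_y, orig_tree.y
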
diff --git a/tests/tests_IOKR/IOKR.py b/tests/tests_IOKR/IOKR.py
new file mode 100644
index 000000000..32a7a4d8b
--- /dev/null
+++ b/tests/tests_IOKR/IOKR.py
@@ -0,0 +1,89 @@
+import os
+import time
+import numpy as np
+
+# sklearn
+from sklearn.model_selection import KFold
+from sklearn.metrics.pairwise import rbf_kernel
+from sklearn.metrics import f1_score
+from sklearn.preprocessing import StandardScaler
+
+from stpredictions.models.IOKR.Sketch import SubSample, pSparsified, Incomplete, Accumulation, Rademacher, Gaussian
+from stpredictions.models.IOKR.SketchedIOKR import IOKR, SIOKR, ISOKR, SISOKR
+from stpredictions.datasets.load_data import *
+
+# ignore warnings
+import warnings
+warnings.filterwarnings('ignore')
+
+# Defining Gaussian kernel
+def Gaussian_kernel(gamma):
+    def Compute_Gram(X, Y=None):
+        if Y is None:
+            Y = X.copy()
+        return rbf_kernel(X, Y, gamma=gamma)
+    return Compute_Gram
+
+# Load the bibtex dataset
+x, y = load_bibtex_train_from_arff()
+X_tr, Y_tr = x.todense(), y.todense()
+
+x_test, y_test = load_bibtex_test_from_arff()
+X_te, Y_te = x_test.todense(), y_test.todense()
+
+Y_tr, Y_te = Y_tr.astype(int), Y_te.astype(int)
+
+scaler = StandardScaler()
+X_tr = scaler.fit_transform(X_tr)
+X_te = scaler.transform(X_te)
+
+# X_tr, Y_tr, X_te, Y_te = load_bibtex()
+n_tr = X_tr.shape[0]
+n_te = X_te.shape[0]
+input_dim = X_tr.shape[1]
+label_dim = Y_tr.shape[1]
+
+print(f'Train set size = {n_tr}')
+print(f'Test set size = {n_te}')
+print(f'Input dim. = {input_dim}')
+print(f'Output dim. = {label_dim}')
+
+# Selection of the hyper-parameters: rbf output kernel
+
+Ls = np.logspace(-6, -4, 3)
+sxs = np.logspace(1, 4, 4)
+sys = np.logspace(1, 4, 4)
+n_tr = X_tr.shape[0]
+n_val = n_tr//5
+f1_val_best = 0
+clf = IOKR()
+clf.verbose = 0
+
+
+t0 = time.time()
+for L in Ls:
+    for sx in sxs:
+        for sy in sys:
+            input_kernel = Gaussian_kernel(gamma=1/(2 * sx))
+            output_kernel = Gaussian_kernel(gamma=1/(2 * sy))
+            clf.fit(X=X_tr[:-n_val], Y=Y_tr[:-n_val], L=L, input_kernel=input_kernel, output_kernel=output_kernel)
+            Y_pred_val = clf.predict(X_te=X_tr[-n_val:])
+            f1_val = f1_score(Y_pred_val, Y_tr[-n_val:], average='samples')
+            if f1_val > f1_val_best:
+                f1_val_best = f1_val
+                sx_best = sx
+                sy_best = sy
+                L_best = L
+
+print(f'Selection time: {time.time() - t0}')
+print(f'Best selected parameters: L: {L_best} | sx: {sx_best} | sy: {sy_best}')
+
+# Test rbf output kernel
+
+clf.verbose = 1
+input_kernel = Gaussian_kernel(gamma=1/(2 * sx_best))
+output_kernel = Gaussian_kernel(gamma=1/(2 * sy_best))
+clf.fit(X=X_tr, Y=Y_tr, L=L_best, input_kernel=input_kernel, output_kernel=output_kernel)
+Y_pred_te = clf.predict(X_te=X_te)
+f1_te = f1_score(Y_pred_te, Y_te, average='samples')
+print(f'Test f1 score with selected parameters: {f1_te}')
\ No newline at end of file
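
A note on the Gaussian_kernel helper this script (and the three that follow) defines: with gamma = 1/(2 * sx), sklearn's rbf_kernel evaluates exp(-||x - y||**2 / (2 * sx)), so the grids over sxs and sys sweep the squared bandwidth sigma**2 rather than gamma itself. A quick self-contained check, with toy values assumed:

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

# rbf_kernel(x, y, gamma) = exp(-gamma * ||x - y||**2); gamma = 1/(2*sx)
# therefore makes sx play the role of sigma**2.
x, y, sx = np.array([[0.0, 1.0]]), np.array([[1.0, 3.0]]), 10.0
lhs = rbf_kernel(x, y, gamma=1 / (2 * sx))[0, 0]
rhs = np.exp(-np.sum(np.square(x - y)) / (2 * sx))
assert np.isclose(lhs, rhs)
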
diff --git a/tests/tests_IOKR/ISOKR.py b/tests/tests_IOKR/ISOKR.py
new file mode 100644
index 000000000..242180987
--- /dev/null
+++ b/tests/tests_IOKR/ISOKR.py
@@ -0,0 +1,110 @@
+import os
+import time
+import numpy as np
+
+# sklearn
+from sklearn.model_selection import KFold
+from sklearn.metrics.pairwise import rbf_kernel
+from sklearn.metrics import f1_score
+from sklearn.preprocessing import StandardScaler
+
+from stpredictions.models.IOKR.Sketch import SubSample, pSparsified, Incomplete, Accumulation, Rademacher, Gaussian
+from stpredictions.models.IOKR.SketchedIOKR import IOKR, SIOKR, ISOKR, SISOKR
+from stpredictions.datasets.load_data import *
+
+# ignore warnings
+import warnings
+warnings.filterwarnings('ignore')
+
+# Defining Gaussian kernel
+def Gaussian_kernel(gamma):
+    def Compute_Gram(X, Y=None):
+        if Y is None:
+            Y = X.copy()
+        return rbf_kernel(X, Y, gamma=gamma)
+    return Compute_Gram
+
+# Load the bibtex dataset
+x, y = load_bibtex_train_from_arff()
+X_tr, Y_tr = x.todense(), y.todense()
+
+x_test, y_test = load_bibtex_test_from_arff()
+X_te, Y_te = x_test.todense(), y_test.todense()
+
+Y_tr, Y_te = Y_tr.astype(int), Y_te.astype(int)
+
+scaler = StandardScaler()
+X_tr = scaler.fit_transform(X_tr)
+X_te = scaler.transform(X_te)
+
+# X_tr, Y_tr, X_te, Y_te = load_bibtex()
+n_tr = X_tr.shape[0]
+n_te = X_te.shape[0]
+input_dim = X_tr.shape[1]
+label_dim = Y_tr.shape[1]
+
+print(f'Train set size = {n_tr}')
+print(f'Test set size = {n_te}')
+print(f'Input dim. = {input_dim}')
+print(f'Output dim. = {label_dim}')
+
+
+
+# Selection of the hyper-parameters: rbf output kernel
+
+Ls = np.logspace(-6, -4, 3)
+sxs = np.logspace(1, 4, 4)
+sys = np.logspace(1, 4, 4)
+n_tr = X_tr.shape[0]
+n_val = n_tr//5
+n_tr_cv = int(n_tr - n_val)
+s = 150
+f1_val_best = 0
+clf = ISOKR()
+clf.verbose = 0
+
+
+t0 = time.time()
+for L in Ls:
+    for sx in sxs:
+        for sy in sys:
+            S = Gaussian((s, n_tr_cv))
+            input_kernel = Gaussian_kernel(gamma=1/(2 * sx))
+            output_kernel = Gaussian_kernel(gamma=1/(2 * sy))
+            clf.fit(X=X_tr[:-n_val], Y=Y_tr[:-n_val], S=S, L=L, input_kernel=input_kernel, output_kernel=output_kernel)
+            Y_pred_val = clf.predict(X_te=X_tr[-n_val:])
+            f1_val = f1_score(Y_pred_val, Y_tr[-n_val:], average='samples')
+            if f1_val > f1_val_best:
+                f1_val_best = f1_val
+                sx_best = sx
+                sy_best = sy
+                L_best = L
+
+print(f'Selection time: {time.time() - t0}')
+print(f'Best selected parameters: L: {L_best} | sx: {sx_best} | sy: {sy_best}')
+
+# Test rbf output kernel
+
+input_kernel = Gaussian_kernel(gamma=1/(2 * sx_best))
+output_kernel = Gaussian_kernel(gamma=1/(2 * sy_best))
+
+n_rep = 30
+
+f1_tes = np.zeros(n_rep)
+fit_times = np.zeros(n_rep)
+decode_times = np.zeros(n_rep)
+
+for i in range(n_rep):
+    S = Gaussian((s, n_tr))
+    clf.verbose = 0
+    clf.fit(X=X_tr, Y=Y_tr, S=S, L=L_best, input_kernel=input_kernel, output_kernel=output_kernel)
+    Y_pred_te = clf.predict(X_te=X_te)
+    f1_tes[i] = f1_score(Y_pred_te, Y_te, average='samples')
+    fit_times[i] = clf.fit_time
+    decode_times[i] = clf.decode_time
+print(f'Test f1 score with selected parameters: {np.mean(f1_tes)}')
+print(f'Std: {0.5 * np.std(f1_tes)}')
+print(f'Fitting time: {np.mean(fit_times)}')
+print(f'Std: {0.5 * np.std(fit_times)}')
+print(f'Decoding time: {np.mean(decode_times)}')
+print(f'Std: {0.5 * np.std(decode_times)}')
\ No newline at end of file
diff --git a/tests/tests_IOKR/SIOKR.py b/tests/tests_IOKR/SIOKR.py
new file mode 100644
index 000000000..b3ccb12a4
--- /dev/null
+++ b/tests/tests_IOKR/SIOKR.py
@@ -0,0 +1,108 @@
+import os
+import time
+import numpy as np
+
+# sklearn
+from sklearn.model_selection import KFold
+from sklearn.metrics.pairwise import rbf_kernel
+from sklearn.metrics import f1_score
+from sklearn.preprocessing import StandardScaler
+
+from stpredictions.models.IOKR.Sketch import SubSample, pSparsified, Incomplete, Accumulation, Rademacher, Gaussian
+from stpredictions.models.IOKR.SketchedIOKR import IOKR, SIOKR, ISOKR, SISOKR
+from stpredictions.datasets.load_data import *
+
+# ignore warnings
+import warnings
+warnings.filterwarnings('ignore')
+
+# Defining Gaussian kernel
+def Gaussian_kernel(gamma):
+    def Compute_Gram(X, Y=None):
+        if Y is None:
+            Y = X.copy()
+        return rbf_kernel(X, Y, gamma=gamma)
+    return Compute_Gram
+
+# Load the bibtex dataset
+x, y = load_bibtex_train_from_arff()
+X_tr, Y_tr = x.todense(), y.todense()
+
+x_test, y_test = load_bibtex_test_from_arff()
+X_te, Y_te = x_test.todense(), y_test.todense()
+
+Y_tr, Y_te = Y_tr.astype(int), Y_te.astype(int)
+
+scaler = StandardScaler()
+X_tr = scaler.fit_transform(X_tr)
+X_te = scaler.transform(X_te)
+
+# X_tr, Y_tr, X_te, Y_te = load_bibtex()
+n_tr = X_tr.shape[0]
+n_te = X_te.shape[0]
+input_dim = X_tr.shape[1]
+label_dim = Y_tr.shape[1]
+
+print(f'Train set size = {n_tr}')
+print(f'Test set size = {n_te}')
+print(f'Input dim. = {input_dim}')
+print(f'Output dim. = {label_dim}')
+
+# Selection of the hyper-parameters: rbf output kernel
+
+Ls = np.logspace(-6, -4, 3)
+sxs = np.logspace(1, 4, 4)
+sys = np.logspace(1, 4, 4)
+n_tr = X_tr.shape[0]
+n_val = n_tr//5
+n_tr_cv = int(n_tr - n_val)
+s = 1000
+f1_val_best = 0
+clf = SIOKR()
+clf.verbose = 0
+
+
+t0 = time.time()
+for L in Ls:
+    for sx in sxs:
+        for sy in sys:
+            S = SubSample((s, n_tr_cv))
+            input_kernel = Gaussian_kernel(gamma=1/(2 * sx))
+            output_kernel = Gaussian_kernel(gamma=1/(2 * sy))
+            clf.fit(X=X_tr[:-n_val], Y=Y_tr[:-n_val], S=S, L=L, input_kernel=input_kernel, output_kernel=output_kernel)
+            Y_pred_val = clf.predict(X_te=X_tr[-n_val:])
+            f1_val = f1_score(Y_pred_val, Y_tr[-n_val:], average='samples')
+            if f1_val > f1_val_best:
+                f1_val_best = f1_val
+                sx_best = sx
+                sy_best = sy
+                L_best = L
+
+print(f'Selection time: {time.time() - t0}')
+print(f'Best selected parameters: L: {L_best} | sx: {sx_best} | sy: {sy_best}')
+
+# Test rbf output kernel
+
+input_kernel = Gaussian_kernel(gamma=1/(2 * sx_best))
+output_kernel = Gaussian_kernel(gamma=1/(2 * sy_best))
+
+n_rep = 30
+
+f1_tes = np.zeros(n_rep)
+fit_times = np.zeros(n_rep)
+decode_times = np.zeros(n_rep)
+
+for i in range(n_rep):
+    S = Rademacher((s, n_tr))  # note: a SubSample sketch was used during selection above
+    clf.verbose = 0
+    clf.fit(X=X_tr, Y=Y_tr, S=S, L=L_best, input_kernel=input_kernel, output_kernel=output_kernel)
+    Y_pred_te = clf.predict(X_te=X_te)
+    f1_tes[i] = f1_score(Y_pred_te, Y_te, average='samples')
+    fit_times[i] = clf.fit_time
+    decode_times[i] = clf.decode_time
+print(f'Test f1 score with selected parameters: {np.mean(f1_tes)}')
+print(f'Std: {0.5 * np.std(f1_tes)}')
+print(f'Fitting time: {np.mean(fit_times)}')
+print(f'Std: {0.5 * np.std(fit_times)}')
+print(f'Decoding time: {np.mean(decode_times)}')
+print(f'Std: {0.5 * np.std(decode_times)}')
\ No newline at end of file
diff --git a/tests/tests_IOKR/SISOKR.py b/tests/tests_IOKR/SISOKR.py
new file mode 100644
index 000000000..ad2337af1
--- /dev/null
+++ b/tests/tests_IOKR/SISOKR.py
@@ -0,0 +1,113 @@
+import os
+import time
+import numpy as np
+
+# sklearn
+from sklearn.model_selection import KFold
+from sklearn.metrics.pairwise import rbf_kernel
+from sklearn.metrics import f1_score
+from sklearn.preprocessing import StandardScaler
+
+from stpredictions.models.IOKR.Sketch import SubSample, pSparsified, Incomplete, Accumulation, Rademacher, Gaussian
+from stpredictions.models.IOKR.SketchedIOKR import IOKR, SIOKR, ISOKR, SISOKR
+from stpredictions.datasets.load_data import *
+
+# ignore warnings
+import warnings
+warnings.filterwarnings('ignore')
+
+# Defining Gaussian kernel
+def Gaussian_kernel(gamma):
+    def Compute_Gram(X, Y=None):
+        if Y is None:
+            Y = X.copy()
+        return rbf_kernel(X, Y, gamma=gamma)
+    return Compute_Gram
+
+# Load the bibtex dataset
+x, y = load_bibtex_train_from_arff()
+X_tr, Y_tr = x.todense(), y.todense()
+
+x_test, y_test = load_bibtex_test_from_arff()
+X_te, Y_te = x_test.todense(), y_test.todense()
+
+Y_tr, Y_te = Y_tr.astype(int), Y_te.astype(int)
+
+scaler = StandardScaler()
+X_tr = scaler.fit_transform(X_tr)
+X_te = scaler.transform(X_te)
+
+# X_tr, Y_tr, X_te, Y_te = load_bibtex()
+n_tr = X_tr.shape[0]
+n_te = X_te.shape[0]
+input_dim = X_tr.shape[1]
+label_dim = Y_tr.shape[1]
+
+print(f'Train set size = {n_tr}')
+print(f'Test set size = {n_te}')
+print(f'Input dim. = {input_dim}')
+print(f'Output dim. = {label_dim}')
+
+
+
+# Selection of the hyper-parameters: rbf output kernel
+
+Ls = np.logspace(-10, -4, 7)
+sxs = np.logspace(1, 6, 6) 
+sys = np.logspace(1, 4, 4)
+n_tr = X_tr.shape[0]
+n_val = n_tr//5
+n_tr_cv = int(n_tr - n_val)
+s_in = 1000
+s_out = 150
+f1_val_best = 0
+clf = SISOKR()
+clf.verbose = 0
+
+
+t0 = time.time()
+for L in Ls:
+    for sx in sxs:
+        for sy in sys:
+            S_in = SubSample((s_in, n_tr_cv))
+            S_out = Gaussian((s_out, n_tr_cv))
+            input_kernel = Gaussian_kernel(gamma=1/(2 * sx))
+            output_kernel = Gaussian_kernel(gamma=1/(2 * sy))
+            clf.fit(X=X_tr[:-n_val], Y=Y_tr[:-n_val], S_in=S_in, S_out=S_out, L=L, input_kernel=input_kernel, output_kernel=output_kernel)
+            Y_pred_val = clf.predict(X_te=X_tr[-n_val:])
+            f1_val = f1_score(Y_pred_val, Y_tr[-n_val:], average='samples')
+            if f1_val > f1_val_best:
+                f1_val_best = f1_val
+                sx_best = sx
+                sy_best = sy
+                L_best = L
+
+print(f'Selection time: {time.time() - t0}')
+print(f'Best selected parameters: L: {L_best} | sx: {sx_best} | sy: {sy_best}')
+
+# Test rbf output kernel
+
+input_kernel = Gaussian_kernel(gamma=1/(2 * sx_best))
+output_kernel = Gaussian_kernel(gamma=1/(2 * sy_best))
+
+n_rep = 30
+
+f1_tes = np.zeros(n_rep)
+fit_times = np.zeros(n_rep)
+decode_times = np.zeros(n_rep)
+
+for i in range(n_rep):
+    S_in = SubSample((s_in, n_tr))
+    S_out = Gaussian((s_out, n_tr))
+    clf.verbose = 0
+    clf.fit(X=X_tr, Y=Y_tr, S_in=S_in, S_out=S_out, L=L_best, input_kernel=input_kernel, output_kernel=output_kernel)
+    Y_pred_te = clf.predict(X_te=X_te)
+    f1_tes[i] = f1_score(Y_pred_te, Y_te, average='samples')
+    fit_times[i] = clf.fit_time
+    decode_times[i] = clf.decode_time
+print(f'Test f1 score with selected parameters: {np.mean(f1_tes)}')
+print(f'Std: {0.5 * np.std(f1_tes)}')
+print(f'Fitting time: {np.mean(fit_times)}')
+print(f'Std: {0.5 * np.std(fit_times)}')
+print(f'Decoding time: {np.mean(decode_times)}')
+print(f'Std: {0.5 * np.std(decode_times)}')
\ No newline at end of file

From 8d7f397dc45fbc44633442a679fc3f758a4c1fa2 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Fri, 24 Feb 2023 22:23:55 +0100
Subject: [PATCH 31/85] Test py 3.9 on new macOS and Ubuntu runners

---
 .github/workflows/pytest_cp39.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 46089d297..b5b34a6ff 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest]
+        os: [macos-latest, ubuntu-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.9',]
     env:
@@ -69,5 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        python tests/tests_IOKR/ISOKR.py   tests/tests_IOKR/SISOKR.py
+        python tests/tests_IOKR/ISOKR.py   
+        python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
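
Splitting the two scripts onto separate lines above is not cosmetic: python executes only its first positional argument, and any further .py path is merely handed to that script via sys.argv. A minimal demonstration (file names hypothetical):

# show_argv.py -- run as: python show_argv.py other.py
# Only show_argv.py executes; other.py just shows up in argv.
import sys
print(sys.argv)   # ['show_argv.py', 'other.py']
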

From 74aa6e977ee73bfae7cb34032170cebc6b696e68 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Fri, 24 Feb 2023 23:03:12 +0100
Subject: [PATCH 32/85] Build wheels for Ubuntu

---
 .github/workflows/build_wheels.yml | 6 +++---
 .github/workflows/pytest_cp39.yml  | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index d39e6eb85..00328f783 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -2,8 +2,8 @@ name: Build
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 
 jobs:
   build_wheels:
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         # os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [windows-2019]
+        os: [ubuntu-20.04]
 
     steps:
       - uses: actions/checkout@v2
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index b5b34a6ff..ad44ae191 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From 94106e180998b0b62bc66f9703b8aaa36537b8ba Mon Sep 17 00:00:00 2001
From: yohila 
Date: Fri, 24 Feb 2023 23:17:26 +0100
Subject: [PATCH 33/85] Build wheels for Windows

---
 .github/workflows/build_wheels.yml | 2 +-
 pyproject.toml                     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index 00328f783..c899700d6 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         # os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [ubuntu-20.04]
+        os: [windows-2019]
 
     steps:
       - uses: actions/checkout@v2
diff --git a/pyproject.toml b/pyproject.toml
index 31e5b4244..de26d0dda 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,11 +8,11 @@ build-backend = "setuptools.build_meta"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
-# skip = ["pp*", "cp310-*"]
+skip = ["pp*", "cp310-*"]
 #Skip for MacOS
 # skip = ["pp*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
-skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"]
+# skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"]
 
 
 # Build `universal2` and `arm64` wheels on an Intel runner.
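
For reference, cibuildwheel matches the build/skip entries roughly as shell-style globs against build identifiers such as cp39-win_amd64 or pp39-manylinux_x86_64. A small sketch (identifier list assumed) of what the Windows skip list above keeps:

from fnmatch import fnmatch

skip = ["pp*", "cp310-*"]                    # the Windows skip list above
ids = ["cp36-win_amd64", "cp39-win_amd64",   # a few sample identifiers
       "cp310-win_amd64", "pp39-win_amd64"]
kept = [i for i in ids if not any(fnmatch(i, pat) for pat in skip)]
print(kept)   # ['cp36-win_amd64', 'cp39-win_amd64']
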

From 06f1a93b4ecda4b4159ca063088a2c61c88fe7f0 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:05:05 +0100
Subject: [PATCH 34/85] Testing all Python versions on Ubuntu

---
 .github/workflows/build_wheels.yml | 4 ++--
 .github/workflows/pytest_cp36.yml  | 4 ++--
 .github/workflows/pytest_cp37.yml  | 4 ++--
 .github/workflows/pytest_cp38.yml  | 4 ++--
 .github/workflows/pytest_cp39.yml  | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index c899700d6..d39e6eb85 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -2,8 +2,8 @@ name: Build
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 
 jobs:
   build_wheels:
diff --git a/.github/workflows/pytest_cp36.yml b/.github/workflows/pytest_cp36.yml
index 270020916..5ce17cdbc 100644
--- a/.github/workflows/pytest_cp36.yml
+++ b/.github/workflows/pytest_cp36.yml
@@ -2,8 +2,8 @@ name: pytesting
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 # #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml
index 846fc77ea..8ddd6e659 100644
--- a/.github/workflows/pytest_cp37.yml
+++ b/.github/workflows/pytest_cp37.yml
@@ -2,8 +2,8 @@ name: pytesting
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 # #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml
index 5f4c8be5d..e571b65fa 100644
--- a/.github/workflows/pytest_cp38.yml
+++ b/.github/workflows/pytest_cp38.yml
@@ -2,8 +2,8 @@ name: pytesting
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
   #  branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index ad44ae191..b5b34a6ff 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From 006662a8baa9f90580e39987f07af8d13bcf51b5 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:26:34 +0100
Subject: [PATCH 35/85] Testing py37 for mac and ubuntu

---
 .github/workflows/pytest_cp36.yml | 6 +++---
 .github/workflows/pytest_cp37.yml | 6 ++++--
 .github/workflows/pytest_cp38.yml | 6 +++---
 .github/workflows/pytest_cp39.yml | 6 +++---
 4 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/pytest_cp36.yml b/.github/workflows/pytest_cp36.yml
index 5ce17cdbc..ee206c818 100644
--- a/.github/workflows/pytest_cp36.yml
+++ b/.github/workflows/pytest_cp36.yml
@@ -1,9 +1,9 @@
-name: pytesting
+name: pytesting 3.6
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 # #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml
index 8ddd6e659..135c6250e 100644
--- a/.github/workflows/pytest_cp37.yml
+++ b/.github/workflows/pytest_cp37.yml
@@ -1,4 +1,4 @@
-name: pytesting
+name: pytesting 3.7
 
 on: 
   push:
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest]
+        os: [ubuntu-latest, macos-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7',]
     env:
@@ -69,4 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
+        python tests/tests_IOKR/ISOKR.py   
+        # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml
index e571b65fa..5efe04c5e 100644
--- a/.github/workflows/pytest_cp38.yml
+++ b/.github/workflows/pytest_cp38.yml
@@ -1,9 +1,9 @@
-name: pytesting
+name: pytesting 3.8
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
   #  branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index b5b34a6ff..4a87940e5 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -1,9 +1,9 @@
-name: pytesting
+name: pytesting 3.9
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From 5fa25015fdb225347d6de06c4baaf9e2fa1e141d Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:37:35 +0100
Subject: [PATCH 36/85] Testing py310 for mac and ubuntu

---
 .github/workflows/pytest_cp310.yml | 74 ++++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+)
 create mode 100644 .github/workflows/pytest_cp310.yml

diff --git a/.github/workflows/pytest_cp310.yml b/.github/workflows/pytest_cp310.yml
new file mode 100644
index 000000000..e0279975c
--- /dev/null
+++ b/.github/workflows/pytest_cp310.yml
@@ -0,0 +1,74 @@
+name: pytesting 3.10
+
+on: 
+  push:
+    branches-ignore:
+      - "main"
+#    branches: [ main ]
+  schedule:
+    - cron: '0 0 1 * *'
+# pull_request:
+#    branches:
+#      - '*'
+
+jobs:
+  run:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest]
+        # os: [macos-latest, ubuntu-latest, windows-latest]
+        python: ['3.10',]
+    env:
+      OS: ${{ matrix.os }}
+      PYTHON: ${{ matrix.python }}
+    steps:
+    - uses: actions/checkout@v1
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v1
+      with:
+        python-version: 3.10
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        ################### Use this when you want to use local wheel installation through dist  #################
+        pip install -r requirements.txt
+    # - name: Lint with flake8
+    #   run: |
+    #     pip install flake8
+    #     # stop the build if there are Python syntax errors or undefined names
+    #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+    #     # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+    #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    # - name: Test with pytest
+    #   run: |
+    #     pip install pytest
+    #     pytest -v --cov=structured-predictions --cov-report=html
+
+
+    # - name: Build and compile
+    #   run: |
+    #     make dist
+    #     python setup.py develop
+
+    - name: Build and compile
+      run: |
+        make clean
+        # pip install .
+        ################## Use this when you want to use local wheel installation through dist ######################
+        make dist
+        pip install dist/*
+        #############################################################################
+        # python setup.py develop
+        # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6
+        # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
+        # pip install -i https://test.pypi.org/simple/ structured-predictions
+        # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py
+    - name: Test with pytest
+      run: |
+        pip install pytest
+        pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
+        pytest  tests/tests_DIOKR
+        python tests/tests_IOKR/ISOKR.py   
+        # python tests/tests_IOKR/SISOKR.py
+        # pytest tests/tests_IOKR

From 2b01947d8b0c35604ae4ab5f25520bc982d05064 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:40:30 +0100
Subject: [PATCH 37/85] Testing py310 for mac and ubuntu

---
 .github/workflows/pytest_cp310.yml | 4 ++--
 .github/workflows/pytest_cp37.yml  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/pytest_cp310.yml b/.github/workflows/pytest_cp310.yml
index e0279975c..81fbc2f29 100644
--- a/.github/workflows/pytest_cp310.yml
+++ b/.github/workflows/pytest_cp310.yml
@@ -2,8 +2,8 @@ name: pytesting 3.10
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml
index 135c6250e..3033b482d 100644
--- a/.github/workflows/pytest_cp37.yml
+++ b/.github/workflows/pytest_cp37.yml
@@ -2,8 +2,8 @@ name: pytesting 3.7
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 # #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From 5286f8f3ca2d28e99b56895d25092d174565b388 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:44:00 +0100
Subject: [PATCH 38/85] Testing py310 for mac and ubuntu

---
 .github/workflows/pytest_cp310.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pytest_cp310.yml b/.github/workflows/pytest_cp310.yml
index 81fbc2f29..ee8b55abd 100644
--- a/.github/workflows/pytest_cp310.yml
+++ b/.github/workflows/pytest_cp310.yml
@@ -27,7 +27,7 @@ jobs:
     - name: Set up Python 3.10
       uses: actions/setup-python@v1
       with:
-        python-version: 3.10
+        python-version: '3.10'
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
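
Quoting '3.10' is the actual fix here: YAML resolves a bare 3.10 as the float 3.1, so actions/setup-python would be asked for Python 3.1 and fail; the quoted form stays a string. Demonstration with PyYAML (assumed available):

import yaml  # PyYAML

print(yaml.safe_load("python-version: 3.10"))    # {'python-version': 3.1}
print(yaml.safe_load("python-version: '3.10'"))  # {'python-version': '3.10'}
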

From f10182615a09adbae260c8d74bf9dfcfb9ec678e Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:49:40 +0100
Subject: [PATCH 39/85] Testing py310 for mac

---
 .github/workflows/pytest_cp310.yml | 4 ++--
 .github/workflows/pytest_cp37.yml  | 2 +-
 .github/workflows/pytest_cp38.yml  | 2 ++
 .github/workflows/pytest_cp39.yml  | 4 ++--
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/pytest_cp310.yml b/.github/workflows/pytest_cp310.yml
index ee8b55abd..2f6d2f8d3 100644
--- a/.github/workflows/pytest_cp310.yml
+++ b/.github/workflows/pytest_cp310.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, ubuntu-latest]
+        os: [macos-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.10',]
     env:
@@ -69,6 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        python tests/tests_IOKR/ISOKR.py   
+        # python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml
index 3033b482d..b40b2d092 100644
--- a/.github/workflows/pytest_cp37.yml
+++ b/.github/workflows/pytest_cp37.yml
@@ -69,6 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        python tests/tests_IOKR/ISOKR.py   
+        # python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml
index 5efe04c5e..5acc41129 100644
--- a/.github/workflows/pytest_cp38.yml
+++ b/.github/workflows/pytest_cp38.yml
@@ -68,4 +68,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
+        # python tests/tests_IOKR/ISOKR.py   
+        # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 4a87940e5..0432e8ef8 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -69,6 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        python tests/tests_IOKR/ISOKR.py   
-        python tests/tests_IOKR/SISOKR.py
+        # python tests/tests_IOKR/ISOKR.py   
+        # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR

From 804bcd8318bab14e7d08ebc2a7cef409719e4690 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 13:58:22 +0100
Subject: [PATCH 40/85] Generating Wheels for py39 on mac and ubuntu

---
 .github/workflows/build_wheels.yml | 2 +-
 pyproject.toml                     | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index d39e6eb85..e90e2ff38 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         # os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [windows-2019]
+        os: [ubuntu-20.04, macos-10.15]
 
     steps:
       - uses: actions/checkout@v2
diff --git a/pyproject.toml b/pyproject.toml
index de26d0dda..be804b786 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,11 +8,12 @@ build-backend = "setuptools.build_meta"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
-skip = ["pp*", "cp310-*"]
+# skip = ["pp*", "cp310-*"]
 #Skip for MacOS
 # skip = ["pp*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
 # skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"]
+skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]
 
 
 # Build `universal2` and `arm64` wheels on an Intel runner.

From 6d847448996fee98eaa790b41300164cc697fe7a Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 14:09:50 +0100
Subject: [PATCH 41/85] Generating Wheels for py39 on mac and ubuntu

---
 .github/workflows/pytest_cp310.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/pytest_cp310.yml b/.github/workflows/pytest_cp310.yml
index 2f6d2f8d3..6d152db84 100644
--- a/.github/workflows/pytest_cp310.yml
+++ b/.github/workflows/pytest_cp310.yml
@@ -2,8 +2,8 @@ name: pytesting 3.10
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From d4e936f07b1d8b7b699b56823fb23dc5e8df2c8d Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 14:12:06 +0100
Subject: [PATCH 42/85] Generating Wheels for py39 on mac and ubuntu

---
 .github/workflows/build_wheels.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index e90e2ff38..fe3af3a43 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -2,8 +2,8 @@ name: Build
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 
 jobs:
   build_wheels:

From 2571d5f23708b46e50a7592345550f2cb2e69639 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 15:45:35 +0100
Subject: [PATCH 43/85] Generating Wheels for py39 on mac and ubuntu

---
 pyproject.toml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index be804b786..aa2f7d711 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [tool.cibuildwheel]
 # Only build on CPython 3.9
-#build = "cp39-*"
+build = "cp37-*"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
@@ -12,8 +12,8 @@ build-backend = "setuptools.build_meta"
 #Skip for MacOS
 # skip = ["pp*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
-# skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"]
-skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]
+skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"]
+# skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]
 
 
 # Build `universal2` and `arm64` wheels on an Intel runner.

From c8da01f8dee892ff5acca3348f1edb9b4c0a8efd Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 1 Mar 2023 15:56:49 +0100
Subject: [PATCH 44/85] Generating Wheels for py39 on mac, win and ubuntu

---
 .github/workflows/build_wheels.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index fe3af3a43..a91188a84 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         # os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [ubuntu-20.04, macos-10.15]
+        os: [ubuntu-20.04, macos-10.15,  windows-2019]
 
     steps:
       - uses: actions/checkout@v2

From ccc77a478ce12dd6fa74e43c744885487ba1496a Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 14:24:19 +0100
Subject: [PATCH 45/85] Generating and storing wheels for Ubuntu py39

---
 .github/workflows/build_wheels.yml     |  4 +--
 .github/workflows/build_wheels_3.9.yml | 43 ++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 2 deletions(-)
 create mode 100644 .github/workflows/build_wheels_3.9.yml

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index a91188a84..2af91fd0d 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -2,8 +2,8 @@ name: Build
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 
 jobs:
   build_wheels:
diff --git a/.github/workflows/build_wheels_3.9.yml b/.github/workflows/build_wheels_3.9.yml
new file mode 100644
index 000000000..8d2295785
--- /dev/null
+++ b/.github/workflows/build_wheels_3.9.yml
@@ -0,0 +1,43 @@
+name: pytesting 3.9
+
+on: 
+  push:
+    branches-ignore:
+      - "main"
+#    branches: [ main ]
+  schedule:
+    - cron: '0 0 1 * *'
+# pull_request:
+#    branches:
+#      - '*'
+
+jobs:
+  run:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest]
+        # os: [macos-latest, ubuntu-latest, windows-latest]
+        python: ['3.9',]
+    env:
+      OS: ${{ matrix.os }}
+      PYTHON: ${{ matrix.python }}
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.9
+      - name: Install dependencies
+        run: python -m pip install --upgrade pip
+      - name: Build and compile
+        run: make dist 
+        # to supply options, put them in 'env', like:
+        # env:
+        #   CIBW_SOME_OPTION: value
+
+      - uses: actions/upload-artifact@v2
+        with:
+          path: ./wheelhouse/*.whl
+
+

From ecb7018574b541c96db8b54c6e4c8171e225d5a5 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 14:33:46 +0100
Subject: [PATCH 46/85] Generating and storing wheels for Ubuntu py39

---
 .github/workflows/build_wheels_3.9.yml | 6 +++---
 pyproject.toml                         | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/build_wheels_3.9.yml b/.github/workflows/build_wheels_3.9.yml
index 8d2295785..2b99deade 100644
--- a/.github/workflows/build_wheels_3.9.yml
+++ b/.github/workflows/build_wheels_3.9.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, ubuntu-latest]
+        os: [ubuntu-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.9',]
     env:
diff --git a/pyproject.toml b/pyproject.toml
index aa2f7d711..6b7064af5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [tool.cibuildwheel]
 # Only build on CPython 3.9
-build = "cp37-*"
+# build = "cp37-*"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows

From 9df20339aa197be7f7ddd7636b056052661607bf Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 14:36:44 +0100
Subject: [PATCH 47/85] Generating and storing wheels for Ubuntu py39

---
 .github/workflows/build_wheels_3.9.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels_3.9.yml b/.github/workflows/build_wheels_3.9.yml
index 2b99deade..c14b3ce92 100644
--- a/.github/workflows/build_wheels_3.9.yml
+++ b/.github/workflows/build_wheels_3.9.yml
@@ -29,7 +29,9 @@ jobs:
         with:
           python-version: 3.9
       - name: Install dependencies
-        run: python -m pip install --upgrade pip
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
       - name: Build and compile
         run: make dist 
         # to supply options, put them in 'env', like:

From 9a9b70ccbc1dff9a585438823c15bc539d1819fc Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 14:48:27 +0100
Subject: [PATCH 48/85] Generating and storing wheels for Ubuntu py39

---
 .github/workflows/build_wheels_3.9.yml |  2 +-
 requirements.txt                       | 29 +++++++++++++-------------
 2 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/build_wheels_3.9.yml b/.github/workflows/build_wheels_3.9.yml
index c14b3ce92..067f1ade0 100644
--- a/.github/workflows/build_wheels_3.9.yml
+++ b/.github/workflows/build_wheels_3.9.yml
@@ -40,6 +40,6 @@ jobs:
 
       - uses: actions/upload-artifact@v2
         with:
-          path: ./wheelhouse/*.whl
+          path: ./dist/*.whl
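
This repointing matches the glob to where the build actually writes: make dist (a setuptools build) leaves wheels in ./dist/, while ./wheelhouse/ is cibuildwheel's default output directory, so the previous glob found nothing to upload. A local sanity check:

from glob import glob

# ./dist/ is where `make dist` (setuptools) writes wheels;
# ./wheelhouse/ is cibuildwheel's default output directory.
print(glob("dist/*.whl") or "nothing in dist/")
print(glob("wheelhouse/*.whl") or "nothing in wheelhouse/")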
 
 
diff --git a/requirements.txt b/requirements.txt
index ec7952bf9..5f380e485 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -45,18 +45,17 @@ rsa==4.7.2
 scikit-learn==0.24.2
 scipy==1.7.0
 six==1.15.0
-sklearn==0.0
-sobol-seq==0.2.0
-tensorboard==2.5.0
-tensorboard-data-server==0.6.1
-tensorboard-plugin-wit==1.8.0
-tensorflow==2.5.0
-tensorflow-estimator==2.5.0
-termcolor==1.1.0
-threadpoolctl==2.1.0
-tomli==2.0.0
-torch==1.9.0
-typing-extensions==3.7.4.3
-urllib3==1.26.5
-Werkzeug==2.0.1
-wrapt==1.12.1
+# sobol-seq==0.2.0
+# tensorboard==2.5.0
+# tensorboard-data-server==0.6.1
+# tensorboard-plugin-wit==1.8.0
+# tensorflow==2.5.0
+# tensorflow-estimator==2.5.0
+# termcolor==1.1.0
+# threadpoolctl==2.1.0
+# tomli==2.0.0
+# torch==1.9.0
+# typing-extensions==3.7.4.3
+# urllib3==1.26.5
+# Werkzeug==2.0.1
+# wrapt==1.12.1

From 3e5810f64e8e5c091cb679b8859451e8be95d910 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 15:14:47 +0100
Subject: [PATCH 49/85] Generating and storing wheels for Ubuntu py39

---
 .github/workflows/build_wheels_3.9.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build_wheels_3.9.yml b/.github/workflows/build_wheels_3.9.yml
index 067f1ade0..afe3cace2 100644
--- a/.github/workflows/build_wheels_3.9.yml
+++ b/.github/workflows/build_wheels_3.9.yml
@@ -18,16 +18,16 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
-        python: ['3.9',]
+        python: ['3.7', '3.8', '3.9']
     env:
       OS: ${{ matrix.os }}
       PYTHON: ${{ matrix.python }}
     steps:
       - uses: actions/checkout@v2
-      - name: Set up Python 3.9
+      - name: Set up Python
         uses: actions/setup-python@v1
         with:
-          python-version: 3.9
+          python-version: ${{ matrix.python }}
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip

From f8ebb42772f88f3e5892b6eb515a9b2d9974078f Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 15:19:26 +0100
Subject: [PATCH 50/85] Generating and Storing wheels for MacOS

---
 .../workflows/{build_wheels_3.9.yml => build_wheels_Manul.yml}  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename .github/workflows/{build_wheels_3.9.yml => build_wheels_Manul.yml} (97%)

diff --git a/.github/workflows/build_wheels_3.9.yml b/.github/workflows/build_wheels_Manul.yml
similarity index 97%
rename from .github/workflows/build_wheels_3.9.yml
rename to .github/workflows/build_wheels_Manul.yml
index afe3cace2..75471d971 100644
--- a/.github/workflows/build_wheels_3.9.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest]
+        os: [macos-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7', '3.8', '3.9']
     env:

From f1a38ada0332637ee8c07df6eb3758a724d3d646 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 15:28:38 +0100
Subject: [PATCH 51/85] Generating and Storing wheels for Windows

---
 .github/workflows/build_wheels_Manul.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index 75471d971..ea23a01b0 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest]
+        os: [windows-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7', '3.8', '3.9']
     env:

From 73a78c803a34adb14c292432cf31066a5dc03f4f Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 16:18:19 +0100
Subject: [PATCH 52/85] Testing Wheels for Windows and MacOS

---
 .github/workflows/build_wheels_Manul.yml |  4 ++--
 .github/workflows/pytest_cp39.yml        | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index ea23a01b0..131034d85 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 0432e8ef8..20acbec58 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, ubuntu-latest]
+        os: [macos-latest, windows-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.9',]
     env:
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        pip install -r requirements.txt
+        # pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8
@@ -53,15 +53,15 @@ jobs:
 
     - name: Build and compile
       run: |
-        make clean
+        # make clean
         # pip install .
         ################## Use this when you want to use local wheel installation through dist ######################
-        make dist
-        pip install dist/*
+        # make dist
+        # pip install dist/*
         #############################################################################
         # python setup.py develop
         # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6
-        # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
+        pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
         # pip install -i https://test.pypi.org/simple/ structured-predictions
         # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py
     - name: Test with pytest
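
The uncommented install line is the variant that actually resolves: TestPyPI hosts only the structured-predictions package itself, so its dependencies (numpy, scikit-learn, torch, ...) have to come from the real index via --extra-index-url. A rough programmatic equivalent of that step:

import subprocess, sys

subprocess.check_call([
    sys.executable, "-m", "pip", "install",
    "--index-url", "https://test.pypi.org/simple/",   # the package under test
    "--extra-index-url", "https://pypi.org/simple",   # its dependencies
    "structured-predictions",
])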

From 68ebcc86b0b31f3c64cb0b906b56d9731d0f1bd0 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 16:29:48 +0100
Subject: [PATCH 53/85] Testing Wheels for MacOS

---
 .github/workflows/pytest_cp39.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 20acbec58..d2e07192e 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, windows-latest]
+        os: [macos-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.9',]
     env:
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        # pip install -r requirements.txt
+        pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8

From e5e19b5c56aa37cc7e1a686683ea336e4198caca Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 16:40:39 +0100
Subject: [PATCH 54/85] Testing Wheels for MacOS

---
 .github/workflows/pytest_cp39.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index d2e07192e..ebcac852a 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -67,8 +67,8 @@ jobs:
     - name: Test with pytest
       run: |
         pip install pytest
-        pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
-        pytest  tests/tests_DIOKR
-        # python tests/tests_IOKR/ISOKR.py   
+        # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
+        # pytest  tests/tests_DIOKR
+        python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR

From 8006fe32cae126e5229b9fbc4ad9e6d33f3a6b44 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 17:28:32 +0100
Subject: [PATCH 55/85] Generating and Storing wheels for MacOS

---
 .github/workflows/build_wheels_Manul.yml | 2 +-
 requirements.txt                         | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index 131034d85..01da4b669 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [windows-latest]
+        os: [macos-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7', '3.8', '3.9']
     env:
diff --git a/requirements.txt b/requirements.txt
index 5f380e485..e1f45ab00 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,7 +23,8 @@ Keras-Preprocessing==1.1.2
 kiwisolver==1.3.1
 Markdown==3.3.4
 matplotlib==3.4.2
-numpy==1.19.5
+# numpy==1.19.5
+numpy=="oldest-supported-numpy"
 oauthlib==3.1.1
 operalib==0.2b27
 opt-einsum==3.3.0
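
A caveat on the numpy line above: oldest-supported-numpy is a separate meta-package, not a numpy version, so numpy=="oldest-supported-numpy" is not a valid PEP 508 requirement, which is why a following patch replaces it with numpy>=1.20.0. The packaging library makes the difference visible:

from packaging.requirements import Requirement, InvalidRequirement

for line in ['numpy=="oldest-supported-numpy"',  # quoted name is not a version
             "oldest-supported-numpy",           # the meta-package itself: valid
             "numpy>=1.20.0"]:                   # what a later patch pins instead
    try:
        print("ok:", Requirement(line))
    except InvalidRequirement:
        print("rejected:", line)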

From ec995a09aaf684bebbc9b41f8cbf1c175dbb389f Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 17:30:36 +0100
Subject: [PATCH 56/85] Generating and Storing wheels for MacOS

---
 .github/workflows/build_wheels_Manul.yml | 4 ++--
 .github/workflows/pytest_cp39.yml        | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index 01da4b669..75471d971 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index ebcac852a..293a2a50c 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From a11128b74d8c5c97411862e140d8c212c5e92b85 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 17:33:42 +0100
Subject: [PATCH 57/85] Generating and Storing wheels for MacOS

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index e1f45ab00..8b753c28a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,7 +24,7 @@ kiwisolver==1.3.1
 Markdown==3.3.4
 matplotlib==3.4.2
 # numpy==1.19.5
-numpy=="oldest-supported-numpy"
+numpy>=1.20.0
 oauthlib==3.1.1
 operalib==0.2b27
 opt-einsum==3.3.0

From 458d29e31868b1f670aecafd329ca9bee93ea5d1 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 17:42:47 +0100
Subject: [PATCH 58/85] Generating and Storing wheels for Windows

---
 .github/workflows/build_wheels_Manul.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index 75471d971..ea23a01b0 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest]
+        os: [windows-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7', '3.8', '3.9']
     env:

From aee1388b1073d3d448b4c981893df980fdc8166c Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 17:55:06 +0100
Subject: [PATCH 59/85] Generating and Storing wheels for Windows

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 5ddecf6fa..18e387153 100644
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
 
 setup(
     name='structured-predictions',
-    version='0.0.9',
+    version='0.1.0',
     description='Structured-Predictions',
     # long_description=README,
     long_description_content_type='text/markdown',

From 9beb4d2c393d783318f3ce7a5a2125339c1d85d5 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 17:55:58 +0100
Subject: [PATCH 60/85] Generating and Storing wheels for MacOS

---
 .github/workflows/build_wheels_Manul.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index ea23a01b0..75471d971 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [windows-latest]
+        os: [macos-latest]
         # os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7', '3.8', '3.9']
     env:

From 96258752b5f66b797ad1d0890612807b9d1519c0 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 21:57:59 +0100
Subject: [PATCH 61/85] Testing pypi test package MacOS py39

---
 .github/workflows/pytest_cp39.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 293a2a50c..7f917501d 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        pip install -r requirements.txt
+        # pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8

From c596b5def5753318bac9e98d794c060a5dc0b6c6 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 22:13:13 +0100
Subject: [PATCH 62/85] Testing pypi test package MacOS py39

---
 .github/workflows/build_wheels_Manul.yml | 4 ++--
 .github/workflows/pytest_cp39.yml        | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index 75471d971..01da4b669 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 7f917501d..6db6ed795 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'

From 1baf2b09f1dba41a85fcd84aba4c7880ec33da95 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 22:20:15 +0100
Subject: [PATCH 63/85] Testing pypi test package MacOS py39

---
 .github/workflows/pytest_cp39.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 6db6ed795..ebcac852a 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        # pip install -r requirements.txt
+        pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8

From 326e6b54fce19e2e38af20bf0c888fbb5b43159b Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 22:37:47 +0100
Subject: [PATCH 64/85] Generating Wheels for manylinux py39

---
 .../workflows/build_wheels_Manul_linux.yml    | 33 +++++++++++++++++++
 .github/workflows/pytest_cp39.yml             |  4 +--
 2 files changed, 35 insertions(+), 2 deletions(-)
 create mode 100644 .github/workflows/build_wheels_Manul_linux.yml

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
new file mode 100644
index 000000000..487f9d596
--- /dev/null
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -0,0 +1,33 @@
+name: Python package build and publish
+
+on:
+  release:
+    types: [created]
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v1
+      with:
+        python-version: 3.9
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install twine flake8
+    # - name: Lint with flake8 for syntax errors
+    #   run: |
+    #     pip install flake8
+    #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+    #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Build manylinux Python wheels
+      uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
+      with:
+        python-versions: 'cp39-cp39'
+        build-requirements: 'cython numpy'
+    - uses: actions/upload-artifact@v2
+      with:
+        path: ./dist/*.whl
+
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index ebcac852a..293a2a50c 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
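
Note on the new manylinux workflow: the RalfG/python-wheels-manylinux-build
action compiles the wheels inside a manylinux2014 container and, as the
upload-artifact step above indicates, leaves them under ./dist. A minimal local
smoke test of the artifact, assuming a cp39 build (the exact wheel filename
below is illustrative; list ./dist for the real one):

    # Sketch: inspect and install the wheel the job produced
    # (filename is hypothetical; check ./dist for the actual name)
    ls dist/*.whl
    pip install dist/structured_predictions-0.1.0-cp39-cp39-manylinux2014_x86_64.whl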

From 53471bf000c31f59014641870e58b4ab40777b0c Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 22:42:41 +0100
Subject: [PATCH 65/85] Generating Wheels for manylinux py39

---
 .github/workflows/build_wheels_Manul_linux.yml | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index 487f9d596..c3b8bd3a3 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -1,8 +1,12 @@
 name: Python package build and publish
 
-on:
-  release:
-    types: [created]
+# on:
+#   release:
+#     types: [created]
+on: 
+  push:
+    # branches-ignore:
+    #   - "main"
 
 jobs:
   deploy:

From d1406aa1a856c14552ac88c3c1fb139b6b236d52 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 22:47:18 +0100
Subject: [PATCH 66/85] Generating Wheels for manylinux py39

---
 .github/workflows/build_wheels_Manul.yml       | 2 +-
 .github/workflows/build_wheels_Manul_linux.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul.yml b/.github/workflows/build_wheels_Manul.yml
index 01da4b669..d74585077 100644
--- a/.github/workflows/build_wheels_Manul.yml
+++ b/.github/workflows/build_wheels_Manul.yml
@@ -1,4 +1,4 @@
-name: pytesting 3.9
+name: Generating Wheels
 
 on: 
   push:
diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index c3b8bd3a3..f927d5b2d 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -26,7 +26,7 @@ jobs:
     #     pip install flake8
     #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
     #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Build manylinux Python wheels
+    - name: Build manylinux Python wheels scikit-learn
       uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
       with:
         python-versions: 'cp39-cp39'

From 70d4d560d04d1af9408eb9a7ccb6e2464a99c4dd Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 22:58:05 +0100
Subject: [PATCH 67/85] Generating Wheels for manylinux py37

---
 .github/workflows/build_wheels_Manul_linux.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index f927d5b2d..da5c351f5 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -16,7 +16,7 @@ jobs:
     - name: Set up Python
       uses: actions/setup-python@v1
       with:
-        python-version: 3.9
+        python-version: 3.7
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
@@ -29,7 +29,7 @@ jobs:
     - name: Build manylinux Python wheels scikit-learn
       uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
       with:
-        python-versions: 'cp39-cp39'
+        python-versions: 'cp37-cp37'
         build-requirements: 'cython numpy'
     - uses: actions/upload-artifact@v2
       with:

From 0fd97a1ae1ca1c7a78c42a5b5cb912c943ee6bfc Mon Sep 17 00:00:00 2001
From: yohila 
Date: Tue, 14 Mar 2023 23:00:17 +0100
Subject: [PATCH 68/85] Generating Wheels for manylinux py37

---
 .github/workflows/build_wheels_Manul_linux.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index da5c351f5..4a46c35a8 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -29,7 +29,7 @@ jobs:
     - name: Build manylinux Python wheels scikit-learn
       uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
       with:
-        python-versions: 'cp37-cp37'
+        python-versions: 'cp37-cp37m'
         build-requirements: 'cython numpy'
     - uses: actions/upload-artifact@v2
       with:

From 2077c5b1857c8276f17e767a704afeda4d0fabff Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 09:54:05 +0100
Subject: [PATCH 69/85] Building Linux wheels for scikit-learn==0.24.2

---
 .github/workflows/build_wheels_Manul_linux.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index 4a46c35a8..7326356a9 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -16,7 +16,7 @@ jobs:
     - name: Set up Python
       uses: actions/setup-python@v1
       with:
-        python-version: 3.7
+        python-version: 3.9
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
@@ -26,10 +26,10 @@ jobs:
     #     pip install flake8
     #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
     #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Build manylinux Python wheels scikit-learn
+    - name: Build manylinux Python wheels scikit-learn==0.24.2
       uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
       with:
-        python-versions: 'cp37-cp37m'
+        python-versions: 'cp39-cp39m'
         build-requirements: 'cython numpy'
     - uses: actions/upload-artifact@v2
       with:

From 881722d980f50ab18062269d71053e04842f5321 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 09:58:03 +0100
Subject: [PATCH 70/85] Building Linux wheels for scikit-learn==0.24.2

---
 .github/workflows/build_wheels_Manul_linux.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index 7326356a9..fab139482 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -20,17 +20,17 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
-        pip install twine flake8
+        # pip install twine flake8
     # - name: Lint with flake8 for syntax errors
     #   run: |
     #     pip install flake8
     #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
     #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Build manylinux Python wheels scikit-learn==0.24.2
+    - name: Build manylinux Python wheels 
       uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
       with:
-        python-versions: 'cp39-cp39m'
-        build-requirements: 'cython numpy'
+        python-versions: 'cp39-cp39' 
+        build-requirements: 'cython numpy scikit-learn==0.24.2'
     - uses: actions/upload-artifact@v2
       with:
         path: ./dist/*.whl
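
Note on the CPython tags cycled through in patches 67-70: interpreters up to
3.7 carry the pymalloc ABI flag, so their ABI tag ends in "m" (cp37-cp37m);
the flag was dropped in CPython 3.8, so newer interpreters use the bare form
(cp38-cp38, cp39-cp39). That is why cp37-cp37 had to gain the "m" above, and
why cp39-cp39m here is reverted to cp39-cp39. One way to print the tag of the
running interpreter, assuming the packaging library is installed:

    python -c "import packaging.tags; print(next(iter(packaging.tags.sys_tags())))"
    # e.g. cp39-cp39-manylinux_2_17_x86_64 on Linux CPython 3.9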

From 9a061c8b59a0158933210f5ead371037ce157b3f Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 10:04:56 +0100
Subject: [PATCH 71/85] Building Linux wheels for scikit-learn==1.0

---
 .github/workflows/build_wheels_Manul_linux.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index fab139482..24af48855 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -30,7 +30,7 @@ jobs:
       uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
       with:
         python-versions: 'cp39-cp39' 
-        build-requirements: 'cython numpy scikit-learn==0.24.2'
+        build-requirements: 'cython numpy scikit-learn==1.0'
     - uses: actions/upload-artifact@v2
       with:
         path: ./dist/*.whl

From 451a183bc4ac5471aea57500365fd37a3eebda76 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 10:54:36 +0100
Subject: [PATCH 72/85] Building Linux wheels for scikit-learn==0.24.2

---
 .github/workflows/build_wheels.yml             | 12 ++++++++----
 .github/workflows/build_wheels_Manul_linux.yml |  4 ++--
 pyproject.toml                                 |  4 ++--
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index 2af91fd0d..758bbab20 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -2,8 +2,8 @@ name: Build
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 
 jobs:
   build_wheels:
@@ -12,11 +12,15 @@ jobs:
     strategy:
       matrix:
         # os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [ubuntu-20.04, macos-10.15,  windows-2019]
+        os: [ubuntu-20.04]
 
     steps:
       - uses: actions/checkout@v2
-
+      # - name: Install dependencies
+      #   run: |
+      #     python -m pip install --upgrade pip
+      #     ################### Use this when you want to use local wheel installation through dist  #################
+      #     pip install -r requirements.txt
       - name: Build wheels
         uses: pypa/cibuildwheel@v2.4.0
         # to supply options, put them in 'env', like:
diff --git a/.github/workflows/build_wheels_Manul_linux.yml b/.github/workflows/build_wheels_Manul_linux.yml
index 24af48855..1bca7cf41 100644
--- a/.github/workflows/build_wheels_Manul_linux.yml
+++ b/.github/workflows/build_wheels_Manul_linux.yml
@@ -5,8 +5,8 @@ name: Python package build and publish
 #     types: [created]
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 
 jobs:
   deploy:
diff --git a/pyproject.toml b/pyproject.toml
index 6b7064af5..569fceb46 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,10 @@
 [build-system]
-requires = ["setuptools", "wheel", "oldest-supported-numpy", "cython>=0.23", "scikit-learn"]
+requires = ["setuptools", "wheel", "oldest-supported-numpy", "cython>=0.23", "scikit-learn==0.24.2"]
 build-backend = "setuptools.build_meta"
 
 [tool.cibuildwheel]
 # Only build on CPython 3.9
-# build = "cp37-*"
+build = "cp39-*"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
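
Note on the cibuildwheel selectors toggled throughout this series: build and
skip under [tool.cibuildwheel] take glob patterns matched against build
identifiers such as cp39-manylinux_x86_64. A minimal sketch of the intent at
this point (build only CPython 3.9 wheels, skip PyPy and musllinux variants):

    [tool.cibuildwheel]
    build = "cp39-*"
    skip = ["pp*", "*-musllinux*"]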

From e5affaa4870bd8375a290da5dc5dac679ceb5f10 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 11:08:47 +0100
Subject: [PATCH 73/85] Building Linux wheels for scikit-learn==0.24.2

---
 pyproject.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 569fceb46..d05b74c90 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,5 +25,5 @@ archs = ["x86_64", "universal2", "arm64"]
 # On an Linux Intel runner with qemu installed, build Intel and ARM wheels
 [tool.cibuildwheel.linux]
 #archs = ["auto", "aarch64"]   # aarch64 results in unfinished job on github Action
-archs = ["auto"]
-#archs = ["x86_64"]
+# archs = ["auto"]
+archs = ["x86_64"]

From 9cf74b5d8b68d1302fef05be378389092f48dc85 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 11:10:52 +0100
Subject: [PATCH 74/85] Building Linux wheels for scikit-learn==0.24.2

---
 pyproject.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index d05b74c90..f7ae67622 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [tool.cibuildwheel]
 # Only build on CPython 3.9
-build = "cp39-*"
+# build = "cp39-*"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
@@ -12,7 +12,7 @@ build = "cp39-*"
 #Skip for MacOS
 # skip = ["pp*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
-skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*"]
+skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-"]
 # skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]
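
Note: the "cp36-" entry appended here is missing its trailing *, so as a glob
it matches no build identifier and excludes nothing; the spelling that
actually skips CPython 3.6 builds is the one a later patch writes out:

    skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-*"]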
 
 

From 3929fefe71d0ea34d5bb515aa10235ecf2c4ae56 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 13:40:07 +0100
Subject: [PATCH 75/85] Building Linux wheels for scikit-learn==0.24.2

---
 .github/workflows/build_wheels.yml | 7 +------
 pyproject.toml                     | 6 +++---
 setup.py                           | 2 +-
 3 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index 758bbab20..ae0e8dbb6 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -12,15 +12,10 @@ jobs:
     strategy:
       matrix:
         # os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [ubuntu-20.04]
+        os: [macos-10.15]
 
     steps:
       - uses: actions/checkout@v2
-      # - name: Install dependencies
-      #   run: |
-      #     python -m pip install --upgrade pip
-      #     ################### Use this when you want to use local wheel installation through dist  #################
-      #     pip install -r requirements.txt
       - name: Build wheels
         uses: pypa/cibuildwheel@v2.4.0
         # to supply options, put them in 'env', like:
diff --git a/pyproject.toml b/pyproject.toml
index f7ae67622..d985119f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,15 +4,15 @@ build-backend = "setuptools.build_meta"
 
 [tool.cibuildwheel]
 # Only build on CPython 3.9
-# build = "cp39-*"
+build = "cp310-*"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
 # skip = ["pp*", "cp310-*"]
 #Skip for MacOS
-# skip = ["pp*"]
+skip = ["pp*", "cp36-*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
-skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-"]
+# skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-*"]
 # skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]
 
 
diff --git a/setup.py b/setup.py
index 18e387153..c1e86d7cc 100644
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
 
 setup(
     name='structured-predictions',
-    version='0.1.0',
+    version='0.1.1',
     description='Structured-Predictions',
     # long_description=README,
     long_description_content_type='text/markdown',

From 3b26d44837395aef386a83a9105e91f5b998cc97 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 13:42:34 +0100
Subject: [PATCH 76/85] Building MacOS wheels for scikit-learn==0.24.2 py310

---
 .github/workflows/build_wheels.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index ae0e8dbb6..9511fea9b 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -11,7 +11,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        # os: [ubuntu-20.04, windows-2019, macos-10.15]
+        ## os: [ubuntu-20.04, windows-2019, macos-10.15]
         os: [macos-10.15]
 
     steps:

From 3ad1d8935cebe715864655fafc051b243b83711d Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 14:36:52 +0100
Subject: [PATCH 77/85] Building MacOS wheels for scikit-learn==0.24.2

---
 pyproject.toml | 2 +-
 setup.py       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index d985119f0..ac90d7a44 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [tool.cibuildwheel]
 # Only build on CPython 3.9
-build = "cp310-*"
+# build = "cp310-*"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
diff --git a/setup.py b/setup.py
index c1e86d7cc..fb608c3ad 100644
--- a/setup.py
+++ b/setup.py
@@ -89,7 +89,7 @@
     setup_requires=["oldest-supported-numpy", "cython>=0.23"],
     # install_requires=["numpy>=1.16", "scipy>=1.0", "scikit-learn", "torch", 
     #          "liac-arff", "requests"],
-    install_requires=["numpy", "scipy", "scikit-learn", "torch", 
+    install_requires=["numpy", "scipy", "scikit-learn==0.24.2", "torch", 
              "liac-arff", "requests"],
     python_requires=">=3.6",
     classifiers=[
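
Note on the scikit-learn pin: after this change the install-time requirement
in setup.py matches the build-time pin that patch 72 added to pyproject.toml,
presumably because the compiled extensions are built against that exact
scikit-learn version (which would explain why scikit-learn appears among the
build requirements at all). The two lines, side by side:

    # pyproject.toml (build time)
    requires = ["setuptools", "wheel", "oldest-supported-numpy", "cython>=0.23", "scikit-learn==0.24.2"]

    # setup.py (install time)
    install_requires = ["numpy", "scipy", "scikit-learn==0.24.2", "torch", "liac-arff", "requests"]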

From dd5753f6f58d41154d5c97cc6cd4b3f3fa3b5d4c Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 15:17:57 +0100
Subject: [PATCH 78/85] Building Windows wheels for scikit-learn==0.24.2

---
 .github/workflows/build_wheels.yml | 2 +-
 pyproject.toml                     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index 9511fea9b..bdf7f7efc 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         ## os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [macos-10.15]
+        os: [windows-2019]
 
     steps:
       - uses: actions/checkout@v2
diff --git a/pyproject.toml b/pyproject.toml
index ac90d7a44..5256e27b7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,9 +8,9 @@ build-backend = "setuptools.build_meta"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
-# skip = ["pp*", "cp310-*"]
+skip = ["pp*", "cp310-*", "cp36-*"]
 #Skip for MacOS
-skip = ["pp*", "cp36-*"]
+# skip = ["pp*", "cp36-*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
 # skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-*"]
 # skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]

From 382cee98d1cf9764206c77cb02e2e545e84a9741 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 15:37:51 +0100
Subject: [PATCH 79/85] Building Windows wheels for scikit-learn==0.24.2 64-bit
 architecture

---
 pyproject.toml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index 5256e27b7..d6a5f363e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,3 +27,6 @@ archs = ["x86_64", "universal2", "arm64"]
 #archs = ["auto", "aarch64"]   # aarch64 results in unfinished job on github Action
 # archs = ["auto"]
 archs = ["x86_64"]
+
+[tool.cibuildwheel.windows]
+archs = ["x86_64"]
\ No newline at end of file

From f09e81fa5f17a8b9aa36caef8ea120d0e78ff774 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 15:46:50 +0100
Subject: [PATCH 80/85] Building Windows wheels for scikit-learn==0.24.2 64-bit
 architecture

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index d6a5f363e..acb6caefe 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,4 +29,4 @@ archs = ["x86_64", "universal2", "arm64"]
 archs = ["x86_64"]
 
 [tool.cibuildwheel.windows]
-archs = ["x86_64"]
\ No newline at end of file
+archs = ['AMD64', 'ARM64']
\ No newline at end of file

From 9711180503ca71e658883c16e9b71609561a3eed Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 16:17:15 +0100
Subject: [PATCH 81/85] Building Windows wheels for scikit-learn==0.24.2 64-bit
 architecture

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index acb6caefe..dd86f8fa4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,4 +29,4 @@ archs = ["x86_64", "universal2", "arm64"]
 archs = ["x86_64"]
 
 [tool.cibuildwheel.windows]
-archs = ['AMD64', 'ARM64']
\ No newline at end of file
+archs = ['AMD64']
\ No newline at end of file
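
Note on the archs values in patches 79-81: cibuildwheel names architectures
per platform, so x86_64 is the Linux/macOS identifier while Windows uses
AMD64, x86, and ARM64; x86_64 is not accepted under [tool.cibuildwheel.windows],
which is why it is replaced here. The per-platform settings these patches
converge on, gathered as one sketch:

    [tool.cibuildwheel.linux]
    archs = ["x86_64"]

    [tool.cibuildwheel.windows]
    archs = ["AMD64"]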

From 925c8c1a0f2bf0443019bdffc75a6f594ddc5cb6 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Wed, 15 Mar 2023 18:30:46 +0100
Subject: [PATCH 82/85] Generating Linux Wheels for 64-bit architecture

---
 .github/workflows/build_wheels.yml | 2 +-
 pyproject.toml                     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index bdf7f7efc..9ae4da323 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         ## os: [ubuntu-20.04, windows-2019, macos-10.15]
-        os: [windows-2019]
+        os: [ubuntu-20.04]
 
     steps:
       - uses: actions/checkout@v2
diff --git a/pyproject.toml b/pyproject.toml
index dd86f8fa4..5b81787b6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,11 +8,11 @@ build-backend = "setuptools.build_meta"
 #skip = ["cp39-musllinux*"]
 
 # Skip for Windows
-skip = ["pp*", "cp310-*", "cp36-*"]
+# skip = ["pp*", "cp310-*", "cp36-*"]
 #Skip for MacOS
 # skip = ["pp*", "cp36-*"]
 # Skip for Ubuntu   # 32bit cp310 results in error
-# skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-*"]
+skip = ["cp36-musllinux*", "cp37-musllinux*", "cp38-musllinux*", "cp39-musllinux*", "pp*", "cp310-*", "cp36-*"]
 # skip = ["cp36-*", "cp37-*", "cp38-*", "pp*", "cp310-*"]
 
 

From 1463f91447aaee1cef1ec22238b865c5c184e7d4 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Thu, 16 Mar 2023 11:05:06 +0100
Subject: [PATCH 83/85] Testing all OS py39

---
 .github/workflows/build_wheels.yml |  4 ++--
 .github/workflows/pytest_cp39.yml  | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index 9ae4da323..23addf972 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -2,8 +2,8 @@ name: Build
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 
 jobs:
   build_wheels:
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 293a2a50c..760f86b13 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
@@ -16,8 +16,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest]
-        # os: [macos-latest, ubuntu-latest, windows-latest]
+        # os: [macos-latest]
+        os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.9',]
     env:
       OS: ${{ matrix.os }}
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        pip install -r requirements.txt
+        # pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8
@@ -67,8 +67,8 @@ jobs:
     - name: Test with pytest
       run: |
         pip install pytest
-        # pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
-        # pytest  tests/tests_DIOKR
-        python tests/tests_IOKR/ISOKR.py   
+        pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
+        pytest  tests/tests_DIOKR
+        # python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR

From ac84d33a72a4398a102179460c9a61d16988b7cc Mon Sep 17 00:00:00 2001
From: yohila 
Date: Thu, 16 Mar 2023 11:05:17 +0100
Subject: [PATCH 84/85] Testing all OS py39

---
 .github/workflows/pytest_cp39.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 760f86b13..59ab323fd 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -69,6 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        # python tests/tests_IOKR/ISOKR.py   
+        python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR

From 3b5cb389d1a87bf36a88b387df792bc7dcfa7177 Mon Sep 17 00:00:00 2001
From: yohila 
Date: Thu, 16 Mar 2023 11:25:15 +0100
Subject: [PATCH 85/85] Testing all OS py38 py37

---
 .github/workflows/pytest_cp37.yml | 20 ++++++++++----------
 .github/workflows/pytest_cp38.yml | 19 ++++++++++---------
 .github/workflows/pytest_cp39.yml |  4 ++--
 3 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/.github/workflows/pytest_cp37.yml b/.github/workflows/pytest_cp37.yml
index b40b2d092..5e36d40fb 100644
--- a/.github/workflows/pytest_cp37.yml
+++ b/.github/workflows/pytest_cp37.yml
@@ -2,8 +2,8 @@ name: pytesting 3.7
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
 # #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
@@ -16,8 +16,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-latest]
-        # os: [macos-latest, ubuntu-latest, windows-latest]
+        # os: [ubuntu-latest, macos-latest]
+        os: [macos-latest, ubuntu-latest, windows-latest]
         python: ['3.7',]
     env:
       OS: ${{ matrix.os }}
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        pip install -r requirements.txt
+        # pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8
@@ -53,15 +53,15 @@ jobs:
 
     - name: Build and compile
       run: |
-        make clean
+        # make clean
         # pip install .
         ################## Use this when you want to use local wheel installation through dist ######################
-        make dist
-        pip install ./dist/*
+        # make dist
+        # pip install dist/*
         #############################################################################
         # python setup.py develop
         # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6
-        # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
+        pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
         # pip install -i https://test.pypi.org/simple/ structured-predictions
         # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py
     - name: Test with pytest
@@ -69,6 +69,6 @@ jobs:
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        # python tests/tests_IOKR/ISOKR.py   
+        python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/.github/workflows/pytest_cp38.yml b/.github/workflows/pytest_cp38.yml
index 5acc41129..1273162f6 100644
--- a/.github/workflows/pytest_cp38.yml
+++ b/.github/workflows/pytest_cp38.yml
@@ -2,8 +2,8 @@ name: pytesting 3.8
 
 on: 
   push:
-    branches-ignore:
-      - "main"
+    # branches-ignore:
+    #   - "main"
   #  branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
@@ -32,7 +32,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         ################### Use this when you want to use local wheel installation through dist  #################
-        pip install -r requirements.txt
+        # pip install -r requirements.txt
     # - name: Lint with flake8
     #   run: |
     #     pip install flake8
@@ -53,21 +53,22 @@ jobs:
 
     - name: Build and compile
       run: |
-        make clean
+        # make clean
         # pip install .
         ################## Use this when you want to use local wheel installation through dist ######################
-        make dist
-        pip install dist/*
+        # make dist
+        # pip install dist/*
         #############################################################################
         # python setup.py develop
-        # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions==0.0.6
-        # pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
+        # pip install -i https://test.pypi.org/simple/ structured-predictions==0.0.6
+        pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
+        # pip install -i https://test.pypi.org/simple/ structured-predictions
         # python tests/tests_DIOKR/Demo_DeepIOKR_bibtex.py
     - name: Test with pytest
       run: |
         pip install pytest
         pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py 
         pytest  tests/tests_DIOKR
-        # python tests/tests_IOKR/ISOKR.py   
+        python tests/tests_IOKR/ISOKR.py   
         # python tests/tests_IOKR/SISOKR.py
         # pytest tests/tests_IOKR
diff --git a/.github/workflows/pytest_cp39.yml b/.github/workflows/pytest_cp39.yml
index 59ab323fd..7c0692318 100644
--- a/.github/workflows/pytest_cp39.yml
+++ b/.github/workflows/pytest_cp39.yml
@@ -2,8 +2,8 @@ name: pytesting 3.9
 
 on: 
   push:
-    # branches-ignore:
-    #   - "main"
+    branches-ignore:
+      - "main"
 #    branches: [ main ]
   schedule:
     - cron: '0 0 1 * *'
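
By the end of this series the pytest workflows no longer build the package in
the job: they install the version published on TestPyPI (pulling dependencies
from the main index via --extra-index-url) and run the test suites against it.
Reproducing the same steps locally, using only commands that appear in the
workflows above:

    python -m pip install --upgrade pip
    pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple structured-predictions
    pip install pytest
    pytest tests/tests_OK3/test_tree_clf_and_reg.py tests/tests_OK3/test_complementary.py
    pytest tests/tests_DIOKR
    python tests/tests_IOKR/ISOKR.py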