diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 04a26a214..b65faff9d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,11 +17,11 @@ repos:
     hooks:
       - id: black
         language_version: python3
-        args: [--target-version=py39]
+        args: [--target-version=py310]
         files: ^(python/.*|benchmarks/.*)$
         exclude: ^python/nx-cugraph/
   - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
+    rev: 7.1.1
     hooks:
       - id: flake8
         args: ["--config=.flake8"]
@@ -34,7 +34,7 @@ repos:
     hooks:
       - id: yesqa
         additional_dependencies:
-          - flake8==6.0.0
+          - flake8==7.1.1
   - repo: https://github.com/pre-commit/mirrors-clang-format
     rev: v16.0.6
     hooks:
@@ -42,7 +42,7 @@ repos:
         types_or: [c, c++, cuda]
         args: ["-fallback-style=none", "-style=file", "-i"]
   - repo: https://github.com/rapidsai/pre-commit-hooks
-    rev: v0.2.0
+    rev: v0.4.0
     hooks:
       - id: verify-copyright
         files: |
diff --git a/VERSION b/VERSION
index ec8489fda..af28c42b5 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-24.08.00
+24.12.00
diff --git a/benchmarks/nx-cugraph/pytest-based/README.md b/benchmarks/nx-cugraph/pytest-based/README.md
new file mode 100644
index 000000000..781550fa5
--- /dev/null
+++ b/benchmarks/nx-cugraph/pytest-based/README.md
@@ -0,0 +1,54 @@
+## `nx-cugraph` Benchmarks
+
+### Overview
+
+This directory contains a set of scripts designed to benchmark NetworkX with the `nx-cugraph` backend and deliver a report that summarizes the speed-up and runtime deltas over default NetworkX.
+
+The current benchmarks use the following datasets:
+
+| Dataset     | Nodes | Edges | Directed |
+| --------    | ------- | ------- | ------- |
+| netscience  | 1,461    | 5,484 | Yes |
+| email-Eu-core  | 1,005    | 25,571 | Yes |
+| cit-Patents  | 3,774,768    | 16,518,948 | Yes |
+| hollywood  | 1,139,905    | 57,515,616 | No |
+| soc-LiveJournal1  | 4,847,571    | 68,993,773 | Yes |
+
+
+
+### Scripts
+
+#### 1. `run-main-benchmarks.sh`
+This script allows users to run a small set of commonly used algorithms across multiple datasets and backends. All results are stored in a sub-directory (`logs/`), and output files are named based on the combination of parameters for that benchmark.
+
+NOTE: When run with all algorithms and datasets using NetworkX without an accelerated backend, this script may take a few hours to finish.
+
+**Usage:**
+ - Run with `--cpu-only`:
+  ```bash
+  ./run-main-benchmarks.sh --cpu-only
+  ```
+ - Run with `--gpu-only`:
+  ```bash
+  ./run-main-benchmarks.sh --gpu-only
+  ```
+ - Run without any arguments (all backends):
+  ```bash
+  ./run-main-benchmarks.sh
+  ```
+
+#### 2. `get_graph_bench_dataset.py`
+This script downloads the specified dataset using `cugraph.datasets`.
+
+**Usage:**
+  ```bash
+  python get_graph_bench_dataset.py [dataset]
+  ```
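+
+For reference, the script is a thin wrapper around the `cugraph.datasets` API. A rough sketch of what it does for a single dataset (`netscience` shown only as an example):
+```python
+import cugraph.datasets as cgds
+
+dataset = getattr(cgds, "netscience")    # hyphens in dataset names become underscores
+if not dataset.get_path().exists():      # paths live under RAPIDS_DATASET_ROOT_DIR
+    dataset.get_edgelist(download=True)  # downloads the CSV if not already present
+```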
+
+#### 3. `create_results_summary_page.py`
+This script is designed to be run after `run-main-benchmarks.sh` in order to generate an HTML page displaying a results table comparing default NetworkX to nx-cugraph. The script also reports information about the current system, so it should be run on the same machine on which the benchmarks were run.
+
+**Usage:**
+  ```bash
+  python create_results_summary_page.py > report.html
+  ```
diff --git a/benchmarks/nx-cugraph/pytest-based/bench_algos.py b/benchmarks/nx-cugraph/pytest-based/bench_algos.py
index d40b51308..f88d93c3f 100644
--- a/benchmarks/nx-cugraph/pytest-based/bench_algos.py
+++ b/benchmarks/nx-cugraph/pytest-based/bench_algos.py
@@ -271,9 +271,8 @@ def bench_from_networkx(benchmark, graph_obj):
 
 
 # normalized_param_values = [True, False]
-# k_param_values = [10, 100]
 normalized_param_values = [True]
-k_param_values = [10]
+k_param_values = [10, 100, 1000]
 
 
 @pytest.mark.parametrize(
@@ -282,6 +281,10 @@ def bench_from_networkx(benchmark, graph_obj):
 @pytest.mark.parametrize("k", k_param_values, ids=lambda k: f"{k=}")
 def bench_betweenness_centrality(benchmark, graph_obj, backend_wrapper, normalized, k):
     G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
+
+    if k > G.number_of_nodes():
+        pytest.skip(reason=f"{k=} > {G.number_of_nodes()=}")
+
     result = benchmark.pedantic(
         target=backend_wrapper(nx.betweenness_centrality),
         args=(G,),
@@ -305,6 +308,10 @@ def bench_edge_betweenness_centrality(
     benchmark, graph_obj, backend_wrapper, normalized, k
 ):
     G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
+
+    if k > G.number_of_nodes():
+        pytest.skip(reason=f"{k=} > {G.number_of_nodes()=}")
+
     result = benchmark.pedantic(
         target=backend_wrapper(nx.edge_betweenness_centrality),
         args=(G,),
@@ -473,6 +480,26 @@ def bench_pagerank_personalized(benchmark, graph_obj, backend_wrapper):
     assert type(result) is dict
 
 
+def bench_shortest_path(benchmark, graph_obj, backend_wrapper):
+    """
+    This passes in the source node with the highest degree, but no target.
+    """
+    G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
+    node = get_highest_degree_node(graph_obj)
+
+    result = benchmark.pedantic(
+        target=backend_wrapper(nx.shortest_path),
+        args=(G,),
+        kwargs=dict(
+            source=node,
+        ),
+        rounds=rounds,
+        iterations=iterations,
+        warmup_rounds=warmup_rounds,
+    )
+    assert type(result) is dict
+
+
 def bench_single_source_shortest_path_length(benchmark, graph_obj, backend_wrapper):
     G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
     node = get_highest_degree_node(graph_obj)
diff --git a/benchmarks/nx-cugraph/pytest-based/create_results_summary_page.py b/benchmarks/nx-cugraph/pytest-based/create_results_summary_page.py
new file mode 100644
index 000000000..f1cc4b06c
--- /dev/null
+++ b/benchmarks/nx-cugraph/pytest-based/create_results_summary_page.py
@@ -0,0 +1,291 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import re
+import pathlib
+import json
+import platform
+import psutil
+import socket
+import subprocess
+
+
+def get_formatted_time_value(time):
+    res = ""
+    if time < 1:
+        if time < 0.001:
+            units = "us"
+            time *= 1e6
+        else:
+            units = "ms"
+            time *= 1e3
+    else:
+        units = "s"
+    return f"{time:.3f}{units}"
+
+
+def get_all_benchmark_info():
+    benchmarks = {}
+    # Populate the benchmarks dict from the .json files in the logs dir
+    for json_file in logs_dir.glob("*.json"):
+        try:
+            with open(json_file) as f:
+                data = json.load(f)
+        except json.decoder.JSONDecodeError:
+            continue
+
+        for benchmark_run in data["benchmarks"]:
+            # example name: "bench_triangles[ds=netscience-backend=cugraph-preconverted]"
+            name = benchmark_run["name"]
+
+            algo_name = name.split("[")[0]
+            if algo_name.startswith("bench_"):
+                algo_name = algo_name[6:]
+            # special case for betweenness_centrality
+            match = k_patt.match(name)
+            if match is not None:
+                algo_name += f", k={match.group(1)}"
+
+            match = dataset_patt.match(name)
+            if match is None:
+                raise RuntimeError(
+                    f"benchmark name {name} in file {json_file} has an unexpected format"
+                )
+            dataset = match.group(1)
+            if dataset.endswith("-backend"):
+                dataset = dataset[:-8]
+
+            match = backend_patt.match(name)
+            if match is None:
+                raise RuntimeError(
+                    f"benchmark name {name} in file {json_file} has an unexpected format"
+                )
+            backend = match.group(1)
+            if backend == "None":
+                backend = "networkx"
+
+            runtime = benchmark_run["stats"]["mean"]
+            benchmarks.setdefault(algo_name, {}).setdefault(backend, {})[
+                dataset
+            ] = runtime
+    return benchmarks
+
+
+def compute_perf_vals(cugraph_runtime, networkx_runtime):
+    speedup_string = f"{networkx_runtime / cugraph_runtime:.3f}X"
+    delta = networkx_runtime - cugraph_runtime
+    if abs(delta) < 1:
+        if abs(delta) < 0.001:
+            units = "us"
+            delta *= 1e6
+        else:
+            units = "ms"
+            delta *= 1e3
+    else:
+        units = "s"
+    delta_string = f"{delta:.3f}{units}"
+
+    return (speedup_string, delta_string)
+
+
+def get_mem_info():
+    return round(psutil.virtual_memory().total / (1024**3), 2)
+
+
+def get_cuda_version():
+    try:
+        output = subprocess.check_output("nvidia-smi", shell=True).decode()
+        return next(
+            line.split("CUDA Version: ")[1].split()[0]
+            for line in output.splitlines()
+            if "CUDA Version" in line
+        )
+    except (subprocess.CalledProcessError, StopIteration):
+        return "Failed to get CUDA version."
+
+
+def get_first_gpu_info():
+    try:
+        gpu_info = (
+            subprocess.check_output(
+                "nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv,noheader",
+                shell=True,
+            )
+            .decode()
+            .strip()
+        )
+        if gpu_info:
+            gpus = gpu_info.split("\n")
+            num_gpus = len(gpus)
+            first_gpu = gpus[0]  # Get the information for the first GPU
+            gpu_name, mem_total, _, _ = first_gpu.split(",")
+            return f"{num_gpus} x {gpu_name.strip()} ({round(int(mem_total.strip().split()[0]) / (1024), 2)} GB)"
+        else:
+            return "No GPU found or unable to query GPU details."
+    except subprocess.CalledProcessError:
+        return "Failed to execute nvidia-smi. No GPU information available."
+
+
+def get_system_info():
+    print('<div class="box2">')
+    print(f"<p>Hostname: {socket.gethostname()}</p>")
+    print(
+        f'<p class="indent"">Operating System: {platform.system()} {platform.release()}</p>'
+    )
+    print(f'<p class="indent">Kernel Version  : {platform.version()}</p>')
+    with open("/proc/cpuinfo") as f:
+        print(
+            f'<p>CPU: {next(line.strip().split(": ")[1] for line in f if "model name" in line)} ({psutil.cpu_count(logical=False)} cores)</p>'
+        )
+    print(f'<p class="indent">Memory: {get_mem_info()} GB</p>')
+    print(f"<p>GPU: {get_first_gpu_info()}</p>")
+    print(f"<p>CUDA Version: {get_cuda_version()}</p>")
+
+
+if __name__ == "__main__":
+    logs_dir = pathlib.Path("logs")
+
+    dataset_patt = re.compile(r".*ds=([\w-]+).*")
+    backend_patt = re.compile(r".*backend=(\w+).*")
+    k_patt = re.compile(r".*k=(10*).*")
+
+    # Organize all benchmark runs by the following hierarchy: algo -> backend -> dataset
+    benchmarks = get_all_benchmark_info()
+
+    # dump HTML table
+    ordered_datasets = [
+        "netscience",
+        "email_Eu_core",
+        "cit-patents",
+        "hollywood",
+        "soc-livejournal1",
+    ]
+    # dataset, # Node, # Edge, Directed info
+    dataset_meta = {
+        "netscience": ["1,461", "5,484", "Yes"],
+        "email_Eu_core": ["1,005", "25,571", "Yes"],
+        "cit-patents": ["3,774,768", "16,518,948", "Yes"],
+        "hollywood": ["1,139,905", "57,515,616", "No"],
+        "soc-livejournal1": ["4,847,571", "68,993,773", "Yes"],
+    }
+
+    print(
+        """
+    <html>
+    <head>
+    <style>
+        table {
+            table-layout: fixed;
+            width: 100%;
+            border-collapse: collapse;
+        }
+        tbody tr:nth-child(odd) {
+            background-color: #ffffff;
+        }
+        tbody tr:nth-child(even) {
+            background-color: #d3d3d3;
+        }
+        tbody td {
+            text-align: center;
+            color: black;
+        }
+        th,
+        td {
+            padding: 12px;
+        }
+        .footer-main {
+            background-color: #d1d1d1;
+            padding: 20px;
+            padding-top: 0px;
+            font-size: 12px;
+            color: black;
+            width: 100%;
+            display: flex;
+        }
+        .box1{
+            flex: 1;
+            padding-right: 30px;
+        }
+        .box2{
+            flex: 4;
+        }
+        .indent {
+            text-indent: 20px;
+        }
+    </style>
+    </head>
+    <table>
+    <thead>
+    <tr>
+        <th>Dataset<br>Nodes<br>Edges<Br>Directed</th>"""
+    )
+    for ds in ordered_datasets:
+        print(
+            f"      <th>{ds}<br>{dataset_meta[ds][0]}<br>{dataset_meta[ds][1]}<br>{dataset_meta[ds][2]}<br></th>"
+        )
+    print(
+        """   </tr>
+    </thead>
+    <tbody>
+    """
+    )
+    for algo_name in sorted(benchmarks):
+        algo_runs = benchmarks[algo_name]
+        print("   <tr>")
+        print(f"      <td>{algo_name}</td>")
+        # Proceed only if any results are present for both cugraph and NX
+        if "cugraph" in algo_runs and "networkx" in algo_runs:
+            cugraph_algo_runs = algo_runs["cugraph"]
+            networkx_algo_runs = algo_runs["networkx"]
+            datasets_in_both = set(cugraph_algo_runs).intersection(networkx_algo_runs)
+
+            # populate the table with speedup results for each dataset in the order
+            # specified in ordered_datasets. If results for a run using a dataset
+            # are not present for both cugraph and NX, output an empty cell.
+            for dataset in ordered_datasets:
+                if dataset in datasets_in_both:
+                    cugraph_runtime = cugraph_algo_runs[dataset]
+                    networkx_runtime = networkx_algo_runs[dataset]
+                    (speedup, runtime_delta) = compute_perf_vals(
+                        cugraph_runtime=cugraph_runtime,
+                        networkx_runtime=networkx_runtime,
+                    )
+                    nx_formatted = get_formatted_time_value(networkx_runtime)
+                    cg_formatted = get_formatted_time_value(cugraph_runtime)
+                    print(
+                        f"      <td>{nx_formatted} / {cg_formatted}<br>{speedup}<br>{runtime_delta}</td>"
+                    )
+                else:
+                    print("      <td></td>")
+
+        # If a comparison between cugraph and NX cannot be made, output empty cells
+        # for each dataset
+        else:
+            for _ in range(len(ordered_datasets)):
+                print("      <td></td>")
+        print("   </tr>")
+    print(
+        """
+    </tbody>\n</table>
+    <div class="footer-main">
+        <div class="box1">
+            <h4>Table Format:</h4>
+            <ul>
+                <li><strong>NetworkX time / nx-cugraph time</strong></li>
+                <li><strong>Speed-up of using nx-cugraph</strong></li>
+                <li><strong>Time-delta</strong></li>
+            </ul>
+        </div>"""
+    )
+    get_system_info()
+    print("""</div>\n</div>\n</html>""")
diff --git a/benchmarks/nx-cugraph/pytest-based/get_graph_bench_dataset.py b/benchmarks/nx-cugraph/pytest-based/get_graph_bench_dataset.py
new file mode 100644
index 000000000..5a0a15da8
--- /dev/null
+++ b/benchmarks/nx-cugraph/pytest-based/get_graph_bench_dataset.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Checks if a particular dataset has been downloaded inside the datasets dir
+(RAPIDS_DATASET_ROOT_DIR). If not, the file will be downloaded using the
+datasets API.
+
+Positional Arguments:
+    1) dataset name (e.g. 'email_Eu_core', 'cit-patents')
+       available datasets can be found here: `python/cugraph/cugraph/datasets/__init__.py`
+"""
+
+import sys
+
+import cugraph.datasets as cgds
+
+
+if __name__ == "__main__":
+    # download and store dataset (csv) by using the Datasets API
+    dataset = sys.argv[1].replace("-", "_")
+    dataset_obj = getattr(cgds, dataset)
+
+    if not dataset_obj.get_path().exists():
+        dataset_obj.get_edgelist(download=True)
diff --git a/benchmarks/nx-cugraph/pytest-based/run-main-benchmarks.sh b/benchmarks/nx-cugraph/pytest-based/run-main-benchmarks.sh
new file mode 100755
index 000000000..3059e3d4b
--- /dev/null
+++ b/benchmarks/nx-cugraph/pytest-based/run-main-benchmarks.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# Copyright (c) 2024, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# location to store datasets used for benchmarking
+export RAPIDS_DATASET_ROOT_DIR=/datasets/cugraph
+mkdir -p logs
+
+# list of algos, datasets, and back-ends to use in combinations
+algos="
+    pagerank
+    betweenness_centrality
+    louvain
+    shortest_path
+    weakly_connected_components
+    triangles
+    bfs_predecessors
+"
+datasets="
+   netscience
+   email_Eu_core
+   cit-patents
+   hollywood
+   soc-livejournal1
+"
+# None backend is default networkx
+# cugraph-preconverted backend is nx-cugraph
+backends="
+    None
+    cugraph-preconverted
+"
+# check for --cpu-only or --gpu-only args
+if [[ "$#" -eq 1 ]]; then
+    case $1 in
+        --cpu-only)
+            backends="None"
+            ;;
+        --gpu-only)
+            backends="cugraph-preconverted"
+            ;;
+        *)
+            echo "Unknown option: $1"
+            exit 1
+            ;;
+    esac
+fi
+
+for algo in $algos; do
+    for dataset in $datasets; do
+        # this script can be used to download benchmarking datasets by name via cugraph.datasets
+        python get_graph_bench_dataset.py $dataset
+        for backend in $backends; do
+            name="${backend}__${algo}__${dataset}"
+            echo "Running: $backend, $dataset, bench_$algo"
+            # command to reproduce an individual test run:
+            # echo "RUNNING: pytest -sv -k \"$backend and $dataset and bench_$algo and not 1000\" --benchmark-json=\"logs/${name}.json\" bench_algos.py"
+            pytest -sv \
+                -k "$backend and $dataset and bench_$algo and not 1000" \
+                --benchmark-json="logs/${name}.json" \
+                bench_algos.py 2>&1 | tee "logs/${name}.out"
+        done
+    done
+done
diff --git a/benchmarks/pytest.ini b/benchmarks/pytest.ini
index fe7fc31b6..d692b78de 100644
--- a/benchmarks/pytest.ini
+++ b/benchmarks/pytest.ini
@@ -8,6 +8,7 @@ testpaths =
 
 addopts =
     --benchmark-columns="min, max, mean, stddev, outliers"
+    --tb=native
 
 markers =
     managedmem_on: RMM managed memory enabled
diff --git a/docs/nx-cugraph/source/_static/bc_benchmark.png b/docs/nx-cugraph/source/_static/bc_benchmark.png
new file mode 100644
index 000000000..9e385c97e
Binary files /dev/null and b/docs/nx-cugraph/source/_static/bc_benchmark.png differ
diff --git a/docs/nx-cugraph/source/_static/colab.png b/docs/nx-cugraph/source/_static/colab.png
new file mode 100644
index 000000000..c4c3f5b46
Binary files /dev/null and b/docs/nx-cugraph/source/_static/colab.png differ
diff --git a/docs/nx-cugraph/source/_static/nxcg-execution-diagram.jpg b/docs/nx-cugraph/source/_static/nxcg-execution-diagram.jpg
new file mode 100644
index 000000000..48136289a
Binary files /dev/null and b/docs/nx-cugraph/source/_static/nxcg-execution-diagram.jpg differ
diff --git a/docs/nx-cugraph/source/index.rst b/docs/nx-cugraph/source/index.rst
index 9ea9e4d65..259a36b8f 100644
--- a/docs/nx-cugraph/source/index.rst
+++ b/docs/nx-cugraph/source/index.rst
@@ -1,58 +1,82 @@
 RAPIDS Graph documentation
 ==========================
+
 .. image:: images/cugraph_logo_2.png
    :width: 600
 
-*Making graph analytics fast and easy regardless of scale*
-
-
-.. list-table:: RAPIDS Graph covers a range of graph libraries and packages, that includes:
-   :widths: 25 25 25
-   :header-rows: 1
-
-   * - Core
-     - GNN
-     - Extension
-   * - :abbr:`cugraph (Python wrapper with lots of convenience functions)`
-     - :abbr:`cugraph-ops (GNN aggregators and operators)`
-     - :abbr:`cugraph-service (Graph-as-a-service provides both Client and Server packages)`
-   * - :abbr:`pylibcugraph (light-weight Python wrapper with no guard rails)`
-     - :abbr:`cugraph-dgl (Accelerated extensions for use with the DGL framework)`
-     -
-   * - :abbr:`libcugraph (C++ API)`
-     - :abbr:`cugraph-pyg (Accelerated extensions for use with the PyG framework)`
-     -
-   * - :abbr:`libcugraph_etl (C++ renumbering function for strings)`
-     - :abbr:`wholegraph (Shared memory-based GPU-accelerated GNN training)`
-     -
-..
-|
 
 ~~~~~~~~~~~~
 Introduction
 ~~~~~~~~~~~~
 cuGraph is a library of graph algorithms that seamlessly integrates into the
 RAPIDS data science ecosystem and allows the data scientist to easily call
-graph algorithms using data stored in GPU DataFrames, NetworkX Graphs, or
-even CuPy or SciPy sparse Matrices.
+graph algorithms using data stored in GPU DataFrames, NetworkX Graphs, or even
+CuPy or SciPy sparse Matrices. Our major integration effort with NetworkX
+allows for **zero code change** GPU acceleration through the use of the
+nx-cugraph backend. NetworkX and the nx-cugraph backend offer a seamless
+transition to GPU accelerated graph analytics for NetworkX users with access to
+a supported GPU.
+
+Getting started with cuGraph
+----------------------------
+
+Required hardware/software for cuGraph and `RAPIDS <https://docs.rapids.ai/user-guide>`_:
+
+ * NVIDIA GPU, Volta architecture or later, with `compute capability <https://developer.nvidia.com/cuda-gpus>`_ 7.0+
+ * CUDA 11.2-11.8, 12.0-12.5
+ * Python version 3.10, 3.11, or 3.12
+ * NetworkX version 3.0 or newer in order to use the nx-cugraph backend. NetworkX version 3.4 or newer is recommended (`see below <#cugraph-using-networkx-code>`_).
+
+Installation
+------------
+
+The latest RAPIDS System Requirements documentation is located `here <https://docs.rapids.ai/install#system-req>`_.
+
+It covers several ways to set up cuGraph:
+
+* From Unix
+
+  * `Conda <https://docs.rapids.ai/install/#conda>`_
+  * `Docker <https://docs.rapids.ai/install/#docker>`_
+  * `pip <https://docs.rapids.ai/install/#pip>`_
+
+
+**Note: Windows use of RAPIDS depends on prior installation of** `WSL2 <https://learn.microsoft.com/en-us/windows/wsl/install>`_.
+
+* From Windows
 
-Note: We are redoing all of our documents, please be patient as we update
-the docs and links
+  * `Conda <https://docs.rapids.ai/install#wsl-conda>`_
+  * `Docker <https://docs.rapids.ai/install#wsl-docker>`_
+  * `pip <https://docs.rapids.ai/install#wsl-pip>`_
 
-|
+
+cuGraph Using NetworkX Code
+---------------------------
+
+cuGraph is now available as a NetworkX backend using `nx-cugraph <https://rapids.ai/nx-cugraph/>`_.
+nx-cugraph offers NetworkX users a **zero code change** option to accelerate
+their existing NetworkX code using an NVIDIA GPU and cuGraph.
+
+
+**cuGraph API Example**
+
+.. code-block:: python
+
+    import cugraph
+    import cudf
+
+    # Create an instance of the popular Zachary Karate Club graph
+    from cugraph.datasets import karate
+    G = karate.get_graph()
+
+    # Call cugraph.degree_centrality
+    vertex_bc = cugraph.degree_centrality(G)
+
+There are several resources containing cuGraph examples. The `cuGraph notebook repository <https://github.com/rapidsai/cugraph/blob/main/notebooks/README.md>`_
+has many examples of loading graph data and running algorithms in Jupyter notebooks.
+The `cuGraph test code <https://github.com/rapidsai/cugraph/tree/main/python/cugraph/cugraph/tests>`_ contains Python scripts that set up and call cuGraph algorithms.
+A simple example of `testing the degree centrality algorithm <https://github.com/rapidsai/cugraph/blob/main/python/cugraph/cugraph/tests/centrality/test_degree_centrality.py>`_
+is a good place to start. Some of these also show `multi-GPU tests/examples <https://github.com/rapidsai/cugraph/blob/main/python/cugraph/cugraph/tests/centrality/test_degree_centrality_mg.py>`_ with larger datasets.
 
 .. toctree::
    :maxdepth: 2
-   :caption: Contents:
-
-   basics/index
-   nx_cugraph/index
-   installation/index
-   tutorials/index
-   graph_support/index
-   wholegraph/index
-   references/index
-   api_docs/index
+
+   top_toc
 
 Indices and tables
 ==================
diff --git a/docs/nx-cugraph/source/installation/getting_cugraph.md b/docs/nx-cugraph/source/installation/getting_cugraph.md
index 126325c09..01bc9e379 100644
--- a/docs/nx-cugraph/source/installation/getting_cugraph.md
+++ b/docs/nx-cugraph/source/installation/getting_cugraph.md
@@ -21,7 +21,7 @@ The RAPIDS Docker containers contain all RAPIDS packages, including all from cuG
 
 
 ## Conda
-It is easy to install cuGraph using conda. You can get a minimal conda installation with [Miniconda](https://conda.io/miniconda.html) or get the full installation with [Anaconda](https://www.anaconda.com/download).
+It is easy to install cuGraph using conda. You can get a minimal conda installation with [miniforge](https://github.com/conda-forge/miniforge).
 
 cuGraph Conda packages
  * cugraph - this will also import:
@@ -45,7 +45,7 @@ conda install -c rapidsai -c conda-forge -c nvidia cugraph cuda-version=12.0
 
 Alternatively, use `cuda-version=11.8` for packages supporting CUDA 11.
 
-Note: This conda installation only applies to Linux and Python versions 3.9/3.10/3.11.
+Note: This conda installation only applies to Linux and Python versions 3.10/3.11/3.12.
 
 <br>
 
diff --git a/docs/nx-cugraph/source/installation/source_build.md b/docs/nx-cugraph/source/installation/source_build.md
index 89e63bade..243a62e5c 100644
--- a/docs/nx-cugraph/source/installation/source_build.md
+++ b/docs/nx-cugraph/source/installation/source_build.md
@@ -12,8 +12,7 @@ __Compilers:__
 * `nvcc`          version 11.5+
 
 __CUDA:__
-* CUDA 11.2+
-* NVIDIA driver 470.42.01 or newer
+* CUDA 11.8+
 * NVIDIA GPU, Volta architecture or later, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+
 
 Further details and download links for these prerequisites are available on the
@@ -178,7 +177,7 @@ Run either the C++ or the Python tests with datasets
    make test
    ```
 
-Note: This conda installation only applies to Linux and Python versions 3.8/3.11.
+Note: This conda installation only applies to Linux and Python versions 3.10, 3.11, and 3.12.
 
 ### (OPTIONAL) Set environment variable on activation
 
diff --git a/docs/nx-cugraph/source/nx_cugraph/benchmarks.md b/docs/nx-cugraph/source/nx_cugraph/benchmarks.md
new file mode 100644
index 000000000..31d5e5b09
--- /dev/null
+++ b/docs/nx-cugraph/source/nx_cugraph/benchmarks.md
@@ -0,0 +1,28 @@
+# Benchmarks
+
+## NetworkX vs. nx-cugraph
+We ran several commonly used graph algorithms on both `networkx` and `nx-cugraph`. Here are the results.
+
+
+<figure>
+
+![bench-image](../_static/bc_benchmark.png)
+
+<figcaption style="text-align: center;">Results from running this <a
+href="https://github.com/rapidsai/cugraph/blob/HEAD/benchmarks/nx-cugraph/pytest-based/bench_algos.py">Benchmark</a><span
+class="title-ref"></span></figcaption>
+</figure>
+
+## Reproducing Benchmarks
+
+Below are the steps to reproduce the results on your workstation. These are documented in this [README](https://github.com/rapidsai/cugraph/blob/HEAD/benchmarks/nx-cugraph/pytest-based).
+
+1. Clone the latest <https://github.com/rapidsai/cugraph>
+
+2. Follow the instructions to build an environment
+
+3. Activate the environment
+
+4. Install the latest `nx-cugraph` by following the [guide](installation.md)
+
+5. Follow the instructions written in the README here: `cugraph/benchmarks/nx-cugraph/pytest-based/`
diff --git a/docs/nx-cugraph/source/nx_cugraph/faqs.md b/docs/nx-cugraph/source/nx_cugraph/faqs.md
new file mode 100644
index 000000000..dee943d19
--- /dev/null
+++ b/docs/nx-cugraph/source/nx_cugraph/faqs.md
@@ -0,0 +1,5 @@
+# FAQ
+
+ > **1. Is `nx-cugraph` able to run across multiple GPUs?**
+
+nx-cugraph currently does not support multi-GPU. Multi-GPU support may be added to a future release of nx-cugraph, but consider [cugraph](https://docs.rapids.ai/api/cugraph/stable) for multi-GPU accelerated graph analytics in Python today.
diff --git a/docs/nx-cugraph/source/nx_cugraph/how-it-works.md b/docs/nx-cugraph/source/nx_cugraph/how-it-works.md
new file mode 100644
index 000000000..f9dc5af67
--- /dev/null
+++ b/docs/nx-cugraph/source/nx_cugraph/how-it-works.md
@@ -0,0 +1,114 @@
+# How it Works
+
+NetworkX has the ability to **dispatch function calls to separately-installed third-party backends**.
+
+NetworkX backends let users experience improved performance and/or additional functionality without changing their NetworkX Python code. Examples include backends that provide algorithm acceleration using GPUs, parallel processing, graph database integration, and more.
+
+While NetworkX is a pure-Python implementation with minimal to no dependencies, backends may be written in other languages and require specialized hardware and/or OS support, additional software dependencies, or even separate services. Installation instructions vary based on the backend, and additional information can be found on the individual backend project pages listed in the NetworkX Backend Gallery.
+
+
+![nxcg-execution-flow](../_static/nxcg-execution-diagram.jpg)
+
+## Enabling nx-cugraph
+
+NetworkX will use nx-cugraph as the graph analytics backend if any of the
+following are used:
+
+### `NETWORKX_BACKEND_PRIORITY` environment variable
+
+The `NETWORKX_BACKEND_PRIORITY` environment variable can be used to have NetworkX automatically dispatch to specified backends. It can be set to a single backend name or a comma-separated list of backends, ordered by the priority in which NetworkX should try them. If a called function is supported by nx-cugraph, NetworkX redirects the call to nx-cugraph automatically; otherwise it tries the next backend in the list, if provided, and finally falls back to the default NetworkX implementation. See [NetworkX Backends and Configs](https://networkx.org/documentation/stable/reference/backends.html).
+
+For example, this setting will have NetworkX use nx-cugraph for any function called by the script that nx-cugraph supports, and the default NetworkX implementation for all others.
+```
+bash> NETWORKX_BACKEND_PRIORITY=cugraph python my_networkx_script.py
+```
+
+This example will have NetworkX use nx-cugraph for the functions it supports, then try `other_backend` for functions nx-cugraph does not support, and finally the default NetworkX implementation for functions supported by neither backend:
+```
+bash> NETWORKX_BACKEND_PRIORITY="cugraph,other_backend" python my_networkx_script.py
+```
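+
+The same backend priority can also be set from within Python or Jupyter through the NetworkX configuration object, as noted on the nx-cugraph index page. A minimal sketch, assuming a NetworkX version new enough to expose `nx.config` (3.3 or later):
+```python
+import networkx as nx
+
+# Python-side equivalent of setting NETWORKX_BACKEND_PRIORITY=cugraph in the shell
+nx.config.backend_priority = ["cugraph"]
+
+G = nx.karate_club_graph()
+result = nx.betweenness_centrality(G, k=10)  # dispatched to nx-cugraph when supported
+```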
+
+### `backend=` keyword argument
+
+To explicitly specify a particular backend for an API, use the `backend=`
+keyword argument. This argument takes precedence over the
+`NETWORKX_BACKEND_PRIORITY` environment variable. This requires anyone
+running code that uses the `backend=` keyword argument to have the specified
+backend installed.
+
+Example:
+```python
+nx.betweenness_centrality(cit_patents_graph, k=k, backend="cugraph")
+```
+
+### Type-based dispatching
+
+NetworkX also supports automatically dispatching to backends associated with
+specific graph types. Like the `backend=` keyword argument example above, this
+requires the user to write code for a specific backend, and therefore requires
+the backend to be installed, but has the advantage of ensuring a particular
+behavior without the potential for runtime conversions.
+
+To use type-based dispatching with nx-cugraph, the user must import the backend
+directly in their code to access the utilities provided to create a Graph
+instance specifically for the nx-cugraph backend.
+
+Example:
+```python
+import networkx as nx
+import nx_cugraph as nxcg
+
+G = nx.Graph()
+...
+nxcg_G = nxcg.from_networkx(G)             # conversion happens once here
+nx.betweenness_centrality(nxcg_G, k=1000)  # nxcg Graph type causes cugraph backend
+                                           # to be used, no conversion necessary
+```
+
+## Command Line Example
+
+---
+
+Create `bc_demo.ipy` and paste the code below.
+
+```python
+import pandas as pd
+import networkx as nx
+
+url = "https://data.rapids.ai/cugraph/datasets/cit-Patents.csv"
+df = pd.read_csv(url, sep=" ", names=["src", "dst"], dtype="int32")
+G = nx.from_pandas_edgelist(df, source="src", target="dst")
+
+%time result = nx.betweenness_centrality(G, k=10)
+```
+Run the command:
+```
+user@machine:/# ipython bc_demo.ipy
+```
+
+You will observe a run time of approximately 7 minutes, more or less depending on your CPU.
+
+Run the command again, this time specifying cugraph as the NetworkX backend.
+```
+user@machine:/# NETWORKX_BACKEND_PRIORITY=cugraph ipython bc_demo.ipy
+```
+This run will be much faster, typically around 20 seconds depending on your GPU.
+There is also an option to cache the graph conversion to GPU. This can dramatically improve performance when running multiple algorithms on the same graph. Caching is enabled by default for NetworkX versions 3.4 and later; if using an older version, set `NETWORKX_CACHE_CONVERTED_GRAPHS=True`:
+```
+NETWORKX_BACKEND_PRIORITY=cugraph NETWORKX_CACHE_CONVERTED_GRAPHS=True ipython bc_demo.ipy
+```
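+
+If you prefer to set this from Python rather than the environment, NetworkX exposes a corresponding config attribute. A minimal sketch, assuming the running NetworkX version provides `nx.config.cache_converted_graphs` (3.3 or later):
+```python
+import networkx as nx
+
+# Assumed Python-side equivalent of NETWORKX_CACHE_CONVERTED_GRAPHS=True
+nx.config.cache_converted_graphs = True
+```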
+
+When running Python interactively, the cugraph backend can be specified as an argument in the algorithm call.
+
+For example:
+```
+nx.betweenness_centrality(cit_patents_graph, k=k, backend="cugraph")
+```
+
+
+The latest list of algorithms supported by nx-cugraph can be found [here](https://github.com/rapidsai/cugraph/blob/HEAD/python/nx-cugraph/README.md#algorithms) or in the next section.
+
+---
diff --git a/docs/nx-cugraph/source/nx_cugraph/index.rst b/docs/nx-cugraph/source/nx_cugraph/index.rst
index ef6f51601..110300c18 100644
--- a/docs/nx-cugraph/source/nx_cugraph/index.rst
+++ b/docs/nx-cugraph/source/nx_cugraph/index.rst
@@ -1,9 +1,48 @@
-===============================
-nxCugraph as a NetworkX Backend
-===============================
+nx-cugraph
+-----------
 
+nx-cugraph is a `NetworkX backend <https://networkx.org/documentation/stable/reference/utils.html#backends>`_ that provides **GPU acceleration** to many popular NetworkX algorithms.
+
+By simply `installing and enabling nx-cugraph <https://github.com/rapidsai/cugraph/blob/HEAD/python/nx-cugraph/README.md#install>`_, users can see significant speedup on workflows where performance is hindered by the default NetworkX implementation.  With ``nx-cugraph``, users can have GPU-based, large-scale performance **without** changing their familiar and easy-to-use NetworkX code.
+
+.. code-block:: python
+
+    import pandas as pd
+    import networkx as nx
+
+    url = "https://data.rapids.ai/cugraph/datasets/cit-Patents.csv"
+    df = pd.read_csv(url, sep=" ", names=["src", "dst"], dtype="int32")
+    G = nx.from_pandas_edgelist(df, source="src", target="dst")
+
+    %time result = nx.betweenness_centrality(G, k=10)
+
+.. figure:: ../_static/colab.png
+    :width: 200px
+    :target: https://nvda.ws/4drM4re
+
+    Try it on Google Colab!
+
+
++------------------------------------------------------------------------------------------------------------------------+
+| **Zero Code Change Acceleration**                                                                                      |
+|                                                                                                                        |
+| Just ``nx.config.backend_priority=["cugraph"]`` in Jupyter, or set ``NETWORKX_BACKEND_PRIORITY=cugraph`` in the shell. |
++------------------------------------------------------------------------------------------------------------------------+
+| **Run the same code on CPU or GPU**                                                                                    |
+|                                                                                                                        |
+| Nothing changes, not even your `import` statements, when going from CPU to GPU.                                        |
++------------------------------------------------------------------------------------------------------------------------+
+
+
+``nx-cugraph`` is now Generally Available (GA) as part of the ``RAPIDS`` package.  See `RAPIDS
+Quick Start <https://rapids.ai/#quick-start>`_ to get up-and-running with ``nx-cugraph``.
 
 .. toctree::
-   :maxdepth: 2
+    :maxdepth: 1
+    :caption: Contents:
 
-   nx_cugraph.md
+    how-it-works
+    supported-algorithms
+    installation
+    benchmarks
+    faqs
diff --git a/docs/nx-cugraph/source/nx_cugraph/installation.md b/docs/nx-cugraph/source/nx_cugraph/installation.md
new file mode 100644
index 000000000..8d221f16f
--- /dev/null
+++ b/docs/nx-cugraph/source/nx_cugraph/installation.md
@@ -0,0 +1,50 @@
+# Getting Started
+
+This guide describes how to install ``nx-cugraph`` and use it in your workflows.
+
+
+## System Requirements
+
+`nx-cugraph` requires the following:
+
+ - **Volta architecture or later NVIDIA GPU, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+**
+ - **[CUDA](https://docs.nvidia.com/cuda/index.html) 11.2, 11.4, 11.5, 11.8, 12.0, 12.2, or 12.5**
+ - **Python >= 3.10**
+ - **[NetworkX](https://networkx.org/documentation/stable/install.html#) >= 3.0 (version 3.2 or higher recommended)**
+
+More details about system requirements can be found in the [RAPIDS System Requirements Documentation](https://docs.rapids.ai/install#system-req).
+
+## Installing nx-cugraph
+
+Read the [RAPIDS Quick Start Guide](https://docs.rapids.ai/install) to learn more about installing all RAPIDS libraries.
+
+`nx-cugraph` can be installed using conda or pip. It is included in the RAPIDS metapackage, or can be installed separately.
+
+### Conda
+**Nightly version**
+```bash
+conda install -c rapidsai-nightly -c conda-forge -c nvidia nx-cugraph
+```
+
+**Stable version**
+```bash
+conda install -c rapidsai -c conda-forge -c nvidia nx-cugraph
+```
+
+### pip
+**Nightly version**
+```bash
+pip install nx-cugraph-cu11 --extra-index-url https://pypi.anaconda.org/rapidsai-wheels-nightly/simple
+```
+
+**Stable version**
+```bash
+pip install nx-cugraph-cu11 --extra-index-url https://pypi.nvidia.com
+```
+
+<div style="border: 1px solid #ccc; background-color: #f9f9f9; padding: 10px; border-radius: 5px;">
+
+**Note:**
+ - The `pip install` examples above are for CUDA 11. To install for CUDA 12, replace `-cu11` with `-cu12`.
+
+</div>
diff --git a/docs/nx-cugraph/source/nx_cugraph/nx_cugraph.md b/docs/nx-cugraph/source/nx_cugraph/nx_cugraph.md
index 75a30b0be..900362a6e 100644
--- a/docs/nx-cugraph/source/nx_cugraph/nx_cugraph.md
+++ b/docs/nx-cugraph/source/nx_cugraph/nx_cugraph.md
@@ -1,18 +1,10 @@
 ### nx_cugraph
 
 
-nx-cugraph is a [NetworkX
-backend](<https://networkx.org/documentation/stable/reference/utils.html#backends>) that provides GPU acceleration to many popular NetworkX algorithms.
-
-By simply [installing and enabling nx-cugraph](<https://github.com/rapidsai/cugraph/blob/HEAD/python/nx-cugraph/README.md#install>), users can see significant speedup on workflows where performance is hindered by the default NetworkX implementation.  With nx-cugraph, users can have GPU-based, large-scale performance without changing their familiar and easy-to-use NetworkX code.
-
-Let's look at some examples of algorithm speedups comparing NetworkX with and without GPU acceleration using nx-cugraph.
-
-Each chart has three measurements.
-* NX - default NetworkX, no GPU acceleration
-* nx-cugraph - GPU-accelerated NetworkX using nx-cugraph. This involves an internal conversion/transfer of graph data from CPU to GPU memory
-* nx-cugraph (preconvert) - GPU-accelerated NetworkX using nx-cugraph with the graph data pre-converted/transferred to GPU
+`nx-cugraph` is a [NetworkX backend](<https://networkx.org/documentation/stable/reference/utils.html#backends>) that accelerates many popular NetworkX functions using cuGraph and NVIDIA GPUs.
+Users simply [install and enable nx-cugraph](installation.md) to experience GPU speedups.
 
+Let's look at some examples of algorithm speedups comparing CPU-based NetworkX to dispatched versions run on the GPU with nx-cugraph.
 
 ![Ancestors](../images/ancestors.png)
 ![BFS Tree](../images/bfs_tree.png)
@@ -22,46 +14,3 @@ Each chart has three measurements.
 ![Pagerank](../images/pagerank.png)
 ![Single Source Shortest Path](../images/sssp.png)
 ![Weakly Connected Components](../images/wcc.png)
-
-### Command line example
-Open bc_demo.ipy and paste the code below.
-
-```
-import pandas as pd
-import networkx as nx
-
-url = "https://data.rapids.ai/cugraph/datasets/cit-Patents.csv"
-df = pd.read_csv(url, sep=" ", names=["src", "dst"], dtype="int32")
-G = nx.from_pandas_edgelist(df, source="src", target="dst")
-
-%time result = nx.betweenness_centrality(G, k=10)
-```
-Run the command:
-```
-user@machine:/# ipython bc_demo.ipy
-```
-
-You will observe a run time of approximately 7 minutes...more or less depending on your cpu.
-
-Run the command again, this time specifying cugraph as the NetworkX backend.
-```
-user@machine:/# NETWORKX_BACKEND_PRIORITY=cugraph ipython bc_demo.ipy
-```
-This run will be much faster, typically around 20 seconds depending on your GPU.
-```
-user@machine:/# NETWORKX_BACKEND_PRIORITY=cugraph ipython bc_demo.ipy
-```
-There is also an option to cache the graph conversion to GPU. This can dramatically improve performance when running multiple algorithms on the same graph.
-```
-NETWORKX_BACKEND_PRIORITY=cugraph NETWORKX_CACHE_CONVERTED_GRAPHS=True ipython bc_demo.ipy
-```
-
-When running Python interactively, the cugraph backend can be specified as an argument in the algorithm call.
-
-For example:
-```
-nx.betweenness_centrality(cit_patents_graph, k=k, backend="cugraph")
-```
-
-
-The latest list of algorithms supported by nx-cugraph can be found [here](https://github.com/rapidsai/cugraph/blob/main/python/nx-cugraph/README.md#algorithms).
diff --git a/docs/nx-cugraph/source/nx_cugraph/supported-algorithms.rst b/docs/nx-cugraph/source/nx_cugraph/supported-algorithms.rst
new file mode 100644
index 000000000..b21ef7bb6
--- /dev/null
+++ b/docs/nx-cugraph/source/nx_cugraph/supported-algorithms.rst
@@ -0,0 +1,354 @@
+Supported Algorithms
+=====================
+
+The nx-cugraph backend to NetworkX connects
+`pylibcugraph <../../readme_pages/pylibcugraph.md>`_ (cuGraph's low-level Python
+interface to its CUDA-based graph analytics library) and
+`CuPy <https://cupy.dev/>`_ (a GPU-accelerated array library) to NetworkX's
+familiar and easy-to-use API.
+
+Below is the list of algorithms that are currently supported in nx-cugraph.
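+
+Any algorithm listed below can be dispatched to nx-cugraph through the configuration
+described on the "How it Works" page, or per call with the ``backend=`` keyword
+argument. A minimal sketch (the graph and algorithm are chosen only for illustration,
+and nx-cugraph must be installed):
+
+.. code-block:: python
+
+    import networkx as nx
+
+    G = nx.karate_club_graph()
+    # Run one supported algorithm (pagerank) explicitly on the cugraph backend
+    scores = nx.pagerank(G, backend="cugraph")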
+
+
+Algorithms
+----------
+
++-----------------------------+
+| **Centrality**              |
++=============================+
+| betweenness_centrality      |
++-----------------------------+
+| edge_betweenness_centrality |
++-----------------------------+
+| degree_centrality           |
++-----------------------------+
+| in_degree_centrality        |
++-----------------------------+
+| out_degree_centrality       |
++-----------------------------+
+| eigenvector_centrality      |
++-----------------------------+
+| katz_centrality             |
++-----------------------------+
+
++---------------------+
+| **Cluster**         |
++=====================+
+| average_clustering  |
++---------------------+
+| clustering          |
++---------------------+
+| transitivity        |
++---------------------+
+| triangles           |
++---------------------+
+
++--------------------------+
+| **Community**            |
++==========================+
+| louvain_communities      |
++--------------------------+
+
++--------------------------+
+| **Bipartite**            |
++==========================+
+| complete_bipartite_graph |
++--------------------------+
+
++------------------------------------+
+| **Components**                     |
++====================================+
+| connected_components               |
++------------------------------------+
+| is_connected                       |
++------------------------------------+
+| node_connected_component           |
++------------------------------------+
+| number_connected_components        |
++------------------------------------+
+| weakly_connected                   |
++------------------------------------+
+| is_weakly_connected                |
++------------------------------------+
+| number_weakly_connected_components |
++------------------------------------+
+| weakly_connected_components        |
++------------------------------------+
+
++-------------+
+| **Core**    |
++=============+
+| core_number |
++-------------+
+| k_truss     |
++-------------+
+
++-------------+
+| **DAG**     |
++=============+
+| ancestors   |
++-------------+
+| descendants |
++-------------+
+
++--------------------+
+| **Isolate**        |
++====================+
+| is_isolate         |
++--------------------+
+| isolates           |
++--------------------+
+| number_of_isolates |
++--------------------+
+
++-------------------+
+| **Link analysis** |
++===================+
+| hits              |
++-------------------+
+| pagerank          |
++-------------------+
+
++----------------+
+| **Operators**  |
++================+
+| complement     |
++----------------+
+| reverse        |
++----------------+
+
++----------------------+
+| **Reciprocity**      |
++======================+
+| overall_reciprocity  |
++----------------------+
+| reciprocity          |
++----------------------+
+
++---------------------------------------+
+| **Shortest Paths**                    |
++=======================================+
+| has_path                              |
++---------------------------------------+
+| shortest_path                         |
++---------------------------------------+
+| shortest_path_length                  |
++---------------------------------------+
+| all_pairs_shortest_path               |
++---------------------------------------+
+| all_pairs_shortest_path_length        |
++---------------------------------------+
+| bidirectional_shortest_path           |
++---------------------------------------+
+| single_source_shortest_path           |
++---------------------------------------+
+| single_source_shortest_path_length    |
++---------------------------------------+
+| single_target_shortest_path           |
++---------------------------------------+
+| single_target_shortest_path_length    |
++---------------------------------------+
+| all_pairs_bellman_ford_path           |
++---------------------------------------+
+| all_pairs_bellman_ford_path_length    |
++---------------------------------------+
+| all_pairs_dijkstra                    |
++---------------------------------------+
+| all_pairs_dijkstra_path               |
++---------------------------------------+
+| all_pairs_dijkstra_path_length        |
++---------------------------------------+
+| bellman_ford_path                     |
++---------------------------------------+
+| bellman_ford_path_length              |
++---------------------------------------+
+| dijkstra_path                         |
++---------------------------------------+
+| dijkstra_path_length                  |
++---------------------------------------+
+| single_source_bellman_ford            |
++---------------------------------------+
+| single_source_bellman_ford_path       |
++---------------------------------------+
+| single_source_bellman_ford_path_length|
++---------------------------------------+
+| single_source_dijkstra                |
++---------------------------------------+
+| single_source_dijkstra_path           |
++---------------------------------------+
+| single_source_dijkstra_path_length    |
++---------------------------------------+
+
++---------------------------+
+| **Traversal**             |
++===========================+
+| bfs_edges                 |
++---------------------------+
+| bfs_layers                |
++---------------------------+
+| bfs_predecessors          |
++---------------------------+
+| bfs_successors            |
++---------------------------+
+| bfs_tree                  |
++---------------------------+
+| descendants_at_distance   |
++---------------------------+
+| generic_bfs_edges         |
++---------------------------+
+
++---------------------+
+| **Tree**            |
++=====================+
+| is_arborescence     |
++---------------------+
+| is_branching        |
++---------------------+
+| is_forest           |
++---------------------+
+| is_tree             |
++---------------------+
+
+Generators
+------------
+
++-------------------------------+
+| **Classic**                   |
++===============================+
+| barbell_graph                 |
++-------------------------------+
+| circular_ladder_graph         |
++-------------------------------+
+| complete_graph                |
++-------------------------------+
+| complete_multipartite_graph   |
++-------------------------------+
+| cycle_graph                   |
++-------------------------------+
+| empty_graph                   |
++-------------------------------+
+| ladder_graph                  |
++-------------------------------+
+| lollipop_graph                |
++-------------------------------+
+| null_graph                    |
++-------------------------------+
+| path_graph                    |
++-------------------------------+
+| star_graph                    |
++-------------------------------+
+| tadpole_graph                 |
++-------------------------------+
+| trivial_graph                 |
++-------------------------------+
+| turan_graph                   |
++-------------------------------+
+| wheel_graph                   |
++-------------------------------+
+
++-----------------+
+| **Community**   |
++=================+
+| caveman_graph   |
++-----------------+
+
++------------+
+| **Ego**    |
++============+
+| ego_graph  |
++------------+
+
++------------------------------+
+| **Small**                    |
++==============================+
+| bull_graph                   |
++------------------------------+
+| chvatal_graph                |
++------------------------------+
+| cubical_graph                |
++------------------------------+
+| desargues_graph              |
++------------------------------+
+| diamond_graph                |
++------------------------------+
+| dodecahedral_graph           |
++------------------------------+
+| frucht_graph                 |
++------------------------------+
+| heawood_graph                |
++------------------------------+
+| house_graph                  |
++------------------------------+
+| house_x_graph                |
++------------------------------+
+| icosahedral_graph            |
++------------------------------+
+| krackhardt_kite_graph        |
++------------------------------+
+| moebius_kantor_graph         |
++------------------------------+
+| octahedral_graph             |
++------------------------------+
+| pappus_graph                 |
++------------------------------+
+| petersen_graph               |
++------------------------------+
+| sedgewick_maze_graph         |
++------------------------------+
+| tetrahedral_graph            |
++------------------------------+
+| truncated_cube_graph         |
++------------------------------+
+| truncated_tetrahedron_graph  |
++------------------------------+
+| tutte_graph                  |
++------------------------------+
+
++-------------------------------+
+| **Social**                    |
++===============================+
+| davis_southern_women_graph    |
++-------------------------------+
+| florentine_families_graph     |
++-------------------------------+
+| karate_club_graph             |
++-------------------------------+
+| les_miserables_graph          |
++-------------------------------+
+
+Other
+-------
+
++-------------------------+
+| **Classes**             |
++=========================+
+| is_negatively_weighted  |
++-------------------------+
+
++----------------------+
+| **Convert**          |
++======================+
+| from_dict_of_lists   |
++----------------------+
+| to_dict_of_lists     |
++----------------------+
+
++--------------------------+
+| **Convert Matrix**       |
++==========================+
+| from_pandas_edgelist     |
++--------------------------+
+| from_scipy_sparse_array  |
++--------------------------+
+
++-----------------------------------+
+| **Relabel**                       |
++===================================+
+| convert_node_labels_to_integers   |
++-----------------------------------+
+| relabel_nodes                     |
++-----------------------------------+
+
+
+To request nx-cugraph backend support for a NetworkX API that is not listed
+above, visit the `cuGraph GitHub repo <https://github.com/rapidsai/cugraph>`_.
diff --git a/docs/nx-cugraph/source/top_toc.rst b/docs/nx-cugraph/source/top_toc.rst
new file mode 100644
index 000000000..8e31e70ca
--- /dev/null
+++ b/docs/nx-cugraph/source/top_toc.rst
@@ -0,0 +1,13 @@
+.. toctree::
+  :maxdepth: 2
+  :caption: cuGraph documentation Contents:
+  :name: top_toc
+
+  basics/index
+  nx_cugraph/index
+  installation/index
+  tutorials/index
+  graph_support/index
+  wholegraph/index
+  references/index
+  api_docs/index
diff --git a/docs/nx-cugraph/source/tutorials/basic_cugraph.md b/docs/nx-cugraph/source/tutorials/basic_cugraph.md
index 783254724..a0c9ad576 100644
--- a/docs/nx-cugraph/source/tutorials/basic_cugraph.md
+++ b/docs/nx-cugraph/source/tutorials/basic_cugraph.md
@@ -4,8 +4,8 @@
 
 CuGraph is part of [Rapids](https://docs.rapids.ai/user-guide) and has the following system requirements:
  * NVIDIA GPU, Volta architecture or later, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+
- * CUDA 11.2, 11.4, 11.5, 11.8, 12.0 or 12.2
- * Python version 3.9, 3.10, or 3.11
+ * CUDA 11.2, 11.4, 11.5, 11.8, 12.0, 12.2, or 12.5
+ * Python version 3.10, 3.11, or 3.12
  * NetworkX >= version 3.3 or newer in order to use use [NetworkX Configs](https://networkx.org/documentation/stable/reference/backends.html#module-networkx.utils.configs) **This is required for use of nx-cuGraph, [see below](#cugraph-using-networkx-code).**
 
 ## Installation
diff --git a/docs/nx-cugraph/source/tutorials/cugraph_notebooks.md b/docs/nx-cugraph/source/tutorials/cugraph_notebooks.md
index 559ba36e9..6d7840dc3 100644
--- a/docs/nx-cugraph/source/tutorials/cugraph_notebooks.md
+++ b/docs/nx-cugraph/source/tutorials/cugraph_notebooks.md
@@ -55,10 +55,9 @@ Running the example in these notebooks requires:
   * Download via Docker, Conda (See [__Getting Started__](https://rapids.ai/start.html))
 
 * cuGraph is dependent on the latest version of cuDF.  Please install all components of RAPIDS
-* Python 3.8+
-* A system with an NVIDIA GPU:  Pascal architecture or better
+* Python 3.10+
+* A system with an NVIDIA GPU: Volta architecture or newer
 * CUDA 11.4+
-* NVIDIA driver 450.51+
 
 ## Copyright
 
diff --git a/notebooks/demo/accelerating_networkx.ipynb b/notebooks/demo/accelerating_networkx.ipynb
new file mode 100644
index 000000000..1a6c6cfb3
--- /dev/null
+++ b/notebooks/demo/accelerating_networkx.ipynb
@@ -0,0 +1,614 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "R2cpVp2WdOsp"
+      },
+      "source": [
+        "# NetworkX - Easy Graph Analytics\n",
+        "\n",
+        "NetworkX is the most popular library for graph analytics available in Python, or quite possibly any language. To illustrate this, NetworkX was downloaded more than 71 million times in September of 2024 alone, which is roughly 71 times more than the next most popular graph analytics library! [*](https://en.wikipedia.org/wiki/NetworkX) NetworkX has earned this popularity from its very easy-to-use API, the wealth of documentation and examples available, the large (and friendly) community behind it, and its easy installation which requires nothing more than Python.\n",
+        "\n",
+        "However, NetworkX users are familiar with the tradeoff that comes with those benefits. The pure-Python implementation often results in poor performance when graph data starts to reach larger scales, limiting the usefulness of the library for many real-world problems.\n",
+        "\n",
+        "# Accelerated NetworkX - Easy (and fast!) Graph Analytics\n",
+        "\n",
+        "To address the performance problem, NetworkX 3.0 introduced a mechanism to dispatch algorithm calls to alternate implementations. The NetworkX Python API remains the same, but NetworkX will use more capable algorithm implementations provided by one or more backends. This approach means users don't have to give up NetworkX, or even change their code, in order to take advantage of GPU performance."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "xkg10FrNThrK"
+      },
+      "source": [
+        "# Let's Get the Environment Set Up\n",
+        "This notebook will demonstrate NetworkX both with and without GPU acceleration provided by the `nx-cugraph` backend.\n",
+        "\n",
+        "`nx-cugraph` is available as a package installable using `pip`, `conda`, and [from source](https://github.com/rapidsai/nx-cugraph).  Before importing `networkx`, let's install `nx-cugraph` so it can be registered as an available backend by NetworkX when needed.  We'll use `pip` to install.\n",
+        "\n",
+        "NOTES:\n",
+        "* `nx-cugraph` requires a compatible NVIDIA GPU, NVIDIA CUDA and associated drivers, and a supported OS. Details about these and other installation prerequisites can be seen [here](https://docs.rapids.ai/install#system-req).\n",
+        "* The `nx-cugraph` package is currently hosted by NVIDIA and therefore the `--extra-index-url` option must be used.\n",
+        "* `nx-cugraph` is supported on specific 11.x and 12.x CUDA versions, and the major version number must be known in order to install the correct build (this is determined automatically when using `conda`).\n",
+        "\n",
+        "To find the CUDA major version on your system, run the following command:"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "NMFwzc1I95BS"
+      },
+      "outputs": [],
+      "source": [
+        "!nvcc --version"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "i91Yj-yZ-nGS"
+      },
+      "source": [
+        "From the above output we can see we're using CUDA 12.x so we'll be installing `nx-cugraph-cu12`. If we were using CUDA 11.x, the package name would be `nx-cugraph-cu11`. We'll also be adding `https://pypi.nvidia.com` as an `--extra-index-url`:"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "mYYN9EpnWphu"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install nx-cugraph-cu12 --extra-index-url=https://pypi.nvidia.com"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "0h1K-7tI_AZH"
+      },
+      "source": [
+        "Of course, we'll also be using `networkx`, which is already provided in the Colab environment. This notebook will be using features added in version 3.3, so we'll import it here to verify we have a compatible version."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "YTV0ZTME2tV6"
+      },
+      "outputs": [],
+      "source": [
+        "import networkx as nx\n",
+        "nx.__version__"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "UiZKOa3WC7be"
+      },
+      "source": [
+        "# Let's Start with Something Simple\n",
+        "\n",
+        "To begin, we'll compare NetworkX results without a backend to results of the same algorithm using the `nx-cugraph` backend on a small graph.  `nx.karate_club_graph()` returns an instance of the famous example graph consisting of 34 nodes and 78 edges from Zachary's paper, described [here](https://en.wikipedia.org/wiki/Zachary%27s_karate_club)."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "3atL3tI0frYm"
+      },
+      "source": [
+        "## Betweenness Centrality\n",
+        "[Betweenness Centrality](https://en.wikipedia.org/wiki/Betweenness_centrality) is a graph algorithm that computes a centrality score for each node (`v`) based on how many of the shortest paths between pairs of nodes in the graph pass through `v`. A higher centrality score represents a node that \"connects\" other nodes in a network more than a node with a lower score does.\n",
+        "\n",
+        "First, let's create a NetworkX Graph instance of the Karate Club graph and inspect it."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "JSw7EZ46-kRu"
+      },
+      "outputs": [],
+      "source": [
+        "G = nx.karate_club_graph()\n",
+        "G.number_of_nodes(), G.number_of_edges()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "_-E17u2gKgbC"
+      },
+      "source": [
+        "Next, let's run betweenness centrality and save the results.  Because the Karate Club graph is so small, this should not take long."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "qjxXXKJhKQ4s"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nx_bc_results = nx.betweenness_centrality(G)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ClrR3z9XMfLr"
+      },
+      "source": [
+        "Now, let's run the same algorithm on the same data using the `nx-cugraph` backend.\n",
+        "\n",
+        "There are several ways to instruct NetworkX to use a particular backend instead of the default implementation. Here, we will use the `config` API, which was added in NetworkX version 3.3.\n",
+        "\n",
+        "The following two lines set the backend to \"cugraph\" and enable graph conversion caching.\n",
+        "\n",
+        "Some notes:\n",
+        "* The standard convention for NetworkX backends is to name the package with a `nx-` prefix to denote that these are packages intended to be used with NetworkX, but the `nx-` prefix is not included when referring to them in NetworkX API calls. Here, `nx-cugraph` is the name of the backend package, and `\"cugraph\"` is the name NetworkX will use to refer to it.\n",
+        "* NetworkX can use multiple backends! `nx.config.backend_priority` is a list that can contain several backends, ordered based on priority. If a backend in the list cannot run a particular algorithm (either because it isn't supported in the backend, the algorithm doesn't support a particular option, or some other reason), NetworkX will try the next backend in the list. If no specified backend is able to run the algorithm, NetworkX will fall back to the default implementation.\n",
+        "* Many backends have their own data structures for representing an input graph, often optimized for that backend's implementation. Prior to running a backend algorithm, NetworkX will have the backend convert the standard NetworkX Graph instance to the backend-specific type. This conversion can be expensive, and rather than repeat it as part of each algorithm call, NetworkX can cache the conversion so it can be skipped on future calls if the graph doesn't change. This caching can save significant time and improve overall performance."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "oFHwNqqsNsqS"
+      },
+      "outputs": [],
+      "source": [
+        "nx.config.backend_priority=[\"cugraph\"]  # NETWORKX_BACKEND_PRIORITY=cugraph\n",
+        "nx.config.cache_converted_graphs=True   # NETWORKX_CACHE_CONVERTED_GRAPHS=True"
+      ]
+    },
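To make the multi-backend note above concrete, here is a hedged sketch of a priority list naming more than one backend. The second entry (`"parallel"`) is only an assumption that the separate nx-parallel backend package is installed; drop it otherwise.

```python
# Sketch: backends earlier in the list are tried first; if one cannot handle a call,
# NetworkX moves to the next, and finally falls back to its default implementation.
nx.config.backend_priority = ["cugraph", "parallel"]  # "parallel" assumes nx-parallel is installed

# The backend keyword argument overrides the priority list for a single call.
scores = nx.betweenness_centrality(G, backend="cugraph")
```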
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "HrUeWRRQRzFP"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nxcg_bc_results = nx.betweenness_centrality(G)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "z1hxut3GTj5A"
+      },
+      "source": [
+        "You may have noticed that using the `nx-cugraph` backend resulted in a slightly slower execution time. This is not surprising when working with a graph this small, since the overhead of converting the graph for the first time and launching the algorithm kernel on the GPU is actually significantly more than the computation time itself.  We'll see later that this overhead is negligible when compared to the time saved when running on a GPU for larger graphs.\n",
+        "\n",
+        "Since we've enabled graph conversion caching, we can see that if we re-run the same call the execution time is noticeably shorter."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "7a0XvpUOr9Ju"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nxcg_bc_results = nx.betweenness_centrality(G)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ppjE5J5RscOe"
+      },
+      "source": [
+        "Notice the warning above about using the cache. This will only be raised **once** per graph instance (it can also be easily disabled), but its purpose is to point out that the cache should not be used if the Graph object will have its attribute dictionary modified directly. In this case and many others, we won't be modifying the dictionaries directly. Instead, we will use APIs such as `nx.set_node_attributes` which properly clear the cache, so it's safe for us to use the cache. Because of that, we'll disable the warning so we don't see it on other graphs in this session."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Namb5JLvwS-q"
+      },
+      "outputs": [],
+      "source": [
+        "import warnings\n",
+        "warnings.filterwarnings(\"ignore\", message=\"Using cached graph for 'cugraph' backend\")"
+      ]
+    },
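If an attribute dictionary is ever edited in place anyway, the cached conversion can become stale. Below is a small sketch of handling that manually; it assumes the graph's `__networkx_cache__` dictionary is where NetworkX keeps backend conversion caches in recent versions, so treat it as illustrative rather than authoritative.

```python
# Preferred: use APIs such as nx.set_node_attributes, which keep the cache consistent.
nx.set_node_attributes(G, {0: 1.0}, "score")

# If a dict was modified directly, drop any cached conversions so the cugraph backend
# re-converts the graph on the next call (assumes __networkx_cache__ holds the cache).
G.nodes[0]["score"] = 2.0
G.__networkx_cache__.clear()
```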
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "BzGAphcILFsT"
+      },
+      "source": [
+        "Smaller graphs are also easy to visualize with NetworkX's plotting utilities. The flexibility of NetworkX's `Graph` instances makes it trivial to add the betweenness centrality scores back to the graph object as node attributes. This will allow us to use those values for the visualization.\n",
+        "\n",
+        "In this case, we'll create new attributes for each node called \"nx_bc\" for the default NetworkX results, and \"nxcg_bc\" for the nx-cugraph results. We'll use those values to assign the color for each node and plot two graphs side-by-side. This will make it easy to visually validate that the nodes with the higher centrality scores for both implementations match and do indeed appear to be more \"central\" to other nodes."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "1coV6ZfcUoqI"
+      },
+      "outputs": [],
+      "source": [
+        "nx.set_node_attributes(G, nx_bc_results, \"nx_bc\")\n",
+        "nx.set_node_attributes(G, nxcg_bc_results, \"nxcg_bc\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Sba2iYJgLoN2"
+      },
+      "outputs": [],
+      "source": [
+        "# Configure plot size and layout/position for each node\n",
+        "import matplotlib.pyplot as plt\n",
+        "plt.rcParams['figure.figsize'] = [12, 8]\n",
+        "pos = nx.spring_layout(G)\n",
+        "\n",
+        "# Assign colors for each set of betweenness centrality results\n",
+        "nx_colors = [G.nodes[n][\"nx_bc\"] for n in G.nodes()]\n",
+        "nxcg_colors = [G.nodes[n][\"nxcg_bc\"] for n in G.nodes()]\n",
+        "\n",
+        "# Plot the graph and color each node corresponding to NetworkX betweenness centrality values\n",
+        "plt.subplot(1, 2, 1)\n",
+        "nx.draw(G, pos=pos, with_labels=True, node_color=nx_colors)\n",
+        "\n",
+        "# Plot the graph and color each node corresponding to nx-cugraph betweenness centrality values\n",
+        "plt.subplot(1, 2, 2)\n",
+        "nx.draw(G, pos=pos, with_labels=True, node_color=nxcg_colors)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "dJXH4Zn5VNSg"
+      },
+      "source": [
+        "As we can see, the same two nodes (`0` and `33`) are the two most central in both plots, followed by `2`, `31`, and `32`.\n",
+        "\n",
+        "## PageRank\n",
+        "Another popular algorithm is [PageRank](https://en.wikipedia.org/wiki/PageRank). PageRank also assigns scores to each node, but these scores are based on analyzing links to each node to determine relative \"importance\" within the graph.\n",
+        "\n",
+        "Let's update the config to use the default NetworkX implementation and run `nx.pagerank`."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "9CdYNk62E1v_"
+      },
+      "outputs": [],
+      "source": [
+        "nx.config.backend_priority=[]"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Jo39YxVmYolq"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nx_pr_results = nx.pagerank(G)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "sV6dM8ToZDiC"
+      },
+      "source": [
+        "We could set `nx.config.backend_priority` again to list `\"cugraph\"` as the backend, but let's instead show how the `backend` kwarg can be used to override the priority list and force a specific backend to be used."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "oMSvQVGKY0rn"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nxcg_pr_results = nx.pagerank(G, backend=\"cugraph\")"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ZGux_8xFZneI"
+      },
+      "source": [
+        "In this example, instead of plotting the graph to show that the results are identical, we can compare them directly using the saved values from both runs."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "RcmtdFy4Zw7p"
+      },
+      "outputs": [],
+      "source": [
+        "sorted(nx_pr_results) == sorted(nxcg_pr_results)"
+      ]
+    },
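Note that sorting a dict iterates over its keys, so the check above confirms both runs produced scores for the same set of nodes. For a value-level check, a hedged sketch using a small floating-point tolerance (the tolerances chosen here are arbitrary) could look like this:

```python
import math

# Compare PageRank scores node-by-node, allowing for small numerical differences
# between the CPU and GPU implementations.
assert nx_pr_results.keys() == nxcg_pr_results.keys()
all(
    math.isclose(nx_pr_results[n], nxcg_pr_results[n], rel_tol=1e-6, abs_tol=1e-9)
    for n in nx_pr_results
)
```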
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "mefjUEAnZ4pq"
+      },
+      "source": [
+        "# Working with Bigger Data"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "yLY-yl6PuNYo"
+      },
+      "source": [
+        "Now we'll look at a larger dataset from https://snap.stanford.edu/data/cit-Patents.html, which contains citations between U.S. patents granted from January 1, 1963 to December 30, 1999. The dataset represents 16.5M citations (edges) between 3.77M patents (nodes).\n",
+        "\n",
+        "This will demonstrate that data of this size starts to push the limits of the default pure-Python NetworkX implementation."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "lyYF0LbtFwjh"
+      },
+      "outputs": [],
+      "source": [
+        "# The locale encoding may have been modified from the plots above, reset here to run shell commands\n",
+        "import locale\n",
+        "locale.getpreferredencoding = lambda: \"UTF-8\"\n",
+        "!wget https://data.rapids.ai/cugraph/datasets/cit-Patents.csv  # Skip if cit-Patents.csv already exists.\n",
+        "# !wget https://snap.stanford.edu/data/cit-Patents.txt.gz  # Skip if cit-Patents.txt.gz already exists."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "kjGINYphQSQ2"
+      },
+      "outputs": [],
+      "source": [
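+        "# Load the cudf.pandas extension so the pandas calls below are accelerated by cuDF on the GPU when possible\n",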
+        "%load_ext cudf.pandas\n",
+        "import pandas as pd"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "iV4DieGZOalc"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "df = pd.read_csv(\"cit-Patents.csv\",\n",
+        "                sep=\" \",\n",
+        "                names=[\"src\", \"dst\"],\n",
+        "                dtype=\"int32\",\n",
+        ")\n",
+        "# df = pd.read_csv(\"cit-Patents.txt.gz\",\n",
+        "#                  compression=\"gzip\",\n",
+        "#                  skiprows=4,\n",
+        "#                  sep=\"\\t\",\n",
+        "#                  names=[\"src\", \"dst\"],\n",
+        "#                  dtype=\"int32\",\n",
+        "# )"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "PREA67u4eKat"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "G = nx.from_pandas_edgelist(df, source=\"src\", target=\"dst\")\n",
+        "G.number_of_nodes(), G.number_of_edges()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "NcsUxBqpu4zY"
+      },
+      "source": [
+        "By default, `nx.betweenness_centrality` will perform an all-pairs shortest path analysis when determining the centrality scores for each node. However, due to the much larger size of this graph, determining the shortest path for all pairs of nodes in the graph is not feasible. Instead, we'll use the parameter `k` to limit the number of shortest path computations used for determining the centrality scores, at the expense of accuracy. As we'll see when using a dataset this size with `nx.betweenness_centrality`, we have to limit `k` to `1`, which is not practical but is sufficient here for demonstration purposes (since anything larger than `1` will result in many minutes of execution time).\n",
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "gNDWbj3kAk3j"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "bc_results = nx.betweenness_centrality(G, k=1)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "NB8xmxMd1PlX"
+      },
+      "source": [
+        "Now we'll configure NetworkX to use the `nx-cugraph` backend (again, using the name convention that drops the package name's `nx-` prefix) and run the same call. Because this is a Graph that `nx-cugraph` hasn't seen before, the runtime will include the time to convert and cache a GPU-based graph."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "xUYNG1xhvbWc"
+      },
+      "outputs": [],
+      "source": [
+        "nx.config.backend_priority = [\"cugraph\"]"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "cmK8ZuQGvfPo"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "bc_results = nx.betweenness_centrality(G, k=1)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "vdHb1YXP15TZ"
+      },
+      "source": [
+        "Let's run betweenness centrality again, now with a more useful number of samples by setting `k=100`."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "fKjIrzL-vrGS"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "bc_results = nx.betweenness_centrality(G, k=100)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "QeMcrAX2HZSM"
+      },
+      "source": [
+        "Let's also run PageRank on the same dataset to compare."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "gR8ID6ekHgHt"
+      },
+      "outputs": [],
+      "source": [
+        "nx.config.backend_priority = []"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "rTFuvX5wb_c1"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nx_pr_results = nx.pagerank(G)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "8sJx9aeJV9hv"
+      },
+      "outputs": [],
+      "source": [
+        "%%time\n",
+        "nxcg_pr_results = nx.pagerank(G, backend=\"cugraph\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "wGOVQ6ZyY4Ih"
+      },
+      "outputs": [],
+      "source": [
+        "sorted(nx_pr_results) == sorted(nxcg_pr_results)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "k2DfAaZaDIBj"
+      },
+      "source": [
+        "---\n",
+        "<i>\n",
+        "Information on the U.S. Patent Citation Network dataset used in this notebook is as follows:\n",
+        "<br>Authors: Jure Leskovec and Andrej Krevl\n",
+        "<br>Title: SNAP Datasets, Stanford Large Network Dataset Collection\n",
+        "<br>URL: http://snap.stanford.edu/data\n",
+        "<br>Date: June 2014\n",
+        "</i>\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "gpuType": "T4",
+      "provenance": []
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.12.4"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
diff --git a/notebooks/demo/mg_pagerank.ipynb b/notebooks/demo/mg_pagerank.ipynb
index bb3330484..e3314f80b 100644
--- a/notebooks/demo/mg_pagerank.ipynb
+++ b/notebooks/demo/mg_pagerank.ipynb
@@ -219,250 +219,250 @@
      "text": [
       "2023-05-12 09:25:01,974 - distributed.sizeof - WARNING - Sizeof calculation failed. Defaulting to 0.95 MiB\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
       "    return sizeof(obj)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
       "    return meth(arg, *args, **kwargs)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
       "    + df._index.memory_usage()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
       "    if self.levels:\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
       "    self._compute_levels_and_codes()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
       "    code, cats = cudf.Series._from_data({None: col}).factorize()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
       "    return cudf.core.algorithms.factorize(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
       "    labels = values._column._label_encoding(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
       "    order = order.take(left_gather_map, check_bounds=False).argsort()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
       "    return self.as_frame()._get_sorted_inds(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
       "    return libcudf.sort.order_by(to_sort, ascending, na_position)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
       "  File \"sort.pyx\", line 141, in cudf._lib.sort.order_by\n",
-      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniconda3/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
+      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniforge/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
       "2023-05-12 09:25:01,976 - distributed.sizeof - WARNING - Sizeof calculation failed. Defaulting to 0.95 MiB\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
       "    return sizeof(obj)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
       "    return meth(arg, *args, **kwargs)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
       "    + df._index.memory_usage()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
       "    if self.levels:\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
       "    self._compute_levels_and_codes()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
       "    code, cats = cudf.Series._from_data({None: col}).factorize()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
       "    return cudf.core.algorithms.factorize(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
       "    labels = values._column._label_encoding(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
       "    order = order.take(left_gather_map, check_bounds=False).argsort()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
       "    return self.as_frame()._get_sorted_inds(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
       "    return libcudf.sort.order_by(to_sort, ascending, na_position)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
       "  File \"sort.pyx\", line 141, in cudf._lib.sort.order_by\n",
-      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniconda3/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
+      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniforge/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
       "2023-05-12 09:25:03,767 - distributed.sizeof - WARNING - Sizeof calculation failed. Defaulting to 0.95 MiB\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
       "    return sizeof(obj)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
       "    return meth(arg, *args, **kwargs)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
       "    + df._index.memory_usage()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
       "    if self.levels:\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
       "    self._compute_levels_and_codes()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
       "    code, cats = cudf.Series._from_data({None: col}).factorize()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
       "    return cudf.core.algorithms.factorize(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
       "    labels = values._column._label_encoding(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
       "    order = order.take(left_gather_map, check_bounds=False).argsort()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
       "    return self.as_frame()._get_sorted_inds(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
       "    return libcudf.sort.order_by(to_sort, ascending, na_position)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
       "  File \"sort.pyx\", line 141, in cudf._lib.sort.order_by\n",
-      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniconda3/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
+      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniforge/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
       "2023-05-12 09:25:03,768 - distributed.sizeof - WARNING - Sizeof calculation failed. Defaulting to 0.95 MiB\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/sizeof.py\", line 17, in safe_sizeof\n",
       "    return sizeof(obj)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/utils.py\", line 642, in __call__\n",
       "    return meth(arg, *args, **kwargs)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask_cudf/backends.py\", line 430, in sizeof_cudf_dataframe\n",
       "    + df._index.memory_usage()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 1594, in memory_usage\n",
       "    if self.levels:\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 605, in levels\n",
       "    self._compute_levels_and_codes()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/multiindex.py\", line 748, in _compute_levels_and_codes\n",
       "    code, cats = cudf.Series._from_data({None: col}).factorize()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/single_column_frame.py\", line 311, in factorize\n",
       "    return cudf.core.algorithms.factorize(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/algorithms.py\", line 138, in factorize\n",
       "    labels = values._column._label_encoding(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1385, in _label_encoding\n",
       "    order = order.take(left_gather_map, check_bounds=False).argsort()\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1101, in argsort\n",
       "    return self.as_frame()._get_sorted_inds(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 1572, in _get_sorted_inds\n",
       "    return libcudf.sort.order_by(to_sort, ascending, na_position)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
       "  File \"sort.pyx\", line 141, in cudf._lib.sort.order_by\n",
-      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniconda3/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
+      "MemoryError: std::bad_alloc: out_of_memory: CUDA error at: /home/dacosta/miniforge/envs/cugraph_0411/include/rmm/mr/device/cuda_memory_resource.hpp\n",
       "2023-05-12 09:25:03,820 - distributed.worker - ERROR - Could not deserialize task ('len-chunk-319fe46af5510615b2fae86c6e732896-841a12bf4568ebb80eb2030cc4d9651d', 1)\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2923, in loads_function\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2923, in loads_function\n",
       "    result = cache_loads[bytes_object]\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/collections.py\", line 24, in __getitem__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/collections.py\", line 24, in __getitem__\n",
       "    value = super().__getitem__(key)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/collections/__init__.py\", line 1106, in __getitem__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/collections/__init__.py\", line 1106, in __getitem__\n",
       "    raise KeyError(key)\n",
       "KeyError: b'\\x80\\x05\\x95>\\x0b\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x11dask.optimization\\x94\\x8c\\x10SubgraphCallable\\x94\\x93\\x94(}\\x94(\\x8cKlen-chunk-319fe46af5510615b2fae86c6e732896-841a12bf4568ebb80eb2030cc4d9651d\\x94\\x8cZassign-getitem-len-chunk-319fe46af5510615b2fae86c6e732896-841a12bf4568ebb80eb2030cc4d9651d\\x94\\x8c*rename-01db283bd79fee66f232920c8dc6b55e_.0\\x94\\x8c;getitem-to_frame-rename-01db283bd79fee66f232920c8dc6b55e_.0\\x94\\x8c+getitem-3499fd71ac25ebbc1a06991edea6067c_.0\\x94\\x8c\\t_operator\\x94\\x8c\\x07getitem\\x94\\x93\\x94\\x8c/reset_index-f4c18304ca92859ccd09f44cf89b4b43_.0\\x94\\x8c\\x13__dask_blockwise__1\\x94\\x87\\x94h\\x0c(\\x8c\\ndask.utils\\x94\\x8c\\x05apply\\x94\\x93\\x94h\\x0f\\x8c\\x0cmethodcaller\\x94\\x93\\x94\\x8c\\x0breset_index\\x94\\x85\\x94R\\x94]\\x94\\x8c\\x13__dask_blockwise__5\\x94a\\x8c\\x08builtins\\x94\\x8c\\x04dict\\x94\\x93\\x94]\\x94]\\x94(\\x8c\\x04drop\\x94\\x89ea\\x86\\x94t\\x94h\\x07(h\\x11\\x8c\\x13dask.dataframe.core\\x94\\x8c\\x11apply_and_enforce\\x94\\x93\\x94]\\x94((h\\x11h#]\\x94h\\x0bh\\x0c\\x8c\\x13__dask_blockwise__0\\x94\\x87\\x94ah\\x1b]\\x94(]\\x94(\\x8c\\x05_func\\x94h\\x13\\x8c\\x08to_frame\\x94\\x85\\x94R\\x94e]\\x94(\\x8c\\x05_meta\\x94\\x8c\\x08builtins\\x94\\x8c\\x07getattr\\x94\\x93\\x94\\x8c\\x13cudf.core.dataframe\\x94\\x8c\\tDataFrame\\x94\\x93\\x94\\x8c\\x10host_deserialize\\x94\\x86\\x94R\\x94}\\x94(\\x8c\\x0ftype-serialized\\x94C0\\x80\\x04\\x95%\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x13cudf.core.dataframe\\x94\\x8c\\tDataFrame\\x94\\x93\\x94.\\x94\\x8c\\x0ccolumn_names\\x94C\\x14\\x80\\x04\\x95\\t\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x03src\\x94\\x85\\x94.\\x94\\x8c\\x07columns\\x94}\\x94(\\x8c\\x0ftype-serialized\\x94C=\\x80\\x04\\x952\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x1acudf.core.column.numerical\\x94\\x8c\\x0fNumericalColumn\\x94\\x93\\x94.\\x94\\x8c\\x05dtype\\x94CB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i4\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94\\x8c\\x18dtype-is-cudf-serialized\\x94\\x89\\x8c\\x04data\\x94}\\x94(\\x8c\\x0ftype-serialized\\x94CI\\x80\\x04\\x95>\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c!cudf.core.buffer.spillable_buffer\\x94\\x8c\\x14SpillableBufferSlice\\x94\\x93\\x94.\\x94\\x8c\\x0bframe_count\\x94K\\x01u\\x8c\\x04mask\\x94}\\x94(hGCD\\x80\\x04\\x959\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c!cudf.core.buffer.spillable_buffer\\x94\\x8c\\x0fSpillableBuffer\\x94\\x93\\x94.\\x94hIK\\x01u\\x8c\\x04size\\x94K\\x00hIK\\x02u\\x85\\x94\\x8c\\x05index\\x94}\\x94(\\x8c\\x0cindex_column\\x94}\\x94(\\x8c\\x05start\\x94K\\x00\\x8c\\x04stop\\x94K\\x00\\x8c\\x04step\\x94K\\x01u\\x8c\\x04name\\x94C\\x04\\x80\\x04N.\\x94hBCB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i8\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94\\x8c\\x0ftype-serialized\\x94C-\\x80\\x04\\x95\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x0fcudf.core.index\\x94\\x8c\\nRangeIndex\\x94\\x93\\x94.\\x94hIK\\x00u\\x8c\\x11index_frame_count\\x94K\\x00\\x8c\\x07is-cuda\\x94]\\x94(\\x88\\x88e\\x8c\\x07lengths\\x94]\\x94(K\\x00K\\x00e\\x8c\\twriteable\\x94NN\\x86\\x94u]\\x94(\\x8c\\x12numpy.core.numeric\\x94\\x8c\\x0b_frombuffer\\x94\\x93\\x94(C\\x00\\x94\\x8c\\x05numpy\\x94hB\\x93\\x94\\x8c\\x02u1\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03
\\x8c\\x01|\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94bK\\x00\\x85\\x94\\x8c\\x01C\\x94t\\x94R\\x94he(C\\x00\\x94hkK\\x00\\x85\\x94hot\\x94R\\x94e\\x86\\x94R\\x94ee\\x86\\x94t\\x94\\x8c\\x13__dask_blockwise__2\\x94eh\\x1b]\\x94(]\\x94(h*h\\x13\\x8c\\x06rename\\x94\\x85\\x94R\\x94e]\\x94(h/h2h5h6\\x86\\x94R\\x94}\\x94(h:C0\\x80\\x04\\x95%\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x13cudf.core.dataframe\\x94\\x8c\\tDataFrame\\x94\\x93\\x94.\\x94h<C\\x14\\x80\\x04\\x95\\t\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x03src\\x94\\x85\\x94.\\x94h>}\\x94(h@C=\\x80\\x04\\x952\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x1acudf.core.column.numerical\\x94\\x8c\\x0fNumericalColumn\\x94\\x93\\x94.\\x94hBCB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i4\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94hD\\x89hE}\\x94(hGCI\\x80\\x04\\x95>\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c!cudf.core.buffer.spillable_buffer\\x94\\x8c\\x14SpillableBufferSlice\\x94\\x93\\x94.\\x94hIK\\x01uhMK\\x00hIK\\x01u\\x85\\x94hO}\\x94(hQ}\\x94(hSK\\x00hTK\\x00hUK\\x01uhVC\\x04\\x80\\x04N.\\x94hBCB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i8\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94hYC-\\x80\\x04\\x95\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x0fcudf.core.index\\x94\\x8c\\nRangeIndex\\x94\\x93\\x94.\\x94hIK\\x00uh[K\\x00h\\\\]\\x94\\x88ah^]\\x94K\\x00ah`N\\x85\\x94u]\\x94he(C\\x00\\x94hkK\\x00\\x85\\x94hot\\x94R\\x94a\\x86\\x94R\\x94e]\\x94(h>h\\x1b]\\x94]\\x94(\\x8c\\x03src\\x94h\\x9eea\\x86\\x94ee\\x86\\x94t\\x94h\\x05(h\\x11h!\\x8c\\x10_reduction_chunk\\x94\\x93\\x94]\\x94h\\x0b(\\x8c\\x16dask.dataframe.methods\\x94\\x8c\\x06assign\\x94\\x93\\x94h\\x06h\\rh\\x08t\\x94h&\\x87\\x94ah\\x1b]\\x94]\\x94(\\x8c\\taca_chunk\\x94h0\\x8c\\x03len\\x94\\x93\\x94ea\\x86\\x94t\\x94\\x8c\\x13__dask_blockwise__0\\x94h\\x9e\\x8c\\x13__dask_blockwise__1\\x94\\x8c\\x03dst\\x94\\x8c\\x13__dask_blockwise__2\\x94N\\x8c\\x13__dask_blockwise__3\\x94\\x8c)to_frame-804980ae30b71d28f0a6bd3d5b7610f9\\x94\\x8c\\x13__dask_blockwise__4\\x94\\x8c(getitem-15414b72be12e28054238b44933937ab\\x94\\x8c\\x13__dask_blockwise__6\\x94\\x8c3cudf-aggregate-agg-c50c2d97de169ca4f41e43a92a042630\\x94uh\\x04\\x8c\\x13__dask_blockwise__5\\x94\\x85\\x94\\x8c6subgraph_callable-b4ca530e-8895-432e-b553-40a7b5892ab2\\x94t\\x94R\\x94.'\n",
       "\n",
       "During handling of the above exception, another exception occurred:\n",
       "\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2244, in execute\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2244, in execute\n",
       "    function, args, kwargs = await self._maybe_deserialize_task(ts)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2216, in _maybe_deserialize_task\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2216, in _maybe_deserialize_task\n",
       "    function, args, kwargs = _deserialize(*ts.run_spec)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2937, in _deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2937, in _deserialize\n",
       "    function = loads_function(function)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2925, in loads_function\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2925, in loads_function\n",
       "    result = pickle.loads(bytes_object)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/protocol/pickle.py\", line 96, in loads\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/protocol/pickle.py\", line 96, in loads\n",
       "    return pickle.loads(x)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 176, in host_deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 176, in host_deserialize\n",
       "    obj = cls.device_deserialize(header, frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 130, in device_deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 130, in device_deserialize\n",
       "    return typ.deserialize(header, frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/dataframe.py\", line 1019, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/dataframe.py\", line 1019, in deserialize\n",
       "    obj = super().deserialize(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 106, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 106, in deserialize\n",
       "    columns = deserialize_columns(header[\"columns\"], frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 2450, in deserialize_columns\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 2450, in deserialize_columns\n",
       "    colobj = col_typ.deserialize(meta, frames[:col_frame_count])\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1216, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1216, in deserialize\n",
       "    data, frames = unpack(header[\"data\"], frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1204, in unpack\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1204, in unpack\n",
       "    obj = klass.deserialize(header, frames[:count])\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 574, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 574, in deserialize\n",
       "    return SpillableBuffer.deserialize(header, frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/buffer.py\", line 335, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/buffer.py\", line 335, in deserialize\n",
       "    return cls._from_device_memory(frame)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 235, in _from_device_memory\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 235, in _from_device_memory\n",
       "    ret._finalize_init(ptr_desc={\"type\": \"gpu\"}, exposed=exposed)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 206, in _finalize_init\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 206, in _finalize_init\n",
       "    raise ValueError(\n",
       "ValueError: cannot create <class 'cudf.core.buffer.spillable_buffer.SpillableBuffer'> without a global spill manager\n",
       "2023-05-12 09:25:03,817 - distributed.worker - ERROR - Could not deserialize task ('len-chunk-319fe46af5510615b2fae86c6e732896-841a12bf4568ebb80eb2030cc4d9651d', 0)\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2923, in loads_function\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2923, in loads_function\n",
       "    result = cache_loads[bytes_object]\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/collections.py\", line 24, in __getitem__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/collections.py\", line 24, in __getitem__\n",
       "    value = super().__getitem__(key)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/collections/__init__.py\", line 1106, in __getitem__\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/collections/__init__.py\", line 1106, in __getitem__\n",
       "    raise KeyError(key)\n",
       "KeyError: b'\\x80\\x05\\x95>\\x0b\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x11dask.optimization\\x94\\x8c\\x10SubgraphCallable\\x94\\x93\\x94(}\\x94(\\x8cKlen-chunk-319fe46af5510615b2fae86c6e732896-841a12bf4568ebb80eb2030cc4d9651d\\x94\\x8cZassign-getitem-len-chunk-319fe46af5510615b2fae86c6e732896-841a12bf4568ebb80eb2030cc4d9651d\\x94\\x8c*rename-01db283bd79fee66f232920c8dc6b55e_.0\\x94\\x8c;getitem-to_frame-rename-01db283bd79fee66f232920c8dc6b55e_.0\\x94\\x8c+getitem-3499fd71ac25ebbc1a06991edea6067c_.0\\x94\\x8c\\t_operator\\x94\\x8c\\x07getitem\\x94\\x93\\x94\\x8c/reset_index-f4c18304ca92859ccd09f44cf89b4b43_.0\\x94\\x8c\\x13__dask_blockwise__1\\x94\\x87\\x94h\\x0c(\\x8c\\ndask.utils\\x94\\x8c\\x05apply\\x94\\x93\\x94h\\x0f\\x8c\\x0cmethodcaller\\x94\\x93\\x94\\x8c\\x0breset_index\\x94\\x85\\x94R\\x94]\\x94\\x8c\\x13__dask_blockwise__5\\x94a\\x8c\\x08builtins\\x94\\x8c\\x04dict\\x94\\x93\\x94]\\x94]\\x94(\\x8c\\x04drop\\x94\\x89ea\\x86\\x94t\\x94h\\x07(h\\x11\\x8c\\x13dask.dataframe.core\\x94\\x8c\\x11apply_and_enforce\\x94\\x93\\x94]\\x94((h\\x11h#]\\x94h\\x0bh\\x0c\\x8c\\x13__dask_blockwise__0\\x94\\x87\\x94ah\\x1b]\\x94(]\\x94(\\x8c\\x05_func\\x94h\\x13\\x8c\\x08to_frame\\x94\\x85\\x94R\\x94e]\\x94(\\x8c\\x05_meta\\x94\\x8c\\x08builtins\\x94\\x8c\\x07getattr\\x94\\x93\\x94\\x8c\\x13cudf.core.dataframe\\x94\\x8c\\tDataFrame\\x94\\x93\\x94\\x8c\\x10host_deserialize\\x94\\x86\\x94R\\x94}\\x94(\\x8c\\x0ftype-serialized\\x94C0\\x80\\x04\\x95%\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x13cudf.core.dataframe\\x94\\x8c\\tDataFrame\\x94\\x93\\x94.\\x94\\x8c\\x0ccolumn_names\\x94C\\x14\\x80\\x04\\x95\\t\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x03src\\x94\\x85\\x94.\\x94\\x8c\\x07columns\\x94}\\x94(\\x8c\\x0ftype-serialized\\x94C=\\x80\\x04\\x952\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x1acudf.core.column.numerical\\x94\\x8c\\x0fNumericalColumn\\x94\\x93\\x94.\\x94\\x8c\\x05dtype\\x94CB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i4\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94\\x8c\\x18dtype-is-cudf-serialized\\x94\\x89\\x8c\\x04data\\x94}\\x94(\\x8c\\x0ftype-serialized\\x94CI\\x80\\x04\\x95>\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c!cudf.core.buffer.spillable_buffer\\x94\\x8c\\x14SpillableBufferSlice\\x94\\x93\\x94.\\x94\\x8c\\x0bframe_count\\x94K\\x01u\\x8c\\x04mask\\x94}\\x94(hGCD\\x80\\x04\\x959\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c!cudf.core.buffer.spillable_buffer\\x94\\x8c\\x0fSpillableBuffer\\x94\\x93\\x94.\\x94hIK\\x01u\\x8c\\x04size\\x94K\\x00hIK\\x02u\\x85\\x94\\x8c\\x05index\\x94}\\x94(\\x8c\\x0cindex_column\\x94}\\x94(\\x8c\\x05start\\x94K\\x00\\x8c\\x04stop\\x94K\\x00\\x8c\\x04step\\x94K\\x01u\\x8c\\x04name\\x94C\\x04\\x80\\x04N.\\x94hBCB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i8\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94\\x8c\\x0ftype-serialized\\x94C-\\x80\\x04\\x95\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x0fcudf.core.index\\x94\\x8c\\nRangeIndex\\x94\\x93\\x94.\\x94hIK\\x00u\\x8c\\x11index_frame_count\\x94K\\x00\\x8c\\x07is-cuda\\x94]\\x94(\\x88\\x88e\\x8c\\x07lengths\\x94]\\x94(K\\x00K\\x00e\\x8c\\twriteable\\x94NN\\x86\\x94u]\\x94(\\x8c\\x12numpy.core.numeric\\x94\\x8c\\x0b_frombuffer\\x94\\x93\\x94(C\\x00\\x94\\x8c\\x05numpy\\x94hB\\x93\\x94\\x8c\\x02u1\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03
\\x8c\\x01|\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94bK\\x00\\x85\\x94\\x8c\\x01C\\x94t\\x94R\\x94he(C\\x00\\x94hkK\\x00\\x85\\x94hot\\x94R\\x94e\\x86\\x94R\\x94ee\\x86\\x94t\\x94\\x8c\\x13__dask_blockwise__2\\x94eh\\x1b]\\x94(]\\x94(h*h\\x13\\x8c\\x06rename\\x94\\x85\\x94R\\x94e]\\x94(h/h2h5h6\\x86\\x94R\\x94}\\x94(h:C0\\x80\\x04\\x95%\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x13cudf.core.dataframe\\x94\\x8c\\tDataFrame\\x94\\x93\\x94.\\x94h<C\\x14\\x80\\x04\\x95\\t\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x03src\\x94\\x85\\x94.\\x94h>}\\x94(h@C=\\x80\\x04\\x952\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x1acudf.core.column.numerical\\x94\\x8c\\x0fNumericalColumn\\x94\\x93\\x94.\\x94hBCB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i4\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94hD\\x89hE}\\x94(hGCI\\x80\\x04\\x95>\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c!cudf.core.buffer.spillable_buffer\\x94\\x8c\\x14SpillableBufferSlice\\x94\\x93\\x94.\\x94hIK\\x01uhMK\\x00hIK\\x01u\\x85\\x94hO}\\x94(hQ}\\x94(hSK\\x00hTK\\x00hUK\\x01uhVC\\x04\\x80\\x04N.\\x94hBCB\\x80\\x04\\x957\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x05numpy\\x94\\x8c\\x05dtype\\x94\\x93\\x94\\x8c\\x02i8\\x94\\x89\\x88\\x87\\x94R\\x94(K\\x03\\x8c\\x01<\\x94NNNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xffK\\x00t\\x94b.\\x94hYC-\\x80\\x04\\x95\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x0fcudf.core.index\\x94\\x8c\\nRangeIndex\\x94\\x93\\x94.\\x94hIK\\x00uh[K\\x00h\\\\]\\x94\\x88ah^]\\x94K\\x00ah`N\\x85\\x94u]\\x94he(C\\x00\\x94hkK\\x00\\x85\\x94hot\\x94R\\x94a\\x86\\x94R\\x94e]\\x94(h>h\\x1b]\\x94]\\x94(\\x8c\\x03src\\x94h\\x9eea\\x86\\x94ee\\x86\\x94t\\x94h\\x05(h\\x11h!\\x8c\\x10_reduction_chunk\\x94\\x93\\x94]\\x94h\\x0b(\\x8c\\x16dask.dataframe.methods\\x94\\x8c\\x06assign\\x94\\x93\\x94h\\x06h\\rh\\x08t\\x94h&\\x87\\x94ah\\x1b]\\x94]\\x94(\\x8c\\taca_chunk\\x94h0\\x8c\\x03len\\x94\\x93\\x94ea\\x86\\x94t\\x94\\x8c\\x13__dask_blockwise__0\\x94h\\x9e\\x8c\\x13__dask_blockwise__1\\x94\\x8c\\x03dst\\x94\\x8c\\x13__dask_blockwise__2\\x94N\\x8c\\x13__dask_blockwise__3\\x94\\x8c)to_frame-804980ae30b71d28f0a6bd3d5b7610f9\\x94\\x8c\\x13__dask_blockwise__4\\x94\\x8c(getitem-15414b72be12e28054238b44933937ab\\x94\\x8c\\x13__dask_blockwise__6\\x94\\x8c3cudf-aggregate-agg-c50c2d97de169ca4f41e43a92a042630\\x94uh\\x04\\x8c\\x13__dask_blockwise__5\\x94\\x85\\x94\\x8c6subgraph_callable-b4ca530e-8895-432e-b553-40a7b5892ab2\\x94t\\x94R\\x94.'\n",
       "\n",
       "During handling of the above exception, another exception occurred:\n",
       "\n",
       "Traceback (most recent call last):\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2244, in execute\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2244, in execute\n",
       "    function, args, kwargs = await self._maybe_deserialize_task(ts)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2216, in _maybe_deserialize_task\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2216, in _maybe_deserialize_task\n",
       "    function, args, kwargs = _deserialize(*ts.run_spec)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py\", line 79, in inner\n",
       "    return func(*args, **kwds)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2937, in _deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2937, in _deserialize\n",
       "    function = loads_function(function)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2925, in loads_function\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py\", line 2925, in loads_function\n",
       "    result = pickle.loads(bytes_object)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/protocol/pickle.py\", line 96, in loads\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/protocol/pickle.py\", line 96, in loads\n",
       "    return pickle.loads(x)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 176, in host_deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 176, in host_deserialize\n",
       "    obj = cls.device_deserialize(header, frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 130, in device_deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py\", line 130, in device_deserialize\n",
       "    return typ.deserialize(header, frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/dataframe.py\", line 1019, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/dataframe.py\", line 1019, in deserialize\n",
       "    obj = super().deserialize(\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 106, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py\", line 106, in deserialize\n",
       "    columns = deserialize_columns(header[\"columns\"], frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 2450, in deserialize_columns\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 2450, in deserialize_columns\n",
       "    colobj = col_typ.deserialize(meta, frames[:col_frame_count])\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1216, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1216, in deserialize\n",
       "    data, frames = unpack(header[\"data\"], frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1204, in unpack\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py\", line 1204, in unpack\n",
       "    obj = klass.deserialize(header, frames[:count])\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 574, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 574, in deserialize\n",
       "    return SpillableBuffer.deserialize(header, frames)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/buffer.py\", line 335, in deserialize\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/buffer.py\", line 335, in deserialize\n",
       "    return cls._from_device_memory(frame)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 235, in _from_device_memory\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 235, in _from_device_memory\n",
       "    ret._finalize_init(ptr_desc={\"type\": \"gpu\"}, exposed=exposed)\n",
-      "  File \"/home/dacosta/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 206, in _finalize_init\n",
+      "  File \"/home/dacosta/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py\", line 206, in _finalize_init\n",
       "    raise ValueError(\n",
       "ValueError: cannot create <class 'cudf.core.buffer.spillable_buffer.SpillableBuffer'> without a global spill manager\n"
      ]
@@ -475,34 +475,34 @@
       "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
       "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
       "Cell \u001b[0;32mIn[6], line 3\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[39m# Create a directed graph using the source (src) and destination (dst) vertex pairs from the Dataframe \u001b[39;00m\n\u001b[1;32m      2\u001b[0m G \u001b[39m=\u001b[39m cugraph\u001b[39m.\u001b[39mGraph(directed\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\u001b[0;32m----> 3\u001b[0m G\u001b[39m.\u001b[39;49mfrom_dask_cudf_edgelist(e_list, source\u001b[39m=\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39msrc\u001b[39;49m\u001b[39m'\u001b[39;49m, destination\u001b[39m=\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39mdst\u001b[39;49m\u001b[39m'\u001b[39;49m)\n\u001b[1;32m      5\u001b[0m \u001b[39m# Print time\u001b[39;00m\n\u001b[1;32m      6\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m\"\u001b[39m\u001b[39mRead, load and renumber: \u001b[39m\u001b[39m\"\u001b[39m, time\u001b[39m.\u001b[39mtime()\u001b[39m-\u001b[39mt_start, \u001b[39m\"\u001b[39m\u001b[39ms\u001b[39m\u001b[39m\"\u001b[39m)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cugraph/structure/graph_classes.py:309\u001b[0m, in \u001b[0;36mGraph.from_dask_cudf_edgelist\u001b[0;34m(self, input_ddf, source, destination, edge_attr, renumber, store_transposed, legacy_renum_only)\u001b[0m\n\u001b[1;32m    307\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_Impl\u001b[39m.\u001b[39medgelist \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m    308\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39m\"\u001b[39m\u001b[39mGraph already has values\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m--> 309\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_Impl\u001b[39m.\u001b[39;49m_simpleDistributedGraphImpl__from_edgelist(\n\u001b[1;32m    310\u001b[0m     input_ddf,\n\u001b[1;32m    311\u001b[0m     source,\n\u001b[1;32m    312\u001b[0m     destination,\n\u001b[1;32m    313\u001b[0m     edge_attr,\n\u001b[1;32m    314\u001b[0m     renumber,\n\u001b[1;32m    315\u001b[0m     store_transposed,\n\u001b[1;32m    316\u001b[0m     legacy_renum_only,\n\u001b[1;32m    317\u001b[0m )\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cugraph/structure/graph_implementation/simpleDistributedGraph.py:272\u001b[0m, in \u001b[0;36msimpleDistributedGraphImpl.__from_edgelist\u001b[0;34m(self, input_ddf, source, destination, edge_attr, renumber, store_transposed, legacy_renum_only)\u001b[0m\n\u001b[1;32m    268\u001b[0m     dst_col_name \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mrenumber_map\u001b[39m.\u001b[39mrenumbered_dst_col_name\n\u001b[1;32m    270\u001b[0m ddf \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39medgelist\u001b[39m.\u001b[39medgelist_df\n\u001b[0;32m--> 272\u001b[0m num_edges \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39;49m(ddf)\n\u001b[1;32m    273\u001b[0m edge_data \u001b[39m=\u001b[39m get_distributed_data(ddf)\n\u001b[1;32m    275\u001b[0m graph_props \u001b[39m=\u001b[39m GraphProperties(\n\u001b[1;32m    276\u001b[0m     is_multigraph\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mproperties\u001b[39m.\u001b[39mmulti_edge,\n\u001b[1;32m    277\u001b[0m     is_symmetric\u001b[39m=\u001b[39m\u001b[39mnot\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mproperties\u001b[39m.\u001b[39mdirected,\n\u001b[1;32m    278\u001b[0m )\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/dataframe/core.py:4775\u001b[0m, in \u001b[0;36mDataFrame.__len__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   4773\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m\u001b[39m__len__\u001b[39m()\n\u001b[1;32m   4774\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 4775\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mlen\u001b[39;49m(s)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/dataframe/core.py:843\u001b[0m, in \u001b[0;36m_Frame.__len__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    840\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__len__\u001b[39m(\u001b[39mself\u001b[39m):\n\u001b[1;32m    841\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mreduction(\n\u001b[1;32m    842\u001b[0m         \u001b[39mlen\u001b[39;49m, np\u001b[39m.\u001b[39;49msum, token\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mlen\u001b[39;49m\u001b[39m\"\u001b[39;49m, meta\u001b[39m=\u001b[39;49m\u001b[39mint\u001b[39;49m, split_every\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m\n\u001b[0;32m--> 843\u001b[0m     )\u001b[39m.\u001b[39;49mcompute()\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/base.py:314\u001b[0m, in \u001b[0;36mDaskMethodsMixin.compute\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m    290\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mcompute\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n\u001b[1;32m    291\u001b[0m \u001b[39m    \u001b[39m\u001b[39m\"\"\"Compute this dask collection\u001b[39;00m\n\u001b[1;32m    292\u001b[0m \n\u001b[1;32m    293\u001b[0m \u001b[39m    This turns a lazy Dask collection into its in-memory equivalent.\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    312\u001b[0m \u001b[39m    dask.base.compute\u001b[39;00m\n\u001b[1;32m    313\u001b[0m \u001b[39m    \"\"\"\u001b[39;00m\n\u001b[0;32m--> 314\u001b[0m     (result,) \u001b[39m=\u001b[39m compute(\u001b[39mself\u001b[39;49m, traverse\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m    315\u001b[0m     \u001b[39mreturn\u001b[39;00m result\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/dask/base.py:599\u001b[0m, in \u001b[0;36mcompute\u001b[0;34m(traverse, optimize_graph, scheduler, get, *args, **kwargs)\u001b[0m\n\u001b[1;32m    596\u001b[0m     keys\u001b[39m.\u001b[39mappend(x\u001b[39m.\u001b[39m__dask_keys__())\n\u001b[1;32m    597\u001b[0m     postcomputes\u001b[39m.\u001b[39mappend(x\u001b[39m.\u001b[39m__dask_postcompute__())\n\u001b[0;32m--> 599\u001b[0m results \u001b[39m=\u001b[39m schedule(dsk, keys, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m    600\u001b[0m \u001b[39mreturn\u001b[39;00m repack([f(r, \u001b[39m*\u001b[39ma) \u001b[39mfor\u001b[39;00m r, (f, a) \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(results, postcomputes)])\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/client.py:3186\u001b[0m, in \u001b[0;36mClient.get\u001b[0;34m(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)\u001b[0m\n\u001b[1;32m   3184\u001b[0m         should_rejoin \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m\n\u001b[1;32m   3185\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m-> 3186\u001b[0m     results \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mgather(packed, asynchronous\u001b[39m=\u001b[39;49masynchronous, direct\u001b[39m=\u001b[39;49mdirect)\n\u001b[1;32m   3187\u001b[0m \u001b[39mfinally\u001b[39;00m:\n\u001b[1;32m   3188\u001b[0m     \u001b[39mfor\u001b[39;00m f \u001b[39min\u001b[39;00m futures\u001b[39m.\u001b[39mvalues():\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/client.py:2345\u001b[0m, in \u001b[0;36mClient.gather\u001b[0;34m(self, futures, errors, direct, asynchronous)\u001b[0m\n\u001b[1;32m   2343\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m   2344\u001b[0m     local_worker \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[0;32m-> 2345\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49msync(\n\u001b[1;32m   2346\u001b[0m     \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_gather,\n\u001b[1;32m   2347\u001b[0m     futures,\n\u001b[1;32m   2348\u001b[0m     errors\u001b[39m=\u001b[39;49merrors,\n\u001b[1;32m   2349\u001b[0m     direct\u001b[39m=\u001b[39;49mdirect,\n\u001b[1;32m   2350\u001b[0m     local_worker\u001b[39m=\u001b[39;49mlocal_worker,\n\u001b[1;32m   2351\u001b[0m     asynchronous\u001b[39m=\u001b[39;49masynchronous,\n\u001b[1;32m   2352\u001b[0m )\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/utils.py:349\u001b[0m, in \u001b[0;36mSyncMethodMixin.sync\u001b[0;34m(self, func, asynchronous, callback_timeout, *args, **kwargs)\u001b[0m\n\u001b[1;32m    347\u001b[0m     \u001b[39mreturn\u001b[39;00m future\n\u001b[1;32m    348\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m--> 349\u001b[0m     \u001b[39mreturn\u001b[39;00m sync(\n\u001b[1;32m    350\u001b[0m         \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mloop, func, \u001b[39m*\u001b[39;49margs, callback_timeout\u001b[39m=\u001b[39;49mcallback_timeout, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs\n\u001b[1;32m    351\u001b[0m     )\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/utils.py:416\u001b[0m, in \u001b[0;36msync\u001b[0;34m(loop, func, callback_timeout, *args, **kwargs)\u001b[0m\n\u001b[1;32m    414\u001b[0m \u001b[39mif\u001b[39;00m error:\n\u001b[1;32m    415\u001b[0m     typ, exc, tb \u001b[39m=\u001b[39m error\n\u001b[0;32m--> 416\u001b[0m     \u001b[39mraise\u001b[39;00m exc\u001b[39m.\u001b[39mwith_traceback(tb)\n\u001b[1;32m    417\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m    418\u001b[0m     \u001b[39mreturn\u001b[39;00m result\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/utils.py:389\u001b[0m, in \u001b[0;36msync.<locals>.f\u001b[0;34m()\u001b[0m\n\u001b[1;32m    387\u001b[0m         future \u001b[39m=\u001b[39m wait_for(future, callback_timeout)\n\u001b[1;32m    388\u001b[0m     future \u001b[39m=\u001b[39m asyncio\u001b[39m.\u001b[39mensure_future(future)\n\u001b[0;32m--> 389\u001b[0m     result \u001b[39m=\u001b[39m \u001b[39myield\u001b[39;00m future\n\u001b[1;32m    390\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m    391\u001b[0m     error \u001b[39m=\u001b[39m sys\u001b[39m.\u001b[39mexc_info()\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/tornado/gen.py:769\u001b[0m, in \u001b[0;36mRunner.run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    766\u001b[0m exc_info \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m    768\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 769\u001b[0m     value \u001b[39m=\u001b[39m future\u001b[39m.\u001b[39;49mresult()\n\u001b[1;32m    770\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m    771\u001b[0m     exc_info \u001b[39m=\u001b[39m sys\u001b[39m.\u001b[39mexc_info()\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/client.py:2208\u001b[0m, in \u001b[0;36mClient._gather\u001b[0;34m(self, futures, errors, direct, local_worker)\u001b[0m\n\u001b[1;32m   2206\u001b[0m         exc \u001b[39m=\u001b[39m CancelledError(key)\n\u001b[1;32m   2207\u001b[0m     \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 2208\u001b[0m         \u001b[39mraise\u001b[39;00m exception\u001b[39m.\u001b[39mwith_traceback(traceback)\n\u001b[1;32m   2209\u001b[0m     \u001b[39mraise\u001b[39;00m exc\n\u001b[1;32m   2210\u001b[0m \u001b[39mif\u001b[39;00m errors \u001b[39m==\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mskip\u001b[39m\u001b[39m\"\u001b[39m:\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/contextlib.py:79\u001b[0m, in \u001b[0;36minner\u001b[0;34m()\u001b[0m\n\u001b[1;32m     76\u001b[0m \u001b[39m@wraps\u001b[39m(func)\n\u001b[1;32m     77\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minner\u001b[39m(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwds):\n\u001b[1;32m     78\u001b[0m     \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_recreate_cm():\n\u001b[0;32m---> 79\u001b[0m         \u001b[39mreturn\u001b[39;00m func(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwds)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py:2937\u001b[0m, in \u001b[0;36m_deserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m   2934\u001b[0m \u001b[39m# Some objects require threadlocal state during deserialization, e.g. to\u001b[39;00m\n\u001b[1;32m   2935\u001b[0m \u001b[39m# detect the current worker\u001b[39;00m\n\u001b[1;32m   2936\u001b[0m \u001b[39mif\u001b[39;00m function \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m-> 2937\u001b[0m     function \u001b[39m=\u001b[39m loads_function(function)\n\u001b[1;32m   2938\u001b[0m \u001b[39mif\u001b[39;00m args \u001b[39mand\u001b[39;00m \u001b[39misinstance\u001b[39m(args, \u001b[39mbytes\u001b[39m):\n\u001b[1;32m   2939\u001b[0m     args \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(args)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py:2925\u001b[0m, in \u001b[0;36mloads_function\u001b[0;34m()\u001b[0m\n\u001b[1;32m   2923\u001b[0m     result \u001b[39m=\u001b[39m cache_loads[bytes_object]\n\u001b[1;32m   2924\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mKeyError\u001b[39;00m:\n\u001b[0;32m-> 2925\u001b[0m     result \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(bytes_object)\n\u001b[1;32m   2926\u001b[0m     cache_loads[bytes_object] \u001b[39m=\u001b[39m result\n\u001b[1;32m   2927\u001b[0m \u001b[39mreturn\u001b[39;00m result\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/distributed/protocol/pickle.py:96\u001b[0m, in \u001b[0;36mloads\u001b[0;34m()\u001b[0m\n\u001b[1;32m     94\u001b[0m         \u001b[39mreturn\u001b[39;00m pickle\u001b[39m.\u001b[39mloads(x, buffers\u001b[39m=\u001b[39mbuffers)\n\u001b[1;32m     95\u001b[0m     \u001b[39melse\u001b[39;00m:\n\u001b[0;32m---> 96\u001b[0m         \u001b[39mreturn\u001b[39;00m pickle\u001b[39m.\u001b[39mloads(x)\n\u001b[1;32m     97\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m     98\u001b[0m     logger\u001b[39m.\u001b[39minfo(\u001b[39m\"\u001b[39m\u001b[39mFailed to deserialize \u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m\"\u001b[39m, x[:\u001b[39m10000\u001b[39m], exc_info\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py:176\u001b[0m, in \u001b[0;36mhost_deserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    154\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Perform device-side deserialization tasks.\u001b[39;00m\n\u001b[1;32m    155\u001b[0m \n\u001b[1;32m    156\u001b[0m \u001b[39mParameters\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    170\u001b[0m \u001b[39m:meta private:\u001b[39;00m\n\u001b[1;32m    171\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m    172\u001b[0m frames \u001b[39m=\u001b[39m [\n\u001b[1;32m    173\u001b[0m     cudf\u001b[39m.\u001b[39mcore\u001b[39m.\u001b[39mbuffer\u001b[39m.\u001b[39mas_buffer(f) \u001b[39mif\u001b[39;00m c \u001b[39melse\u001b[39;00m f\n\u001b[1;32m    174\u001b[0m     \u001b[39mfor\u001b[39;00m c, f \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(header[\u001b[39m\"\u001b[39m\u001b[39mis-cuda\u001b[39m\u001b[39m\"\u001b[39m], \u001b[39mmap\u001b[39m(\u001b[39mmemoryview\u001b[39m, frames))\n\u001b[1;32m    175\u001b[0m ]\n\u001b[0;32m--> 176\u001b[0m obj \u001b[39m=\u001b[39m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39mdevice_deserialize(header, frames)\n\u001b[1;32m    177\u001b[0m \u001b[39mreturn\u001b[39;00m obj\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py:130\u001b[0m, in \u001b[0;36mdevice_deserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    125\u001b[0m typ \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m    126\u001b[0m frames \u001b[39m=\u001b[39m [\n\u001b[1;32m    127\u001b[0m     cudf\u001b[39m.\u001b[39mcore\u001b[39m.\u001b[39mbuffer\u001b[39m.\u001b[39mas_buffer(f) \u001b[39mif\u001b[39;00m c \u001b[39melse\u001b[39;00m \u001b[39mmemoryview\u001b[39m(f)\n\u001b[1;32m    128\u001b[0m     \u001b[39mfor\u001b[39;00m c, f \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(header[\u001b[39m\"\u001b[39m\u001b[39mis-cuda\u001b[39m\u001b[39m\"\u001b[39m], frames)\n\u001b[1;32m    129\u001b[0m ]\n\u001b[0;32m--> 130\u001b[0m \u001b[39mreturn\u001b[39;00m typ\u001b[39m.\u001b[39mdeserialize(header, frames)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/dataframe.py:1019\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m   1016\u001b[0m \u001b[39m@classmethod\u001b[39m\n\u001b[1;32m   1017\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdeserialize\u001b[39m(\u001b[39mcls\u001b[39m, header, frames):\n\u001b[1;32m   1018\u001b[0m     index_nframes \u001b[39m=\u001b[39m header[\u001b[39m\"\u001b[39m\u001b[39mindex_frame_count\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[0;32m-> 1019\u001b[0m     obj \u001b[39m=\u001b[39m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39mdeserialize(\n\u001b[1;32m   1020\u001b[0m         header, frames[header[\u001b[39m\"\u001b[39m\u001b[39mindex_frame_count\u001b[39m\u001b[39m\"\u001b[39m] :]\n\u001b[1;32m   1021\u001b[0m     )\n\u001b[1;32m   1023\u001b[0m     idx_typ \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mindex\u001b[39m\u001b[39m\"\u001b[39m][\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m   1024\u001b[0m     index \u001b[39m=\u001b[39m idx_typ\u001b[39m.\u001b[39mdeserialize(header[\u001b[39m\"\u001b[39m\u001b[39mindex\u001b[39m\u001b[39m\"\u001b[39m], frames[:index_nframes])\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py:106\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    104\u001b[0m cls_deserialize \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m    105\u001b[0m column_names \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mcolumn_names\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m--> 106\u001b[0m columns \u001b[39m=\u001b[39m deserialize_columns(header[\u001b[39m\"\u001b[39m\u001b[39mcolumns\u001b[39m\u001b[39m\"\u001b[39m], frames)\n\u001b[1;32m    107\u001b[0m \u001b[39mreturn\u001b[39;00m cls_deserialize\u001b[39m.\u001b[39m_from_data(\u001b[39mdict\u001b[39m(\u001b[39mzip\u001b[39m(column_names, columns)))\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py:2450\u001b[0m, in \u001b[0;36mdeserialize_columns\u001b[0;34m()\u001b[0m\n\u001b[1;32m   2448\u001b[0m col_frame_count \u001b[39m=\u001b[39m meta[\u001b[39m\"\u001b[39m\u001b[39mframe_count\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[1;32m   2449\u001b[0m col_typ \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(meta[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m-> 2450\u001b[0m colobj \u001b[39m=\u001b[39m col_typ\u001b[39m.\u001b[39mdeserialize(meta, frames[:col_frame_count])\n\u001b[1;32m   2451\u001b[0m columns\u001b[39m.\u001b[39mappend(colobj)\n\u001b[1;32m   2452\u001b[0m \u001b[39m# Advance frames\u001b[39;00m\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py:1216\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m   1214\u001b[0m     dtype \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mdtype\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m   1215\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mdata\u001b[39m\u001b[39m\"\u001b[39m \u001b[39min\u001b[39;00m header:\n\u001b[0;32m-> 1216\u001b[0m     data, frames \u001b[39m=\u001b[39m unpack(header[\u001b[39m\"\u001b[39m\u001b[39mdata\u001b[39m\u001b[39m\"\u001b[39m], frames)\n\u001b[1;32m   1217\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m   1218\u001b[0m     data \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py:1204\u001b[0m, in \u001b[0;36munpack\u001b[0;34m()\u001b[0m\n\u001b[1;32m   1202\u001b[0m count \u001b[39m=\u001b[39m header[\u001b[39m\"\u001b[39m\u001b[39mframe_count\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[1;32m   1203\u001b[0m klass \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m-> 1204\u001b[0m obj \u001b[39m=\u001b[39m klass\u001b[39m.\u001b[39mdeserialize(header, frames[:count])\n\u001b[1;32m   1205\u001b[0m \u001b[39mreturn\u001b[39;00m obj, frames[count:]\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py:574\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    567\u001b[0m \u001b[39m@classmethod\u001b[39m\n\u001b[1;32m    568\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdeserialize\u001b[39m(\u001b[39mcls\u001b[39m, header: \u001b[39mdict\u001b[39m, frames: \u001b[39mlist\u001b[39m):\n\u001b[1;32m    569\u001b[0m     \u001b[39m# TODO: because of the hack in `SpillableBuffer.serialize()` where\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    572\u001b[0m     \u001b[39m# deserialize into `SpillableBufferSlice` when the frames hasn't been\u001b[39;00m\n\u001b[1;32m    573\u001b[0m     \u001b[39m# copied.\u001b[39;00m\n\u001b[0;32m--> 574\u001b[0m     \u001b[39mreturn\u001b[39;00m SpillableBuffer\u001b[39m.\u001b[39mdeserialize(header, frames)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/buffer.py:335\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    332\u001b[0m     \u001b[39mreturn\u001b[39;00m frame  \u001b[39m# The frame is already deserialized\u001b[39;00m\n\u001b[1;32m    334\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mhasattr\u001b[39m(frame, \u001b[39m\"\u001b[39m\u001b[39m__cuda_array_interface__\u001b[39m\u001b[39m\"\u001b[39m):\n\u001b[0;32m--> 335\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39m_from_device_memory(frame)\n\u001b[1;32m    336\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39m_from_host_memory(frame)\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py:235\u001b[0m, in \u001b[0;36m_from_device_memory\u001b[0;34m()\u001b[0m\n\u001b[1;32m    218\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Create a spillabe buffer from device memory.\u001b[39;00m\n\u001b[1;32m    219\u001b[0m \n\u001b[1;32m    220\u001b[0m \u001b[39mNo data is being copied.\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    232\u001b[0m \u001b[39m    Buffer representing the same device memory as `data`\u001b[39;00m\n\u001b[1;32m    233\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m    234\u001b[0m ret \u001b[39m=\u001b[39m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m_from_device_memory(data)\n\u001b[0;32m--> 235\u001b[0m ret\u001b[39m.\u001b[39m_finalize_init(ptr_desc\u001b[39m=\u001b[39m{\u001b[39m\"\u001b[39m\u001b[39mtype\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mgpu\u001b[39m\u001b[39m\"\u001b[39m}, exposed\u001b[39m=\u001b[39mexposed)\n\u001b[1;32m    236\u001b[0m \u001b[39mreturn\u001b[39;00m ret\n",
-      "File \u001b[0;32m~/miniconda3/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py:206\u001b[0m, in \u001b[0;36m_finalize_init\u001b[0;34m()\u001b[0m\n\u001b[1;32m    204\u001b[0m manager \u001b[39m=\u001b[39m get_global_manager()\n\u001b[1;32m    205\u001b[0m \u001b[39mif\u001b[39;00m manager \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m--> 206\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m    207\u001b[0m         \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mcannot create \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m without \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    208\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39ma global spill manager\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    209\u001b[0m     )\n\u001b[1;32m    211\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_manager \u001b[39m=\u001b[39m manager\n\u001b[1;32m    212\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_manager\u001b[39m.\u001b[39madd(\u001b[39mself\u001b[39m)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cugraph/structure/graph_classes.py:309\u001b[0m, in \u001b[0;36mGraph.from_dask_cudf_edgelist\u001b[0;34m(self, input_ddf, source, destination, edge_attr, renumber, store_transposed, legacy_renum_only)\u001b[0m\n\u001b[1;32m    307\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_Impl\u001b[39m.\u001b[39medgelist \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m    308\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39m\"\u001b[39m\u001b[39mGraph already has values\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m--> 309\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_Impl\u001b[39m.\u001b[39;49m_simpleDistributedGraphImpl__from_edgelist(\n\u001b[1;32m    310\u001b[0m     input_ddf,\n\u001b[1;32m    311\u001b[0m     source,\n\u001b[1;32m    312\u001b[0m     destination,\n\u001b[1;32m    313\u001b[0m     edge_attr,\n\u001b[1;32m    314\u001b[0m     renumber,\n\u001b[1;32m    315\u001b[0m     store_transposed,\n\u001b[1;32m    316\u001b[0m     legacy_renum_only,\n\u001b[1;32m    317\u001b[0m )\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cugraph/structure/graph_implementation/simpleDistributedGraph.py:272\u001b[0m, in \u001b[0;36msimpleDistributedGraphImpl.__from_edgelist\u001b[0;34m(self, input_ddf, source, destination, edge_attr, renumber, store_transposed, legacy_renum_only)\u001b[0m\n\u001b[1;32m    268\u001b[0m     dst_col_name \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mrenumber_map\u001b[39m.\u001b[39mrenumbered_dst_col_name\n\u001b[1;32m    270\u001b[0m ddf \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39medgelist\u001b[39m.\u001b[39medgelist_df\n\u001b[0;32m--> 272\u001b[0m num_edges \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39;49m(ddf)\n\u001b[1;32m    273\u001b[0m edge_data \u001b[39m=\u001b[39m get_distributed_data(ddf)\n\u001b[1;32m    275\u001b[0m graph_props \u001b[39m=\u001b[39m GraphProperties(\n\u001b[1;32m    276\u001b[0m     is_multigraph\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mproperties\u001b[39m.\u001b[39mmulti_edge,\n\u001b[1;32m    277\u001b[0m     is_symmetric\u001b[39m=\u001b[39m\u001b[39mnot\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mproperties\u001b[39m.\u001b[39mdirected,\n\u001b[1;32m    278\u001b[0m )\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/dataframe/core.py:4775\u001b[0m, in \u001b[0;36mDataFrame.__len__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   4773\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m\u001b[39m__len__\u001b[39m()\n\u001b[1;32m   4774\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 4775\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mlen\u001b[39;49m(s)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/dataframe/core.py:843\u001b[0m, in \u001b[0;36m_Frame.__len__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    840\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__len__\u001b[39m(\u001b[39mself\u001b[39m):\n\u001b[1;32m    841\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mreduction(\n\u001b[1;32m    842\u001b[0m         \u001b[39mlen\u001b[39;49m, np\u001b[39m.\u001b[39;49msum, token\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mlen\u001b[39;49m\u001b[39m\"\u001b[39;49m, meta\u001b[39m=\u001b[39;49m\u001b[39mint\u001b[39;49m, split_every\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m\n\u001b[0;32m--> 843\u001b[0m     )\u001b[39m.\u001b[39;49mcompute()\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/base.py:314\u001b[0m, in \u001b[0;36mDaskMethodsMixin.compute\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m    290\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mcompute\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n\u001b[1;32m    291\u001b[0m \u001b[39m    \u001b[39m\u001b[39m\"\"\"Compute this dask collection\u001b[39;00m\n\u001b[1;32m    292\u001b[0m \n\u001b[1;32m    293\u001b[0m \u001b[39m    This turns a lazy Dask collection into its in-memory equivalent.\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    312\u001b[0m \u001b[39m    dask.base.compute\u001b[39;00m\n\u001b[1;32m    313\u001b[0m \u001b[39m    \"\"\"\u001b[39;00m\n\u001b[0;32m--> 314\u001b[0m     (result,) \u001b[39m=\u001b[39m compute(\u001b[39mself\u001b[39;49m, traverse\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m    315\u001b[0m     \u001b[39mreturn\u001b[39;00m result\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/dask/base.py:599\u001b[0m, in \u001b[0;36mcompute\u001b[0;34m(traverse, optimize_graph, scheduler, get, *args, **kwargs)\u001b[0m\n\u001b[1;32m    596\u001b[0m     keys\u001b[39m.\u001b[39mappend(x\u001b[39m.\u001b[39m__dask_keys__())\n\u001b[1;32m    597\u001b[0m     postcomputes\u001b[39m.\u001b[39mappend(x\u001b[39m.\u001b[39m__dask_postcompute__())\n\u001b[0;32m--> 599\u001b[0m results \u001b[39m=\u001b[39m schedule(dsk, keys, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m    600\u001b[0m \u001b[39mreturn\u001b[39;00m repack([f(r, \u001b[39m*\u001b[39ma) \u001b[39mfor\u001b[39;00m r, (f, a) \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(results, postcomputes)])\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/client.py:3186\u001b[0m, in \u001b[0;36mClient.get\u001b[0;34m(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)\u001b[0m\n\u001b[1;32m   3184\u001b[0m         should_rejoin \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m\n\u001b[1;32m   3185\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m-> 3186\u001b[0m     results \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mgather(packed, asynchronous\u001b[39m=\u001b[39;49masynchronous, direct\u001b[39m=\u001b[39;49mdirect)\n\u001b[1;32m   3187\u001b[0m \u001b[39mfinally\u001b[39;00m:\n\u001b[1;32m   3188\u001b[0m     \u001b[39mfor\u001b[39;00m f \u001b[39min\u001b[39;00m futures\u001b[39m.\u001b[39mvalues():\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/client.py:2345\u001b[0m, in \u001b[0;36mClient.gather\u001b[0;34m(self, futures, errors, direct, asynchronous)\u001b[0m\n\u001b[1;32m   2343\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m   2344\u001b[0m     local_worker \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[0;32m-> 2345\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49msync(\n\u001b[1;32m   2346\u001b[0m     \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_gather,\n\u001b[1;32m   2347\u001b[0m     futures,\n\u001b[1;32m   2348\u001b[0m     errors\u001b[39m=\u001b[39;49merrors,\n\u001b[1;32m   2349\u001b[0m     direct\u001b[39m=\u001b[39;49mdirect,\n\u001b[1;32m   2350\u001b[0m     local_worker\u001b[39m=\u001b[39;49mlocal_worker,\n\u001b[1;32m   2351\u001b[0m     asynchronous\u001b[39m=\u001b[39;49masynchronous,\n\u001b[1;32m   2352\u001b[0m )\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/utils.py:349\u001b[0m, in \u001b[0;36mSyncMethodMixin.sync\u001b[0;34m(self, func, asynchronous, callback_timeout, *args, **kwargs)\u001b[0m\n\u001b[1;32m    347\u001b[0m     \u001b[39mreturn\u001b[39;00m future\n\u001b[1;32m    348\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m--> 349\u001b[0m     \u001b[39mreturn\u001b[39;00m sync(\n\u001b[1;32m    350\u001b[0m         \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mloop, func, \u001b[39m*\u001b[39;49margs, callback_timeout\u001b[39m=\u001b[39;49mcallback_timeout, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs\n\u001b[1;32m    351\u001b[0m     )\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/utils.py:416\u001b[0m, in \u001b[0;36msync\u001b[0;34m(loop, func, callback_timeout, *args, **kwargs)\u001b[0m\n\u001b[1;32m    414\u001b[0m \u001b[39mif\u001b[39;00m error:\n\u001b[1;32m    415\u001b[0m     typ, exc, tb \u001b[39m=\u001b[39m error\n\u001b[0;32m--> 416\u001b[0m     \u001b[39mraise\u001b[39;00m exc\u001b[39m.\u001b[39mwith_traceback(tb)\n\u001b[1;32m    417\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m    418\u001b[0m     \u001b[39mreturn\u001b[39;00m result\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/utils.py:389\u001b[0m, in \u001b[0;36msync.<locals>.f\u001b[0;34m()\u001b[0m\n\u001b[1;32m    387\u001b[0m         future \u001b[39m=\u001b[39m wait_for(future, callback_timeout)\n\u001b[1;32m    388\u001b[0m     future \u001b[39m=\u001b[39m asyncio\u001b[39m.\u001b[39mensure_future(future)\n\u001b[0;32m--> 389\u001b[0m     result \u001b[39m=\u001b[39m \u001b[39myield\u001b[39;00m future\n\u001b[1;32m    390\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m    391\u001b[0m     error \u001b[39m=\u001b[39m sys\u001b[39m.\u001b[39mexc_info()\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/tornado/gen.py:769\u001b[0m, in \u001b[0;36mRunner.run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    766\u001b[0m exc_info \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m    768\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 769\u001b[0m     value \u001b[39m=\u001b[39m future\u001b[39m.\u001b[39;49mresult()\n\u001b[1;32m    770\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m    771\u001b[0m     exc_info \u001b[39m=\u001b[39m sys\u001b[39m.\u001b[39mexc_info()\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/client.py:2208\u001b[0m, in \u001b[0;36mClient._gather\u001b[0;34m(self, futures, errors, direct, local_worker)\u001b[0m\n\u001b[1;32m   2206\u001b[0m         exc \u001b[39m=\u001b[39m CancelledError(key)\n\u001b[1;32m   2207\u001b[0m     \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 2208\u001b[0m         \u001b[39mraise\u001b[39;00m exception\u001b[39m.\u001b[39mwith_traceback(traceback)\n\u001b[1;32m   2209\u001b[0m     \u001b[39mraise\u001b[39;00m exc\n\u001b[1;32m   2210\u001b[0m \u001b[39mif\u001b[39;00m errors \u001b[39m==\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mskip\u001b[39m\u001b[39m\"\u001b[39m:\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/contextlib.py:79\u001b[0m, in \u001b[0;36minner\u001b[0;34m()\u001b[0m\n\u001b[1;32m     76\u001b[0m \u001b[39m@wraps\u001b[39m(func)\n\u001b[1;32m     77\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minner\u001b[39m(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwds):\n\u001b[1;32m     78\u001b[0m     \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_recreate_cm():\n\u001b[0;32m---> 79\u001b[0m         \u001b[39mreturn\u001b[39;00m func(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwds)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py:2937\u001b[0m, in \u001b[0;36m_deserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m   2934\u001b[0m \u001b[39m# Some objects require threadlocal state during deserialization, e.g. to\u001b[39;00m\n\u001b[1;32m   2935\u001b[0m \u001b[39m# detect the current worker\u001b[39;00m\n\u001b[1;32m   2936\u001b[0m \u001b[39mif\u001b[39;00m function \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m-> 2937\u001b[0m     function \u001b[39m=\u001b[39m loads_function(function)\n\u001b[1;32m   2938\u001b[0m \u001b[39mif\u001b[39;00m args \u001b[39mand\u001b[39;00m \u001b[39misinstance\u001b[39m(args, \u001b[39mbytes\u001b[39m):\n\u001b[1;32m   2939\u001b[0m     args \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(args)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/worker.py:2925\u001b[0m, in \u001b[0;36mloads_function\u001b[0;34m()\u001b[0m\n\u001b[1;32m   2923\u001b[0m     result \u001b[39m=\u001b[39m cache_loads[bytes_object]\n\u001b[1;32m   2924\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mKeyError\u001b[39;00m:\n\u001b[0;32m-> 2925\u001b[0m     result \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(bytes_object)\n\u001b[1;32m   2926\u001b[0m     cache_loads[bytes_object] \u001b[39m=\u001b[39m result\n\u001b[1;32m   2927\u001b[0m \u001b[39mreturn\u001b[39;00m result\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/distributed/protocol/pickle.py:96\u001b[0m, in \u001b[0;36mloads\u001b[0;34m()\u001b[0m\n\u001b[1;32m     94\u001b[0m         \u001b[39mreturn\u001b[39;00m pickle\u001b[39m.\u001b[39mloads(x, buffers\u001b[39m=\u001b[39mbuffers)\n\u001b[1;32m     95\u001b[0m     \u001b[39melse\u001b[39;00m:\n\u001b[0;32m---> 96\u001b[0m         \u001b[39mreturn\u001b[39;00m pickle\u001b[39m.\u001b[39mloads(x)\n\u001b[1;32m     97\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m     98\u001b[0m     logger\u001b[39m.\u001b[39minfo(\u001b[39m\"\u001b[39m\u001b[39mFailed to deserialize \u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m\"\u001b[39m, x[:\u001b[39m10000\u001b[39m], exc_info\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py:176\u001b[0m, in \u001b[0;36mhost_deserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    154\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Perform device-side deserialization tasks.\u001b[39;00m\n\u001b[1;32m    155\u001b[0m \n\u001b[1;32m    156\u001b[0m \u001b[39mParameters\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    170\u001b[0m \u001b[39m:meta private:\u001b[39;00m\n\u001b[1;32m    171\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m    172\u001b[0m frames \u001b[39m=\u001b[39m [\n\u001b[1;32m    173\u001b[0m     cudf\u001b[39m.\u001b[39mcore\u001b[39m.\u001b[39mbuffer\u001b[39m.\u001b[39mas_buffer(f) \u001b[39mif\u001b[39;00m c \u001b[39melse\u001b[39;00m f\n\u001b[1;32m    174\u001b[0m     \u001b[39mfor\u001b[39;00m c, f \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(header[\u001b[39m\"\u001b[39m\u001b[39mis-cuda\u001b[39m\u001b[39m\"\u001b[39m], \u001b[39mmap\u001b[39m(\u001b[39mmemoryview\u001b[39m, frames))\n\u001b[1;32m    175\u001b[0m ]\n\u001b[0;32m--> 176\u001b[0m obj \u001b[39m=\u001b[39m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39mdevice_deserialize(header, frames)\n\u001b[1;32m    177\u001b[0m \u001b[39mreturn\u001b[39;00m obj\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/abc.py:130\u001b[0m, in \u001b[0;36mdevice_deserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    125\u001b[0m typ \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m    126\u001b[0m frames \u001b[39m=\u001b[39m [\n\u001b[1;32m    127\u001b[0m     cudf\u001b[39m.\u001b[39mcore\u001b[39m.\u001b[39mbuffer\u001b[39m.\u001b[39mas_buffer(f) \u001b[39mif\u001b[39;00m c \u001b[39melse\u001b[39;00m \u001b[39mmemoryview\u001b[39m(f)\n\u001b[1;32m    128\u001b[0m     \u001b[39mfor\u001b[39;00m c, f \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(header[\u001b[39m\"\u001b[39m\u001b[39mis-cuda\u001b[39m\u001b[39m\"\u001b[39m], frames)\n\u001b[1;32m    129\u001b[0m ]\n\u001b[0;32m--> 130\u001b[0m \u001b[39mreturn\u001b[39;00m typ\u001b[39m.\u001b[39mdeserialize(header, frames)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/dataframe.py:1019\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m   1016\u001b[0m \u001b[39m@classmethod\u001b[39m\n\u001b[1;32m   1017\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdeserialize\u001b[39m(\u001b[39mcls\u001b[39m, header, frames):\n\u001b[1;32m   1018\u001b[0m     index_nframes \u001b[39m=\u001b[39m header[\u001b[39m\"\u001b[39m\u001b[39mindex_frame_count\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[0;32m-> 1019\u001b[0m     obj \u001b[39m=\u001b[39m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39mdeserialize(\n\u001b[1;32m   1020\u001b[0m         header, frames[header[\u001b[39m\"\u001b[39m\u001b[39mindex_frame_count\u001b[39m\u001b[39m\"\u001b[39m] :]\n\u001b[1;32m   1021\u001b[0m     )\n\u001b[1;32m   1023\u001b[0m     idx_typ \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mindex\u001b[39m\u001b[39m\"\u001b[39m][\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m   1024\u001b[0m     index \u001b[39m=\u001b[39m idx_typ\u001b[39m.\u001b[39mdeserialize(header[\u001b[39m\"\u001b[39m\u001b[39mindex\u001b[39m\u001b[39m\"\u001b[39m], frames[:index_nframes])\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/frame.py:106\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    104\u001b[0m cls_deserialize \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m    105\u001b[0m column_names \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mcolumn_names\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m--> 106\u001b[0m columns \u001b[39m=\u001b[39m deserialize_columns(header[\u001b[39m\"\u001b[39m\u001b[39mcolumns\u001b[39m\u001b[39m\"\u001b[39m], frames)\n\u001b[1;32m    107\u001b[0m \u001b[39mreturn\u001b[39;00m cls_deserialize\u001b[39m.\u001b[39m_from_data(\u001b[39mdict\u001b[39m(\u001b[39mzip\u001b[39m(column_names, columns)))\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py:2450\u001b[0m, in \u001b[0;36mdeserialize_columns\u001b[0;34m()\u001b[0m\n\u001b[1;32m   2448\u001b[0m col_frame_count \u001b[39m=\u001b[39m meta[\u001b[39m\"\u001b[39m\u001b[39mframe_count\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[1;32m   2449\u001b[0m col_typ \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(meta[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m-> 2450\u001b[0m colobj \u001b[39m=\u001b[39m col_typ\u001b[39m.\u001b[39mdeserialize(meta, frames[:col_frame_count])\n\u001b[1;32m   2451\u001b[0m columns\u001b[39m.\u001b[39mappend(colobj)\n\u001b[1;32m   2452\u001b[0m \u001b[39m# Advance frames\u001b[39;00m\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py:1216\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m   1214\u001b[0m     dtype \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mdtype\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[1;32m   1215\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mdata\u001b[39m\u001b[39m\"\u001b[39m \u001b[39min\u001b[39;00m header:\n\u001b[0;32m-> 1216\u001b[0m     data, frames \u001b[39m=\u001b[39m unpack(header[\u001b[39m\"\u001b[39m\u001b[39mdata\u001b[39m\u001b[39m\"\u001b[39m], frames)\n\u001b[1;32m   1217\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m   1218\u001b[0m     data \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/column/column.py:1204\u001b[0m, in \u001b[0;36munpack\u001b[0;34m()\u001b[0m\n\u001b[1;32m   1202\u001b[0m count \u001b[39m=\u001b[39m header[\u001b[39m\"\u001b[39m\u001b[39mframe_count\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[1;32m   1203\u001b[0m klass \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mloads(header[\u001b[39m\"\u001b[39m\u001b[39mtype-serialized\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m-> 1204\u001b[0m obj \u001b[39m=\u001b[39m klass\u001b[39m.\u001b[39mdeserialize(header, frames[:count])\n\u001b[1;32m   1205\u001b[0m \u001b[39mreturn\u001b[39;00m obj, frames[count:]\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py:574\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    567\u001b[0m \u001b[39m@classmethod\u001b[39m\n\u001b[1;32m    568\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdeserialize\u001b[39m(\u001b[39mcls\u001b[39m, header: \u001b[39mdict\u001b[39m, frames: \u001b[39mlist\u001b[39m):\n\u001b[1;32m    569\u001b[0m     \u001b[39m# TODO: because of the hack in `SpillableBuffer.serialize()` where\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    572\u001b[0m     \u001b[39m# deserialize into `SpillableBufferSlice` when the frames hasn't been\u001b[39;00m\n\u001b[1;32m    573\u001b[0m     \u001b[39m# copied.\u001b[39;00m\n\u001b[0;32m--> 574\u001b[0m     \u001b[39mreturn\u001b[39;00m SpillableBuffer\u001b[39m.\u001b[39mdeserialize(header, frames)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/buffer.py:335\u001b[0m, in \u001b[0;36mdeserialize\u001b[0;34m()\u001b[0m\n\u001b[1;32m    332\u001b[0m     \u001b[39mreturn\u001b[39;00m frame  \u001b[39m# The frame is already deserialized\u001b[39;00m\n\u001b[1;32m    334\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mhasattr\u001b[39m(frame, \u001b[39m\"\u001b[39m\u001b[39m__cuda_array_interface__\u001b[39m\u001b[39m\"\u001b[39m):\n\u001b[0;32m--> 335\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39m_from_device_memory(frame)\n\u001b[1;32m    336\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39m_from_host_memory(frame)\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py:235\u001b[0m, in \u001b[0;36m_from_device_memory\u001b[0;34m()\u001b[0m\n\u001b[1;32m    218\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Create a spillabe buffer from device memory.\u001b[39;00m\n\u001b[1;32m    219\u001b[0m \n\u001b[1;32m    220\u001b[0m \u001b[39mNo data is being copied.\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    232\u001b[0m \u001b[39m    Buffer representing the same device memory as `data`\u001b[39;00m\n\u001b[1;32m    233\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m    234\u001b[0m ret \u001b[39m=\u001b[39m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m_from_device_memory(data)\n\u001b[0;32m--> 235\u001b[0m ret\u001b[39m.\u001b[39m_finalize_init(ptr_desc\u001b[39m=\u001b[39m{\u001b[39m\"\u001b[39m\u001b[39mtype\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mgpu\u001b[39m\u001b[39m\"\u001b[39m}, exposed\u001b[39m=\u001b[39mexposed)\n\u001b[1;32m    236\u001b[0m \u001b[39mreturn\u001b[39;00m ret\n",
+      "File \u001b[0;32m~/miniforge/envs/cugraph_0411/lib/python3.10/site-packages/cudf/core/buffer/spillable_buffer.py:206\u001b[0m, in \u001b[0;36m_finalize_init\u001b[0;34m()\u001b[0m\n\u001b[1;32m    204\u001b[0m manager \u001b[39m=\u001b[39m get_global_manager()\n\u001b[1;32m    205\u001b[0m \u001b[39mif\u001b[39;00m manager \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m--> 206\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m    207\u001b[0m         \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mcannot create \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m without \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    208\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39ma global spill manager\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    209\u001b[0m     )\n\u001b[1;32m    211\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_manager \u001b[39m=\u001b[39m manager\n\u001b[1;32m    212\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_manager\u001b[39m.\u001b[39madd(\u001b[39mself\u001b[39m)\n",
       "\u001b[0;31mValueError\u001b[0m: cannot create <class 'cudf.core.buffer.spillable_buffer.SpillableBuffer'> without a global spill manager"
      ]
     }
diff --git a/notebooks/demo/nx_cugraph_demo.ipynb b/notebooks/demo/nx_cugraph_demo.ipynb
index 6e50370ed..f1ce80aa1 100644
--- a/notebooks/demo/nx_cugraph_demo.ipynb
+++ b/notebooks/demo/nx_cugraph_demo.ipynb
@@ -20,7 +20,7 @@
     "Using `nx-cugraph` with this notebook requires the following: \n",
     "- NVIDIA GPU, Pascal architecture or later\n",
     "- CUDA 11.2, 11.4, 11.5, 11.8, or 12.0\n",
-    "- Python versions 3.9, 3.10, or 3.11\n",
+    "- Python versions 3.10, 3.11, or 3.12\n",
     "- NetworkX >= version 3.2\n",
     "  - _NetworkX 3.0 supports dispatching and is compatible with `nx-cugraph`, but this notebook will demonstrate features added in 3.2_\n",
     "  - At the time of this writing, NetworkX 3.2 is only available from source and can be installed by following the [development version install instructions](https://github.com/networkx/networkx/blob/main/INSTALL.rst#install-the-development-version).\n",
diff --git a/python/nx-cugraph/README.md b/python/nx-cugraph/README.md
index 458421e2b..c3ca0b880 100644
--- a/python/nx-cugraph/README.md
+++ b/python/nx-cugraph/README.md
@@ -8,8 +8,8 @@ to run supported algorithms with GPU acceleration.
 
 nx-cugraph requires the following:
  * NVIDIA GPU, Volta architecture or later, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+
- * CUDA 11.2, 11.4, 11.5, 11.8, or 12.0
- * Python version 3.9, 3.10, or 3.11
+ * CUDA 11.2, 11.4, 11.5, 11.8, 12.0, 12.2, or 12.5
+ * Python version 3.10, 3.11, or 3.12
  * NetworkX >= version 3.0 (version 3.2 or higher recommended)
 
 More details about system requirements can be found in the [RAPIDS System Requirements documentation](https://docs.rapids.ai/install#system-req).
diff --git a/python/nx-cugraph/_nx_cugraph/__init__.py b/python/nx-cugraph/_nx_cugraph/__init__.py
index f58a6e229..a5e45979f 100644
--- a/python/nx-cugraph/_nx_cugraph/__init__.py
+++ b/python/nx-cugraph/_nx_cugraph/__init__.py
@@ -22,6 +22,7 @@
 
 $ python _nx_cugraph/__init__.py
 """
+import os
 
 from _nx_cugraph._version import __version__
 
@@ -35,7 +36,7 @@
     "backend_name": "cugraph",
     "project": "nx-cugraph",
     "package": "nx_cugraph",
-    "url": f"https://github.com/rapidsai/cugraph/tree/branch-{_version_major:0>2}.{_version_minor:0>2}/python/nx-cugraph",
+    "url": f"https://rapids.ai/nx-cugraph",
     "short_summary": "GPU-accelerated backend.",
     # "description": "TODO",
     "functions": {
@@ -293,10 +294,19 @@ def get_info():
 
     for key in info_keys:
         del d[key]
+
+    d["default_config"] = {
+        "use_compat_graphs": os.environ.get("NX_CUGRAPH_USE_COMPAT_GRAPHS", "true")
+        .strip()
+        .lower()
+        == "true",
+    }
     return d
 
 
-def _check_networkx_version():
+def _check_networkx_version() -> tuple[int, int]:
+    """Check the version of networkx and return ``(major, minor)`` version tuple."""
+    import re
     import warnings
 
     import networkx as nx
@@ -310,12 +320,20 @@ def _check_networkx_version():
             UserWarning,
             stacklevel=2,
         )
-    if len(version_minor) > 1:
+
+    # Allow single-digit minor versions, e.g. 3.4, and release candidates, e.g. 3.4rc0
+    pattern = r"^\d(rc\d+)?$"
+
+    if not re.match(pattern, version_minor):
         raise RuntimeWarning(
             f"nx-cugraph version {__version__} does not work with networkx version "
             f"{nx.__version__}. Please upgrade (or fix) your Python environment."
         )
 
+    nxver_major = int(version_major)
+    nxver_minor = int(re.match(r"^\d+", version_minor).group())
+    return (nxver_major, nxver_minor)
+
 
 if __name__ == "__main__":
     from pathlib import Path
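
The `default_config` entry added above derives a boolean from the `NX_CUGRAPH_USE_COMPAT_GRAPHS` environment variable. A minimal sketch of that normalization, using a hypothetical helper name (the real logic lives inline in `get_info()`):

```python
import os

def _use_compat_graphs_default() -> bool:
    # Unset defaults to "true"; any other value (after stripping whitespace and
    # lowercasing) disables compat graphs, including "1" or "yes".
    return os.environ.get("NX_CUGRAPH_USE_COMPAT_GRAPHS", "true").strip().lower() == "true"

# e.g. unset -> True, " True " -> True, "false" -> False, "1" -> False
```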
diff --git a/python/nx-cugraph/lint.yaml b/python/nx-cugraph/lint.yaml
index ce46360e2..dab2ea70e 100644
--- a/python/nx-cugraph/lint.yaml
+++ b/python/nx-cugraph/lint.yaml
@@ -26,7 +26,7 @@ repos:
       - id: mixed-line-ending
       - id: trailing-whitespace
   - repo: https://github.com/abravalheri/validate-pyproject
-    rev: v0.18
+    rev: v0.19
     hooks:
       - id: validate-pyproject
         name: Validate pyproject.toml
@@ -40,29 +40,29 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.16.0
+    rev: v3.17.0
     hooks:
       - id: pyupgrade
-        args: [--py39-plus]
+        args: [--py310-plus]
   - repo: https://github.com/psf/black
-    rev: 24.4.2
+    rev: 24.8.0
     hooks:
       - id: black
       # - id: black-jupyter
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.4
+    rev: v0.6.7
     hooks:
       - id: ruff
         args: [--fix-only, --show-fixes]  # --unsafe-fixes]
   - repo: https://github.com/PyCQA/flake8
-    rev: 7.1.0
+    rev: 7.1.1
     hooks:
       - id: flake8
         args: ['--per-file-ignores=_nx_cugraph/__init__.py:E501', '--extend-ignore=B020,SIM105']  # Why is this necessary?
         additional_dependencies: &flake8_dependencies
           # These versions need updated manually
-          - flake8==7.1.0
-          - flake8-bugbear==24.4.26
+          - flake8==7.1.1
+          - flake8-bugbear==24.8.19
           - flake8-simplify==0.21.0
   - repo: https://github.com/asottile/yesqa
     rev: v1.5.0
@@ -77,7 +77,7 @@ repos:
         additional_dependencies: [tomli]
         files: ^(nx_cugraph|docs)/
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.4
+    rev: v0.6.7
     hooks:
       - id: ruff
   - repo: https://github.com/pre-commit/pre-commit-hooks
diff --git a/python/nx-cugraph/nx_cugraph/__init__.py b/python/nx-cugraph/nx_cugraph/__init__.py
index 542256fa7..4404e57f6 100644
--- a/python/nx-cugraph/nx_cugraph/__init__.py
+++ b/python/nx-cugraph/nx_cugraph/__init__.py
@@ -12,6 +12,11 @@
 # limitations under the License.
 from networkx.exception import *
 
+from _nx_cugraph._version import __git_commit__, __version__
+from _nx_cugraph import _check_networkx_version
+
+_nxver: tuple[int, int] = _check_networkx_version()
+
 from . import utils
 
 from . import classes
@@ -32,7 +37,10 @@
 from . import algorithms
 from .algorithms import *
 
-from _nx_cugraph._version import __git_commit__, __version__
-from _nx_cugraph import _check_networkx_version
+from .interface import BackendInterface
 
-_check_networkx_version()
+BackendInterface.Graph = classes.Graph
+BackendInterface.DiGraph = classes.DiGraph
+BackendInterface.MultiGraph = classes.MultiGraph
+BackendInterface.MultiDiGraph = classes.MultiDiGraph
+del BackendInterface
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/bipartite/generators.py b/python/nx-cugraph/nx_cugraph/algorithms/bipartite/generators.py
index 60276b7d4..214970235 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/bipartite/generators.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/bipartite/generators.py
@@ -16,6 +16,7 @@
 import networkx as nx
 import numpy as np
 
+from nx_cugraph import _nxver
 from nx_cugraph.generators._utils import _create_using_class, _number_and_nodes
 from nx_cugraph.utils import index_dtype, networkx_algorithm
 
@@ -48,7 +49,7 @@ def complete_bipartite_graph(n1, n2, create_using=None):
         nodes.extend(range(n2) if nodes2 is None else nodes2)
         if len(set(nodes)) != len(nodes):
             raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes")
-    if nx.__version__[:3] <= "3.3":
+    if _nxver <= (3, 3):
         name = f"complete_bipartite_graph({orig_n1}, {orig_n2})"
     else:
         name = f"complete_bipartite_graph({n1}, {n2})"
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/community/louvain.py b/python/nx-cugraph/nx_cugraph/algorithms/community/louvain.py
index ea1318060..52c512c45 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/community/louvain.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/community/louvain.py
@@ -12,9 +12,9 @@
 # limitations under the License.
 import warnings
 
-import networkx as nx
 import pylibcugraph as plc
 
+from nx_cugraph import _nxver
 from nx_cugraph.convert import _to_undirected_graph
 from nx_cugraph.utils import (
     _dtype_param,
@@ -27,7 +27,7 @@
 __all__ = ["louvain_communities"]
 
 # max_level argument was added to NetworkX 3.3
-if nx.__version__[:3] <= "3.2":
+if _nxver <= (3, 2):
     _max_level_param = {
         "max_level : int, optional": (
             "Upper limit of the number of macro-iterations (max: 500)."
@@ -81,7 +81,7 @@ def _louvain_communities(
     node_ids, clusters, modularity = plc.louvain(
         resource_handle=plc.ResourceHandle(),
         graph=G._get_plc_graph(weight, 1, dtype),
-        max_level=max_level,  # TODO: add this parameter to NetworkX
+        max_level=max_level,
         threshold=threshold,
         resolution=resolution,
         do_expensive_check=False,
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/core.py b/python/nx-cugraph/nx_cugraph/algorithms/core.py
index 8eb9a9946..e69ee88a1 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/core.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/core.py
@@ -15,6 +15,7 @@
 import pylibcugraph as plc
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 from nx_cugraph.convert import _to_undirected_graph
 from nx_cugraph.utils import (
     _get_int_dtype,
@@ -58,9 +59,12 @@ def _(G):
 @networkx_algorithm(is_incomplete=True, version_added="23.12", _plc="k_truss_subgraph")
 def k_truss(G, k):
     if is_nx := isinstance(G, nx.Graph):
+        is_compat_graph = isinstance(G, nxcg.Graph)
         G = nxcg.from_networkx(G, preserve_all_attrs=True)
+    else:
+        is_compat_graph = False
     if nxcg.number_of_selfloops(G) > 0:
-        if nx.__version__[:3] <= "3.2":
+        if _nxver <= (3, 2):
             exc_class = nx.NetworkXError
         else:
             exc_class = nx.NetworkXNotImplemented
@@ -128,6 +132,7 @@ def k_truss(G, k):
         node_values,
         node_masks,
         key_to_id=key_to_id,
+        use_compat_graph=is_compat_graph,
     )
     new_graph.graph.update(G.graph)
     return new_graph
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/link_analysis/hits_alg.py b/python/nx-cugraph/nx_cugraph/algorithms/link_analysis/hits_alg.py
index e529b83ab..cc59fd5eb 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/link_analysis/hits_alg.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/link_analysis/hits_alg.py
@@ -15,6 +15,7 @@
 import numpy as np
 import pylibcugraph as plc
 
+from nx_cugraph import _nxver
 from nx_cugraph.convert import _to_graph
 from nx_cugraph.utils import (
     _dtype_param,
@@ -53,7 +54,7 @@ def hits(
     if nstart is not None:
         nstart = G._dict_to_nodearray(nstart, 0, dtype)
     if max_iter <= 0:
-        if nx.__version__[:3] <= "3.2":
+        if _nxver <= (3, 2):
             raise ValueError("`maxiter` must be a positive integer.")
         raise nx.PowerIterationFailedConvergence(max_iter)
     try:
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/operators/unary.py b/python/nx-cugraph/nx_cugraph/algorithms/operators/unary.py
index f53b34589..75dc5fbc7 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/operators/unary.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/operators/unary.py
@@ -23,6 +23,7 @@
 
 @networkx_algorithm(version_added="24.02")
 def complement(G):
+    is_compat_graph = isinstance(G, nxcg.Graph)
     G = _to_graph(G)
     N = G._N
     # Upcast to int64 so indices don't overflow.
@@ -43,6 +44,7 @@ def complement(G):
         src_indices.astype(index_dtype),
         dst_indices.astype(index_dtype),
         key_to_id=G.key_to_id,
+        use_compat_graph=is_compat_graph,
     )
 
 
@@ -51,10 +53,16 @@ def reverse(G, copy=True):
     if not G.is_directed():
         raise nx.NetworkXError("Cannot reverse an undirected graph.")
     if isinstance(G, nx.Graph):
-        if not copy:
+        is_compat_graph = isinstance(G, nxcg.Graph)
+        if not copy and not is_compat_graph:
             raise RuntimeError(
                 "Using `copy=False` is invalid when using a NetworkX graph "
                 "as input to `nx_cugraph.reverse`"
             )
         G = nxcg.from_networkx(G, preserve_all_attrs=True)
-    return G.reverse(copy=copy)
+    else:
+        is_compat_graph = False
+    rv = G.reverse(copy=copy)
+    if is_compat_graph:
+        return rv._to_compat_graph()
+    return rv
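
The `reverse` change above illustrates a pattern repeated across the algorithm updates in this patch: remember whether the caller passed the new CPU/GPU compat `Graph` wrapper, compute on the device graph, then wrap the result back so the caller gets the same kind of object it passed in. A self-contained sketch with stand-in classes (none of these names are nx-cugraph API):

```python
class CudaThing:                      # stand-in for CudaGraph
    def _to_compat_graph(self):
        return CompatThing(self)

class CompatThing:                    # stand-in for the compat Graph wrapper
    def __init__(self, cuda):
        self._cuda = cuda

def reverse_like(obj):
    is_compat = isinstance(obj, CompatThing)
    cuda = obj._cuda if is_compat else obj    # unwrap to the device object
    result = CudaThing()                      # pretend this is the computed reverse
    return result._to_compat_graph() if is_compat else result

# The caller always gets back the same kind of object it passed in:
assert isinstance(reverse_like(CudaThing()), CudaThing)
assert isinstance(reverse_like(CompatThing(CudaThing())), CompatThing)
```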
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/generic.py b/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/generic.py
index 7d6d77f34..ab3c72143 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/generic.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/generic.py
@@ -14,6 +14,7 @@
 import numpy as np
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 from nx_cugraph.convert import _to_graph
 from nx_cugraph.utils import _dtype_param, _get_float_dtype, networkx_algorithm
 
@@ -57,7 +58,7 @@ def shortest_path(
                 paths = nxcg.all_pairs_dijkstra_path(G, weight=weight, dtype=dtype)
             else:  # method == 'bellman-ford':
                 paths = nxcg.all_pairs_bellman_ford_path(G, weight=weight, dtype=dtype)
-            if nx.__version__[:3] <= "3.4":
+            if _nxver <= (3, 4):
                 paths = dict(paths)
         # To target
         elif method == "unweighted":
@@ -129,7 +130,7 @@ def shortest_path_length(
         # To target
         elif method == "unweighted":
             lengths = nxcg.single_target_shortest_path_length(G, target)
-            if nx.__version__[:3] <= "3.4":
+            if _nxver <= (3, 4):
                 lengths = dict(lengths)
         elif method == "dijkstra":
             lengths = nxcg.single_source_dijkstra_path_length(
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/unweighted.py b/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/unweighted.py
index 0e98c366e..e9c515632 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/unweighted.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/shortest_paths/unweighted.py
@@ -17,6 +17,7 @@
 import numpy as np
 import pylibcugraph as plc
 
+from nx_cugraph import _nxver
 from nx_cugraph.convert import _to_graph
 from nx_cugraph.utils import _groupby, index_dtype, networkx_algorithm
 
@@ -43,7 +44,7 @@ def single_source_shortest_path_length(G, source, cutoff=None):
 def single_target_shortest_path_length(G, target, cutoff=None):
     G = _to_graph(G)
     rv = _bfs(G, target, cutoff, "Target", return_type="length")
-    if nx.__version__[:3] <= "3.4":
+    if _nxver <= (3, 4):
         return iter(rv.items())
     return rv
 
@@ -61,7 +62,7 @@ def bidirectional_shortest_path(G, source, target):
     # TODO PERF: do bidirectional traversal in core
     G = _to_graph(G)
     if source not in G or target not in G:
-        if nx.__version__[:3] <= "3.3":
+        if _nxver <= (3, 3):
             raise nx.NodeNotFound(
                 f"Either source {source} or target {target} is not in G"
             )
diff --git a/python/nx-cugraph/nx_cugraph/algorithms/traversal/breadth_first_search.py b/python/nx-cugraph/nx_cugraph/algorithms/traversal/breadth_first_search.py
index 5e4466d7d..72d0079cf 100644
--- a/python/nx-cugraph/nx_cugraph/algorithms/traversal/breadth_first_search.py
+++ b/python/nx-cugraph/nx_cugraph/algorithms/traversal/breadth_first_search.py
@@ -18,6 +18,7 @@
 import pylibcugraph as plc
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 from nx_cugraph.convert import _to_graph
 from nx_cugraph.utils import _groupby, index_dtype, networkx_algorithm
 
@@ -57,7 +58,7 @@ def _bfs(G, source, *, depth_limit=None, reverse=False):
     return distances[mask], predecessors[mask], node_ids[mask]
 
 
-if nx.__version__[:3] <= "3.3":
+if _nxver <= (3, 3):
 
     @networkx_algorithm(is_incomplete=True, version_added="24.02", _plc="bfs")
     def generic_bfs_edges(
@@ -132,13 +133,15 @@ def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
         raise NotImplementedError(
             "sort_neighbors argument in bfs_tree is not currently supported"
         )
+    is_compat_graph = isinstance(G, nxcg.Graph)
     G = _check_G_and_source(G, source)
     if depth_limit is not None and depth_limit < 1:
-        return nxcg.DiGraph.from_coo(
+        return nxcg.CudaDiGraph.from_coo(
             1,
             cp.array([], dtype=index_dtype),
             cp.array([], dtype=index_dtype),
             id_to_key=[source],
+            use_compat_graph=is_compat_graph,
         )
 
     distances, predecessors, node_ids = _bfs(
@@ -148,11 +151,12 @@ def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
         reverse=reverse,
     )
     if predecessors.size == 0:
-        return nxcg.DiGraph.from_coo(
+        return nxcg.CudaDiGraph.from_coo(
             1,
             cp.array([], dtype=index_dtype),
             cp.array([], dtype=index_dtype),
             id_to_key=[source],
+            use_compat_graph=is_compat_graph,
         )
     # TODO: create renumbering helper function(s)
     unique_node_ids = cp.unique(cp.hstack((predecessors, node_ids)))
@@ -170,11 +174,12 @@ def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
             old_index: new_index
             for new_index, old_index in enumerate(unique_node_ids.tolist())
         }
-    return nxcg.DiGraph.from_coo(
+    return nxcg.CudaDiGraph.from_coo(
         unique_node_ids.size,
         src_indices,
         dst_indices,
         key_to_id=key_to_id,
+        use_compat_graph=is_compat_graph,
     )
 
 
diff --git a/python/nx-cugraph/nx_cugraph/classes/__init__.py b/python/nx-cugraph/nx_cugraph/classes/__init__.py
index 19a5357da..71168e536 100644
--- a/python/nx-cugraph/nx_cugraph/classes/__init__.py
+++ b/python/nx-cugraph/nx_cugraph/classes/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023, NVIDIA CORPORATION.
+# Copyright (c) 2023-2024, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,9 +10,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .graph import Graph
-from .digraph import DiGraph
-from .multigraph import MultiGraph
-from .multidigraph import MultiDiGraph
+from .graph import CudaGraph, Graph
+from .digraph import CudaDiGraph, DiGraph
+from .multigraph import CudaMultiGraph, MultiGraph
+from .multidigraph import CudaMultiDiGraph, MultiDiGraph
 
 from .function import *
diff --git a/python/nx-cugraph/nx_cugraph/classes/digraph.py b/python/nx-cugraph/nx_cugraph/classes/digraph.py
index e5cfb8f68..178bf44f1 100644
--- a/python/nx-cugraph/nx_cugraph/classes/digraph.py
+++ b/python/nx-cugraph/nx_cugraph/classes/digraph.py
@@ -18,34 +18,108 @@
 import cupy as cp
 import networkx as nx
 import numpy as np
+from networkx.classes.digraph import (
+    _CachedPropertyResetterAdjAndSucc,
+    _CachedPropertyResetterPred,
+)
 
 import nx_cugraph as nxcg
 
 from ..utils import index_dtype
-from .graph import Graph
+from .graph import CudaGraph, Graph
 
 if TYPE_CHECKING:  # pragma: no cover
     from nx_cugraph.typing import AttrKey
 
-__all__ = ["DiGraph"]
+__all__ = ["CudaDiGraph", "DiGraph"]
 
 networkx_api = nxcg.utils.decorators.networkx_class(nx.DiGraph)
 
 
-class DiGraph(Graph):
-    #################
-    # Class methods #
-    #################
+class DiGraph(nx.DiGraph, Graph):
+    _nx_attrs = ("_node", "_adj", "_succ", "_pred")
+
+    name = Graph.name
+    _node = Graph._node
+
+    @property
+    @networkx_api
+    def _adj(self):
+        if (adj := self.__dict__["_adj"]) is None:
+            self._reify_networkx()
+            adj = self.__dict__["_adj"]
+        return adj
+
+    @_adj.setter
+    def _adj(self, val):
+        self._prepare_setter()
+        _CachedPropertyResetterAdjAndSucc.__set__(None, self, val)
+        if cache := getattr(self, "__networkx_cache__", None):
+            cache.clear()
+
+    @property
+    @networkx_api
+    def _succ(self):
+        if (succ := self.__dict__["_succ"]) is None:
+            self._reify_networkx()
+            succ = self.__dict__["_succ"]
+        return succ
+
+    @_succ.setter
+    def _succ(self, val):
+        self._prepare_setter()
+        _CachedPropertyResetterAdjAndSucc.__set__(None, self, val)
+        if cache := getattr(self, "__networkx_cache__", None):
+            cache.clear()
+
+    @property
+    @networkx_api
+    def _pred(self):
+        if (pred := self.__dict__["_pred"]) is None:
+            self._reify_networkx()
+            pred = self.__dict__["_pred"]
+        return pred
+
+    @_pred.setter
+    def _pred(self, val):
+        self._prepare_setter()
+        _CachedPropertyResetterPred.__set__(None, self, val)
+        if cache := getattr(self, "__networkx_cache__", None):
+            cache.clear()
 
     @classmethod
     @networkx_api
     def is_directed(cls) -> bool:
         return True
 
+    @classmethod
+    @networkx_api
+    def is_multigraph(cls) -> bool:
+        return False
+
+    @classmethod
+    def to_cudagraph_class(cls) -> type[CudaDiGraph]:
+        return CudaDiGraph
+
     @classmethod
     def to_networkx_class(cls) -> type[nx.DiGraph]:
         return nx.DiGraph
 
+
+class CudaDiGraph(CudaGraph):
+    #################
+    # Class methods #
+    #################
+
+    is_directed = classmethod(DiGraph.is_directed.__func__)
+    is_multigraph = classmethod(DiGraph.is_multigraph.__func__)
+    to_cudagraph_class = classmethod(DiGraph.to_cudagraph_class.__func__)
+    to_networkx_class = classmethod(DiGraph.to_networkx_class.__func__)
+
+    @classmethod
+    def _to_compat_graph_class(cls) -> type[DiGraph]:
+        return DiGraph
+
     @networkx_api
     def size(self, weight: AttrKey | None = None) -> int:
         if weight is not None:
@@ -57,7 +131,7 @@ def size(self, weight: AttrKey | None = None) -> int:
     ##########################
 
     @networkx_api
-    def reverse(self, copy: bool = True) -> DiGraph:
+    def reverse(self, copy: bool = True) -> CudaDiGraph:
         return self._copy(not copy, self.__class__, reverse=True)
 
     @networkx_api
@@ -162,6 +236,7 @@ def to_undirected(self, reciprocal=False, as_view=False):
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=False,
         )
         if as_view:
             rv.graph = self.graph
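
The `_adj`/`_succ`/`_pred` properties added above all follow the same shape: the CPU-side dict is stored as `None` while the data lives only on the GPU, and the first read reifies it back into NetworkX form. A self-contained sketch of that shape (hypothetical class; the real `_reify_networkx` rebuilds the dicts from the cached `CudaGraph` via `nxcg.to_networkx`):

```python
class LazyCPUView:
    def __init__(self):
        self.__dict__["_adj"] = None          # CPU adjacency not materialized yet

    def _reify_networkx(self):
        # nx-cugraph rebuilds dict-of-dicts from the device graph here;
        # this sketch just fabricates an empty adjacency dict.
        self.__dict__["_adj"] = {}

    @property
    def _adj(self):
        if (adj := self.__dict__["_adj"]) is None:
            self._reify_networkx()
            adj = self.__dict__["_adj"]
        return adj

g = LazyCPUView()
assert g._adj == {}                           # first access reifies on demand
```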
diff --git a/python/nx-cugraph/nx_cugraph/classes/graph.py b/python/nx-cugraph/nx_cugraph/classes/graph.py
index 7425eacb2..cfe1e1c87 100644
--- a/python/nx-cugraph/nx_cugraph/classes/graph.py
+++ b/python/nx-cugraph/nx_cugraph/classes/graph.py
@@ -20,8 +20,13 @@
 import networkx as nx
 import numpy as np
 import pylibcugraph as plc
+from networkx.classes.graph import (
+    _CachedPropertyResetterAdj,
+    _CachedPropertyResetterNode,
+)
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from ..utils import index_dtype
 
@@ -40,57 +45,246 @@
         any_ndarray,
     )
 
-__all__ = ["Graph"]
+__all__ = ["CudaGraph", "Graph"]
 
 networkx_api = nxcg.utils.decorators.networkx_class(nx.Graph)
 
+# The "everything" cache key is an internal implementation detail of NetworkX
+# that may change between releases.
+if _nxver < (3, 4):
+    _CACHE_KEY = (
+        True,  # Include all edge values
+        True,  # Include all node values
+        True,  # Include `.graph` attributes
+    )
+else:
+    _CACHE_KEY = (
+        True,  # Include all edge values
+        True,  # Include all node values
+        # `.graph` attributes are always included now
+    )
+
+# Use to indicate when a full conversion to GPU failed so we don't try again.
+_CANT_CONVERT_TO_GPU = "_CANT_CONVERT_TO_GPU"
+
+
+# `collections.UserDict` was the preferred way to subclass dict, but now
+# subclassing dict directly is much better supported and should work here.
+# This class should only be necessary if the user clears the cache manually.
+class _GraphCache(dict):
+    """Cache that ensures Graph will reify into a NetworkX graph when cleared."""
+
+    _graph: Graph
 
-class Graph:
+    def __init__(self, graph: Graph):
+        self._graph = graph
+
+    def clear(self) -> None:
+        self._graph._reify_networkx()
+        super().clear()
+
+
+class Graph(nx.Graph):
     # Tell networkx to dispatch calls with this object to nx-cugraph
     __networkx_backend__: ClassVar[str] = "cugraph"  # nx >=3.2
     __networkx_plugin__: ClassVar[str] = "cugraph"  # nx <3.2
 
+    # Core attributes of NetworkX graphs that will be copied and cleared as appropriate.
+    # These attributes comprise the edge and node data model for NetworkX graphs.
+    _nx_attrs = ("_node", "_adj")
+
     # Allow networkx dispatch machinery to cache conversions.
     # This means we should clear the cache if we ever mutate the object!
-    __networkx_cache__: dict | None
+    __networkx_cache__: _GraphCache | None
 
     # networkx properties
     graph: dict
-    graph_attr_dict_factory: ClassVar[type] = dict
+    # Should we declare type annotations for the rest?
+
+    # Properties that trigger copying to the CPU
+    def _prepare_setter(self):
+        """Be careful when setting private attributes which may be used during init."""
+        if (
+            # If not present, then this must be in init
+            any(attr not in self.__dict__ for attr in self._nx_attrs)
+            # Already on the CPU
+            or not any(self.__dict__[attr] is None for attr in self._nx_attrs)
+        ):
+            return
+        if self._is_on_gpu:
+            # Copy from GPU to CPU
+            self._reify_networkx()
+            return
+        # Default values
+        for attr in self._nx_attrs:
+            if self.__dict__[attr] is None:
+                if attr == "_succ":
+                    self.__dict__[attr] = self.__dict__["_adj"]
+                else:
+                    self.__dict__[attr] = {}
 
-    # Not networkx properties
-    # We store edge data in COO format with {src,dst}_indices and edge_values.
-    src_indices: cp.ndarray[IndexValue]
-    dst_indices: cp.ndarray[IndexValue]
-    edge_values: dict[AttrKey, cp.ndarray[EdgeValue]]
-    edge_masks: dict[AttrKey, cp.ndarray[bool]]
-    node_values: dict[AttrKey, any_ndarray[NodeValue]]
-    node_masks: dict[AttrKey, any_ndarray[bool]]
-    key_to_id: dict[NodeKey, IndexValue] | None
-    _id_to_key: list[NodeKey] | None
-    _N: int
-    _node_ids: cp.ndarray[IndexValue] | None  # holds plc.SGGraph.vertices_array data
+    @property
+    @networkx_api
+    def _node(self):
+        if (node := self.__dict__["_node"]) is None:
+            self._reify_networkx()
+            node = self.__dict__["_node"]
+        return node
+
+    @_node.setter
+    def _node(self, val):
+        self._prepare_setter()
+        _CachedPropertyResetterNode.__set__(None, self, val)
+        if cache := getattr(self, "__networkx_cache__", None):
+            cache.clear()
 
-    # Used by graph._get_plc_graph
-    _plc_type_map: ClassVar[dict[np.dtype, np.dtype]] = {
-        # signed int
-        np.dtype(np.int8): np.dtype(np.float32),
-        np.dtype(np.int16): np.dtype(np.float32),
-        np.dtype(np.int32): np.dtype(np.float64),
-        np.dtype(np.int64): np.dtype(np.float64),  # raise if abs(x) > 2**53
-        # unsigned int
-        np.dtype(np.uint8): np.dtype(np.float32),
-        np.dtype(np.uint16): np.dtype(np.float32),
-        np.dtype(np.uint32): np.dtype(np.float64),
-        np.dtype(np.uint64): np.dtype(np.float64),  # raise if x > 2**53
-        # other
-        np.dtype(np.bool_): np.dtype(np.float32),
-        np.dtype(np.float16): np.dtype(np.float32),
-    }
-    _plc_allowed_edge_types: ClassVar[set[np.dtype]] = {
-        np.dtype(np.float32),
-        np.dtype(np.float64),
-    }
+    @property
+    @networkx_api
+    def _adj(self):
+        if (adj := self.__dict__["_adj"]) is None:
+            self._reify_networkx()
+            adj = self.__dict__["_adj"]
+        return adj
+
+    @_adj.setter
+    def _adj(self, val):
+        self._prepare_setter()
+        _CachedPropertyResetterAdj.__set__(None, self, val)
+        if cache := getattr(self, "__networkx_cache__", None):
+            cache.clear()
+
+    @property
+    def _is_on_gpu(self) -> bool:
+        """Whether the full graph is on device (in the cache).
+
+        This returns False when only a subset of the graph (such as only
+        edge indices and edge attributes) is on device.
+
+        The graph may be on host (CPU) and device (GPU) at the same time.
+        """
+        cache = getattr(self, "__networkx_cache__", None)
+        if not cache:
+            return False
+        return _CACHE_KEY in cache.get("backends", {}).get("cugraph", {})
+
+    @property
+    def _is_on_cpu(self) -> bool:
+        """Whether the graph is on host as a NetworkX graph.
+
+        This means the core data structures that comprise a NetworkX graph
+        (such as ``G._node`` and ``G._adj``) are present.
+
+        The graph may be on host (CPU) and device (GPU) at the same time.
+        """
+        return self.__dict__["_node"] is not None
+
+    @property
+    def _cudagraph(self):
+        """Return the full ``CudaGraph`` on device, computing if necessary, or None."""
+        nx_cache = getattr(self, "__networkx_cache__", None)
+        if nx_cache is None:
+            nx_cache = {}
+        elif _CANT_CONVERT_TO_GPU in nx_cache:
+            return None
+        cache = nx_cache.setdefault("backends", {}).setdefault("cugraph", {})
+        if (Gcg := cache.get(_CACHE_KEY)) is not None:
+            if isinstance(Gcg, Graph):
+                # This shouldn't happen during normal use, but be extra-careful anyway
+                return Gcg._cudagraph
+            return Gcg
+        if self.__dict__["_node"] is None:
+            raise RuntimeError(
+                f"{type(self).__name__} cannot be converted to the GPU, because it is "
+                "not on the CPU! This is not supposed to be possible. If you believe "
+                "you have found a bug, please report a minimum reproducible example to "
+                "https://github.com/rapidsai/cugraph/issues/new/choose"
+            )
+        try:
+            Gcg = nxcg.from_networkx(
+                self, preserve_edge_attrs=True, preserve_node_attrs=True
+            )
+        except Exception:
+            # Should we warn that the full graph can't be on GPU?
+            nx_cache[_CANT_CONVERT_TO_GPU] = True
+            return None
+        Gcg.graph = self.graph
+        cache[_CACHE_KEY] = Gcg
+        return Gcg
+
+    @_cudagraph.setter
+    def _cudagraph(self, val, *, clear_cpu=True):
+        """Set the full ``CudaGraph`` for this graph, or remove from device if None."""
+        if (cache := getattr(self, "__networkx_cache__", None)) is None:
+            # Should we warn?
+            return
+        # TODO: pay close attention to when we should clear the cache, since
+        # this may or may not be a mutation.
+        cache = cache.setdefault("backends", {}).setdefault("cugraph", {})
+        if val is None:
+            cache.pop(_CACHE_KEY, None)
+        else:
+            self.graph = val.graph
+            cache[_CACHE_KEY] = val
+            if clear_cpu:
+                for key in self._nx_attrs:
+                    self.__dict__[key] = None
+
+    @nx.Graph.name.setter
+    def name(self, s):
+        # Don't clear the cache when setting the name, since `.graph` is shared.
+        # There is a very small risk here for the cache to become (slightly)
+        # inconsistent if graphs from other backends are cached.
+        self.graph["name"] = s
+
+    @classmethod
+    @networkx_api
+    def is_directed(cls) -> bool:
+        return False
+
+    @classmethod
+    @networkx_api
+    def is_multigraph(cls) -> bool:
+        return False
+
+    @classmethod
+    def to_cudagraph_class(cls) -> type[CudaGraph]:
+        return CudaGraph
+
+    @classmethod
+    @networkx_api
+    def to_directed_class(cls) -> type[nxcg.DiGraph]:
+        return nxcg.DiGraph
+
+    @classmethod
+    def to_networkx_class(cls) -> type[nx.Graph]:
+        return nx.Graph
+
+    @classmethod
+    @networkx_api
+    def to_undirected_class(cls) -> type[Graph]:
+        return Graph
+
+    def __init__(self, incoming_graph_data=None, **attr):
+        super().__init__(incoming_graph_data, **attr)
+        self.__networkx_cache__ = _GraphCache(self)
+
+    def _reify_networkx(self) -> None:
+        """Copy graph to host (CPU) if necessary."""
+        if self.__dict__["_node"] is None:
+            # After we make this into an nx graph, we rely on the cache being correct
+            Gcg = self._cudagraph
+            G = nxcg.to_networkx(Gcg)
+            for key in self._nx_attrs:
+                self.__dict__[key] = G.__dict__[key]
+
+    def _become(self, other: Graph):
+        if self.__class__ is not other.__class__:
+            raise TypeError(
+                "Attempting to update graph inplace with graph of different type!"
+            )
+        # Begin with the simplest implementation; do we need to do more?
+        self.__dict__.update(other.__dict__)
+        return self
 
     ####################
     # Creation methods #
@@ -109,9 +303,10 @@ def from_coo(
         *,
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> Graph:
-        new_graph = object.__new__(cls)
+    ) -> Graph | CudaGraph:
+        new_graph = object.__new__(cls.to_cudagraph_class())
         new_graph.__networkx_cache__ = {}
         new_graph.src_indices = src_indices
         new_graph.dst_indices = dst_indices
@@ -173,7 +368,8 @@ def from_coo(
         isolates = nxcg.algorithms.isolate._isolates(new_graph)
         if len(isolates) > 0:
             new_graph._node_ids = cp.arange(new_graph._N, dtype=index_dtype)
-
+        if use_compat_graph or use_compat_graph is None and issubclass(cls, Graph):
+            new_graph = new_graph._to_compat_graph()
         return new_graph
 
     @classmethod
@@ -188,8 +384,9 @@ def from_csr(
         *,
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> Graph:
+    ) -> Graph | CudaGraph:
         N = indptr.size - 1
         src_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
@@ -205,6 +402,7 @@ def from_csr(
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
@@ -220,8 +418,9 @@ def from_csc(
         *,
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> Graph:
+    ) -> Graph | CudaGraph:
         N = indptr.size - 1
         dst_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
@@ -237,6 +436,7 @@ def from_csc(
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
@@ -254,8 +454,9 @@ def from_dcsr(
         *,
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> Graph:
+    ) -> Graph | CudaGraph:
         src_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
             np.repeat(compressed_srcs.get(), cp.diff(indptr).get())
@@ -270,6 +471,7 @@ def from_dcsr(
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
@@ -287,8 +489,9 @@ def from_dcsc(
         *,
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> Graph:
+    ) -> Graph | CudaGraph:
         dst_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
             np.repeat(compressed_dsts.get(), cp.diff(indptr).get())
@@ -303,13 +506,75 @@ def from_dcsc(
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
-    def __new__(cls, incoming_graph_data=None, **attr) -> Graph:
+
+class CudaGraph:
+    # Tell networkx to dispatch calls with this object to nx-cugraph
+    __networkx_backend__: ClassVar[str] = "cugraph"  # nx >=3.2
+    __networkx_plugin__: ClassVar[str] = "cugraph"  # nx <3.2
+
+    # Allow networkx dispatch machinery to cache conversions.
+    # This means we should clear the cache if we ever mutate the object!
+    __networkx_cache__: dict | None
+
+    # networkx properties
+    graph: dict
+    graph_attr_dict_factory: ClassVar[type] = dict
+
+    # Not networkx properties
+    # We store edge data in COO format with {src,dst}_indices and edge_values.
+    src_indices: cp.ndarray[IndexValue]
+    dst_indices: cp.ndarray[IndexValue]
+    edge_values: dict[AttrKey, cp.ndarray[EdgeValue]]
+    edge_masks: dict[AttrKey, cp.ndarray[bool]]
+    node_values: dict[AttrKey, any_ndarray[NodeValue]]
+    node_masks: dict[AttrKey, any_ndarray[bool]]
+    key_to_id: dict[NodeKey, IndexValue] | None
+    _id_to_key: list[NodeKey] | None
+    _N: int
+    _node_ids: cp.ndarray[IndexValue] | None  # holds plc.SGGraph.vertices_array data
+
+    # Used by graph._get_plc_graph
+    _plc_type_map: ClassVar[dict[np.dtype, np.dtype]] = {
+        # signed int
+        np.dtype(np.int8): np.dtype(np.float32),
+        np.dtype(np.int16): np.dtype(np.float32),
+        np.dtype(np.int32): np.dtype(np.float64),
+        np.dtype(np.int64): np.dtype(np.float64),  # raise if abs(x) > 2**53
+        # unsigned int
+        np.dtype(np.uint8): np.dtype(np.float32),
+        np.dtype(np.uint16): np.dtype(np.float32),
+        np.dtype(np.uint32): np.dtype(np.float64),
+        np.dtype(np.uint64): np.dtype(np.float64),  # raise if x > 2**53
+        # other
+        np.dtype(np.bool_): np.dtype(np.float32),
+        np.dtype(np.float16): np.dtype(np.float32),
+    }
+    _plc_allowed_edge_types: ClassVar[set[np.dtype]] = {
+        np.dtype(np.float32),
+        np.dtype(np.float64),
+    }
+
+    ####################
+    # Creation methods #
+    ####################
+
+    from_coo = classmethod(Graph.from_coo.__func__)
+    from_csr = classmethod(Graph.from_csr.__func__)
+    from_csc = classmethod(Graph.from_csc.__func__)
+    from_dcsr = classmethod(Graph.from_dcsr.__func__)
+    from_dcsc = classmethod(Graph.from_dcsc.__func__)
+
+    def __new__(cls, incoming_graph_data=None, **attr) -> CudaGraph:
         if incoming_graph_data is None:
             new_graph = cls.from_coo(
-                0, cp.empty(0, index_dtype), cp.empty(0, index_dtype)
+                0,
+                cp.empty(0, index_dtype),
+                cp.empty(0, index_dtype),
+                use_compat_graph=False,
             )
         elif incoming_graph_data.__class__ is cls:
             new_graph = incoming_graph_data.copy()
@@ -318,34 +583,30 @@ def __new__(cls, incoming_graph_data=None, **attr) -> Graph:
         else:
             raise NotImplementedError
         new_graph.graph.update(attr)
+        # We could return Graph here (if configured), but let's not for now
         return new_graph
 
     #################
     # Class methods #
     #################
 
-    @classmethod
-    @networkx_api
-    def is_directed(cls) -> bool:
-        return False
+    is_directed = classmethod(Graph.is_directed.__func__)
+    is_multigraph = classmethod(Graph.is_multigraph.__func__)
+    to_cudagraph_class = classmethod(Graph.to_cudagraph_class.__func__)
+    to_networkx_class = classmethod(Graph.to_networkx_class.__func__)
 
     @classmethod
     @networkx_api
-    def is_multigraph(cls) -> bool:
-        return False
+    def to_directed_class(cls) -> type[nxcg.CudaDiGraph]:
+        return nxcg.CudaDiGraph
 
     @classmethod
     @networkx_api
-    def to_directed_class(cls) -> type[nxcg.DiGraph]:
-        return nxcg.DiGraph
-
-    @classmethod
-    def to_networkx_class(cls) -> type[nx.Graph]:
-        return nx.Graph
+    def to_undirected_class(cls) -> type[CudaGraph]:
+        return CudaGraph
 
     @classmethod
-    @networkx_api
-    def to_undirected_class(cls) -> type[Graph]:
+    def _to_compat_graph_class(cls) -> type[Graph]:
         return Graph
 
     ##############
@@ -438,7 +699,7 @@ def clear_edges(self) -> None:
             cache.clear()
 
     @networkx_api
-    def copy(self, as_view: bool = False) -> Graph:
+    def copy(self, as_view: bool = False) -> CudaGraph:
         # Does shallow copy in networkx
         return self._copy(as_view, self.__class__)
 
@@ -534,14 +795,19 @@ def size(self, weight: AttrKey | None = None) -> int:
         return int(cp.count_nonzero(self.src_indices <= self.dst_indices))
 
     @networkx_api
-    def to_directed(self, as_view: bool = False) -> nxcg.DiGraph:
+    def to_directed(self, as_view: bool = False) -> nxcg.CudaDiGraph:
         return self._copy(as_view, self.to_directed_class())
 
     @networkx_api
-    def to_undirected(self, as_view: bool = False) -> Graph:
+    def to_undirected(self, as_view: bool = False) -> CudaGraph:
         # Does deep copy in networkx
         return self._copy(as_view, self.to_undirected_class())
 
+    def _to_compat_graph(self) -> Graph:
+        rv = self._to_compat_graph_class()()
+        rv._cudagraph = self
+        return rv
+
     # Not implemented...
     # adj, adjacency, add_edge, add_edges_from, add_node,
     # add_nodes_from, add_weighted_edges_from, degree,
@@ -552,8 +818,8 @@ def to_undirected(self, as_view: bool = False) -> Graph:
     # Private methods #
     ###################
 
-    def _copy(self, as_view: bool, cls: type[Graph], reverse: bool = False):
-        # DRY warning: see also MultiGraph._copy
+    def _copy(self, as_view: bool, cls: type[CudaGraph], reverse: bool = False):
+        # DRY warning: see also CudaMultiGraph._copy
         src_indices = self.src_indices
         dst_indices = self.dst_indices
         edge_values = self.edge_values
@@ -593,6 +859,7 @@ def _copy(self, as_view: bool, cls: type[Graph], reverse: bool = False):
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=False,
         )
         if as_view:
             rv.graph = self.graph
@@ -689,6 +956,14 @@ def _get_plc_graph(
             src_indices = src_indices.astype(index_dtype)
             dst_indices = dst_indices.astype(index_dtype)
 
+        # This sets drop_multi_edges=True for non-multigraph input, which means
+        # the data in self.src_indices and self.dst_indices may not be
+        # identical to that contained in the returned pcl.SGGraph (the returned
+        # SGGraph may have fewer edges since duplicates are dropped). Ideally
+        # self.src_indices and self.dst_indices would be updated to have
+        # duplicate edges removed for non-multigraph instances, but that
+        # requires additional code which would be redundant and likely not as
+        # performant as the code in PLC.
         return plc.SGGraph(
             resource_handle=plc.ResourceHandle(),
             graph_properties=plc.GraphProperties(
@@ -702,10 +977,11 @@ def _get_plc_graph(
             renumber=False,
             do_expensive_check=False,
             vertices_array=self._node_ids,
+            drop_multi_edges=not self.is_multigraph(),
         )
 
     def _sort_edge_indices(self, primary="src"):
-        # DRY warning: see also MultiGraph._sort_edge_indices
+        # DRY warning: see also CudaMultiGraph._sort_edge_indices
         if primary == "src":
             stacked = cp.vstack((self.dst_indices, self.src_indices))
         elif primary == "dst":
@@ -727,7 +1003,7 @@ def _sort_edge_indices(self, primary="src"):
             {key: val[indices] for key, val in self.edge_masks.items()}
         )
 
-    def _become(self, other: Graph):
+    def _become(self, other: CudaGraph):
         if self.__class__ is not other.__class__:
             raise TypeError(
                 "Attempting to update graph inplace with graph of different type!"
diff --git a/python/nx-cugraph/nx_cugraph/classes/multidigraph.py b/python/nx-cugraph/nx_cugraph/classes/multidigraph.py
index 2e7a55a9e..5a6595567 100644
--- a/python/nx-cugraph/nx_cugraph/classes/multidigraph.py
+++ b/python/nx-cugraph/nx_cugraph/classes/multidigraph.py
@@ -16,24 +16,51 @@
 
 import nx_cugraph as nxcg
 
-from .digraph import DiGraph
-from .multigraph import MultiGraph
+from .digraph import CudaDiGraph, DiGraph
+from .graph import Graph
+from .multigraph import CudaMultiGraph, MultiGraph
 
-__all__ = ["MultiDiGraph"]
+__all__ = ["CudaMultiDiGraph", "MultiDiGraph"]
 
 networkx_api = nxcg.utils.decorators.networkx_class(nx.MultiDiGraph)
 
 
-class MultiDiGraph(MultiGraph, DiGraph):
+class MultiDiGraph(nx.MultiDiGraph, MultiGraph, DiGraph):
+    name = Graph.name
+    _node = Graph._node
+    _adj = DiGraph._adj
+    _succ = DiGraph._succ
+    _pred = DiGraph._pred
+
     @classmethod
     @networkx_api
     def is_directed(cls) -> bool:
         return True
 
+    @classmethod
+    @networkx_api
+    def is_multigraph(cls) -> bool:
+        return True
+
+    @classmethod
+    def to_cudagraph_class(cls) -> type[CudaMultiDiGraph]:
+        return CudaMultiDiGraph
+
     @classmethod
     def to_networkx_class(cls) -> type[nx.MultiDiGraph]:
         return nx.MultiDiGraph
 
+
+class CudaMultiDiGraph(CudaMultiGraph, CudaDiGraph):
+    is_directed = classmethod(MultiDiGraph.is_directed.__func__)
+    is_multigraph = classmethod(MultiDiGraph.is_multigraph.__func__)
+    to_cudagraph_class = classmethod(MultiDiGraph.to_cudagraph_class.__func__)
+    to_networkx_class = classmethod(MultiDiGraph.to_networkx_class.__func__)
+
+    @classmethod
+    def _to_compat_graph_class(cls) -> type[MultiDiGraph]:
+        return MultiDiGraph
+
     ##########################
     # NetworkX graph methods #
     ##########################
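
The `Cuda*` classes above reuse the compat classes' classmethods by rebinding their underlying functions (e.g. `is_directed = classmethod(MultiDiGraph.is_directed.__func__)`), so `cls` resolves to whichever class the attribute is attached to. A minimal standalone sketch of that pattern, with hypothetical class names rather than the nx-cugraph ones:

```python
class Base:
    @classmethod
    def kind(cls) -> str:
        # `cls` is whichever class the classmethod is looked up on
        return cls.__name__


class Other:
    # Reuse Base.kind's underlying function, rebound so `cls` is Other
    kind = classmethod(Base.kind.__func__)


assert Base.kind() == "Base"
assert Other.kind() == "Other"
```
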
diff --git a/python/nx-cugraph/nx_cugraph/classes/multigraph.py b/python/nx-cugraph/nx_cugraph/classes/multigraph.py
index 23d9faa87..c8c8f1dfb 100644
--- a/python/nx-cugraph/nx_cugraph/classes/multigraph.py
+++ b/python/nx-cugraph/nx_cugraph/classes/multigraph.py
@@ -22,7 +22,7 @@
 import nx_cugraph as nxcg
 
 from ..utils import index_dtype
-from .graph import Graph
+from .graph import CudaGraph, Graph, _GraphCache
 
 if TYPE_CHECKING:
     from nx_cugraph.typing import (
@@ -34,32 +34,47 @@
         NodeValue,
         any_ndarray,
     )
-__all__ = ["MultiGraph"]
+__all__ = ["MultiGraph", "CudaMultiGraph"]
 
 networkx_api = nxcg.utils.decorators.networkx_class(nx.MultiGraph)
 
 
-class MultiGraph(Graph):
-    # networkx properties
-    edge_key_dict_factory: ClassVar[type] = dict
+class MultiGraph(nx.MultiGraph, Graph):
+    name = Graph.name
+    _node = Graph._node
+    _adj = Graph._adj
 
-    # Not networkx properties
+    @classmethod
+    @networkx_api
+    def is_directed(cls) -> bool:
+        return False
 
-    # In a MultiGraph, each edge has a unique `(src, dst, key)` key.
-    # By default, `key` is 0 if possible, else 1, else 2, etc.
-    # This key can be any hashable Python object in NetworkX.
-    # We don't use a dict for our data structure here, because
-    # that would require a `(src, dst, key)` key.
-    # Instead, we keep `edge_keys` and/or `edge_indices`.
-    # `edge_keys` is the list of Python objects for each edge.
-    # `edge_indices` is for the common case of default multiedge keys,
-    # in which case we can store it as a cupy array.
-    # `edge_indices` is generally preferred. It is possible to provide
-    # both where edge_indices is the default and edge_keys is anything.
-    # It is also possible for them both to be None, which means the
-    # default edge indices has not yet been calculated.
-    edge_indices: cp.ndarray[IndexValue] | None
-    edge_keys: list[EdgeKey] | None
+    @classmethod
+    @networkx_api
+    def is_multigraph(cls) -> bool:
+        return True
+
+    @classmethod
+    def to_cudagraph_class(cls) -> type[CudaMultiGraph]:
+        return CudaMultiGraph
+
+    @classmethod
+    @networkx_api
+    def to_directed_class(cls) -> type[nxcg.MultiDiGraph]:
+        return nxcg.MultiDiGraph
+
+    @classmethod
+    def to_networkx_class(cls) -> type[nx.MultiGraph]:
+        return nx.MultiGraph
+
+    @classmethod
+    @networkx_api
+    def to_undirected_class(cls) -> type[MultiGraph]:
+        return MultiGraph
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        super().__init__(incoming_graph_data, multigraph_input, **attr)
+        self.__networkx_cache__ = _GraphCache(self)
 
     ####################
     # Creation methods #
@@ -80,9 +95,10 @@ def from_coo(
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
         edge_keys: list[EdgeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> MultiGraph:
-        new_graph = super().from_coo(
+    ) -> MultiGraph | CudaMultiGraph:
+        new_graph = super(cls.to_undirected_class(), cls).from_coo(
             N,
             src_indices,
             dst_indices,
@@ -92,6 +108,7 @@ def from_coo(
             node_masks,
             key_to_id=key_to_id,
             id_to_key=id_to_key,
+            use_compat_graph=False,
             **attr,
         )
         new_graph.edge_indices = edge_indices
@@ -102,6 +119,8 @@ def from_coo(
             and len(new_graph.edge_keys) != src_indices.size
         ):
             raise ValueError
+        if use_compat_graph or use_compat_graph is None and issubclass(cls, Graph):
+            new_graph = new_graph._to_compat_graph()
         return new_graph
 
     @classmethod
@@ -118,8 +137,9 @@ def from_csr(
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
         edge_keys: list[EdgeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> MultiGraph:
+    ) -> MultiGraph | CudaMultiGraph:
         N = indptr.size - 1
         src_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
@@ -137,6 +157,7 @@ def from_csr(
             key_to_id=key_to_id,
             id_to_key=id_to_key,
             edge_keys=edge_keys,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
@@ -154,8 +175,9 @@ def from_csc(
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
         edge_keys: list[EdgeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> MultiGraph:
+    ) -> MultiGraph | CudaMultiGraph:
         N = indptr.size - 1
         dst_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
@@ -173,6 +195,7 @@ def from_csc(
             key_to_id=key_to_id,
             id_to_key=id_to_key,
             edge_keys=edge_keys,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
@@ -192,8 +215,9 @@ def from_dcsr(
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
         edge_keys: list[EdgeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> MultiGraph:
+    ) -> MultiGraph | CudaMultiGraph:
         src_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
             np.repeat(compressed_srcs.get(), cp.diff(indptr).get())
@@ -210,6 +234,7 @@ def from_dcsr(
             key_to_id=key_to_id,
             id_to_key=id_to_key,
             edge_keys=edge_keys,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
@@ -229,8 +254,9 @@ def from_dcsc(
         key_to_id: dict[NodeKey, IndexValue] | None = None,
         id_to_key: list[NodeKey] | None = None,
         edge_keys: list[EdgeKey] | None = None,
+        use_compat_graph: bool | None = None,
         **attr,
-    ) -> Graph:
+    ) -> MultiGraph | CudaGraph:
         dst_indices = cp.array(
             # cp.repeat is slow to use here, so use numpy instead
             np.repeat(compressed_dsts.get(), cp.diff(indptr).get())
@@ -247,12 +273,46 @@ def from_dcsc(
             key_to_id=key_to_id,
             id_to_key=id_to_key,
             edge_keys=edge_keys,
+            use_compat_graph=use_compat_graph,
             **attr,
         )
 
+
+class CudaMultiGraph(CudaGraph):
+    # networkx properties
+    edge_key_dict_factory: ClassVar[type] = dict
+
+    # Not networkx properties
+
+    # In a MultiGraph, each edge has a unique `(src, dst, key)` key.
+    # By default, `key` is 0 if possible, else 1, else 2, etc.
+    # This key can be any hashable Python object in NetworkX.
+    # We don't use a dict for our data structure here, because
+    # that would require a `(src, dst, key)` key.
+    # Instead, we keep `edge_keys` and/or `edge_indices`.
+    # `edge_keys` is the list of Python objects for each edge.
+    # `edge_indices` is for the common case of default multiedge keys,
+    # in which case we can store it as a cupy array.
+    # `edge_indices` is generally preferred. It is possible to provide
+    # both where edge_indices is the default and edge_keys is anything.
+    # It is also possible for them both to be None, which means the
+    # default edge indices have not yet been calculated.
+    edge_indices: cp.ndarray[IndexValue] | None
+    edge_keys: list[EdgeKey] | None
+
+    ####################
+    # Creation methods #
+    ####################
+
+    from_coo = classmethod(MultiGraph.from_coo.__func__)
+    from_csr = classmethod(MultiGraph.from_csr.__func__)
+    from_csc = classmethod(MultiGraph.from_csc.__func__)
+    from_dcsr = classmethod(MultiGraph.from_dcsr.__func__)
+    from_dcsc = classmethod(MultiGraph.from_dcsc.__func__)
+
     def __new__(
         cls, incoming_graph_data=None, multigraph_input=None, **attr
-    ) -> MultiGraph:
+    ) -> CudaMultiGraph:
         if isinstance(incoming_graph_data, dict) and multigraph_input is not False:
             new_graph = nxcg.from_networkx(
                 nx.MultiGraph(incoming_graph_data, multigraph_input=multigraph_input),
@@ -267,28 +327,23 @@ def __new__(
     # Class methods #
     #################
 
-    @classmethod
-    @networkx_api
-    def is_directed(cls) -> bool:
-        return False
+    is_directed = classmethod(MultiGraph.is_directed.__func__)
+    is_multigraph = classmethod(MultiGraph.is_multigraph.__func__)
+    to_cudagraph_class = classmethod(MultiGraph.to_cudagraph_class.__func__)
+    to_networkx_class = classmethod(MultiGraph.to_networkx_class.__func__)
 
     @classmethod
     @networkx_api
-    def is_multigraph(cls) -> bool:
-        return True
+    def to_directed_class(cls) -> type[nxcg.CudaMultiDiGraph]:
+        return nxcg.CudaMultiDiGraph
 
     @classmethod
     @networkx_api
-    def to_directed_class(cls) -> type[nxcg.MultiDiGraph]:
-        return nxcg.MultiDiGraph
-
-    @classmethod
-    def to_networkx_class(cls) -> type[nx.MultiGraph]:
-        return nx.MultiGraph
+    def to_undirected_class(cls) -> type[CudaMultiGraph]:
+        return CudaMultiGraph
 
     @classmethod
-    @networkx_api
-    def to_undirected_class(cls) -> type[MultiGraph]:
+    def _to_compat_graph_class(cls) -> type[MultiGraph]:
         return MultiGraph
 
     ##########################
@@ -308,7 +363,7 @@ def clear_edges(self) -> None:
         self.edge_keys = None
 
     @networkx_api
-    def copy(self, as_view: bool = False) -> MultiGraph:
+    def copy(self, as_view: bool = False) -> CudaMultiGraph:
         # Does shallow copy in networkx
         return self._copy(as_view, self.__class__)
 
@@ -391,11 +446,11 @@ def has_edge(self, u: NodeKey, v: NodeKey, key: EdgeKey | None = None) -> bool:
         return any(edge_keys[i] == key for i in indices.tolist())
 
     @networkx_api
-    def to_directed(self, as_view: bool = False) -> nxcg.MultiDiGraph:
+    def to_directed(self, as_view: bool = False) -> nxcg.CudaMultiDiGraph:
         return self._copy(as_view, self.to_directed_class())
 
     @networkx_api
-    def to_undirected(self, as_view: bool = False) -> MultiGraph:
+    def to_undirected(self, as_view: bool = False) -> CudaMultiGraph:
         # Does deep copy in networkx
         return self._copy(as_view, self.to_undirected_class())
 
@@ -403,8 +458,8 @@ def to_undirected(self, as_view: bool = False) -> MultiGraph:
     # Private methods #
     ###################
 
-    def _copy(self, as_view: bool, cls: type[Graph], reverse: bool = False):
-        # DRY warning: see also Graph._copy
+    def _copy(self, as_view: bool, cls: type[CudaGraph], reverse: bool = False):
+        # DRY warning: see also CudaGraph._copy
         src_indices = self.src_indices
         dst_indices = self.dst_indices
         edge_indices = self.edge_indices
@@ -451,6 +506,7 @@ def _copy(self, as_view: bool, cls: type[Graph], reverse: bool = False):
             key_to_id=key_to_id,
             id_to_key=id_to_key,
             edge_keys=edge_keys,
+            use_compat_graph=False,
         )
         if as_view:
             rv.graph = self.graph
@@ -460,7 +516,7 @@ def _copy(self, as_view: bool, cls: type[Graph], reverse: bool = False):
         return rv
 
     def _sort_edge_indices(self, primary="src"):
-        # DRY warning: see also Graph._sort_edge_indices
+        # DRY warning: see also CudaGraph._sort_edge_indices
         if self.edge_indices is None and self.edge_keys is None:
             return super()._sort_edge_indices(primary=primary)
         if primary == "src":
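
The comment block moved into `CudaMultiGraph` above describes two interchangeable ways to store multi-edge keys: a compact `edge_indices` cupy array for the common default-key case, or an `edge_keys` Python list of arbitrary hashable keys. A rough illustration of the two representations for the same three edges (a sketch for clarity, not code from this change; assumes `cupy` is available):

```python
import cupy as cp

# Three multigraph edges: (0, 1, key=0), (0, 1, key=1), (1, 2, key=0)
src_indices = cp.array([0, 0, 1], dtype=cp.int32)
dst_indices = cp.array([1, 1, 2], dtype=cp.int32)

# Common case: default integer keys stored compactly on the GPU
edge_indices = cp.array([0, 1, 0], dtype=cp.int32)

# General case: arbitrary hashable keys kept as a host-side Python list
edge_keys = [0, 1, 0]  # could also be e.g. ["a", "b", "a"]
```
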
diff --git a/python/nx-cugraph/nx_cugraph/convert.py b/python/nx-cugraph/nx_cugraph/convert.py
index 56d16d837..a872f13ac 100644
--- a/python/nx-cugraph/nx_cugraph/convert.py
+++ b/python/nx-cugraph/nx_cugraph/convert.py
@@ -12,6 +12,7 @@
 # limitations under the License.
 from __future__ import annotations
 
+import functools
 import itertools
 import operator as op
 from collections import Counter, defaultdict
@@ -23,9 +24,13 @@
 import numpy as np
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from .utils import index_dtype, networkx_algorithm
-from .utils.misc import pairwise
+from .utils.misc import _And_NotImplementedError, pairwise
+
+if _nxver >= (3, 4):
+    from networkx.utils.backends import _get_cache_key, _get_from_cache, _set_to_cache
 
 if TYPE_CHECKING:  # pragma: no cover
     from nx_cugraph.typing import AttrKey, Dtype, EdgeValue, NodeValue, any_ndarray
@@ -60,6 +65,27 @@ def _iterate_values(graph, adj, is_dicts, func):
     return func(it), False
 
 
+# Consider adding this to `utils` if it is useful elsewhere
+def _fallback_decorator(func):
+    """Catch and convert exceptions to ``NotImplementedError``; use as a decorator.
+
+    ``nx.NetworkXError`` are raised without being converted. This allows
+    falling back to other backends if, for example, conversion to GPU failed.
+    """
+
+    @functools.wraps(func)
+    def inner(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except nx.NetworkXError:
+            raise
+        except Exception as exc:
+            raise _And_NotImplementedError(exc) from exc
+
+    return inner
+
+
+@_fallback_decorator
 def from_networkx(
     graph: nx.Graph,
     edge_attrs: AttrKey | dict[AttrKey, EdgeValue | None] | None = None,
@@ -74,7 +100,8 @@ def from_networkx(
     as_directed: bool = False,
     name: str | None = None,
     graph_name: str | None = None,
-) -> nxcg.Graph:
+    use_compat_graph: bool | None = False,
+) -> nxcg.Graph | nxcg.CudaGraph:
     """Convert a networkx graph to nx_cugraph graph; can convert all attributes.
 
     Parameters
@@ -114,10 +141,16 @@ def from_networkx(
         The name of the algorithm when dispatched from networkx.
     graph_name : str, optional
         The name of the graph argument being converted when dispatched from networkx.
+    use_compat_graph : bool or None, default False
+        Indicate whether to return a graph that is compatible with NetworkX graphs.
+        For example, ``nx_cugraph.Graph`` can be used as a NetworkX graph and can
+        reside in host (CPU) or device (GPU) memory. The default is False, which
+        returns e.g. ``nx_cugraph.CudaGraph``, which resides only on the device
+        (GPU) and is not fully compatible as a NetworkX graph.
 
     Returns
     -------
-    nx_cugraph.Graph
+    nx_cugraph.Graph or nx_cugraph.CudaGraph
 
     Notes
     -----
@@ -145,6 +178,41 @@ def from_networkx(
             graph = G
         else:
             raise TypeError(f"Expected networkx.Graph; got {type(graph)}")
+    elif isinstance(graph, nxcg.Graph):
+        if (
+            use_compat_graph
+            # Use compat graphs by default
+            or use_compat_graph is None
+            and (_nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs)
+        ):
+            return graph
+        if graph._is_on_gpu:
+            return graph._cudagraph
+        if not graph._is_on_cpu:
+            raise RuntimeError(
+                f"{type(graph).__name__} cannot be converted to the GPU, because it is "
+                "not on the CPU! This is not supposed to be possible. If you believe "
+                "you have found a bug, please report a minimum reproducible example to "
+                "https://github.com/rapidsai/cugraph/issues/new/choose"
+            )
+        if _nxver >= (3, 4):
+            cache_key = _get_cache_key(
+                edge_attrs=edge_attrs,
+                node_attrs=node_attrs,
+                preserve_edge_attrs=preserve_edge_attrs,
+                preserve_node_attrs=preserve_node_attrs,
+                preserve_graph_attrs=preserve_graph_attrs,
+            )
+            cache = getattr(graph, "__networkx_cache__", None)
+            if cache is not None:
+                cache = cache.setdefault("backends", {}).setdefault("cugraph", {})
+                compat_key, rv = _get_from_cache(cache, cache_key)
+                if rv is not None:
+                    if isinstance(rv, nxcg.Graph):
+                        # This shouldn't happen during normal use, but be extra-careful
+                        rv = rv._cudagraph
+                    if rv is not None:
+                        return rv
 
     if preserve_all_attrs:
         preserve_edge_attrs = True
@@ -165,7 +233,12 @@ def from_networkx(
         else:
             node_attrs = {node_attrs: None}
 
-    if graph.__class__ in {nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph}:
+    if graph.__class__ in {
+        nx.Graph,
+        nx.DiGraph,
+        nx.MultiGraph,
+        nx.MultiDiGraph,
+    } or isinstance(graph, nxcg.Graph):
         # This is a NetworkX private attribute, but is much faster to use
         adj = graph._adj
     else:
@@ -455,9 +528,9 @@ def func(it, edge_attr=edge_attr, dtype=dtype):
                 # if vals.ndim > 1: ...
     if graph.is_multigraph():
         if graph.is_directed() or as_directed:
-            klass = nxcg.MultiDiGraph
+            klass = nxcg.CudaMultiDiGraph
         else:
-            klass = nxcg.MultiGraph
+            klass = nxcg.CudaMultiGraph
         rv = klass.from_coo(
             N,
             src_indices,
@@ -469,12 +542,13 @@ def func(it, edge_attr=edge_attr, dtype=dtype):
             node_masks,
             key_to_id=key_to_id,
             edge_keys=edge_keys,
+            use_compat_graph=False,
         )
     else:
         if graph.is_directed() or as_directed:
-            klass = nxcg.DiGraph
+            klass = nxcg.CudaDiGraph
         else:
-            klass = nxcg.Graph
+            klass = nxcg.CudaGraph
         rv = klass.from_coo(
             N,
             src_indices,
@@ -484,9 +558,22 @@ def func(it, edge_attr=edge_attr, dtype=dtype):
             node_values,
             node_masks,
             key_to_id=key_to_id,
+            use_compat_graph=False,
         )
     if preserve_graph_attrs:
         rv.graph.update(graph.graph)  # deepcopy?
+    if _nxver >= (3, 4) and isinstance(graph, nxcg.Graph) and cache is not None:
+        # Make sure this conversion is added to the cache, and make all of
+        # our graphs share the same `.graph` attribute for consistency.
+        rv.graph = graph.graph
+        _set_to_cache(cache, cache_key, rv)
+    if (
+        use_compat_graph
+        # Use compat graphs by default
+        or use_compat_graph is None
+        and (_nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs)
+    ):
+        return rv._to_compat_graph()
     return rv
 
 
@@ -535,14 +622,16 @@ def _iter_attr_dicts(
     return full_dicts
 
 
-def to_networkx(G: nxcg.Graph, *, sort_edges: bool = False) -> nx.Graph:
+def to_networkx(
+    G: nxcg.Graph | nxcg.CudaGraph, *, sort_edges: bool = False
+) -> nx.Graph:
     """Convert a nx_cugraph graph to networkx graph.
 
     All edge and node attributes and ``G.graph`` properties are converted.
 
     Parameters
     ----------
-    G : nx_cugraph.Graph
+    G : nx_cugraph.Graph or nx_cugraph.CudaGraph
     sort_edges : bool, default False
         Whether to sort the edge data of the input graph by (src, dst) indices
         before converting. This can be useful to convert to networkx graphs
@@ -557,6 +646,9 @@ def to_networkx(G: nxcg.Graph, *, sort_edges: bool = False) -> nx.Graph:
     --------
     from_networkx : The opposite; convert networkx graph to nx_cugraph graph
     """
+    if isinstance(G, nxcg.Graph):
+        # These graphs are already NetworkX graphs :)
+        return G
     rv = G.to_networkx_class()()
     id_to_key = G.id_to_key
     if sort_edges:
@@ -623,13 +715,13 @@ def _to_graph(
     edge_attr: AttrKey | None = None,
     edge_default: EdgeValue | None = 1,
     edge_dtype: Dtype | None = None,
-) -> nxcg.Graph | nxcg.DiGraph:
+) -> nxcg.CudaGraph | nxcg.CudaDiGraph:
     """Ensure that input type is a nx_cugraph graph, and convert if necessary.
 
     Directed and undirected graphs are both allowed.
     This is an internal utility function and may change or be removed.
     """
-    if isinstance(G, nxcg.Graph):
+    if isinstance(G, nxcg.CudaGraph):
         return G
     if isinstance(G, nx.Graph):
         return from_networkx(
@@ -644,15 +736,15 @@ def _to_directed_graph(
     edge_attr: AttrKey | None = None,
     edge_default: EdgeValue | None = 1,
     edge_dtype: Dtype | None = None,
-) -> nxcg.DiGraph:
-    """Ensure that input type is a nx_cugraph DiGraph, and convert if necessary.
+) -> nxcg.CudaDiGraph:
+    """Ensure that input type is a nx_cugraph CudaDiGraph, and convert if necessary.
 
     Undirected graphs will be converted to directed.
     This is an internal utility function and may change or be removed.
     """
-    if isinstance(G, nxcg.DiGraph):
+    if isinstance(G, nxcg.CudaDiGraph):
         return G
-    if isinstance(G, nxcg.Graph):
+    if isinstance(G, nxcg.CudaGraph):
         return G.to_directed()
     if isinstance(G, nx.Graph):
         return from_networkx(
@@ -670,13 +762,13 @@ def _to_undirected_graph(
     edge_attr: AttrKey | None = None,
     edge_default: EdgeValue | None = 1,
     edge_dtype: Dtype | None = None,
-) -> nxcg.Graph:
-    """Ensure that input type is a nx_cugraph Graph, and convert if necessary.
+) -> nxcg.CudaGraph:
+    """Ensure that input type is a nx_cugraph CudaGraph, and convert if necessary.
 
     Only undirected graphs are allowed. Directed graphs will raise ValueError.
     This is an internal utility function and may change or be removed.
     """
-    if isinstance(G, nxcg.Graph):
+    if isinstance(G, nxcg.CudaGraph):
         if G.is_directed():
             raise ValueError("Only undirected graphs supported; got a directed graph")
         return G
@@ -688,7 +780,7 @@ def _to_undirected_graph(
     raise TypeError
 
 
-@networkx_algorithm(version_added="24.08")
+@networkx_algorithm(version_added="24.08", fallback=True)
 def from_dict_of_lists(d, create_using=None):
     from .generators._utils import _create_using_class
 
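
The `use_compat_graph` argument added to `from_networkx` above selects between the NetworkX-compatible `nx_cugraph.Graph` wrapper and the device-only `CudaGraph`. A hedged usage sketch based on the signature and docstring in this change (return types as described there):

```python
import networkx as nx
import nx_cugraph as nxcg

G = nx.karate_club_graph()

# Default (use_compat_graph=False): a device-only graph that is not an nx.Graph
cuda_graph = nxcg.from_networkx(G)                           # nxcg.CudaGraph

# use_compat_graph=True: a wrapper usable as a NetworkX graph that keeps the
# converted GPU graph internally (its _cudagraph attribute)
compat_graph = nxcg.from_networkx(G, use_compat_graph=True)  # nxcg.Graph
```
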
diff --git a/python/nx-cugraph/nx_cugraph/convert_matrix.py b/python/nx-cugraph/nx_cugraph/convert_matrix.py
index 38139b913..549759028 100644
--- a/python/nx-cugraph/nx_cugraph/convert_matrix.py
+++ b/python/nx-cugraph/nx_cugraph/convert_matrix.py
@@ -14,6 +14,8 @@
 import networkx as nx
 import numpy as np
 
+from nx_cugraph import _nxver
+
 from .generators._utils import _create_using_class
 from .utils import _cp_iscopied_asarray, index_dtype, networkx_algorithm
 
@@ -24,7 +26,7 @@
 
 
 # Value columns with string dtype is not supported
-@networkx_algorithm(is_incomplete=True, version_added="23.12")
+@networkx_algorithm(is_incomplete=True, version_added="23.12", fallback=True)
 def from_pandas_edgelist(
     df,
     source="source",
@@ -138,7 +140,7 @@ def from_pandas_edgelist(
         and (
             # In nx <= 3.3, `edge_key` was ignored if `edge_attr` is None
             edge_attr is not None
-            or nx.__version__[:3] > "3.3"
+            or _nxver > (3, 3)
         )
     ):
         try:
@@ -161,7 +163,7 @@ def from_pandas_edgelist(
     return G
 
 
-@networkx_algorithm(version_added="23.12")
+@networkx_algorithm(version_added="23.12", fallback=True)
 def from_scipy_sparse_array(
     A, parallel_edges=False, create_using=None, edge_attribute="weight"
 ):
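
The conversion functions above now pass `fallback=True` to `networkx_algorithm`, complementing the `_fallback_decorator` added in `convert.py`: unexpected failures surface as `NotImplementedError` so NetworkX's dispatcher can try another backend, while `nx.NetworkXError` still reaches the caller. A simplified standalone sketch of that idea, using plain `NotImplementedError` in place of the internal `_And_NotImplementedError` helper:

```python
import functools

import networkx as nx


def fallback_on_failure(func):
    """Convert unexpected exceptions into NotImplementedError (illustrative)."""

    @functools.wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except nx.NetworkXError:
            raise  # genuine user-facing errors propagate unchanged
        except Exception as exc:
            # e.g. a failed conversion to GPU; allow dispatch to fall back
            raise NotImplementedError(str(exc)) from exc

    return inner
```
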
diff --git a/python/nx-cugraph/nx_cugraph/generators/_utils.py b/python/nx-cugraph/nx_cugraph/generators/_utils.py
index e38ace5b2..bc9ab84bd 100644
--- a/python/nx-cugraph/nx_cugraph/generators/_utils.py
+++ b/python/nx-cugraph/nx_cugraph/generators/_utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023, NVIDIA CORPORATION.
+# Copyright (c) 2023-2024, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -16,6 +16,7 @@
 import networkx as nx
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from ..utils import index_dtype
 
@@ -74,7 +75,7 @@ def _common_small_graph(n, nodes, create_using, *, allow_directed=True):
     return G
 
 
-def _create_using_class(create_using, *, default=nxcg.Graph):
+def _create_using_class(create_using, *, default=nx.Graph):
     """Handle ``create_using`` argument and return a Graph type from nx_cugraph."""
     inplace = False
     if create_using is None:
@@ -85,16 +86,17 @@ def _create_using_class(create_using, *, default=nxcg.Graph):
         create_using, "is_multigraph"
     ):
         raise TypeError("create_using is not a valid graph type or instance")
-    elif not isinstance(create_using, nxcg.Graph):
+    elif not isinstance(create_using, (nxcg.Graph, nxcg.CudaGraph)):
         raise NotImplementedError(
             f"create_using with object of type {type(create_using)} is not supported "
-            "by the cugraph backend; only nx_cugraph.Graph objects are allowed."
+            "by the cugraph backend; only nx_cugraph.Graph or nx_cugraph.CudaGraph "
+            "objects are allowed."
         )
     else:
         inplace = True
         G = create_using
         G.clear()
-    if not isinstance(G, nxcg.Graph):
+    if not isinstance(G, (nxcg.Graph, nxcg.CudaGraph)):
         if G.is_multigraph():
             if G.is_directed():
                 graph_class = nxcg.MultiDiGraph
@@ -104,10 +106,12 @@ def _create_using_class(create_using, *, default=nxcg.Graph):
             graph_class = nxcg.DiGraph
         else:
             graph_class = nxcg.Graph
+        if _nxver >= (3, 3) and not nx.config.backends.cugraph.use_compat_graphs:
+            graph_class = graph_class.to_cudagraph_class()
         if G.__class__ not in {nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph}:
             raise NotImplementedError(
                 f"create_using with type {type(G)} is not supported by the cugraph "
-                "backend; only standard networkx or nx_cugraph Graph objects are "
+                "backend; only standard networkx or nx_cugraph graph objects are "
                 "allowed (but not customized subclasses derived from them)."
             )
     else:
diff --git a/python/nx-cugraph/nx_cugraph/generators/classic.py b/python/nx-cugraph/nx_cugraph/generators/classic.py
index a548beea3..cfcb2a3af 100644
--- a/python/nx-cugraph/nx_cugraph/generators/classic.py
+++ b/python/nx-cugraph/nx_cugraph/generators/classic.py
@@ -18,6 +18,7 @@
 import numpy as np
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from ..utils import _get_int_dtype, index_dtype, networkx_algorithm
 from ._utils import (
@@ -102,7 +103,9 @@ def complete_graph(n, create_using=None):
 @networkx_algorithm(version_added="23.12")
 def complete_multipartite_graph(*subset_sizes):
     if not subset_sizes:
-        return nxcg.Graph()
+        if _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs:
+            return nxcg.Graph()
+        return nxcg.CudaGraph()
     try:
         subset_sizes = [_ensure_int(size) for size in subset_sizes]
     except TypeError:
@@ -139,6 +142,8 @@ def complete_multipartite_graph(*subset_sizes):
         dst_indices,
         node_values={"subset": subsets_array},
         id_to_key=nodes,
+        use_compat_graph=_nxver < (3, 3)
+        or nx.config.backends.cugraph.use_compat_graphs,
     )
 
 
diff --git a/python/nx-cugraph/nx_cugraph/generators/community.py b/python/nx-cugraph/nx_cugraph/generators/community.py
index 9b0e0848d..4e5063cc3 100644
--- a/python/nx-cugraph/nx_cugraph/generators/community.py
+++ b/python/nx-cugraph/nx_cugraph/generators/community.py
@@ -11,8 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import cupy as cp
+import networkx as nx
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from ..utils import networkx_algorithm
 from ._utils import (
@@ -42,4 +44,7 @@ def caveman_graph(l, k):  # noqa: E741
     dst_cliques.extend(dst_clique + i * k for i in range(1, l))
     src_indices = cp.hstack(src_cliques)
     dst_indices = cp.hstack(dst_cliques)
-    return nxcg.Graph.from_coo(l * k, src_indices, dst_indices)
+    use_compat_graph = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    return nxcg.CudaGraph.from_coo(
+        l * k, src_indices, dst_indices, use_compat_graph=use_compat_graph
+    )
diff --git a/python/nx-cugraph/nx_cugraph/generators/ego.py b/python/nx-cugraph/nx_cugraph/generators/ego.py
index 66c9c8b95..9a91fa0b6 100644
--- a/python/nx-cugraph/nx_cugraph/generators/ego.py
+++ b/python/nx-cugraph/nx_cugraph/generators/ego.py
@@ -32,7 +32,10 @@ def ego_graph(
 ):
     """Weighted ego_graph with negative cycles is not yet supported. `NotImplementedError` will be raised if there are negative `distance` edge weights."""  # noqa: E501
     if isinstance(G, nx.Graph):
+        is_compat_graph = isinstance(G, nxcg.Graph)
         G = nxcg.from_networkx(G, preserve_all_attrs=True)
+    else:
+        is_compat_graph = False
     if n not in G:
         if distance is None:
             raise nx.NodeNotFound(f"Source {n} is not in G")
@@ -100,7 +103,10 @@ def ego_graph(
             node_mask &= node_ids != src_index
         node_ids = node_ids[node_mask]
     if node_ids.size == G._N:
-        return G.copy()
+        rv = G.copy()
+        if is_compat_graph:
+            return rv._to_compat_graph()
+        return rv
     # TODO: create renumbering helper function(s)
     node_ids.sort()  # TODO: is this ever necessary? Keep for safety
     node_values = {key: val[node_ids] for key, val in G.node_values.items()}
@@ -137,6 +143,7 @@ def ego_graph(
         "node_values": node_values,
         "node_masks": node_masks,
         "key_to_id": key_to_id,
+        "use_compat_graph": False,
     }
     if G.is_multigraph():
         if G.edge_keys is not None:
@@ -147,6 +154,8 @@ def ego_graph(
             kwargs["edge_indices"] = G.edge_indices[edge_mask]
     rv = G.__class__.from_coo(**kwargs)
     rv.graph.update(G.graph)
+    if is_compat_graph:
+        return rv._to_compat_graph()
     return rv
 
 
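
`ego_graph` above records whether its input was a compat `nxcg.Graph`, computes on the device graph, and converts the result back with `_to_compat_graph()` so callers get the same flavor they passed in. A hypothetical sketch of that pattern (the helper name is invented for illustration):

```python
import networkx as nx

import nx_cugraph as nxcg


def _run_on_gpu_preserving_type(G, compute):
    """Run `compute` on a CudaGraph and return the same flavor the caller passed."""
    is_compat_graph = isinstance(G, nxcg.Graph)
    if isinstance(G, nx.Graph):
        # Compat graphs are nx.Graph subclasses; this yields their CudaGraph
        G = nxcg.from_networkx(G, preserve_all_attrs=True)
    result = compute(G)  # operates on an nxcg.CudaGraph
    return result._to_compat_graph() if is_compat_graph else result
```
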
diff --git a/python/nx-cugraph/nx_cugraph/generators/small.py b/python/nx-cugraph/nx_cugraph/generators/small.py
index 45487571c..d0c03cb7d 100644
--- a/python/nx-cugraph/nx_cugraph/generators/small.py
+++ b/python/nx-cugraph/nx_cugraph/generators/small.py
@@ -14,6 +14,7 @@
 import networkx as nx
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from ..utils import index_dtype, networkx_algorithm
 from ._utils import _IS_NX32_OR_LESS, _create_using_class
@@ -449,7 +450,14 @@ def pappus_graph():
         index_dtype,
     )
     # fmt: on
-    return nxcg.Graph.from_coo(18, src_indices, dst_indices, name="Pappus Graph")
+    use_compat_graph = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    return nxcg.CudaGraph.from_coo(
+        18,
+        src_indices,
+        dst_indices,
+        name="Pappus Graph",
+        use_compat_graph=use_compat_graph,
+    )
 
 
 @networkx_algorithm(version_added="23.12")
diff --git a/python/nx-cugraph/nx_cugraph/generators/social.py b/python/nx-cugraph/nx_cugraph/generators/social.py
index 07e82c63f..09d405e75 100644
--- a/python/nx-cugraph/nx_cugraph/generators/social.py
+++ b/python/nx-cugraph/nx_cugraph/generators/social.py
@@ -11,9 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import cupy as cp
+import networkx as nx
 import numpy as np
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from ..utils import index_dtype, networkx_algorithm
 
@@ -77,7 +79,8 @@ def davis_southern_women_graph():
         "E13", "E14",
     ]
     # fmt: on
-    return nxcg.Graph.from_coo(
+    use_compat_graph = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    return nxcg.CudaGraph.from_coo(
         32,
         src_indices,
         dst_indices,
@@ -85,6 +88,7 @@ def davis_southern_women_graph():
         id_to_key=women + events,
         top=women,
         bottom=events,
+        use_compat_graph=use_compat_graph,
     )
 
 
@@ -111,7 +115,14 @@ def florentine_families_graph():
         "Salviati", "Strozzi", "Tornabuoni"
     ]
     # fmt: on
-    return nxcg.Graph.from_coo(15, src_indices, dst_indices, id_to_key=nodes)
+    use_compat_graph = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    return nxcg.CudaGraph.from_coo(
+        15,
+        src_indices,
+        dst_indices,
+        id_to_key=nodes,
+        use_compat_graph=use_compat_graph,
+    )
 
 
 @networkx_algorithm(version_added="23.12")
@@ -165,13 +176,15 @@ def karate_club_graph():
         "Officer", "Officer", "Officer", "Officer", "Officer", "Officer",
     ])
     # fmt: on
-    return nxcg.Graph.from_coo(
+    use_compat_graph = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    return nxcg.CudaGraph.from_coo(
         34,
         src_indices,
         dst_indices,
         edge_values={"weight": weights},
         node_values={"club": clubs},
         name="Zachary's Karate Club",
+        use_compat_graph=use_compat_graph,
     )
 
 
@@ -289,6 +302,12 @@ def les_miserables_graph():
         "Zephine",
     ]
     # fmt: on
-    return nxcg.Graph.from_coo(
-        77, src_indices, dst_indices, edge_values={"weight": weights}, id_to_key=nodes
+    use_compat_graph = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    return nxcg.CudaGraph.from_coo(
+        77,
+        src_indices,
+        dst_indices,
+        edge_values={"weight": weights},
+        id_to_key=nodes,
+        use_compat_graph=use_compat_graph,
     )
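
Each generator above derives `use_compat_graph` from the NetworkX version and the backend config, so the return type can be controlled globally. A hedged sketch of toggling it (assumes NetworkX >= 3.3, where `nx.config.backends.cugraph.use_compat_graphs` is available, and that the generator is dispatched to this backend):

```python
import networkx as nx

# NetworkX-compatible nx_cugraph.Graph results (assumed default per the
# "Use compat graphs by default" comments in this change)
nx.config.backends.cugraph.use_compat_graphs = True
G = nx.karate_club_graph(backend="cugraph")

# Device-only nx_cugraph.CudaGraph results instead
nx.config.backends.cugraph.use_compat_graphs = False
G = nx.karate_club_graph(backend="cugraph")
```
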
diff --git a/python/nx-cugraph/nx_cugraph/interface.py b/python/nx-cugraph/nx_cugraph/interface.py
index 4007230ef..1a3d08409 100644
--- a/python/nx-cugraph/nx_cugraph/interface.py
+++ b/python/nx-cugraph/nx_cugraph/interface.py
@@ -18,6 +18,7 @@
 import networkx as nx
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 
 class BackendInterface:
@@ -32,11 +33,19 @@ def convert_from_nx(graph, *args, edge_attrs=None, weight=None, **kwargs):
                     "edge_attrs and weight arguments should not both be given"
                 )
             edge_attrs = {weight: 1}
-        return nxcg.from_networkx(graph, *args, edge_attrs=edge_attrs, **kwargs)
+        return nxcg.from_networkx(
+            graph,
+            *args,
+            edge_attrs=edge_attrs,
+            use_compat_graph=_nxver < (3, 3)
+            or nx.config.backends.cugraph.use_compat_graphs,
+            **kwargs,
+        )
 
     @staticmethod
     def convert_to_nx(obj, *, name: str | None = None):
-        if isinstance(obj, nxcg.Graph):
+        if isinstance(obj, nxcg.CudaGraph):
+            # Observe that this does not try to convert Graph!
             return nxcg.to_networkx(obj)
         return obj
 
@@ -62,19 +71,32 @@ def key(testpath):
                 return (testname, frozenset({classname, filename}))
             return (testname, frozenset({filename}))
 
+        use_compat_graph = (
+            _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+        )
+        fallback = use_compat_graph or nx.utils.backends._dispatchable._fallback_to_nx
+
         # Reasons for xfailing
+        # For nx version <= 3.1
         no_weights = "weighted implementation not currently supported"
         no_multigraph = "multigraphs not currently supported"
+        # For nx version <= 3.2
+        nx_cugraph_in_test_setup = (
+            "nx-cugraph Graph is incompatible in test setup in nx versions < 3.3"
+        )
+        # For all versions
         louvain_different = "Louvain may be different due to RNG"
-        no_string_dtype = "string edge values not currently supported"
         sssp_path_different = "sssp may choose a different valid path"
+        tuple_elements_preferred = "elements are tuples instead of lists"
+        no_mixed_dtypes_for_nodes = (
+            # This one is tricky b/c we don't raise; all dtypes are treated as str
+            "mixed dtypes (str, int, float) for single node property not supported"
+        )
+        # These shouldn't fail if using Graph or falling back to networkx
+        no_string_dtype = "string edge values not currently supported"
         no_object_dtype_for_edges = (
             "Edges don't support object dtype (lists, strings, etc.)"
         )
-        tuple_elements_preferred = "elements are tuples instead of lists"
-        nx_cugraph_in_test_setup = (
-            "nx-cugraph Graph is incompatible in test setup in nx versions < 3.3"
-        )
 
         xfail = {
             # This is removed while strongly_connected_components() is not
@@ -98,38 +120,6 @@ def key(testpath):
                 "test_cycles.py:TestMinimumCycleBasis."
                 "test_gh6787_and_edge_attribute_names"
             ): sssp_path_different,
-            key(
-                "test_graph_hashing.py:test_isomorphic_edge_attr"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_graph_hashing.py:test_isomorphic_edge_attr_and_node_attr"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_graph_hashing.py:test_isomorphic_edge_attr_subgraph_hash"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_graph_hashing.py:"
-                "test_isomorphic_edge_attr_and_node_attr_subgraph_hash"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_summarization.py:TestSNAPNoEdgeTypes.test_summary_graph"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_summarization.py:TestSNAPUndirected.test_summary_graph"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_summarization.py:TestSNAPDirected.test_summary_graph"
-            ): no_object_dtype_for_edges,
-            key("test_gexf.py:TestGEXF.test_relabel"): no_object_dtype_for_edges,
-            key(
-                "test_gml.py:TestGraph.test_parse_gml_cytoscape_bug"
-            ): no_object_dtype_for_edges,
-            key("test_gml.py:TestGraph.test_parse_gml"): no_object_dtype_for_edges,
-            key("test_gml.py:TestGraph.test_read_gml"): no_object_dtype_for_edges,
-            key("test_gml.py:TestGraph.test_data_types"): no_object_dtype_for_edges,
-            key(
-                "test_gml.py:TestPropertyLists.test_reading_graph_with_list_property"
-            ): no_object_dtype_for_edges,
             key(
                 "test_relabel.py:"
                 "test_relabel_preserve_node_order_partial_mapping_with_copy_false"
@@ -138,48 +128,107 @@ def key(testpath):
                 "test_gml.py:"
                 "TestPropertyLists.test_reading_graph_with_single_element_list_property"
             ): tuple_elements_preferred,
-            key(
-                "test_relabel.py:"
-                "TestRelabel.test_relabel_multidigraph_inout_merge_nodes"
-            ): no_string_dtype,
-            key(
-                "test_relabel.py:TestRelabel.test_relabel_multigraph_merge_inplace"
-            ): no_string_dtype,
-            key(
-                "test_relabel.py:TestRelabel.test_relabel_multidigraph_merge_inplace"
-            ): no_string_dtype,
-            key(
-                "test_relabel.py:TestRelabel.test_relabel_multidigraph_inout_copy"
-            ): no_string_dtype,
-            key(
-                "test_relabel.py:TestRelabel.test_relabel_multigraph_merge_copy"
-            ): no_string_dtype,
-            key(
-                "test_relabel.py:TestRelabel.test_relabel_multidigraph_merge_copy"
-            ): no_string_dtype,
-            key(
-                "test_relabel.py:TestRelabel.test_relabel_multigraph_nonnumeric_key"
-            ): no_string_dtype,
-            key("test_contraction.py:test_multigraph_path"): no_object_dtype_for_edges,
-            key(
-                "test_contraction.py:test_directed_multigraph_path"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_contraction.py:test_multigraph_blockmodel"
-            ): no_object_dtype_for_edges,
-            key(
-                "test_summarization.py:TestSNAPUndirectedMulti.test_summary_graph"
-            ): no_string_dtype,
-            key(
-                "test_summarization.py:TestSNAPDirectedMulti.test_summary_graph"
-            ): no_string_dtype,
         }
+        if not fallback:
+            xfail.update(
+                {
+                    key(
+                        "test_graph_hashing.py:test_isomorphic_edge_attr"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_graph_hashing.py:test_isomorphic_edge_attr_and_node_attr"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_graph_hashing.py:test_isomorphic_edge_attr_subgraph_hash"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_graph_hashing.py:"
+                        "test_isomorphic_edge_attr_and_node_attr_subgraph_hash"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_summarization.py:TestSNAPNoEdgeTypes.test_summary_graph"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_summarization.py:TestSNAPUndirected.test_summary_graph"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_summarization.py:TestSNAPDirected.test_summary_graph"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_gexf.py:TestGEXF.test_relabel"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_gml.py:TestGraph.test_parse_gml_cytoscape_bug"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_gml.py:TestGraph.test_parse_gml"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_gml.py:TestGraph.test_read_gml"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_gml.py:TestGraph.test_data_types"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_gml.py:"
+                        "TestPropertyLists.test_reading_graph_with_list_property"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_relabel.py:"
+                        "TestRelabel.test_relabel_multidigraph_inout_merge_nodes"
+                    ): no_string_dtype,
+                    key(
+                        "test_relabel.py:"
+                        "TestRelabel.test_relabel_multigraph_merge_inplace"
+                    ): no_string_dtype,
+                    key(
+                        "test_relabel.py:"
+                        "TestRelabel.test_relabel_multidigraph_merge_inplace"
+                    ): no_string_dtype,
+                    key(
+                        "test_relabel.py:"
+                        "TestRelabel.test_relabel_multidigraph_inout_copy"
+                    ): no_string_dtype,
+                    key(
+                        "test_relabel.py:TestRelabel.test_relabel_multigraph_merge_copy"
+                    ): no_string_dtype,
+                    key(
+                        "test_relabel.py:"
+                        "TestRelabel.test_relabel_multidigraph_merge_copy"
+                    ): no_string_dtype,
+                    key(
+                        "test_relabel.py:"
+                        "TestRelabel.test_relabel_multigraph_nonnumeric_key"
+                    ): no_string_dtype,
+                    key(
+                        "test_contraction.py:test_multigraph_path"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_contraction.py:test_directed_multigraph_path"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_contraction.py:test_multigraph_blockmodel"
+                    ): no_object_dtype_for_edges,
+                    key(
+                        "test_summarization.py:"
+                        "TestSNAPUndirectedMulti.test_summary_graph"
+                    ): no_string_dtype,
+                    key(
+                        "test_summarization.py:TestSNAPDirectedMulti.test_summary_graph"
+                    ): no_string_dtype,
+                }
+            )
+        else:
+            xfail.update(
+                {
+                    key(
+                        "test_gml.py:"
+                        "TestPropertyLists.test_reading_graph_with_list_property"
+                    ): no_mixed_dtypes_for_nodes,
+                }
+            )
 
-        from packaging.version import parse
-
-        nxver = parse(nx.__version__)
-
-        if nxver.major == 3 and nxver.minor <= 2:
+        if _nxver <= (3, 2):
             xfail.update(
                 {
                     # NetworkX versions prior to 3.2.1 have tests written to
@@ -216,7 +265,7 @@ def key(testpath):
                 }
             )
 
-        if nxver.major == 3 and nxver.minor <= 1:
+        if _nxver <= (3, 1):
             # MAINT: networkx 3.0, 3.1
             # NetworkX 3.2 added the ability to "fallback to nx" if backend algorithms
             # raise NotImplementedError or `can_run` returns False. The tests below
@@ -332,24 +381,25 @@ def key(testpath):
                 xfail[key("test_louvain.py:test_threshold")] = (
                     "Louvain does not support seed parameter"
                 )
-            if nxver.major == 3 and nxver.minor >= 2:
-                xfail.update(
-                    {
-                        key(
-                            "test_convert_pandas.py:TestConvertPandas."
-                            "test_from_edgelist_multi_attr_incl_target"
-                        ): no_string_dtype,
-                        key(
-                            "test_convert_pandas.py:TestConvertPandas."
-                            "test_from_edgelist_multidigraph_and_edge_attr"
-                        ): no_string_dtype,
-                        key(
-                            "test_convert_pandas.py:TestConvertPandas."
-                            "test_from_edgelist_int_attr_name"
-                        ): no_string_dtype,
-                    }
-                )
-                if nxver.minor == 2:
+            if _nxver >= (3, 2):
+                if not fallback:
+                    xfail.update(
+                        {
+                            key(
+                                "test_convert_pandas.py:TestConvertPandas."
+                                "test_from_edgelist_multi_attr_incl_target"
+                            ): no_string_dtype,
+                            key(
+                                "test_convert_pandas.py:TestConvertPandas."
+                                "test_from_edgelist_multidigraph_and_edge_attr"
+                            ): no_string_dtype,
+                            key(
+                                "test_convert_pandas.py:TestConvertPandas."
+                                "test_from_edgelist_int_attr_name"
+                            ): no_string_dtype,
+                        }
+                    )
+                if _nxver[1] == 2:
                     different_iteration_order = "Different graph data iteration order"
                     xfail.update(
                         {
@@ -366,7 +416,7 @@ def key(testpath):
                             ): different_iteration_order,
                         }
                     )
-                elif nxver.minor >= 3:
+                elif _nxver[1] >= 3:
                     xfail.update(
                         {
                             key("test_louvain.py:test_max_level"): louvain_different,
diff --git a/python/nx-cugraph/nx_cugraph/relabel.py b/python/nx-cugraph/nx_cugraph/relabel.py
index 20d1337a9..e38e18c77 100644
--- a/python/nx-cugraph/nx_cugraph/relabel.py
+++ b/python/nx-cugraph/nx_cugraph/relabel.py
@@ -29,13 +29,18 @@
 
 @networkx_algorithm(version_added="24.08")
 def relabel_nodes(G, mapping, copy=True):
+    G_orig = G
     if isinstance(G, nx.Graph):
-        if not copy:
+        is_compat_graph = isinstance(G, nxcg.Graph)
+        if not copy and not is_compat_graph:
             raise RuntimeError(
                 "Using `copy=False` is invalid when using a NetworkX graph "
                 "as input to `nx_cugraph.relabel_nodes`"
             )
         G = nxcg.from_networkx(G, preserve_all_attrs=True)
+    else:
+        is_compat_graph = False
+
     it = range(G._N) if G.key_to_id is None else G.id_to_key
     if callable(mapping):
         previd_to_key = [mapping(node) for node in it]
@@ -225,12 +230,13 @@ def relabel_nodes(G, mapping, copy=True):
         node_masks=node_masks,
         id_to_key=newid_to_key,
         key_to_id=key_to_newid,
+        use_compat_graph=is_compat_graph,
         **extra_kwargs,
     )
     rv.graph.update(G.graph)
     if not copy:
-        G._become(rv)
-        return G
+        G_orig._become(rv)
+        return G_orig
     return rv
 
 
@@ -241,7 +247,10 @@ def convert_node_labels_to_integers(
     if ordering not in {"default", "sorted", "increasing degree", "decreasing degree"}:
         raise nx.NetworkXError(f"Unknown node ordering: {ordering}")
     if isinstance(G, nx.Graph):
+        is_compat_graph = isinstance(G, nxcg.Graph)
         G = nxcg.from_networkx(G, preserve_all_attrs=True)
+    else:
+        is_compat_graph = False
     G = G.copy()
     if label_attribute is not None:
         prev_vals = G.id_to_key
@@ -279,4 +288,6 @@ def convert_node_labels_to_integers(
         key_to_id = G.key_to_id
         G.key_to_id = {i: key_to_id[n] for i, (d, n) in enumerate(pairs, first_label)}
     G._id_to_key = id_to_key
+    if is_compat_graph:
+        return G._to_compat_graph()
     return G
diff --git a/python/nx-cugraph/nx_cugraph/tests/pytest.ini b/python/nx-cugraph/nx_cugraph/tests/pytest.ini
new file mode 100644
index 000000000..7b0a9f29f
--- /dev/null
+++ b/python/nx-cugraph/nx_cugraph/tests/pytest.ini
@@ -0,0 +1,4 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+
+[pytest]
+addopts = --tb=native
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_bfs.py b/python/nx-cugraph/nx_cugraph/tests/test_bfs.py
index c2b22e989..ad2c62c1f 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_bfs.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_bfs.py
@@ -12,11 +12,10 @@
 # limitations under the License.
 import networkx as nx
 import pytest
-from packaging.version import parse
 
-nxver = parse(nx.__version__)
+from nx_cugraph import _nxver
 
-if nxver.major == 3 and nxver.minor < 2:
+if _nxver < (3, 2):
     pytest.skip("Need NetworkX >=3.2 to test BFS", allow_module_level=True)
 
 
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_classes.py b/python/nx-cugraph/nx_cugraph/tests/test_classes.py
new file mode 100644
index 000000000..0ac238b35
--- /dev/null
+++ b/python/nx-cugraph/nx_cugraph/tests/test_classes.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import nx_cugraph as nxcg
+
+
+def test_class_to_class():
+    """Basic sanity checks to ensure metadata relating graph classes are accurate."""
+    for prefix in ["", "Cuda"]:
+        for suffix in ["Graph", "DiGraph", "MultiGraph", "MultiDiGraph"]:
+            cls_name = f"{prefix}{suffix}"
+            cls = getattr(nxcg, cls_name)
+            assert cls.__name__ == cls_name
+            G = cls()
+            assert cls is G.__class__
+            # cudagraph
+            val = cls.to_cudagraph_class()
+            val2 = G.to_cudagraph_class()
+            assert val is val2
+            assert val.__name__ == f"Cuda{suffix}"
+            assert val.__module__.startswith("nx_cugraph")
+            assert cls.is_directed() == G.is_directed() == val.is_directed()
+            assert cls.is_multigraph() == G.is_multigraph() == val.is_multigraph()
+            # networkx
+            val = cls.to_networkx_class()
+            val2 = G.to_networkx_class()
+            assert val is val2
+            assert val.__name__ == suffix
+            assert val.__module__.startswith("networkx")
+            val = val()
+            assert cls.is_directed() == G.is_directed() == val.is_directed()
+            assert cls.is_multigraph() == G.is_multigraph() == val.is_multigraph()
+            # directed
+            val = cls.to_directed_class()
+            val2 = G.to_directed_class()
+            assert val is val2
+            assert val.__module__.startswith("nx_cugraph")
+            assert val.is_directed()
+            assert cls.is_multigraph() == G.is_multigraph() == val.is_multigraph()
+            if "Di" in suffix:
+                assert val is cls
+            else:
+                assert "Di" in val.__name__
+                assert prefix in val.__name__
+                assert cls.to_undirected_class() is cls
+            # undirected
+            val = cls.to_undirected_class()
+            val2 = G.to_undirected_class()
+            assert val is val2
+            assert val.__module__.startswith("nx_cugraph")
+            assert not val.is_directed()
+            assert cls.is_multigraph() == G.is_multigraph() == val.is_multigraph()
+            if "Di" not in suffix:
+                assert val is cls
+            else:
+                assert "Di" not in val.__name__
+                assert prefix in val.__name__
+                assert cls.to_directed_class() is cls
+            # "zero"
+            if prefix == "Cuda":
+                val = cls._to_compat_graph_class()
+                val2 = G._to_compat_graph_class()
+                assert val is val2
+                assert val.__name__ == suffix
+                assert val.__module__.startswith("nx_cugraph")
+                assert val.to_cudagraph_class() is cls
+                assert cls.is_directed() == G.is_directed() == val.is_directed()
+                assert cls.is_multigraph() == G.is_multigraph() == val.is_multigraph()
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_cluster.py b/python/nx-cugraph/nx_cugraph/tests/test_cluster.py
index ad4770f1a..fd8e1b3cf 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_cluster.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_cluster.py
@@ -12,11 +12,10 @@
 # limitations under the License.
 import networkx as nx
 import pytest
-from packaging.version import parse
 
-nxver = parse(nx.__version__)
+from nx_cugraph import _nxver
 
-if nxver.major == 3 and nxver.minor < 2:
+if _nxver < (3, 2):
     pytest.skip("Need NetworkX >=3.2 to test clustering", allow_module_level=True)
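These version gates now compare against `_nxver` instead of parsing `nx.__version__` with `packaging`; judging from the comparisons and the `_nxver[:2]` slice used further below, `_nxver` is a version tuple exposed by `nx_cugraph`. A minimal illustration of why tuple comparison is the safer pattern (plain Python behavior, not project code):

```python
# Lexicographic string comparison misorders two-digit minor versions;
# comparing version tuples, as the `_nxver` checks do, does not.
print("3.10" > "3.2")    # False -- wrong: 3.10 is newer than 3.2
print((3, 10) > (3, 2))  # True  -- correct with a version tuple
```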
 
 
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_convert.py b/python/nx-cugraph/nx_cugraph/tests/test_convert.py
index 634b28e96..3d109af8a 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_convert.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_convert.py
@@ -13,13 +13,10 @@
 import cupy as cp
 import networkx as nx
 import pytest
-from packaging.version import parse
 
 import nx_cugraph as nxcg
 from nx_cugraph import interface
 
-nxver = parse(nx.__version__)
-
 
 @pytest.mark.parametrize(
     "graph_class", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py b/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py
index 5474f9d79..0697a744e 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py
@@ -12,16 +12,13 @@
 # limitations under the License.
 import networkx as nx
 import pytest
-from packaging.version import parse
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from .testing_utils import assert_graphs_equal
 
-nxver = parse(nx.__version__)
-
-
-if nxver.major == 3 and nxver.minor < 2:
+if _nxver < (3, 2):
     pytest.skip("Need NetworkX >=3.2 to test ego_graph", allow_module_level=True)
 
 
@@ -49,7 +46,12 @@ def test_ego_graph_cycle_graph(
     kwargs = {"radius": radius, "center": center, "undirected": undirected}
     Hnx = nx.ego_graph(Gnx, n, **kwargs)
     Hcg = nx.ego_graph(Gnx, n, **kwargs, backend="cugraph")
+    use_compat_graphs = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
+    assert_graphs_equal(Hnx, Hcg._cudagraph if use_compat_graphs else Hcg)
+    Hcg = nx.ego_graph(Gcg, n, **kwargs)
     assert_graphs_equal(Hnx, Hcg)
+    Hcg = nx.ego_graph(Gcg._to_compat_graph(), n, **kwargs)
+    assert_graphs_equal(Hnx, Hcg._cudagraph)
     with pytest.raises(nx.NodeNotFound, match="not in G"):
         nx.ego_graph(Gnx, -1, **kwargs)
     with pytest.raises(nx.NodeNotFound, match="not in G"):
@@ -61,20 +63,36 @@ def test_ego_graph_cycle_graph(
 
     kwargs["distance"] = "weight"
     H2nx = nx.ego_graph(Gnx, n, **kwargs)
-    is_nx32 = nxver.major == 3 and nxver.minor == 2
+    is_nx32 = _nxver[:2] == (3, 2)
     if undirected and Gnx.is_directed() and Gnx.is_multigraph():
         if is_nx32:
             # `should_run` was added in nx 3.3
             match = "Weighted ego_graph with undirected=True not implemented"
+        elif _nxver >= (3, 4):
+            match = "not implemented by 'cugraph'"
         else:
             match = "not implemented by cugraph"
-        with pytest.raises(RuntimeError, match=match):
+        with pytest.raises(
+            RuntimeError if _nxver < (3, 4) else NotImplementedError, match=match
+        ):
             nx.ego_graph(Gnx, n, **kwargs, backend="cugraph")
         with pytest.raises(NotImplementedError, match="ego_graph"):
-            nx.ego_graph(Gcg, n, **kwargs)
+            nx.ego_graph(Gcg, n, **kwargs, backend="cugraph")
+        if _nxver < (3, 4):
+            with pytest.raises(NotImplementedError, match="ego_graph"):
+                nx.ego_graph(Gcg, n, **kwargs)
+        else:
+            # This is an interesting case. `nxcg.ego_graph` is not implemented for
+            # these arguments, so it falls back to networkx. Hence, as it is currently
+            # implemented, the input graph is `nxcg.CudaGraph`, but the output graph
+            # is `nx.Graph`. Should networkx convert back to "cugraph" backend?
+            # TODO: make fallback to networkx configurable.
+            H2cg = nx.ego_graph(Gcg, n, **kwargs)
+            assert type(H2nx) is type(H2cg)
+            assert_graphs_equal(H2nx, nxcg.from_networkx(H2cg, preserve_all_attrs=True))
     else:
         H2cg = nx.ego_graph(Gnx, n, **kwargs, backend="cugraph")
-        assert_graphs_equal(H2nx, H2cg)
+        assert_graphs_equal(H2nx, H2cg._cudagraph if use_compat_graphs else H2cg)
         with pytest.raises(nx.NodeNotFound, match="not found in graph"):
             nx.ego_graph(Gnx, -1, **kwargs)
         with pytest.raises(nx.NodeNotFound, match="not found in graph"):
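The reworked assertions unwrap backend results before comparing, since with compat graphs enabled a `backend="cugraph"` call returns an `nxcg.Graph` wrapper whose device data sits in `._cudagraph`. A minimal sketch of that unwrapping pattern, assuming only what the test itself uses (`_nxver`, the `use_compat_graphs` backend config on NetworkX >= 3.3, and the `_cudagraph` attribute):

```python
import networkx as nx
from nx_cugraph import _nxver

def unwrap(result):
    # Before NetworkX 3.3 there is no backend config, so compat graphs are
    # always in play; otherwise honor the cugraph backend setting.
    use_compat = _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs
    return result._cudagraph if use_compat else result
```

With a helper like this, the equality checks above reduce to `assert_graphs_equal(Hnx, unwrap(Hcg))`.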
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_generators.py b/python/nx-cugraph/nx_cugraph/tests/test_generators.py
index c751b0fe2..5c405f1c9 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_generators.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_generators.py
@@ -13,25 +13,24 @@
 import networkx as nx
 import numpy as np
 import pytest
-from packaging.version import parse
 
 import nx_cugraph as nxcg
+from nx_cugraph import _nxver
 
 from .testing_utils import assert_graphs_equal
 
-nxver = parse(nx.__version__)
-
-
-if nxver.major == 3 and nxver.minor < 2:
+if _nxver < (3, 2):
     pytest.skip("Need NetworkX >=3.2 to test generators", allow_module_level=True)
 
 
 def compare(name, create_using, *args, is_vanilla=False):
     exc1 = exc2 = None
     func = getattr(nx, name)
-    if isinstance(create_using, nxcg.Graph):
+    if isinstance(create_using, nxcg.CudaGraph):
         nx_create_using = nxcg.to_networkx(create_using)
-    elif isinstance(create_using, type) and issubclass(create_using, nxcg.Graph):
+    elif isinstance(create_using, type) and issubclass(
+        create_using, (nxcg.Graph, nxcg.CudaGraph)
+    ):
         nx_create_using = create_using.to_networkx_class()
     elif isinstance(create_using, nx.Graph):
         nx_create_using = create_using.copy()
@@ -61,8 +60,27 @@ def compare(name, create_using, *args, is_vanilla=False):
         exc2 = exc
     if exc1 is not None or exc2 is not None:
         assert type(exc1) is type(exc2)
+        return
+    if isinstance(Gcg, nxcg.Graph):
+        # If the graph is empty, it may be on host; otherwise it should be on device
+        if len(G):
+            assert Gcg._is_on_gpu
+            assert not Gcg._is_on_cpu
+        assert_graphs_equal(G, Gcg._cudagraph)
     else:
         assert_graphs_equal(G, Gcg)
+    # Ensure the output type is correct
+    if is_vanilla:
+        if _nxver < (3, 3) or nx.config.backends.cugraph.use_compat_graphs:
+            assert isinstance(Gcg, nxcg.Graph)
+        else:
+            assert isinstance(Gcg, nxcg.CudaGraph)
+    elif isinstance(create_using, type) and issubclass(
+        create_using, (nxcg.Graph, nxcg.CudaGraph)
+    ):
+        assert type(Gcg) is create_using
+    elif isinstance(create_using, (nxcg.Graph, nxcg.CudaGraph)):
+        assert type(Gcg) is type(create_using)
 
 
 N = list(range(-1, 5))
@@ -76,6 +94,10 @@ def compare(name, create_using, *args, is_vanilla=False):
     nxcg.DiGraph,
     nxcg.MultiGraph,
     nxcg.MultiDiGraph,
+    nxcg.CudaGraph,
+    nxcg.CudaDiGraph,
+    nxcg.CudaMultiGraph,
+    nxcg.CudaMultiDiGraph,
     # These raise NotImplementedError
     # nx.Graph(),
     # nx.DiGraph(),
@@ -85,6 +107,10 @@ def compare(name, create_using, *args, is_vanilla=False):
     nxcg.DiGraph(),
     nxcg.MultiGraph(),
     nxcg.MultiDiGraph(),
+    nxcg.CudaGraph(),
+    nxcg.CudaDiGraph(),
+    nxcg.CudaMultiGraph(),
+    nxcg.CudaMultiDiGraph(),
     None,
     object,  # Bad input
     7,  # Bad input
@@ -158,7 +184,7 @@ def compare(name, create_using, *args, is_vanilla=False):
 @pytest.mark.parametrize("create_using", COMPLETE_CREATE_USING)
 def test_generator_noarg(name, create_using):
     print(name, create_using, type(create_using))
-    if isinstance(create_using, nxcg.Graph) and name in {
+    if isinstance(create_using, nxcg.CudaGraph) and name in {
         # fmt: off
         "bull_graph", "chvatal_graph", "cubical_graph", "diamond_graph",
         "house_graph", "house_x_graph", "icosahedral_graph", "krackhardt_kite_graph",
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_graph_methods.py b/python/nx-cugraph/nx_cugraph/tests/test_graph_methods.py
index 3120995a2..40a361b10 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_graph_methods.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_graph_methods.py
@@ -47,7 +47,7 @@ def _create_Gs():
 @pytest.mark.parametrize("Gnx", _create_Gs())
 @pytest.mark.parametrize("reciprocal", [False, True])
 def test_to_undirected_directed(Gnx, reciprocal):
-    Gcg = nxcg.DiGraph(Gnx)
+    Gcg = nxcg.CudaDiGraph(Gnx)
     assert_graphs_equal(Gnx, Gcg)
     Hnx1 = Gnx.to_undirected(reciprocal=reciprocal)
     Hcg1 = Gcg.to_undirected(reciprocal=reciprocal)
@@ -62,6 +62,6 @@ def test_multidigraph_to_undirected():
     Gnx.add_edge(0, 1)
     Gnx.add_edge(0, 1)
     Gnx.add_edge(1, 0)
-    Gcg = nxcg.MultiDiGraph(Gnx)
+    Gcg = nxcg.CudaMultiDiGraph(Gnx)
     with pytest.raises(NotImplementedError):
         Gcg.to_undirected()
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_match_api.py b/python/nx-cugraph/nx_cugraph/tests/test_match_api.py
index 176b531a6..1a61c69b3 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_match_api.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_match_api.py
@@ -14,13 +14,10 @@
 import inspect
 
 import networkx as nx
-from packaging.version import parse
 
 import nx_cugraph as nxcg
 from nx_cugraph.utils import networkx_algorithm
 
-nxver = parse(nx.__version__)
-
 
 def test_match_signature_and_names():
     """Simple test to ensure our signatures and basic module layout match networkx."""
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_multigraph.py b/python/nx-cugraph/nx_cugraph/tests/test_multigraph.py
index a8f189a47..9208eea09 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_multigraph.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_multigraph.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023, NVIDIA CORPORATION.
+# Copyright (c) 2023-2024, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -26,7 +26,7 @@ def test_get_edge_data(test_nxcugraph):
     G.add_edge(0, 3)
     G.add_edge(0, 3)
     if test_nxcugraph:
-        G = nxcg.MultiGraph(G)
+        G = nxcg.CudaMultiGraph(G)
     default = object()
     assert G.get_edge_data(0, 0, default=default) is default
     assert G.get_edge_data("a", "b", default=default) is default
@@ -60,7 +60,7 @@ def test_get_edge_data(test_nxcugraph):
     G = nx.MultiGraph()
     G.add_edge(0, 1)
     if test_nxcugraph:
-        G = nxcg.MultiGraph(G)
+        G = nxcg.CudaMultiGraph(G)
     assert G.get_edge_data(0, 1, default=default) == {0: {}}
     assert G.get_edge_data(0, 1, 0, default=default) == {}
     assert G.get_edge_data(0, 1, 1, default=default) is default
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_pagerank.py b/python/nx-cugraph/nx_cugraph/tests/test_pagerank.py
new file mode 100644
index 000000000..252f9e6bb
--- /dev/null
+++ b/python/nx-cugraph/nx_cugraph/tests/test_pagerank.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import networkx as nx
+import pandas as pd
+import pytest
+
+
+def test_pagerank_multigraph():
+    """
+    Ensures correct pagerank for Graphs and MultiGraphs when using from_pandas_edgelist.
+
+    PageRank for a MultiGraph should give a different result than for a Graph because
+    a Graph drops the duplicate edges.
+    """
+    df = pd.DataFrame(
+        {"source": [0, 1, 1, 1, 1, 1, 1, 2], "target": [1, 2, 2, 2, 2, 2, 2, 3]}
+    )
+    expected_pr_for_G = nx.pagerank(nx.from_pandas_edgelist(df))
+    expected_pr_for_MultiG = nx.pagerank(
+        nx.from_pandas_edgelist(df, create_using=nx.MultiGraph)
+    )
+
+    G = nx.from_pandas_edgelist(df, backend="cugraph")
+    actual_pr_for_G = nx.pagerank(G, backend="cugraph")
+
+    MultiG = nx.from_pandas_edgelist(df, create_using=nx.MultiGraph, backend="cugraph")
+    actual_pr_for_MultiG = nx.pagerank(MultiG, backend="cugraph")
+
+    assert actual_pr_for_G == pytest.approx(expected_pr_for_G)
+    assert actual_pr_for_MultiG == pytest.approx(expected_pr_for_MultiG)
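For context, the asserted difference is visible with plain NetworkX and no backend at all: building a `Graph` from this edgelist collapses the six parallel 1-2 edges into one, while a `MultiGraph` keeps them, and `pagerank` effectively treats the parallel edges as extra weight. A small standalone illustration:

```python
import networkx as nx
import pandas as pd

df = pd.DataFrame(
    {"source": [0, 1, 1, 1, 1, 1, 1, 2], "target": [1, 2, 2, 2, 2, 2, 2, 3]}
)
G = nx.from_pandas_edgelist(df)                               # duplicates dropped
MG = nx.from_pandas_edgelist(df, create_using=nx.MultiGraph)  # duplicates kept
print(G.number_of_edges(), MG.number_of_edges())  # 3 vs 8
print(nx.pagerank(G))   # a walk from node 1 splits evenly between 0 and 2
print(nx.pagerank(MG))  # the six parallel 1-2 edges pull weight toward 1 and 2
```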
diff --git a/python/nx-cugraph/nx_cugraph/tests/testing_utils.py b/python/nx-cugraph/nx_cugraph/tests/testing_utils.py
index 529a96efd..50836acf5 100644
--- a/python/nx-cugraph/nx_cugraph/tests/testing_utils.py
+++ b/python/nx-cugraph/nx_cugraph/tests/testing_utils.py
@@ -17,7 +17,7 @@
 
 def assert_graphs_equal(Gnx, Gcg):
     assert isinstance(Gnx, nx.Graph)
-    assert isinstance(Gcg, nxcg.Graph)
+    assert isinstance(Gcg, nxcg.CudaGraph)
     assert (a := Gnx.number_of_nodes()) == (b := Gcg.number_of_nodes()), (a, b)
     assert (a := Gnx.number_of_edges()) == (b := Gcg.number_of_edges()), (a, b)
     assert (a := Gnx.is_directed()) == (b := Gcg.is_directed()), (a, b)
diff --git a/python/nx-cugraph/nx_cugraph/utils/decorators.py b/python/nx-cugraph/nx_cugraph/utils/decorators.py
index 3c5de4f29..16486996b 100644
--- a/python/nx-cugraph/nx_cugraph/utils/decorators.py
+++ b/python/nx-cugraph/nx_cugraph/utils/decorators.py
@@ -16,10 +16,14 @@
 from textwrap import dedent
 
 import networkx as nx
+from networkx import NetworkXError
 from networkx.utils.decorators import nodes_or_number, not_implemented_for
 
+from nx_cugraph import _nxver
 from nx_cugraph.interface import BackendInterface
 
+from .misc import _And_NotImplementedError
+
 try:
     from networkx.utils.backends import _registered_algorithms
 except ModuleNotFoundError:
@@ -44,6 +48,7 @@ class networkx_algorithm:
     version_added: str
     is_incomplete: bool
     is_different: bool
+    _fallback: bool
     _plc_names: set[str] | None
 
     def __new__(
@@ -59,6 +64,7 @@ def __new__(
         version_added: str,  # Required
         is_incomplete: bool = False,  # See self.extra_doc for details if True
         is_different: bool = False,  # See self.extra_doc for details if True
+        fallback: bool = False,  # Change non-nx exceptions to NotImplementedError
         _plc: str | set[str] | None = None,  # Hidden from user, may be removed someday
     ):
         if func is None:
@@ -70,10 +76,11 @@ def __new__(
                 version_added=version_added,
                 is_incomplete=is_incomplete,
                 is_different=is_different,
+                fallback=fallback,
                 _plc=_plc,
             )
         instance = object.__new__(cls)
-        if nodes_or_number is not None and nx.__version__[:3] > "3.2":
+        if nodes_or_number is not None and _nxver > (3, 2):
             func = nx.utils.decorators.nodes_or_number(nodes_or_number)(func)
         # update_wrapper sets __wrapped__, which will be used for the signature
         update_wrapper(instance, func)
@@ -100,6 +107,7 @@ def __new__(
         instance.version_added = version_added
         instance.is_incomplete = is_incomplete
         instance.is_different = is_different
+        instance.fallback = fallback
         # The docstring on our function is added to the NetworkX docstring.
         instance.extra_doc = (
             dedent(func.__doc__.lstrip("\n").rstrip()) if func.__doc__ else None
@@ -113,7 +121,7 @@ def __new__(
         # Set methods so they are in __dict__
         instance._can_run = instance._can_run
         instance._should_run = instance._should_run
-        if nodes_or_number is not None and nx.__version__[:3] <= "3.2":
+        if nodes_or_number is not None and _nxver <= (3, 2):
             instance = nx.utils.decorators.nodes_or_number(nodes_or_number)(instance)
         return instance
 
@@ -136,7 +144,14 @@ def _should_run(self, func):
         self.should_run = func
 
     def __call__(self, /, *args, **kwargs):
-        return self.__wrapped__(*args, **kwargs)
+        if not self.fallback:
+            return self.__wrapped__(*args, **kwargs)
+        try:
+            return self.__wrapped__(*args, **kwargs)
+        except NetworkXError:
+            raise
+        except Exception as exc:
+            raise _And_NotImplementedError(exc) from exc
 
     def __reduce__(self):
         return _restore_networkx_dispatched, (self.name,)
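The new `fallback` flag changes how non-NetworkX exceptions escape a decorated algorithm: `NetworkXError` propagates unchanged, while anything else is re-raised wrapped in `_And_NotImplementedError`, so it can also be caught as `NotImplementedError` (which is how a dispatcher can decide to fall back). A simplified sketch of that flow under those assumptions; this is not the decorator itself:

```python
import networkx as nx
from nx_cugraph.utils.misc import _And_NotImplementedError

def call_with_fallback(func, /, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except nx.NetworkXError:
        raise  # genuine NetworkX errors are not converted
    except Exception as exc:
        # The wrapped exception is both its original type and NotImplementedError.
        raise _And_NotImplementedError(exc) from exc
```

Catching the result as either its original type (say, `KeyError`) or `NotImplementedError` works, as the doctests added to `misc.py` below show.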
diff --git a/python/nx-cugraph/nx_cugraph/utils/misc.py b/python/nx-cugraph/nx_cugraph/utils/misc.py
index 8526524f1..01c25dd59 100644
--- a/python/nx-cugraph/nx_cugraph/utils/misc.py
+++ b/python/nx-cugraph/nx_cugraph/utils/misc.py
@@ -194,7 +194,7 @@ def _get_int_dtype(
 
 
 def _get_float_dtype(
-    dtype: Dtype, *, graph: nxcg.Graph | None = None, weight: EdgeKey | None = None
+    dtype: Dtype, *, graph: nxcg.CudaGraph | None = None, weight: EdgeKey | None = None
 ):
     """Promote dtype to float32 or float64 as appropriate."""
     if dtype is None:
@@ -238,3 +238,37 @@ def _cp_iscopied_asarray(a, *args, orig_object=None, **kwargs):
     ):
         return False, arr
     return True, arr
+
+
+class _And_NotImplementedError(NotImplementedError):
+    """Additionally make an exception a ``NotImplementedError``.
+
+    For example:
+
+    >>> try:
+    ...     raise _And_NotImplementedError(KeyError("missing"))
+    ... except KeyError:
+    ...     pass
+
+    or
+
+    >>> try:
+    ...     raise _And_NotImplementedError(KeyError("missing"))
+    ... except NotImplementedError:
+    ...     pass
+
+    """
+
+    def __new__(cls, exc):
+        exc_type = type(exc)
+        if issubclass(exc_type, NotImplementedError):
+            new_type = exc_type
+        else:
+            new_type = type(
+                f"{exc_type.__name__}{cls.__name__}",
+                (exc_type, NotImplementedError),
+                {},
+            )
+        instance = NotImplementedError.__new__(new_type)
+        instance.__init__(*exc.args)
+        return instance
diff --git a/python/nx-cugraph/pyproject.toml b/python/nx-cugraph/pyproject.toml
index f69451c02..d145aa549 100644
--- a/python/nx-cugraph/pyproject.toml
+++ b/python/nx-cugraph/pyproject.toml
@@ -18,15 +18,15 @@ authors = [
     { name = "NVIDIA Corporation" },
 ]
 license = { text = "Apache 2.0" }
-requires-python = ">=3.9"
+requires-python = ">=3.10"
 classifiers = [
     "Development Status :: 4 - Beta",
     "License :: OSI Approved :: Apache Software License",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3 :: Only",
     "Intended Audience :: Developers",
     "Topic :: Software Development :: Libraries :: Python Modules",
@@ -34,13 +34,12 @@ classifiers = [
 dependencies = [
     "cupy-cuda11x>=12.0.0",
     "networkx>=3.0",
-    "numpy>=1.23,<2.0a0",
-    "pylibcugraph==24.8.*",
+    "numpy>=1.23,<3.0a0",
+    "pylibcugraph==24.12.*,>=0.0.0a0",
 ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
 
 [project.optional-dependencies]
 test = [
-    "packaging>=21",
     "pandas",
     "pytest",
     "pytest-benchmark",
@@ -90,7 +89,7 @@ matrix-entry = "cuda_suffixed=true"
 
 [tool.black]
 line-length = 88
-target-version = ["py39", "py310", "py311"]
+target-version = ["py310", "py311", "py312"]
 
 [tool.isort]
 sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]
@@ -156,7 +155,7 @@ exclude_lines = [
 [tool.ruff]
 # https://github.com/charliermarsh/ruff/
 line-length = 88
-target-version = "py39"
+target-version = "py310"
 [tool.ruff.lint]
 unfixable = [
     "F841",  # unused-variable (Note: can leave useless expression)
@@ -170,6 +169,7 @@ external = [
 ]
 ignore = [
     # Would be nice to fix these
+    "B905",  # `zip()` without an explicit `strict=` parameter (Note: possible since py39 was dropped; we should do this!)
     "D100",  # Missing docstring in public module
     "D101",  # Missing docstring in public class
     "D102",  # Missing docstring in public method
@@ -215,6 +215,7 @@ ignore = [
     "SIM105",  # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster)
     "SIM108",  # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer)
     "TRY003",  # Avoid specifying long messages outside the exception class (Note: why?)
+    "UP038",  # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: tuple is faster for now)
 
     # Ignored categories
     "C90",  # mccabe (Too strict, but maybe we should make things less complex)
@@ -241,6 +242,7 @@ ignore = [
 # Allow assert, print, RNG, and no docstring
 "nx_cugraph/**/tests/*py" = ["S101", "S311", "T201", "D103", "D100"]
 "_nx_cugraph/__init__.py" = ["E501"]
+"nx_cugraph/__init__.py" = ["E402"]  # Allow module level import not at top of file
 "nx_cugraph/algorithms/**/*py" = ["D205", "D401"]  # Allow flexible docstrings for algorithms
 "nx_cugraph/generators/**/*py" = ["D205", "D401"]  # Allow flexible docstrings for generators
 "nx_cugraph/interface.py" = ["D401"]  # Flexible docstrings
diff --git a/python/nx-cugraph/run_nx_tests.sh b/python/nx-cugraph/run_nx_tests.sh
index bceec53b7..5fb173cf9 100755
--- a/python/nx-cugraph/run_nx_tests.sh
+++ b/python/nx-cugraph/run_nx_tests.sh
@@ -18,6 +18,10 @@
 #   testing takes longer.  Without it, tests will xfail when encountering a
 #   function that we don't implement.
 #
+# NX_CUGRAPH_USE_COMPAT_GRAPHS, {"True", "False"}, default is "True"
+#   Whether to use `nxcg.Graph` as the nx_cugraph backend graph.
+#   An nxcg.Graph is intended to be a NetworkX-compatible graph, so fewer tests should fail.
+#
 # Coverage of `nx_cugraph.algorithms` is reported and is a good sanity check
 # that algorithms run.
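A hedged sketch of what the toggle is expected to change, assuming the variable is read when `nx_cugraph` is first imported (so it must be set beforehand) and that it controls the same compat-graph behavior exercised by the tests above:

```python
import os

# Hypothetical usage: disable compat graphs before nx_cugraph is first imported.
os.environ["NX_CUGRAPH_USE_COMPAT_GRAPHS"] = "False"

import networkx as nx

G = nx.complete_graph(5, backend="cugraph")
print(type(G).__name__)  # expected "CudaGraph"; with the default "True", "Graph"
```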