diff --git a/pyproject.toml b/pyproject.toml
index 9b617f667..684e636b2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,6 +32,7 @@ dependencies = [
     "pydantic-settings>=2.5.2",
     "uvicorn>=0.23.1; sys_platform != 'emscripten'",
     "jsonschema>=4.20.0",
+    "psutil>=5.9.0,<6.0.0",
 ]
 
 [project.optional-dependencies]
diff --git a/src/mcp/client/stdio/__init__.py b/src/mcp/client/stdio/__init__.py
index a75cfd764..344ac0731 100644
--- a/src/mcp/client/stdio/__init__.py
+++ b/src/mcp/client/stdio/__init__.py
@@ -6,6 +6,9 @@
 
 import anyio
 import anyio.lowlevel
+import anyio.to_thread
+import psutil
+from anyio.abc import Process
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from anyio.streams.text import TextReceiveStream
 from pydantic import BaseModel, Field
@@ -14,9 +17,9 @@
 from mcp.shared.message import SessionMessage
 
 from .win32 import (
+    FallbackProcess,
     create_windows_process,
     get_windows_executable_command,
-    terminate_windows_process,
 )
 
 # Environment variables to inherit by default
@@ -178,15 +181,7 @@ async def stdin_writer():
         try:
             yield read_stream, write_stream
         finally:
-            # Clean up process to prevent any dangling orphaned processes
-            try:
-                if sys.platform == "win32":
-                    await terminate_windows_process(process)
-                else:
-                    process.terminate()
-            except ProcessLookupError:
-                # Process already exited, which is fine
-                pass
+            await _shutdown_process(process)
             await read_stream.aclose()
             await write_stream.aclose()
             await read_stream_writer.aclose()
@@ -223,6 +218,99 @@ async def _create_platform_compatible_process(
     if sys.platform == "win32":
         process = await create_windows_process(command, args, env, errlog, cwd)
     else:
-        process = await anyio.open_process([command, *args], env=env, stderr=errlog, cwd=cwd)
+        process = await anyio.open_process(
+            [command, *args],
+            env=env,
+            stderr=errlog,
+            cwd=cwd,
+            start_new_session=True,
+        )
 
     return process
+
+
+async def _shutdown_process(process: Process | FallbackProcess) -> None:
+    """
+    MCP spec: stdio shutdown sequence
+    1. Close the input stream to the server
+    2. Wait for the server to exit, or send SIGTERM if it doesn't exit in time
+    3. Send SIGKILL if still not exited (forcibly kill on Windows)
+    """
+
+    # 1. Close input stream to server
+    if process.stdin:
+        try:
+            await process.stdin.aclose()
+        except Exception:
+            # stdin might already be closed, which is fine
+            pass
+
+    try:
+        # Wait for server to exit gracefully after stdin closes
+        with anyio.fail_after(2.0):
+            await process.wait()
+    except TimeoutError:
+        # 2. Send SIGTERM if it doesn't exit in time
+        # 3. Send SIGKILL if still not exited (forcibly kill on Windows)
+        await _terminate_process_with_children(process)
+    except ProcessLookupError:
+        # Process already exited, which is fine
+        pass
+
+
+async def _terminate_process_with_children(process: Process | FallbackProcess, timeout: float = 2.0) -> None:
+    """
+    Terminate a process and all its children using psutil.
+
+    This provides consistent behavior across platforms and properly
+    handles process trees without shell commands.
+
+    Platform behavior:
+    - On Unix: psutil.terminate() sends SIGTERM, allowing graceful shutdown
+    - On Windows: psutil.terminate() calls TerminateProcess(), which is immediate
+      and doesn't allow cleanup handlers to run. This can cause ResourceWarnings
+      for subprocess.Popen objects that don't get to clean up.
+ """ + pid = getattr(process, "pid", None) + if pid is None: + popen = getattr(process, "popen", None) + if popen: + pid = getattr(popen, "pid", None) + + if not pid: + # Process has no PID, cannot terminate + return + + try: + parent = psutil.Process(pid) + children = parent.children(recursive=True) + + # First, try graceful termination for all children + for child in children: + try: + child.terminate() + except psutil.NoSuchProcess: + pass + + # Then, also terminate the parent process + try: + parent.terminate() + except psutil.NoSuchProcess: + return + + # Wait for processes to exit gracefully, force kill any that remain + all_procs = children + [parent] + _, alive = await anyio.to_thread.run_sync(lambda: psutil.wait_procs(all_procs, timeout=timeout)) + for proc in alive: + try: + proc.kill() + except psutil.NoSuchProcess: + pass + + # Wait a bit more for force-killed processes + if alive: + await anyio.to_thread.run_sync(lambda: psutil.wait_procs(alive, timeout=0.5)) + + except psutil.NoSuchProcess: + # Process already terminated + pass diff --git a/src/mcp/client/stdio/win32.py b/src/mcp/client/stdio/win32.py index 7246b9dec..3e6a20640 100644 --- a/src/mcp/client/stdio/win32.py +++ b/src/mcp/client/stdio/win32.py @@ -9,8 +9,6 @@ from typing import BinaryIO, TextIO, cast import anyio -from anyio import to_thread -from anyio.abc import Process from anyio.streams.file import FileReadStream, FileWriteStream @@ -75,9 +73,18 @@ async def __aexit__( exc_val: BaseException | None, exc_tb: object | None, ) -> None: - """Terminate and wait on process exit inside a thread.""" - self.popen.terminate() - await to_thread.run_sync(self.popen.wait) + """Clean up process and streams. + + Attempts to terminate the process, but doesn't fail if termination + is not possible (e.g., process already dead or being handled elsewhere). + """ + try: + self.popen.terminate() + with anyio.move_on_after(0.5): + await self.wait() + except (ProcessLookupError, OSError): + # Process already dead or being handled elsewhere + pass # Close the file handles to prevent ResourceWarning if self.stdin: @@ -92,8 +99,13 @@ async def __aexit__( self.stderr.close() async def wait(self): - """Async wait for process completion.""" - return await to_thread.run_sync(self.popen.wait) + """ + Poll the process status instead of blocking wait + This allows anyio timeouts to work properly + """ + while self.popen.poll() is None: + await anyio.sleep(0.1) + return self.popen.returncode def terminate(self): """Terminate the subprocess immediately.""" @@ -103,6 +115,11 @@ def kill(self) -> None: """Kill the subprocess immediately (alias for terminate).""" self.terminate() + @property + def pid(self) -> int: + """Return the process ID.""" + return self.popen.pid + # ------------------------ # Updated function @@ -159,24 +176,3 @@ async def create_windows_process( bufsize=0, ) return FallbackProcess(popen_obj) - - -async def terminate_windows_process(process: Process | FallbackProcess): - """ - Terminate a Windows process. - - Note: On Windows, terminating a process with process.terminate() doesn't - always guarantee immediate process termination. - So we give it 2s to exit, or we call process.kill() - which sends a SIGKILL equivalent signal. 
-
-    Args:
-        process: The process to terminate
-    """
-    try:
-        process.terminate()
-        with anyio.fail_after(2.0):
-            await process.wait()
-    except TimeoutError:
-        # Force kill if it doesn't terminate
-        process.kill()
diff --git a/tests/client/test_stdio.py b/tests/client/test_stdio.py
index c66a16ab9..2ed172df8 100644
--- a/tests/client/test_stdio.py
+++ b/tests/client/test_stdio.py
@@ -1,10 +1,17 @@
+import os
 import shutil
+import sys
+import tempfile
+import textwrap
+import time
 
+import anyio
 import pytest
 
 from mcp.client.session import ClientSession
 from mcp.client.stdio import (
     StdioServerParameters,
+    _create_platform_compatible_process,
     stdio_client,
 )
 from mcp.shared.exceptions import McpError
@@ -90,3 +97,540 @@ async def test_stdio_client_nonexistent_command():
         or "not found" in error_message.lower()
         or "cannot find the file" in error_message.lower()  # Windows error message
     )
+
+
+@pytest.mark.anyio
+async def test_stdio_client_universal_cleanup():
+    """
+    Test that stdio_client completes cleanup within a reasonable time
+    even when connected to processes that exit slowly.
+    """
+
+    # Use a Python script that simulates a long-running process
+    # This ensures consistent behavior across platforms
+    long_running_script = textwrap.dedent(
+        """
+        import time
+        import sys
+
+        # Simulate a long-running process
+        for i in range(100):
+            time.sleep(0.1)
+            # Flush to ensure output is visible
+            sys.stdout.flush()
+            sys.stderr.flush()
+        """
+    )
+
+    server_params = StdioServerParameters(
+        command=sys.executable,
+        args=["-c", long_running_script],
+    )
+
+    start_time = time.time()
+
+    with anyio.move_on_after(8.0) as cancel_scope:
+        async with stdio_client(server_params) as (read_stream, write_stream):
+            # Immediately exit - this triggers cleanup while the process is still running
+            pass
+
+    end_time = time.time()
+    elapsed = end_time - start_time
+
+    # On Windows: 2s (stdin wait) + 2s (terminate wait) + overhead = ~5s expected
+    assert elapsed < 6.0, (
+        f"stdio_client cleanup took {elapsed:.1f} seconds, expected < 6.0 seconds. "
+        f"This suggests the timeout mechanism may not be working properly."
+    )
+
+    # Check if we timed out
+    if cancel_scope.cancelled_caught:
+        pytest.fail(
+            "stdio_client cleanup timed out after 8.0 seconds. "
+            "This indicates the cleanup mechanism is hanging and needs fixing."
+        )
+
+
+@pytest.mark.anyio
+@pytest.mark.skipif(sys.platform == "win32", reason="Windows signal handling is different")
+async def test_stdio_client_sigint_only_process():
+    """
+    Test cleanup with a process that ignores SIGTERM but responds to SIGINT.
+ """ + # Create a Python script that ignores SIGTERM but handles SIGINT + script_content = textwrap.dedent( + """ + import signal + import sys + import time + + # Ignore SIGTERM (what process.terminate() sends) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + # Handle SIGINT (Ctrl+C signal) by exiting cleanly + def sigint_handler(signum, frame): + sys.exit(0) + + signal.signal(signal.SIGINT, sigint_handler) + + # Keep running until SIGINT received + while True: + time.sleep(0.1) + """ + ) + + server_params = StdioServerParameters( + command=sys.executable, + args=["-c", script_content], + ) + + start_time = time.time() + + try: + # Use anyio timeout to prevent test from hanging forever + with anyio.move_on_after(5.0) as cancel_scope: + async with stdio_client(server_params) as (read_stream, write_stream): + # Let the process start and begin ignoring SIGTERM + await anyio.sleep(0.5) + # Exit context triggers cleanup - this should not hang + pass + + if cancel_scope.cancelled_caught: + raise TimeoutError("Test timed out") + + end_time = time.time() + elapsed = end_time - start_time + + # Should complete quickly even with SIGTERM-ignoring process + # This will fail if cleanup only uses process.terminate() without fallback + assert elapsed < 5.0, ( + f"stdio_client cleanup took {elapsed:.1f} seconds with SIGTERM-ignoring process. " + f"Expected < 5.0 seconds. This suggests the cleanup needs SIGINT/SIGKILL fallback." + ) + except (TimeoutError, Exception) as e: + if isinstance(e, TimeoutError) or "timed out" in str(e): + pytest.fail( + "stdio_client cleanup timed out after 5.0 seconds with SIGTERM-ignoring process. " + "This confirms the cleanup needs SIGINT/SIGKILL fallback for processes that ignore SIGTERM." + ) + else: + raise + + +@pytest.mark.anyio +async def test_stdio_client_graceful_stdin_exit(): + """ + Test that a process exits gracefully when stdin is closed, + without needing SIGTERM or SIGKILL. + """ + # Create a Python script that exits when stdin is closed + script_content = textwrap.dedent( + """ + import sys + + # Read from stdin until it's closed + try: + while True: + line = sys.stdin.readline() + if not line: # EOF/stdin closed + break + except: + pass + + # Exit gracefully + sys.exit(0) + """ + ) + + server_params = StdioServerParameters( + command=sys.executable, + args=["-c", script_content], + ) + + start_time = time.time() + + # Use anyio timeout to prevent test from hanging forever + with anyio.move_on_after(5.0) as cancel_scope: + async with stdio_client(server_params) as (read_stream, write_stream): + # Let the process start and begin reading stdin + await anyio.sleep(0.2) + # Exit context triggers cleanup - process should exit from stdin closure + pass + + if cancel_scope.cancelled_caught: + pytest.fail( + "stdio_client cleanup timed out after 5.0 seconds. " + "Process should have exited gracefully when stdin was closed." + ) + + end_time = time.time() + elapsed = end_time - start_time + + # Should complete quickly with just stdin closure (no signals needed) + assert elapsed < 3.0, ( + f"stdio_client cleanup took {elapsed:.1f} seconds for stdin-aware process. " + f"Expected < 3.0 seconds since process should exit on stdin closure." + ) + + +@pytest.mark.anyio +async def test_stdio_client_stdin_close_ignored(): + """ + Test that when a process ignores stdin closure, the shutdown sequence + properly escalates to SIGTERM. 
+ """ + # Create a Python script that ignores stdin closure but responds to SIGTERM + script_content = textwrap.dedent( + """ + import signal + import sys + import time + + # Set up SIGTERM handler to exit cleanly + def sigterm_handler(signum, frame): + sys.exit(0) + + signal.signal(signal.SIGTERM, sigterm_handler) + + # Close stdin immediately to simulate ignoring it + sys.stdin.close() + + # Keep running until SIGTERM + while True: + time.sleep(0.1) + """ + ) + + server_params = StdioServerParameters( + command=sys.executable, + args=["-c", script_content], + ) + + start_time = time.time() + + # Use anyio timeout to prevent test from hanging forever + with anyio.move_on_after(7.0) as cancel_scope: + async with stdio_client(server_params) as (read_stream, write_stream): + # Let the process start + await anyio.sleep(0.2) + # Exit context triggers cleanup + pass + + if cancel_scope.cancelled_caught: + pytest.fail( + "stdio_client cleanup timed out after 7.0 seconds. " + "Process should have been terminated via SIGTERM escalation." + ) + + end_time = time.time() + elapsed = end_time - start_time + + # Should take ~2 seconds (stdin close timeout) before SIGTERM is sent + # Total time should be between 2-4 seconds + assert 1.5 < elapsed < 4.5, ( + f"stdio_client cleanup took {elapsed:.1f} seconds for stdin-ignoring process. " + f"Expected between 2-4 seconds (2s stdin timeout + termination time)." + ) + + +class TestChildProcessCleanup: + """ + Tests for child process cleanup functionality using _terminate_process_with_children. + + These tests verify that child processes are properly terminated when the parent + is killed, addressing the issue where processes like npx spawn child processes + that need to be cleaned up. The tests cover various process tree scenarios: + + - Basic parent-child relationship (single child process) + - Multi-level process trees (parent → child → grandchild) + - Race conditions where parent exits during cleanup + + Note on Windows ResourceWarning: + On Windows, we may see ResourceWarning about subprocess still running. This is + expected behavior due to how Windows process termination works: + - anyio's process.terminate() calls Windows TerminateProcess() API + - TerminateProcess() immediately kills the process without allowing cleanup + - subprocess.Popen objects in the killed process can't run their cleanup code + - Python detects this during garbage collection and issues a ResourceWarning + + This warning does NOT indicate a process leak - the processes are properly + terminated. It only means the Popen objects couldn't clean up gracefully. + This is a fundamental difference between Windows and Unix process termination. + """ + + @staticmethod + def _escape_path_for_python(path: str) -> str: + """Escape a file path for use in Python code strings.""" + # Use forward slashes which work on all platforms and don't need escaping + return repr(path.replace("\\", "/")) + + @pytest.mark.anyio + @pytest.mark.filterwarnings("ignore::ResourceWarning" if sys.platform == "win32" else "default") + async def test_basic_child_process_cleanup(self): + """ + Test basic parent-child process cleanup. + Parent spawns a single child process that writes continuously to a file. 
+ """ + # Create a marker file for the child process to write to + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + marker_file = f.name + + # Also create a file to verify parent started + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + parent_marker = f.name + + try: + # Parent script that spawns a child process + parent_script = textwrap.dedent( + f""" + import subprocess + import sys + import time + import os + + # Mark that parent started + with open({self._escape_path_for_python(parent_marker)}, 'w') as f: + f.write('parent started\\n') + + # Child script that writes continuously + child_script = f''' + import time + with open({self._escape_path_for_python(marker_file)}, 'a') as f: + while True: + f.write(f"{time.time()}") + f.flush() + time.sleep(0.1) + ''' + + # Start the child process + child = subprocess.Popen([sys.executable, '-c', child_script]) + + # Parent just sleeps + while True: + time.sleep(0.1) + """ + ) + + print("\nStarting child process termination test...") + + # Start the parent process + proc = await _create_platform_compatible_process(sys.executable, ["-c", parent_script]) + + # Wait for processes to start + await anyio.sleep(0.5) + + # Verify parent started + assert os.path.exists(parent_marker), "Parent process didn't start" + + # Verify child is writing + if os.path.exists(marker_file): + initial_size = os.path.getsize(marker_file) + await anyio.sleep(0.3) + size_after_wait = os.path.getsize(marker_file) + assert size_after_wait > initial_size, "Child process should be writing" + print(f"Child is writing (file grew from {initial_size} to {size_after_wait} bytes)") + + # Terminate using our function + print("Terminating process and children...") + from mcp.client.stdio import _terminate_process_with_children + + await _terminate_process_with_children(proc) + + # Verify processes stopped + await anyio.sleep(0.5) + if os.path.exists(marker_file): + size_after_cleanup = os.path.getsize(marker_file) + await anyio.sleep(0.5) + final_size = os.path.getsize(marker_file) + + print(f"After cleanup: file size {size_after_cleanup} -> {final_size}") + assert ( + final_size == size_after_cleanup + ), f"Child process still running! File grew by {final_size - size_after_cleanup} bytes" + + print("SUCCESS: Child process was properly terminated") + + finally: + # Clean up files + for f in [marker_file, parent_marker]: + try: + os.unlink(f) + except OSError: + pass + + @pytest.mark.anyio + @pytest.mark.filterwarnings("ignore::ResourceWarning" if sys.platform == "win32" else "default") + async def test_nested_process_tree(self): + """ + Test nested process tree cleanup (parent → child → grandchild). + Each level writes to a different file to verify all processes are terminated. 
+ """ + # Create temporary files for each process level + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f1: + parent_file = f1.name + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f2: + child_file = f2.name + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f3: + grandchild_file = f3.name + + try: + # Simple nested process tree test + # We create parent -> child -> grandchild, each writing to a file + parent_script = textwrap.dedent( + f""" + import subprocess + import sys + import time + import os + + # Child will spawn grandchild and write to child file + child_script = f'''import subprocess + import sys + import time + + # Grandchild just writes to file + grandchild_script = \"\"\"import time + with open({self._escape_path_for_python(grandchild_file)}, 'a') as f: + while True: + f.write(f"gc {{time.time()}}") + f.flush() + time.sleep(0.1)\"\"\" + + # Spawn grandchild + subprocess.Popen([sys.executable, '-c', grandchild_script]) + + # Child writes to its file + with open({self._escape_path_for_python(child_file)}, 'a') as f: + while True: + f.write(f"c {time.time()}") + f.flush() + time.sleep(0.1)''' + + # Spawn child process + subprocess.Popen([sys.executable, '-c', child_script]) + + # Parent writes to its file + with open({self._escape_path_for_python(parent_file)}, 'a') as f: + while True: + f.write(f"p {time.time()}") + f.flush() + time.sleep(0.1) + """ + ) + + # Start the parent process + proc = await _create_platform_compatible_process(sys.executable, ["-c", parent_script]) + + # Let all processes start + await anyio.sleep(1.0) + + # Verify all are writing + for file_path, name in [(parent_file, "parent"), (child_file, "child"), (grandchild_file, "grandchild")]: + if os.path.exists(file_path): + initial_size = os.path.getsize(file_path) + await anyio.sleep(0.3) + new_size = os.path.getsize(file_path) + assert new_size > initial_size, f"{name} process should be writing" + + # Terminate the whole tree + from mcp.client.stdio import _terminate_process_with_children + + await _terminate_process_with_children(proc) + + # Verify all stopped + await anyio.sleep(0.5) + for file_path, name in [(parent_file, "parent"), (child_file, "child"), (grandchild_file, "grandchild")]: + if os.path.exists(file_path): + size1 = os.path.getsize(file_path) + await anyio.sleep(0.3) + size2 = os.path.getsize(file_path) + assert size1 == size2, f"{name} still writing after cleanup!" + + print("SUCCESS: All processes in tree terminated") + + finally: + # Clean up all marker files + for f in [parent_file, child_file, grandchild_file]: + try: + os.unlink(f) + except OSError: + pass + + @pytest.mark.anyio + @pytest.mark.filterwarnings("ignore::ResourceWarning" if sys.platform == "win32" else "default") + async def test_early_parent_exit(self): + """ + Test cleanup when parent exits during termination sequence. + Tests the race condition where parent might die during our termination + sequence but we can still clean up the children via the process group. 
+ """ + # Create a temporary file for the child + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + marker_file = f.name + + try: + # Parent that spawns child and waits briefly + parent_script = textwrap.dedent( + f""" + import subprocess + import sys + import time + import signal + + # Child that continues running + child_script = f'''import time + with open({self._escape_path_for_python(marker_file)}, 'a') as f: + while True: + f.write(f"child {time.time()}") + f.flush() + time.sleep(0.1)''' + + # Start child in same process group + subprocess.Popen([sys.executable, '-c', child_script]) + + # Parent waits a bit then exits on SIGTERM + def handle_term(sig, frame): + sys.exit(0) + + signal.signal(signal.SIGTERM, handle_term) + + # Wait + while True: + time.sleep(0.1) + """ + ) + + # Start the parent process + proc = await _create_platform_compatible_process(sys.executable, ["-c", parent_script]) + + # Let child start writing + await anyio.sleep(0.5) + + # Verify child is writing + if os.path.exists(marker_file): + size1 = os.path.getsize(marker_file) + await anyio.sleep(0.3) + size2 = os.path.getsize(marker_file) + assert size2 > size1, "Child should be writing" + + # Terminate - this will kill the process group even if parent exits first + from mcp.client.stdio import _terminate_process_with_children + + await _terminate_process_with_children(proc) + + # Verify child stopped + await anyio.sleep(0.5) + if os.path.exists(marker_file): + size3 = os.path.getsize(marker_file) + await anyio.sleep(0.3) + size4 = os.path.getsize(marker_file) + assert size3 == size4, "Child should be terminated" + + print("SUCCESS: Child terminated even with parent exit during cleanup") + + finally: + # Clean up marker file + try: + os.unlink(marker_file) + except OSError: + pass diff --git a/uv.lock b/uv.lock index cfcc8238e..58e22b883 100644 --- a/uv.lock +++ b/uv.lock @@ -560,6 +560,7 @@ dependencies = [ { name = "httpx" }, { name = "httpx-sse" }, { name = "jsonschema" }, + { name = "psutil" }, { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-multipart" }, @@ -605,6 +606,7 @@ requires-dist = [ { name = "httpx", specifier = ">=0.27" }, { name = "httpx-sse", specifier = ">=0.4" }, { name = "jsonschema", specifier = ">=4.20.0" }, + { name = "psutil", specifier = ">=5.9.0,<6.0.0" }, { name = "pydantic", specifier = ">=2.7.2,<3.0.0" }, { name = "pydantic-settings", specifier = ">=2.5.2" }, { name = "python-dotenv", marker = "extra == 'cli'", specifier = ">=1.0.0" }, @@ -1132,6 +1134,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, ] +[[package]] +name = "psutil" +version = "5.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/b6/ea8a7728f096a597f0032564e8013b705aa992a0990becd773dcc4d7b4a7/psutil-5.9.0.tar.gz", hash = "sha256:869842dbd66bb80c3217158e629d6fceaecc3a3166d3d1faee515b05dd26ca25", size = 478322, upload-time = "2021-12-29T21:27:59.163Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/48/2c6f566d35a38fb9f882e51d75425a6f1d097cb946e05b6aff98d450a151/psutil-5.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90a58b9fcae2dbfe4ba852b57bd4a1dded6b990a33d6428c7614b7d48eccb492", size = 238624, upload-time = "2021-12-29T21:26:42.964Z" }, 
+ { url = "https://files.pythonhosted.org/packages/11/46/e790221e8281af5163517a17a20c88b10a75a5642d9c5106a868f2879edd/psutil-5.9.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff0d41f8b3e9ebb6b6110057e40019a432e96aae2008951121ba4e56040b84f3", size = 279343, upload-time = "2021-12-29T21:26:46.859Z" }, + { url = "https://files.pythonhosted.org/packages/6f/8a/d1810472a4950a31df385eafbc9bd20cde971814ff6533021dc565bf14ae/psutil-5.9.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:742c34fff804f34f62659279ed5c5b723bb0195e9d7bd9907591de9f8f6558e2", size = 281400, upload-time = "2021-12-29T21:26:51.801Z" }, + { url = "https://files.pythonhosted.org/packages/61/93/4251cfa58e5bbd7f92e1bfb965a0c41376cbcbc83c524a8b60d2678f0edd/psutil-5.9.0-cp310-cp310-win32.whl", hash = "sha256:8293942e4ce0c5689821f65ce6522ce4786d02af57f13c0195b40e1edb1db61d", size = 241383, upload-time = "2021-12-29T21:26:55.364Z" }, + { url = "https://files.pythonhosted.org/packages/9f/c9/7fb339d6a04db3b4ab94671536d11e03b23c056d1604e50e564075a96cd8/psutil-5.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:9b51917c1af3fa35a3f2dabd7ba96a2a4f19df3dec911da73875e1edaf22a40b", size = 245540, upload-time = "2021-12-29T21:26:59.088Z" }, +] + [[package]] name = "pycparser" version = "2.22"