Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
9eaa565
Initial state for new refactor of scheduler
markurtz Jul 31, 2025
218cdfc
Implement SchedulingStrategy and derivative classes so they work with…
markurtz Jul 31, 2025
a5ccd70
Finalize adapting scheduler strategies to new refactor logic, additio…
markurtz Jul 31, 2025
4926284
Initial refactoring of backend and generation objects to work with ne…
markurtz Aug 1, 2025
4a789bc
Finalize refactoring of backend package to work with new scheduler re…
markurtz Aug 1, 2025
d8858b6
Refactors to simplify and enable benchmark profiles
markurtz Aug 4, 2025
8098f33
Finalize refactor of profile, refactor benchmarker
markurtz Aug 5, 2025
6ea2396
Refactor benchmark object, benchmarker, and introduce aggregators as …
markurtz Aug 6, 2025
764ea4a
Refactor entrypoints and benchmark pathways for new scheduler and ben…
markurtz Aug 7, 2025
92b41e4
Refactor and updates for progress and outputs to work with the new st…
markurtz Aug 9, 2025
8c70303
Latest state with the bulk of tests for the scheduler and bug fixes
markurtz Aug 11, 2025
78a0258
Tests and fixes for WorkerProcess and supporting changes
markurtz Aug 12, 2025
fcd79dc
Fix for inheritance chain in backend ABC
sjmonson Aug 13, 2025
d67119e
Update backend component unit tests
sjmonson Aug 13, 2025
1b2c185
Backend typing cleanup
sjmonson Aug 13, 2025
b5f510e
Add tests for openai completion code
sjmonson Aug 13, 2025
0c0a8f2
Implement worker group and worker tests, fix any bugs/issues that pop…
markurtz Aug 13, 2025
0c6f679
Implement tests and fixes for scheduler to finish of scheduler packag…
markurtz Aug 14, 2025
8fe4e2e
Refactors to enable tests for benchmarker package and supporting tools
markurtz Aug 19, 2025
3c6819f
Fixes for benchmarker aggregator and fixes from cursor for openai.py,…
markurtz Aug 19, 2025
6f3d753
Features/scheduler refactor tests (#284)
AlonKellner-RedHat Aug 19, 2025
c00ac91
Fixes for e2e enablement
markurtz Aug 20, 2025
e9b59a3
Fix progress displays
markurtz Aug 20, 2025
7bfc8a2
Fix: Interleave RPS worker timings
sjmonson Aug 25, 2025
7d04765
Don't spawn more workers than max_concurrency
sjmonson Aug 25, 2025
8e0e5ea
Fix issue when procs don't evenly divide concurrency
sjmonson Aug 25, 2025
be15fe3
fixes and updates for initial core PR for utils that has been posted
markurtz Aug 20, 2025
83b01c0
Latest state updates for perf fixes for multiprocessing communication
markurtz Aug 26, 2025
427caea
latest state and fixes from review
markurtz Aug 26, 2025
40f051a
Add helper for converting literals to list of strings
sjmonson Aug 27, 2025
967c427
Fix incorrect field in benchmark object test
sjmonson Aug 28, 2025
f3bf683
Rework of underlying messaging again to get better performance
markurtz Aug 28, 2025
ac07cc6
Attempts to fix stranded messages
markurtz Aug 28, 2025
2fbf052
Fixes for new refactor runs
markurtz Aug 28, 2025
fc1d6df
Pass MP context to InterProcessMessaging
sjmonson Aug 28, 2025
0ecebf2
Almost working e2e
markurtz Aug 28, 2025
7360368
Add a failing test for Generic Serializing
sjmonson Aug 29, 2025
80beb67
quick utils enhancements
markurtz Aug 29, 2025
3509a7b
quick update to tests
markurtz Aug 29, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -230,3 +230,6 @@ src/ui/next-env.d.ts
!src/ui/public/manifest.json
!src/ui/serve.json
.eslintcache

# vllm-sim
bin/
File renamed without changes.
18 changes: 12 additions & 6 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,13 @@ keywords = [
]
dependencies = [
"click>=8.0.0,<8.2.0",
"culsans~=0.9.0",
"datasets",
"eval_type_backport",
"ftfy>=6.0.0",
"httpx[http2]<1.0.0",
"loguru",
"msgpack",
"numpy",
"pillow",
"protobuf",
Expand All @@ -57,6 +60,7 @@ dependencies = [
"pyyaml>=6.0.0",
"rich",
"transformers",
"uvloop>=0.18",
]

[project.optional-dependencies]
Expand Down Expand Up @@ -139,6 +143,7 @@ ignore_missing_imports=true


[tool.ruff]
target-version = "py39"
line-length = 88
indent-width = 4
exclude = ["build", "dist", "env", ".venv"]
Expand All @@ -149,15 +154,16 @@ indent-style = "space"

[tool.ruff.lint]
ignore = [
"PLR0913",
"TC001",
"COM812",
"ISC001",
"TC002",
"COM812", # ignore trailing comma errors due to older Python versions
"PD011", # ignore .values usage since ruff assumes it's a Pandas DataFrame
"PLR0913", # ignore too many arguments in function definitions
"PLW1514", # allow Path.open without encoding
"RET505", # allow `else` blocks
"RET506", # allow `else` blocks
"PD011", # ignore .values usage since ruff assumes it's a Pandas DataFrame
"S311", # allow standard pseudo-random generators
"TC001", # ignore imports used only for type checking
"TC002", # ignore imports used only for type checking
"TC003", # ignore imports used only for type checking
]
select = [
# Rules reference: https://docs.astral.sh/ruff/rules/
Expand Down
Empty file added research/__init__.py
Empty file.
Empty file.
Empty file.
Empty file.
206 changes: 206 additions & 0 deletions research/multiprocesssing_communication_perf/test_encoding_perf.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,206 @@
from __future__ import annotations

import csv
import io
import pickle
import random
import sys
import time
from typing import Any

import click
import numpy as np
from pydantic import BaseModel

from guidellm.utils import EncodingTypesAlias, MessageEncoding, SerializationTypesAlias

from .utils import create_all_test_objects


def calculate_size(obj: Any) -> int:
    """Approximate the in-memory size of *obj* in bytes.

    Pydantic models are measured via their ``__dict__`` so the number reflects
    the field storage rather than the fixed-size model wrapper. Lists/tuples
    and dicts that contain at least one model are measured element-wise;
    everything else falls back to a shallow ``sys.getsizeof``.
    """
    if isinstance(obj, BaseModel):
        return sys.getsizeof(obj.__dict__)

    if isinstance(obj, (tuple, list)) and any(
        isinstance(item, BaseModel) for item in obj
    ):
        return sum(
            sys.getsizeof(item.__dict__)
            if isinstance(item, BaseModel)
            else sys.getsizeof(item)
            for item in obj
        )

    if isinstance(obj, dict) and any(
        isinstance(value, BaseModel) for value in obj.values()
    ):
        # Count every value, mirroring the list/tuple branch above. The
        # original had a trailing `if isinstance(value, BaseModel)` filter
        # here, which made the non-model else-arm dead code and undercounted
        # mixed dicts.
        return sum(
            sys.getsizeof(value.__dict__)
            if isinstance(value, BaseModel)
            else sys.getsizeof(value)
            for value in obj.values()
        )

    return sys.getsizeof(obj)


def time_encode_decode(
    objects: list[Any],
    serialization: SerializationTypesAlias,
    encoding: EncodingTypesAlias,
    pydantic_models: list[type[BaseModel]] | None,
    num_iterations: int,
) -> tuple[float, float, float, float]:
    """Benchmark one (serialization, encoding) combination.

    Round-trips every object ``num_iterations`` times through a
    ``MessageEncoding`` encode/decode plus a ``pickle`` hop (matching how
    messages actually cross process boundaries).

    :return: ``(avg encode ns/op, avg decode ns/op, avg pickled message size
        in bytes, round-trip accuracy percent)``
    """
    message_encoding = MessageEncoding(serialization=serialization, encoding=encoding)
    if pydantic_models:
        for model in pydantic_models:
            message_encoding.register_pydantic(model)

    msg_sizes: list[int] = []
    decoded: list[Any] = []
    encode_time = 0.0
    decode_time = 0.0

    for _ in range(num_iterations):
        for obj in objects:
            start = time.perf_counter_ns()
            message = message_encoding.encode(obj)
            pickled_msg = pickle.dumps(message)
            end = time.perf_counter_ns()
            encode_time += end - start

            msg_sizes.append(calculate_size(pickled_msg))

            start = time.perf_counter_ns()
            message = pickle.loads(pickled_msg)
            decoded.append(message_encoding.decode(message=message))
            end = time.perf_counter_ns()
            decode_time += end - start

    # Round-trip fidelity: compare each input against its first-iteration
    # decode (zip stops at len(objects), so later iterations are ignored).
    correct = 0
    for obj, dec in zip(objects, decoded):
        equivalent = type(obj) is type(dec) and (
            (
                hasattr(obj, "model_dump")
                and hasattr(dec, "model_dump")
                and obj.model_dump() == dec.model_dump()
            )
            or str(obj) == str(dec)
        )
        if obj == dec or equivalent:
            correct += 1

    percent_differences = 100.0 * correct / len(objects)
    avg_msg_size = np.mean(msg_sizes)

    # Normalize by the total number of operations performed. The original
    # divided by len(objects) only, which inflated the reported "average"
    # per-op time by a factor of num_iterations.
    total_ops = num_iterations * len(objects)
    return (
        encode_time / total_ops,
        decode_time / total_ops,
        avg_msg_size,
        percent_differences,
    )


def run_benchmarks(objects_size: int, num_objects: int, num_iterations: int):
    """Run the full encode/decode benchmark grid and print a CSV summary.

    Every test-object family is crossed with each serialization mode and
    each binary encoding; failures are recorded per combination rather than
    aborting the whole run.
    """
    serializations = ("dict", "sequence", None)
    encodings = ("msgpack", "msgspec", None)
    results = {}

    for obj_type, objects, pydantic_models in create_all_test_objects(
        objects_size=objects_size,
        num_objects=num_objects,
    ):
        for serialization in serializations:
            for encoding in encodings:
                error = None
                encode_time = None
                decode_time = None
                avg_msg_size = None
                percent_differences = None
                try:
                    (
                        encode_time,
                        decode_time,
                        avg_msg_size,
                        percent_differences,
                    ) = time_encode_decode(
                        objects=objects,
                        serialization=serialization,
                        encoding=encoding,
                        pydantic_models=pydantic_models,
                        num_iterations=num_iterations,
                    )
                except Exception as err:  # best-effort harness: keep going
                    print(
                        f"Error occurred while benchmarking {obj_type} for "
                        f"serialization={serialization} and encoding={encoding}: {err}"
                    )
                    error = err

                if encode_time is not None and decode_time is not None:
                    total_time = encode_time + decode_time
                else:
                    total_time = None

                results[f"{obj_type}_{serialization}_{encoding}"] = {
                    "obj_type": obj_type,
                    "serialization": serialization,
                    "encoding": encoding,
                    "encode_time": encode_time,
                    "decode_time": decode_time,
                    "total_time": total_time,
                    "avg_msg_size": avg_msg_size,
                    "percent_differences": percent_differences,
                    "err": error,
                }

    # Render the collected results as a CSV table on stdout.
    output = io.StringIO()
    writer = csv.writer(output)

    writer.writerow(
        [
            "Object Type",
            "Serialization",
            "Encoding",
            "Encode Time (ns)",
            "Decode Time (ns)",
            "Total Time (ns)",
            "Avg Message Size (bytes)",
            "Accuracy (%)",
            "Error",
        ]
    )

    row_keys = (
        "obj_type",
        "serialization",
        "encoding",
        "encode_time",
        "decode_time",
        "total_time",
        "avg_msg_size",
        "percent_differences",
        "err",
    )
    for result in results.values():
        writer.writerow([result[key] for key in row_keys])

    print(output.getvalue())


@click.command()
@click.option("--size", default=1024, type=int, help="Size of each object in bytes")
@click.option(
    "--objects", default=1000, type=int, help="Number of objects to benchmark"
)
@click.option("--iterations", default=5, type=int, help="Number of iterations to run")
def main(size, objects, iterations):
    """CLI entrypoint: seed the RNG for reproducible test objects, then run
    the benchmark grid with the requested sizes."""
    random.seed(42)
    run_benchmarks(objects_size=size, num_objects=objects, num_iterations=iterations)


if __name__ == "__main__":
    # Invoke the click command so the CLI flags are honored. The original
    # called run_benchmarks(...) directly with hard-coded values, which
    # ignored --size/--objects/--iterations and skipped the RNG seeding.
    main()
Loading