1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
scripts/.ipynb_checkpoints/
251 changes: 251 additions & 0 deletions scripts/deep_analysis_example.ipynb
@@ -0,0 +1,251 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "aadf8bdc-2b8a-4737-992f-0ebf7e66cd8f",
"metadata": {},
"outputs": [],
"source": [
"%%cmd\n",
"python -m pip install flatten_json\n",
"python -m pip install pandas\n",
"python -m pip install numpy\n",
"python -m pip install matplotlib"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bbdc923a-44fd-4250-990b-80771446bbf0",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import json\n",
"from collections import defaultdict\n",
"import numpy as np\n",
"import argparse\n",
"import sys\n",
"from flatten_json import flatten\n",
"import matplotlib.pyplot as plt\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "099d7d0d-febc-4562-801a-767c4b028f56",
"metadata": {},
"outputs": [],
"source": [
"def aggregate_loops_passes(json):\n",
" results_per_frame = []\n",
" num_loops = len(json)\n",
" for loop_results in json:\n",
" for frame_index, frame_results in enumerate(loop_results[\"per_frame_results\"]):\n",
" if frame_index >= len(results_per_frame):\n",
" results_per_frame.append(defaultdict(int))\n",
" results_per_frame[frame_index]['sequence_time_ns'] = frame_results['sequence_time_ns']\n",
" for command_buffer_timings in frame_results[\n",
" \"command_buffer_timings\"\n",
" ].values():\n",
" for scope_name, scope_timings in command_buffer_timings[\n",
" \"scope_timings\"\n",
" ].items():\n",
" for scope_timing in scope_timings:\n",
" results_per_frame[frame_index][scope_name] += (\n",
" scope_timing[\"end\"] - scope_timing[\"start\"]\n",
" ) / num_loops / 1_000_000 # in ms\n",
" for metric_name, metric in frame_results[\"metrics\"].items():\n",
" # TODO: Flatten this in rust to fan_speed_rpm\n",
" if metric_name == \"fan_speed\":\n",
" value = metric[\"Percent\"] if \"Percent\" in metric else metric[\"Rpm\"]\n",
" results_per_frame[frame_index][\"fan_speed_rpm\"] += (\n",
" value / num_loops\n",
" )\n",
" # Filter out unavailable data and the timestamp\n",
" elif metric is not None and metric_name != \"timestamp\":\n",
" results_per_frame[frame_index][metric_name] += metric / num_loops\n",
" # TODO: Aggregate CPU timings\n",
" return pd.DataFrame([flatten(x) for x in results_per_frame])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "939b04cb-7ffd-4963-8dac-71c0a76275cf",
"metadata": {},
"outputs": [],
"source": [
"files = [\n",
" '../../a_ray_tracing_inline.csv',\n",
" '../../a_ray_tracing_pipeline.csv',\n",
" '../../b_ray_tracing_inline.csv',\n",
" '../../b_ray_tracing_pipeline.csv',\n",
"]\n",
"\n",
"scores = {}\n",
"\n",
"for file in files:\n",
" # Read csv\n",
" file_scores = pd.read_csv(file)\n",
" # Remove empty results\n",
" file_scores = file_scores.loc[:, (file_scores != 0).any(axis=0)]\n",
" scores[file] = file_scores\n",
"scores = pd.concat(scores).groupby(level=0, sort=False).mean().T.drop('Loop', errors=\"ignore\")\n",
"scores"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2fbaaf11-5b32-43ed-84d1-8894ec6de013",
"metadata": {},
"outputs": [],
"source": [
"scores.plot(kind=\"bar\", figsize=(20,5), colormap='Dark2', grid=True, rot=0)"
Collaborator:
Could we add plotting of all the different score categories? That is, compare the following:

  • a_raytracing_inline vs a_raytracing_pipeline vs b_raytracing_inline vs b_raytracing_pipeline
  • a_rasterization_inline vs a_rasterization_pipeline vs b_rasterization_inline vs b_rasterization_pipeline
  • etc.

Author:
Added it as a separate notebook.
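
A rough sketch of what such a per-category comparison could look like, assuming the `scores` DataFrame built above (the category substrings are illustrative assumptions):

```python
# Sketch only: group the score columns by benchmark category and draw one
# grouped bar chart per category. The category substrings are assumptions.
categories = ['ray_tracing', 'rasterization']

for category in categories:
    # Select the input files whose names contain this category
    columns = [c for c in scores.columns if category in c]
    if columns:
        scores[columns].plot(kind='bar', figsize=(20, 5), colormap='Dark2',
                             grid=True, rot=0, title=category)
```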

]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a60a4c63-b884-4558-98bd-ee3d14bbc569",
"metadata": {},
"outputs": [],
"source": [
"files = [\n",
" '../../a_ray_tracing_inline_deep.json',\n",
" '../../b_ray_tracing_inline_deep.json',\n",
" '../../a_ray_tracing_pipeline_deep.json',\n",
" '../../b_ray_tracing_pipeline_deep.json',\n",
Collaborator:

I don't think we should rely on hardcoded file names/paths.

Author:

People will have to enter their own file paths here.
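
If the hardcoded list ever becomes a pain point, one possible sketch is to discover the result files with a glob instead (the directory and filename pattern are assumptions):

```python
# Sketch only: collect deep-analysis result files by pattern rather than
# listing them by hand. The '../..' directory and '*_deep.json' suffix are assumptions.
from pathlib import Path

files = sorted(str(p) for p in Path('../..').glob('*_deep.json'))
```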

"]\n",
"\n",
"results = {}\n",
"\n",
"# Load all files into one large dataframe\n",
"for path in files:\n",
" with open(path, \"r\") as json_file:\n",
" # We agregate passes within each frame, so we get one number per pass per frame per input file\n",
" json_data = aggregate_loops_passes(json.load(json_file))\n",
" results[path] = json_data\n",
"full_dataset = pd.concat(results)\n",
"full_dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6046a029-f421-4a9e-a9d6-9889488ff874",
"metadata": {},
"outputs": [],
"source": [
"relevant_metrics = [\n",
" 'reflection-hits-shading',\n",
" 'water-compositing',\n",
" 'blur',\n",
" 'diffuse-spatial-filter',\n",
" 'spd',\n",
" 'sun-direct-lighting',\n",
" 'reflection-ray-tracing-inline',\n",
" 'trace-diffuse-nee-rays',\n",
" 'render pass',\n",
" 'shadow-ray-tracing-pipeline',\n",
" 'compositing',\n",
" 'build-gbuffers_0',\n",
" 'scale-raster',\n",
" 'Batch refit bottom level',\n",
" 'clock_speed_in_mhz',\n",
" 'board_power_usage_in_w',\n",
" 'vram_usage_in_mb',\n",
" 'edge_temperature_in_c'\n",
"]\n",
"# We want all relevant metrics with the sequence time, to properly plot on the x axis\n",
"relevant_metrics_with_time = relevant_metrics + [\"sequence_time_ns\"]\n",
"metrics = full_dataset[relevant_metrics_with_time]\n",
"\n",
"# Use sequence time and input name as index, each row will have a unique time and input\n",
"metrics = metrics.reset_index().set_index(['sequence_time_ns', 'level_0']).drop('level_1', axis=1)\n",
"\n",
"# Make each input file a column, and put the pass names into a specific column\n",
"metrics = metrics.stack().unstack(1).reset_index()\n",
"\n",
"# From nano seconds to seconds\n",
"metrics['sequence_time_ns'] = metrics['sequence_time_ns'] / 1_000_000_000\n",
"metrics"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "217b62b6-2500-45b5-b3f0-24449387fddd",
"metadata": {},
"outputs": [],
"source": [
"# Print all possible metrics\n",
"full_dataset.columns.tolist()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e6740136-668a-44a6-9e35-53f80d586ea5",
"metadata": {},
"outputs": [],
"source": [
"for graph_name in metrics['level_1'].unique():\n",
" # Grab the metric we want to plot\n",
" selected_metric = metrics[metrics['level_1'] == graph_name]\n",
" selected_metric = selected_metric.drop('level_1', axis=1)\n",
"\n",
" # Filter outliers out of view\n",
" max_mean = selected_metric[files].mean().mean()\n",
" max_mean = 0 if pd.isna(max_mean) else max_mean\n",
" max_std = selected_metric[files].std(axis=1).max()\n",
" max_std = selected_metric[files].max() / 3.0 if pd.isna(max_std) else max_std\n",
"\n",
" # Plot results \n",
" selected_metric.infer_objects(copy=False).interpolate(method='linear').plot(\n",
" x='sequence_time_ns', \n",
" ylabel='shader execution time in ms',\n",
" xlabel='benchmark timeline in seconds', \n",
" ylim= (max(0, max_mean - max_std * 3), max_mean + max_std * 3),\n",
" figsize=(20,10), \n",
" colormap='Dark2', \n",
" grid=True, \n",
" legend=True,\n",
" title=graph_name\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ea8273f-13f5-4cc9-82c9-9fe3cf6dfeac",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}