10 changes: 8 additions & 2 deletions benchmark/scripts/benchmark_fused_linear_jsd.py
@@ -10,6 +10,7 @@

from liger_kernel.transformers.fused_linear_jsd import LigerFusedLinearJSD
from liger_kernel.utils import infer_device
from liger_kernel.utils import get_total_gpu_memory

device = infer_device()

@@ -233,14 +234,19 @@ def full():

if __name__ == "__main__":
    args = parse_benchmark_script_args()

    # Use the full Llama-3 vocabulary (128256) only when the accelerator has
    # roughly 69 GB of memory or more; otherwise fall back to 65536.
    gpu_memory_gbs = get_total_gpu_memory()
    if gpu_memory_gbs >= 69:
        vocab_size = 128256
    else:
        vocab_size = 65536

    common_configs = {
        "kernel_name": "fused_linear_jsd",
        "x_name": "BT",
        "x_label": "B x T",
        "x_values": [2**i for i in range(10, 14)],
        "kernel_providers": ["liger", "torch"],
        "extra_benchmark_configs": [{"H": 4096, "V": 128256, "mode": "forward", "dtype": torch.bfloat16}],
        "extra_benchmark_configs": [{"H": 4096, "V": vocab_size, "mode": "forward", "dtype": torch.bfloat16}],
Comment on lines +237 to +249

Let's lower the upper bound of x_values instead of vocab_size for now.

We can discuss which configs should be scalable when there's a memory constraint; see #1051.
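A hypothetical sketch of that suggestion (the exponent cap, the 69 GB threshold, and the reuse of `get_total_gpu_memory` here are illustrative assumptions, not part of this PR):

```python
# Illustrative only: shrink the B x T sweep on small GPUs instead of
# changing the vocab size, so V stays comparable across runs.
max_exponent = 14 if get_total_gpu_memory() >= 69 else 13
x_values = [2**i for i in range(10, max_exponent)]
```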

"overwrite": args.overwrite,
}

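The `get_total_gpu_memory` helper imported above lives in `liger_kernel.utils` and is not shown in this diff. A minimal sketch of what such a helper could look like (assumed, CUDA-only; not the actual implementation):

```python
import torch


def get_total_gpu_memory() -> float:
    """Sketch: total memory of GPU 0 in GiB; 0.0 if CUDA is unavailable."""
    if not torch.cuda.is_available():
        # Non-CUDA backends (xpu, npu, ...) would need their own queries.
        return 0.0
    return torch.cuda.get_device_properties(0).total_memory / (1024**3)
```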
2 changes: 1 addition & 1 deletion src/liger_kernel/ops/fused_linear_jsd.py
@@ -13,7 +13,7 @@
# The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576 https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
# However, setting the limit to 65536, as in the LayerNorm tutorial, is faster because of less register spilling
# The optimal maximum block size depends on your hardware, your kernel, and your dtype
MAX_FUSED_SIZE = 4096 if infer_device() == "xpu" else 65536 // 2
MAX_FUSED_SIZE = 4096 if infer_device() == "npu" else 65536 // 2

append instead of replace

Suggested change
MAX_FUSED_SIZE = 4096 if infer_device() == "npu" else 65536 // 2
MAX_FUSED_SIZE = 4096 if infer_device() in ["npu", "xpu"] else 65536 // 2



def fused_linear_jsd_forward(
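For context, a minimal sketch of how `MAX_FUSED_SIZE` is commonly applied in Triton kernels (the `choose_block_size` helper is illustrative, not from this file):

```python
import triton


def choose_block_size(n_cols: int) -> int:
    # Sketch of the usual pattern: round the row width up to a power of two,
    # then clamp to MAX_FUSED_SIZE to avoid the register spilling noted above.
    return min(MAX_FUSED_SIZE, triton.next_power_of_2(n_cols))
```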