
Commit 5ed1a15

Initial (hacky) performance tests
Create initial performance tests measuring the time between the hotplug API request and the vCPUs becoming available to the guest.

Note: these tests are NOT designed to be merged or used in CI; they are merely investigative tests for the latency of vCPU hotplugging.

Signed-off-by: James Curtis <[email protected]>
1 parent 0ba64b9 commit 5ed1a15
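For context, the measurement flow exercised here is: issue a hotplug request through the API, let the guest online the new CPUs, then read two durations back out of the Firecracker log (the API call duration and the boot-timer timestamp written by the guest). Below is a minimal sketch of that loop in the style of the test framework; it assumes a started microvm `uvm` built with the boot-timer device, and the helper name `measure_hotplug_latency` is illustrative, not part of this commit.

import re
import time

def measure_hotplug_latency(uvm, vcpus_to_add):
    """Return (api_ms, online_ms) for one hotplug request.

    Sketch only: `uvm` is assumed to be a started microvm with the boot-timer
    device enabled, so the guest-side hotplug_time.o write shows up as a
    "Guest-boot-time" line in the Firecracker log.
    """
    # Ask the API server to hotplug the extra vCPUs.
    uvm.api.hotplug.put(Vcpu={"add": vcpus_to_add})
    # Give the guest time to online the new CPUs and signal the boot timer.
    time.sleep(0.25)
    # Same log patterns used by the investigative script in this commit.
    api_us = int(re.findall(r"Total previous API call duration: (\d+) us\.", uvm.log_data)[-1])
    online_us = int(re.findall(r"Guest-boot-time\s+\=\s+(\d+)\s+us", uvm.log_data)[-1])
    return api_us / 1000, online_us / 1000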

File tree: 10 files changed, +354 −2 lines

src/vmm/src/builder.rs (+22 −1)

@@ -185,7 +185,28 @@ fn create_vmm_and_vcpus(
     let resource_allocator = ResourceAllocator::new()?;

     // Instantiate the MMIO device manager.
-    let mmio_device_manager = MMIODeviceManager::new();
+    let mut mmio_device_manager = MMIODeviceManager::new();
+
+    #[cfg(target_arch = "x86_64")]
+    {
+        // For x86, we need to create the interrupt controller before calling
+        // `KVM_CREATE_VCPUS`, but we also need it before instantiating the ACPI
+        // device manager, because the CpuContainer needs to create and register IRQs.
+        setup_interrupt_controller(&mut vm)?;
+
+        // The boot timer device needs to be the first device attached in order
+        // to maintain the same MMIO address referenced in the documentation
+        // and tests.
+        // It has to be instantiated here, before the CpuContainer, to ensure that
+        // it gets the correct address: the first page of MMIO memory.
+        if boot_timer_enabled {
+            let mut boot_timer = crate::devices::pseudo::BootTimer::new(TimestampUs::default());
+
+            mmio_device_manager
+                .register_mmio_boot_timer(&mut resource_allocator, boot_timer)
+                .map_err(RegisterMmioDevice)?;
+        }
+    }

     // Instantiate ACPI device manager.
     #[cfg(target_arch = "x86_64")]

src/vmm/src/devices/pseudo/boot_timer.rs (+1 −1)

@@ -10,7 +10,7 @@ const MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE: u8 = 123;
 /// Pseudo device to record the kernel boot time.
 #[derive(Debug)]
 pub struct BootTimer {
-    start_ts: TimestampUs,
+    pub start_ts: TimestampUs,
 }

 impl BootTimer {

src/vmm/src/lib.rs (+8 −0)

@@ -621,6 +621,8 @@ impl Vmm {
         &mut self,
         config: HotplugVcpuConfig,
     ) -> Result<MachineConfigUpdate, HotplugVcpuError> {
+        use utils::time::TimestampUs;
+
         use crate::logger::IncMetric;
         if config.add < 1 {
             return Err(HotplugVcpuError::VcpuCountTooLow);
@@ -689,6 +691,12 @@ impl Vmm {
         self.resume_vcpu_threads(start_idx.into())?;

         self.acpi_device_manager.notify_cpu_container()?;
+        if let Some(devices::BusDevice::BootTimer(dev)) = self
+            .mmio_device_manager
+            .get_device(DeviceType::BootTimer, "BootTimer")
+        {
+            dev.lock().unwrap().start_ts = TimestampUs::default()
+        }

         Ok(new_machine_config)
     }

tests/conftest.py (+8 −0)

@@ -361,6 +361,9 @@ def rootfs_fxt(request, record_property):
 guest_kernel_linux_5_10 = pytest.fixture(
     guest_kernel_fxt, params=kernel_params("vmlinux-5.10*")
 )
+guest_kernel_linux_acpi_only = pytest.fixture(
+    guest_kernel_fxt, params=kernel_params("vmlinux-5.10.221")
+)
 # Use the unfiltered selector, since we don't officially support 6.1 yet.
 # TODO: switch to default selector once we add full 6.1 support.
 guest_kernel_linux_6_1 = pytest.fixture(
@@ -394,6 +397,11 @@ def uvm_plain_rw(microvm_factory, guest_kernel_linux_5_10, rootfs_rw):
     return microvm_factory.build(guest_kernel_linux_5_10, rootfs_rw)


+@pytest.fixture
+def uvm_hotplug(microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw):
+    return microvm_factory.build(guest_kernel_linux_acpi_only, rootfs_rw)
+
+
 @pytest.fixture
 def uvm_nano(uvm_plain):
     """A preconfigured uvm with 2vCPUs and 256MiB of memory

tests/host_tools/1-cpu-hotplug.rules (+1 −0)

@@ -0,0 +1 @@
+SUBSYSTEM=="cpu", ACTION=="add", ATTR{online}!="1", ATTR{online}="1"
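This rule onlines any CPU as soon as udev sees its add event, so the guest would not need the polling loop in hotplug.sh. A hedged sketch of how a test could install it in the guest before hotplugging, reusing the scp helper already used in this commit; the destination path and the udevadm reload step are assumptions, not part of the commit:

from pathlib import Path

def install_hotplug_udev_rule(uvm):
    # Copy the rule into the guest and ask udev to reload it.
    # Destination path and reload commands are assumptions.
    uvm.ssh.scp_put(
        Path("./host_tools/1-cpu-hotplug.rules"),
        Path("/etc/udev/rules.d/1-cpu-hotplug.rules"),
    )
    uvm.ssh.run("udevadm control --reload-rules && udevadm trigger --subsystem-match=cpu")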

tests/host_tools/hotplug.py (+70 −0)

@@ -0,0 +1,70 @@
+# import pandas
+# import re
+# from framework.microvm import MicroVMFactory
+#
+# KERNEL = "vmlinux-5.10.221"
+# ROOTFS = "ubuntu-22.04.ext4"
+#
+#
+# def run_tests():
+#     factory = MicrovmFactory(fc_binary_path, jailer_binary_path)
+#     manual_data = test_manual_latency(factory)
+#     manual_data.to_csv("~/dev/results/manual_hotplug_data.csv")
+#
+# def test_manual_latency(microvm_factory):
+#     """Test the latency for hotplugging and booting CPUs in the guest"""
+#     fc_binary_path, jailer_binary_path = build_tools.get_firecracker_binaries()
+#     df = pandas.DataFrame(columns=["vcpus", "api", "onlining"])
+#     gcc_compile(Path("./hotplug_time.c"), Path("./hotplug_time.o"))
+#     data = []
+#     for vcpu_count in range(2, 30, 2):
+#         for i in range(50):
+#             uvm_hotplug = microvm_factory.build(KERNEL, ROOTFS)
+#             uvm_hotplug.jailer.extra_args.update({"boot-timer": None, "no-seccomp": None})
+#             uvm_hotplug.help.enable_console()
+#             uvm_hotplug.spawn()
+#             uvm_hotplug.basic_config(vcpu_count=1, mem_size_mib=128)
+#             uvm_hotplug.add_net_iface()
+#             uvm_hotplug.start()
+#             uvm_hotplug.ssh.scp_put(Path("./host_tools/hotplug.sh"), Path("/home/hotplug.sh"))
+#             uvm_hotplug.ssh.scp_put(Path("./host_tools//hotplug_time.o"), Path("/home/hotplug_time.o"))
+#             uvm_hotplug.ssh.run("tmux new-session -d /bin/bash /home/hotplug.sh > /home/test 2>&1")
+#
+#
+#             uvm_hotplug.api.hotplug.put(Vcpu={"add": vcpu_count})
+#
+#             time.sleep(0.25)
+#             # Extract API call duration
+#             api_duration = float(re.findall(r"Total previous API call duration: (\d+) us\.", uvm_hotplug.log_data)[-1]) / 1000
+#             timestamp = float(re.findall(r"Guest-boot-time\s+\=\s+(\d+)\s+us", uvm_hotplug.log_data)[0]) / 1000
+#             data.append({"vcpus": vcpu_count, "api": api_duration, "onlining": timestamp})
+#     return pandas.DataFrame.from_dict(data)
+
+# def test_custom_udev_latency():
+#     """Test the latency for hotplugging and booting CPUs in the guest"""
+#     fc_binary_path, jailer_binary_path = build_tools.get_firecracker_binaries()
+#     df = pandas.DataFrame(columns=["vcpus", "api", "onlining"])
+#     gcc_compile(Path("./hotplug_time.c"), Path("./hotplug_time.o"))
+#     data = []
+#     for vcpu_count in range(2, 30, 2):
+#         for i in range(50):
+#             uvm_hotplug = microvm_factory.build(KERNEL, ROOTFS)
+#             uvm_hotplug.jailer.extra_args.update({"boot-timer": None, "no-seccomp": None})
+#             uvm_hotplug.help.enable_console()
+#             uvm_hotplug.spawn()
+#             uvm_hotplug.basic_config(vcpu_count=1, mem_size_mib=128)
+#             uvm_hotplug.add_net_iface()
+#             uvm_hotplug.start()
+#             uvm_hotplug.ssh.scp_put(Path("./host_tools/hotplug.sh"), Path("/home/hotplug.sh"))
+#             uvm_hotplug.ssh.scp_put(Path("./host_tools//hotplug_time.o"), Path("/home/hotplug_time.o"))
+#             uvm_hotplug.ssh.run("tmux new-session -d /bin/bash /home/hotplug.sh > /home/test 2>&1")
+#
+#
+#             uvm_hotplug.api.hotplug.put(Vcpu={"add": vcpu_count})
+#
+#             time.sleep(0.25)
+#             # Extract API call duration
+#             api_duration = float(re.findall(r"Total previous API call duration: (\d+) us\.", uvm_hotplug.log_data)[-1]) / 1000
+#             timestamp = float(re.findall(r"Guest-boot-time\s+\=\s+(\d+)\s+us", uvm_hotplug.log_data)[0]) / 1000
+#             data.append({"vcpus": vcpu_count, "api": api_duration, "onlining": timestamp})
+#

tests/host_tools/hotplug.sh (+13 −0)

@@ -0,0 +1,13 @@
+#!/bin/bash
+
+while :; do
+    [[ -d /sys/devices/system/cpu/cpu1 ]] && break
+done
+
+readarray -t offline_cpus < <(lscpu -p=cpu --offline | sed '/^#/d')
+
+for cpu_idx in ${offline_cpus[@]}; do
+    echo 1 >/sys/devices/system/cpu/cpu$cpu_idx/online
+done
+
+/home/hotplug_time.o
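The script above runs inside the guest: it busy-waits until the first hotplugged CPU (cpu1) appears in sysfs, onlines every offline CPU, and then executes hotplug_time.o to stamp the boot-timer device. On the host side, the investigative script stages and launches it over ssh; a condensed sketch of that staging step (the helper name is illustrative, the paths and tmux command come from hotplug.py):

from pathlib import Path

def stage_guest_side(uvm):
    # Push the onlining script and the boot-timer signaller into the guest,
    # then launch the script detached so it keeps running after ssh returns.
    uvm.ssh.scp_put(Path("./host_tools/hotplug.sh"), Path("/home/hotplug.sh"))
    uvm.ssh.scp_put(Path("./host_tools/hotplug_time.o"), Path("/home/hotplug_time.o"))
    uvm.ssh.run("tmux new-session -d '/bin/bash /home/hotplug.sh > /home/test 2>&1'")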

tests/host_tools/hotplug_time.c (+33 −0)

@@ -0,0 +1,33 @@
+// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Init wrapper for boot timing. It points at /sbin/init.
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Base address values are defined in arch/src/lib.rs as arch::MMIO_MEM_START.
+// Values are computed in arch/src/<arch>/mod.rs from the architecture layouts.
+// Position on the bus is defined by MMIO_LEN increments, where MMIO_LEN is
+// defined as 0x1000 in vmm/src/device_manager/mmio.rs.
+#ifdef __x86_64__
+#define MAGIC_MMIO_SIGNAL_GUEST_BOOT_COMPLETE 0xd0000000
+#endif
+#ifdef __aarch64__
+#define MAGIC_MMIO_SIGNAL_GUEST_BOOT_COMPLETE 0x40000000
+#endif
+
+#define MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE 123
+
+int main() {
+  int fd = open("/dev/mem", (O_RDWR | O_SYNC | O_CLOEXEC));
+  int mapped_size = getpagesize();
+
+  char *map_base = mmap(NULL, mapped_size, PROT_WRITE, MAP_SHARED, fd,
+                        MAGIC_MMIO_SIGNAL_GUEST_BOOT_COMPLETE);
+
+  *map_base = MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE;
+  msync(map_base, mapped_size, MS_ASYNC);
+}

tests/host_tools/hotplug_time.o (binary, 879 KB — not shown)
