
Commit a1ac181

damanm24 (Daman Mulye) and co-authors
petri: lower severity of com3 emitted logs for all vmm_tests (#2003)
#1937 introduced new vmm_tests that appear to be flaky: they occasionally time out. On further investigation, the following message shows up in the petri logs: `hv_vmbus: probe failed for device 766e96f8-2ceb-437e-afe3-a93169e48a7b`. OpenHCL is likely busy with other work (such as writing to com3), so it cannot service the vmbus probe request from VTL0 in time. This PR mitigates that by emitting only high-severity logs to the console once kmsg is available: after the pipette agent connects, petri runs `dmesg -n 3` in VTL2 over the diagnostics channel, lowering the console loglevel so routine kernel messages stop going to com3 while remaining retrievable through kmsg.

Co-authored-by: Daman Mulye <[email protected]>
1 parent 801e808 · commit a1ac181
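For context, the mechanism the fix relies on is the kernel's console loglevel: `dmesg -n <level>` tells the kernel to write only messages whose level is numerically below that value (i.e. more severe) to the console, while all messages still land in the kmsg ring buffer. Below is a minimal standalone sketch of the same call the new petri helper makes; this is illustrative, not petri code, and assumes the `anyhow` crate plus a Linux environment with util-linux `dmesg`:

```rust
use std::process::Command;

/// Lower the kernel console loglevel, as the new petri helper does inside
/// VTL2. With level 3, only messages more severe than KERN_ERR
/// (emerg/alert/crit) keep going to the serial console (com3 in OpenHCL's
/// case); everything else stays readable from the kmsg buffer.
fn set_console_loglevel(level: u8) -> anyhow::Result<()> {
    let status = Command::new("dmesg")
        .arg("-n")
        .arg(level.to_string())
        .status()?;
    anyhow::ensure!(status.success(), "dmesg -n {level} exited with {status}");
    Ok(())
}

fn main() -> anyhow::Result<()> {
    set_console_loglevel(3)
}
```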


petri/src/vm/mod.rs

Lines changed: 48 additions & 7 deletions
```diff
@@ -228,7 +228,8 @@ impl<T: PetriVmmBackend> PetriVmBuilder<T> {
     /// event (if configured). Does not configure and start pipette. Should
     /// only be used for testing platforms that pipette does not support.
     pub async fn run_without_agent(self) -> anyhow::Result<PetriVm<T>> {
-        self.run_core().await
+        let (vm, _) = self.run_core(false).await?;
+        Ok(vm)
     }
 
     /// Build and run the VM, then wait for the VM to emit the expected boot
@@ -237,12 +238,14 @@
         assert!(self.config.agent_image.is_some());
         assert!(self.config.agent_image.as_ref().unwrap().contains_pipette());
 
-        let mut vm = self.run_core().await?;
-        let client = vm.wait_for_agent().await?;
-        Ok((vm, client))
+        let (vm, agent) = self.run_core(true).await?;
+        Ok((vm, agent.unwrap()))
     }
 
-    async fn run_core(self) -> anyhow::Result<PetriVm<T>> {
+    async fn run_core(
+        self,
+        with_agent: bool,
+    ) -> anyhow::Result<(PetriVm<T>, Option<PipetteClient>)> {
         let arch = self.config.arch;
         let expect_reset = self.expect_reset();
 
@@ -268,9 +271,22 @@
             vm.wait_for_reset_core().await?;
         }
 
+        let client = if with_agent {
+            Some(vm.wait_for_agent().await?)
+        } else {
+            None
+        };
+
+        if with_agent {
+            let result = vm.set_console_loglevel(3).await;
+            if result.is_err() {
+                tracing::warn!("failed to set console loglevel: {}", result.unwrap_err());
+            }
+        }
+
         vm.wait_for_expected_boot_event().await?;
 
-        Ok(vm)
+        Ok((vm, client))
     }
 
     fn expect_reset(&self) -> bool {
@@ -675,7 +691,12 @@ impl<T: PetriVmmBackend> PetriVm<T> {
     /// Wait for the VM to reset and pipette to connect.
     pub async fn wait_for_reset(&mut self) -> anyhow::Result<PipetteClient> {
         self.wait_for_reset_no_agent().await?;
-        self.wait_for_agent().await
+        let client = self.wait_for_agent().await?;
+        let result = self.set_console_loglevel(3).await;
+        if result.is_err() {
+            tracing::warn!("failed to set console loglevel: {}", result.unwrap_err());
+        }
+        Ok(client)
     }
 
     async fn wait_for_reset_core(&mut self) -> anyhow::Result<()> {
@@ -866,6 +887,26 @@
             anyhow::bail!("VM is not configured with OpenHCL")
         }
     }
+
+    async fn set_console_loglevel(&self, level: u8) -> anyhow::Result<()> {
+        match self.openhcl_diag() {
+            Ok(diag) => {
+                diag.kmsg().await?;
+                let res = diag
+                    .run_vtl2_command("dmesg", &["-n", &level.to_string()])
+                    .await?;
+
+                if !res.exit_status.success() {
+                    anyhow::bail!("failed to set console loglevel: {:?}", res);
+                }
+            }
+            Err(e) => {
+                anyhow::bail!("failed to open VTl2 diagnostic channel: {}", e);
+            }
+        };
+
+        Ok(())
+    }
 }
 
 /// A running VM that tests can interact with.
```
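To illustrate the changed surface, here is a hedged sketch of a call site; the wrapper function and the `use petri::...` import paths are illustrative assumptions, not part of this diff. `run_without_agent()` keeps its public signature: internally it now discards the always-`None` client from `run_core(false)`, and only agent runs trigger the `set_console_loglevel(3)` call.

```rust
use petri::{PetriVm, PetriVmBuilder, PetriVmmBackend};

// Illustrative wrapper: boots a VM for a platform pipette does not
// support. run_core(false) skips wait_for_agent() and the console
// loglevel adjustment, so behavior is unchanged for agentless runs.
async fn boot_without_pipette<T: PetriVmmBackend>(
    builder: PetriVmBuilder<T>,
) -> anyhow::Result<PetriVm<T>> {
    builder.run_without_agent().await
}
```

Note the design choice visible in both call sites above: failures from `set_console_loglevel` are downgraded to `tracing::warn!` rather than propagated, so a test never fails solely because the loglevel could not be lowered.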
