Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions database/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1034,6 +1034,10 @@ impl BenchmarkRequest {
/// Returns `true` if this benchmark request has finished benchmarking,
/// i.e. its status is `BenchmarkRequestStatus::Completed`.
pub fn is_completed(&self) -> bool {
matches!(self.status, BenchmarkRequestStatus::Completed { .. })
}

/// Returns `true` if this benchmark request is currently being benchmarked,
/// i.e. its status is `BenchmarkRequestStatus::InProgress`.
pub fn is_in_progress(&self) -> bool {
matches!(self.status, BenchmarkRequestStatus::InProgress { .. })
}
}

/// Cached information about benchmark requests in the DB
Expand Down
1 change: 1 addition & 0 deletions site/frontend/src/pages/status_new/data.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ export type BenchmarkRequest = {
requestType: BenchmarkRequestType;
createdAt: string;
completedAt: string | null;
endEstimated: boolean;
durationS: number | null;
errors: Dict<string>;
};
Expand Down
9 changes: 7 additions & 2 deletions site/frontend/src/pages/status_new/page.vue
Original file line number Diff line number Diff line change
Expand Up @@ -158,8 +158,13 @@ loadStatusData(loading);
req.status === "Completed" && req.hasPendingJobs ? "*" : ""
}}
</td>
<td v-html="formatISODate(req.completedAt)"></td>
<td v-html="getDuration(req)"></td>
<td>
{{ formatISODate(req.completedAt) }}
<span v-if="req.endEstimated">(est.)</span>
</td>
<td>
{{ getDuration(req) }}
</td>

<td v-if="hasErrors(req.errors)">
<button @click="toggleExpandedErrors(req.tag)">
Expand Down
2 changes: 2 additions & 0 deletions site/src/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -419,6 +419,8 @@ pub mod status_new {
pub request_type: BenchmarkRequestType,
pub created_at: DateTime<Utc>,
pub completed_at: Option<DateTime<Utc>>,
// If true, then `completed_at` is only an estimate of when the request will complete
pub end_estimated: bool,
pub duration_s: Option<u64>,
pub errors: HashMap<String, String>,
}
Expand Down
103 changes: 85 additions & 18 deletions site/src/request_handlers/status_page_new.rs
Original file line number Diff line number Diff line change
@@ -1,45 +1,110 @@
use std::sync::Arc;

use crate::api::status_new;
use crate::job_queue::build_queue;
use crate::load::SiteCtxt;
use chrono::{DateTime, Utc};
use database::{
BenchmarkJob, BenchmarkJobStatus, BenchmarkRequest, BenchmarkRequestStatus,
BenchmarkRequestType, Connection,
};
use hashbrown::HashMap;
use std::sync::Arc;
use std::time::Duration;

pub async fn handle_status_page_new(ctxt: Arc<SiteCtxt>) -> anyhow::Result<status_new::Response> {
let conn = ctxt.conn().await;

let index = conn.load_benchmark_request_index().await?;

// The queue contains any in-progress request(s) and then the following requests in queue order
// We reverse so that it starts with the request that will be benchmarked the latest
let mut queue: Vec<status_new::BenchmarkRequest> = build_queue(&*conn, &index)
.await?
let queue = build_queue(&*conn, &index).await?;
let completed = conn.get_last_n_completed_benchmark_requests(10).await?;

// Figure out approximately how long the most recent master benchmark request took
let expected_duration = completed
.iter()
.filter(|req| req.request.is_master())
.filter_map(|req| match req.request.status() {
BenchmarkRequestStatus::Completed { duration, .. } => Some(duration),
_ => None,
})
.next()
.unwrap_or(Duration::from_secs(3600));

let in_progress_jobs = conn.get_jobs_of_in_progress_benchmark_requests().await?;

// Here we compute the estimated end time for queued requests, and convert the requests to their
// frontend representation.
// We assume that at most a single request is in progress

let now = Utc::now();

// The estimated start time of the current in-progress request
let current_request_start = if let Some(req) = queue.first().take_if(|req| req.is_in_progress())
{
// Here we need to somehow guess when the current in-progress request actually started,
// as we do not have that information readily available
let request_jobs = in_progress_jobs
.get(req.tag().expect("In progress request without a tag"))
.map(|jobs| jobs.as_slice())
.unwrap_or(&[]);

// Take the earliest start time, if some job has already started
// If there are no started jobs yet, just fall back to the current time (we guess that a
// job will start "any time now")
request_jobs
.iter()
.filter_map(|job| match job.status() {
BenchmarkJobStatus::Queued => None,
BenchmarkJobStatus::InProgress { started_at, .. }
| BenchmarkJobStatus::Completed { started_at, .. } => Some(*started_at),
})
.min()
.unwrap_or(now)
} else {
// Assume that the next request (if any) will start at any given moment
now
};

// Estimate when the current in-progress request should end
// This ignores the fact that different kinds of requests (e.g. release ones) can have different
// durations, but these are rare and it's not worth the complexity to have multiple estimates
// here.
let current_request_end = current_request_start + expected_duration;

let mut requests: Vec<status_new::BenchmarkRequest> = queue
.into_iter()
.map(|req| request_to_ui(&req, HashMap::new()))
.enumerate()
.map(|(index, req)| {
let estimated_end = if req.is_in_progress() {
current_request_end
} else {
current_request_end + expected_duration * (index as u32)
};
request_to_ui(&req, HashMap::default(), Some(estimated_end))
})
.collect();
queue.reverse();
// And then we add N most recently completed requests to it
let completed = conn.get_last_n_completed_benchmark_requests(10).await?;
queue.extend(

// We reverse the queued requests so that they start with the request that will be benchmarked the latest
requests.reverse();
// And then we add the completed requests
requests.extend(
completed
.into_iter()
.map(|req| request_to_ui(&req.request, req.errors)),
.map(|req| request_to_ui(&req.request, req.errors, None)),
);

let collectors = build_collectors(conn.as_ref()).await?;
let collectors = build_collectors(conn.as_ref(), &in_progress_jobs).await?;

Ok(status_new::Response {
requests: queue,
requests,
collectors,
})
}

async fn build_collectors(conn: &dyn Connection) -> anyhow::Result<Vec<status_new::Collector>> {
let in_progress_jobs = conn.get_jobs_of_in_progress_benchmark_requests().await?;
async fn build_collectors(
conn: &dyn Connection,
in_progress_jobs: &HashMap<String, Vec<BenchmarkJob>>,
) -> anyhow::Result<Vec<status_new::Collector>> {
let collectors = conn.get_collector_configs().await?;
let mut collector_map: HashMap<String, status_new::Collector> = collectors
.into_iter()
Expand Down Expand Up @@ -120,11 +185,12 @@ fn job_status_to_priority(status: status_new::BenchmarkJobStatus) -> u32 {
fn request_to_ui(
req: &BenchmarkRequest,
errors: HashMap<String, String>,
estimated_end: Option<DateTime<Utc>>,
) -> status_new::BenchmarkRequest {
let (completed_at, duration_s) = match req.status() {
BenchmarkRequestStatus::WaitingForArtifacts => (None, None),
BenchmarkRequestStatus::ArtifactsReady => (None, None),
BenchmarkRequestStatus::InProgress => (None, None),
BenchmarkRequestStatus::WaitingForArtifacts => (estimated_end, None),
BenchmarkRequestStatus::ArtifactsReady => (estimated_end, None),
BenchmarkRequestStatus::InProgress => (estimated_end, None),
BenchmarkRequestStatus::Completed {
completed_at,
duration,
Expand All @@ -150,6 +216,7 @@ fn request_to_ui(
completed_at,
duration_s,
errors,
end_estimated: estimated_end.is_some(),
}
}

Expand Down
Loading