Commit 8358285

allocate space up front, remove generics

1 parent 94b53cf commit 8358285

7 files changed: +99 −183 lines changed

vm/loader/page_table/src/aarch64.rs
Lines changed: 5 additions & 16 deletions

@@ -3,12 +3,8 @@
 
 //! Methods to construct page tables on Aarch64.
 
-use crate::PageTableBuffer;
 use bitfield_struct::bitfield;
 
-//TODO(babayet2) nonsensical
-pub const MAX_PAGE_TABLE_REGION_SIZE: usize = 1024 * 1024;
-
 /// Some memory attributes. Refer to the ARM VMSA
 /// manual for further details and other types.
 #[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
@@ -677,16 +673,13 @@ impl<'a> Arm64PageTableSpace<'a> {
 }
 
 /// Build a set of Aarch64 page tables identity mapping the given region.
-pub fn build_identity_page_tables_aarch64<P>(
+pub fn build_identity_page_tables_aarch64<'a>(
     page_table_gpa: u64,
     start_gpa: u64,
     size: u64,
     memory_attribute_indirection: MemoryAttributeIndirectionEl1,
-    page_table_region_size: usize,
-    page_table_space: &mut P,
-) where
-    P: PageTableBuffer<Element = u8>,
-{
+    page_table_space: &'a mut [u8],
+) -> &'a [u8] {
     // start_gpa and size must be 2MB aligned.
     if !aligned(start_gpa, Arm64PageSize::Large) {
         panic!("start_gpa not 2mb aligned");
@@ -696,12 +689,8 @@ pub fn build_identity_page_tables_aarch64<P>(
         panic!("size not 2mb aligned");
     }
 
-    for _ in 0..page_table_region_size {
-        page_table_space.push(0);
-    }
-
     let mut page_tables =
-        Arm64PageTableSpace::new(page_table_gpa as usize, page_table_space.as_mut_slice()).unwrap();
+        Arm64PageTableSpace::new(page_table_gpa as usize, page_table_space).unwrap();
     page_tables
         .map_range(
             start_gpa,
@@ -716,7 +705,7 @@ pub fn build_identity_page_tables_aarch64<P>(
 
     let used_space = page_tables.used_space();
 
-    page_table_space.truncate(used_space);
+    &page_table_space[0..used_space]
 }
 
 #[cfg(test)]
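The caller-facing change here: instead of passing a growable PageTableBuffer that the function zero-fills and later truncate()s, the caller allocates the working space up front and receives the initialized prefix back as a borrowed slice. A minimal sketch of the new calling pattern (the buffer size reuses the 1 MiB figure from the deleted MAX_PAGE_TABLE_REGION_SIZE constant; the function name, GPAs, and the `mair` argument are illustrative placeholders, not values from this commit):

fn build_aarch64_tables_example(mair: MemoryAttributeIndirectionEl1) {
    // Caller-owned working space, allocated up front (vec! already zeroes it).
    let mut page_table_space = vec![0u8; 1024 * 1024];
    let capacity = page_table_space.len();

    // Identity-map 1 GiB starting at GPA 0; start_gpa and size are 2MB
    // aligned, as the function's panics require.
    let built: &[u8] = build_identity_page_tables_aarch64(
        0x20_0000,   // page_table_gpa: where the tables themselves will live
        0x0,         // start_gpa
        0x4000_0000, // size: 1 GiB
        mair,
        &mut page_table_space,
    );

    // The result is the used prefix of the caller's own buffer.
    assert!(built.len() <= capacity);
}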

vm/loader/page_table/src/lib.rs
Lines changed: 0 additions & 24 deletions

@@ -18,27 +18,3 @@ pub enum IdentityMapSize {
     /// Identity-map the bottom 8GB
     Size8Gb,
 }
-
-/// A trait for an indexable, mutable, and extendable working memory buffer for page table building
-pub trait PageTableBuffer:
-    core::ops::Index<usize, Output = Self::Element> + core::ops::IndexMut<usize, Output = Self::Element>
-{
-    /// Associated Type defining the element type stored in the buffer
-    type Element;
-
-    fn new() -> Self;
-
-    fn push(&mut self, item: Self::Element);
-
-    fn extend(&mut self, items: &[Self::Element]);
-
-    fn len(&self) -> usize;
-
-    fn as_slice(&self) -> &[Self::Element];
-
-    fn as_mut_slice(&mut self) -> &mut [Self::Element];
-
-    fn truncate(&mut self, new_len: usize);
-
-    fn iter_mut(&mut self) -> core::slice::IterMut<'_, Self::Element>;
-}
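Every accessor the deleted trait declared already exists on plain slices, which is why the trait can go. A quick sketch of the correspondence (a hypothetical helper, not code from this commit):

fn slice_stands_in_for_the_trait(buf: &mut [u8]) -> usize {
    let n = buf.len();            // PageTableBuffer::len
    let _all: &[u8] = &buf[..];   // PageTableBuffer::as_slice
    for b in buf.iter_mut() {     // PageTableBuffer::iter_mut
        *b = 0;
    }
    // push, extend, and truncate have no slice equivalent; allocating the
    // maximum size up front (this commit's approach) removes the need for them.
    n
}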

vm/loader/page_table/src/x64.rs
Lines changed: 48 additions & 43 deletions

@@ -4,9 +4,7 @@
 //! Methods to construct page tables on x64.
 
 use crate::IdentityMapSize;
-use crate::PageTableBuffer;
 use zerocopy::FromBytes;
-use zerocopy::FromZeros;
 use zerocopy::Immutable;
 use zerocopy::IntoBytes;
 use zerocopy::KnownLayout;
@@ -18,6 +16,7 @@ const X64_PTE_DIRTY: u64 = 1 << 6;
 const X64_PTE_LARGE_PAGE: u64 = 1 << 7;
 
 const PAGE_TABLE_ENTRY_COUNT: usize = 512;
+const PAGE_TABLE_ENTRY_SIZE: usize = 8;
 
 const X64_PAGE_SHIFT: u64 = 12;
 const X64_PTE_BITS: u64 = 9;
@@ -31,6 +30,14 @@ pub const X64_LARGE_PAGE_SIZE: u64 = 0x200000;
 /// Number of bytes in a 1GB page for X64.
 pub const X64_1GB_PAGE_SIZE: u64 = 0x40000000;
 
+/// Maximum number of page tables created for an x64 identity map
+pub const PAGE_TABLE_MAX_COUNT: usize = 13;
+
+const PAGE_TABLE_SIZE: usize = PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_SIZE;
+
+/// Maximum number of bytes needed to store an x64 identity map
+pub const PAGE_TABLE_MAX_BYTES: usize = PAGE_TABLE_MAX_COUNT * X64_PAGE_SIZE as usize;
+
 #[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
 #[repr(transparent)]
 pub struct PageTableEntry {
@@ -341,11 +348,11 @@ impl PageTableBuilder {
     /// Build a set of X64 page tables identity mapping the given regions. `size` must be less than 512GB.
     /// This creates up to 3+N page tables: 1 PML4E and up to 2 PDPTE tables, and N page tables counted at 1 per GB of size,
     /// for 2MB mappings.
-    pub fn build<P, F>(self, page_table: &mut P, flattened_page_table: &mut F)
-    where
-        P: PageTableBuffer<Element = PageTable>,
-        F: PageTableBuffer<Element = u8>,
-    {
+    pub fn build<'a>(
+        self,
+        page_table: &mut [PageTable],
+        flattened_page_table: &'a mut [u8],
+    ) -> &'a [u8] {
         const SIZE_512_GB: u64 = 0x8000000000;
 
         if self.size == 0 {
@@ -389,25 +396,24 @@
         }
 
         // Allocate single PML4E page table.
-        page_table.push(PageTable::new_zeroed());
-        let pml4_table_index = 0;
+        let (mut page_table_index, pml4_table_index) = (0, 0);
         let confidential = self.confidential_bit.is_some();
 
         let mut link_tables = |start_va: u64, end_va: u64, use_large_pages: bool| {
             let mut current_va = start_va;
             while current_va < end_va {
                 let pdpte_table_index = {
-                    let next_index = page_table.len();
                     let pml4_entry = page_table[pml4_table_index].entry(current_va, 3);
                     if !pml4_entry.is_present() {
+                        page_table_index += 1;
                         // Allocate and link PDPTE table.
-                        let output_address = page_table_gpa + next_index as u64 * X64_PAGE_SIZE;
+                        let output_address =
+                            page_table_gpa + page_table_index as u64 * X64_PAGE_SIZE;
                         let mut new_entry =
                             Self::build_pte(PageTableEntryType::Pde(output_address));
                         self.set_pte_confidentiality(&mut new_entry, confidential);
                         *pml4_entry = new_entry;
-                        page_table.push(PageTable::new_zeroed());
-                        next_index
+                        page_table_index
                     } else {
                         ((self.get_addr_from_pte(pml4_entry) - page_table_gpa) / X64_PAGE_SIZE)
                             .try_into()
@@ -416,26 +422,25 @@
                 };
 
                 let pde_table_index = {
-                    let next_index = page_table.len();
                     let pdpte_entry = page_table[pdpte_table_index].entry(current_va, 2);
                     if !pdpte_entry.is_present() {
+                        page_table_index += 1;
                         // Allocate and link PDE table.
-                        let output_address = page_table_gpa + next_index as u64 * X64_PAGE_SIZE;
+                        let output_address =
+                            page_table_gpa + page_table_index as u64 * X64_PAGE_SIZE;
                         let mut new_entry =
                             Self::build_pte(PageTableEntryType::Pde(output_address));
                         self.set_pte_confidentiality(&mut new_entry, confidential);
                         *pdpte_entry = new_entry;
-                        page_table.push(PageTable::new_zeroed());
 
-                        next_index
+                        page_table_index
                     } else {
                         ((self.get_addr_from_pte(pdpte_entry) - page_table_gpa) / X64_PAGE_SIZE)
                             .try_into()
                             .expect("Valid page table index")
                     }
                 };
 
-                let next_index = page_table.len();
                 let pde_entry = page_table[pde_table_index].entry(current_va, 1);
                 assert!(!pde_entry.is_present());
 
@@ -449,15 +454,16 @@
                     current_va += X64_LARGE_PAGE_SIZE;
                 } else {
                     let pt_table_index = if !pde_entry.is_present() {
+                        page_table_index += 1;
                         // Allocate and link page table.
-                        let output_address = page_table_gpa + next_index as u64 * X64_PAGE_SIZE;
+                        let output_address =
+                            page_table_gpa + page_table_index as u64 * X64_PAGE_SIZE;
                         let mut new_entry =
                             Self::build_pte(PageTableEntryType::Pde(output_address));
                         self.set_pte_confidentiality(&mut new_entry, confidential);
                         *pde_entry = new_entry;
-                        page_table.push(PageTable::new_zeroed());
 
-                        next_index
+                        page_table_index
                     } else {
                         ((self.get_addr_from_pte(pde_entry) - page_table_gpa) / X64_PAGE_SIZE)
                             .try_into()
@@ -487,7 +493,7 @@
         }
 
         // Flatten page table vec into u8 vec
-        flatten_page_table(page_table, flattened_page_table);
+        flatten_page_table(page_table, flattened_page_table, page_table_index + 1)
     }
 }
 
@@ -497,18 +503,15 @@
 /// An optional PML4E entry may be linked, with arguments being (link_target_gpa, linkage_gpa).
 /// link_target_gpa represents the GPA of the PML4E to link into the built page table.
 /// linkage_gpa represents the GPA at which the linked PML4E should be linked.
-pub fn build_page_tables_64<P, F>(
+pub fn build_page_tables_64<'a>(
     page_table_gpa: u64,
     address_bias: u64,
     identity_map_size: IdentityMapSize,
     pml4e_link: Option<(u64, u64)>,
    read_only: bool,
-    page_table: &mut P,
-    flattened_page_table: &mut F,
-) where
-    P: PageTableBuffer<Element = PageTable>,
-    F: PageTableBuffer<Element = u8>,
-{
+    page_table: &mut [PageTable],
+    flattened_page_table: &'a mut [u8],
+) -> &'a [u8] {
     // Allocate page tables. There are up to 6 total page tables:
     // 1 PML4E (Level 4) (omitted if the address bias is non-zero)
     // 1 PDPTE (Level 3)
@@ -519,9 +522,6 @@ pub fn build_page_tables_64<P, F>(
         IdentityMapSize::Size8Gb => 8,
     };
     let page_table_count = leaf_page_table_count + if address_bias == 0 { 2 } else { 1 };
-    for _ in 0..page_table_count {
-        page_table.push(PageTable::new_zeroed());
-    }
     let mut page_table_allocator = page_table.iter_mut().enumerate();
 
     // Allocate single PDPTE table.
@@ -586,11 +586,8 @@
         }
     }
 
-    // All pagetables should be used, code bug if not.
-    assert!(page_table_allocator.next().is_none());
-
     // Flatten page table vec into u8 vec
-    flatten_page_table(page_table, flattened_page_table)
+    flatten_page_table(page_table, flattened_page_table, page_table_count)
 }
 
 /// Align an address up to the start of the next page.
@@ -607,14 +604,22 @@ pub fn align_up_to_large_page_size(address: u64) -> u64 {
 pub fn align_up_to_1_gb_page_size(address: u64) -> u64 {
     (address + X64_1GB_PAGE_SIZE - 1) & !(X64_1GB_PAGE_SIZE - 1)
 }
-fn flatten_page_table<P, F>(page_table: &mut P, flattened_page_table: &mut F)
-where
-    P: PageTableBuffer<Element = PageTable>,
-    F: PageTableBuffer<Element = u8>,
-{
-    for table_index in 0..page_table.len() {
-        flattened_page_table.extend(page_table[table_index].as_bytes())
+
+fn flatten_page_table<'a>(
+    page_table: &mut [PageTable],
+    flattened_page_table: &'a mut [u8],
+    page_table_count: usize,
+) -> &'a [u8] {
+    for (page_table, dst) in page_table
+        .iter()
+        .take(page_table_count)
+        .zip(flattened_page_table.chunks_mut(PAGE_TABLE_SIZE))
+    {
+        let src = page_table.as_bytes();
+        dst.copy_from_slice(src);
     }
+
+    &flattened_page_table[0..PAGE_TABLE_SIZE * page_table_count]
 }
 
 #[cfg(test)]
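With PAGE_TABLE_MAX_COUNT and PAGE_TABLE_MAX_BYTES exported, an x64 caller can size both working buffers at their maximums before calling in. A sketch of the intended usage, assuming PageTable implements zerocopy's FromZeros (the import removed above shows PageTable::new_zeroed() was already in use); the function name and page_table_gpa value are placeholders:

use zerocopy::FromZeros;

fn build_x64_tables_example() {
    // Fixed-size working buffers, allocated up front.
    let mut page_table: [PageTable; PAGE_TABLE_MAX_COUNT] =
        core::array::from_fn(|_| PageTable::new_zeroed());
    let mut flattened = [0u8; PAGE_TABLE_MAX_BYTES];

    // Identity-map the bottom 8GB with no bias: 8 leaf tables plus a PML4E
    // and a PDPTE, well under PAGE_TABLE_MAX_COUNT.
    let built: &[u8] = build_page_tables_64(
        0x1000, // page_table_gpa (placeholder)
        0,      // address_bias
        IdentityMapSize::Size8Gb,
        None,   // pml4e_link
        false,  // read_only
        &mut page_table,
        &mut flattened,
    );

    // `built` is the flattened prefix actually produced, one 4KB page per table.
    assert!(built.len() <= PAGE_TABLE_MAX_BYTES);
}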

vm/loader/src/common.rs
Lines changed: 0 additions & 57 deletions

@@ -8,10 +8,7 @@ use crate::importer::ImageLoad;
 use crate::importer::SegmentRegister;
 use crate::importer::TableRegister;
 use crate::importer::X86Register;
-use core::ops::Index;
-use core::ops::IndexMut;
 use hvdef::HV_PAGE_SIZE;
-use page_table::PageTableBuffer;
 use thiserror::Error;
 use vm_topology::memory::MemoryLayout;
 use x86defs::GdtEntry;
@@ -198,57 +195,3 @@ fn mtrr_mask(gpa_space_size: u8, maximum_address: u64) -> u64 {
 
     result
 }
-
-///
-pub struct VecPageTableBuffer<T: Clone> {
-    inner: Vec<T>,
-}
-
-impl<T: Clone> PageTableBuffer for VecPageTableBuffer<T> {
-    type Element = T;
-
-    fn new() -> Self {
-        VecPageTableBuffer { inner: Vec::new() }
-    }
-
-    fn push(&mut self, item: T) {
-        self.inner.push(item)
-    }
-
-    fn len(&self) -> usize {
-        self.inner.len()
-    }
-
-    fn extend(&mut self, items: &[T]) {
-        self.inner.extend_from_slice(items);
-    }
-
-    fn truncate(&mut self, new_len: usize) {
-        self.inner.truncate(new_len)
-    }
-
-    fn as_mut_slice(&mut self) -> &mut [T] {
-        self.inner.as_mut_slice()
-    }
-
-    fn as_slice(&self) -> &[T] {
-        self.inner.as_slice()
-    }
-
-    fn iter_mut(&mut self) -> core::slice::IterMut<'_, T> {
-        self.inner.iter_mut()
-    }
-}
-
-impl<T: Clone> Index<usize> for VecPageTableBuffer<T> {
-    type Output = T;
-    fn index(&self, index: usize) -> &T {
-        &self.inner[index]
-    }
-}
-
-impl<T: Clone> IndexMut<usize> for VecPageTableBuffer<T> {
-    fn index_mut(&mut self, index: usize) -> &mut T {
-        &mut self.inner[index]
-    }
-}
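VecPageTableBuffer existed only to give Vec the trait surface; with the new slice-based signatures, any owned storage already coerces to the needed &mut [u8]. A small illustration (the import path for the constant is an assumption about this crate's layout, and the function is hypothetical):

use page_table::x64::PAGE_TABLE_MAX_BYTES;

fn any_storage_works() {
    // Heap-backed storage: a Vec<u8> borrows as &mut [u8]...
    let mut heap_buf = vec![0u8; PAGE_TABLE_MAX_BYTES];
    let _: &mut [u8] = &mut heap_buf;

    // ...and so does a fixed-size stack array, with no wrapper type needed.
    let mut stack_buf = [0u8; PAGE_TABLE_MAX_BYTES];
    let _: &mut [u8] = &mut stack_buf;
}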
