@@ -280,7 +280,6 @@ pub struct PageTableBuilder {
     confidential_bit: Option<u32>,
     map_reset_vector: bool,
     read_only: bool,
-    debug: bool,
 }
 
 impl PteOps for PageTableBuilder {
@@ -307,7 +306,6 @@ impl PageTableBuilder {
             local_map: None,
             confidential_bit: None,
             read_only: false,
-            debug: false,
             map_reset_vector: false,
         }
     }
@@ -328,11 +326,6 @@ impl PageTableBuilder {
         self
     }
 
-    pub fn with_debug(mut self, debug: bool) -> Self {
-        self.debug = debug;
-        self
-    }
-
     /// Map the reset vector at page 0xFFFFF with a single page.
     pub fn with_reset_vector(mut self, map_reset_vector: bool) -> Self {
         self.map_reset_vector = map_reset_vector;
@@ -353,6 +346,8 @@ impl PageTableBuilder {
         page_table: &mut [PageTable],
         flattened_page_table: &'a mut [u8],
     ) -> &'a [u8] {
+        assert!(flattened_page_table.len() == (page_table.len() * PAGE_TABLE_SIZE));
+
         const SIZE_512_GB: u64 = 0x8000000000;
 
         if self.size == 0 {
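The new assertion pins down the relationship between the two output buffers: the flattened byte buffer must hold exactly one page-sized chunk per `PageTable`. A minimal sketch of a call site that satisfies it, assuming `PAGE_TABLE_SIZE` is the byte size of one `PageTable` and that `PageTable` implements `Default` (both assumptions about this crate, not shown in the diff):

```rust
// Hypothetical caller-side sizing; identifiers per the assumptions above.
let mut tables = vec![PageTable::default(); 6];
let mut flat = vec![0u8; tables.len() * PAGE_TABLE_SIZE];
// flat.len() == tables.len() * PAGE_TABLE_SIZE, so the new assert holds.
```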
@@ -497,97 +492,131 @@ impl PageTableBuilder {
     }
 }
 
-/// Build a set of X64 page tables identity mapping the bottom address
-/// space with an optional address bias.
-///
-/// An optional PML4E entry may be linked, with arguments being (link_target_gpa, linkage_gpa).
-/// link_target_gpa represents the GPA of the PML4E to link into the built page table.
-/// linkage_gpa represents the GPA at which the linked PML4E should be linked.
-pub fn build_page_tables_64<'a>(
+#[derive(Debug, Clone)]
+pub struct IdentityMapBuilder {
     page_table_gpa: u64,
-    address_bias: u64,
     identity_map_size: IdentityMapSize,
+    address_bias: u64,
     pml4e_link: Option<(u64, u64)>,
     read_only: bool,
-    page_table: &mut [PageTable],
-    flattened_page_table: &'a mut [u8],
-) -> &'a [u8] {
-    // Allocate page tables. There are up to 6 total page tables:
-    //      1 PML4E (Level 4) (omitted if the address bias is non-zero)
-    //      1 PDPTE (Level 3)
-    //      4 or 8 PDE tables (Level 2)
-    // Note that there are no level 1 page tables, as 2MB pages are used.
-    let leaf_page_table_count = match identity_map_size {
-        IdentityMapSize::Size4Gb => 4,
-        IdentityMapSize::Size8Gb => 8,
-    };
-    let page_table_count = leaf_page_table_count + if address_bias == 0 { 2 } else { 1 };
-    let mut page_table_allocator = page_table.iter_mut().enumerate();
-
-    // Allocate single PDPTE table.
-    let pdpte_table = if address_bias == 0 {
-        // Allocate single PML4E page table.
-        let (_, pml4e_table) = page_table_allocator
-            .next()
-            .expect("pagetable should always be available, code bug if not");
-
-        // PDPTE table is the next pagetable.
-        let (pdpte_table_index, pdpte_table) = page_table_allocator
-            .next()
-            .expect("pagetable should always be available, code bug if not");
-
-        // Set PML4E entry linking PML4E to PDPTE.
-        let output_address = page_table_gpa + pdpte_table_index as u64 * X64_PAGE_SIZE;
-        pml4e_table.entries[0].set_entry(PageTableEntryType::Pde(output_address), read_only);
-
-        // Set PML4E entry to link the additional entry if specified.
-        if let Some((link_target_gpa, linkage_gpa)) = pml4e_link {
-            assert!((linkage_gpa & 0x7FFFFFFFFF) == 0);
-            pml4e_table.entries[linkage_gpa as usize >> 39]
-                .set_entry(PageTableEntryType::Pde(link_target_gpa), read_only);
-        }
+}
 
-        pdpte_table
-    } else {
-        // PDPTE table is the first table, if no PML4E.
-        page_table_allocator
-            .next()
-            .expect("pagetable should always be available, code bug if not")
-            .1
-    };
-
-    // Build PDEs that point to 2 MB pages.
-    let top_address = match identity_map_size {
-        IdentityMapSize::Size4Gb => 0x100000000u64,
-        IdentityMapSize::Size8Gb => 0x200000000u64,
-    };
-    let mut current_va = 0;
-
-    while current_va < top_address {
-        // Allocate a new PDE table
-        let (pde_table_index, pde_table) = page_table_allocator
-            .next()
-            .expect("pagetable should always be available, code bug if not");
-
-        // Link PDPTE table to PDE table (L3 to L2)
-        let pdpte_index = get_amd64_pte_index(current_va, 2);
-        let output_address = page_table_gpa + pde_table_index as u64 * X64_PAGE_SIZE;
-        let pdpte_entry = &mut pdpte_table.entries[pdpte_index as usize];
-        assert!(!pdpte_entry.is_present());
-        pdpte_entry.set_entry(PageTableEntryType::Pde(output_address), read_only);
-
-        // Set all 2MB entries in this PDE table.
-        for entry in pde_table.iter_mut() {
-            entry.set_entry(
-                PageTableEntryType::Leaf2MbPage(current_va + address_bias),
-                read_only,
-            );
-            current_va += X64_LARGE_PAGE_SIZE;
+impl IdentityMapBuilder {
+    pub fn new(page_table_gpa: u64, identity_map_size: IdentityMapSize) -> Self {
+        IdentityMapBuilder {
+            page_table_gpa,
+            identity_map_size,
+            address_bias: 0,
+            pml4e_link: None,
+            read_only: false,
         }
     }
 
-    // Flatten page table vec into u8 vec
-    flatten_page_table(page_table, flattened_page_table, page_table_count)
+    pub fn with_address_bias(mut self, address_bias: u64) -> Self {
+        self.address_bias = address_bias;
+        self
+    }
+
+    pub fn with_pml4e_link(mut self, pml4e_link: (u64, u64)) -> Self {
+        self.pml4e_link = Some(pml4e_link);
+        self
+    }
+
+    pub fn with_read_only(mut self, read_only: bool) -> Self {
+        self.read_only = read_only;
+        self
+    }
+
+    /// Build a set of X64 page tables identity mapping the bottom address
+    /// space with an optional address bias.
+    ///
+    /// An optional PML4E entry may be linked, with arguments being (link_target_gpa, linkage_gpa).
+    /// link_target_gpa represents the GPA of the PML4E to link into the built page table.
+    /// linkage_gpa represents the GPA at which the linked PML4E should be linked.
+    pub fn build<'a>(
+        self,
+        page_table: &mut [PageTable],
+        flattened_page_table: &'a mut [u8],
+    ) -> &'a [u8] {
+        assert!(flattened_page_table.len() == (page_table.len() * PAGE_TABLE_SIZE));
+        // Allocate page tables. There are up to 6 total page tables:
+        //      1 PML4E (Level 4) (omitted if the address bias is non-zero)
+        //      1 PDPTE (Level 3)
+        //      4 or 8 PDE tables (Level 2)
+        // Note that there are no level 1 page tables, as 2MB pages are used.
+        let leaf_page_table_count = match self.identity_map_size {
+            IdentityMapSize::Size4Gb => 4,
+            IdentityMapSize::Size8Gb => 8,
+        };
+        let page_table_count = leaf_page_table_count + if self.address_bias == 0 { 2 } else { 1 };
+        let mut page_table_allocator = page_table.iter_mut().enumerate();
+
+        // Allocate single PDPTE table.
+        let pdpte_table = if self.address_bias == 0 {
+            // Allocate single PML4E page table.
+            let (_, pml4e_table) = page_table_allocator
+                .next()
+                .expect("pagetable should always be available, code bug if not");
+
+            // PDPTE table is the next pagetable.
+            let (pdpte_table_index, pdpte_table) = page_table_allocator
+                .next()
+                .expect("pagetable should always be available, code bug if not");
+
+            // Set PML4E entry linking PML4E to PDPTE.
+            let output_address = self.page_table_gpa + pdpte_table_index as u64 * X64_PAGE_SIZE;
+            pml4e_table.entries[0]
+                .set_entry(PageTableEntryType::Pde(output_address), self.read_only);
+
+            // Set PML4E entry to link the additional entry if specified.
+            if let Some((link_target_gpa, linkage_gpa)) = self.pml4e_link {
+                assert!((linkage_gpa & 0x7FFFFFFFFF) == 0);
+                pml4e_table.entries[linkage_gpa as usize >> 39]
+                    .set_entry(PageTableEntryType::Pde(link_target_gpa), self.read_only);
+            }
+
+            pdpte_table
+        } else {
+            // PDPTE table is the first table, if no PML4E.
+            page_table_allocator
+                .next()
+                .expect("pagetable should always be available, code bug if not")
+                .1
+        };
+
+        // Build PDEs that point to 2 MB pages.
+        let top_address = match self.identity_map_size {
+            IdentityMapSize::Size4Gb => 0x100000000u64,
+            IdentityMapSize::Size8Gb => 0x200000000u64,
+        };
+        let mut current_va = 0;
+
+        while current_va < top_address {
+            // Allocate a new PDE table
+            let (pde_table_index, pde_table) = page_table_allocator
+                .next()
+                .expect("pagetable should always be available, code bug if not");
+
+            // Link PDPTE table to PDE table (L3 to L2)
+            let pdpte_index = get_amd64_pte_index(current_va, 2);
+            let output_address = self.page_table_gpa + pde_table_index as u64 * X64_PAGE_SIZE;
+            let pdpte_entry = &mut pdpte_table.entries[pdpte_index as usize];
+            assert!(!pdpte_entry.is_present());
+            pdpte_entry.set_entry(PageTableEntryType::Pde(output_address), self.read_only);
+
+            // Set all 2MB entries in this PDE table.
+            for entry in pde_table.iter_mut() {
+                entry.set_entry(
+                    PageTableEntryType::Leaf2MbPage(current_va + self.address_bias),
+                    self.read_only,
+                );
+                current_va += X64_LARGE_PAGE_SIZE;
+            }
+        }
+
+        // Flatten page table vec into u8 vec
+        flatten_page_table(page_table, flattened_page_table, page_table_count)
+    }
 }
 
 /// Align an address up to the start of the next page.
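The net effect of the last hunk is to replace the six-argument `build_page_tables_64` free function with a builder whose optional knobs (`address_bias`, `pml4e_link`, `read_only`) default sensibly. A hedged usage sketch, reusing the sizing assumptions from the note above (`gpa` is a placeholder; for `Size4Gb` with a zero bias, `page_table_count` is 4 + 2 = 6 tables):

```rust
// Hypothetical call site mirroring the old
// build_page_tables_64(gpa, 0, IdentityMapSize::Size4Gb, None, false, ...).
let mut tables = vec![PageTable::default(); 6]; // assumes PageTable: Default
let mut flat = vec![0u8; tables.len() * PAGE_TABLE_SIZE];
let raw: &[u8] = IdentityMapBuilder::new(gpa, IdentityMapSize::Size4Gb)
    .with_read_only(false)
    .build(&mut tables, &mut flat);
```

Note that `build` takes `self` by value, so each builder is consumed by a single build; the new `#[derive(Clone)]` lets a caller reuse one configuration for several table sets.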