@@ -32,32 +32,46 @@ use crate::fs::{FileSystem, Result};

 use crate::fs::ext2::Ext2;
 use crate::mem::paging::*;
+use crate::mem::AddressSpace;
 use crate::utils::sync::Mutex;

 use super::cache::{Cache, CacheArc, CacheItem, Cacheable};
 use super::devfs::{alloc_device_marker, Device};
 use super::inode::INodeInterface;

-type PageCacheKey = (usize, usize); // (block device pointer, offset)
-type PageCacheItem = CacheArc<CacheItem<PageCacheKey, CachedPage>>;
+type PageCacheKey = (usize, usize); // (owner pointer, page index)
+pub type PageCacheItem = CacheArc<CacheItem<PageCacheKey, CachedPage>>;

-struct CachedPage {
-    device: Weak<dyn CachedAccess>,
+struct DirtyMapping {
+    addr_space: AddressSpace,
+    addr: VirtAddr,
+}
+
+pub struct CachedPage {
+    owner: Weak<dyn CachedAccess>,
     offset: usize,
     page: PhysFrame,
     dirty: AtomicBool,
+    dirty_mappings: Mutex<Vec<DirtyMapping>>,
 }

 impl CachedPage {
-    fn new(device: Weak<dyn CachedAccess>, offset: usize) -> Self {
-        Self {
-            device,
+    fn new(owner: Weak<dyn CachedAccess>, offset: usize) -> Self {
+        let this = Self {
+            owner,
             offset,
             page: FRAME_ALLOCATOR
                 .allocate_frame()
                 .expect("page_cache: out of memory"),
             dirty: AtomicBool::new(false),
-        }
+            dirty_mappings: Mutex::new(Vec::new()),
+        };
+        // TODO: Temporary hack; is there a cleaner way to do this? It is
+        // required because when a process VM unmaps a page backed by a cached
+        // page, the frame's refcount would drop to zero and it would be freed.
+        get_vm_frames().unwrap()[this.page.start_address().as_u64() as usize / 4096usize]
+            .inc_ref_count();
+        this
     }

     fn data_mut(&self) -> &mut [MaybeUninit<u8>] {
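
Aside: pages are now keyed by their owning `CachedAccess` object rather than by a block device, which is what lets non-device owners share the cache. Below is a std-only sketch of the keying scheme; `Owner` and `Disk` are hypothetical stand-ins for the kernel's `CachedAccess` implementors, not types from this codebase.

```rust
use std::sync::{Arc, Weak};

// Hypothetical stand-ins for `CachedAccess` and one of its implementors.
trait Owner {}
struct Disk;
impl Owner for Disk {}

// Mirrors `CachedPage::make_key`: the key pairs the address behind the
// owner's `Weak` pointer with a page index, so two different owners can
// cache the same index without colliding.
fn make_key(owner: &Weak<dyn Owner>, index: usize) -> (usize, usize) {
    (owner.as_ptr() as *const () as usize, index)
}

fn main() {
    let disk: Arc<dyn Owner> = Arc::new(Disk);
    println!("{:?}", make_key(&Arc::downgrade(&disk), 3));
}
```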
@@ -72,10 +86,14 @@ impl CachedPage {
         unsafe { core::slice::from_raw_parts_mut(data_ptr, Size4KiB::SIZE as usize) }
     }

-    fn data_addr(&self) -> PhysAddr {
+    pub fn data_addr(&self) -> PhysAddr {
         self.page.start_address()
     }

+    pub fn page(&self) -> PhysFrame {
+        self.page
+    }
+
     fn make_key(device: &Weak<dyn CachedAccess>, offset: usize) -> PageCacheKey {
         (device.as_ptr().addr(), offset)
     }
@@ -85,26 +103,35 @@ impl CachedPage {
         self.dirty.load(Ordering::SeqCst)
     }

-    fn mark_dirty(&self) {
+    pub fn mark_dirty(&self) {
+        log::error!("marking dirty --------------------------------------");
         self.dirty.store(true, Ordering::SeqCst);
     }

     fn device(&self) -> Arc<dyn CachedAccess> {
-        self.device.upgrade().unwrap()
+        self.owner.upgrade().unwrap()
     }

     fn sync(&self) {
         if !self.is_dirty() {
             return;
         }

-        // Commit the changes made to the cache to the disk.
-        let disk = self.device();
-
+        // Commit the changes made to the cache to the owner.
+        let owner = self.device();
         let offset_bytes = self.offset * Size4KiB::SIZE as usize;
-        let sector = offset_bytes / disk.block_size();
+        owner.write_direct(offset_bytes, self.page);
+
+        for mut mapping in self.dirty_mappings.lock_irq().drain(..) {
+            let mut offset_table = mapping.addr_space.offset_page_table();
+            offset_table
+                .unmap(Page::<Size4KiB>::containing_address(mapping.addr))
+                .unwrap()
+                .1
+                .flush();
+        }

-        disk.write_dma(sector, self.data_addr(), Size4KiB::SIZE as usize);
+        self.dirty.store(false, Ordering::SeqCst);
     }
 }

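The reworked `sync` does three things: it writes the page back through the owner's `write_direct`, tears down any user mappings recorded in `dirty_mappings` so the next access faults back into the cache, and finally clears the dirty bit, which the previous version never did. A minimal std-only model of that ordering; `write_back` and `drop_mappings` are stand-in closures, not kernel APIs.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct Page {
    dirty: AtomicBool,
    data: [u8; 4096],
}

impl Page {
    fn sync(&self, write_back: impl Fn(&[u8]), drop_mappings: impl FnOnce()) {
        if !self.dirty.load(Ordering::SeqCst) {
            return; // clean pages need no I/O
        }
        write_back(&self.data); // commit the page to its owner
        drop_mappings(); // stale user mappings must fault back in
        // Clearing the flag makes the next sync a no-op.
        self.dirty.store(false, Ordering::SeqCst);
    }
}

fn main() {
    let page = Page { dirty: AtomicBool::new(true), data: [0; 4096] };
    page.sync(|d| println!("wrote {} bytes", d.len()), || println!("unmapped"));
    page.sync(|_| unreachable!("page is clean now"), || ());
}
```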
@@ -116,12 +143,12 @@ impl Drop for CachedPage {

 impl Cacheable<PageCacheKey> for CachedPage {
     fn cache_key(&self) -> PageCacheKey {
-        Self::make_key(&self.device, self.offset)
+        Self::make_key(&self.owner, self.offset)
     }
 }

 lazy_static::lazy_static! {
-    static ref PAGE_CACHE: Arc<Cache<PageCacheKey, CachedPage>> = Cache::new();
+    pub(in crate::fs) static ref PAGE_CACHE: Arc<Cache<PageCacheKey, CachedPage>> = Cache::new();
 }

 impl Cache<PageCacheKey, CachedPage> {
@@ -145,16 +172,16 @@ impl Cache<PageCacheKey, CachedPage> {
         let device = device.upgrade().expect("page_cache: device dropped");

         let aligned_offset = align_down(offset as u64, Size4KiB::SIZE) as usize;
-        let sector = aligned_offset / device.block_size();
-
         device
-            .read_dma(sector, page.data_addr(), Size4KiB::SIZE as usize)
+            .read_direct(aligned_offset, page.page())
             .expect("page_cache: failed to read block");

         PAGE_CACHE.make_item_cached(page)
     }
 }

+// TODO: cache hit/miss stats
+
 pub struct DirtyRef<T: Sized> {
     cache: PageCacheItem,
     ptr: *mut T,
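
`get_page` is a lookup-or-populate path: on a hit it hands back the cached item; on a miss it allocates a fresh page, fills it from the owner at a 4 KiB-aligned offset via `read_direct`, and inserts it. A std-only model of that flow; the `HashMap`-backed `PageCache` and the `read_direct` closure are illustrative stand-ins for the kernel's `Cache` machinery.

```rust
use std::collections::HashMap;
use std::sync::Arc;

const PAGE_SIZE: usize = 4096;

struct PageCache {
    pages: HashMap<(usize, usize), Arc<[u8; PAGE_SIZE]>>,
}

impl PageCache {
    fn get_page(
        &mut self,
        owner: usize,
        offset: usize,
        read_direct: impl FnOnce(usize) -> [u8; PAGE_SIZE],
    ) -> Arc<[u8; PAGE_SIZE]> {
        let aligned = offset & !(PAGE_SIZE - 1); // align_down(offset, 4 KiB)
        self.pages
            .entry((owner, aligned / PAGE_SIZE))
            // Miss: fill a fresh page from the owner.
            .or_insert_with(|| Arc::new(read_direct(aligned)))
            // Hit (or freshly inserted): bump the refcount and hand it out.
            .clone()
    }
}

fn main() {
    let mut cache = PageCache { pages: HashMap::new() };
    let page = cache.get_page(0xdead, 5000, |aligned| {
        assert_eq!(aligned, 4096); // 5000 rounds down to the second page
        [0xab; PAGE_SIZE]
    });
    assert_eq!(page[0], 0xab);
}
```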
@@ -202,9 +229,12 @@ pub trait BlockDeviceInterface: Send + Sync {
     fn write_block(&self, sector: usize, buf: &[u8]) -> Option<usize>;
 }

-pub trait CachedAccess: BlockDeviceInterface {
+pub trait CachedAccess: Send + Sync {
     fn sref(&self) -> Weak<dyn CachedAccess>;

+    fn read_direct(&self, offset: usize, dest: PhysFrame) -> Option<usize>;
+    fn write_direct(&self, offset: usize, src: PhysFrame) -> Option<usize>;
+
     fn read(&self, mut offset: usize, dest: &mut [MaybeUninit<u8>]) -> Option<usize> {
         let mut loc = 0;

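Dropping the `BlockDeviceInterface` supertrait means the cache no longer assumes sectors: any owner that can move whole pages can implement the two `*_direct` hooks. A sketch of that idea with a hypothetical in-memory owner; note the kernel trait passes a `PhysFrame`, which this std-only version replaces with a plain 4 KiB buffer.

```rust
trait CachedAccess: Send + Sync {
    fn read_direct(&self, offset: usize, dest: &mut [u8; 4096]) -> Option<usize>;
    fn write_direct(&self, offset: usize, src: &[u8; 4096]) -> Option<usize>;
}

// A hypothetical non-block-device owner: page-granular I/O over a Vec.
struct RamStore {
    data: std::sync::Mutex<Vec<u8>>,
}

impl CachedAccess for RamStore {
    fn read_direct(&self, offset: usize, dest: &mut [u8; 4096]) -> Option<usize> {
        let data = self.data.lock().ok()?;
        dest.copy_from_slice(data.get(offset..offset + 4096)?);
        Some(4096)
    }

    fn write_direct(&self, offset: usize, src: &[u8; 4096]) -> Option<usize> {
        let mut data = self.data.lock().ok()?;
        data.get_mut(offset..offset + 4096)?.copy_from_slice(src);
        Some(4096)
    }
}

fn main() {
    let store = RamStore { data: std::sync::Mutex::new(vec![7u8; 8192]) };
    let mut buf = [0u8; 4096];
    assert_eq!(store.read_direct(4096, &mut buf), Some(4096));
    assert_eq!(buf[0], 7);
}
```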
@@ -236,6 +266,9 @@ pub trait CachedAccess: BlockDeviceInterface {
         let mut loc = 0;

         while loc < buffer.len() {
+            // TODO: If the page is not in the page cache and the write falls
+            // exactly on page-size boundaries, skip reading it from disk and
+            // just allocate it and mark it dirty immediately.
             let page = PAGE_CACHE.get_page(&self.sref(), offset);

             let page_offset = offset % Size4KiB::SIZE as usize;
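
The loop around this TODO splits an arbitrary `(offset, len)` write into per-page chunks: `page_offset = offset % 4096` and `size = min(4096 - page_offset, remaining)`. The optimization the TODO describes applies exactly when a chunk covers a whole page. A runnable model of the chunking; `split_write` is an illustrative helper, not part of the kernel.

```rust
const PAGE_SIZE: usize = 4096; // matches Size4KiB::SIZE

// Yields (page_index, offset_within_page, chunk_len) per touched page.
fn split_write(mut offset: usize, len: usize) -> Vec<(usize, usize, usize)> {
    let mut chunks = Vec::new();
    let mut loc = 0;
    while loc < len {
        let page_offset = offset % PAGE_SIZE;
        let size = (PAGE_SIZE - page_offset).min(len - loc);
        // When page_offset == 0 && size == PAGE_SIZE, the chunk overwrites
        // the whole page and the backing read could be skipped (the TODO).
        chunks.push((offset / PAGE_SIZE, page_offset, size));
        offset += size;
        loc += size;
    }
    chunks
}

fn main() {
    // A 6000-byte write at offset 4090 touches pages 0, 1 and 2; only the
    // middle chunk covers a full page.
    assert_eq!(
        split_write(4090, 6000),
        [(0, 4090, 6), (1, 0, 4096), (2, 0, 1898)]
    );
}
```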
@@ -318,6 +351,22 @@ impl CachedAccess for BlockDevice {
     fn sref(&self) -> Weak<dyn CachedAccess> {
         self.sref.clone()
     }
+
+    fn read_direct(&self, offset: usize, dest: PhysFrame) -> Option<usize> {
+        self.dev.read_dma(
+            offset / self.dev.block_size(),
+            dest.start_address(),
+            Size4KiB::SIZE as _,
+        )
+    }
+
+    fn write_direct(&self, offset: usize, src: PhysFrame) -> Option<usize> {
+        self.dev.write_dma(
+            offset / self.dev.block_size(),
+            src.start_address(),
+            Size4KiB::SIZE as _,
+        )
+    }
 }

 impl INodeInterface for BlockDevice {}
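
For the block-device implementation, the byte offset handed down by the cache is converted to a sector index by dividing by the device's block size, so one 4 KiB page spans several sectors. A small worked example, assuming a 512-byte block size (the real value comes from `self.dev.block_size()`):

```rust
fn main() {
    let block_size: u64 = 512; // assumed; queried from the device in the kernel
    let page_bytes: u64 = 4096; // Size4KiB::SIZE

    // A cached page at byte offset 8192 starts at sector 16, and the DMA
    // transfer for one page covers 8 sectors.
    assert_eq!(8192 / block_size, 16);
    assert_eq!(page_bytes / block_size, 8);
}
```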