
Commit a1e1b65

storage: a series of fixups for stargz chunks
For rafs v6 in the fscache daemon, we must be compatible with stargz chunks. Signed-off-by: Yan Song <[email protected]>
1 parent 147d176 commit a1e1b65

File tree

9 files changed: +97 -23 lines changed


rafs/src/fs.rs

Lines changed: 5 additions & 6 deletions
```diff
@@ -41,8 +41,7 @@ use nydus_utils::metrics::{self, FopRecorder, StatsFop::*};
 
 use crate::metadata::layout::RAFS_ROOT_INODE;
 use crate::metadata::{
-    Inode, PostWalkAction, RafsInode, RafsSuper, RafsSuperMeta, DOT, DOTDOT,
-    RAFS_DEFAULT_CHUNK_SIZE,
+    Inode, PostWalkAction, RafsInode, RafsSuper, RafsSuperMeta, DOT, DOTDOT, RAFS_MAX_CHUNK_SIZE,
 };
 use crate::{RafsError, RafsIoReader, RafsResult};
 
@@ -105,9 +104,9 @@ impl TryFrom<&RafsConfig> for BlobPrefetchConfig {
     type Error = RafsError;
 
     fn try_from(c: &RafsConfig) -> RafsResult<Self> {
-        if c.fs_prefetch.merging_size as u64 > RAFS_DEFAULT_CHUNK_SIZE {
+        if c.fs_prefetch.merging_size as u64 > RAFS_MAX_CHUNK_SIZE {
             return Err(RafsError::Configure(
-                "Merging size can't exceed chunk size".to_string(),
+                "merging size can't exceed max chunk size".to_string(),
             ));
         } else if c.fs_prefetch.enable && c.fs_prefetch.threads_count == 0 {
             return Err(RafsError::Configure(
@@ -924,8 +923,8 @@ impl FileSystem for Rafs {
 #[cfg(test)]
 pub(crate) mod tests {
     use super::*;
+    use crate::metadata::RAFS_DEFAULT_CHUNK_SIZE;
     use crate::RafsIoRead;
-    use storage::RAFS_MAX_CHUNK_SIZE;
 
     pub fn new_rafs_backend() -> Box<Rafs> {
         let config = r#"
@@ -1075,7 +1074,7 @@ pub(crate) mod tests {
         config.fs_prefetch.merging_size = RAFS_MAX_CHUNK_SIZE as usize + 1;
         assert!(BlobPrefetchConfig::try_from(&config).is_err());
 
-        config.fs_prefetch.merging_size = RAFS_MAX_CHUNK_SIZE as usize;
+        config.fs_prefetch.merging_size = RAFS_DEFAULT_CHUNK_SIZE as usize;
         config.fs_prefetch.bandwidth_rate = 1;
         config.fs_prefetch.prefetch_all = true;
         assert!(BlobPrefetchConfig::try_from(&config).is_ok());
```

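The prefetch merging size is now bounded by RAFS_MAX_CHUNK_SIZE instead of RAFS_DEFAULT_CHUNK_SIZE (the two constants were equal before this commit; see storage/src/lib.rs below). A minimal standalone sketch of the new rule, with `validate_merging_size` as a hypothetical stand-in for the check inside `BlobPrefetchConfig::try_from`:

```rust
const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024; // 1MB
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16; // 16MB after this commit

// Hypothetical stand-in for the bound enforced in BlobPrefetchConfig::try_from.
fn validate_merging_size(merging_size: u64) -> Result<(), String> {
    if merging_size > RAFS_MAX_CHUNK_SIZE {
        return Err("merging size can't exceed max chunk size".to_string());
    }
    Ok(())
}

fn main() {
    // Mirrors the updated unit test: one byte over the cap fails,
    // while the 1MB default passes.
    assert!(validate_merging_size(RAFS_MAX_CHUNK_SIZE + 1).is_err());
    assert!(validate_merging_size(RAFS_DEFAULT_CHUNK_SIZE).is_ok());
}
```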
rafs/src/metadata/layout/v6.rs

Lines changed: 10 additions & 2 deletions
```diff
@@ -17,6 +17,7 @@ use lazy_static::lazy_static;
 use nydus_utils::{compress, digest, round_up, ByteSize};
 use storage::device::{BlobFeatures, BlobInfo};
 use storage::meta::{BlobMetaHeaderOndisk, BLOB_FEATURE_4K_ALIGNED};
+use storage::RAFS_MAX_CHUNK_SIZE;
 
 use crate::metadata::{layout::RafsXAttrs, RafsStore, RafsSuperFlags};
 use crate::{impl_bootstrap_converter, impl_pub_getter_setter, RafsIoReader, RafsIoWrite};
@@ -352,7 +353,10 @@ impl RafsV6SuperBlockExt {
         }
 
         let chunk_size = u32::from_le(self.s_chunk_size) as u64;
-        if !chunk_size.is_power_of_two() || chunk_size < EROFS_BLOCK_SIZE {
+        if !chunk_size.is_power_of_two()
+            || chunk_size < EROFS_BLOCK_SIZE
+            || chunk_size > RAFS_MAX_CHUNK_SIZE
+        {
             return Err(einval!("invalid chunk size in Rafs v6 extended superblock"));
         }
 
@@ -1292,7 +1296,11 @@ impl RafsV6Blob {
         }
 
         let c_size = u32::from_le(self.chunk_size) as u64;
-        if c_size.count_ones() != 1 || c_size < EROFS_BLOCK_SIZE || c_size != chunk_size as u64 {
+        if c_size.count_ones() != 1
+            || c_size < EROFS_BLOCK_SIZE
+            || c_size > RAFS_MAX_CHUNK_SIZE
+            || c_size != chunk_size as u64
+        {
             error!(
                 "RafsV6Blob: idx {} invalid c_size {}, count_ones() {}",
                 blob_index,
```

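Both hunks now reject chunk sizes above RAFS_MAX_CHUNK_SIZE on top of the existing power-of-two and lower-bound checks. A hedged sketch of the combined invariant, assuming the conventional 4KiB EROFS block size:

```rust
const EROFS_BLOCK_SIZE: u64 = 4096; // assumed 4KiB EROFS block size
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;

// Note: `c_size.count_ones() != 1` in the second hunk is just another way
// of writing !c_size.is_power_of_two() for non-zero values.
fn is_valid_chunk_size(chunk_size: u64) -> bool {
    chunk_size.is_power_of_two()
        && chunk_size >= EROFS_BLOCK_SIZE
        && chunk_size <= RAFS_MAX_CHUNK_SIZE
}

fn main() {
    assert!(is_valid_chunk_size(1024 * 1024)); // 1MB default
    assert!(!is_valid_chunk_size(1024 * 1024 * 32)); // above the new 16MB cap
    assert!(!is_valid_chunk_size(4096 * 3)); // not a power of two
}
```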
src/bin/nydus-image/main.rs

Lines changed: 5 additions & 2 deletions
```diff
@@ -28,7 +28,7 @@ use serde::{Deserialize, Serialize};
 use nydus_app::{setup_logging, BuildTimeInfo};
 use nydus_rafs::RafsIoReader;
 use nydus_storage::factory::{BackendConfig, BlobFactory};
-use nydus_storage::RAFS_DEFAULT_CHUNK_SIZE;
+use nydus_storage::{RAFS_DEFAULT_CHUNK_SIZE, RAFS_MAX_CHUNK_SIZE};
 use nydus_utils::{compress, digest};
 
 use crate::builder::{Builder, DiffBuilder, DirectoryBuilder, StargzBuilder};
@@ -966,7 +966,10 @@ impl Command {
         let param = v.trim_start_matches("0x").trim_end_matches("0X");
         let chunk_size =
             u32::from_str_radix(param, 16).context(format!("invalid chunk size {}", v))?;
-        if chunk_size < 0x1000 || !chunk_size.is_power_of_two() {
+        if chunk_size as u64 > RAFS_MAX_CHUNK_SIZE
+            || chunk_size < 0x1000
+            || !chunk_size.is_power_of_two()
+        {
             bail!("invalid chunk size: {}", chunk_size);
         }
         Ok(chunk_size)
```

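Putting the hunk's context and additions together, the builder's chunk-size option parsing behaves roughly like the sketch below (`parse_chunk_size` is a hypothetical free-function form of the method):

```rust
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;

fn parse_chunk_size(v: &str) -> Result<u32, String> {
    // Strip the hex marker and parse base-16, as in the hunk's context lines.
    let param = v.trim_start_matches("0x").trim_end_matches("0X");
    let chunk_size =
        u32::from_str_radix(param, 16).map_err(|_| format!("invalid chunk size {}", v))?;
    // New upper bound, plus the pre-existing 4KiB floor and power-of-two check.
    if chunk_size as u64 > RAFS_MAX_CHUNK_SIZE
        || chunk_size < 0x1000
        || !chunk_size.is_power_of_two()
    {
        return Err(format!("invalid chunk size: {}", chunk_size));
    }
    Ok(chunk_size)
}

fn main() {
    assert_eq!(parse_chunk_size("0x100000").unwrap(), 0x100000); // 1MB: ok
    assert!(parse_chunk_size("0x2000000").is_err()); // 32MB: above the cap
    assert!(parse_chunk_size("0x3000").is_err()); // 12KiB: not a power of two
}
```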
src/bin/nydusd/fs_cache.rs

Lines changed: 10 additions & 1 deletion
```diff
@@ -584,7 +584,16 @@ impl FsCacheHandler {
             }
             Some(obj) => match obj.fetch_range_uncompressed(msg.off, msg.len) {
                 Ok(v) if v == msg.len as usize => {}
-                _ => debug!("fscache: failed to read data from blob object"),
+                Ok(v) => {
+                    warn!(
+                        "fscache: read data from blob object not matched: {} != {}",
+                        v, msg.len
+                    );
+                }
+                Err(e) => error!(
+                    "{}",
+                    format!("fscache: failed to read data from blob object: {}", e,)
+                ),
             },
         }
     }
```

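The replaced catch-all arm conflated two distinct outcomes. A compilable model of the new three-way handling, with a plain `io::Result` in place of `fetch_range_uncompressed` and `eprintln!` in place of the `warn!`/`error!` macros:

```rust
use std::io;

fn handle_fetch_result(result: io::Result<usize>, expected: usize) {
    match result {
        // Read the full requested length: nothing to report.
        Ok(v) if v == expected => {}
        // Short read: report the length mismatch.
        Ok(v) => eprintln!(
            "fscache: read data from blob object not matched: {} != {}",
            v, expected
        ),
        // Hard failure: report the underlying error.
        Err(e) => eprintln!("fscache: failed to read data from blob object: {}", e),
    }
}

fn main() {
    handle_fetch_result(Ok(4096), 4096); // silent
    handle_fetch_result(Ok(1024), 4096); // length mismatch
    handle_fetch_result(Err(io::Error::new(io::ErrorKind::Other, "EIO")), 4096);
}
```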
storage/src/cache/cachedfile.rs

Lines changed: 28 additions & 0 deletions
```diff
@@ -359,6 +359,34 @@ impl BlobObject for FileCacheEntry {
 
 impl FileCacheEntry {
     fn do_fetch_chunks(&self, chunks: &[BlobIoChunk]) -> Result<usize> {
+        if self.is_stargz() {
+            // FIXME: for stargz, we need to implement fetching multiple chunks. here
+            // is a heavy overhead workaround, needs to be optimized.
+            for chunk in chunks {
+                let mut buf = alloc_buf(chunk.uncompress_size() as usize);
+                self.read_raw_chunk(chunk, &mut buf, false, None)
+                    .map_err(|e| {
+                        eio!(format!(
+                            "read_raw_chunk failed to read and decompress stargz chunk, {:?}",
+                            e
+                        ))
+                    })?;
+                if self.dio_enabled {
+                    self.adjust_buffer_for_dio(&mut buf)
+                }
+                Self::persist_chunk(&self.file, chunk.uncompress_offset(), &buf).map_err(|e| {
+                    eio!(format!(
+                        "do_fetch_chunk failed to persist stargz chunk, {:?}",
+                        e
+                    ))
+                })?;
+                self.chunk_map
+                    .set_ready_and_clear_pending(chunk.as_base())
+                    .unwrap_or_else(|e| error!("set stargz chunk ready failed, {}", e));
+            }
+            return Ok(0);
+        }
+
         debug_assert!(!chunks.is_empty());
         let bitmap = self
             .chunk_map
```

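The new stargz branch is a per-chunk read-decompress, persist, mark-ready loop. The toy model below captures that control flow with a `Cursor` standing in for the cache file and a slice of bools for the chunk map; all names are stand-ins, not the crate's types:

```rust
use std::io::{Cursor, Seek, SeekFrom, Write};

// Stand-in for a decompressed BlobIoChunk.
struct Chunk {
    uncompress_offset: u64,
    data: Vec<u8>,
}

fn fetch_stargz_chunks(
    file: &mut Cursor<Vec<u8>>,
    chunks: &[Chunk],
    ready: &mut [bool],
) -> std::io::Result<usize> {
    for (i, chunk) in chunks.iter().enumerate() {
        // Persist the decompressed bytes at the chunk's uncompressed offset.
        file.seek(SeekFrom::Start(chunk.uncompress_offset))?;
        file.write_all(&chunk.data)?;
        // Mark the chunk ready so later reads are served from the cache.
        ready[i] = true;
    }
    Ok(0) // the real do_fetch_chunks also returns Ok(0) on this path
}

fn main() -> std::io::Result<()> {
    let mut file = Cursor::new(vec![0u8; 8192]);
    let chunks = vec![
        Chunk { uncompress_offset: 0, data: vec![1; 4096] },
        Chunk { uncompress_offset: 4096, data: vec![2; 4096] },
    ];
    let mut ready = vec![false; chunks.len()];
    fetch_stargz_chunks(&mut file, &chunks, &mut ready)?;
    assert!(ready.iter().all(|&r| r));
    Ok(())
}
```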
storage/src/cache/fscache/mod.rs

Lines changed: 1 addition & 4 deletions
```diff
@@ -166,9 +166,6 @@ impl FileCacheEntry {
         if blob_info.has_feature(BlobFeatures::V5_NO_EXT_BLOB_TABLE) {
             return Err(einval!("fscache does not support Rafs v5 blobs"));
         }
-        if blob_info.is_stargz() {
-            return Err(einval!("fscache does not support stargz blob file"));
-        }
         let file = blob_info
             .get_fscache_file()
             .ok_or_else(|| einval!("No fscache file associated with the blob_info"))?;
@@ -211,7 +208,7 @@
             is_get_blob_object_supported: true,
             is_compressed: false,
             is_direct_chunkmap: true,
-            is_stargz: false,
+            is_stargz: blob_info.is_stargz(),
             dio_enabled: true,
             need_validate: mgr.validate,
             prefetch_config,
```

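Instead of rejecting stargz blobs up front, the entry now records whether the blob is stargz so downstream paths (such as `do_fetch_chunks` above) can branch on it. A schematic of the flag propagation, with hypothetical struct definitions:

```rust
struct BlobInfo {
    stargz: bool,
}

impl BlobInfo {
    fn is_stargz(&self) -> bool {
        self.stargz
    }
}

struct FileCacheEntry {
    // Was hardcoded to `false` (stargz blobs were rejected earlier);
    // now taken from the blob metadata.
    is_stargz: bool,
}

fn new_entry(blob_info: &BlobInfo) -> FileCacheEntry {
    FileCacheEntry { is_stargz: blob_info.is_stargz() }
}

fn main() {
    assert!(new_entry(&BlobInfo { stargz: true }).is_stargz);
    assert!(!new_entry(&BlobInfo { stargz: false }).is_stargz);
}
```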
storage/src/cache/worker.rs

Lines changed: 7 additions & 4 deletions
```diff
@@ -22,7 +22,7 @@ use nydus_utils::async_helper::with_runtime;
 use nydus_utils::mpmc::Channel;
 
 use crate::cache::{BlobCache, BlobIoRange};
-use crate::RAFS_MAX_CHUNK_SIZE;
+use crate::RAFS_DEFAULT_CHUNK_SIZE;
 
 /// Configuration information for asynchronous workers.
 pub(crate) struct AsyncPrefetchConfig {
@@ -99,11 +99,14 @@ impl AsyncWorkerMgr {
         metrics: Arc<BlobcacheMetrics>,
         prefetch_config: Arc<AsyncPrefetchConfig>,
     ) -> Result<Self> {
-        // If the given value is less than maximum blob chunk size, it exceeds burst size of the
+        // If the given value is less than default blob chunk size, it exceeds burst size of the
         // limiter ending up with throttling all throughput, so ensure bandwidth is bigger than
-        // the maximum chunk size.
+        // the default chunk size.
        let tweaked_bw_limit = if prefetch_config.bandwidth_rate != 0 {
-            std::cmp::max(RAFS_MAX_CHUNK_SIZE as u32, prefetch_config.bandwidth_rate)
+            std::cmp::max(
+                RAFS_DEFAULT_CHUNK_SIZE as u32,
+                prefetch_config.bandwidth_rate,
+            )
         } else {
             0
         };
```

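Since RAFS_MAX_CHUNK_SIZE grew to 16MB, clamping against it would silently raise small bandwidth limits to 16MB/s; clamping against the unchanged 1MB default preserves the old behavior while still keeping the limiter's burst at least one chunk large. A sketch of the clamp:

```rust
const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024;

// Zero means "unlimited" and passes through; any non-zero rate is raised to
// at least one default-sized chunk so the limiter's burst can admit a chunk.
fn tweaked_bw_limit(bandwidth_rate: u32) -> u32 {
    if bandwidth_rate != 0 {
        std::cmp::max(RAFS_DEFAULT_CHUNK_SIZE as u32, bandwidth_rate)
    } else {
        0
    }
}

fn main() {
    assert_eq!(tweaked_bw_limit(0), 0); // unlimited stays unlimited
    assert_eq!(tweaked_bw_limit(4096), 1024 * 1024); // raised to 1MB
    assert_eq!(tweaked_bw_limit(8 * 1024 * 1024), 8 * 1024 * 1024); // untouched
}
```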
storage/src/lib.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -72,8 +72,8 @@ macro_rules! impl_getter {
 
 /// Default blob chunk size.
 pub const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024;
-/// Maximum blob chunk size.
-pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024;
+/// Maximum blob chunk size, 16MB.
+pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;
 
 /// Error codes related to storage subsystem.
 #[derive(Debug)]
```

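This divergence is the root of the series: the two constants used to be equal, so call sites could use them interchangeably, and the hunks above re-point each site at the constant it actually means. For reference:

```rust
const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024; // 1MB, unchanged
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16; // was 1MB, now 16MB

fn main() {
    assert_eq!(RAFS_MAX_CHUNK_SIZE, 0x100_0000); // 16MB in hex
    assert_eq!(RAFS_MAX_CHUNK_SIZE / RAFS_DEFAULT_CHUNK_SIZE, 16);
}
```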
storage/src/meta/mod.rs

Lines changed: 29 additions & 2 deletions
```diff
@@ -407,6 +407,7 @@ impl BlobMetaInfo {
             chunks: chunk_infos,
             base: base as *const u8,
             unmap_len: expected_size,
+            is_stargz: blob_info.is_stargz(),
         });
 
         Ok(BlobMetaInfo { state })
@@ -465,7 +466,9 @@
             index += 1;
             let entry = &infos[index];
             self.validate_chunk(entry)?;
-            if entry.uncompressed_offset() != last_end {
+
+            // For stargz chunks, disable this check.
+            if !self.state.is_stargz && entry.uncompressed_offset() != last_end {
                 return Err(einval!(format!(
                     "mismatch uncompressed {} size {} last_end {}",
                     entry.uncompressed_offset(),
@@ -562,7 +565,8 @@
 
     #[inline]
     fn validate_chunk(&self, entry: &BlobChunkInfoOndisk) -> Result<()> {
-        if entry.compressed_end() > self.state.compressed_size
+        // For stargz blob, self.state.compressed_size == 0, so don't validate it.
+        if (!self.state.is_stargz && entry.compressed_end() > self.state.compressed_size)
             || entry.uncompressed_end() > self.state.uncompressed_size
         {
             Err(einval!())
@@ -646,6 +650,8 @@ pub struct BlobMetaState {
     chunks: ManuallyDrop<Vec<BlobChunkInfoOndisk>>,
     base: *const u8,
     unmap_len: usize,
+    /// The blob meta is for an stargz image.
+    is_stargz: bool,
 }
 
 // Safe to Send/Sync because the underlying data structures are readonly
@@ -671,6 +677,25 @@ impl BlobMetaState {
         let mut start = 0;
         let mut end = 0;
 
+        if self.is_stargz {
+            // FIXME: since stargz chunks are not currently allocated chunk index in the order of uncompressed_offset,
+            // a binary search is not available for now, here is a heavy overhead workaround, need to be fixed.
+            for i in 0..self.chunk_count {
+                let off = if compressed {
+                    chunks[i as usize].compressed_offset()
+                } else {
+                    chunks[i as usize].uncompressed_offset()
+                };
+                if addr == off {
+                    return Ok(i as usize);
+                }
+            }
+            return Err(einval!(format!(
+                "can't find stargz chunk by offset {}",
+                addr,
+            )));
+        }
+
         while left < right {
             let mid = left + size / 2;
             // SAFETY: the call is made safe by the following invariants:
@@ -799,6 +824,7 @@ mod tests {
             ]),
             base: std::ptr::null(),
             unmap_len: 0,
+            is_stargz: false,
         };
 
         assert_eq!(state.get_chunk_index_nocheck(0, false).unwrap(), 0);
@@ -883,6 +909,7 @@
             ]),
             base: std::ptr::null(),
             unmap_len: 0,
+            is_stargz: false,
         };
         let info = BlobMetaInfo {
             state: Arc::new(state),
```

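Because stargz chunk indices are not ordered by uncompressed offset, the pre-existing binary search cannot be applied and the new path falls back to a linear scan. A self-contained model of the two lookups, simplified to exact-offset matching as the stargz loop uses:

```rust
// Each tuple is (compressed_offset, uncompressed_offset) for one chunk.
type ChunkOffsets = (u64, u64);

// Linear scan used for stargz blobs: O(n), but works on unsorted chunks.
fn find_linear(chunks: &[ChunkOffsets], addr: u64, compressed: bool) -> Option<usize> {
    chunks
        .iter()
        .position(|&(c, u)| addr == if compressed { c } else { u })
}

// Binary search used otherwise: O(log n), requires offsets sorted by the key.
fn find_binary(chunks: &[ChunkOffsets], addr: u64, compressed: bool) -> Option<usize> {
    chunks
        .binary_search_by_key(&addr, |&(c, u)| if compressed { c } else { u })
        .ok()
}

fn main() {
    let sorted = [(0, 0), (100, 4096), (250, 8192)];
    assert_eq!(find_binary(&sorted, 8192, false), Some(2));

    // Stargz-style layout: uncompressed offsets out of index order,
    // so only the linear scan finds the right chunk.
    let unsorted = [(0, 8192), (100, 0), (250, 4096)];
    assert_eq!(find_linear(&unsorted, 4096, false), Some(2));
}
```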