Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

repro and fix for #1788 #1853

Draft
wants to merge 3 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion gix-odb/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ pub struct Store {

/// The below state acts like a slot-map with each slot is mutable when the write lock is held, but readable independently of it.
/// This allows multiple files to be loaded concurrently if there are multiple handles requesting to load packs or additional indices.
/// The map is static and cannot typically change.
/// The map is static and cannot change.
/// It's read often and changed rarely.
pub(crate) files: Vec<types::MutableIndexAndPack>,

Expand Down
8 changes: 5 additions & 3 deletions gix-odb/src/store_impls/dynamic/load_index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,7 @@ impl super::Store {

// Figure out this number based on what we see while handling the existing indices
let mut num_loaded_indices = 0;
dbg!(indices_by_modification_time.len());
for (index_info, mtime) in indices_by_modification_time.into_iter().map(|(a, b, _)| (a, b)) {
match idx_by_index_path.remove(index_info.path()) {
Some(slot_idx) => {
Expand All @@ -266,7 +267,7 @@ impl super::Store {
Option::as_ref(&files_guard).expect("slot is set or we wouldn't know it points to this file");
if index_info.is_multi_index() && files.mtime() != mtime {
// we have a changed multi-pack index. We can't just change the existing slot as it may alter slot indices
// that are currently available. Instead we have to move what's there into a new slot, along with the changes,
// that are currently available. Instead, we have to move what's there into a new slot, along with the changes,
// and later free the slot or dispose of the index in the slot (like we do for removed/missing files).
index_paths_to_add.push_back((index_info, mtime, Some(slot_idx)));
// If the current slot is loaded, the soon-to-be copied multi-index path will be loaded as well.
Expand Down Expand Up @@ -295,10 +296,11 @@ impl super::Store {
.map_or(0, |idx| (idx + 1) % self.files.len());
let mut num_indices_checked = 0;
let mut needs_generation_change = false;
dbg!(index_paths_to_add.len(), next_possibly_free_index);
let mut slot_indices_to_remove: Vec<_> = idx_by_index_path.into_values().collect();
while let Some((mut index_info, mtime, move_from_slot_idx)) = index_paths_to_add.pop_front() {
'increment_slot_index: loop {
if num_indices_checked == self.files.len() {
if dbg!(num_indices_checked) == dbg!(self.files.len()) {
return Err(Error::InsufficientSlots {
current: self.files.len(),
needed: index_paths_to_add.len() + 1, /*the one currently popped off*/
Expand Down Expand Up @@ -502,7 +504,7 @@ impl super::Store {
}
// Unlike libgit2, do not sort by modification date, but by size and put the biggest indices first. That way
// the chance to hit an object should be higher. We leave it to the handle to sort by LRU.
// Git itself doesn't change the order which may safe time, but we want it to be stable which also helps some tests.
// Git itself doesn't change the order which may save time, but we want it to be stable which also helps some tests.
// NOTE: this will work well for well-packed repos or those using geometric repacking, but force us to open a lot
// of files when dealing with new objects, as there is no notion of recency here as would be with unmaintained
// repositories. Different algorithms should be provided, like newest packs first, and possibly a mix of both
Expand Down
8 changes: 8 additions & 0 deletions gix/src/commit.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
//!
#![allow(clippy::empty_docs)]

use std::convert::Infallible;

/// An empty array of a type usable with the `gix::easy` API to help declaring no parents should be used
pub const NO_PARENT_IDS: [gix_hash::ObjectId; 0] = [];

Expand All @@ -22,6 +24,12 @@ pub enum Error {
ReferenceEdit(#[from] crate::reference::edit::Error),
}

impl From<std::convert::Infallible> for Error {
    /// `Infallible` has no values, so this conversion can never actually execute.
    /// It exists only to satisfy trait bounds, e.g. `?` on APIs whose error type is `Infallible`.
    fn from(value: Infallible) -> Self {
        // An empty match on an uninhabited type is compiler-verified: if `Infallible`
        // ever gained a variant this would fail to compile, whereas `unreachable!()`
        // only asserts impossibility at runtime.
        match value {}
    }
}

///
#[cfg(feature = "revision")]
pub mod describe {
Expand Down
63 changes: 63 additions & 0 deletions gix/tests/gix/remote/fetch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,15 @@ mod shallow {
mod blocking_and_async_io {
use std::sync::atomic::AtomicBool;

use gix::interrupt::IS_INTERRUPTED;
use gix::{
config::tree::Protocol,
remote::{fetch, fetch::Status, Direction::Fetch},
};
use gix_features::progress;
use gix_odb::store::init::Slots;
use gix_protocol::maybe_async;
use gix_testtools::tempfile;
use gix_testtools::tempfile::TempDir;

use crate::{
Expand Down Expand Up @@ -85,6 +88,66 @@ mod blocking_and_async_io {
try_repo_rw(name).unwrap()
}

#[test]
fn fetch_more_packs_than_can_be_handled() -> gix_testtools::Result {
    // Create a new commit on `HEAD` pointing at the empty tree, so each fetch below
    // has exactly one new tip (and thus at least one new pack) to transfer.
    fn create_empty_commit(repo: &gix::Repository) -> anyhow::Result<()> {
        let name = repo.head_name()?.expect("no detached head");
        repo.commit(
            name.as_bstr(),
            "empty",
            gix::hash::ObjectId::empty_tree(repo.object_hash()),
            repo.try_find_reference(name.as_ref())?.map(|r| r.id()),
        )?;
        Ok(())
    }
    // Deliberately starve the object database of pack slots: with only 1 or 2 slots,
    // repeated fetches must refresh/reuse slots instead of growing the slot map.
    for max_packs in 1..=2 {
        let remote_dir = tempfile::tempdir()?;
        let remote_repo = gix::init_bare(remote_dir.path())?;
        create_empty_commit(&remote_repo)?;

        let local_dir = tempfile::tempdir()?;
        let (local_repo, _) = gix::clone::PrepareFetch::new(
            remote_repo.path(),
            local_dir.path(),
            gix::create::Kind::Bare,
            Default::default(),
            gix::open::Options::isolated().object_store_slots(Slots::Given(max_packs)),
        )?
        .fetch_only(gix::progress::Discard, &IS_INTERRUPTED)?;

        let remote = local_repo
            .branch_remote(
                local_repo.head_ref()?.expect("branch available").name().shorten(),
                Fetch,
            )
            .expect("remote is configured after clone")?;
        // Run more fetch rounds than there are slots so slot exhaustion and the
        // subsequent refresh path are both exercised at least once. A bounded range
        // (instead of `1..`) guarantees the test terminates once fetching works.
        for round in 1..=max_packs + 2 {
            create_empty_commit(&remote_repo)?;
            match remote
                .connect(Fetch)?
                .prepare_fetch(gix::progress::Discard, Default::default())?
                .receive(gix::progress::Discard, &IS_INTERRUPTED)
            {
                Ok(out) => {
                    // Every local tracking branch updated by this fetch must point at an
                    // object we can actually load — this is what breaks when pack
                    // refreshes don't work with exhausted slots.
                    for local_tracking_branch_name in out.ref_map.mappings.into_iter().filter_map(|m| m.local) {
                        let r = local_repo.find_reference(&local_tracking_branch_name)?;
                        r.id()
                            .object()
                            .expect("object should be present after fetching, triggering pack refreshes works");
                    }
                }
                Err(err) => panic!(
                    "fetch round {round} with {max_packs} pack slot(s) should succeed by refreshing the ODB: {err}"
                ),
            }
        }
    }
    Ok(())
}

#[test]
#[cfg(feature = "blocking-network-client")]
#[allow(clippy::result_large_err)]
Expand Down
Loading