Gradually getting rid of legacy shreds in tests #5276

Merged · 3 commits · Mar 14, 2025
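For context, the changes below delete or rewrite tests that mutate legacy shreds directly via `set_index` / `set_slot`, and instead build merkle-variant shreds with the file's existing test helpers and then select only the shred indices a test needs. A minimal sketch of the new pattern, composed from calls that appear in this diff (it is not itself part of the PR):

```rust
// Sketch only: generate merkle-variant shreds and keep every `gap`-th index,
// rather than constructing legacy shreds and overwriting their indices.
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

let slot = 0;
let gap: u64 = 10;
let entries = create_ticks(100, 0, Hash::default());
let mut shreds =
    entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ true);
// Keep shreds at indices 0, gap, 2*gap, ... so the test still sees index gaps,
// then cap how many shreds get inserted.
shreds.retain(|s| (s.index() % gap as u32) == 0);
shreds.truncate(2);
blockstore.insert_shreds(shreds, None, false).unwrap();
```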
201 changes: 8 additions & 193 deletions ledger/src/blockstore.rs
@@ -5904,45 +5904,6 @@ pub mod tests {
}
}

/*
#[test]
pub fn test_iteration_order() {
let slot = 0;
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

// Write entries
let num_entries = 8;
let entries = make_tiny_test_entries(num_entries);
let mut shreds = entries.to_single_entry_shreds();

for (i, b) in shreds.iter_mut().enumerate() {
b.set_index(1 << (i * 8));
b.set_slot(0);
}

blockstore
.write_shreds(&shreds)
.expect("Expected successful write of shreds");

let mut db_iterator = blockstore
.db
.cursor::<cf::Data>()
.expect("Expected to be able to open database iterator");

db_iterator.seek((slot, 1));

// Iterate through blockstore
for i in 0..num_entries {
assert!(db_iterator.valid());
let (_, current_index) = db_iterator.key().expect("Expected a valid key");
assert_eq!(current_index, (1 as u64) << (i * 8));
db_iterator.next();
}

}
*/

#[test]
fn test_get_slot_entries1() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
@@ -5966,45 +5927,6 @@ pub mod tests {
);
}

// This test seems to be unnecessary with introduction of data shreds. There are no
// guarantees that a particular shred index contains a complete entry
#[test]
#[ignore]
pub fn test_get_slot_entries2() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

// Write entries
let num_slots = 5_u64;
let mut index = 0;
for slot in 0..num_slots {
let entries = create_ticks(slot + 1, 0, Hash::default());
let last_entry = entries.last().unwrap().clone();
let mut shreds = entries_to_test_shreds(
&entries,
slot,
slot.saturating_sub(1),
false,
0,
true, // merkle_variant
);
for b in shreds.iter_mut() {
b.set_index(index);
b.set_slot(slot);
index += 1;
}
blockstore
.insert_shreds(shreds, None, false)
.expect("Expected successful write of shreds");
assert_eq!(
blockstore
.get_slot_entries(slot, u64::from(index - 1))
.unwrap(),
vec![last_entry],
);
}
}

#[test]
fn test_get_slot_entries3() {
// Test inserting/fetching shreds which contain multiple entries per shred
@@ -6743,106 +6665,6 @@ pub mod tests {
}
}

/*
#[test]
pub fn test_chaining_tree() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

let num_tree_levels = 6;
assert!(num_tree_levels > 1);
let branching_factor: u64 = 4;
// Number of slots that will be in the tree
let num_slots = (branching_factor.pow(num_tree_levels) - 1) / (branching_factor - 1);
let erasure_config = ErasureConfig::default();
let entries_per_slot = erasure_config.num_data() as u64;
assert!(entries_per_slot > 1);

let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);

// Insert tree one slot at a time in a random order
let mut slots: Vec<_> = (0..num_slots).collect();

// Get shreds for the slot
slots.shuffle(&mut thread_rng());
for slot in slots {
// Get shreds for the slot "slot"
let slot_shreds = &mut shreds
[(slot * entries_per_slot) as usize..((slot + 1) * entries_per_slot) as usize];
for shred in slot_shreds.iter_mut() {
// Get the parent slot of the slot in the tree
let slot_parent = {
if slot == 0 {
0
} else {
(slot - 1) / branching_factor
}
};
shred.set_parent(slot_parent);
}

let shared_shreds: Vec<_> = slot_shreds
.iter()
.cloned()
.map(|shred| Arc::new(RwLock::new(shred)))
.collect();
let mut coding_generator = CodingGenerator::new_from_config(&erasure_config);
let coding_shreds = coding_generator.next(&shared_shreds);
assert_eq!(coding_shreds.len(), erasure_config.num_coding());

let mut rng = thread_rng();

// Randomly pick whether to insert erasure or coding shreds first
if rng.gen_bool(0.5) {
blockstore.write_shreds(slot_shreds).unwrap();
blockstore.put_shared_coding_shreds(&coding_shreds).unwrap();
} else {
blockstore.put_shared_coding_shreds(&coding_shreds).unwrap();
blockstore.write_shreds(slot_shreds).unwrap();
}
}

// Make sure everything chains correctly
let last_level =
(branching_factor.pow(num_tree_levels - 1) - 1) / (branching_factor - 1);
for slot in 0..num_slots {
let slot_meta = blockstore.meta(slot).unwrap().unwrap();
assert_eq!(slot_meta.consumed, entries_per_slot);
assert_eq!(slot_meta.received, entries_per_slot);
assert!(slot_meta.is_connected());
let slot_parent = {
if slot == 0 {
0
} else {
(slot - 1) / branching_factor
}
};
assert_eq!(slot_meta.parent_slot, Some(slot_parent));

let expected_children: HashSet<_> = {
if slot >= last_level {
HashSet::new()
} else {
let first_child_slot = min(num_slots - 1, slot * branching_factor + 1);
let last_child_slot = min(num_slots - 1, (slot + 1) * branching_factor);
(first_child_slot..last_child_slot + 1).collect()
}
};

let result: HashSet<_> = slot_meta.next_slots.iter().cloned().collect();
if expected_children.len() != 0 {
assert_eq!(slot_meta.next_slots.len(), branching_factor as usize);
} else {
assert_eq!(slot_meta.next_slots.len(), 0);
}
assert_eq!(expected_children, result);
}

// No orphan slots should exist
assert!(blockstore.orphans_cf.is_empty().unwrap())

}
*/
#[test]
fn test_slot_range_connected_chain() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
@@ -7058,17 +6880,12 @@ pub mod tests {
let gap: u64 = 10;
assert!(gap > 3);
// Create enough entries to ensure there are at least two shreds created
let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
let num_entries = max_ticks_per_n_shreds(1, Some(data_buffer_size)) + 1;
let entries = create_ticks(num_entries, 0, Hash::default());
let entries = create_ticks(1, 0, Hash::default());
let mut shreds =
entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ false);
let num_shreds = shreds.len();
assert!(num_shreds > 1);
for (i, s) in shreds.iter_mut().enumerate() {
s.set_index(i as u32 * gap as u32);
s.set_slot(slot);
}
entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ true);
shreds.retain(|s| (s.index() % gap as u32) == 0);
let num_shreds = 2;
shreds.truncate(num_shreds);
blockstore.insert_shreds(shreds, None, false).unwrap();

// Index of the first shred is 0
@@ -7307,15 +7124,13 @@ pub mod tests {

let entries = create_ticks(100, 0, Hash::default());
let mut shreds =
entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ false);
assert!(shreds.len() > 2);
shreds.drain(2..);
entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ true);

const ONE: u64 = 1;
const OTHER: u64 = 4;
assert!(shreds.len() > OTHER as usize);

shreds[0].set_index(ONE as u32);
shreds[1].set_index(OTHER as u32);
let shreds = vec![shreds.remove(OTHER as usize), shreds.remove(ONE as usize)];

// Insert one shred at index = first_index
blockstore.insert_shreds(shreds, None, false).unwrap();