Bug fixes: ungraceful test crash fixes #637


Closed
wants to merge 39 commits into from

Commits (39)
8aa1073
Instant time instead of SystemTime
Jan 24, 2025
58ba334
fn clean_old_pending_outpoints + lint
Jan 25, 2025
a169b4d
Merge branch 'kaspanet:master' into master
Feb 7, 2025
0bffb60
optimization: flow.rs; fn sync_missing_relay_past_headers
Feb 7, 2025
200cda0
Merge branch 'master' into master
Feb 7, 2025
c1a7c04
Revert "optimization: flow.rs; fn sync_missing_relay_past_headers"
Feb 12, 2025
c677877
Update lib.rs
Feb 13, 2025
a8341c4
Update test_consensus.rs
Feb 13, 2025
4e91574
Update test_consensus.rs
Feb 13, 2025
328044d
Update relations.rs
Feb 13, 2025
6b08428
Update tips.rs
Feb 13, 2025
c6d3b99
Update build.rs
Feb 13, 2025
e1cf33e
Update validate.rs
Feb 13, 2025
5a6021e
Update inquirer.rs
Feb 13, 2025
29012b9
Update relations.rs
Feb 13, 2025
80ec39e
Update access.rs
Feb 13, 2025
ef1b52e
Update set_access.rs
Feb 13, 2025
74a370a
Update utils.rs ; create_temp_db returns Result
Feb 13, 2025
a6d39cf
Update processor.rs ; create_temp_db returns Result
Feb 13, 2025
a68d477
Update index.rs ; create_temp_db returns Result
Feb 13, 2025
9a6be4c
Update single.rs ; UtxosChangedSubscription
Feb 13, 2025
68869e1
Update connection_handler.rs ; fn serve
Feb 13, 2025
69c2e54
Update main.rs
Feb 13, 2025
724b3ed
Update network.rs
Feb 13, 2025
8fe8629
Update consensus_integration_tests.rs
Feb 13, 2025
75bec2c
linting
Feb 13, 2025
00756be
tmp files created by db/consensus not dropped since Arc used, which l…
Feb 13, 2025
3cfdb74
add error reporting to get_temp_dir creation, since testing sometimes…
Feb 13, 2025
40454b6
we can unwrap since error is reported earlier, otherwise double error…
Feb 13, 2025
fc848e6
linting
Feb 13, 2025
f19d651
add cargo test non--release to check overflows
Feb 14, 2025
78f194c
added warn msg to UtxosChangedSubscription
Feb 14, 2025
9eeadf3
Merge branch 'kaspanet:master' into testing-fixes
Feb 26, 2025
b0fdde1
refactor: add error! instead of log::error
Feb 26, 2025
1770afa
cargo test to nextest
Mar 1, 2025
162ee59
Merge branch 'master' into testing-fixes
Mar 20, 2025
b4b7e0f
Merge branch 'kaspanet:master' into testing-fixes
Apr 1, 2025
52b65b6
allocate 250k input vector to heap, so it does not stack overflow, be…
Apr 1, 2025
fb2607a
fix: missing run
Apr 1, 2025
3 changes: 3 additions & 0 deletions .github/workflows/ci.yaml
@@ -113,6 +113,9 @@ jobs:
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}

- name: Run cargo tests non--release
Collaborator: We moved away from test and switched to using nextest. But I do wonder why nextest doesn't capture the errors that test is displaying here.

Author: cargo test still shows tests failing that nextest does not, so what is a recommended solution?

run: cargo nextest run --no-fail-fast

- name: Run cargo build with devnet-prealloc feature
run: cargo build --release --features devnet-prealloc --workspace --all --tests --benches

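For context on why a separate non-release test step catches failures that a release run hides: the debug profile compiles with overflow-checks enabled, so integer overflow panics instead of silently wrapping. A minimal sketch (not from the PR):

    use std::hint::black_box;

    fn main() {
        let x: u8 = black_box(255);
        // Debug profile (overflow-checks on): panics with
        // "attempt to add with overflow".
        // Release profile: wraps to 0 silently.
        let y = x + 1;
        println!("{y}");
    }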
2 changes: 1 addition & 1 deletion components/addressmanager/src/lib.rs
@@ -546,7 +546,7 @@ mod address_store_with_cache {
// Assert that initial distribution is skewed, and hence not uniform from the outset.
assert!(bucket_reduction_ratio >= 1.25);

let db = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let db = create_temp_db!(ConnBuilder::default().with_files_limit(10)).expect("Failed to create temp db");
let config = Config::new(SIMNET_PARAMS);
let (am, _) = AddressManager::new(Arc::new(config), db.1, Arc::new(TickService::default()));

4 changes: 2 additions & 2 deletions consensus/src/consensus/test_consensus.rs
@@ -68,7 +68,7 @@ impl TestConsensus {

/// Creates a test consensus instance based on `config` with a temp DB and the provided `notification_sender`
pub fn with_notifier(config: &Config, notification_sender: Sender<Notification>, context: SubscriptionContext) -> Self {
let (db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let notification_root = Arc::new(ConsensusNotificationRoot::with_context(notification_sender, context));
let counters = Default::default();
let tx_script_cache_counters = Default::default();
@@ -89,7 +89,7 @@ impl TestConsensus {

/// Creates a test consensus instance based on `config` with a temp DB and no notifier
pub fn new(config: &Config) -> Self {
let (db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let (dummy_notification_sender, _) = async_channel::unbounded();
let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender));
let counters = Default::default();
2 changes: 1 addition & 1 deletion consensus/src/model/stores/relations.rs
@@ -419,7 +419,7 @@ mod tests {

#[test]
fn test_db_relations_store() {
let (lt, db) = create_temp_db!(kaspa_database::prelude::ConnBuilder::default().with_files_limit(10));
let (lt, db) = create_temp_db!(kaspa_database::prelude::ConnBuilder::default().with_files_limit(10)).unwrap();
test_relations_store(DbRelationsStore::new(
db,
0,
2 changes: 1 addition & 1 deletion consensus/src/model/stores/tips.rs
@@ -103,7 +103,7 @@ mod tests {

#[test]
fn test_update_tips() {
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let mut store = DbTipsStore::new(db.clone());
store.add_tip(1.into(), &[]).unwrap();
store.add_tip(3.into(), &[]).unwrap();
2 changes: 1 addition & 1 deletion consensus/src/processes/pruning_proof/build.rs
@@ -67,7 +67,7 @@ impl PruningProofManager {
return vec![];
}

let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap();
let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db);

2 changes: 1 addition & 1 deletion consensus/src/processes/pruning_proof/validate.rs
@@ -182,7 +182,7 @@ impl PruningProofManager {

let headers_estimate = self.estimate_proof_unique_size(proof);

let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize);
let headers_store =
Arc::new(DbHeadersStore::new(db.clone(), CachePolicy::Count(headers_estimate), CachePolicy::Count(headers_estimate)));
4 changes: 2 additions & 2 deletions consensus/src/processes/reachability/inquirer.rs
@@ -386,7 +386,7 @@ mod tests {
/// Runs a DAG test-case with full verification using the staging store mechanism.
/// Note: runtime is quadratic in the number of blocks so should be used with mildly small DAGs (~50)
fn run_dag_test_case_with_staging(test: &DagTestCase) {
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let cache_policy = CachePolicy::Count(test.blocks.len() / 3);
let reachability = RwLock::new(DbReachabilityStore::new(db.clone(), cache_policy, cache_policy));
let mut relations = DbRelationsStore::with_prefix(db.clone(), &[], CachePolicy::Empty, CachePolicy::Empty);
@@ -520,7 +520,7 @@ mod tests {
run_dag_test_case(&mut relations, &mut reachability, &test);

// Run with direct DB stores
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let cache_policy = CachePolicy::Count(test.blocks.len() / 3);
let mut reachability = DbReachabilityStore::new(db.clone(), cache_policy, cache_policy);
let mut relations = DbRelationsStore::new(db, 0, cache_policy, cache_policy);
2 changes: 1 addition & 1 deletion consensus/src/processes/relations.rs
@@ -159,7 +159,7 @@ mod tests {

#[test]
fn test_delete_level_relations_zero_cache() {
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let mut relations = DbRelationsStore::new(db.clone(), 0, CachePolicy::Empty, CachePolicy::Empty);
relations.insert(ORIGIN, Default::default()).unwrap();
relations.insert(1.into(), Arc::new(vec![ORIGIN])).unwrap();
2 changes: 1 addition & 1 deletion database/src/access.rs
@@ -229,7 +229,7 @@ mod tests {

#[test]
fn test_delete_all() {
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let access = CachedDbAccess::<Hash, u64>::new(db.clone(), CachePolicy::Count(2), vec![1, 2]);

access.write_many(DirectDbWriter::new(&db), &mut (0..16).map(|i| (i.into(), 2))).unwrap();
2 changes: 1 addition & 1 deletion database/src/set_access.rs
@@ -205,7 +205,7 @@ mod tests {

#[test]
fn test_delete_bucket() {
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).expect("Failed to create temp db");
let access = DbSetAccess::<Hash, u64>::new(db.clone(), vec![1, 2]);

for i in 0..16 {
31 changes: 23 additions & 8 deletions database/src/utils.rs
@@ -41,23 +41,38 @@ impl Drop for DbLifetime {
}
}

pub fn get_kaspa_tempdir() -> TempDir {
pub fn get_kaspa_tempdir() -> Result<TempDir, std::io::Error> {
let global_tempdir = std::env::temp_dir();
let kaspa_tempdir = global_tempdir.join("rusty-kaspa");
std::fs::create_dir_all(kaspa_tempdir.as_path()).unwrap();
let db_tempdir = tempfile::tempdir_in(kaspa_tempdir.as_path()).unwrap();
db_tempdir
std::fs::create_dir_all(&kaspa_tempdir).map_err(|err| {
std::io::Error::new(err.kind(), format!("Failed to create kaspa directory '{}': {}", kaspa_tempdir.display(), err))
})?;
tempfile::tempdir_in(&kaspa_tempdir).map_err(|err| {
std::io::Error::new(err.kind(), format!("Failed to create db tempdir in '{}': {}", kaspa_tempdir.display(), err))
})
}

/// Creates a DB within a temp directory under `<OS SPECIFIC TEMP DIR>/rusty-kaspa`
/// Callers must keep the `DbLifetime` guard for as long as they wish the DB to exist.
#[macro_export]
macro_rules! create_temp_db {
($conn_builder: expr) => {{
let db_tempdir = $crate::utils::get_kaspa_tempdir();
let db_path = db_tempdir.path().to_owned();
let db = $conn_builder.with_db_path(db_path).build().unwrap();
($crate::utils::DbLifetime::new(db_tempdir, std::sync::Arc::downgrade(&db)), db)
// Create the temporary directory.
let db_tempdir = $crate::utils::get_kaspa_tempdir().unwrap();
// Extract and clone the DB path for later use (for error messages).
let db_tempdir_path = db_tempdir.path().to_owned();
// Build the database.
$conn_builder
.with_db_path(db_tempdir_path.clone())
.build()
.map(|db| {
// On success, move `db_tempdir` into the DbLifetime.
($crate::utils::DbLifetime::new(db_tempdir, std::sync::Arc::downgrade(&db)), db)
})
.map_err(|e| {
// Use the cloned path for the error message.
format!("Failed to create temp db at {}: {}", db_tempdir_path.display(), e)
})
}};
}

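Since the macro now yields a Result, callers can also propagate the failure with `?` instead of unwrapping; a minimal sketch (hypothetical caller, not from the PR):

    fn open_scratch_db() -> Result<(), String> {
        // `?` surfaces the formatted "Failed to create temp db at ..." message;
        // the returned lifetime guard must be kept alive while the DB is in use.
        let (_lifetime, _db) = create_temp_db!(ConnBuilder::default().with_files_limit(10))?;
        Ok(())
    }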
2 changes: 1 addition & 1 deletion indexes/processor/src/processor.rs
@@ -154,7 +154,7 @@ mod tests {
impl NotifyPipeline {
fn new() -> Self {
let (consensus_sender, consensus_receiver) = unbounded();
let (utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let config = Arc::new(Config::new(DEVNET_PARAMS));
let tc = TestConsensus::new(&config);
tc.init();
2 changes: 1 addition & 1 deletion indexes/utxoindex/src/index.rs
@@ -253,7 +253,7 @@ mod tests {

// Initialize all components, and virtual change emulator proxy.
let mut virtual_change_emulator = VirtualChangeEmulator::new();
let (_utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let config = Config::new(DEVNET_PARAMS);
let tc = Arc::new(TestConsensus::new(&config));
let consensus_manager = Arc::new(ConsensusManager::from_consensus(tc.consensus_clone()));
16 changes: 11 additions & 5 deletions notify/src/subscription/single.rs
@@ -383,11 +383,17 @@ impl Display for UtxosChangedSubscription {

impl Drop for UtxosChangedSubscription {
fn drop(&mut self) {
trace!(
"UtxosChangedSubscription: {} in total (drop {})",
UTXOS_CHANGED_SUBSCRIPTIONS.fetch_sub(1, Ordering::SeqCst) - 1,
self
);
// Prevent underflow; previously this panicked in debug builds with:
// [ERROR] thread 'tokio-runtime-worker' panicked at notify/src/subscription/single.rs:388:13: attempt to subtract with overflow
let _ = UTXOS_CHANGED_SUBSCRIPTIONS.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |count| {
Collaborator: What if we just did:

    UTXOS_CHANGED_SUBSCRIPTIONS.fetch_sub(1, Ordering::SeqCst).saturating_sub(1)

Also, I wonder what the root cause of UTXOS_CHANGED_SUBSCRIPTIONS being 0 is. The above fixes the underflow, but the incorrect usage that allowed the situation still needs to be determined. The expectation here is that UTXOS_CHANGED_SUBSCRIPTIONS should always be above 0 when drop occurs, OR we should document the situations in which it can be 0.

Author (@ghost, Feb 26, 2025): AFAIK saturating_sub only applies to the old value returned by fetch_sub, not to the atomic itself, so it doesn't prevent the actual atomic variable from underflowing. The suggested fetch_update solution only applies the subtraction if the value is greater than zero.

Author: [image] By the way, this happens frequently in testing.

if count > 0 {
Some(count - 1)
} else {
log::warn!("Tried subtracting UtxosChangedSubscription when it's already zero!");
None
}
});

trace!("UtxosChangedSubscription: {} in total (drop {})", UTXOS_CHANGED_SUBSCRIPTIONS.load(Ordering::SeqCst), self);
}
}

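To make the underflow mechanics concrete (a standalone sketch, not code from the PR): the old Drop computed `fetch_sub(1, ...) - 1` for the trace message, so when the counter was already 0 the plain `- 1` on the returned old value panicked in debug builds, while the atomic itself silently wrapped to usize::MAX. `fetch_update` can decline the write entirely:

    use std::sync::atomic::{AtomicUsize, Ordering};

    static COUNT: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        // The closure returns None when the count is already 0, so nothing
        // is stored and the Err carries the unchanged current value.
        let res = COUNT.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |c| c.checked_sub(1));
        assert!(res.is_err());
        assert_eq!(COUNT.load(Ordering::SeqCst), 0); // counter untouched
    }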
6 changes: 4 additions & 2 deletions protocol/p2p/src/core/connection_handler.rs
@@ -5,7 +5,7 @@ use crate::pb::{
};
use crate::{ConnectionInitializer, Router};
use futures::FutureExt;
use kaspa_core::{debug, info};
use kaspa_core::{debug, error, info};
use kaspa_utils::networking::NetAddress;
use kaspa_utils_tower::{
counters::TowerConnectionCounters,
@@ -85,9 +85,11 @@ impl ConnectionHandler {
.serve_with_shutdown(serve_address.into(), termination_receiver.map(drop))
.await;

// This was a panic! before, which crashed tests ungracefully;
// the error is now reported via error! instead.
match serve_result {
Ok(_) => info!("P2P Server stopped: {}", serve_address),
Err(err) => panic!("P2P, Server {serve_address} stopped with error: {err:?}"),
Err(err) => error!("P2P, Server {serve_address} stopped with error: {err:?}"),
}
});
Ok(termination_sender)
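For background on the panic!-to-error! change (a sketch of tokio's general behavior, not code from the PR): a panic inside a spawned task does not unwind into the surrounding program; the runtime captures it as a JoinError, so an unobserved server task simply died mid-test:

    #[tokio::main]
    async fn main() {
        let handle = tokio::spawn(async {
            panic!("server stopped with error");
        });
        // The panic is confined to the task and surfaces here as a
        // JoinError rather than crashing the whole process.
        assert!(handle.await.unwrap_err().is_panic());
    }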
3 changes: 2 additions & 1 deletion simpa/src/main.rs
@@ -301,7 +301,8 @@ fn main_impl(mut args: Args) {
}

// Benchmark the DAG validation time
let (_lifetime2, db2) = create_temp_db!(ConnBuilder::default().with_parallelism(num_cpus::get()).with_files_limit(default_fd));
let (_lifetime2, db2) =
create_temp_db!(ConnBuilder::default().with_parallelism(num_cpus::get()).with_files_limit(default_fd)).unwrap();
let (dummy_notification_sender, _) = unbounded();
let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender));
let consensus2 = Arc::new(Consensus::new(
6 changes: 3 additions & 3 deletions simpa/src/simulator/network.rs
@@ -71,10 +71,10 @@ impl KaspaNetworkSimulator {
(true, Some(dir), false, _) => create_permanent_db!(dir, builder),

(_, _, true, Some(rocksdb_stats_period_sec)) => {
create_temp_db!(builder.enable_stats().with_stats_period(rocksdb_stats_period_sec))
create_temp_db!(builder.enable_stats().with_stats_period(rocksdb_stats_period_sec)).unwrap()
}
(_, _, true, None) => create_temp_db!(builder.enable_stats()),
(_, _, false, _) => create_temp_db!(builder),
(_, _, true, None) => create_temp_db!(builder.enable_stats()).unwrap(),
(_, _, false, _) => create_temp_db!(builder).unwrap(),
};

let (dummy_notification_sender, _) = unbounded();
2 changes: 1 addition & 1 deletion testing/integration/src/common/daemon.rs
@@ -135,7 +135,7 @@ impl Daemon {
}

pub fn with_manager(client_manager: Arc<ClientManager>, fd_total_budget: i32) -> Daemon {
let appdir_tempdir = get_kaspa_tempdir();
let appdir_tempdir = get_kaspa_tempdir().unwrap();
client_manager.args.write().appdir = Some(appdir_tempdir.path().to_str().unwrap().to_owned());
let (core, _) = create_core_with_runtime(&Default::default(), &client_manager.args.read(), fd_total_budget);
let async_service = &Arc::downcast::<AsyncRuntime>(core.find(AsyncRuntime::IDENT).unwrap().arc_any()).unwrap();
21 changes: 14 additions & 7 deletions testing/integration/src/consensus_integration_tests.rs
@@ -128,7 +128,7 @@ fn reachability_stretch_test(use_attack_json: bool) {
map.get_mut(&blocks[0]).unwrap().parents.push(root);

// Act
let (_temp_db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_temp_db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let mut store = DbReachabilityStore::new(db.clone(), CachePolicy::Count(50_000), CachePolicy::Count(50_000));
let mut relations = DbRelationsStore::new(db, 0, CachePolicy::Count(100_000), CachePolicy::Count(100_000)); // TODO: remove level
let mut builder = DagBuilder::new(&mut store, &mut relations);
@@ -955,9 +955,9 @@ async fn json_test(file_path: &str, concurrency: bool) {
let notify_service = Arc::new(NotifyService::new(tc.notification_root(), notification_recv, subscription_context.clone()));

// External storage for storing block bodies. This allows separating header and body processing phases
let (_external_db_lifetime, external_storage) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_external_db_lifetime, external_storage) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let external_block_store = DbBlockTransactionsStore::new(external_storage, CachePolicy::Count(config.perf.block_data_cache_size));
let (_utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
let (_utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)).unwrap();
let consensus_manager = Arc::new(ConsensusManager::new(Arc::new(TestConsensusFactory::new(tc.clone()))));
let utxoindex = UtxoIndex::new(consensus_manager.clone(), utxoindex_db).unwrap();
let index_service = Arc::new(IndexService::new(
@@ -1739,7 +1739,7 @@ async fn staging_consensus_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS).build();

let db_tempdir = get_kaspa_tempdir();
let db_tempdir = get_kaspa_tempdir().unwrap();
let db_path = db_tempdir.path().to_owned();
let consensus_db_dir = db_path.join("consensus");
let meta_db_dir = db_path.join("meta");
@@ -1773,6 +1773,7 @@ async fn staging_consensus_test() {

core.shutdown();
core.join(joins);
drop(consensus_manager);
}

/// Tests the KIP-10 transaction introspection opcode activation by verifying that:
@@ -1826,7 +1827,7 @@ async fn run_kip10_activation_test() {
let mut genesis_multiset = MuHash::new();
consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset);
consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap();
consensus.init();
let wait_handles = consensus.init();
Collaborator: Add comments on why this is necessary.

Author: This was done in previous tests as well. Here:

    pub fn init(&self) -> Vec<JoinHandle<()>> {
        self.consensus.run_processors()
    }

and

    pub fn shutdown(&mut self) {
        self.core.shutdown();
        self.join();
    }

where core is wrapped in an Arc, so it is reference counted. Without the explicit shutdown some background threads might stay running; I thought it might be related to hitting system file descriptor limits and not getting /tmp/rusty-kaspa cleaned up early.


// Build blockchain up to one block before activation
let mut index = 0;
@@ -1885,6 +1886,8 @@ async fn run_kip10_activation_test() {
let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.clone()]).await;
assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid)));
assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id));
consensus.shutdown(wait_handles);
Collaborator: Same here and the rest of similar changes: add comments on why this is necessary.

drop(consensus);
}
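The lifecycle pattern these test changes adopt, condensed from the lines above (names as in the PR):

    let wait_handles = consensus.init();  // spawn the processor threads
    // ... drive blocks through the consensus under test ...
    consensus.shutdown(wait_handles);     // signal termination and join the threads
    drop(consensus);                      // release the Arc so temp DB files can be cleaned up

Without the explicit shutdown and drop, background threads can outlive the test and keep the temp DB alive, which ties into the file descriptor pressure and missed /tmp/rusty-kaspa cleanups described above.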

#[tokio::test]
@@ -1981,7 +1984,7 @@ async fn payload_activation_test() {
let mut genesis_multiset = MuHash::new();
consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset);
consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap();
consensus.init();
let wait_handles = consensus.init();

// Build blockchain up to one block before activation
let mut index = 0;
@@ -2048,6 +2051,8 @@ async fn payload_activation_test() {

assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid)));
assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id));
consensus.shutdown(wait_handles);
drop(consensus);
}

#[tokio::test]
@@ -2114,7 +2119,7 @@ async fn runtime_sig_op_counting_test() {
let mut genesis_multiset = MuHash::new();
consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset);
consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap();
consensus.init();
let wait_handles = consensus.init();

// Build blockchain up to one block before activation
let mut index = 0;
@@ -2184,4 +2189,6 @@ async fn runtime_sig_op_counting_test() {
// Test 2: After activation, tx should be accepted as runtime counting only sees 1 executed sig op
let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx]).await;
assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid)));
consensus.shutdown(wait_handles);
drop(consensus);
}
4 changes: 3 additions & 1 deletion wallet/core/src/tx/generator/test.rs
@@ -721,7 +721,9 @@ fn test_generator_inputs_32k_outputs_2_fees_exclude() -> Result<()> {
#[test]
fn test_generator_inputs_250k_outputs_2_sweep() -> Result<()> {
let f = 130.0;
let generator = make_generator(test_network_id(), &[f; 250_000], &[], Fees::None, change_address, PaymentDestination::Change);
let head: Vec<f64> = vec![f; 250_000]; // NOTE: allocated on the heap (unlike the previous fixed-size array) to prevent a stack overflow
let generator = make_generator(test_network_id(), &head, &[], Fees::None, change_address, PaymentDestination::Change);
generator.unwrap().harness().accumulate(2875).finalize();
Ok(())
}
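On why the Vec avoids the overflow (a minimal sketch, not from the PR): a fixed-size array lives inline in the current stack frame, and 250,000 f64 values is roughly 2 MB, right at the default 2 MiB stack of a spawned test thread, whereas a Vec keeps its elements on the heap:

    fn main() {
        // ~2 MB inline in the stack frame; this form can overflow the
        // ~2 MiB default stack of a spawned test thread:
        // let head = [130.0f64; 250_000];

        // Heap-backed: only the (ptr, len, capacity) triple is on the stack.
        let head: Vec<f64> = vec![130.0; 250_000];
        assert_eq!(head.len(), 250_000);
    }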