Skip to content

Commit 67b6d4f

Browse files
committed
Use separate schemas for each unit test.
1 parent 9c48bce commit 67b6d4f

File tree

4 files changed

+54
-34
lines changed

4 files changed

+54
-34
lines changed

src/lib.rs

+9-2
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
5454

5555
pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
5656
network_graph: Arc<NetworkGraph<L>>,
57-
logger: L
57+
logger: L,
5858
}
5959

6060
pub struct SerializedResponse {
@@ -86,7 +86,7 @@ impl<L: Deref + Clone + Send + Sync + 'static> RapidSyncProcessor<L> where L::Ta
8686
let arc_network_graph = Arc::new(network_graph);
8787
Self {
8888
network_graph: arc_network_graph,
89-
logger
89+
logger,
9090
}
9191
}
9292

@@ -145,6 +145,13 @@ pub(crate) async fn connect_to_db() -> Client {
145145
}
146146
});
147147

148+
if cfg!(test) {
149+
let schema_name = tests::db_test_schema();
150+
let schema_creation_command = format!("CREATE SCHEMA IF NOT EXISTS {}", schema_name);
151+
client.execute(&schema_creation_command, &[]).await.unwrap();
152+
client.execute(&format!("SET search_path TO {}", schema_name), &[]).await.unwrap();
153+
}
154+
148155
client.execute("set time zone UTC", &[]).await.unwrap();
149156
client
150157
}

src/lookup.rs

+6-7
Original file line numberDiff line numberDiff line change
@@ -84,9 +84,8 @@ pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaS
8484
.map(|c| c.1.announcement_message.as_ref().unwrap().contents.short_channel_id as i64)
8585
.collect::<Vec<_>>()
8686
};
87-
if cfg!(test) {
88-
log_info!(logger, "Channel IDs: {:?}", channel_ids);
89-
}
87+
#[cfg(test)]
88+
log_info!(logger, "Channel IDs: {:?}", channel_ids);
9089
log_info!(logger, "Last sync timestamp: {}", last_sync_timestamp);
9190
let last_sync_timestamp_float = last_sync_timestamp as f64;
9291

@@ -166,7 +165,7 @@ pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaS
166165
let scid: i64 = current_row.get("short_channel_id");
167166
let current_seen_timestamp = current_row.get::<_, i64>("seen") as u32;
168167

169-
log_trace!(logger, "Channel {} with first update to complete bidirectional data since last sync seen at: {}", scid, current_seen_timestamp);
168+
log_gossip!(logger, "Channel {} with first update to complete bidirectional data since last sync seen at: {}", scid, current_seen_timestamp);
170169

171170
// the newer of the two oldest seen directional updates came after last sync timestamp
172171
let current_channel_delta = delta_set.entry(scid as u64).or_insert(ChannelDelta::default());
@@ -209,7 +208,7 @@ pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaS
209208
let current_row = row_res.unwrap();
210209
let scid: i64 = current_row.get("short_channel_id");
211210

212-
log_trace!(logger, "Channel {} with newest update in less recently updated direction being at least 6 days ago", scid);
211+
log_gossip!(logger, "Channel {} with newest update in less recently updated direction being at least 6 days ago", scid);
213212

214213
// annotate this channel as requiring that reminders be sent to the client
215214
let current_channel_delta = delta_set.entry(scid as u64).or_insert(ChannelDelta::default());
@@ -357,14 +356,14 @@ pub(super) async fn fetch_channel_updates<L: Deref>(delta_set: &mut DeltaSet, cl
357356
seen: current_seen_timestamp,
358357
update: unsigned_channel_update.clone(),
359358
});
360-
log_trace!(logger, "Channel {} latest update in direction 0: {} (seen: {})", scid, unsigned_channel_update.timestamp, current_seen_timestamp)
359+
log_gossip!(logger, "Channel {} latest update in direction 0: {} (seen: {})", scid, unsigned_channel_update.timestamp, current_seen_timestamp)
361360
} else if direction && !previously_seen_directions.1 {
362361
previously_seen_directions.1 = true;
363362
update_delta.latest_update_after_seen = Some(UpdateDelta {
364363
seen: current_seen_timestamp,
365364
update: unsigned_channel_update.clone(),
366365
});
367-
log_trace!(logger, "Channel {} latest update in direction 1: {} (seen: {})", scid, unsigned_channel_update.timestamp, current_seen_timestamp)
366+
log_gossip!(logger, "Channel {} latest update in direction 1: {} (seen: {})", scid, unsigned_channel_update.timestamp, current_seen_timestamp)
368367
}
369368
}
370369

src/serialization.rs

+3-6
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,11 @@ use std::ops::Deref;
55

66
use bitcoin::BlockHash;
77
use lightning::ln::msgs::{UnsignedChannelAnnouncement, UnsignedChannelUpdate};
8-
use lightning::log_trace;
8+
use lightning::log_gossip;
99
use lightning::util::logger::Logger;
1010
use lightning::util::ser::{BigSize, Writeable};
1111
use crate::config;
1212

13-
use crate::config;
1413
use crate::lookup::{DeltaSet, DirectedUpdateDelta};
1514

1615
pub(super) struct SerializationSet {
@@ -143,14 +142,12 @@ pub(super) fn serialize_delta_set<L: Deref>(delta_set: DeltaSet, last_sync_times
143142
let current_announcement_seen = channel_announcement_delta.seen;
144143
let is_new_announcement = current_announcement_seen >= last_sync_timestamp;
145144
let is_newly_included_announcement = if let Some(first_update_seen) = channel_delta.first_bidirectional_updates_seen {
146-
#[cfg(test)]
147-
log_trace!(logger, "Channel {} first bidirectional update seen: {}", scid, first_update_seen);
145+
log_gossip!(logger, "Channel {} first bidirectional update seen: {}", scid, first_update_seen);
148146
first_update_seen >= last_sync_timestamp
149147
} else {
150148
false
151149
};
152-
#[cfg(test)]
153-
log_trace!(logger, "Channel {} announcement seen at {} (new: {}, newly included: {})", scid, current_announcement_seen, is_new_announcement, is_newly_included_announcement);
150+
log_gossip!(logger, "Channel {} announcement seen at {} (new: {}, newly included: {})", scid, current_announcement_seen, is_new_announcement, is_newly_included_announcement);
154151
let send_announcement = is_new_announcement || is_newly_included_announcement;
155152
if send_announcement {
156153
serialization_set.latest_seen = max(serialization_set.latest_seen, current_announcement_seen);

src/tests/mod.rs

+36-19
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,14 @@
22
#![allow(unused_imports)]
33
//! Multi-module tests that use database fixtures
44
5+
use std::cell::{RefCell, RefMut};
56
use std::sync::Arc;
67
use std::time::{SystemTime, UNIX_EPOCH};
78
use bitcoin::{BlockHash, Network};
89
use bitcoin::secp256k1::ecdsa::Signature;
910
use bitcoin::secp256k1::{Secp256k1, SecretKey};
1011
use bitcoin::hashes::Hash;
12+
use bitcoin::hashes::hex::ToHex;
1113
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
1214
use lightning::ln::features::ChannelFeatures;
1315
use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
@@ -21,6 +23,10 @@ use crate::types::{GossipMessage, tests::TestLogger};
2123

2224
const CLIENT_BACKDATE_INTERVAL: u32 = 3600 * 24 * 7; // client backdates RGS by a week
2325

26+
thread_local! {
27+
static DB_TEST_SCHEMA:RefCell<Option<String>> = RefCell::new(None);
28+
}
29+
2430
fn blank_signature() -> Signature {
2531
Signature::from_compact(&[0u8; 64]).unwrap()
2632
}
@@ -33,6 +39,29 @@ fn current_time() -> u32 {
3339
SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs() as u32
3440
}
3541

42+
pub(crate) fn db_test_schema() -> String {
43+
DB_TEST_SCHEMA.with(|suffix_reference| {
44+
let mut suffix_option = suffix_reference.borrow_mut();
45+
match suffix_option.as_ref() {
46+
None => {
47+
let current_time = SystemTime::now();
48+
let unix_time = current_time.duration_since(UNIX_EPOCH).expect("Time went backwards");
49+
let timestamp_seconds = unix_time.as_secs();
50+
let timestamp_nanos = unix_time.as_nanos();
51+
let preimage = format!("{}", timestamp_nanos);
52+
let suffix = Sha256dHash::hash(preimage.as_bytes()).into_inner().to_hex();
53+
// the schema must start with a letter
54+
let schema = format!("test_{}_{}", timestamp_seconds, suffix);
55+
suffix_option.replace(schema.clone());
56+
schema
57+
}
58+
Some(suffix) => {
59+
suffix.clone()
60+
}
61+
}
62+
})
63+
}
64+
3665
fn generate_announcement(short_channel_id: u64) -> ChannelAnnouncement {
3766
let secp_context = Secp256k1::new();
3867

@@ -88,25 +117,13 @@ fn generate_update(scid: u64, direction: bool, timestamp: u32, expiry_delta: u16
88117
}
89118

90119
async fn clean_test_db() {
91-
let connection_config = config::db_connection_config();
92-
let (client, connection) = connection_config.connect(NoTls).await.unwrap();
93-
94-
tokio::spawn(async move {
95-
if let Err(e) = connection.await {
96-
panic!("connection error: {}", e);
97-
}
98-
});
99-
100-
client.query("TRUNCATE TABLE channel_announcements RESTART IDENTITY CASCADE", &[]).await.unwrap();
101-
client.query("TRUNCATE TABLE channel_updates RESTART IDENTITY CASCADE", &[]).await.unwrap();
102-
client.query("TRUNCATE TABLE config RESTART IDENTITY CASCADE", &[]).await.unwrap();
120+
let client = crate::connect_to_db().await;
121+
let schema = db_test_schema();
122+
client.execute(&format!("DROP SCHEMA IF EXISTS {} CASCADE", schema), &[]).await.unwrap();
103123
}
104124

105125
#[tokio::test]
106126
async fn test_trivial_setup() {
107-
// start off with a clean slate
108-
clean_test_db().await;
109-
110127
let logger = Arc::new(TestLogger::new());
111128
let network_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
112129
let network_graph_arc = Arc::new(network_graph);
@@ -149,12 +166,12 @@ async fn test_trivial_setup() {
149166
let update_result = rgs.update_network_graph(&serialization.data).unwrap();
150167
println!("update result: {}", update_result);
151168
// the update result must be a multiple of our snapshot granularity
152-
assert_eq!(update_result % config::SNAPSHOT_CALCULATION_INTERVAL, 0);
169+
assert_eq!(update_result % config::snapshot_generation_interval(), 0);
153170
assert!(update_result < timestamp);
154171

155172
let timestamp_delta = timestamp - update_result;
156173
println!("timestamp delta: {}", timestamp_delta);
157-
assert!(timestamp_delta < config::SNAPSHOT_CALCULATION_INTERVAL);
174+
assert!(timestamp_delta < config::snapshot_generation_interval());
158175

159176
let readonly_graph = client_graph_arc.read_only();
160177
let channels = readonly_graph.channels();
@@ -200,7 +217,7 @@ async fn test_unidirectional_intermediate_update_consideration() {
200217
network_graph_arc.update_channel_unsigned(&update_3.contents).unwrap();
201218
network_graph_arc.update_channel_unsigned(&update_4.contents).unwrap();
202219

203-
receiver.send(GossipMessage::ChannelAnnouncement(announcement)).await.unwrap();
220+
receiver.send(GossipMessage::ChannelAnnouncement(announcement, Some(current_timestamp))).await.unwrap();
204221
receiver.send(GossipMessage::ChannelUpdate(update_1)).await.unwrap();
205222
receiver.send(GossipMessage::ChannelUpdate(update_2)).await.unwrap();
206223
receiver.send(GossipMessage::ChannelUpdate(update_3)).await.unwrap();
@@ -247,7 +264,7 @@ async fn test_unidirectional_intermediate_update_consideration() {
247264
println!("last sync timestamp: {}", last_sync_timestamp);
248265
println!("next timestamp: {}", next_timestamp);
249266
// the update result must be a multiple of our snapshot granularity
250-
assert_eq!(next_timestamp % config::SNAPSHOT_CALCULATION_INTERVAL, 0);
267+
assert_eq!(next_timestamp % config::snapshot_generation_interval(), 0);
251268

252269
let readonly_graph = client_graph_arc.read_only();
253270
let channels = readonly_graph.channels();

0 commit comments

Comments (0)